getting correct value from field - python

I am having an issue with Elasticsearch.
I can finally post to Elasticsearch, but the data is being written nested under an 'S' key instead of as the plain value.
When I check my CloudWatch logs I see this:
{
  "Records": [
    {
      "eventID": "d5d4955d706dd71348760a482f33735f",
      "eventName": "INSERT",
      "eventVersion": "1.1",
      "eventSource": "aws:dynamodb",
      "awsRegion": "us-east-1",
      "dynamodb": {
        "ApproximateCreationDateTime": 1613816980.0,
        "Keys": {
          "uuid": {
            "S": "c140a68de65301465cd1cd3d97cc4107"
          }
        },
        "NewImage": {
          "SmsStatus": {
            "S": "received"
          },
          "streetname": {
            "S": "King tut"
          },
          "timestampMessage": {
            "S": "Sat Feb 20 2021 10:29:39 GMT+0000 (Coordinated Universal Time)"
          }
        }
      }
    }
  ]
}
This is the code I am using:
import os
import boto3
import requests
from requests_aws4auth import AWS4Auth
es_host = os.environ['ES_HOST']
es_index = "metadata"
es_type = "episodes"
url = es_host + '/' + es_index + '/' + es_type + '/'
region = 'us-east-1'
service = 'es'
credentials = boto3.Session().get_credentials()
awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)
def lambda_handler(event, context):
    print(event)
    for record in event['Records']:
        id = str(record['dynamodb']['Keys']['uuid']['S'])
        if record['eventName'] == 'REMOVE':
            res = requests.delete(url + id, auth=awsauth)
        else:
            document = record['dynamodb']['NewImage']
            res = requests.put(url + id, auth=awsauth, json=document, headers={"Content-Type": "application/json"})
I'm sure this is something small to fix, but I'm not certain how. If someone could assist, thanks.
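One possible explanation, offered as a guess rather than a confirmed fix: the 'S' keys in NewImage are DynamoDB's AttributeValue type markers, and the stream record is being indexed as-is, wrappers and all. A minimal sketch of unwrapping the record with boto3's TypeDeserializer before the PUT (everything else mirrors the handler above):

from boto3.dynamodb.types import TypeDeserializer

deserializer = TypeDeserializer()

def unwrap(new_image):
    # Turns {"streetname": {"S": "King tut"}} into {"streetname": "King tut"}.
    return {key: deserializer.deserialize(value) for key, value in new_image.items()}

# Inside lambda_handler, instead of indexing NewImage directly:
# document = unwrap(record['dynamodb']['NewImage'])
# res = requests.put(url + id, auth=awsauth, json=document,
#                    headers={"Content-Type": "application/json"})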

Related

How do I loop through this API to get more results?

I am trying to collect all the results from this API into one big list. Right now the default limit is set to 50, and even with the paginated section in the code I am still only getting 50 results. How can I get more results?
nextRecordKey allows paginating through the API.
import os
import requests
import json
from http import HTTPStatus
client_id = ""
client_secret = ""
os.environ["DX_GATEWAY"] = "http://api.com"
os.environ["DX_CLIENT_ID"] = client_id
os.environ["DX_CLIENT_SECRET"] = client_secret
dx_request = requests.Request()
path = "/path/to/api"
params = {
    "Type": "abc",
    "Id": "def",
    "limit": 999,
    "Category": "abc"
}
params_str = "&".join([f"{k}={v}" for k, v in params.items()])
url = "?".join([path, params_str])
vulns = requests.get(
    url=url,
    version=1,
)
if vulns.status_code != int(HTTPStatus.OK):
    raise RuntimeError("API call did not return expected response: " + str(vulns))
## loop through paginated API ##.
response_data = vulns.json()
while vulns["nextRecordKey"]:
    vulns = requests.get(vulns["nextRecordKey"]).json()
print(json.dumps(response_data))
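A hedged sketch of how such a pagination loop is commonly written, accumulating every page into one list. The field names are assumptions about this particular API (nextRecordKey sent back as a query parameter, records listed under "data") and should be checked against its documentation:

import requests

def fetch_all(url, params):
    # Collect every page of results into a single list.
    all_records = []
    next_key = None
    while True:
        page_params = dict(params)
        if next_key:
            # Assumption: the API accepts the key back as a query parameter.
            page_params["nextRecordKey"] = next_key
        resp = requests.get(url, params=page_params)
        resp.raise_for_status()
        body = resp.json()
        # Assumption: each page carries its records under "data".
        all_records.extend(body.get("data", []))
        next_key = body.get("nextRecordKey")
        if not next_key:
            break
    return all_records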

Python - Converting urllib to requests

I'm writing code to access the MS365 API and the Python code example uses urllib. I want to use requests instead, but I'm not sure how the urllib call translates into requests, as my attempts to do so have failed.
The code example can be found here:
https://learn.microsoft.com/en-us/microsoft-365/security/defender-endpoint/run-advanced-query-sample-python?view=o365-worldwide#get-token
import json
import urllib.request
import urllib.parse
tenantId = '00000000-0000-0000-0000-000000000000' # Paste your own tenant ID here
appId = '11111111-1111-1111-1111-111111111111' # Paste your own app ID here
appSecret = '22222222-2222-2222-2222-222222222222' # Paste your own app secret here
url = "https://login.microsoftonline.com/%s/oauth2/token" % (tenantId)
resourceAppIdUri = 'https://api.securitycenter.microsoft.com'
body = {
    'resource' : resourceAppIdUri,
    'client_id' : appId,
    'client_secret' : appSecret,
    'grant_type' : 'client_credentials'
}
data = urllib.parse.urlencode(body).encode("utf-8")
req = urllib.request.Request(url, data)
response = urllib.request.urlopen(req)
jsonResponse = json.loads(response.read())
aadToken = jsonResponse["access_token"]
IIUC, this should work the same:
import requests
tenantId = '00000000-0000-0000-0000-000000000000' # Paste your own tenant ID here
appId = '11111111-1111-1111-1111-111111111111' # Paste your own app ID here
appSecret = '22222222-2222-2222-2222-222222222222' # Paste your own app secret here
url = "https://login.microsoftonline.com/%s/oauth2/token" % (tenantId)
resourceAppIdUri = 'https://api.securitycenter.microsoft.com'
params = {
    'resource' : resourceAppIdUri,
    'client_id' : appId,
    'client_secret' : appSecret,
    'grant_type' : 'client_credentials'
}
response = requests.get(url, params)
jsonResponse = response.json()
aadToken = jsonResponse["access_token"]
Modifying @BeRT2me's answer has made this work:
import requests
tenantId = '00000000-0000-0000-0000-000000000000' # Paste your own tenant ID here
appId = '11111111-1111-1111-1111-111111111111' # Paste your own app ID here
appSecret = '22222222-2222-2222-2222-222222222222' # Paste your own app secret here
url = "https://login.microsoftonline.com/%s/oauth2/token" % (tenantId)
resourceAppIdUri = 'https://api.securitycenter.microsoft.com'
data = {
    'resource' : resourceAppIdUri,
    'client_id' : appId,
    'client_secret' : appSecret,
    'grant_type' : 'client_credentials'
}
response = requests.post(url=url, data=data)
jsonResponse = response.json()
aadToken = jsonResponse["access_token"]
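For anyone wondering why the switch from get to post was needed (standard library behavior, not something stated in the thread): urllib.request.Request(url, data) issues a POST with a form-encoded body whenever data is supplied, so the faithful requests translation is requests.post(url, data=body) rather than a GET with query parameters. A tiny sketch illustrating the point with a placeholder URL and a trimmed payload:

import urllib.parse
import urllib.request

token_url = "https://login.microsoftonline.com/<tenant-id>/oauth2/token"  # placeholder
body = {'grant_type': 'client_credentials'}  # trimmed example payload

# urllib: supplying data switches the request to POST with a form-encoded body.
req = urllib.request.Request(token_url, urllib.parse.urlencode(body).encode("utf-8"))
print(req.get_method())  # -> "POST"

# requests: the equivalent call is requests.post(token_url, data=body), which also
# form-encodes the body; requests.get(token_url, params=body) would instead put the
# fields in the query string, which is why the GET version fails.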

python gspread updating multiple cells from response body

I am using this Python script to take a response from the Progresso API:
http://docs.progresso.apiary.io/#reference/behaviour/behaviour-events-collection/get-behaviour-events
from urllib2 import Request, urlopen
import smtplib
import gspread
from oauth2client.service_account import ServiceAccountCredentials

headers = {
    'Authorization': 'Bearer [CURRENT_TOKEN]'
}
request = Request('https://private-anon-ae5edf57e7-progresso.apiary-mock.com/BMEvents/?Behaviour=new', headers=headers)
response_body = urlopen(request).read()
scope = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('ProgressoAPI-2f6ecaa6635c.json', scope)
gc = gspread.authorize(credentials)
wks = gc.open("Progresso Test").sheet1
wks.clear()
cell_list = wks.range('A1:H20')
for cell in cell_list:
    cell.value = response_body
wks.update_cells(cell_list)
I know cell.value = response_body is wrong, and I don't know how to get it right - I am stuck.
It appears in every cell like this:
"{
""BehaviourEntryId"": 13798177,
""LearnerId"": 245277,
""LearnerCode"": ""2009-0080"",
""RegGroup"": ""U6-RWE"",
""Behaviour"": ""Negative"",
""IncidentDate"": ""2017-02-07"",
""Subject"": ""BE"",
""Location"": ""CLS"",
""Published"": ""Yes"",
""Creator"": ""DhDr"",
""Editor"": null,
""Assignee"": ""DiRo"",
""Status"": ""Completed"",
""Details"": [
{
""Category"": ""CL"",
""Type"": ""CLatt"",
""Severity"": ""S2"",
""point"": 0
},
{
""Category"": ""CL"",
""Type"": ""CLBEH"",
""Severity"": ""S2"",
""point"": 2
}
],
""Comments"": [
{
""BehaviourEntryCommentId"": 5648278,
""Confidential"": true,
""Comment"": ""Asked to go to the toilet and went to the one furthest away just to waste time.""
},
{
""BehaviourEntryCommentId"": 5648279,
""Confidential"": false,
""Comment"": ""Spat gum out on floor""
},
{
""BehaviourEntryCommentId"": 5648280,
""Confidential"": false,
""Comment"": ""Was rude to memeber of Staff""
}
],
""Actions"": [
""HTO"",
""ISO""
]
}"
How do I split the text across the cell range the way I want and bulk update it?
If you mean something like two columns, with one cell holding "BehaviourEntryId" and the cell next to it holding 13798177, you can try something like this:
import json
response = json.loads(response_body) #decode the json response string, returns a dict
response_pairs = list(response.items())  # note the call: items(), not items
for i in range(1, len(response_pairs) + 1):
    current_pair = response_pairs[i - 1]
    current_key = current_pair[0]
    current_value = current_pair[1]
    wks.update_acell('A{}'.format(i), current_key)
    wks.update_acell('B{}'.format(i), current_value)
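A follow-up thought, not part of the original answer: update_acell issues one API call per cell, which gets slow once the response grows. A hedged sketch of the same key/value layout done as a single batch write, reusing the wks.range / update_cells pattern from the question (nested values such as Details are just dumped to a JSON string here for simplicity):

import json

response = json.loads(response_body)
pairs = list(response.items())

# One Cell object per sheet cell, returned in row-major order: A1, B1, A2, B2, ...
cell_list = wks.range('A1:B{}'.format(len(pairs)))
for i, (key, value) in enumerate(pairs):
    cell_list[2 * i].value = key
    # Flatten lists/dicts (e.g. Details, Comments) into a JSON string for the cell.
    if isinstance(value, (list, dict)):
        value = json.dumps(value)
    cell_list[2 * i + 1].value = value

# A single batched API call instead of one call per cell.
wks.update_cells(cell_list)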

adaccount/reportstats is deprecated for versions v2.4 and higher

I'm trying to follow some examples from the Python Facebook Marketing API, but when I run:
i_async_job = account.get_insights(params={'level': 'adgroup'}, async=True)
r_async_job = account.get_report_stats(
    params={
        'data_columns': ['adgroup_id'],
        'date_preset': 'last_30_days'
    },
    async=True
)
I'm getting
Status: 400
Response:
{
    "error": {
        "message": "(#12) adaccount/reportstats is deprecated for versions v2.4 and higher",
        "code": 12,
        "type": "OAuthException"
    }
}
Even from Facebook I only found this page, and there are only curl examples.
Is there a working example on how to get data from Insights edge with the Python Ads API?
Here is a full example of how to export some insights asynchronously from the new Insights endpoints:
from facebookads import test_config as config
from facebookads.objects import *
import time
account_id = <YOUR_ACCOUNT_ID>
account_id = 'act_' + str(account_id)
fields = [
    Insights.Field.impressions,
    Insights.Field.clicks,
    Insights.Field.actions,
    Insights.Field.spend,
    Insights.Field.campaign_group_name,
]
params = {
    'date_preset': Insights.Preset.last_7_days,
    'level': Insights.Level.adgroup,
    'sort_by': 'date_start',
    'sort_dir': 'desc',
}
ad_account = AdAccount(account_id)
job = ad_account.get_insights(fields=fields, params=params, async=True)
insights = None
while insights is None:
    time.sleep(1)
    job.remote_read()
    completion = job[AsyncJob.Field.async_percent_completion]
    print("Percent done: " + str(completion))
    if int(completion) == 100:  # '==' rather than 'is' for comparing integers
        insights = job.get_result(params={'limit': 100})

for ad_insight in insights:
    print(ad_insight)
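Two side notes, added here rather than taken from the answer: async became a reserved word in Python 3.7, so the async=True keyword only parses on older Pythons (newer SDK releases renamed the flag; check your installed version for the current name), and the polling loop as written will spin forever if the job stalls. A sketch of the same loop bounded by a timeout, using only the objects already defined above:

import time

MAX_WAIT_SECONDS = 300
deadline = time.time() + MAX_WAIT_SECONDS

insights = None
while insights is None:
    if time.time() > deadline:
        raise RuntimeError("Insights job did not finish within %d seconds" % MAX_WAIT_SECONDS)
    time.sleep(1)
    job.remote_read()
    completion = int(job[AsyncJob.Field.async_percent_completion])
    print("Percent done: " + str(completion))
    if completion == 100:
        insights = job.get_result(params={'limit': 100})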

YouTube API video upload error: parseError w/ python

The example code for Google's YouTube Data API is a piece of junk. It's so complicated and tied to the OAuth redirect flow that I can't use it. I'm trying to go raw with the requests package and not getting far.
I've followed the instructions exactly (as far as I can tell), with the following code:
import json
import os
import sys
import urllib
import requests
payload_file = None
payload = None
print 'Loading Config'
# Get the directory path of this file. When using any relative file paths make
# sure they are relative to current_dir so that the script can be run from any CWD.
current_dir = os.path.dirname(os.path.abspath(__file__))
# Reads in the config.json file then parses it
config = json.loads(open(os.path.join(current_dir, '..', 'config.json')).read())
print 'Parsing Payload'
for i in range(len(sys.argv)):
    if sys.argv[i] == "--json" and (i + 1) < len(sys.argv):
        payload = json.loads(sys.argv[i + 1])
    elif sys.argv[i] == "-payload" and (i + 1) < len(sys.argv):
        payload_file = sys.argv[i + 1]
        with open(payload_file, 'r') as f:
            payload = json.loads(f.read())
        break
print 'Configuring youtube with token {0}'.format(payload['token'])
print 'Downloading video...'
# See how big it is
f = urllib.urlopen(payload['url'])
content_length = int(f.headers["Content-Length"])
# Download it
# urllib.urlretrieve(payload['url'], "video.mp4")
metadata = {
    'snippet' : {
        'title': payload['title'],
        "categoryId": 22
    },
    'status' : {
        "privacyStatus": "public",
        "embeddable": True,
        "license": "youtube"
    }
}
if 'tags' in payload:
    metadata['snippet']['tags'] = payload['tags']
if 'description' in payload:
    metadata['snippet']['description'] = payload['description']
headers = {
    'Authorization' : 'Bearer {0}'.format(payload['token']),
    'Content-Type' : 'application/json; charset=UTF-8',
    'Content-Length' : json.dumps(metadata).__len__(),
    'X-Upload-Content-Length' : content_length,
    'X-Upload-Content-Type' : 'video/*',
}
print 'Attempting to upload video'
print headers
# upload video file
r = requests.post('https://www.googleapis.com/upload/youtube/v3/videos?uploadType=resumable&part=snippet,status', data=metadata, headers=headers);
print "RESPONSE!"
print r.text
# files = {
# 'file': video_file,
# }
# r = requests.post('https://www.googleapis.com/upload/youtube/v3/videos', data={ "video" : video }, headers=headers);
Obviously it's not finished, but it's dying on the metadata upload request with the following output:
Loading Config
Parsing Payload
Configuring youtube with token <access-token>
Downloading video...
Attempting to upload video
{'X-Upload-Content-Length': 51998563, 'Content-Length': 578, 'Content-Type': 'application/json; charset=UTF-8', 'X-Upload-Content-Type': 'video/*', 'Authorization': 'Bearer <access-token>'}
RESPONSE!
{
  "error": {
    "errors": [
      {
        "domain": "global",
        "reason": "parseError",
        "message": "Parse Error"
      }
    ],
    "code": 400,
    "message": "Parse Error"
  }
}
This error is not even listed in their "Errors" docs.
What is wrong with my code?
Here is an example in Python that works. It assumes you've already done the OAuth part though.
import requests
from os import fstat
import json
# Video opened in binary mode; auth_data comes from the earlier OAuth step.
fi = open('myvideo.mp4', 'rb')
base_headers = {
    'Authorization': '%s %s' % (auth_data['token_type'],
                                auth_data['access_token']),
    'content-type': 'application/json'
}
initial_headers = base_headers.copy()
initial_headers.update({
    'x-upload-content-length': str(fstat(fi.fileno()).st_size),  # header values must be strings
    'x-upload-content-type': 'video/mp4'
})
# First request creates a resumable upload session and sends the metadata.
initial_resp = requests.post(
    'https://www.googleapis.com/upload/youtube/v3/videos?uploadType=resumable&part=snippet,status,contentDetails',
    headers=initial_headers,
    data=json.dumps({
        'snippet': {
            'title': 'my title',
        },
        'status': {
            'privacyStatus': 'unlisted',
            'embeddable': True
        }
    })
)

# Second request PUTs the file bytes to the session URL from the Location header.
upload_url = initial_resp.headers['location']
resp = requests.put(
    upload_url,
    headers=base_headers,
    data=fi
)
fi.close()
The above is great; just adding that you can also get the YouTube id from the response (for future use):
cont = json.loads(resp.content)
youtube_id = cont['id']
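As an aside on the original parseError (my reading of it, not something stated in the thread): requests.post(..., data=metadata) is handed a dict, so requests form-encodes it as key=value pairs even though the Content-Type header promises JSON, and that mismatch is what the API rejects. Serializing the body yourself, or letting requests do it via json=, lines the body up with the header:

import json
import requests

url = 'https://www.googleapis.com/upload/youtube/v3/videos?uploadType=resumable&part=snippet,status'
metadata = {'snippet': {'title': 'my title'}, 'status': {'privacyStatus': 'public'}}

# Either serialize the body explicitly to match the declared JSON Content-Type...
headers = {
    'Authorization': 'Bearer <access-token>',
    'Content-Type': 'application/json; charset=UTF-8',
}
r = requests.post(url, data=json.dumps(metadata), headers=headers)

# ...or let requests serialize the dict and set the Content-Type header itself.
r = requests.post(url, json=metadata, headers={'Authorization': 'Bearer <access-token>'})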
