Using timestamp, json e python - python

This script makes the requisition fligts google every 1h, using time.sleep (3600) and generates a txt file with all phrases
he rolled over a day and a half.
I want do this properly using TIMESTAMP. Someone can help me?
import urllib
import urllib2
import json
import time
while 1:
url = "https://www.googleapis.com/qpxExpress/v1/trips/search?key=AIzaSyA3758yM14aTX7aI9_v5AvKI2X1m56HszI"
code = {
"request": {
"passengers": {
"adultCount": 1,
"childCount": 1
},
"slice": [
{
"origin": "SSA",
"destination": "GRU",
"date": "2015-06-19",
"permittedDepartureTime":
{
"kind": "qpxexpress#timeOfDayRange",
"earliestTime": "22:00",
"latestTime": "23:00"
}
},
{
"origin": "GRU",
"destination": "SSA",
"date": "2015-06-30",
"permittedDepartureTime":
{
"kind": "qpxexpress#timeOfDayRange",
"earliestTime": "05:00",
"latestTime": "12:00"
}
}
],
"solutions": 3
}
}
#hoje = "%s" % (time.strftime("%Y_%m_%d"))
jsonreq = json.dumps(code, encoding = 'utf-8')
req = urllib2.Request(url, jsonreq, {'Content-Type': 'application/json'})
flight = urllib2.urlopen(req)
response = flight.read()
flight.close()
#print(response)
print("----------------")
texto=(response)
v_file= open("ssaGRU.json" ,"a")
#hora = time.strftime("%H:%M:%S %Z")
v_file.write(texto)
#v_file.write("[%s] Hora do json.\r\n" % (hora))
v_file.close()
time.sleep(15)

current_time = time.strftime("%H:%M", time.localtime())
v_file = open("ssaGRU.json", "a")
v_file.write(str(current_time) + ': ')
v_file.write(texto + '\n')
v_file.close()
This will print your current time before every line inputted, and adds a an empty line at the end so your data from different times doesn't stay on one line.
You can also add %m.%d.%y to current_time if you need. In case texto isn't a string, make sure you add str(texto).

Related

Concatenate Items per Page from Json Path in a dataframe

I have a JSON Path avaliable by Dock API https://lighthouse.dock.tech/docs/cards-and-digital-banking-api-reference/1403b37717e98-list-pix-infractions and its have one limitation of 20 rows or registers per page - MaxItemsPerPage = 20- but i have more than 1000 items - totalItems = 1050.
{
"previousPage": 0,
"currentPage": 0,
"nextPage": 1,
"last": false,
"totalPages": 1,
"totalItems": 1050,
"maxItemsPerPage": 20,
"totalItemsPage": 1,
"items": [
{
"status": "OPEN",
"creditedParticipant": "08706265",
"infractionType": "FRAUD",
"reportedBy": "DEBITED_PARTICIPANT",
"lastModified": "2020-01-17T10:01:00Z",
"debitedParticipant": "99999010",
"creationTime": "2020-01-17T10:00:00Z",
"endToEndId": "E9999901012341234123412345678900",
"reportDetails": "Details that can help the receiving participant to analyze the id",
"responseTime": "2020-01-17T11:00:00Z",
"analysisResult": "AGREED",
"id": "91d65e98-97c0-4b0f-b577-73625da1f9fc",
"correlationId": "evp",
"analysisDetails": "Details of the infraction analysis"
}
]
}
how can i concatenate a set of registers in a multiples pages by period? I have this in python:
import http.client
conn = http.client.HTTPSConnection("pix-baas.caradhras.io")
headers = {
'Content-Type': "application/json",
'Authorization': "334jh89d0"
}
conn.request("GET", "/pix-infractions/v1/list?page=1&from=2020-01-17T10%3A01%3A00Zv&to=2022-01-17T10%3A01%3A00Z", headers=headers)
res = conn.getresponse()
data = res.read()
print(data.decode("utf-8"))

Adding variables together from 1 post request (python)

I have some code, it works as expected, but im having a difficult time trying to add my global_criticalvulnerabilities to a master variable, that adds up ALL post request responses
from logging import critical
from unittest import result
import requests, json
import requests
import pandas as pd
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {
'Content-Type': 'application/json',
'Accept': 'application/json;charset=UTF-8',
'Authorization': 'Basic auth=='
}
payload = {
"match" : "all",
"filters" : [
{"field": "site-id",
"operator": "in",
"values": [
"22"]},
{"field": "owner-tag",
"operator": "contains",
"value":"DESKTOP_SUPPORT"},
{"field": "operating-system",
"operator": "contains",
"value":"microsoft"},
{"field": "vulnerability-category",
"operator": "does-not-contain",
"value":"microsoft_patch"},]}
global_CritiicalVulnerabilities1 = ""
global_CritiicalVulnerabilities = ""
global_SevereVulnerabilities = ""
global_ModerateVulnerabilities = ""
if global_CritiicalVulnerabilities == 0: #if nothing then continuely add up each criticalvuln from each post request/response
for page in range(1,249):
url1 = 'https://url:3780/api/3/assets/search?size=2&page=%s'%page
print(url1)
response = requests.post(url1, headers=headers, json=payload, verify=False)
json_response = response.json()
vulnerabilities = []
resources = json_response['resources']
for d in resources:
if 'vulnerabilities' in d:
vulnerabilities.append(d['vulnerabilities'])
criticalVuln = sum(x.get('critical', 0) for x in vulnerabilities)
print(vulnerabilities)
criticalVuln = str(criticalVuln)
global_CritiicalVulnerabilities1 = criticalVuln + criticalVuln
print(global_CritiicalVulnerabilities)
print("The grand total Critical Vuln: " + global_CritiicalVulnerabilities)
#print("The grand total Severe Vuln: " + totalSevere)
#print("The grand total Moderate Vuln: " + totalModerate)
Could someone help guide me in the direction i should go? I dont really want the answer right away, trying to learn and understand how i should be thinking like a computer...
im trying to add up criticalVuln to itself so it has the grand total of every critical Vulnerability
i tried this as well, and its not doing what i want it to do

Not updating my google sheet just getting: { "sheet1": { "id": 2 } }?

I've posted questions here before for this code, but every time we get past one error I get a new one. I'm not getting an error this time but it's still not updating my google sheet is there an error I'm missing or is this a problem with the Sheety API? Also, I'm coding on PyCharm using a Mac🖥 and the code is from lecture 333, of 100 days of code by Dr. Angela Yu. (don't know if that helps)
#------------New_code------------#
import requests
from datetime import datetime
GENDER = "male"
WEIGHT_KG = 58.740212
HEIGHT_CM = 177.8
AGE = 13
APP_ID = "be2*****"
API_KEY = "4fa82da*************************"
exercise_endpoint = "https://trackapi.nutritionix.com/v2/natural/exercise"
sheet_endpoint = "https://api.sheety.co/0a5644021c9c3815973ccd3f25595467/myWorkouts/sheet1"
exercise_text = input("Tell me which exercises you did: ")
headers = {
"x-app-id": APP_ID,
"x-app-key": API_KEY,
}
parameters = {
"query": exercise_text,
"gender": GENDER,
"weight_kg": WEIGHT_KG,
"height_cm": HEIGHT_CM,
"age": AGE
}
response = requests.post(exercise_endpoint, json=parameters, headers=headers)
result = response.json()
today_date = datetime.now().strftime("%d/%m/%Y")
now_time = datetime.now().strftime("%X")
bearer_headers = {
"Authorization": f"Bearer {'TOKEN'}"
}
for exercise in result["exercises"]:
sheet_inputs = {
"workout": {
"date": today_date,
"time": now_time,
"exercise": exercise["name"].title(),
"duration": exercise["duration_min"],
"calories": exercise["nf_calories"]
}
}
sheet_response = requests.post(sheet_endpoint, json={"sheet1": sheet_inputs}, headers=bearer_headers)
print(sheet_response.text)
#------------New_output------------#
Tell me which exercises you did: (I entered: i ran 3 miles)
{
"sheet1": {
"id": 2
}
}
Process finished with exit code 0

Retrieve a specific value from a JSON data and generate a CSV file

I am retrieving this data from api and it comes in JSON format. I only need the certain portion of the data and ignore all other data. Please check my Output csv how the final csv look like this. I need the result key, in that result value I need id and unid and userHierarchies field.
{
"apiVersion": "3.0",
"loggedInUser": {
"id": "api#api.com",
"unid": "192",
"userHierarchies": [
{
"hierarchyField": "Project",
"value": "Eli-f"
},
{
"hierarchyField": "Division",
"value": "DDD"
},
{
"hierarchyField": "Site",
"value": "RD02"
},
{
"hierarchyField": "Company",
"value": "Core"
},
{
"hierarchyField": "Department",
"value": "Operations"
}
]
},
"results":[
{
"id":"Random_Company_57",
"unid":"75",
"userHierarchies":[
{
"hierarchyField":"Company",
"value":"ABC Company"
},
{
"hierarchyField":"Department",
"value":"gfds"
},
{
"hierarchyField":"Project",
"value":"JKL-SDFGHJW"
},
{
"hierarchyField":"Division",
"value":"Silver RC"
},
{
"hierarchyField":"Site",
"value":"SQ06"
}
],
"preferredLanguage":"en-AU",
"prefName":"Christmas Bells",
},
{
"id":"xyz.abc#safe.net",
"unid":"98",
"userHierarchies":[
{
"hierarchyField":"Company",
"value":"ABC Company"
},
{
"hierarchyField":"Department",
"value":"PUHJ"
},
{
"hierarchyField":"Project",
"value":"RPOJ-SDFGHJW"
},
{
"hierarchyField":"Division",
"value":"Silver RC"
},
{
"hierarchyField":"Site",
"value":"SQ06"
}
],
"preferredLanguage":"en-AU",
"prefName":"Christmas Bells",
}
]
}
My Output CSV look like this:
id,unid,hierarchyField,value
Random_Company_57,75,Company,ABC Company
Random_Company_57,75,Department,gfds
Random_Company_57,75,Project,JKL-SDFGHJW
Random_Company_57,75,Division,Silver RC
Random_Company_57,75,Site,SQ06
xyz.abc#safe.net,98,Company,ABC Company
xyz.abc#safe.net,98,Department,PUHJ
xyz.abc#safe.net,98,Project,RPOJ-SDFGHJW
xyz.abc#safe.net,98,Division,Silver RC
My python Code look like this:
import requests
from pathlib import Path
from pprint import pprint
import pandas as pd
import time
import os
import argparse
parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument("-path_save", help="define where to save the file")
parser.add_argument("--verbose", help="display processing information")
start = time.time()
def GetData(URL, endPoint, path_save, verbose):
response = requests.get(URL, auth=('api#api.net', 'uojk00'),
headers={
'Content-Type': 'application/json',
'x-api-key': 'ydVtsni1blwJHb65OJBrrtV',
})
print(endPoint,response)
df = pd.DataFrame(response.json()["results"])
print(df)
df.to_csv(os.path.join(path_save,f"{endPoint}.csv"), index=False)
if __name__ == '__main__':
start = time.time()
args = parser.parse_args()
path_save = Path(args.path_save)
verbose = args.verbose
endPoint=['users']
for endPt in endPoint:
URL = "https://api.com/v10/chor/" + endPt
GetData(URL, endPt, path_save, verbose)
print("Processed time:", time.time() - start) # Total Time
Any help how I generate that CSV???
If data is your data from api you have in your question, you can use next example how to save it to CSV in required format:
df = pd.DataFrame(data["results"]).explode("userHierarchies")
df = pd.concat([df, df.pop("userHierarchies").apply(pd.Series)], axis=1)
df = df[["id", "unid", "hierarchyField", "value"]]
df.to_csv("data.csv", index=False)
Saves data.csv:
id,unid,hierarchyField,value
Random_Company_57,75,Company,ABC Company
Random_Company_57,75,Department,gfds
Random_Company_57,75,Project,JKL-SDFGHJW
Random_Company_57,75,Division,Silver RC
Random_Company_57,75,Site,SQ06
xyz.abc#safe.net,98,Company,ABC Company
xyz.abc#safe.net,98,Department,PUHJ
xyz.abc#safe.net,98,Project,RPOJ-SDFGHJW
xyz.abc#safe.net,98,Division,Silver RC
xyz.abc#safe.net,98,Site,SQ06

Grab element from json dump

I'm using the following python code to connect to a jsonrpc server and nick some song information. However, I can't work out how to get the current title in to a variable to print elsewhere. Here is the code:
TracksInfo = []
for song in playingSongs:
data = { "id":1,
"method":"slim.request",
"params":[ "",
["songinfo",0,100, "track_id:%s" % song, "tags:GPASIediqtymkovrfijnCYXRTIuwxN"]
]
}
params = json.dumps(data, sort_keys=True, indent=4)
conn.request("POST", "/jsonrpc.js", params)
httpResponse = conn.getresponse()
data = httpResponse.read()
responce = json.loads(data)
print json.dumps(responce, sort_keys=True, indent=4)
TrackInfo = responce['result']["songinfo_loop"][0]
TracksInfo.append(TrackInfo)
This brings me back the data in json format and the print json.dump brings back:
pi#raspberrypi ~/pithon $ sudo python tom3.py
{
"id": 1,
"method": "slim.request",
"params": [
"",
[
"songinfo",
"0",
100,
"track_id:-140501481178464",
"tags:GPASIediqtymkovrfijnCYXRTIuwxN"
]
],
"result": {
"songinfo_loop": [
{
"id": "-140501481178464"
},
{
"title": "Witchcraft"
},
{
"artist": "Pendulum"
},
{
"duration": "253"
},
{
"tracknum": "1"
},
{
"type": "Ogg Vorbis (Spotify)"
},
{
"bitrate": "320k VBR"
},
{
"coverart": "0"
},
{
"url": "spotify:track:2A7ZZ1tjaluKYMlT3ItSfN"
},
{
"remote": 1
}
]
}
}
What i'm trying to get is result.songinfoloop.title (but I tried that!)
The songinfo_loop structure is.. peculiar. It is a list of dictionaries each with just one key.
Loop through it until you have one with a title:
TrackInfo = next(d['title'] for d in responce['result']["songinfo_loop"] if 'title' in d)
TracksInfo.append(TrackInfo)
A better option would be to 'collapse' all those dictionaries into one:
songinfo = reduce(lambda d, p: d.update(p) or d,
responce['result']["songinfo_loop"], {})
TracksInfo.append(songinfo['title'])
songinfo_loop is a list not a dict. That means you need to call it by position, or loop through it and find the dict with a key value of "title"
positional:
responce["result"]["songinfo_loop"][1]["title"]
loop:
for info in responce["result"]["songinfo_loop"]:
if "title" in info.keys():
print info["title"]
break
else:
print "no song title found"
Really, it seems like you would want to have the songinfo_loop be a dict, not a list. But if you need to leave it as a list, this is how you would pull the title.
The result is really a standard python dict, so you can use
responce["result"]["songinfoloop"]["title"]
which should work

Categories