Adding variables together from POST request responses (Python)

I have some code that works as expected, but I'm having a difficult time adding each request's criticalVuln count into a master variable (global_CritiicalVulnerabilities) that adds up ALL the POST request responses:
import requests, json
import pandas as pd
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
headers = {
    'Content-Type': 'application/json',
    'Accept': 'application/json;charset=UTF-8',
    'Authorization': 'Basic auth=='
}
payload = {
    "match": "all",
    "filters": [
        {"field": "site-id",
         "operator": "in",
         "values": ["22"]},
        {"field": "owner-tag",
         "operator": "contains",
         "value": "DESKTOP_SUPPORT"},
        {"field": "operating-system",
         "operator": "contains",
         "value": "microsoft"},
        {"field": "vulnerability-category",
         "operator": "does-not-contain",
         "value": "microsoft_patch"},
    ]
}
global_CritiicalVulnerabilities1 = ""
global_CritiicalVulnerabilities = ""
global_SevereVulnerabilities = ""
global_ModerateVulnerabilities = ""
if global_CritiicalVulnerabilities == 0:  # if nothing, then continually add up each criticalVuln from each POST request/response
    for page in range(1, 249):
        url1 = 'https://url:3780/api/3/assets/search?size=2&page=%s' % page
        print(url1)
        response = requests.post(url1, headers=headers, json=payload, verify=False)
        json_response = response.json()
        vulnerabilities = []
        resources = json_response['resources']
        for d in resources:
            if 'vulnerabilities' in d:
                vulnerabilities.append(d['vulnerabilities'])
        criticalVuln = sum(x.get('critical', 0) for x in vulnerabilities)
        print(vulnerabilities)
        criticalVuln = str(criticalVuln)
        global_CritiicalVulnerabilities1 = criticalVuln + criticalVuln
        print(global_CritiicalVulnerabilities)

print("The grand total Critical Vuln: " + global_CritiicalVulnerabilities)
#print("The grand total Severe Vuln: " + totalSevere)
#print("The grand total Moderate Vuln: " + totalModerate)
Could someone help guide me in the direction I should go? I don't really want the answer right away; I'm trying to learn and understand how I should be thinking like a computer.
I'm trying to add criticalVuln to itself so it ends up holding the grand total of every critical vulnerability.
I tried this as well, and it's not doing what I want it to do.
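One pattern that may help, shown here as a minimal sketch (it reuses the headers and payload defined above and assumes the same endpoint): keep the running total as a number rather than a string, initialize it once before the loop, and use += to add each page's count onto it.

import requests

total_critical = 0  # grand total across ALL pages; an int, not a str

for page in range(1, 249):
    url1 = 'https://url:3780/api/3/assets/search?size=2&page=%s' % page
    response = requests.post(url1, headers=headers, json=payload, verify=False)
    resources = response.json().get('resources', [])
    # this page's critical count
    page_critical = sum(d['vulnerabilities'].get('critical', 0)
                        for d in resources if 'vulnerabilities' in d)
    total_critical += page_critical  # accumulate instead of overwrite

print("The grand total Critical Vuln: " + str(total_critical))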

Related

How to unlist (remove) a number from Truecaller using a script?

Hey guys, I am trying to unlist (remove) a number from Truecaller using the following link:
https://www.truecaller.com/unlisting
I want to automate this process, but because of Google's reCAPTCHA the requests are limited and it isn't possible. Is there any way to do this using a library, for example one of the unofficial Truecaller libraries like Trunofficial?
Unlisting your phone number at https://www.truecaller.com/unlisting requires reCAPTCHA; you can bypass it. This may be helpful:
import requests
from requests.structures import CaseInsensitiveDict
url = "https://asia-south1-truecaller-web.cloudfunctions.net/api/noneu/unlist/v1"
headers = CaseInsensitiveDict()
headers["Authorization"] = "Bearer null"
headers["Content-Type"] = "application/json"
headers["Content-Length"] = "612"
headers["accept"] = "*/*"
headers["sec-gpc"] = "1"
headers["origin"] = "https://www.truecaller.com"
headers["sec-fetch-site"] = "cross-site"
headers["sec-fetch-mode"] = "cors"
headers["sec-fetch-dest"] = "empty"
headers["referer"] = "https://www.truecaller.com/"
headers["accept-encoding"] = "gzip, deflate, br"
headers["accept-language"] = "en-IN,en-GB;q=0.9,en-US;q=0.8,en;q=0.7"
data = {
    "phoneNumber": "+919912345678",
    "recaptcha": "03ANYolqtbEiFqaQ8wBrDF3kKqkCzIaH4r79oA2hCNd80gZGENvff9fPKocccytf6QXpPvQfQ12WMvgfdP1IKggff6lTY_0ucZxFB7r6A_dbNjfp_NSYtrkU4NX1h_LBQgnCO0ALkWS8CMjaIEjhxclfeClFv4EmFNEQis1OvrSVgvB8nJipuUxGakpa0eB8yWrEQCUfy0Gs7VA2hO4VaeLRTwr6BaxYsJsCP_3-vaMP2crZDDrIm8on_0H0vqh-1S44y69b0rSM6_ornuVZxeNQkpe_3NvPjQQxqQtdyQld55OQkK67PH7OH_A7s3GVgMa0VCOuX_UdBsPkd8mKf708GgutggfggvVrbe3DrBsUnpXMYchsv_revkhknej0G_SxAtqtwQoGPtt5iKSKHRmlelDJpYuQs6Lwi-4Umn_EclRPT2iaohxZ3r8O_4jaGP9yhRiMyVkgTm6mutJn50nPFbyabjSqgC2ShlMEI7IoOqWp9g90b2bl4qw4h6k9vP4AVy36sCx2z_gksBEgxT1zsM3P77PQ_guo12k7rtFlUAmvdqhqgwowaKQFMMBfjWDo40"
}

# json= sends the dict as a JSON body; data= would form-encode it
resp = requests.post(url, headers=headers, json=data)
print(resp.json())
# {
# "lastUpdated": "2022-07-11T03:27:30.306713Z",
# "phoneNumber": "919912345678",
# "status": "unlisted"
# }

Print specific instances of a JSON response

So I am using the Urban Dictionary API, and the code works exactly as I would like; I'm going to use "chicken" as my term.
import json
import requests
url = "https://mashape-community-urban-dictionary.p.rapidapi.com/define"
querystring = {"term":"chicken"}
headers = {
    'x-rapidapi-host': "mashape-community-urban-dictionary.p.rapidapi.com",
    'x-rapidapi-key': "KEY"
}
response = requests.request("GET", url, headers=headers, params=querystring)
json_data = response.text
json_object = json.loads(json_data)
print(json.dumps(json_object, indent = 4))
If I run this, I get the following, which is the correct output, but I want to print out only the definitions.
{
    "list": [
        {
            "definition": "A [kilogram] of [cocain]. Dealers started calling kilos \"[birds]\" which then evolved into \"chicken.\"",
            "permalink": "http://chicken.urbanup.com/1180537",
            "thumbs_up": 2947,
            "sound_urls": [
                "http://api.twilio.com/2008-08-01/Accounts/ACd09691b82112e4b26fce156d7c01d0ed/Recordings/RE18c37ff43a6fc6dce8d9d533e7e4042b"
            ],
            "author": "DEKE",
            "word": "chicken",
            "defid": 1180537,
            "current_vote": "",
            "written_on": "2005-04-11T11:41:04.000Z",
            "example": "Person 1) [How much] you [got left]?\r\n\r\nPerson 2) A [quarter] chicken.",
            "thumbs_down": 941
        },
        {
            "definition": "To lack courage and [bravery]. [Unskilled], stupid, afraid, loser, [coward]",
            "permalink": "http://chicken.urbanup.com/7399878",
            "thumbs_up": 180,
            "sound_urls": [
                "http://api.twilio.com/2008-08-01/Accounts/ACd09691b82112e4b26fce156d7c01d0ed/Recordings/RE18c37ff43a6fc6dce8d9d533e7e4042b"
            ],
            "author": "Freak Out Guy",
            "word": "chicken",
            "defid": 7399878,
            "current_vote": "",
            "written_on": "2013-12-10T16:49:06.660Z",
            "example": "He was so afraid she [thought] he was [a chicken].",
            "thumbs_down": 49
        }
    ]
}
I've seen that you can do print(json_object['list'][0]['definition']), but it only prints out the first definition. How can I print out all the instances of definition, like this:
Definition 1: A [kilogram] of [cocain]. Dealers started calling kilos \"[birds]\" which then evolved into \"chicken.\"
Definition 2: To lack courage and [bravery]. [Unskilled], stupid, afraid, loser, [coward]
for idx, entry in enumerate(json_object['list'], 1):
    print(f'Definition {idx}: {entry["definition"]}')
Output:
Definition 1: A [kilogram] of [cocain]. Dealers started calling kilos "[birds]" which then evolved into "chicken."
Definition 2: To lack courage and [bravery]. [Unskilled], stupid, afraid, loser, [coward]
You don't need to use json.dumps; you can use json_object directly, like this:
import json
import requests
url = "https://mashape-community-urban-dictionary.p.rapidapi.com/define"
querystring = {"term":"chicken"}
headers = {
    'x-rapidapi-host': "mashape-community-urban-dictionary.p.rapidapi.com",
    'x-rapidapi-key': "KEY"
}
response = requests.request("GET", url, headers=headers, params=querystring)
json_data = response.text
json_object = json.loads(json_data)
#json_object = json.dumps(json_object, indent = 4)
print(json_object["list"][0]["definition"]) # you can use the for statement to get all the definitions
(Edit) An example that prints all the definitions:
import json
import requests
url = "https://mashape-community-urban-dictionary.p.rapidapi.com/define"
querystring = {"term":"chicken"}
headers = {
    'x-rapidapi-host': "mashape-community-urban-dictionary.p.rapidapi.com",
    'x-rapidapi-key': "KEY"
}
response = requests.request("GET", url, headers=headers, params=querystring)
json_data = response.text
json_object = json.loads(json_data)
#json_object = json.dumps(json_object, indent = 4)
for i in json_object["list"]:
    print(i["definition"])
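As a side note, requests can parse the response for you, so the json.loads step isn't strictly needed; a minimal equivalent sketch, assuming the same url, headers, and querystring as above:

response = requests.get(url, headers=headers, params=querystring)
json_object = response.json()  # parses the JSON body directly into a dict

for i in json_object["list"]:
    print(i["definition"])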

Update SharePoint 2013 using Python 3

I am currently trying to update a Sharepoint 2013 list.
This is the module that I am using to accomplish that task. However, when I run the POST I receive the following error:
"b'{"error":{"code":"-1, Microsoft.SharePoint.Client.InvalidClientQueryException","message":{"lang":"en-US","value":"Invalid JSON. A token was not recognized in the JSON content."}}}'"
Any idea of what I am doing wrong?
def update_item(sharepoint_user, sharepoint_password, ad_domain, site_url, sharepoint_listname):
    login_user = ad_domain + '\\' + sharepoint_user
    auth = HttpNtlmAuth(login_user, sharepoint_password)
    sharepoint_url = site_url + '/_api/web/'
    sharepoint_contextinfo_url = site_url + '/_api/contextinfo'
    headers = {
        'accept': 'application/json;odata=verbose',
        'content-type': 'application/json;odata=verbose',
        'odata': 'verbose',
        'X-RequestForceAuthentication': 'true'
    }
    r = requests.post(sharepoint_contextinfo_url, auth=auth, headers=headers, verify=False)
    form_digest_value = r.json()['d']['GetContextWebInformation']['FormDigestValue']
    item_id = 4991  # This id is one of the ids returned by the code above
    api_page = sharepoint_url + "lists/GetByTitle('%s')/items(%d)" % (sharepoint_listname, item_id)
    update_headers = {
        "Accept": "application/json; odata=verbose",
        "Content-Type": "application/json; odata=verbose",
        "odata": "verbose",
        "X-RequestForceAuthentication": "true",
        "X-RequestDigest": form_digest_value,
        "IF-MATCH": "*",
        "X-HTTP-Method": "MERGE"
    }
    r = requests.post(api_page, {'__metadata': {'type': 'SP.Data.TestListItem'}, 'Title': 'TestUpdated'}, auth=auth, headers=update_headers, verify=False)
    if r.status_code == 204:
        print(str('Updated'))
    else:
        print(str(r.status_code))
I used your code for my scenario and fixed the problem. I also faced the same issue; I think the way the data is passed for the update is not correct. Pass it like below:
json_data = {
    "__metadata": {"type": "SP.Data.TasksListItem"},
    "Title": "updated title from Python"
}
and pass json_data to requests like below:
r = requests.post(api_page, json.dumps(json_data), auth=auth, headers=update_headers, verify=False).text
Note: I used SP.Data.TasksListItem as it is my list's type. Use http://SharePointurl/_api/web/lists/getbytitle('name')/ListItemEntityTypeFullName to find the type.
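Equivalently, the json= keyword lets requests serialize the dict for you; a minimal sketch (the explicit Content-Type already set in update_headers takes precedence over the one json= would add):

# requests serializes json_data to a JSON body automatically
r = requests.post(api_page, json=json_data, auth=auth, headers=update_headers, verify=False)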

Using timestamp, JSON and Python

This script makes a request to the Google flights API every hour, using time.sleep(3600), and appends every response to a text file; it ran for about a day and a half.
I want to do this properly using a timestamp. Can someone help me?
import urllib
import urllib2
import json
import time
while 1:
    url = "https://www.googleapis.com/qpxExpress/v1/trips/search?key=AIzaSyA3758yM14aTX7aI9_v5AvKI2X1m56HszI"
    code = {
        "request": {
            "passengers": {
                "adultCount": 1,
                "childCount": 1
            },
            "slice": [
                {
                    "origin": "SSA",
                    "destination": "GRU",
                    "date": "2015-06-19",
                    "permittedDepartureTime": {
                        "kind": "qpxexpress#timeOfDayRange",
                        "earliestTime": "22:00",
                        "latestTime": "23:00"
                    }
                },
                {
                    "origin": "GRU",
                    "destination": "SSA",
                    "date": "2015-06-30",
                    "permittedDepartureTime": {
                        "kind": "qpxexpress#timeOfDayRange",
                        "earliestTime": "05:00",
                        "latestTime": "12:00"
                    }
                }
            ],
            "solutions": 3
        }
    }
    #hoje = "%s" % (time.strftime("%Y_%m_%d"))
    jsonreq = json.dumps(code, encoding='utf-8')
    req = urllib2.Request(url, jsonreq, {'Content-Type': 'application/json'})
    flight = urllib2.urlopen(req)
    response = flight.read()
    flight.close()
    #print(response)
    print("----------------")
    texto = response
    v_file = open("ssaGRU.json", "a")
    #hora = time.strftime("%H:%M:%S %Z")
    v_file.write(texto)
    #v_file.write("[%s] Hora do json.\r\n" % (hora))
    v_file.close()
    time.sleep(15)
current_time = time.strftime("%H:%M", time.localtime())
v_file = open("ssaGRU.json", "a")
v_file.write(str(current_time) + ': ')
v_file.write(texto + '\n')
v_file.close()
This will write the current time before every line of input, and it adds an empty line at the end so your data from different times doesn't end up on one line.
You can also add %m.%d.%y to current_time if you need it. In case texto isn't a string, make sure you wrap it in str(texto).
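For reference, a minimal Python 3 sketch of the same loop with a timestamp on each entry, assuming the requests library and the same url and code payload as in the question:

import time
from datetime import datetime

import requests

while True:
    resp = requests.post(url, json=code)  # url and code as defined above
    timestamp = datetime.now().isoformat(timespec='seconds')
    with open("ssaGRU.json", "a") as v_file:
        # prefix each response with the time it was fetched
        v_file.write("%s: %s\n" % (timestamp, resp.text))
    time.sleep(3600)  # wait an hour between requests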

YouTube API video upload error: parseError with Python

The example code for Google's YouTube Data API is a piece of junk. It's so complicated and tied to the OAuth redirect flow that I can't use it. I'm trying to go raw with the requests package and not getting very far.
I've followed the instructions exactly (as far as I can tell), with the following code:
import json
import os
import sys
import urllib
import requests
payload_file = None
payload = None
print 'Loading Config'
# Get the directory path of this file. When using any relative file paths make
# sure they are relative to current_dir so that the script can be run from any CWD.
current_dir = os.path.dirname(os.path.abspath(__file__))
# Reads in the config.json file then parses it
config = json.loads(open(os.path.join(current_dir, '..', 'config.json')).read())
print 'Parsing Payload'
for i in range(len(sys.argv)):
    if sys.argv[i] == "--json" and (i + 1) < len(sys.argv):
        payload = json.loads(sys.argv[i + 1])
    elif sys.argv[i] == "-payload" and (i + 1) < len(sys.argv):
        payload_file = sys.argv[i + 1]
        with open(payload_file, 'r') as f:
            payload = json.loads(f.read())
        break

print 'Configuring youtube with token {0}'.format(payload['token'])
print 'Downloading video...'

# See how big it is
f = urllib.urlopen(payload['url'])
content_length = int(f.headers["Content-Length"])
# Download it
# urllib.urlretrieve(payload['url'], "video.mp4")

metadata = {
    'snippet': {
        'title': payload['title'],
        "categoryId": 22
    },
    'status': {
        "privacyStatus": "public",
        "embeddable": True,
        "license": "youtube"
    }
}
if 'tags' in payload:
    metadata['snippet']['tags'] = payload['tags']
if 'description' in payload:
    metadata['snippet']['description'] = payload['description']

headers = {
    'Authorization': 'Bearer {0}'.format(payload['token']),
    'Content-Type': 'application/json; charset=UTF-8',
    'Content-Length': json.dumps(metadata).__len__(),
    'X-Upload-Content-Length': content_length,
    'X-Upload-Content-Type': 'video/*',
}

print 'Attempting to upload video'
print headers

# upload video file
r = requests.post('https://www.googleapis.com/upload/youtube/v3/videos?uploadType=resumable&part=snippet,status', data=metadata, headers=headers)

print "RESPONSE!"
print r.text

# files = {
#     'file': video_file,
# }
# r = requests.post('https://www.googleapis.com/upload/youtube/v3/videos', data={"video": video}, headers=headers)
Obviously it's not finished, but it's dying on the metadata upload request with the following output:
Loading Config
Parsing Payload
Configuring youtube with token <access-token>
Downloading video...
Attempting to upload video
{'X-Upload-Content-Length': 51998563, 'Content-Length': 578, 'Content-Type': 'application/json; charset=UTF-8', 'X-Upload-Content-Type': 'video/*', 'Authorization': 'Bearer <access-token>'}
RESPONSE!
{
  "error": {
    "errors": [
      {
        "domain": "global",
        "reason": "parseError",
        "message": "Parse Error"
      }
    ],
    "code": 400,
    "message": "Parse Error"
  }
}
This error is not even listed in their "Errors" docs.
What is wrong with my code?
Here is an example in Python that works. It assumes you've already done the OAuth part, though.
import requests
from os import fstat
import json

fi = open('myvideo.mp4', 'rb')  # open in binary mode for upload

# auth_data is assumed to come from the OAuth step mentioned above
base_headers = {
    'Authorization': '%s %s' % (auth_data['token_type'],
                                auth_data['access_token']),
    'content-type': 'application/json'
}
initial_headers = base_headers.copy()
initial_headers.update({
    'x-upload-content-length': fstat(fi.fileno()).st_size,
    'x-upload-content-type': 'video/mp4'
})

# step 1: create the video resource and get the resumable-upload URL
initial_resp = requests.post(
    'https://www.googleapis.com/upload/youtube/v3/videos?uploadType=resumable&part=snippet,status,contentDetails',
    headers=initial_headers,
    data=json.dumps({
        'snippet': {
            'title': 'my title',
        },
        'status': {
            'privacyStatus': 'unlisted',
            'embeddable': True
        }
    })
)

# step 2: PUT the file bytes to the upload URL from the Location header
upload_url = initial_resp.headers['location']
resp = requests.put(
    upload_url,
    headers=base_headers,
    data=fi
)
fi.close()
The above is great; just adding that you can also get the YouTube ID from the response (for future use):
cont = json.loads(resp.content)
youtube_id = cont['id']
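For reference, the parseError in the question most likely comes from passing the metadata dict via data=, which makes requests form-encode it while the Content-Type header claims JSON; serializing it first (as the working example above does) sends an actual JSON body. A minimal sketch, where upload_endpoint stands for the resumable-upload URL used in the question:

# broken: the dict gets form-encoded, but Content-Type says JSON
r = requests.post(upload_endpoint, data=metadata, headers=headers)

# working: send the serialized JSON string (or use json=metadata)
r = requests.post(upload_endpoint, data=json.dumps(metadata), headers=headers)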
