I am doing API testing using the pytest framework. The test fails every time with a 401 error, and I couldn't figure out what the issue was.
Here is the code :
import requests
import json, jsonpath
import urllib3
import constants

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# variables
dumpFile = "somepath"
url = "someUrl"
headers = {'Authorization': constants.consts['siteToken'],
           'accept': 'application/json',
           'content-type': 'application/json'}
# siteToken = 'Bearer jwt token'

# read json input file
input_file = open("json file path", 'r')
json_input = input_file.read()
request_json = json.loads(json_input)

# make POST request with JSON Input Body
r = requests.post(url, request_json, headers=headers)

# Verification of the response
assert r.status_code == 200

def test_json_result():
    # fetch header from response
    print(r.headers.get("Date"))
    # parse response to JSON Format
    response_json = json.loads(r.text)
    # validate response using Json Path
    name = jsonpath.jsonpath(response_json, 'name')
    print(name)
I solved this by passing the payload with the json= keyword argument (json=your_payload):
import requests
import json, jsonpath
import urllib3
import constants

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# variables
dumpFile = "somepath"
url = "someUrl"
headers = {'Authorization': constants.consts['siteToken'],
           'accept': 'application/json',
           'content-type': 'application/json'}
# siteToken = 'Bearer jwt token'

# read json input file
input_file = open("json file path", 'r')
json_input = input_file.read()
request_json = json.loads(json_input)

def test_json_result():
    # make POST request with JSON Input Body
    r = requests.post(url, json=request_json, headers=headers)
    # Verification of the response
    assert r.status_code == 200
    # fetch header from response
    print(r.headers.get("Date"))
    # parse response to JSON Format
    response_json = json.loads(r.text)
    # validate response using Json Path
    name = jsonpath.jsonpath(response_json, 'name')
    print(name)
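The difference matters because requests treats the second positional argument as data=: a dict passed there is form-encoded, so the server never receives a JSON body (which this particular API apparently rejected with a 401). Passing json= serializes the dict with json.dumps and sets the Content-Type: application/json header for you. A minimal illustration, with a hypothetical endpoint and payload:

import requests

payload = {'name': 'example'}           # hypothetical payload
url = 'https://api.example.com/things'  # hypothetical endpoint

# sent form-encoded as name=example, no JSON Content-Type header
r1 = requests.post(url, data=payload)

# sent as the JSON string {"name": "example"}, Content-Type set automatically
r2 = requests.post(url, json=payload)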
Basically, I am trying to read organization IDs from a spreadsheet and pass them in payloads of 100 to delete the organizations using the destroy_many endpoint.
import json
import xlrd
import requests

session = requests.Session()
session.headers = {'Content-Type': 'application/json'}
session.auth = 'my email', 'password'

url = 'https://domain.zendesk.com/api/v2/organizations/destroy_many.json'

payloads = []
organizations_dict = {}
book = xlrd.open_workbook('orgs_list_destroy.xls')
sheet = book.sheet_by_name('Sheet1')

for row in range(1, sheet.nrows):
    if sheet.row_values(row)[2]:
        organizations_dict = {'ids': int(sheet.row_values(row)[2])}
        if len(organizations_dict) == 100:
            payloads.append(json.dumps(organizations_dict))
            organizations_dict = {}

if organizations_dict:
    payloads.append(json.dumps(organizations_dict))

for payload in payloads:
    response = session.delete(url, data=payload)
    if response.status_code != 200:
        print('Import failed with status {}'.format(response.status_code))
        exit()
    print('Successfully imported a batch of organizations')
Try placing it outside the for loop, where you're defining your request headers:
url = 'https://{{YOURDOMAIN}}.zendesk.com/api/v2/organizations/destroy_many.json'
user = 'YOUR_EMAIL@DOMAIN.com' + '/token'
pwd = '{{YOUR_TOKEN}}'
headers = {'Content-Type': 'application/json'}
response = requests.delete(url, auth=(user, pwd), headers=headers)
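To actually delete the organizations in batches of 100, one option is to collect the IDs into a list and pass each batch as a comma-separated ids query parameter, which is how the Zendesk bulk endpoints are documented to accept IDs (worth double-checking against the current API docs). A minimal sketch under that assumption, reusing the spreadsheet layout and placeholder credentials from the question:

import xlrd
import requests

session = requests.Session()
session.auth = ('YOUR_EMAIL@DOMAIN.com/token', 'YOUR_TOKEN')  # API token auth

url = 'https://YOURDOMAIN.zendesk.com/api/v2/organizations/destroy_many.json'

book = xlrd.open_workbook('orgs_list_destroy.xls')
sheet = book.sheet_by_name('Sheet1')

# collect every organization id from the third column
org_ids = [int(sheet.row_values(row)[2])
           for row in range(1, sheet.nrows)
           if sheet.row_values(row)[2]]

# delete in batches of 100 (the assumed per-request limit)
for i in range(0, len(org_ids), 100):
    batch = org_ids[i:i + 100]
    response = session.delete(url, params={'ids': ','.join(str(x) for x in batch)})
    if response.status_code != 200:
        print('Batch failed with status {}'.format(response.status_code))
        break
    print('Successfully deleted a batch of {} organizations'.format(len(batch)))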
To stream data, I enter the following at the command line:
curl -X POST -H "Authorization: Bearer {my_access_token}"
https://example1.example2.com/example3/example4/example5/example6
I get:
<data>
<sessionid>{sessionid}</sessionid>
<url>https://example1.example2.com/example3/example4/example5</url>
</data>
Then, I enter:
curl -X POST -d "person=Adam&sessionid={sessionid}"
https://example6.example2.com/example3/example4/example5
In response, my data stream is outputted continuously in the terminal.
However, I would like to make the same requests programmatically using Python. I use the requests library. I have the following code:
import httplib
import urllib
import json
import requests

person = "Adam"

connection = httplib.HTTPSConnection('example1.example2.com', 443, timeout=30)

# Headers
headers = {"Accept": "application/json",
           "Authorization": "Bearer {my_access_token}"}

# Send synchronously
connection.request('POST', '/example3/example4/example5/example6', None, headers)

# Connection for streaming here
try:
    response = connection.getresponse()
    content = response.read()
    # Success
    # print('Response status ' + str(response.status))
    stringOption = content.decode('UTF-8')
    json_objOption = json.loads(stringOption)
    # Put the data into a text file
    with open('data.txt', 'w') as outfile:
        json.dump(json_objOption, outfile)
    sessionid = json_objOption["data"]["sessionid"]
except httplib.HTTPException, e:
    # Exception
    print('Exception during request')

httplib.HTTPConnection._http_vsn = 10
httplib.HTTPConnection._http_vsn_str = 'HTTP/1.0'

API_ENDPOINT = "https://example6.example2.com/example3/example4/example5"

headers = {"Accept": "application/json"}
data = {'person': person,
        'sessionid': sessionid}

r = requests.post(url=API_ENDPOINT, headers=headers, data=data)

print(r.text)
print(r)
print(r.status_code)
print(r.headers)
None of the print statements produce any output when I run this script. Why is this the case?
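For what it's worth, the same two-step flow can also be written with the requests library alone. The second endpoint streams indefinitely, so the response has to be opened with stream=True and consumed incrementally instead of being read in one go. A sketch under the assumptions implied by the curl session (the URLs, the token, and the data.sessionid field are placeholders taken from the question):

import requests

ACCESS_TOKEN = 'my_access_token'   # placeholder
SESSION_URL = 'https://example1.example2.com/example3/example4/example5/example6'
STREAM_URL = 'https://example6.example2.com/example3/example4/example5'

# step 1: obtain a session id (the question's code expects a JSON body
# with a data.sessionid field when Accept: application/json is sent)
resp = requests.post(SESSION_URL,
                     headers={'Accept': 'application/json',
                              'Authorization': 'Bearer {}'.format(ACCESS_TOKEN)})
resp.raise_for_status()
sessionid = resp.json()['data']['sessionid']

# step 2: open the streaming endpoint; stream=True stops requests from
# waiting for the (never-ending) body to finish downloading
with requests.post(STREAM_URL,
                   data={'person': 'Adam', 'sessionid': sessionid},
                   stream=True) as stream:
    for line in stream.iter_lines():
        if line:
            print(line.decode('utf-8'))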
I am attempting to use the Google UrlShortener API to retrieve history with OAuth2 and an API key. I am getting a 200 OK response, but when I try to get subsequent pages using pagetoken or pageToken as a query parameter, I always get the same nextPageToken and the same page of results. Oddly, the browser-based Google API interaction uses start-token, not pagetoken or pageToken, but when I use start-token I don't get a 200 OK.
How do I get pagination to work with the UrlShortener API?
Here is my code:
import requests
import json
import time
import settings
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run_flow
from oauth2client.file import Storage
def history():
    """Look up a user's history"""
    flow = OAuth2WebServerFlow(client_id=settings.OAUTH2_CLIENT_ID,
                               client_secret=settings.CLIENT_SECRET,
                               scope='https://www.googleapis.com/auth/urlshortener',
                               redirect_uri='http://127.0.0.1:5000/callback')
    storage = Storage('creds.data')
    credentials = run_flow(flow, storage)
    print("access_token: {}".format(credentials.access_token))
    headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer {}'.format(credentials.access_token)}
    raw_url = 'https://www.googleapis.com/urlshortener/v1/url/history'
    url = raw_url + '?key={}'.format(settings.API_KEY)
    r = requests.get(url=url, headers=headers)
    if r.ok:
        output = "The history is {}.".format(r.json())
        print(output)
        if 'nextPageToken' in r.json().keys():
            morePages = True
            npt = r.json()['nextPageToken']
            r_paged = None
            while morePages:
                time.sleep(2)
                url = raw_url + '?pagetoken={}&key={}'.format(npt, settings.API_KEY)
                r_paged = requests.get(url=url, headers=headers)
                if r_paged.ok:
                    if 'nextPageToken' in r_paged.json().keys():
                        npt = r_paged.json()['nextPageToken']
                        morePages = True
                    else:
                        morePages = False
                        break
                    output = "The history is {}.".format(r_paged.json())
                    print(output)
                else:
                    output = "Invalid request. Status code = {}, json = {}".format(r_paged.status_code, r_paged.json())
                    print(output)
    else:
        output = "Invalid request. Status code = {}, json = {}".format(r.status_code, r.json())
        print(output)
Fixed code follows:
# New import:
import urllib.parse
# // snip
time.sleep(2)
f = {'start-token':npt, 'key': settings.API_KEY}
formatted = '?' + urllib.parse.urlencode(f)
url = raw_url + formatted
r_paged = requests.get(url=url, headers=headers)
# // snip
Basically, ignore the documentation: do NOT use pageToken, use start-token as the query parameter. Also, on Python 3 use urllib.parse.urlencode to URL-encode the query string.
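Folding the fix back into the original loop, the paging section would look roughly like this (a sketch based on the question's code, with start-token substituted for pagetoken and the same raw_url, headers, and settings objects assumed):

import urllib.parse

# // snip: first request made as before, npt holds the first nextPageToken
morePages = True
while morePages:
    time.sleep(2)
    params = urllib.parse.urlencode({'start-token': npt, 'key': settings.API_KEY})
    r_paged = requests.get(url=raw_url + '?' + params, headers=headers)
    if not r_paged.ok:
        print("Invalid request. Status code = {}".format(r_paged.status_code))
        break
    page = r_paged.json()
    print("The history is {}.".format(page))
    npt = page.get('nextPageToken')
    morePages = npt is not None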
I'm trying to send an HTTP POST request with Python. I can get it to work in Python 3, but I couldn't find a good example for 2.7.
import json
import binascii
import requests

hdr = {"content-type": "application/json"}
payload = ("<html><body><h1>Sorry it's not Friday yet</h1> </body></html>")
r = requests.post("http://my-url/api/Html", json={"HTML": payload})
with open('c:/temp/a.pdf', 'wb') as f:
    b64str = json.loads(r.text)['BinaryData']  # base64 string is in BinaryData attr
    binStr = binascii.a2b_base64(b64str)       # convert base64 string to binary
    f.write(binStr)
The API takes JSON in this format:
{
    "HTML": "a html string"
}
and returns JSON in this format:
{
    "BinaryData": "base64 encoded string"
}
In Python 2.x it would look like this:
import json
import httplib
body =("<html><body><h1>Sorry it's not Friday yet</h1> </body></html>")
payload = {'HTML' : body}
hdr = {"content-type": "application/json"}
conn = httplib.HTTPConnection('my-url')
conn.request('POST', '/api/Html', json.dumps(payload), hdr)
response = conn.getresponse()
data = response.read() # same as r.text in 3.x
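To mirror the rest of the Python 3 snippet, the response body can then be decoded and written to disk the same way (a sketch that assumes the API really does return a JSON object with a BinaryData field, as described in the question):

import json
import binascii

# continuing from the httplib example above: 'data' holds the response body
result = json.loads(data)
binStr = binascii.a2b_base64(result['BinaryData'])  # decode the base64 payload

with open('c:/temp/a.pdf', 'wb') as f:
    f.write(binStr)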
The standard way is with the urllib2 module:
from urllib import urlencode
import urllib2
def http_post(url, data):
    post = urlencode(data)
    req = urllib2.Request(url, post)
    response = urllib2.urlopen(req)
    return response.read()
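Note that urlencode produces a form-encoded body. Since the API above expects JSON, a variation of the same approach would serialize the payload with json.dumps and set the Content-Type header explicitly (a sketch):

import json
import urllib2

def http_post_json(url, data):
    req = urllib2.Request(url, json.dumps(data),
                          {'Content-Type': 'application/json'})
    response = urllib2.urlopen(req)
    return response.read()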
I am trying to convert the following Java code to Python. I am not sure what I am doing wrong, but I end up with an internal server error 500 with Python.
Is the "body" argument of the httplib.HTTPConnection request method equivalent to the Java HttpEntity?
Any other thoughts on what could be wrong?
The input information I collect is correct for sure.
Any help will be really appreciated. I have tried several things, but end up with the same internal server error.
Java Code:
HttpEntity reqEntitiy = new StringEntity("loginTicket="+ticket);
HttpRequestBase request = reMethod.getRequest(uri, reqEntitiy);
request.addHeader("ticket", ticket);
HttpResponse response = httpclient.execute(request);
HttpEntity responseEntity = response.getEntity();
StatusLine responseStatus = response.getStatusLine();
Python code:
import urllib
import urlparse
import httplib

url = serverURL + "resources/slmservices/templates/" + templateId + "/options"
# Create the request
ticket = ticket.replace("'", '"')
headers = {"ticket": ticket}
print "ticket", ticket
reqEntity = "loginTicket=" + ticket
body = "loginTicket=" + ticket
url2 = urlparse.urlparse(serverURL)
h1 = httplib.HTTPConnection(url2.hostname, 8580)
print "h1", h1
url3 = urlparse.urlparse(url)
print "url path", url3.path
ubody = {"loginTicket": ticket}
data = urllib.urlencode(ubody)
conn = h1.request("GET", url3.path, data, headers)
#conn = h1.request("GET", url3.path)
response = h1.getresponse()
lines = response.read()
print "response.status", response.status
print "response.reason", response.reason
You don't need to go this low-level. Use urllib2 instead:
import urllib2
from urllib import urlencode
url = "{}resources/slmservices/templates/{}/options".format(
serverURL, templateId)
headers = {"ticket": ticket}
params = {"loginTicket": ticket}
url = '{}?{}'.format(url, urlencode(params))
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
print 'Status', response.getcode()
print 'Response data', response.read()
Note that the parameters are URL-encoded and appended to the URL as a query string.
You can do this simpler still by installing the requests library:
import requests
url = "{}resources/slmservices/templates/{}/options".format(
serverURL, templateId)
headers = {"ticket": ticket}
params = {"loginTicket": ticket}
response = requests.get(url, params=params, headers=headers)
print 'Status', response.status_code
print 'Response data', response.content # or response.text for Unicode
Here requests takes care of URL-encoding the URL query string parameters and adding it to the URL for you, just like Java does.