Equivalent Python code for the following Java http get requests - python

I am trying to convert the following Java code to Python. I'm not sure what I am doing wrong, but I end up with an internal server error 500 with Python.
Is the "body" argument of httplib.HTTPConnection's request method equivalent to Java's HttpEntity?
Any other thoughts on what could be wrong?
The input information I collect is correct for sure.
Any help will be really appreciated. I have tried several things, but end up with the same internal server error.
Java Code:
HttpEntity reqEntitiy = new StringEntity("loginTicket="+ticket);
HttpRequestBase request = reMethod.getRequest(uri, reqEntitiy);
request.addHeader("ticket", ticket);
HttpResponse response = httpclient.execute(request);
HttpEntity responseEntity = response.getEntity();
StatusLine responseStatus = response.getStatusLine();
Python code:
url = serverURL + "resources/slmservices/templates/"+templateId+"/options"
#Create the request
ticket = ticket.replace("'",'"')
headers = {"ticket":ticket}
print "ticket",ticket
reqEntity = "loginTicket="+ticket
body = "loginTicket="+ticket
url2 = urlparse.urlparse(serverURL)
h1 = httplib.HTTPConnection(url2.hostname,8580)
print "h1",h1
url3 = urlparse.urlparse(url)
print "url path",url3.path
ubody = {"loginTicket":ticket}
data = urllib.urlencode(ubody)
conn = h1.request("GET",url3.path,data,headers)
#conn = h1.request("GET",url3.path)
response = h1.getresponse()
lines = response.read()
print "response.status",response.status
print "response.reason",response.reason

You don't need to go this low-level. Use urllib2 instead:
import urllib2
from urllib import urlencode
url = "{}resources/slmservices/templates/{}/options".format(
serverURL, templateId)
headers = {"ticket": ticket}
params = {"loginTicket": ticket}
url = '{}?{}'.format(url, urlencode(params))
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
print 'Status', response.getcode()
print 'Response data', response.read()
Note that the parameters are URL-encoded and appended to the URL as query parameters.
You can do this simpler still by installing the requests library:
import requests
url = "{}resources/slmservices/templates/{}/options".format(
serverURL, templateId)
headers = {"ticket": ticket}
params = {"loginTicket": ticket}
response = requests.get(url, params=params, headers=headers)
print 'Status', response.status_code
print 'Response data', response.content # or response.text for Unicode
Here requests takes care of URL-encoding the query string parameters and adding them to the URL for you, just like the Java client does.
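If the server actually reads loginTicket from the request body (as the Java StringEntity suggests) rather than from the query string, requests can send a body too. A minimal sketch, assuming the endpoint accepts a form-encoded body on a GET, which not every server does:
import requests

url = "{}resources/slmservices/templates/{}/options".format(serverURL, templateId)
headers = {"ticket": ticket}

# Send loginTicket in the request body instead of the query string,
# mirroring the Java StringEntity("loginTicket=" + ticket).
response = requests.get(url, data={"loginTicket": ticket}, headers=headers)
print(response.status_code)
print(response.content)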

Related

Trying to make a Yelp API call with a list of business IDs

When I run the code it gives me this error:
requests.exceptions.MissingSchema: Invalid URL 'h': No scheme supplied. Perhaps you meant http://h?
Here is the code I am working with:
from yelp_api_key import YELP_KEY
from yelp_api_location import loc_ids
MY_API_KEY = YELP_KEY
BUSINESS_PATH = f'https://api.yelp.com/v3/businesses/{loc_ids}/reviews'
HEADERS = {'Authorization': 'bearer %s' % MY_API_KEY}
PARAMETERS = {'locale': 'en_US'
}
for links in BUSINESS_PATH:
response = requests.get (url=links,
params=PARAMETERS,
headers=HEADERS)
business_data = response.json()
data = business_data['reviews']
print(data)
for x in data:
quotes = (x['text'])
print(quotes)
Below is the code that is working for me. I just want to be able to call multiple APIs without having to list the endpoints every time. Any suggestions would be great, TIA!
MY_API_KEY = YELP_KEY
BUSINESS_PATH = ['https://api.yelp.com/v3/businesses/eL4d1tHv1mFoepoS_3rGbw/reviews',
                 'https://api.yelp.com/v3/businesses/RzS-wNTycqB5WA34JfgW0g/reviews',
                 'https://api.yelp.com/v3/businesses/PyV1e_OebaWm1cGUwtDvHA/reviews',
                 'https://api.yelp.com/v3/businesses/dcbALMl6oyv_fdJ6dZGxzA/reviews',
                 'https://api.yelp.com/v3/businesses/4uRA53NIl82a3QeZX-PcRw/reviews']
HEADERS = {'Authorization': 'bearer %s' % MY_API_KEY}
PARAMETERS = {'locale': 'en_US'}

reviews = []
for links in BUSINESS_PATH:
    # file_name = uuid.uuid1()
    response = requests.get(url=links,
                            params=PARAMETERS,
                            headers=HEADERS)
    business_data = response.json()
    data = business_data['reviews']
    for x in data:
        quotes = (x['text'])
        # print(quotes)
        reviews.append(quotes)
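One way to avoid listing every endpoint is to build each URL from the business ID itself. A minimal sketch, assuming loc_ids (imported in the question) is a plain list of Yelp business ID strings:
import requests

from yelp_api_key import YELP_KEY
from yelp_api_location import loc_ids  # assumed: a list of business ID strings

HEADERS = {'Authorization': 'bearer %s' % YELP_KEY}
PARAMETERS = {'locale': 'en_US'}

reviews = []
for business_id in loc_ids:
    # Build each endpoint from the ID instead of hard-coding the full URL.
    url = f'https://api.yelp.com/v3/businesses/{business_id}/reviews'
    response = requests.get(url, params=PARAMETERS, headers=HEADERS)
    business_data = response.json()
    for review in business_data['reviews']:
        reviews.append(review['text'])

print(reviews)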

Bing api Advanced operator for web search using python

I want to use an advanced operator to filter my search results so that they only contain PDFs. I added the advanced operator (filetype:pdf), but it does not seem to work.
subscription_key = "My_ACCESS_KEY"
assert subscription_key
search_url = "https://api.cognitive.microsoft.com/bing/v7.0/search"
search_term = "NASA"
import requests
headers = {"Ocp-Apim-Subscription-Key" : subscription_key}
params = {"q": search_term, "filetype":"pdf", "responseFilter":"Webpages", textDecorations":True, "textFormat":"HTML"}
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
I could not figure out how to use the advanced operator (filetype:pdf) to filter the search results.
Could anyone please suggest how to use it?
Thanks
I just tried the example code from their documentation. I added the filetype as a URL query parameter and it seems to work.
import requests
subscription_key = "..."
assert subscription_key
search_url = "https://api.cognitive.microsoft.com/bing/v5.0/search"
search_term = "Machine%20Learning&filetype=pdf"
headers = {"Ocp-Apim-Subscription-Key" : subscription_key}
params = {"q": search_term}
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
print(search_results)
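Another option is to keep the advanced operator inside the q parameter itself and let requests handle the encoding. A sketch against the v7.0 endpoint from the question; whether the operator is honoured can depend on the service tier, so treat it as something to verify:
import requests

subscription_key = "My_ACCESS_KEY"
search_url = "https://api.cognitive.microsoft.com/bing/v7.0/search"

headers = {"Ocp-Apim-Subscription-Key": subscription_key}
# The advanced operator lives inside the query string itself;
# requests URL-encodes the space and colon for us.
params = {
    "q": "NASA filetype:pdf",
    "responseFilter": "Webpages",
    "textDecorations": True,
    "textFormat": "HTML",
}

response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
print(search_results)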

PYTHON: requests and response 401

I have a little problem with authentication. I am writing a script which gets a login and password from the user (keyboard input), and then I want to get some data from a website (HTTP, not HTTPS), but every time I run the script the response is 401. I read some similar posts on Stack Overflow and tried these solutions:
Solution 1
from base64 import b64encode
from http.client import HTTPConnection

c = HTTPConnection("somewebsite")
userAndPass = b64encode(b"username:password").decode("ascii")
headers = {'Authorization': 'Basic %s' % userAndPass}
c.request('GET', '/', headers=headers)
res = c.getresponse()
data = res.read()
Solution 2
import requests

with requests.Session() as c:
    url = 'somewebsite'
    USERNAME = 'username'
    PASSWORD = 'password'
    c.get(url)
    login_data = dict(username=USERNAME, password=PASSWORD)
    c.post(url, data=login_data)
    page = c.get('somewebsite', headers={"Referer": "somewebsite"})
    print(page)
Solution 3
import urllib.parse
import urllib.request

www = 'somewebsite'
value = {'filter': 'somefilter'}
data = urllib.parse.urlencode(value)
data = data.encode('utf-8')
req = urllib.request.Request(www, data)
resp = urllib.request.urlopen(req)
respData = resp.read()
print(respData)
x = urllib.request.urlopen(www, "username", "password")
print(x.read())
I don't know how to solve this problem. Can somebody give me a link or a tip?
Have you tried the Basic Authentication example from requests?
>>> from requests.auth import HTTPBasicAuth
>>> requests.get('https://api.github.com/user', auth=HTTPBasicAuth('user', 'pass'))
<Response [200]>
Can I know what type of authentication the website uses?
This is an official Basic Auth example (http://docs.python-requests.org/en/master/user/advanced/#http-verbs):
import requests
from requests.auth import HTTPBasicAuth

auth = HTTPBasicAuth('fake@example.com', 'not_a_real_password')
r = requests.post(url=url, data=body, auth=auth)  # url and body defined elsewhere
print(r.status_code)
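If the site turns out not to use Basic auth, a 401 can also mean it expects Digest authentication, which requests supports as well. A small sketch with placeholder URL and credentials:
import requests
from requests.auth import HTTPDigestAuth

# Placeholder URL and credentials; swap in the real site and the user's input.
url = 'http://somewebsite/protected/resource'
response = requests.get(url, auth=HTTPDigestAuth('username', 'password'))
print(response.status_code)  # 200 if the credentials and scheme match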
To use an API with authentication, we need a token or app ID that grants access for our request. Below is an example of how we can build the URL and get the response:
import requests

city = input()
api_call = "http://api.openweathermap.org/data/2.5/weather?"
app_id = "892d5406f4811786e2b80a823c78f466"
req_url = api_call + "q=" + city + "&appid=" + app_id

response = requests.get(req_url)
data = response.json()

if data["cod"] == 200:
    hum = data["main"]["humidity"]
    print("Humidity is % d " % hum)
elif data["cod"] != 200:
    print("Error occurred : ", data["cod"], data["message"])

Trying to get json data from API, got TypeError

TypeError: POST data should be bytes, an iterable of bytes, or a file object. It cannot be of type str.
import json
import urllib.request as req
from urllib.parse import urlencode
url = "https://apiurl.example/search/"
payload = {"SearchString":"mysearch"}
response = req.urlopen(url, urlencode(payload))
data = response.read()
print(data.decode("utf-8"))
What am I doing wrong? There is nothing wrong with the url or "payload", as I tried it in the API's online interface. Before I added the urlencode and the UTF-8 decode I got an error saying "TypeError: can't concat str to bytes". At some point it returned an empty list, but I don't remember what I did then. Anyway, it should return some data as mentioned. Thanks for your time.
I've never used requests that way. Here's an example of how I've done it, checking the result code and decoding the JSON if it was successful:
import json
import requests

action_url = "https://apiurl.example/search/"

# Prepare the headers
header_dict = {}
header_dict['Content-Type'] = 'application/json'

# Make the URL request
result = requests.get(action_url, headers=header_dict)
status_code = result.status_code

if status_code == requests.codes.ok:
    records = json.loads(result.content)
    print('Success. Records:')
    print(records)
else:
    print('ERROR. Status: {0}'.format(status_code))
    print('headers: {0}'.format(header_dict))
    print('action_url: {0}'.format(action_url))
    # Show the error messages.
    print(result.text)
I figured it out now.
import urllib.request
import urllib.parse

url = "https://apiurl.example/search"
search_input = input("Search ")
payload = {"SearchString": search_input}

params = urllib.parse.urlencode(payload)
params = params.encode('utf-8')  # urlopen needs bytes for the POST body
f = urllib.request.urlopen(url, params)
output = f.read()
print(output)
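The same fix can also be written with requests, which handles the encoding to bytes for you. A sketch, assuming the endpoint accepts a form-encoded POST body (use json=payload instead if it expects JSON):
import requests

url = "https://apiurl.example/search"
payload = {"SearchString": input("Search ")}

# requests encodes the dict as form data and sets the body and headers itself.
response = requests.post(url, data=payload)
print(response.text)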

Google URL Shortener API always returns same page with pagetoken

I am attempting to use the Google UrlShortener API to retrieve history, with OAuth2 and an API key. I am getting a 200 OK response, but when I try to get subsequent pages using pagetoken or pageToken as a query parameter, I always get the same nextPageToken and the same page of results. Oddly, the browser-based Google API interaction uses start-token, not pagetoken or pageToken, but when I use start-token I don't get a 200 OK.
How do I get pagination to work with the UrlShortener API?
Here is my code:
import requests
import json
import time
import settings
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run_flow
from oauth2client.file import Storage

def history():
    """Look up a user's history"""
    flow = OAuth2WebServerFlow(client_id=settings.OAUTH2_CLIENT_ID,
                               client_secret=settings.CLIENT_SECRET,
                               scope='https://www.googleapis.com/auth/urlshortener',
                               redirect_uri='http://127.0.0.1:5000/callback')
    storage = Storage('creds.data')
    credentials = run_flow(flow, storage)
    print("access_token: {}".format(credentials.access_token))
    headers = {'Content-Type': 'application/json',
               'Authorization': 'Bearer {}'.format(credentials.access_token)}
    raw_url = 'https://www.googleapis.com/urlshortener/v1/url/history'
    url = raw_url + '?key={}'.format(settings.API_KEY)
    r = requests.get(url=url, headers=headers)
    if r.ok:
        output = "The history is {}.".format(r.json())
        print(output)
        if 'nextPageToken' in r.json().keys():
            morePages = True
            npt = r.json()['nextPageToken']
            r_paged = None
            while morePages:
                time.sleep(2)
                url = raw_url + '?pagetoken={}&key={}'.format(npt, settings.API_KEY)
                r_paged = requests.get(url=url, headers=headers)
                if r_paged.ok:
                    if 'nextPageToken' in r_paged.json().keys():
                        npt = r_paged.json()['nextPageToken']
                        morePages = True
                    else:
                        morePages = False
                        break
                    output = "The history is {}.".format(r_paged.json())
                    print(output)
                else:
                    output = "Invalid request. Status code = {}, json = {}".format(
                        r_paged.status_code, r_paged.json())
                    print(output)
    else:
        output = "Invalid request. Status code = {}, json = {}".format(
            r.status_code, r.json())
        print(output)
Fixed code follows:
# New import:
import urllib.parse
# // snip
time.sleep(2)
f = {'start-token':npt, 'key': settings.API_KEY}
formatted = '?' + urllib.parse.urlencode(f)
url = raw_url + formatted
r_paged = requests.get(url=url, headers=headers)
# // snip
Basically, ignore the documentation: do NOT use pageToken, use start-token. Furthermore, use Python 3's urllib.parse module for the URL encoding.
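Put together, the pagination loop can also lean on requests' params argument instead of formatting the URL by hand. A condensed sketch of that loop, keeping the start-token parameter that worked here; the helper name fetch_history_pages is just for illustration:
import time
import requests

def fetch_history_pages(raw_url, api_key, headers):
    """Yield each page of history, following start-token until it runs out."""
    params = {'key': api_key}
    while True:
        r = requests.get(raw_url, headers=headers, params=params)
        r.raise_for_status()
        page = r.json()
        yield page
        npt = page.get('nextPageToken')
        if not npt:
            break
        # requests URL-encodes the token for us.
        params = {'start-token': npt, 'key': api_key}
        time.sleep(2)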
