I can't seem to get the last recorded price from a website API that returns JSON. I tried to find the error, but the code looks okay to me. The code is in Python 3.7.
This is the URL that I have to GET: https://api.independentreserve.com/Public/GetMarketSummary?primaryCurrencyCode=xbt&secondaryCurrencyCode=aud
import requests
URL = "https://api.independentreserve.com/Public/GetMarketSummary?"
CurrencyCode = "xbt"
SecondaryCode = "aud"
PARAMS = {'primaryCurrencyCode': CurrencyCode, '&secondaryCurrencyCode': SecondaryCode}
r = requests.get(url=URL, params=PARAMS)
data = r.json()
lastprice = data['LastPrice']
print("Last Price:%s" % lastprice)
Here is the fixed code:
import requests
URL = "https://api.independentreserve.com/Public/GetMarketSummary?"
CurrencyCode = "xbt"
SecondaryCode = "aud"
PARAMS = {'primaryCurrencyCode': CurrencyCode, 'secondaryCurrencyCode': SecondaryCode}
r = requests.get(url=URL, params=PARAMS)
data = r.json()
lastprice = data['LastPrice']
print("Last Price:%s" % lastprice)
The problem is in the PARAMS dict: you need to change "&secondaryCurrencyCode" to "secondaryCurrencyCode".
If you had printed the data dict, you would have seen this:
{'Message': 'Secondary Currency Code is required'}
Removing the & in "&secondaryCurrencyCode" fixes the issue.
Fixed code below:
import requests
URL = "https://api.independentreserve.com/Public/GetMarketSummary?"
CurrencyCode = "xbt"
SecondaryCode = "aud"
PARAMS = {'primaryCurrencyCode': CurrencyCode, 'secondaryCurrencyCode': SecondaryCode}
r = requests.get(url=URL, params=PARAMS)
data = r.json()
lastprice = data['LastPrice']
print("Last Price:%s" % lastprice)
The API expects secondaryCurrencyCode, not &secondaryCurrencyCode.
You don't need the & sign when you use params; requests builds the query string, including the & separators, for you.
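You can see this for yourself by printing the URL that requests actually built; a quick check using the values from the question:

import requests

URL = "https://api.independentreserve.com/Public/GetMarketSummary"
PARAMS = {'primaryCurrencyCode': 'xbt', 'secondaryCurrencyCode': 'aud'}

r = requests.get(url=URL, params=PARAMS)
# requests adds the '?' and the '&' separators itself:
# .../Public/GetMarketSummary?primaryCurrencyCode=xbt&secondaryCurrencyCode=aud
print(r.url)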
When I run the code, it gives me this error:
requests.exceptions.MissingSchema: Invalid URL 'h': No scheme supplied. Perhaps you meant http://h?
Here is the code I am working with:
import requests

from yelp_api_key import YELP_KEY
from yelp_api_location import loc_ids

MY_API_KEY = YELP_KEY
BUSINESS_PATH = f'https://api.yelp.com/v3/businesses/{loc_ids}/reviews'
HEADERS = {'Authorization': 'bearer %s' % MY_API_KEY}
PARAMETERS = {'locale': 'en_US'}

for links in BUSINESS_PATH:
    response = requests.get(url=links,
                            params=PARAMETERS,
                            headers=HEADERS)
    business_data = response.json()
    data = business_data['reviews']
    print(data)
    for x in data:
        quotes = (x['text'])
        print(quotes)
Below is the code that is working for me. I just want to be able to call multiple endpoints without having to list them all every time. Any suggestions would be great, TIA!
MY_API_KEY = YELP_KEY
BUSINESS_PATH = ['https://api.yelp.com/v3/businesses/eL4d1tHv1mFoepoS_3rGbw/reviews',
                 'https://api.yelp.com/v3/businesses/RzS-wNTycqB5WA34JfgW0g/reviews',
                 'https://api.yelp.com/v3/businesses/PyV1e_OebaWm1cGUwtDvHA/reviews',
                 'https://api.yelp.com/v3/businesses/dcbALMl6oyv_fdJ6dZGxzA/reviews',
                 'https://api.yelp.com/v3/businesses/4uRA53NIl82a3QeZX-PcRw/reviews']
HEADERS = {'Authorization': 'bearer %s' % MY_API_KEY}
PARAMETERS = {'locale': 'en_US'}

reviews = []
for links in BUSINESS_PATH:
    # file_name = uuid.uuid1()
    response = requests.get(url=links,
                            params=PARAMETERS,
                            headers=HEADERS)
    business_data = response.json()
    data = business_data['reviews']
    for x in data:
        quotes = (x['text'])
        # print(quotes)
        reviews.append(quotes)
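By the way, the MissingSchema error in the first snippet comes from iterating over a single string: for links in BUSINESS_PATH yields one character at a time, which is why requests complains about the URL 'h'. To avoid listing the full endpoints every time, you could keep only the business IDs in a list and build each URL inside the loop; a minimal sketch, assuming loc_ids is a plain list of Yelp business ID strings (the IDs below are the ones from the question):

import requests
from yelp_api_key import YELP_KEY

loc_ids = ['eL4d1tHv1mFoepoS_3rGbw', 'RzS-wNTycqB5WA34JfgW0g',
           'PyV1e_OebaWm1cGUwtDvHA', 'dcbALMl6oyv_fdJ6dZGxzA',
           '4uRA53NIl82a3QeZX-PcRw']

HEADERS = {'Authorization': 'bearer %s' % YELP_KEY}
PARAMETERS = {'locale': 'en_US'}

reviews = []
for business_id in loc_ids:
    # build each endpoint from the ID instead of hard-coding the full URL
    link = f'https://api.yelp.com/v3/businesses/{business_id}/reviews'
    response = requests.get(url=link, params=PARAMETERS, headers=HEADERS)
    business_data = response.json()
    for x in business_data['reviews']:
        reviews.append(x['text'])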
I need to pull data from an OData API. With the code below I do receive data, but only 250 rows.
The JSON contains a key called #odata.nextLink, which holds a single value: the BASE_URL + endpoint + ?$skip=250.
How can I loop through the next pages?
import requests
import pandas as pd
import json

BASE_URL = "base_url"

def session_token():
    url = BASE_URL + '/api/oauth/token'
    headers = {"Accept": "application/json",
               "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"}
    body = {"username": "user",
            "password": "pwd",
            "grant_type": "password"}
    return "Bearer " + requests.post(url, headers=headers, data=body).json()["access_token"]

def make_request(endpoint, token=session_token()):
    headers = {"Authorization": token}
    response = requests.get(BASE_URL + endpoint, headers=headers)
    if response.status_code == 200:
        json_data = json.loads(response.text)
        return json_data

make_request("/odata/endpoint")
Following Marek Piotrowski's advice, I modified my code and came to a solution:
def main():
    url = "/odata/endpoint"
    while url:
        json_data = make_request(url)
        yield json_data['value']
        # fetch the next link; it is absolute, so strip BASE_URL before the next call
        next_link = json_data.get("#odata.nextLink")
        url = next_link.replace(BASE_URL, "") if next_link else None

result = pd.concat(pd.json_normalize(row) for row in main())
print(result)  # final dataframe, works like a charm :)
Something like this should retrieve all the records, I believe (assuming #odata.nextLink is indeed present in json_data):
def retrieve_all_records(endpoint, token=session_token()):
    all_records = []
    headers = {"Authorization": token}
    url = BASE_URL + endpoint
    while url:
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            json_data = json.loads(response.text)
            all_records = all_records + json_data['records']
            url = json_data.get('#odata.nextLink')  # None on the last page ends the loop
        else:
            break  # avoid looping forever on an error response
    return all_records
The code is untested, though, so let me know if it works. Alternatively, you could have make_request call itself recursively, but then you would have to accumulate the results somewhere outside the function.
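For completeness, the recursive variant mentioned above could look something like this; equally untested, and it assumes the same #odata.nextLink key and 'records' list in each page as the loop version:

def retrieve_recursively(url, token=session_token(), all_records=None):
    # accumulate results across the recursive calls
    if all_records is None:
        all_records = []
    if not url:
        return all_records
    headers = {"Authorization": token}
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        json_data = json.loads(response.text)
        all_records += json_data['records']
        # recurse on the next page; .get returns None on the last page
        return retrieve_recursively(json_data.get('#odata.nextLink'), token, all_records)
    return all_records

# usage: records = retrieve_recursively(BASE_URL + "/odata/endpoint")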
I know this is late, but you could look at this article from Towards Data Science by Ephram Mwai.
He pretty much solved the problem with a good script.
I want to use an advanced operator to filter my search results, so that they only contain PDFs. I added the advanced operator (filetype:pdf), but it doesn't seem to work.
import requests

subscription_key = "My_ACCESS_KEY"
assert subscription_key

search_url = "https://api.cognitive.microsoft.com/bing/v7.0/search"
search_term = "NASA"

headers = {"Ocp-Apim-Subscription-Key": subscription_key}
params = {"q": search_term, "filetype": "pdf", "responseFilter": "Webpages",
          "textDecorations": True, "textFormat": "HTML"}
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
I could not figure out how to use the advanced operator (filetype:pdf) to filter the search results.
Could anyone please suggest how to use it?
Thanks
I just tried the example code from their documentation. I added the filetype as a URL query parameter and it seems to work.
import requests

subscription_key = "..."
assert subscription_key

search_url = "https://api.cognitive.microsoft.com/bing/v5.0/search"
# build the query string by hand so the & stays a real separator
# (passing this string through params would percent-encode the & and %)
search_term = "Machine%20Learning&filetype=pdf"

headers = {"Ocp-Apim-Subscription-Key": subscription_key}
response = requests.get(search_url + "?q=" + search_term, headers=headers)
response.raise_for_status()
search_results = response.json()
print(search_results)
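Note that Bing also supports the advanced operator directly inside the q value, so you can let requests do the encoding with a normal params dict; a minimal sketch against the v7.0 endpoint from the question (assuming your subscription key is valid for v7.0):

import requests

subscription_key = "My_ACCESS_KEY"
search_url = "https://api.cognitive.microsoft.com/bing/v7.0/search"

headers = {"Ocp-Apim-Subscription-Key": subscription_key}
# the filetype: operator is part of the query text itself, not a separate parameter
params = {"q": "NASA filetype:pdf", "textDecorations": True, "textFormat": "HTML"}

response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
print(response.json())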
I am trying to get geolocation data from the following website: https://www.findlatitudeandlongitude.com/. However, it does not return the desired geolocation for the specific address.
from urllib.parse import urlencode
import requests
params = {'address':'Any Address'}
search_url = 'https://www.findlatitudeandlongitude.com/?'
url = search_url + urlencode(params)
r = requests.get(url)
r.content
I also tried using requests.post, but it does not give the correct geolocation data either:
import requests
url = "https://www.findlatitudeandlongitude.com/"
values = {'address':'Any Address'}
r = requests.post(url, params = values)
r.content
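Note that with requests.post, form fields belong in data=, not params= (params only builds the query string). More fundamentally, that site appears to fill in the coordinates with JavaScript, so the HTML that requests receives will likely not contain the result. A dedicated geocoding API is usually the more reliable route; a minimal sketch using OpenStreetMap's Nominatim service (an assumption on my part that its usage policy fits your case; it asks for an identifying User-Agent):

import requests

params = {'q': '1600 Pennsylvania Ave NW, Washington, DC', 'format': 'json'}
headers = {'User-Agent': 'my-geocoding-script'}  # Nominatim requires a descriptive User-Agent

r = requests.get('https://nominatim.openstreetmap.org/search', params=params, headers=headers)
results = r.json()
if results:
    # lat/lon come back as strings in the first (best) match
    print(results[0]['lat'], results[0]['lon'])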
I am trying to convert the following Java code to Python. I am not sure what I am doing wrong, but I end up with an internal server error (500) with Python.
Is the body in the httplib.HTTPConnection request equivalent to the Java HttpEntity?
Any other thoughts on what could be wrong?
The input information I collect is correct for sure.
Any help will be really appreciated. I have tried several things, but I end up with the same internal server error.
Java Code:
HttpEntity reqEntitiy = new StringEntity("loginTicket="+ticket);
HttpRequestBase request = reMethod.getRequest(uri, reqEntitiy);
request.addHeader("ticket", ticket);
HttpResponse response = httpclient.execute(request);
HttpEntity responseEntity = response.getEntity();
StatusLine responseStatus = response.getStatusLine();
Python code:
import httplib
import urllib
import urlparse

url = serverURL + "resources/slmservices/templates/" + templateId + "/options"
# Create the request
ticket = ticket.replace("'", '"')
headers = {"ticket": ticket}
print "ticket", ticket
reqEntity = "loginTicket=" + ticket
body = "loginTicket=" + ticket
url2 = urlparse.urlparse(serverURL)
h1 = httplib.HTTPConnection(url2.hostname, 8580)
print "h1", h1
url3 = urlparse.urlparse(url)
print "url path", url3.path
ubody = {"loginTicket": ticket}
data = urllib.urlencode(ubody)
h1.request("GET", url3.path, data, headers)  # sends a body with a GET request
#h1.request("GET", url3.path)
response = h1.getresponse()
lines = response.read()
print "response.status", response.status
print "response.reason", response.reason
You don't need to go this low-level. Use urllib2 instead:
import urllib2
from urllib import urlencode
url = "{}resources/slmservices/templates/{}/options".format(
serverURL, templateId)
headers = {"ticket": ticket}
params = {"loginTicket": ticket}
url = '{}?{}'.format(url, urlencode(params))
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
print 'Status', response.getcode()
print 'Response data', response.read()
Note that the parameters are added to the URL to form URL query parameters.
You can make this simpler still by installing the requests library:
import requests
url = "{}resources/slmservices/templates/{}/options".format(
serverURL, templateId)
headers = {"ticket": ticket}
params = {"loginTicket": ticket}
response = requests.get(url, params=params, headers=headers)
print 'Status', response.status_code
print 'Response data', response.content # or response.text for Unicode
Here requests takes care of URL-encoding the query string parameters and adding them to the URL for you, just like the Java code does.
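The Java code sends loginTicket=... as the request body (that is what the StringEntity is), while the two snippets above move it into the URL query string. If the service actually expects the ticket in the body, a POST would be the closer equivalent; a sketch, assuming the endpoint accepts POST at all:

import requests

url = "{}resources/slmservices/templates/{}/options".format(
    serverURL, templateId)
headers = {"ticket": ticket}
# data= sends "loginTicket=<ticket>" form-encoded in the request body,
# much like the Java StringEntity does
response = requests.post(url, data={"loginTicket": ticket}, headers=headers)
print 'Status', response.status_code
print 'Response data', response.content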