Timestring passed into URL to output JSON file - Python API Call - python

I'm getting the following error for my python scraper:
import requests
import json
symbol_id = 'COINBASE_SPOT_BTC_USDT'
time_start = '2022-11-20T17:00:00'
time_end = '2022-11-21T05:00:00'
limit_levels = 100000000
limit = 100000000
url = 'https://rest.coinapi.io/v1/orderbooks/{symbol_id}/history?time_start={time_start}limit={limit}&limit_levels={limit_levels}'
headers = {'X-CoinAPI-Key' : 'XXXXXXXXXXXXXXXXXXXXXXX'}
response = requests.get(url, headers=headers)
print(response)
with open('raw_coinbase_ob_history.json', 'w') as json_file:
    json.dump(response.json(), json_file)
with open('raw_coinbase_ob_history.json', 'r') as handle:
    parsed = json.load(handle)
with open('coinbase_ob_history.json', 'w') as coinbase_ob:
    json.dump(parsed, coinbase_ob, indent = 4)
<Response [400]>
And in my written JSON file, I get:
{"error": "Wrong format of 'time_start' parameter."}
I assumed a string goes into the URL, so I wrote the timestring as a plain string. I don't understand why this doesn't work. This is the documentation for the CoinAPI call I'm trying to make with the timestring: https://docs.coinapi.io/?python#historical-data-get-4

Incorrect syntax for Python. To concatenate strings, join them with +, like this:
a = 'a' + 'b' + 'c'

Your string formatting is invalid (the URL is a plain string, so the {placeholders} are never filled in), and you also need a & between the different URL parameters:
# Python 3.6+ (f-string)
url = f"https://rest.coinapi.io/v1/orderbooks/{symbol_id}/history?time_start={time_start}&limit={limit}&limit_levels={limit_levels}"
# Python 2 (str.format, which also works on Python 3)
url = "https://rest.coinapi.io/v1/orderbooks/{symbol_id}/history?time_start={time_start}&limit={limit}&limit_levels={limit_levels}".format(symbol_id=symbol_id, time_start=time_start, limit=limit, limit_levels=limit_levels)
https://docs.python.org/3/tutorial/inputoutput.html
https://docs.python.org/2/tutorial/inputoutput.html
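Alternatively, requests can build the query string for you through its params argument, which also URL-encodes the values; a minimal sketch reusing the variables from the question:
params = {
    'time_start': time_start,    # ISO 8601 timestamp, as in the question
    'limit': limit,
    'limit_levels': limit_levels,
}
url = f'https://rest.coinapi.io/v1/orderbooks/{symbol_id}/history'
response = requests.get(url, headers=headers, params=params)  # requests joins the parameters with & for you
print(response.status_code)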

Related

How to add strings in the file at the end of all lines

I am trying to download files using python and then add lines at the end of the downloaded files, but it returns an error:
f.write(data + """<auth-user-pass>
TypeError: can't concat str to bytes
I also tried something like this but it also did not work: f.write(str(data) + "< auth-user-pass >")
here is my full code:
import os
import requests
from multiprocessing.pool import ThreadPool

def download_url(url):
    print("downloading: ", url)
    # assumes that the last segment after the / represents the file name
    # if url is abc/xyz/file.txt, the file name will be file.txt
    file_name_start_pos = url.rfind("/") + 1
    file_name = url[file_name_start_pos:]
    save_path = 'ovpns/'
    complete_path = os.path.join(save_path, file_name)
    print(complete_path)
    r = requests.get(url, stream=True)
    if r.status_code == requests.codes.ok:
        with open(complete_path, 'wb') as f:
            for data in r:
                f.write(data + """<auth-user-pass>
username
password
</auth-user-pass>""")
    return url

servers = [
    "us-ca72.nordvpn.com",
    "us-ca73.nordvpn.com"
]

urls = []
for server in servers:
    urls.append("https://downloads.nordcdn.com/configs/files/ovpn_legacy/servers/" + server + ".udp1194.ovpn")

# Run 5 multiple threads. Each call will take the next element in urls list
results = ThreadPool(5).imap_unordered(download_url, urls)
for r in results:
    print(r)
EDIT: Thanks, it works now when I do this b"""< auth-user-pass >""", but I only want to add the string at the end of the file. When I run the code, it adds the string for every line.
Try this:
import os
import requests
from multiprocessing.pool import ThreadPool

def download_url(url):
    print("downloading: ", url)
    # assumes that the last segment after the / represents the file name
    # if url is abc/xyz/file.txt, the file name will be file.txt
    file_name_start_pos = url.rfind("/") + 1
    file_name = url[file_name_start_pos:]
    save_path = 'ovpns/'
    complete_path = os.path.join(save_path, file_name)
    print(complete_path)
    r = requests.get(url, stream=True)
    if r.status_code == requests.codes.ok:
        with open(complete_path, 'wb') as f:
            for data in r:
                f.write(data)      # write the downloaded chunks unchanged
        return complete_path       # return the path so the caller knows where to append
    return None                    # download failed, nothing to append to

servers = [
    "us-ca72.nordvpn.com",
    "us-ca73.nordvpn.com"
]

urls = []
for server in servers:
    urls.append("https://downloads.nordcdn.com/configs/files/ovpn_legacy/servers/" + server + ".udp1194.ovpn")

# Run 5 multiple threads. Each call will take the next element in urls list
results = ThreadPool(5).imap_unordered(download_url, urls)
for complete_path in results:
    if complete_path is None:
        continue
    # append the auth block once, at the end of each finished download
    with open(complete_path, 'ab') as f:
        f.write(b"""<auth-user-pass>
username
password
</auth-user-pass>""")
    print(complete_path)
You are writing the file in binary mode, so encode your string before concatenating it. That is, replace
for data in r:
    f.write(data + """<auth-user-pass>
username
password
</auth-user-pass>""")
with
for data in r:
    f.write(data + """<auth-user-pass>
username
password
</auth-user-pass>""".encode())
You open the file for writing in binary mode. Because of that you can't write normal (str) strings, as the comment from @user56700 said. You either need to encode the string or open the file another way (e.g. 'a' for appending text).
Also keep in mind that opening a file with 'w' or 'wb' deletes its existing data. So if you want to keep the downloaded content and only add the block at the end, open the file again in append mode ('ab') instead of write mode.
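For reference, a minimal sketch of why the TypeError appears when mixing the two types (the names here are illustrative only):
chunk = b"downloaded bytes"         # requests yields bytes when streaming
footer = "<auth-user-pass>"         # a plain str
# chunk + footer                    # TypeError: can't concat str to bytes
combined = chunk + footer.encode()  # encoding the str to bytes makes the concatenation valid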

Input CSV file of lat and long coordinates into API to extract the weather data?

Here is my code below, where I put the long and lat coordinates in the locations variable and attached them to the URL via coordinates_str. I have a CSV file with the latitude and longitude coordinates of many locations, and I want to feed that CSV file into this API (which needs authentication).
How do I input the CSV file into this code instead of the hard-coded locations variable?
import requests
import pprint
locations = [(13.84, -12.57), (12.21, -14.69)]
coordinates_str = ','.join(map(lambda a: ' '.join(f'{f:.3f}' for f in a), locations))
# Replace "poi-settings" with the endpoint you would like to call.
URL = f'https://ubiconnect-eu.ubimet.com:8090/pinpoint-data?coordinates={coordinates_str}'
TOKEN = 'TOKEN KEY'
# Create session object that can be used for all requests.
session = requests.Session()
session.headers['Authorization'] = 'Token {token}'.format(token=TOKEN)
# Send GET request to UBIconnect.
res = session.get(URL)
res.raise_for_status()
# Decode JSON response.
poi_info = res.json()
pprint.pprint(poi_info, indent=2, compact=True)
Then I tried it this way: in place of coordinates_str I did this:
import requests
import pprint
import pandas as pd
df = pd.read_csv(r'E:\route_points.csv')
print(df)
# Replace "poi-settings" with the endpoint you would like to call.
URL = f'https://ubiconnect-eu.ubimet.com:8090/pinpoint-data?'
TOKEN = 'API TOKEN'
params= {'coordinates':(df)}
# Create session object that can be used for all requests.
session = requests.Session()
session.headers['Authorization'] = 'Token {token}'.format(token=TOKEN)
# Send GET request to UBIconnect.
res = session.get(URL, params= params)
res.raise_for_status()
# Decode JSON response.
poi_info = res.json()
pprint.pprint(poi_info, indent=2, compact=True)
Still not working.
The format needed to call the API, from the documentation, is:
# Replace "poi-settings" with the endpoint you would like to call.
URL = 'https://ubiconnect-eu.ubimet.com:8090/poi-settings'
TOKEN = '<YOUR TOKEN GOES HERE>'
So I replaced poi-settings with pinpoint-data:
URL = 'https://ubiconnect-eu.ubimet.com:8090/pinpoint-data?coordinates=longitude<space>latitude'
For example, I put one coordinate set into the API URL:
URL = 'https://ubiconnect-eu.ubimet.com:8090/pinpoint-data?coordinates=132.85 12.84'
and with the above URL I get the weather data for that location.
If you just want to submit a block of coordinates at a time from your CSV file then something like the following should suffice:
from itertools import islice
import requests
import pprint
import csv

def grouper(n, iterable):
    it = iter(iterable)
    return iter(lambda: tuple(islice(it, n)), ())

block_size = 10 # how many pairs to submit per request
TOKEN = 'TOKEN KEY'

# Create session object that can be used for all requests.
session = requests.Session()
session.headers['Authorization'] = 'Token {token}'.format(token=TOKEN)

with open('coordinates.csv', newline='') as f_input:
    csv_input = csv.reader(f_input)
    header = next(csv_input) # skip the header
    for coords in grouper(block_size, csv_input):
        coordinates = ','.join(f'{float(long):.3f} {float(lat):.3f}' for long, lat in coords)
        print(coordinates)
        URL = f'https://ubiconnect-eu.ubimet.com:8090/pinpoint-data?coordinates={coordinates}'
        # Send GET request to UBIconnect.
        res = session.get(URL)
        res.raise_for_status()
        # Decode JSON response.
        poi_info = res.json()
        pprint.pprint(poi_info, indent=2, compact=True)
(obviously this was not tested - no token). Make sure there are no blank lines in your CSV file.
To output to a file add an output file:
with open('coordinates.csv', newline='') as f_input, open('output.json', 'w', encoding='utf-8') as f_output:
and use this in the pprint() call:
pprint.pprint(poi_info, f_output, indent=2, compact=True)
f_output.write('\n') # add blank line if needed
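If the output file needs to be valid JSON rather than pprint's Python-style formatting, json.dump could be used instead (a small sketch reusing the f_output handle from above):
import json
json.dump(poi_info, f_output, indent=2)
f_output.write('\n')  # separate the documents written for each block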
Hope this is what you are looking for
import csv

locations = list()
with open("foo.csv") as csvf:
    csvreader = csv.DictReader(csvf)
    for row in csvreader:
        locations.append((float(row["lat"]), float(row["long"])))

# now add your code
coordinates_str = ','.join(map(lambda a: ' '.join(f'{f:.3f}' for f in a), locations))
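Since the question already uses pandas, an equivalent sketch with pandas (assuming the CSV at the question's path has lat and long columns, and that the API wants longitude before latitude as in the documentation example):
import pandas as pd

df = pd.read_csv(r'E:\route_points.csv')  # path taken from the question
coordinates_str = ','.join(f'{row.long:.3f} {row.lat:.3f}' for row in df.itertuples())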

Request Status Code 500 when running Python Script

This is what I am supposed to do:
List all files in the data/feedback folder
Scan all the files and make a nested dictionary with Title, Name, Date & Feedback (each file contains Title, Name, Date & Feedback, each on its own line, which is why I use the rstrip function)
Post the dictionary to the given URL
Following is my code:
#!/usr/bin/env python3
import os
import os.path
import requests
import json

src = '/data/feedback/'
entries = os.listdir(src)
Title, Name, Date, Feedback = 'Title', 'Name', 'Date', 'Feedback'
inputDict = {}

for i in range(len(entries)):
    fileName = entries[i]
    completeName = os.path.join(src, fileName)
    with open(completeName, 'r') as f:
        line = f.readlines()
        line_tuple = (line[0], line[1], line[2], line[3])
        inputDict[fileName] = {}
        inputDict[fileName][Title] = line_tuple[0].rstrip()
        inputDict[fileName][Name] = line_tuple[1].rstrip()
        inputDict[fileName][Date] = line_tuple[2].rstrip()
        inputDict[fileName][Feedback] = line_tuple[3].rstrip()

x = requests.get("http://website.com/feedback")
print(x.status_code)

r = requests.post("http://Website.com/feedback", data=inputDict)
print(r.status_code)
After I run it, the GET gives a 200 code but the POST gives a 500 code.
I just want to know whether my script is causing the error or not.
r = requests.post("http://Website.com/feedback", data=inputDict)
If your REST API endpoint is expecting JSON data, then the line above is not doing that; it is sending the dictionary inputDict form-encoded, as though you were submitting a form on an HTML page.
You can either use the json parameter in the post function, which sets the content-type in the headers to application/json:
r = requests.post ("http://Website.com/feedback", json=inputDict)
or set the header manually:
headers = {'Content-type': 'application/json'}
r = requests.post("http://Website.com/feedback", data=json.dumps(inputDict), headers=headers)
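As a quick check, a minimal sketch of the corrected request against the placeholder URL from the question; if the 500 persists, printing the response body usually reveals what the server rejected:
r = requests.post("http://Website.com/feedback", json=inputDict)
print(r.status_code)
print(r.text)  # the server's error body often explains the 500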

loading json from text file

I am trying to run this code but it raises an error.
import json
import requests
import pprint

data = []
with open('data.txt') as o1:
    for line in o1:
        data.append(json.loads(line))
        print(data)
print(" \n")
print(data)

url = 'http://xyz.abcdfx.in/devicedata'
body_json = json.dumps(data)
headers = {'Content-Type':'application/json'}
d = requests.post(url, data=body_json, headers=headers)
pprint.pprint(d.json())
it shows
ValueError: No JSON object could be decoded
I am new to programming and am not able to figure out what the problem is.
It seems like you are trying to parse the JSON file line by line, but a JSON object may (and usually does) span more than one line. You need to read the entire file in order to parse it:
with open('data.txt') as o1:
    data = json.loads(o1.read())  # read ALL the file and parse. no loops
print(data)
I solved my problem using this:
data = []
with open('data.txt') as f:
    for line in f:
        data = json.loads(line)
        print(data)

url = 'http://xyz.abcdfx.cn/devicedata'
body_json = json.dumps(data)
headers = {'Content-Type':'application/json'}
d = requests.post(url, data=body_json, headers=headers)
pprint.pprint(d.json())
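Note that the loop above rebinds data on every line, so only the last JSON object ends up in the POST. If data.txt holds one JSON object per line and all of them are needed, a small variation collects every record (a sketch, reusing the names from the question):
data = []
with open('data.txt') as f:
    for line in f:
        line = line.strip()
        if line:  # skip blank lines, which json.loads rejects
            data.append(json.loads(line))
body_json = json.dumps(data)  # a JSON array containing every record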

TypeError: expected string or buffer in Google App Engine's Python

I want to show the content of an object using the following code:
def get(self):
    url = "https://www.googleapis.com/language/translate/v2?key=MY-BILLING-KEY&q=hello&source=en&target=ja"
    data = urllib2.urlopen(url)
    parse_data = json.load(data)
    parsed_data = parse_data['data']['translations']
    # This command is ok
    self.response.out.write("<br>")
    # This command shows the above error
    self.response.out.write(str(json.loads(parsed_data[u'data'][u'translations'][u'translatedText'])))
But the error
TypeError: expected string or buffer
appears as a result of the line:
self.response.out.write(str(json.loads(parsed_data[u'data'][u'translations'][u'translatedText'])))
or
self.response.out.write(json.loads(parsed_data[u'data'][u'translations'][u'translatedText']))
UPDATE (fix):
I needed to convert from string to JSON object:
# Convert to String
parsed_data = json.dumps(parsed_data)
# Convert to JSON Object
json_object = json.loads(parsed_data)
# Parse JSON Object
translatedObject = json_object[0]['translatedText']
# Output to page, by using HTML
self.response.out.write(translatedObject)
parse_data = json.load(data)
parsed_data = parse_data['data']['translations']
Those lines already did the json.load, and extracted 'data' and 'translations'. Then instead of:
self.response.out.write(str(
    json.loads(parsed_data)[u'data'][u'translations'][u'translatedText']))
you should:
self.response.out.write(str(
    parsed_data[0][u'translatedText']))
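For reference, a minimal sketch of what the already-parsed response looks like and how to index it directly (the field names follow the Translate v2 output used in the question):
# parse_data is already a dict, roughly:
# {u'data': {u'translations': [{u'translatedText': u'...'}]}}
parse_data = json.load(data)
translations = parse_data[u'data'][u'translations']  # a list with one dict per translation
self.response.out.write(translations[0][u'translatedText'])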
All I needed was to convert from string to JSON object, as in the following code:
# Convert to String
parsed_data = json.dumps(parsed_data)
# Convert to JSON Object
json_object = json.loads(parsed_data)
# Parse JSON Object
translatedObject = json_object[0]['translatedText']
# Output to page, by using HTML
self.response.out.write(translatedObject)
The urllib2.urlopen function returns a file-like object, not a string. You should read the response first.
url = "http://www.example.com/data"
f = urllib2.urlopen(url)
data = f.read()
print json.loads(data)
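Alternatively, json.load can consume the file-like response object directly, since it only needs something with a .read() method (a minimal sketch):
f = urllib2.urlopen(url)
data = json.load(f)  # reads and parses the response in one step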
