Error 400 '{limit} is not a number' from the OpenWeatherMap API - Python

I'm trying to use OpenWeatherMap and I'm following their docs to build the calls.
I don't know why, but I always get this error:
{'cod': '400', 'message': '{limit} is not a number'}
This is my code:
import requests
import json
API_KEY = "49edcdeb737a08b5371c42f85fb4ce3d"
weather_url = "http://api.openweathermap.org/geo/1.0/direct?q={city_name},{country_code}&limit={limit}&appid="
final_url = weather_url + API_KEY
limit = "1"
city_name = "Brindisi"
country_code = "3166"
weather_data = requests.get(final_url).json()
print(weather_data)

You are not replacing the query parameters with actual values: q={city_name},{country_code}&limit={limit} is hard-coded into the URL, so the literal text {limit} is sent to the API, which rejects it as not a number.
You can use an f-string in Python to substitute the actual values for the placeholders (note that the variables must be defined before the f-string is built):
limit = "1"
city_name = "Brindisi"
country_code = "3166"
weather_url = f"http://api.openweathermap.org/geo/1.0/direct?q={city_name},{country_code}&limit={limit}&appid="
final_url = weather_url + API_KEY
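Putting it together, a minimal runnable sketch (one assumption flagged here: country_code should be an ISO 3166-1 country code such as 'IT' for Brindisi, Italy; '3166' is the number of the standard, not a country code):

import requests

API_KEY = "49edcdeb737a08b5371c42f85fb4ce3d"

limit = "1"
city_name = "Brindisi"
country_code = "IT"  # ISO 3166-1 code for Italy; "3166" names the standard, not a country

# The geocoding endpoint returns a list of matches with their coordinates
weather_url = f"http://api.openweathermap.org/geo/1.0/direct?q={city_name},{country_code}&limit={limit}&appid="
final_url = weather_url + API_KEY

weather_data = requests.get(final_url).json()
print(weather_data)  # e.g. [{'name': 'Brindisi', 'lat': ..., 'lon': ..., 'country': 'IT'}]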

Python API script

I am making a Python script that uses the API of a free test-automation website called TestProject.
Link to their API: https://api.testproject.io/docs/v2/
Basically, what I want to do is grab the PDF report of every test and save it somewhere.
But to make the GET request for that I first need a projectID and a jobID, and I have already written functions that fetch them and save them in lists.
The problem is that my code loops over both lists in parallel, so it pairs jobIDs with the wrong projectIDs and throws errors because those combinations don't exist.
So what I need is a way to check which jobIDs belong to which projectID, so that I can make the GET request for all the executionIDs and then fetch the PDF of the report.
I am kind of new to programming, so I would love any help I can get. If anyone has a better solution, please feel free to let me know.
My script:
import requests
import json
import csv
from datetime import datetime
from jsonpath_ng import jsonpath, parse

API_key = 'api_key'
headers = {'Authorization': '{}'.format(API_key)}

list_projectId = []
list_jobId = []
list_executionId = []

ParseData_projectId = parse('$..id')
ParseData_jobId = parse('$..id')
ParseData_executionId = parse('$..id')

def parsing(response, ParseData, list_data):
    # parses data and appends it to the list
    Data = json.loads(response)
    Parsaj = ParseData
    Podatki = Parsaj.find(Data)
    for i in range(0, len(Podatki)):
        vrednost = Podatki[i].value
        list_data.append(vrednost)

def projectId():
    # gets all projectId's and saves them in list_projectId
    url = 'https://api.testproject.io/v2/projects?_start=0'
    response = requests.get(url, headers=headers)
    response_json = response.json()
    converted = json.dumps(response_json)
    parsing(converted, ParseData_projectId, list_projectId)

def jobId():
    # gets all jobId's and saves them in list_jobId
    for i in range(0, len(list_projectId)):
        id = list_projectId[i]
        url = 'https://api.testproject.io/v2/projects/{}'.format(id) + '/jobs?onlyScheduled=false&_start=0'
        response = requests.get(url, headers=headers)
        response_json = response.json()
        converted = json.dumps(response_json)
        parsing(converted, ParseData_jobId, list_jobId)

def executionId():
    # Their API link:
    # https://api.testproject.io/v2/projects/{projectId}/jobs/{jobId}/reports?_start=0
    # the for loop below does not work; here is where I need the help:
    for i in range(0, len(list_projectId)):
        project_id = list_projectId[i]
        job_id = list_jobId[i]
        url = 'https://api.testproject.io/v2/projects/{}'.format(project_id) + '/jobs/{}'.format(job_id) + '/reports?_start=0'
        response = requests.get(url, headers=headers)
        response_json = response.json()
        converted = json.dumps(response_json)
        parsing(converted, ParseData_executionId, list_executionId)

projectId()
print("----------LIST PROJECT ID: ----------")
print(list_projectId)
print("")

jobId()
print("----------LIST JOB ID: ----------")
print(list_jobId)

executionId()
print("----------LIST EXECUTION ID: ----------")
print(list_executionId)
You have to use the 'in' operator to check whether a value exists in a list, so that you only request reports for a jobId that actually belongs to the projectId being iterated over.
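Rather than checking membership across two parallel lists, another option (a sketch, not the answerer's code, assuming the same headers and parsing helpers as the script above) is to record which jobs belong to which project while fetching them, so the reports request always pairs a job with its own project:

# Sketch: map each projectId to its own jobIds instead of keeping parallel lists
project_jobs = {}  # {projectId: [jobId, ...]}

def jobId():
    for project_id in list_projectId:
        url = 'https://api.testproject.io/v2/projects/{}/jobs?onlyScheduled=false&_start=0'.format(project_id)
        response = requests.get(url, headers=headers)
        job_ids = []
        parsing(json.dumps(response.json()), ParseData_jobId, job_ids)
        project_jobs[project_id] = job_ids

def executionId():
    for project_id, job_ids in project_jobs.items():
        for job_id in job_ids:
            url = 'https://api.testproject.io/v2/projects/{}/jobs/{}/reports?_start=0'.format(project_id, job_id)
            response = requests.get(url, headers=headers)
            parsing(json.dumps(response.json()), ParseData_executionId, list_executionId)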

FTX get account info using Python gives Not logged in error

I tried to connect to FTX using Python to get account information, but it keeps giving me a 'Not logged in' error; my code is shown below:
from requests import Request, Session
import time
import hmac
method = 'GET'
ENDPOINT = 'https://ftx.com/api/'
# ENDPOINT = 'https://ftx.us/api/'
path = 'account'
API_KEY = '...'
API_SECRET = '...'
request = Request(method, ENDPOINT + path)
ts = int(time.time() * 1000)
prepared = request.prepare()
print('{0}{1}{2}'.format(ts, prepared.method, prepared.path_url))
signature_payload = '{0}{1}{2}'.format(ts, prepared.method, prepared.path_url).encode()
if prepared.body:
    signature_payload += prepared.body
signature = hmac.new(API_SECRET.encode(), signature_payload, 'sha256').hexdigest()
request.headers['FTX-KEY'] = API_KEY
request.headers['FTX-SIGN'] = signature
request.headers['FTX-TS'] = str(ts)
# get data...
request1 = request.prepare()
print('{0}{1}{2}'.format(ts, request1.method, request1.path_url))
response = Session().send(request1)
data = response.json()
The data gives me:
{'success': False, 'error': 'Not logged in'}
I wonder where I went wrong. I'm located in the US; might that cause the issue? My account hasn't traded anything yet either, and I'm not sure whether that matters.
FWIW, it's been a while and I haven't done much more work on this, but I believe it's because of the location: if the account is registered under FTX International, the code above should work.
Please change the headers to the following and the URL to ftx.us/api:
request.headers['FTXUS-KEY'] = API_KEY
request.headers['FTXUS-SIGN'] = signature
request.headers['FTXUS-TS'] = str(ts)
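Putting the question's code together with that change, a minimal sketch for an FTX US account might look like this (the FTXUS-* header names come from the answer above; whether it succeeds still depends on where the account is registered):

from requests import Request, Session
import time
import hmac

API_KEY = '...'
API_SECRET = '...'

ENDPOINT = 'https://ftx.us/api/'
path = 'account'

request = Request('GET', ENDPOINT + path)
ts = int(time.time() * 1000)
prepared = request.prepare()

# The signature covers the timestamp, HTTP method, and full request path
signature_payload = '{0}{1}{2}'.format(ts, prepared.method, prepared.path_url).encode()
if prepared.body:
    signature_payload += prepared.body
signature = hmac.new(API_SECRET.encode(), signature_payload, 'sha256').hexdigest()

# FTX US expects FTXUS-* header names instead of FTX-*
request.headers['FTXUS-KEY'] = API_KEY
request.headers['FTXUS-SIGN'] = signature
request.headers['FTXUS-TS'] = str(ts)

response = Session().send(request.prepare())
print(response.json())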

Search on Splunk via Python SDK

I'm trying to run a simple search via the Python SDK (Python 3.8.5, splunk-sdk 1.6.14). The examples presented on dev.splunk.com are clear, but something goes wrong when I run a search with my own parameters.
The code is as simple as this
search_kwargs_params = {
    "exec_mode": "blocking",
    "earliest_time": "2020-09-04T06:57:00.000-00:00",
    "latest_time": "2020-11-08T07:00:00.000-00:00",
}
search_query = 'search index=qwe1 trace=111-aaa-222 action=Event.OpenCase'

job = self.service.jobs.create(search_query, **search_kwargs_params)
for result in results.ResultsReader(job.results()):
    print(result)
But the search returns no results. When I run the same query manually in the Splunk web GUI, it works fine.
I've also tried putting all the parameters in the 'search_kwargs_params' dictionary and widening the search time period; that returned some results, but they don't match what I see in the GUI.
Can someone advise?
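One thing worth checking (a guess from the symptoms above, not a confirmed diagnosis): the results endpoint returns only the first 100 rows by default, which can make the SDK output look truncated compared to the GUI; passing count=0 asks for all of them:

import splunklib.results as results

job = self.service.jobs.create(search_query, **search_kwargs_params)
# count=0 lifts the default 100-row cap on the results endpoint
for result in results.ResultsReader(job.results(count=0)):
    print(result)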
This worked for me. You may also try this:
import requests
import time
import json

scheme = 'https'
host = '<your host>'
username = '<your username>'
password = '<your password>'

unique_id = '2021-03-22T18-43-00'  # You may give any unique identifier here
search_query = 'search <your splunk query>'

post_data = {
    'id': unique_id,
    'search': search_query,
    'earliest_time': '1',
    'latest_time': 'now',
}
# 'earliest_time': '1' together with 'latest_time': 'now'
# will run the search query for all time

splunk_search_base_url = scheme + '://' + host + '/servicesNS/{}/search/search/jobs'.format(username)
resp = requests.post(splunk_search_base_url, data=post_data, verify=False, auth=(username, password))
print(resp.text)

is_job_completed = ''
while is_job_completed != 'DONE':
    time.sleep(5)
    get_data = {'output_mode': 'json'}
    job_status_base_url = scheme + '://' + host + '/servicesNS/{}/search/search/jobs/{}'.format(username, unique_id)
    resp_job_status = requests.post(job_status_base_url, data=get_data, verify=False, auth=(username, password))
    resp_job_status_data = resp_job_status.json()
    is_job_completed = resp_job_status_data['entry'][0]['content']['dispatchState']
    print("Current job status is {}".format(is_job_completed))

splunk_summary_base_url = scheme + '://' + host + '/servicesNS/{}/search/search/jobs/{}/results?count=0'.format(username, unique_id)
splunk_summary_results = requests.get(splunk_summary_base_url, data=get_data, verify=False, auth=(username, password))
splunk_summary_data = splunk_summary_results.json()

# Print the results in Python format (strings will be in single quotes)
for data in splunk_summary_data['results']:
    print(data)

print('status code...')
print(splunk_summary_results.status_code)
print('raise for status...')
print(splunk_summary_results.raise_for_status())

print('Results as JSON: ')
# Print the results in valid JSON format (strings will be in double quotes)
# To get the complete JSON data:
print(json.dumps(splunk_summary_data))
# To get only the relevant JSON data:
print(json.dumps(splunk_summary_data['results']))
Cheers!
You may also like to have a look at this very handy tutorial: https://www.youtube.com/watch?v=mmTzzp2ldgU

How to return unique values from Google Places API?

I'm making a request to the Google Places API with the query "Hotels in Brazil", using the following code:
import requests
import json

# base url for the Places Text Search endpoint
url = "https://maps.googleapis.com/maps/api/place/textsearch/json?"

# The text string on which to search
query = input('Search query: ')

# GET request returning a response object
r = requests.get(url + 'query=' + query +
                 '&key=' + api_key +
                 '&maxResults=1000')

# convert the response to JSON
x = r.json()
# store the value of the 'results' key in variable y
y = x['results']

# Getting information from other pages with the "next_page_token" param
places = []
params = {
    'query': query,
    'key': api_key,
    'maxResults': '1000'
}
while "next_page_token" in x:
    params['pageToken'] = x['next_page_token'],
    res = requests.get(url, params=params)
    z = json.loads(res.content)
    places.extend(z['results'])
print(places)
But it's taking too long to return the results (more than a day), so I stopped the execution, and when I printed the list I saw many repeated names: for example, I had 16K places but only 26 unique ones.
Is it possible to return unique values from the Google Places API?
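Two things stand out in the loop above (hedged observations rather than a confirmed fix): x is never updated inside the while, so the loop never terminates and keeps fetching the same page, and Text Search pagination uses the pagetoken parameter rather than pageToken (the trailing comma also turns the token into a tuple). A sketch of a terminating, deduplicating version, assuming standard Text Search behavior:

import time
import requests

def text_search_unique(query, api_key):
    # Sketch: paginate with 'pagetoken' and dedupe on place_id.
    # Text Search returns at most 20 results per page, about 60 in total.
    url = "https://maps.googleapis.com/maps/api/place/textsearch/json"
    params = {'query': query, 'key': api_key}
    seen = {}
    while True:
        page = requests.get(url, params=params).json()
        for place in page.get('results', []):
            seen[place['place_id']] = place  # place_id is unique per place
        token = page.get('next_page_token')
        if not token:
            break
        time.sleep(2)  # the next_page_token takes a moment to become valid
        params = {'pagetoken': token, 'key': api_key}
    return list(seen.values())

Note that a single Text Search query tops out at roughly 60 results, so covering something as broad as "Hotels in Brazil" would need many narrower queries (for example, one per city).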

Cannot get DELETE working with urllib2 in Python for a REST API

Okay, so I'm using code very similar to this gist (https://gist.github.com/metadaddy-sfdc/1374762) to get an authentication token and run simple queries against a Salesforce database through the REST API, using urllib2 in Python.
But when I tried to follow the instructions given in this answer, How to make HTTP DELETE method using urllib2?, I could not get DELETE to work. Both snippets use urllib2, but they are structured differently, so I don't know how to apply the solution from Stack Exchange to my code. As you can tell, I am a beginner, so any help would be greatly appreciated.
Edit: here is the code I'm using, with keys/passwords blanked:
import urllib
import urllib2
import json
import pprint
import re
import subprocess

def authorise():
    consumer_key = '**********************'
    consumer_secret = '**************'
    username = '***********'
    password = '*****************'
    login_server = 'https://login.salesforce.com'
    token_url = login_server + '/services/oauth2/token'
    params = urllib.urlencode({
        'grant_type': 'password',
        'client_id': consumer_key,
        'client_secret': consumer_secret,
        'username': username,
        'password': password
    })
    data = urllib2.urlopen(token_url, params).read()
    oauth = json.loads(data)
    return oauth

def country_id_query(params):
    query_url = oauth['instance_url'] + '/services/data/v23.0/query?%s' % params
    headers = {
        'Authorization': 'OAuth ' + oauth['access_token']
    }
    req = urllib2.Request(query_url, None, headers)
    data = urllib2.urlopen(req).read()
    result = json.loads(data)
    id = result['records'][0]['Id']
    return id

oauth = authorise()
token = oauth['access_token']
print "\ntoken is = " + token

params = urllib.urlencode({
    'q': 'SELECT id from Country__c WHERE name = \'A New Found Land\''
})
id = country_id_query(params)
print "\nCountry id is " + id + "\n"
I am looking to find out what I need to add to this to get DELETE working.
Okay, found the solution to the above, for anyone with a similar problem:
def delete_country(id):
    query_url = oauth['instance_url'] + '/services/data/v23.0/sobjects/Country__c/%s' % id + '/'
    headers = {
        'Authorization': 'OAuth ' + oauth['access_token']
    }
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    req = urllib2.Request(query_url, None, headers)
    req.get_method = lambda: 'DELETE'  # overrides the method so urllib2 sends DELETE
    url = urllib2.urlopen(req)  # deletes the database item
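For completeness, a short usage sketch tying this to the query code above (a hypothetical call, not from the original post; per the Salesforce REST API, a successful DELETE returns HTTP 204 with no body):

id = country_id_query(params)  # look up the record Id as before
delete_country(id)             # issue the DELETE; success returns HTTP 204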
