I managed to scrape some data from a dynamic website and my output is in json format with only value, How do I modify this code to get both key and value json format and write into a file using python
import requests
import json
URL='http://tfda.go.tz/portal/en/trader_module/trader_module/getRegisteredDrugs_products'
payload = "draw=1&columns%5B0%5D%5Bdata%5D=no&columns%5B0%5D%5Bname%5D=&columns%5B0%5D%5Bsearchable%5D=True&columns%5B0%5D%5Borderable%5D=True&columns%5B0%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B0%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B1%5D%5Bdata%5D=certificate_no&columns%5B1%5D%5Bname%5D=&columns%5B1%5D%5Bsearchable%5D=True&columns%5B1%5D%5Borderable%5D=True&columns%5B1%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B1%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B2%5D%5Bdata%5D=brand_name&columns%5B2%5D%5Bname%5D=&columns%5B2%5D%5Bsearchable%5D=True&columns%5B2%5D%5Borderable%5D=True&columns%5B2%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B2%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B3%5D%5Bdata%5D=classification_name&columns%5B3%5D%5Bname%5D=&columns%5B3%5D%5Bsearchable%5D=True&columns%5B3%5D%5Borderable%5D=True&columns%5B3%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B3%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B4%5D%5Bdata%5D=common_name&columns%5B4%5D%5Bname%5D=&columns%5B4%5D%5Bsearchable%5D=True&columns%5B4%5D%5Borderable%5D=True&columns%5B4%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B4%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B5%5D%5Bdata%5D=dosage_form&columns%5B5%5D%5Bname%5D=&columns%5B5%5D%5Bsearchable%5D=True&columns%5B5%5D%5Borderable%5D=True&columns%5B5%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B5%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B6%5D%5Bdata%5D=product_strength&columns%5B6%5D%5Bname%5D=&columns%5B6%5D%5Bsearchable%5D=True&columns%5B6%5D%5Borderable%5D=True&columns%5B6%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B6%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B7%5D%5Bdata%5D=registrant&columns%5B7%5D%5Bname%5D=&columns%5B7%5D%5Bsearchable%5D=True&columns%5B7%5D%5Borderable%5D=True&columns%5B7%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B7%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B8%5D%5Bdata%5D=registrant_country&columns%5B8%5D%5Bname%5D=&columns%5B8%5D%5Bsearchable%5D=True&columns%5B8%5D%5Borderable%5D=True&columns%5B8%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B8%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B9%5D%5Bdata%5D=manufacturer&columns%5B9%5D%5Bname%5D=&columns%5B9%5D%5Bsearchable%5D=True&columns%5B9%5D%5Borderable%5D=True&columns%5B9%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B9%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B10%5D%5Bdata%5D=manufacturer_country&columns%5B10%5D%5Bname%5D=&columns%5B10%5D%5Bsearchable%5D=True&columns%5B10%5D%5Borderable%5D=True&columns%5B10%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B10%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B11%5D%5Bdata%5D=expiry_date&columns%5B11%5D%5Bname%5D=&columns%5B11%5D%5Bsearchable%5D=True&columns%5B11%5D%5Borderable%5D=True&columns%5B11%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B11%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B12%5D%5Bdata%5D=id&columns%5B12%5D%5Bname%5D=&columns%5B12%5D%5Bsearchable%5D=True&columns%5B12%5D%5Borderable%5D=True&columns%5B12%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B12%5D%5Bsearch%5D%5Bregex%5D=False&order%5B0%5D%5Bcolumn%5D=0&order%5B0%5D%5Bdir%5D=asc&start=0&length=3911&search%5Bvalue%5D=&search%5Bregex%5D=False"
with requests.Session() as s:
s.headers={"User-Agent":"Mozilla/5.0"}
s.headers.update({'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'})
res = s.post(URL, data = payload)
for data in res.json()['data']:
serial = data['no']
certno = data['certificate_no']
brndname = data['brand_name']
clssification = data['classification_name']
common_name = data['common_name']
dosage_form = data['dosage_form']
expiry_date = data['expiry_date']
manufacturer = data['manufacturer']
manufacturer_country = data['manufacturer_country']
product_strength = data['product_strength']
registrant = data['registrant']
registrant_country = data['registrant_country']
output = (dataserial,certno,brndname,clssification,
common_name,dosage_form,expiry_date,m anufacturer, manufacturer_country,
product_strength,registrant,registrant_country)
data = {'brandname':brndname, 'cerficate_number':certno,'expiry_date':expiry_date,'product_strength':product_strength}
output = json.dumps(data, ensure_ascii=True, sort_keys=True)
with open('drugs.json', 'w') as file:
json.dump(output, file)
file.write('file')
file.close()
Here is the output I managed to get screenshot but an example of what I require is in this format
{
"brand_name":"Supirocin"
"certificate_no":"TAN 00,1820 D01A GLE"
"classification_name":"Human Medicinal Products"
"common_name":"Mupirocin"
"dosage_form":"Ointment"
"expiry_date":"22-06-2018"
"id":"18345"
"manufacturer":"Glenmark Pharmaceuticals Limited"
"manufacturer_country":"INDIA"
"no":"6"
"product_strength":"2 %w/w"
"registrant":"Glenmark Pharmaceuticals Limited"
"registrant_country":"INDIA"
}
Try this to get the exact output you have mentioned in your post:
import requests
import json
URL='http://tfda.go.tz/portal/en/trader_module/trader_module/getRegisteredDrugs_products'
payload = "draw=1&columns%5B0%5D%5Bdata%5D=no&columns%5B0%5D%5Bname%5D=&columns%5B0%5D%5Bsearchable%5D=True&columns%5B0%5D%5Borderable%5D=True&columns%5B0%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B0%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B1%5D%5Bdata%5D=certificate_no&columns%5B1%5D%5Bname%5D=&columns%5B1%5D%5Bsearchable%5D=True&columns%5B1%5D%5Borderable%5D=True&columns%5B1%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B1%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B2%5D%5Bdata%5D=brand_name&columns%5B2%5D%5Bname%5D=&columns%5B2%5D%5Bsearchable%5D=True&columns%5B2%5D%5Borderable%5D=True&columns%5B2%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B2%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B3%5D%5Bdata%5D=classification_name&columns%5B3%5D%5Bname%5D=&columns%5B3%5D%5Bsearchable%5D=True&columns%5B3%5D%5Borderable%5D=True&columns%5B3%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B3%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B4%5D%5Bdata%5D=common_name&columns%5B4%5D%5Bname%5D=&columns%5B4%5D%5Bsearchable%5D=True&columns%5B4%5D%5Borderable%5D=True&columns%5B4%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B4%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B5%5D%5Bdata%5D=dosage_form&columns%5B5%5D%5Bname%5D=&columns%5B5%5D%5Bsearchable%5D=True&columns%5B5%5D%5Borderable%5D=True&columns%5B5%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B5%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B6%5D%5Bdata%5D=product_strength&columns%5B6%5D%5Bname%5D=&columns%5B6%5D%5Bsearchable%5D=True&columns%5B6%5D%5Borderable%5D=True&columns%5B6%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B6%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B7%5D%5Bdata%5D=registrant&columns%5B7%5D%5Bname%5D=&columns%5B7%5D%5Bsearchable%5D=True&columns%5B7%5D%5Borderable%5D=True&columns%5B7%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B7%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B8%5D%5Bdata%5D=registrant_country&columns%5B8%5D%5Bname%5D=&columns%5B8%5D%5Bsearchable%5D=True&columns%5B8%5D%5Borderable%5D=True&columns%5B8%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B8%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B9%5D%5Bdata%5D=manufacturer&columns%5B9%5D%5Bname%5D=&columns%5B9%5D%5Bsearchable%5D=True&columns%5B9%5D%5Borderable%5D=True&columns%5B9%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B9%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B10%5D%5Bdata%5D=manufacturer_country&columns%5B10%5D%5Bname%5D=&columns%5B10%5D%5Bsearchable%5D=True&columns%5B10%5D%5Borderable%5D=True&columns%5B10%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B10%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B11%5D%5Bdata%5D=expiry_date&columns%5B11%5D%5Bname%5D=&columns%5B11%5D%5Bsearchable%5D=True&columns%5B11%5D%5Borderable%5D=True&columns%5B11%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B11%5D%5Bsearch%5D%5Bregex%5D=False&columns%5B12%5D%5Bdata%5D=id&columns%5B12%5D%5Bname%5D=&columns%5B12%5D%5Bsearchable%5D=True&columns%5B12%5D%5Borderable%5D=True&columns%5B12%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B12%5D%5Bsearch%5D%5Bregex%5D=False&order%5B0%5D%5Bcolumn%5D=0&order%5B0%5D%5Bdir%5D=asc&start=0&length=3911&search%5Bvalue%5D=&search%5Bregex%5D=False"
with requests.Session() as s:
s.headers={"User-Agent":"Mozilla/5.0"}
s.headers.update({'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'})
res = s.post(URL, data = payload)
itemlist = []
for data in res.json()['data']:
item = {}
item['serial'] = data['no']
item['certno'] = data['certificate_no']
item['brndname'] = data['brand_name']
item['clssification'] = data['classification_name']
item['common_name'] = data['common_name']
item['dosage_form'] = data['dosage_form']
item['expiry_date'] = data['expiry_date']
item['manufacturer'] = data['manufacturer']
item['manufacturer_country'] = data['manufacturer_country']
item['product_strength'] = data['product_strength']
item['registrant'] = data['registrant']
item['registrant_country'] = data['registrant_country']
itemlist.append(item)
print(itemlist)
You first create a dictionary and then convert it to JSON .for example :
name ='ali'
family='shahabi'
output={'name' :name ,'family': family}
json.dumps(output)
print :
'{"name": "ali", "family": "shahabi"}'
I suggest convert :
output = (dataserial,certno,brndname,clssification, common_name,dosage_form,expiry_date,m anufacturer, manufacturer_country, product_strength,registrant,registrant_country)
to
output = {'dataserial':dataserial,'certno':certno,'brndname':brndname , ....}
Related
I am trying to collect data from from e-conomic.dk, but the API will only let me retrieve 1000 rows per call, so I think I need to create a list indicating 1 row for every 1000 rows in the datasource. After that I need a logic that skips the first 1000 rows if already retrieved and then continue to retrieve the next 1000 rows etc.
API doc: https://restdocs.e-conomic.com/#endpoints
This is my Python code:
#This code calls the ENTRIES API from e-conomic and sends data to SQL
#API doc = https://restdocs.e-conomic.com/
import requests
import json
import openpyxl
import pandas as pd
import sqlalchemy.engine as sqle
HEADERS = {
"X-AgreementGrantToken": "demo",
"X-AppSecretToken": "demo",
}
def get_api_data(endpoint):
url = "https://restapi.e-conomic.com"
query = {
"pagesize": "1000",
"skippages": str(0)
}
response = requests.get(f'{url}/{endpoint}', headers = HEADERS, params = {**query})
data = response.json()
return data
def get_db_engine():
conn_str = "DRIVER={SQL SERVER};SERVER=JAKOB-MSI;DATABASE=MightyMonday;TRUSTED_CONNECTION=yes"
conn_url = sqle.URL.create("mssql+pyodbc", query = {'odbc_connect': conn_str})
engine = sqle.create_engine(conn_url)
return engine
source = get_api_data("accounting-years")
collection = source["collection"]
# print(source)
entries = pd.DataFrame(collection)
res = requests.get(entries['entries'][0] + '?skippages=0&pagesize=30', headers = HEADERS)
data = res.json()
dataset = pd.DataFrame(data['collection'])
dataset['accountNumber'] = [d.get('accountNumber') for d in dataset.account]
dataset = dataset[['accountNumber', 'amountInBaseCurrency', 'date', 'text']]
#Change the target SQL table here
dataset.to_sql('Entries_demo_values', con = get_db_engine(), if_exists = 'replace', index = False)
This is a fully functional M code written using PowerBI that skips the rows as intended.
let
function1 = (EndPoint as text, PagesToSkip as number) =>
let
Url = "https://restapi.e-conomic.com",
Headers = [#"X-AgreementGrantToken"="demo", #"X-AppSecretToken"="demo"],
Query = [pagesize = "1000", skippages = Text.From(PagesToSkip)],
data = Json.Document(Web.Contents(Url, [Headers = Headers, Query = Query, RelativePath = EndPoint]))
in data,
function2 = (tal) => List.Generate(()=>0, each _ <= tal, each _ +1),
Source = function1("accounts", 0),
collection = Source[collection],
#"Converted to Table" = Table.FromList(collection, Splitter.SplitByNothing(), null, null, ExtraValues.Error),
#"Expanded Column1" = Table.ExpandRecordColumn(#"Converted to Table", "Column1", {"accountNumber", "accountType", "balance", "blockDirectEntries", "debitCredit", "name", "accountingYears", "self", "vatAccount", "totalFromAccount", "openingAccount", "accountsSummed"}, {"accountNumber", "accountType", "balance", "blockDirectEntries", "debitCredit", "name", "accountingYears", "self", "vatAccount", "totalFromAccount", "openingAccount", "accountsSummed"}),
#"Removed Other Columns" = Table.SelectColumns(#"Expanded Column1",{"accountNumber", "accountType", "name"}),
#"Added Conditional Column" = Table.AddColumn(#"Removed Other Columns", "KontoOverskrift", each if [accountType] = "heading" then [name] else null),
#"Filled Down" = Table.FillDown(#"Added Conditional Column",{"KontoOverskrift"}),
#"Changed Type" = Table.TransformColumnTypes(#"Filled Down",{{"accountNumber", Int64.Type}, {"accountType", type text}, {"name", type text}, {"KontoOverskrift", type text}})
in
#"Changed Type"
With the this Python code I am able to retrieve the data and send to Excel. But the skip 1000 rows logic explained above
#This code calls the account API from e-conomic and writes a Excel file with the data
import requests
import json
import openpyxl
import pandas as pd
def Function1(endpoint):
url = "https://restapi.e-conomic.com"
headers = {
"X-AgreementGrantToken": "demo",
"X-AppSecretToken": "demo",
}
query = {
"pagesize": "1000",
"skippages": str(0)
}
response = requests.get(f'{url}/{endpoint}', headers = headers, params = {**query})
data = response.json()
return data
def function2(tal):
return range(0, tal+1)
source = Function1("accounts")
# source = function1("accounts", 0)
collection = source["collection"]
dataset = pd.DataFrame(collection)
data = dataset[['accountNumber','accountType','name']]
data.to_excel('AccountDataAuto.xlsx', index = False)
This is my list:
unique_IMO = [94229,95986,96967,94731,95731,96612]
I need to pass these numbers to the following request:
url = 'https://api.lloydslistintelligence.com/v1/aispositionhistory?output=json&vesselImo={0}&pageNumber={1}'.format(unique_IMO,1)
I was able to call the endpoint for each number using a for loop but I don't know how to pass all the numbers at once.
I tried the below code but it still gave an error.
test1 = format(','.join(map(str,unique_IMO)))
Can someone please help me with this?
I have a list of numbers which I am trying to pass all at once to an API call. I did check using Postman to see if the endpoint accepts multiple values and it does.
API documentation snippet
So below is what I'm doing right now and it works. I am trying to make the api calls faster/efficient.
df_list = []
for ind,row in vessels.iterrows():
vesselImo = int(row['Imo'])
#Retrieve data from aispositionhistory endpoint
vessel_hist = pd.DataFrame()
total_recs = 0
for date_string in date_list:
url = 'https://api.lloydslistintelligence.com/v1/aispositionhistory?output=json&vesselImo={0}&dateRange={1}&pageNumber={2}'.format(vesselImo,date_string,1)
head = {'Authorization': '{}'.format(api_token)}
response = requests.get(url, headers=head)
#****DEBUGGING****
#print("status code: ", response.status_code )
if(response.json()['Data']['totalRecords'] != 0):
tmp = response.json()['Data']['items']
df = json_normalize(tmp)
vessel_hist = vessel_hist.append(df,ignore_index=True)
#Get reported number of records for validation
total_recs = total_recs + response.json()['Data']['totalRecords']
#Identify if API response is multiple pages
if(response.json()['Data']['totalPages'] > 1):
num_pages = response.json()['Data']['totalPages']
#print('API pull had more than one page: ' + date_string)
for page_no in range(2,num_pages+1):
url = 'https://api.lloydslistintelligence.com/v1/aispositionhistory?output=json&vesselImo={0}&dateRange={1}&pageNumber={2}'.format(vesselImo,date_string,1)
response = requests.get(url, headers=head)
tmp = response.json()['Data']['items']
df = json_normalize(tmp)
vessel_hist = vessel_hist.append(df,ignore_index=True)
# Validation based on record count
if(total_recs != vessel_hist.shape[0]):
print('Validation Error: reported records do not match dataframe')
if(vessel_hist.shape[0]>0):
#Format Dataframe
new_columns = ['vesselId','MMSI','PositionTimestamp','Latitude','Longitude','Speed','Course','Rot','Heading',
'nearestPlace','nearestPlaceId','nearestCountry','Distance','Destination','Eta','Draught',
'Dimensions','Status','Ship_type','Source']
vessel_hist.columns = new_columns
vessel_hist = vessel_hist[['MMSI','PositionTimestamp','Status','Latitude','Longitude','Speed','Course','Rot',
'Heading','Draught','Destination','Eta','Source','Ship_type','Dimensions',
'Distance','nearestCountry','nearestPlace','nearestPlaceId','vesselId']]
vessel_hist['PositionTimestamp'] = pd.to_datetime(vessel_hist['PositionTimestamp'],dayfirst=False)
vessel_hist.sort_values('PositionTimestamp', inplace=True)
vessel_hist.reset_index(drop=True, inplace=True)
df_list.append(vessel_hist)
print('Input vessel Id: ' + str(vesselImo))
print('Input Date Range: ' + start_input + ' - ' + end_input)
print('No. of AIS records: ' + str(vessel_hist.shape[0]))
df_list
vessels is a dataframe which contains the IMO numbers
vessels = pd.DataFrame((94229,95986,96967,94731,95731,96612),columns=['Imo'])
date_list is a list created based on the desired time range.
Hope this example will help
import requests
def main():
unique_IMO = [94229, 95986, 96967, 94731, 95731, 96612]
base_url = "http://httpbin.org"
query_params = {
"output": "json",
"vesselImo": unique_IMO,
"pagerNumber": 1
}
response = requests.get(url=base_url + "/get", params=query_params)
print(response.json())
if __name__ == '__main__':
main()
GET query parameters will be:
{'args': {'output': 'json', 'pagerNumber': '1', 'vesselImo': ['94229', '95986', '96967', '94731', '95731', '96612']}, 'headers': {'Accept': '*/*', 'Accept-Encoding': 'gzip, deflate', 'Host': 'httpbin.org', 'User-Agent': 'python-requests/2.27.1', 'X-Amzn-Trace-Id': 'Root=1-633bc95b-2771014e17fa5dc6580e4e3e'}, 'origin': 'x.x.x.x', 'url': 'http://httpbin.org/get?output=json&vesselImo=94229&vesselImo=95986&vesselImo=96967&vesselImo=94731&vesselImo=95731&vesselImo=96612&pagerNumber=1'}```
I have 2 django server. I want to send some POST data from the server A to server B.
I use this code on server A to send data (I simply follow the tutorial ) :
payload = {"contenu" : Contenu, "ID" : hashage}
payload_json = json.dumps(payload, separators=(',', ': '))
with open('backend/config.json') as json_data:
facto = json.load(json_data)
json_data.close
hostnamefacto = facto["Factory"]["IP"]
portFacto = facto["Factory"]["port"]
reponse = requests.post('http://'+hostnamefacto+':'+portFacto+'/outil/test/', data = payload_json)
On server B, I use this code to get data :
try:
contenu = request.POST['contenu']
except KeyError:
contenu = None
try:
ID = request.POST['ID']
except KeyError:
ID = None
But ID and contenu are equal None. Does someone have an idea of how to do it ?
Thanks a lot.
You're reading the POST parameters where you want the raw body parsed as JSON:
data = json.loads(request.body)
id = data['ID']
Let the requests library do the JSON encoding for you:
payload = {"contenu" : Contenu, "ID" : hashage}
r = requests.post('http://'+hostnamefacto+':'+portFacto+'/outil/test/', json=paylod)
I want to send a PUT request with the following data structure:
{ body : { version: integer, file_id: string }}
Here is the client code:
def check_id():
id = request.form['id']
res = logic.is_id_valid(id)
file_uuid = request.form['file_id']
url = 'http://localhost:8050/firmwares'
r = requests.put(url = url, data = {'body' : {'version': id, 'file_id': str(file_uuid)}})
Here is the server code:
api.add_resource(resources.FirmwareNewVerUpload, '/firmwares')
class FirmwareNewVerUpload(rest.Resource):
def put(self):
try:
args = parser.parse_args()
except:
print traceback.format_exc()
print 'data: ', str(args['body']), ' type: ', type(args['body'])
return
The server prints:
data: version type: <type 'unicode'>
And this result is not what I want. Instead of inner dictionary I got a string with name of one dictionary key. If I change 'version' to 'ver'
r = requests.put(url = url, data = {'body' : {'ver': id, 'file_id': str(file_uuid)}})
server prints
data: ver type: <type 'unicode'>
How to send a dictionary with inner dictionary?
Use json= instead of data= when doing requests.put and headers = {'content-type':'application/json'}:
r = requests.put(url = url,
json = {'body' : {'version': id, 'file_id': str(file_uuid)}},
headers = {'content-type':'application/json'})
In official doc you found a topic called More complicated POST requests
There are many times that you want to send data that is not form-encoded. If you pass in a string instead of a dict, that data will be posted directly.
>>> import json
>>> url = 'https://api.github.com/some/endpoint'
>>> payload = {'some': 'data'}
>>> r = requests.post(url, data=json.dumps(payload))
Maybe convert your data to JSON could be a good approach.
import json
def check_id():
id = request.form['id']
res = logic.is_id_valid(id)
file_uuid = request.form['file_id']
url = 'http://localhost:8050/firmwares'
payload = {'body' : {'version': id, 'file_id': str(file_uuid)}}
r = requests.put(url=url, data=json.dumps(payload))
import requests
import json
import urllib2
data = '{"userId":"faraz#wittyparrot.com","password":"73-rRWk_"}'
response = requests.post(url, data=data, headers=
{"ContentType":"application/json"})
dataa = json.loads(response.content)
a = dataa['accessToken']
print a
tiketId = a['tokenType'] + a['tokenValue']
print tiketId
wit = '{ "name": "wit along with the attachment","parentId": "6d140705-c178-4410-bac3-b15507a5415e", "content": "faraz khan wit", "desc": "This is testing of Authorization wit","note": "Hello auto wit"}'
response = requests.post(URLcreatewit, data=wit , headers={"Content-Type":"application/json","Authorization":tiketId} )
createwit = json.loads(response.content)
print createwit
Id = createwit['id']
WitId = Id
print WitId
so here witId is 2d81dc7e-fc34-49d4-b4a7-39a8179eaa55 that comes as response
now i want to use that witId into below json as a input:
Sharewit = '{ "contentEntityIds":["'+WitId+'"],"userEmailIds": ["ediscovery111#gmail.com"],"permission":{"canComment": false,"canRead": true,"canEditFolderAndWits": false,"canFurtherShare": false,"canEditWits": false}, "inherit":true}'
response = requests.post(URLcreatewit, data= Sharewit , headers={"Content-Type":"application/json","Authorization":tiketId} )
print response.status_code
so in the last json, it seems it does not take the value of witId and gives 400 status error
I was trying to do the similar thing and Here is how I have done.
Assuming the rest api responds with a Json Object.
id = response.json()["id"]
However if the response is a Json Array
Looped in through the array and got the ids appended to an array
item = []
for item in response.json():
ids.append(item["id")
Also, I have used a Json Object - to be able to change values.
Instead of creating it as Json String.
sharewit["userEmailIds"] = ["ediscovery111#gmail.com"]
sharewit["contentEntityIds"] = [id]
response = requests.post(URLcreatewit, data=json.dumps(sharewit), headers={"Content-Type":"application/json","Authorization":tiketId} )