With this code
import pandas as pd
import requests
link = "http://sp.kaola.com/api/category/goods?pageNo=1&pageSize=20&search=%7B%0A%20%20%22sortType%22%20%3A%20%7B%0A%20%20%20%20%22type%22%20%3A%200%0A%20%20%7D%2C%0A%20%20%22isNavigation%22%20%3A%20%220%22%2C%0A%20%20%22filterTypeList%22%20%3A%20%5B%0A%20%20%20%20%7B%0A%20%20%20%20%20%20%22id%22%20%3A%20%5B%0A%20%20%20%20%20%20%20%204055%0A%20%20%20%20%20%20%5D%2C%0A%20%20%20%20%20%20%22type%22%20%3A%201%2C%0A%20%20%20%20%20%20%22category%22%20%3A%20%7B%0A%20%20%20%20%20%20%20%20%22parentCategoryId%22%20%3A%200%2C%0A%20%20%20%20%20%20%20%20%22categoryId%22%20%3A%204055%0A%20%20%20%20%20%20%7D%0A%20%20%20%20%7D%0A%20%20%5D%2C%0A%20%20%22source%22%20%3A%201%2C%0A%20%20%22noStoreCount%22%20%3A%200%2C%0A%20%20%22isActivity%22%20%3A%200%2C%0A%20%20%22storeCount%22%20%3A%2060%0A%7D"
df = requests.get(link).json()
print(df)
I can get the response for the URL I'm querying.
But how can I get the data when the URL's GET argument becomes pageNo = 3, 4 and so on?
I want to get all of the responses from all the pages in one request. Is this possible?
Each page gives me 20 results. How can I get all of them?
Update:
I use this method to clean the JSON:
from pandas.io.json import json_normalize
df1 = df['body']
df_final = json_normalize(df1['result'],'goodsList')
How can I get all the responses into only one DataFrame?
Getting all the responses on one page doesn't seem possible. That is not something you can control; only the person who made the website can.
What you can do, though, is loop through the pages of the search result and add them together. I notice the response has a hasMore field which tells you whether there are more search results. That gives something like this:
import requests

link = "http://sp.kaola.com/api/category/goods?pageSize=20&search=%7B%0A%20%20%22sortType%22%20%3A%20%7B%0A%20%20%20%20%22type%22%20%3A%200%0A%20%20%7D%2C%0A%20%20%22isNavigation%22%20%3A%20%220%22%2C%0A%20%20%22filterTypeList%22%20%3A%20%5B%0A%20%20%20%20%7B%0A%20%20%20%20%20%20%22id%22%20%3A%20%5B%0A%20%20%20%20%20%20%20%204055%0A%20%20%20%20%20%20%5D%2C%0A%20%20%20%20%20%20%22type%22%20%3A%201%2C%0A%20%20%20%20%20%20%22category%22%20%3A%20%7B%0A%20%20%20%20%20%20%20%20%22parentCategoryId%22%20%3A%200%2C%0A%20%20%20%20%20%20%20%20%22categoryId%22%20%3A%204055%0A%20%20%20%20%20%20%7D%0A%20%20%20%20%7D%0A%20%20%5D%2C%0A%20%20%22source%22%20%3A%201%2C%0A%20%20%22noStoreCount%22%20%3A%200%2C%0A%20%20%22isActivity%22%20%3A%200%2C%0A%20%20%22storeCount%22%20%3A%2060%0A%7D"
max_pages = 100
data = {}
for page_no in range(1, max_pages + 1):  # pageNo starts at 1, as in the original request
    try:
        req = requests.get(link + "&pageNo=" + str(page_no))
    except requests.ConnectionError:
        break  # Stop the loop if the URL could not be reached.
    df = req.json()
    if df["body"]["result"]["hasMore"] == 0:
        break  # The page says it has no more results.
    # Here, add whatever data you want to save from df to data
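To get everything into a single DataFrame (the follow-up in your update), one option is to normalize each page's goodsList as it arrives and concatenate at the end. A sketch along those lines, reusing link and max_pages from above and the same json_normalize call as in your update:
import pandas as pd
import requests
from pandas.io.json import json_normalize

pages = []  # one flattened DataFrame per page
for page_no in range(1, max_pages + 1):  # link and max_pages as defined above
    try:
        req = requests.get(link + "&pageNo=" + str(page_no))
    except requests.ConnectionError:
        break  # stop if the URL could not be reached
    result = req.json()["body"]["result"]
    pages.append(json_normalize(result, 'goodsList'))  # flatten this page's goodsList
    if result["hasMore"] == 0:
        break  # the API says there are no more pages

df_final = pd.concat(pages, ignore_index=True)  # all pages in one DataFrame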
I'm using Python to do some data cleaning/task automation, but am having a hard time reading in data through an API with multiple conditions. My data is as follows:
url = "https://data.cityofnewyork.us/resource/erm2-nwe9.json?descriptor='Social Distancing' or descriptor='Face Covering Violation' or descriptor='Business not in compliance'"
r = requests.get(url)
x = r.json()
df = pd.DataFrame(x)
When I pull it, it only provides me with data where the descriptor is 'Social Distancing'. Any tips on how to change this so that it filters for all of the needed data?
Make three requests and merge their responses:
def get_data(filter):
    url = "https://data.cityofnewyork.us/resource/erm2-nwe9.json?descriptor='{0}'".format(filter)
    r = requests.get(url)
    return pd.DataFrame(r.json())

df = pd.concat([
    get_data('Social Distancing'),
    get_data('Face Covering Violation'),
    get_data('Business not in compliance')
])
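Alternatively, this endpoint looks like a Socrata dataset, and the SODA API usually lets you combine conditions with OR inside a single $where parameter, so one request may be enough. A sketch, assuming $where is supported for this resource:
import pandas as pd
import requests

url = "https://data.cityofnewyork.us/resource/erm2-nwe9.json"
params = {
    "$where": "descriptor='Social Distancing' OR "
              "descriptor='Face Covering Violation' OR "
              "descriptor='Business not in compliance'"
}
r = requests.get(url, params=params)  # requests handles the URL encoding
df = pd.DataFrame(r.json())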
My script loops through a GET request and concatenates the results into a pandas DataFrame to export to Excel. Everything works well until the loop has run 5 times, and then the site returns a 403 error. Somehow the site knows once I have requested 50k rows and gives the 403 error. Is there a way around this that anyone can share with me, please? step is a variable at the end of the URL string that tells how many rows to bring back; I can only do 10k at a time or it lags so much it won't work. SKIP is another variable in the URL string that skips forward a set number of rows. The script is also super slow, so if anyone can give hints on how to make it faster too, it would be much appreciated. Thanks.
from selenium import webdriver
import time
import json
import pandas as pd
import requests
driver = webdriver.Chrome()
executor_url = driver.command_executor._url
session_id = driver.session_id
#put the url/website you are trying to scrape from here > this should be the url you go to when you login
driver.get(r"http://10.131.178.162:9090/xGLinear/login.html")
#waits 60 secs to give you time to login manually
time.sleep(60)
#this will copy all the cookies and login info you need from chrome and now you can start using requests
cookies = driver.get_cookies()
s = requests.Session()
for cookie in cookies:
    s.cookies.set(cookie['name'], cookie['value'])
res = s.get(r"http://10.131.178.162:9090/orders/OrderStatus?$dataAccess=ALL&$&$dateRange=ordered&$endDate=06%2F28%2F2020&$filter=%7B%22operator%22:%22AND%22,%22criteria%22:%5B%7B%22operator%22:%22EQUALS%22,%22fieldName%22:%22lineMode%22,%22value%22:%22R%22%7D,%7B%22operator%22:%22EQUALS%22,%22fieldName%22:%22creditHold%22,%22value%22:%22N%22%7D,%7B%22operator%22:%22OR%22,%22criteria%22:%5B%7B%22fieldName%22:%22status%22,%22operator%22:%22EQUALS%22,%22value%22:%22AP%22%7D,%7B%22fieldName%22:%22status%22,%22operator%22:%22EQUALS%22,%22value%22:%22SC%22%7D%5D%7D%5D%7D&$skip=0&$sortBy=%5B%22-key.orderlineId%22%5D&$startDate=06%2F22%2F2020&$top=10000")
data = json.loads(res.text)
TotalR=data['totalRows']
SKIP=10000
skip1=10000
total_count= int(TotalR/skip1)
step=10000
Count=0
df = pd.DataFrame()
try:
    while Count < total_count:
        res1 = s.get(f"http://10.131.178.162:9090/orders/OrderStatus?$dataAccess=ALL&$skip={SKIP}&$dateRange=ordered&$endDate=06%2F28%2F2020&$filter=%7B%22operator%22:%22AND%22,%22criteria%22:%5B%7B%22operator%22:%22EQUALS%22,%22fieldName%22:%22lineMode%22,%22value%22:%22R%22%7D,%7B%22operator%22:%22EQUALS%22,%22fieldName%22:%22creditHold%22,%22value%22:%22N%22%7D,%7B%22operator%22:%22OR%22,%22criteria%22:%5B%7B%22fieldName%22:%22status%22,%22operator%22:%22EQUALS%22,%22value%22:%22AP%22%7D,%7B%22fieldName%22:%22status%22,%22operator%22:%22EQUALS%22,%22value%22:%22SC%22%7D%5D%7D%5D%7D&$skip=0&$sortBy=%5B%22-key.orderlineId%22%5D&$startDate=06%2F22%2F2020&$top={step}")
        data1 = json.loads(res1.text)
        for d in data1['data']:
            dict_new = pd.DataFrame(d)
            df = pd.concat([df, dict_new])
        SKIP += 10000
        Count += 1
except:
    print(res1.status_code)
final=pd.DataFrame(data['data'])
final1=pd.DataFrame(final)
final2= pd.concat([df,final1])
final2.to_excel(r'C:\Users\c\Desktop\xg.xlsx',index= False)
There is no way to work around this; you have simply reached your limit.
A solution would be to look at the documentation and find out how often this count resets.
Then you can add a wait, in order to keep within that rhythm and get rid of the 403 error code.
import time

try:
    cpt = 0
    while Count < total_count:
        res1 = s.get(f"http://10.131.178.162:9090/orders/OrderStatus?$dataAccess=ALL&$skip={SKIP}&$dateRange=ordered&$endDate=06%2F28%2F2020&$filter=%7B%22operator%22:%22AND%22,%22criteria%22:%5B%7B%22operator%22:%22EQUALS%22,%22fieldName%22:%22lineMode%22,%22value%22:%22R%22%7D,%7B%22operator%22:%22EQUALS%22,%22fieldName%22:%22creditHold%22,%22value%22:%22N%22%7D,%7B%22operator%22:%22OR%22,%22criteria%22:%5B%7B%22fieldName%22:%22status%22,%22operator%22:%22EQUALS%22,%22value%22:%22AP%22%7D,%7B%22fieldName%22:%22status%22,%22operator%22:%22EQUALS%22,%22value%22:%22SC%22%7D%5D%7D%5D%7D&$skip=0&$sortBy=%5B%22-key.orderlineId%22%5D&$startDate=06%2F22%2F2020&$top={step}")
        data1 = json.loads(res1.text)
        for d in data1['data']:
            dict_new = pd.DataFrame(d)
            df = pd.concat([df, dict_new])
        SKIP += 10000
        Count += 1
        cpt += 1
        if cpt == 5:
            cpt = 0
            time.sleep(x)  # x is how many seconds you'll need to wait
except:
    print(res1.status_code)
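If the documentation doesn't say how often the counter resets, another option is to react to the 403 itself and back off before retrying the same page. A rough sketch of that idea, reusing the variables from the script above; the 60-second pause is only a guess, and request_url stands in for the long OrderStatus URL built with the current SKIP and step:
import time

while Count < total_count:
    # request_url: the full OrderStatus URL from the script above, with the current SKIP and step
    res1 = s.get(request_url)
    if res1.status_code == 403:
        time.sleep(60)  # back off (guessed duration), then retry the same page
        continue
    data1 = res1.json()
    for d in data1['data']:
        df = pd.concat([df, pd.DataFrame(d)])
    SKIP += step
    Count += 1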
I am a beginner in Python Programming and I am practicing scraping different values from websites.
I have extracted the items from a particular website and now want to write them onto a .xls file.
The whole web page has 714 records, including duplicates, but the Excel sheet is displaying only 707 records because of the zip() function, which stops when the smallest list gets exhausted. Here the smallest list is the email list, so it gets exhausted and the iteration stops because of this property of zip(). I have even added a check in an if condition for the records that have no email address, so that it displays "No email address", but the result is still the same: 704 records, with duplicates. Kindly tell me where I am going wrong and, if possible, suggest what to do about removing duplicate records and displaying "No email address" where there is no email.
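(For reference, a minimal illustration of the zip() behaviour described above, separate from the scraper code below: zip() stops at the shortest input, while itertools.zip_longest pads the shorter lists with a fill value. Note that padding at the end still does not tell you which record the missing email belongs to, which is why the answer below restructures the scrape instead.)
from itertools import zip_longest

names = ['a', 'b', 'c']
emails = ['a@x.com']  # shorter list, e.g. one missing email address

print(list(zip(names, emails)))
# [('a', 'a@x.com')] -> the rows for 'b' and 'c' are silently dropped

print(list(zip_longest(names, emails, fillvalue='No Email address')))
# [('a', 'a@x.com'), ('b', 'No Email address'), ('c', 'No Email address')]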
import requests
from bs4 import BeautifulSoup as bs
import pandas as pd

res = requests.get('https://www.raywhite.com/contact/?type=People&target=people&suburb=Sydney%2C+NSW+2000&radius=50%27%27&firstname=&lastname=&_so=contact', headers={'User-agent': 'Super Bot 9000'})
soup = bs(res.content, 'lxml')
names=[]
positions=[]
phone=[]
emails=[]
links=[l1['href'] for l1 in soup.select('.agent-name a')]
nlist = soup.find_all('li', class_='agent-name')
plist= soup.find_all('li',class_='agent-role')
phlist = soup.find_all('li', class_='agent-officenum')
elist = soup.find_all('a',class_='val withicon')
for n1 in nlist:
    names.append(n1.text)
for p1 in plist:
    positions.append(p1.text)
for ph1 in phlist:
    phone.append(ph1.text)
for e1 in elist:
    emails.append(e1.get('href') if e1.get('href') is not None else 'No Email address')
df = pd.DataFrame(list(zip(names,positions,phone,emails,links)),columns=['Names','Position','Phone','Email','Link'])
df.to_excel(r'C:\Users\laptop\Desktop\RayWhite.xls', sheet_name='MyData2', index = False, header=True)
The Excel sheet looks like this, where we can see that the last record's name and its email address do not match:
Ray White Excel Sheet
It looks like you are doing many find_all's and then stitching them together. My advice would be to do one find_all and then iterate through that. It makes it a lot easier to build out the columns of your DataFrame when all your data is in one place.
I have updated the below code to successfully extract links without error. With any code there is a number of ways to perform the same task. This one may not be the most elegant but it does get the job done.
import requests
from bs4 import BeautifulSoup
import pandas as pd
r = requests.get('https://www.raywhite.com/contact/?type=People&target=people&suburb=Sydney%2C+NSW+2000&radius=50%27%27&firstname=&lastname=&_so=contact', headers = {'User-agent': 'Super Bot 9000'})
soup = BeautifulSoup(r.text, 'html.parser')
get_cards = soup.find_all("div",{"class":"card horizontal-split vcard"})
agent_list = []
for item in get_cards:
    name = item.find('li', class_='agent-name').text
    position = item.find('li', class_='agent-role').text
    phone = item.find('li', class_='agent-officenum').text
    link = item.find('li', class_='agent-name').a['href']
    try:
        email = item.find('a', class_='val withicon')['href'].replace('mailto:', '')
    except:
        email = 'No Email address'
    agent_list.append({'name': name, 'position': position, 'email': email, 'link': link})
df = pd.DataFrame(agent_list)
Above is some sample code I have put together to create the DataFrame. The key here is to do a single find_all on {"class": "card horizontal-split vcard"} and pull each field from within that card.
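If you still see duplicate records after this, you can drop them from the finished DataFrame before writing it out, reusing the to_excel call from your original script:
df = df.drop_duplicates()
df.to_excel(r'C:\Users\laptop\Desktop\RayWhite.xls', sheet_name='MyData2', index=False, header=True)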
Hope that has been some help.
Cheers,
Adam
I have a code column which I would like to pass to a web service and update two fields in the dataframe (dfMRD1['Cache_Ticker'] and dfMRD1['Cache_Product']) with two values from the returned JSON (RbcSecurityDescription and RbcSecurityType1). I have achieved this by iteration, but I'd like to know if there is a more efficient way to do it.
# http://postgre01:5002/bond/912828XU9
import requests

url = 'http://postgre01:5002/bond/'

def fastquery(code):
    response = requests.get(url + code)
    return response.json()
Here is the sample return call:
Here is the update of dfMRD1['Cache_Ticker'] and dfMRD1['Cache_Product']:
dfMRD1 = df[['code']].drop_duplicates()
dfMRD1['Cache_Ticker'] = ""
dfMRD1['Cache_Product'] = ""
for index, row in dfMRD1.iterrows():
    result = fastquery(row['code'])
    row['Cache_Ticker'] = result['RbcSecurityDescription']
    row['Cache_Product'] = result['RbcSecurityType1']
display(dfMRD1.head(5))
Would it be best to just return the JSON array, unnest it and dump all the fields in its contents to another df which I can then join with dfMRD1? What is the best way to achieve this?
The most time-consuming part of your code is likely to be in making synchronous requests. Instead, you could leverage requests-futures to make asynchronous requests, construct the columns as lists of results and assign back to the DF. We have nothing to test with but the approach would look like this:
from requests_futures.sessions import FuturesSession

session = FuturesSession(max_workers=10)

codes = df['code'].drop_duplicates().tolist()  # Take the codes out of the DF
url = 'http://postgre01:5002/bond/'

fire_requests = [session.get(url + code) for code in codes]  # Async requests
responses = [item.result().json() for item in fire_requests]  # Grab the results as JSON

dfMRD1['Cache_Ticker'] = [result['RbcSecurityDescription']
                          for result in responses]
dfMRD1['Cache_Product'] = [result['RbcSecurityType1']
                           for result in responses]
Depending on the size of the DF, you may get a lot of data in memory. If that becomes an issue, you'll want a background callback trimming your JSON responses as they come back.
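A sketch of that trimming, assuming a requests-futures version that forwards the standard requests response hooks (older versions exposed the same idea through a background_callback argument); the hook runs in the worker thread, so only the two fields you need are kept around:
from requests_futures.sessions import FuturesSession

session = FuturesSession(max_workers=10)
url = 'http://postgre01:5002/bond/'

def keep_two_fields(resp, *args, **kwargs):
    # Runs as soon as the response arrives: parse the JSON and keep only the fields we need
    payload = resp.json()
    resp.data = {'RbcSecurityDescription': payload['RbcSecurityDescription'],
                 'RbcSecurityType1': payload['RbcSecurityType1']}

codes = df['code'].drop_duplicates().tolist()  # df as in the question
futures = [session.get(url + code, hooks={'response': keep_two_fields}) for code in codes]
results = [f.result().data for f in futures]

dfMRD1['Cache_Ticker'] = [r['RbcSecurityDescription'] for r in results]
dfMRD1['Cache_Product'] = [r['RbcSecurityType1'] for r in results]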
I am having difficulty increasing the amount of requests I can make per second with Google Maps Geocoder. I am using a paid account (at $.50/1000 requests), so according to the Google Geocoder API I should be able to make up to 50 requests per second.
I have a list of 15k address which I am trying to get GPS coordinates for. I am storing them as a Pandas Dataframe and looping over them. To make sure this wasn't due to slow looping, I tested how fast it loops over all 15k, and it only took 1.5 seconds. But I was only able to make less than 1 request per second. I realized this might be due to my slow internet connection, so I fired up a Windows Google Cloud VM with obviously fast internet. I was able to speed up the requests to about 1.5 requests/ second, but still way slower than theoretically possible.
I thought this might be due to using a python library Geocoder, so I tried making the request directly using python requests, but this didn't speed things up either.
Does this have something to do with the fact that I'm not using a server? I would think this wouldn't matter since I'm using a Google Cloud VM. Also, I know this doesn't have to do with multithreading, since it can already iterate through the loop using 1 core with extreme speed. Thanks in advance for any thoughts.
import geocoder
import pandas as pd
import time
import requests
startTime = time.time()
#Read File Name with all transactions up to October 4th
input_filename = "C:/Users/username/Downloads/transaction-export 10-04-2017.csv"
df = pd.read_csv(input_filename, header=0, error_bad_lines=False)
#Only look at customer addresses
df = df['Customer Address']
#Drop duplicates and NAs
df = df.drop_duplicates(keep='first')
df = df.dropna()
#convert dataframe to string
addresses = df.tolist()
#Google Api Key
api_key = 'my_api_key'
#create empty array
address_gps = []
#google api address
url = 'https://maps.googleapis.com/maps/api/geocode/json'
#For each address return its geocoded latlng coordinates
for int, val in enumerate(addresses):
    ''' Direct way to make call without geocoder
    params = {'sensor': 'false', 'address': address, 'key': api_key}
    r = requests.get(url, params=params)
    results = r.json()['results']
    location = results[0]['geometry']['location']
    print location['lat'], location['lng']
    num_address = num_address+1;
    '''
    endTime = time.time()
    g = geocoder.google(val, key=api_key, exactly_one=True)
    print "Address,", (val), "Number,", int, "Total,", len(addresses), "Time,", endTime-startTime
    if g.ok:
        address_gps.append(g.latlng)
        print g.latlng
    else:
        address_gps.append(0)
        print("Error")
    #save every 100 iterations
    if int%100==0:
        # save as csv
        df1 = pd.DataFrame({'Address GPS': address_gps})
        df1.to_csv('C:/Users/username/Downloads/AllCustomerAddressAsGPS.csv')
# save as csv
df1 = pd.DataFrame({'Address GPS': address_gps})
df1.to_csv('C:/Users/username/Downloads/AllCustomerAddressAsGPS.csv')
One way to increase the speed of this is to maintain the requests session with Google, rather than creating a new session with every request. This is suggested in the geocoder documentation.
Your modified code will then be:
import requests
#Google Api Key
api_key = 'my_api_key'
#create empty array
address_gps = []
#google api address
url = 'https://maps.googleapis.com/maps/api/geocode/json'
#For each address return its geocoded latlng coordinates
with requests.Session() as session:
    for int, val in enumerate(addresses):
        ''' Direct way to make call without geocoder
        params = {'sensor': 'false', 'address': address, 'key': api_key}
        r = requests.get(url, params=params)
        results = r.json()['results']
        location = results[0]['geometry']['location']
        print location['lat'], location['lng']
        num_address = num_address+1;
        '''
        endTime = time.time()
        g = geocoder.google(val, key=api_key, exactly_one=True, session=session)
        print "Address,", (val), "Number,", int, "Total,", len(addresses), "Time,", endTime-startTime
        if g.ok:
            address_gps.append(g.latlng)
            print g.latlng
        else:
            address_gps.append(0)
            print("Error")
        #save every 100 iterations
        if int%100==0:
            # save as csv
            df1 = pd.DataFrame({'Address GPS': address_gps})
            df1.to_csv('C:/Users/username/Downloads/AllCustomerAddressAsGPS.csv')
# save as csv
df1 = pd.DataFrame({'Address GPS': address_gps})
df1.to_csv('C:/Users/username/Downloads/AllCustomerAddressAsGPS.csv')
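Session reuse removes the connection setup overhead, but each geocode still costs a full network round trip, so a single synchronous loop tends to plateau well below the 50 requests/second quota. If it is still too slow, sending several requests concurrently is the usual next step; a rough sketch with the standard library, separate from the answer above (addresses and api_key as defined in the question):
from concurrent.futures import ThreadPoolExecutor
import geocoder

def geocode_one(address):
    # One call per address; return latlng or 0 on failure, like the original loop
    g = geocoder.google(address, key=api_key, exactly_one=True)
    return g.latlng if g.ok else 0

with ThreadPoolExecutor(max_workers=10) as pool:  # keep well under the 50 req/s quota
    address_gps = list(pool.map(geocode_one, addresses))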