https://www.bestbuy.com/site/promo/health-fitness-deals
I want to loop through these 10 pages and scrape the product names and hrefs.
Below is my code, which only scrapes the first page ten times:
def name():
    for i in range(1, 11):
        tag = driver.find_elements_by_xpath('/html/body/div[4]/main/div[9]/div/div/div/div/div/div/div[2]/div[2]/div[3]/div/div[5]/ol/li[3]/div/div/div/div/div/div[2]/div[1]/div[2]/div/h4')
        for a in tag:
            for name in a.find_elements_by_tag_name('a'):
                links = name.get_attribute("href")
                names = name.get_attribute('text')
                watches_name.append(names)
                watches_link.append(links)
    # print(watches_name)
    # print(watches_link)

name()
If you want to get elements from the next pages, then you have to click() the > link:
driver.find_element_by_css_selector('.sku-list-page-next').click()
Minimal working code with other changes.
I reduced the XPath to something much simpler, and I keep name and link as a pair because a list of pairs is simpler to write to a CSV file or a database, or to filter and sort (see the CSV example after the code).
I had to use a longer sleep - sometimes my browser needs more time to update the elements on the page.
from selenium import webdriver
import time

url = 'https://www.bestbuy.com/site/promo/health-fitness-deals'

driver = webdriver.Firefox()
driver.get(url)
time.sleep(2)

# page "Hello! Choose a Country" - selecting United States flag
driver.find_element_by_class_name('us-link').click()

items = []

for page in range(1, 11):
    print('\n[DEBUG] wait 15 seconds to update page\n')
    time.sleep(15)

    print('\n--- page', page, '---\n')

    all_links = driver.find_elements_by_css_selector('#main-results h4 a')
    for a in all_links:
        link = a.get_attribute("href")
        name = a.get_attribute('text')
        items.append([name, link])
        print(name)

    print('\n[DEBUG] click next\n')
    driver.find_element_by_css_selector('.sku-list-page-next').click()

#print(items)
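Since the items are kept as [name, link] pairs, writing them out afterwards is a single csv.writer call. A minimal sketch (the output file name is my own choice, and it assumes the items list from the code above):

import csv

# write the scraped [name, link] pairs to a CSV file
with open('watches.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerow(['name', 'link'])  # header row
    writer.writerows(items)            # one row per [name, link] pair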
BTW:
This method could also be done with while True and some way to recognize whether the > link is still there, exiting the loop when it is gone. That way it would work with any number of pages.
Another method:
When you manually visit a few pages, you should see that the second page has a URL ending with ?cp=2, the third with ?cp=3, etc., so you can use that to load the pages:
driver.get(url + '?cp=' + str(page+1) )
Minimal working code.
from selenium import webdriver
import time

url = 'https://www.bestbuy.com/site/promo/health-fitness-deals'

driver = webdriver.Firefox()
driver.get(url)
time.sleep(2)

# page "Hello! Choose a Country" - selecting United States flag
driver.find_element_by_class_name('us-link').click()

items = []

for page in range(1, 11):
    print('\n[DEBUG] wait 15 seconds to update page\n')
    time.sleep(15)

    print('\n--- page', page, '---\n')

    all_links = driver.find_elements_by_css_selector('#main-results h4 a')
    for a in all_links:
        link = a.get_attribute("href")
        name = a.get_attribute('text')
        items.append([name, link])
        print(name)

    print('\n[DEBUG] load next url\n')
    driver.get(url + '?cp=' + str(page+1))

#print(items)
This method could also use while True and a page variable to get any number of pages.
EDIT:
Versions with while True
from selenium import webdriver
import time

url = 'https://www.bestbuy.com/site/promo/health-fitness-deals'

driver = webdriver.Firefox()
driver.get(url)
time.sleep(2)

# page "Hello! Choose a Country" - selecting United States flag
driver.find_element_by_class_name('us-link').click()

items = []

page = 1

while True:
    print('\n[DEBUG] wait 15 seconds to update page\n')
    time.sleep(15)

    print('\n--- page', page, '---\n')

    all_links = driver.find_elements_by_css_selector('#main-results h4 a')
    for a in all_links:
        link = a.get_attribute("href")
        name = a.get_attribute('text')
        items.append([name, link])
        print(name)

    page += 1

    print('\n[DEBUG] load next url\n')
    driver.get(url + '?cp=' + str(page))

    if driver.title == 'Best Buy: Page Not Found':
        print('\n[DEBUG] exit loop\n')
        break

#print(items)
and
from selenium import webdriver
import time

url = 'https://www.bestbuy.com/site/promo/health-fitness-deals'

driver = webdriver.Firefox()
driver.get(url)
time.sleep(2)

# page "Hello! Choose a Country" - selecting United States flag
driver.find_element_by_class_name('us-link').click()

items = []

page = 1

while True:
    print('\n[DEBUG] wait 15 seconds to update page\n')
    time.sleep(15)

    print('\n--- page', page, '---\n')

    all_links = driver.find_elements_by_css_selector('#main-results h4 a')
    for a in all_links:
        link = a.get_attribute("href")
        name = a.get_attribute('text')
        items.append([name, link])
        print(name)

    page += 1

    print('\n[DEBUG] click next\n')
    item = driver.find_element_by_css_selector('.sku-list-page-next')
    if item.get_attribute("href"):
        item.click()
    else:
        print('\n[DEBUG] exit loop\n')
        break

#print(items)
I guess that if your code is working right, you will just need to click the pagination button. I found it can be located with the help of the CSS selector '#Caret_Right_Line_Sm'. Try adding this line to your function:
def name():
    for i in range(1, 11):
        tag = driver.find_elements_by_xpath('/html/body/div[4]/main/div[9]/div/div/div/div/div/div/div[2]/div[2]/div[3]/div/div[5]/ol/li[3]/div/div/div/div/div/div[2]/div[1]/div[2]/div/h4')
        for a in tag:
            for name in a.find_elements_by_tag_name('a'):
                links = name.get_attribute("href")
                names = name.get_attribute('text')
                watches_name.append(names)
                watches_link.append(links)
        # print(watches_name)
        # print(watches_link)
        driver.find_elements_by_css_selector('#Caret_Right_Line_Sm')[1].click()

name()
Related
I wanted to extract text from multiple pages. Currently, I am able to extract data from the first page, but I want to go through the paginated pages as well and append their data. I have written this simple code which extracts data from the first page; I am not able to extract the data from the remaining pages, whose number is dynamic.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager

element_list = []
opts = webdriver.ChromeOptions()
opts.headless = True
driver = webdriver.Chrome(ChromeDriverManager().install())
base_url = "XYZ"
driver.maximize_window()
driver.get(base_url)
driver.set_page_load_timeout(50)
element = WebDriverWait(driver, 50).until(EC.presence_of_element_located((By.ID, 'all-my-groups')))
l = []
l = driver.find_elements_by_xpath("//div[contains(@class, 'alias-wrapper sim-ellipsis sim-list--shortId')]")
for i in l:
    print(i.text)
I have shared images of the pagination element's class in case that helps.
If we could automate the extraction across all the pages, that would be awesome. Also, I am new, so please pardon me for asking silly questions. Thanks in advance.
You have provided the code just for the previous-page button. I guess you need to keep going to the next page for as long as a next page exists. As I don't know which site we are talking about, I can only guess at its behavior, so I'm assuming the 'next' button disappears when there is no next page. If so, it can be done like this:
element_list = []
opts = webdriver.ChromeOptions()
opts.headless = True
driver = webdriver.Chrome(ChromeDriverManager().install())
base_url = "XYZ"
driver.maximize_window()
driver.get(base_url)
driver.set_page_load_timeout(50)
element = WebDriverWait(driver, 50).until(EC.presence_of_element_located((By.ID, 'all-my-groups')))
l = []
l = driver.find_elements_by_xpath("//div[contains(@class, 'alias-wrapper sim-ellipsis sim-list--shortId')]")
while True:
    try:
        next_page = driver.find_element(By.XPATH, '//button[@label="Next page"]')
    except NoSuchElementException:
        break
    next_page.click()
    l.extend(driver.find_elements(By.XPATH, "//div[contains(@class, 'alias-wrapper sim-ellipsis sim-list--shortId')]"))
for i in l:
    print(i.text)
To be able to catch the exception, this import has to be added:
from selenium.common.exceptions import NoSuchElementException
Also note that the method find_elements_by_xpath is deprecated, so it would be better to replace this line:
l = driver.find_elements_by_xpath("//div[contains(@class, 'alias-wrapper sim-ellipsis sim-list--shortId')]")
with this one:
l = driver.find_elements(By.XPATH, "//div[contains(@class, 'alias-wrapper sim-ellipsis sim-list--shortId')]")
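One more caveat, offered as a sketch rather than a drop-in fix: the loop above keeps WebElement references from earlier pages, and on many sites those references go stale once the page changes, so reading .text at the end can raise StaleElementReferenceException. A variant that extracts the text before moving on (reusing the same XPath and 'Next page' button locator as above, which are assumptions about the target site) could look like this:

from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException

ROW_XPATH = "//div[contains(@class, 'alias-wrapper sim-ellipsis sim-list--shortId')]"

texts = []
while True:
    # grab the text of the rows on the current page before navigating away
    texts.extend(e.text for e in driver.find_elements(By.XPATH, ROW_XPATH))
    try:
        next_page = driver.find_element(By.XPATH, '//button[@label="Next page"]')
    except NoSuchElementException:
        break  # no next-page button left, so this was the last page
    next_page.click()

for t in texts:
    print(t)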
I'm trying to scrape rental listing data on Zillow. Specifically, I want the link, price, and address of each property. However, after scraping the first page successfully and clicking the next arrow button, it just displays the same listings even though the page shows I'm on page 2, 3, etc. How do I get the next page(s) listings? The project is supposed to use BeautifulSoup and Selenium, but after some research it looks like using only selenium is the easiest way to do this since Zillow uses lazy-loading.
main.py code:
from time import sleep
from enter_data import DataEntry  # DataEntry is defined in enter_data.py below

DRIVER_PATH = "D:\chromedriver.exe"
FORM_URL = "HIDDEN"
WEBPAGE = "https://www.zillow.com/toronto-on/rentals/?searchQueryState=%7B%22pagination%22%3A%7B%7D%2C%22mapBounds%22%3A%7B%22west%22%3A-79.40771727189582%2C%22east%22%3A-79.35750631913703%2C%22south%22%3A43.639155005365474%2C%22north%22%3A43.66405824004801%7D%2C%22mapZoom%22%3A15%2C%22regionSelection%22%3A%5B%7B%22regionId%22%3A792680%2C%22regionType%22%3A6%7D%5D%2C%22isMapVisible%22%3Atrue%2C%22filterState%22%3A%7B%22fore%22%3A%7B%22value%22%3Afalse%7D%2C%22ah%22%3A%7B%22value%22%3Atrue%7D%2C%22sort%22%3A%7B%22value%22%3A%22days%22%7D%2C%22auc%22%3A%7B%22value%22%3Afalse%7D%2C%22nc%22%3A%7B%22value%22%3Afalse%7D%2C%22fr%22%3A%7B%22value%22%3Atrue%7D%2C%22sf%22%3A%7B%22value%22%3Afalse%7D%2C%22tow%22%3A%7B%22value%22%3Afalse%7D%2C%22fsbo%22%3A%7B%22value%22%3Afalse%7D%2C%22cmsn%22%3A%7B%22value%22%3Afalse%7D%2C%22fsba%22%3A%7B%22value%22%3Afalse%7D%7D%2C%22isListVisible%22%3Atrue%7D"

data_entry = DataEntry(DRIVER_PATH)
# Opens the webpage and gets the count of total pages (via self.next_btns_len)
data_entry.open_webpage(WEBPAGE)
# n is the iterator for the number of pages on the site.
n = 1
# Scrapes link, price, address data, adds each to a specified class list, and then goes to the next page.
while n < (data_entry.next_btns_len + 1):
    # Scrapes one page of data and adds data to list in class object
    data_entry.scrape_data()
    # Goes to next page for scraping
    sleep(5)
    data_entry.next_page()
    n += 1
enter_data.py code:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
from time import sleep


class DataEntry:
    """Enters the data from soup into Google Form"""

    def __init__(self, driver_path):
        # Options keeps the browser open after execution.
        self.chrome_options = Options()
        self.chrome_options.add_experimental_option("detach", True)
        self.driver = webdriver.Chrome(executable_path=driver_path, chrome_options=self.chrome_options)
        self.links = []
        self.prices = []
        self.addresses = []
        self.next_btns_len = 0

    def open_webpage(self, webpage):
        # Opens desired webpage and gives two seconds to load
        self.driver.get(webpage)
        sleep(2)
        # Gets total page numbers for main.py while loop
        page_nums = self.driver.find_element(By.CSS_SELECTOR, '.Text-c11n-8-69-2__sc-aiai24-0.gCvDSp')
        self.next_btns_len = int(page_nums.text.split()[3])

    def scrape_data(self):
        # Scrolls to each listing to make it visible to Selenium.
        n = 1
        while n < 41:
            listing = self.driver.find_element(By.XPATH, f'/html/body/div[1]/div[5]/div/div/div/div[1]/ul/li[{n}]')
            self.driver.execute_script("arguments[0].scrollIntoView(true);", listing)
            print(n)
            n += 1

        # todo: Create a list of links for all the listings you scraped.
        links = self.driver.find_elements(By.CSS_SELECTOR, ".list-card-info .list-card-link")
        link_list = [link.get_attribute("href") for link in links]
        # The if statement is to check if the DOM class name has changed, which produces an empty list.
        # If the list is empty, then changes the css_selector. The website alternates between two.
        if len(link_list) == 0:
            links = self.driver.find_elements(By.CSS_SELECTOR, ".StyledPropertyCardDataArea-c11n-8-69-2__sc-yipmu-0.dZxoFm.property-card-link")
            link_list = [link.get_attribute("href") for link in links]
        self.links.extend(link_list)
        print(len(self.links))
        print(self.links)

        # todo: Create a list of prices for all the listings you scraped.
        prices = self.driver.find_elements(By.CSS_SELECTOR, ".list-card-price")
        price_list = [price.text for price in prices]
        if len(price_list) == 0:
            prices = self.driver.find_elements(By.CSS_SELECTOR, ".StyledPropertyCardDataArea-c11n-8-69-2__sc-yipmu-0.kJFQQX")
            price_list = [price.text for price in prices]
        split_price_list = [price.split() for price in price_list]
        final_price_list = [price[0].strip("C+/mo") for price in split_price_list]
        self.prices.extend(final_price_list)
        print(len(self.prices))
        print(self.prices)

        # todo: Create a list of addresses for all the listings you scraped.
        addresses = self.driver.find_elements(By.CSS_SELECTOR, ".list-card-addr")
        address_list = [address.text for address in addresses]
        if len(address_list) == 0:
            addresses = self.driver.find_elements(By.CSS_SELECTOR, ".StyledPropertyCardDataArea-c11n-8-69-2__sc-yipmu-0.dZxoFm.property-card-link address")
            address_list = [address.text for address in addresses]
        self.addresses.extend(address_list)
        print(len(self.addresses))
        print(self.addresses)

    def next_page(self):
        # Clicks the next arrow and waits 5 seconds for the page to load
        next_arrow = self.driver.find_element(By.XPATH, "//a[@title='Next page']")
        next_arrow.click()
        sleep(5)

    def close_webpage(self):
        self.driver.quit()

    def enter_data(self, form_url, address, rent, link):
        # Opens the Google Form and waits 2 seconds for it to load.
        self.driver.get(form_url)
        sleep(2)
        # Enters each address, rent, and link into the form. Clicks submit after.
        address_input = self.driver.find_element(By.XPATH, '//*[@id="mG61Hd"]/div[2]/div/div[2]/div[1]/div/div/div[2]/div/div[1]/div/div[1]/input')
        address_input.send_keys(address)
        rent_input = self.driver.find_element(By.XPATH, '//*[@id="mG61Hd"]/div[2]/div/div[2]/div[2]/div/div/div[2]/div/div[1]/div/div[1]/input')
        rent_input.send_keys(rent)
        link_input = self.driver.find_element(By.XPATH, '//*[@id="mG61Hd"]/div[2]/div/div[2]/div[3]/div/div/div[2]/div/div[1]/div/div[1]/input')
        link_input.send_keys(link)
        submit_btn = self.driver.find_element(By.XPATH, '//*[@id="mG61Hd"]/div[2]/div/div[3]/div[1]/div[1]/div/span/span')
        submit_btn.click()
There is a less complex way to obtain the data you're looking for, using cloudscraper and pandas (and tqdm for convenience). You might also be in for a surprise, considering the time taken to get the data:
import cloudscraper
import pandas as pd
from tqdm import tqdm

scraper = cloudscraper.create_scraper()

df_list = []
for current_page in tqdm(range(1, 21)):
    url = f'https://www.zillow.com/search/GetSearchPageState.htm?searchQueryState=%7B%22pagination%22%3A%7B%22currentPage%22%3A{current_page}%7D%2C%22mapBounds%22%3A%7B%22west%22%3A-79.44174913987678%2C%22east%22%3A-79.32347445115607%2C%22south%22%3A43.57772225826024%2C%22north%22%3A43.7254027835563%7D%2C%22mapZoom%22%3A13%2C%22regionSelection%22%3A%5B%7B%22regionId%22%3A792680%2C%22regionType%22%3A6%7D%5D%2C%22isMapVisible%22%3Atrue%2C%22filterState%22%3A%7B%22isForSaleForeclosure%22%3A%7B%22value%22%3Afalse%7D%2C%22isAllHomes%22%3A%7B%22value%22%3Atrue%7D%2C%22sortSelection%22%3A%7B%22value%22%3A%22days%22%7D%2C%22isAuction%22%3A%7B%22value%22%3Afalse%7D%2C%22isNewConstruction%22%3A%7B%22value%22%3Afalse%7D%2C%22isForRent%22%3A%7B%22value%22%3Atrue%7D%2C%22isSingleFamily%22%3A%7B%22value%22%3Afalse%7D%2C%22isTownhouse%22%3A%7B%22value%22%3Afalse%7D%2C%22isForSaleByOwner%22%3A%7B%22value%22%3Afalse%7D%2C%22isComingSoon%22%3A%7B%22value%22%3Afalse%7D%2C%22isForSaleByAgent%22%3A%7B%22value%22%3Afalse%7D%7D%2C%22isListVisible%22%3Atrue%7D&wants=%7B%22cat1%22:[%22listResults%22,%22mapResults%22]%7D&requestId=6'
    r = scraper.get(url)

    for x in r.json()['cat1']['searchResults']['listResults']:
        status = x['statusText']
        address = x['address']
        try:
            price = x['units'][0]['price']
        except Exception as e:
            price = x['price']
        if 'https://www.' not in x['detailUrl']:
            url = 'https://zillow.com' + x['detailUrl']
        else:
            url = x['detailUrl']
        df_list.append((address, price, url))

df = pd.DataFrame(df_list, columns=['Address', 'Price', 'Url'])
df.to_csv('renting_in_toronto.csv')
print(df)
This will save the data in a csv file, and print out:
100% 20/20 [00:16<00:00, 1.19it/s]
Address Price Url
0 2221 Yonge St, Toronto, ON C$1,900+ https://zillow.com/b/Toronto-ON/43.70606,-79.3...
1 10 Yonge St, Toronto, ON C$2,100+ https://zillow.com/b/10-yonge-st-toronto-on-BM...
2 924 Avenue Rd, Toronto, ON M5P 2K6 C$1,895/mo https://www.zillow.com/homedetails/924-Avenue-...
3 797 Don Mills Rd, Toronto, ON C$1,850+ https://zillow.com/b/Toronto-ON/43.71951,-79.3...
4 15 Queens Quay E, Toronto, ON C$2,700+ https://zillow.com/b/Toronto-ON/43.64202,-79.3...
... ... ...
You can install the packages with pip install cloudscraper and pip install tqdm. The URL accessed here is visible in Dev Tools, under the Network tab; it returns JSON data which is loaded into the page by JavaScript.
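If you don't want to hardcode range(1, 21), one option is to keep incrementing the page number until the endpoint returns an empty listResults. This is only a sketch built on the same JSON keys as above, and page_url() is a hypothetical helper that returns the GetSearchPageState.htm URL from the snippet above for a given page number:

import cloudscraper

scraper = cloudscraper.create_scraper()

results = []
current_page = 1
while True:
    # page_url() is assumed to build the same GetSearchPageState.htm URL as above,
    # with the given page number in the pagination parameter
    r = scraper.get(page_url(current_page))
    page_results = r.json()['cat1']['searchResults']['listResults']
    if not page_results:
        break  # no listings returned, so we are past the last page
    results.extend(page_results)
    current_page += 1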
Can anyone help with scraping https://www.whed.net/home.php?
The code I'm using gives me an empty df. I would love to have the universities with their websites and maybe the fields of study. My scraping skills are weak, so if you can guide me through this, that would be great. Thanks, guys.
import time
import pandas as pd
from selenium import webdriver as wb
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import NoSuchElementException

begin = time.time()

countries = ['Emirates', 'United States of America (all)']
result = []       # List to store all data
univ_links = []   # Links for all universities
fields = ['Street:', 'City:', 'Province:', 'Post Code:', 'WWW:', 'Fields of study:', 'Job title:']

webD = wb.Chrome(executable_path=r'C:\Users\Admin\OneDrive\Sagasit\chromedriver.exe')  # To launch chrome and run script
# Trigger the target website
webD.get("https://www.whed.net/results_institutions.php")
webD.implicitly_wait(5)

#all_countries=[]
cntry_el = webD.find_elements_by_xpath('//*[@id="Chp1"]/option')
#cntry_grp = webD.find_elements_by_xpath('//*[@id="Chp1"]/optgroup')
grps = webD.find_elements_by_xpath('//*[@id="Chp1"]/optgroup/option[1]')
for c in cntry_el: countries.append(c.text)
for g in grps: countries.append(g.text)

for cntry in countries:
    select = Select(webD.find_element_by_id('Chp1'))  # select country dropdown
    select.select_by_visible_text(cntry)  # choosing country
    Btn_GO = webD.find_element_by_xpath('//*[@id="fsearch"]/p/input')
    Btn_GO.click()
    select_rpp = Select(webD.find_element_by_name('nbr_ref_pge'))  # select results-per-page drop down
    select_rpp.select_by_visible_text('100')  # choosing 100 results per page option
    university_form = webD.find_element_by_xpath('//*[@id="contenu"]').find_element_by_id('results')
    university_list = university_form.find_elements_by_xpath('//*[@id="results"]/li')  # list of university elements
    for univ in range(len(university_list)):
        href = university_list[univ].find_element_by_class_name('details').find_elements_by_tag_name('a')[0].get_property('href')  # University details link
        univ_links.append(href)
    while True:
        try:
            webD.find_element_by_partial_link_text('Next').click()
            university_form = webD.find_element_by_xpath('//*[@id="contenu"]').find_element_by_id('results')
            university_list = university_form.find_elements_by_xpath('//*[@id="results"]/li')
            for univ in range(len(university_list)):
                href = university_list[univ].find_element_by_class_name('details').find_elements_by_tag_name('a')[0].get_property('href')  # University details link
                univ_links.append(href)
        except NoSuchElementException:
            break

for l in univ_links:
    webD.get(l)
    webD.implicitly_wait(2)
    title = webD.find_element_by_xpath('//*[@id="page"]/div/div/div[2]/div[1]').text
    title_detailed = webD.find_element_by_xpath('//*[@id="page"]/div/div/div[2]/div[2]').text
    cntry_name = webD.find_element_by_xpath('//*[@id="contenu"]/p[2]').text
    t1 = webD.find_elements_by_class_name('dt')
    t2 = webD.find_elements_by_class_name('dd')
    labels = webD.find_elements_by_class_name('libelle')
    content = webD.find_elements_by_class_name('contenu')
    temp = {}
    fos = ''
    fos1 = ''
    temp.update({'Title': title, 'Detailed Title': title_detailed, 'Country': cntry_name})
    for i in range(len(t1)):
        if t1[i].text == '' or t1[i].text == 'Address':
            continue
        else:
            value = t2[i].text
            temp.update({t1[i].text: value.replace('\n', ',')})
    for j in range(len(content)):
        if labels[j].text in fields:
            if labels[j].text == 'Fields of study:':
                info = content[j].text
                fos = fos + ',' + info
            elif labels[j].text == 'Job title:':
                info1 = content[j].text
                fos1 = fos1 + ',' + info1
            else:
                key = labels[j].text
                temp.update({key[:-1]: content[j].text})
    temp.update({'Fields of study': fos.lstrip(','), 'Job titles': fos1.lstrip(',')})
    result.append(temp)

data = pd.DataFrame(result)
data

end = time.time()
print("Time taken : " + str(end - begin) + "s")

data.to_csv("WHED1.csv", index=False)
This code is what I could put together from a GitHub project.
It would be great if I could re-create the data and save it. I want to use it as a dropdown in a web application, just to make sure there are no mistakes in how the university someone studied at is written.
Update 1/12/22 - Async
Found a much better solution using aiohttp; it also runs through the entire list of countries in ~30 seconds instead of 3 hours.
import json
import time
import aiohttp
import asyncio
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support.select import Select
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service


def main():
    print("Init")
    driver = init_driver()

    print("Opening Homepage")
    url = "https://www.whed.net/results_institutions.php"
    driver.get(url)
    time.sleep(1)

    print("Gathering Countries")
    countries = get_countries(driver)
    driver.quit()

    print("Scraping")
    start = time.time()
    institution_list = asyncio.run(fetch_all(countries))

    print("Writing out")
    f = open('output.json', 'w')
    f.write(json.dumps(institution_list))
    f.close()

    end = time.time()
    print(f"Total time: {end - start}s")


def init_driver():
    chrome_executable = Service(executable_path='chromedriver.exe', log_path='NUL')
    chrome_options = Options()
    chrome_options.add_argument("--headless")
    driver = webdriver.Chrome(service=chrome_executable, options=chrome_options)
    return driver


def get_countries(driver):
    select = Select(driver.find_element(By.ID, "Chp1"))
    countries = list(map(lambda c: c.get_attribute('value'), select.options))
    countries.pop(0)
    return countries


def extract_institutions(html, country):
    soup = BeautifulSoup(html, 'html.parser')
    page = soup.find('p', {'class': 'infos'}).text
    print(str(page))

    number_of_institutions = str(page).split()[0]

    if number_of_institutions == 'No':
        print(f"No results for {country}")
        return []

    results = []
    inst_index = 0
    raw = soup.find_all('a', {'class': 'fancybox fancybox.iframe'})
    for i in raw:
        results.append({
            'name': str(i.text).strip(),
            'url': 'https://www.whed.net/' + str(i.attrs['href']).strip(),
            'country': country
        })
        inst_index += 1

    return {
        'country': country,
        'count': number_of_institutions,
        'records': results
    }


async def get_institutions(country, session):
    try:
        async with session.post(
            url='https://www.whed.net/results_institutions.php',
            data={"Chp1": country, "nbr_ref_pge": 10000}
        ) as response:
            html = await response.read()
            print(f"Successfully got {country}")
            return extract_institutions(html, country)
    except Exception as e:
        print(f"Unable to get {country} due to {e.__class__}.")


async def fetch_all(countries):
    async with aiohttp.ClientSession() as session:
        return await asyncio.gather(*[get_institutions(country, session) for country in countries])


# Main call
main()
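Since the goal was to re-create the data and save it for a dropdown, here is a small follow-up sketch (assuming the output.json written by the script above, with its per-country dicts of country/count/records) that flattens everything into one CSV:

import json
import csv

# flatten the per-country records written by the async script into one CSV
with open('output.json') as f:
    per_country = json.load(f)

with open('universities.csv', 'w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    writer.writerow(['country', 'name', 'url'])
    for entry in per_country:
        if not entry:  # a country that failed to download is stored as null
            continue
        records = entry['records'] if isinstance(entry, dict) else entry
        for rec in records:
            writer.writerow([rec['country'], rec['name'], rec['url']])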
Old answer using synchronous algorithm
Improving on @Mithun's answer, since it doesn't really work as posted: it gets stuck on the same page.
I also added direct access to the name and URL, to make it easier in case you want to use those.
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service

print("Init")
chrome_executable = Service(executable_path='chromedriver.exe', log_path='NUL')
chrome_options = Options()
chrome_options.add_argument("--headless")
driver = webdriver.Chrome(service=chrome_executable, options=chrome_options)

print("Opening Homepage")
url = "https://www.whed.net/results_institutions.php"
driver.get(url)
time.sleep(1)

print("Selecting country")
select = Select(driver.find_element(By.ID, "Chp1"))
country = "Albania"
select.select_by_visible_text(country)
time.sleep(.5)

print("Searching")
driver.find_element(By.XPATH, "//input[@value='Go']").click()
time.sleep(1)

print("Parsing")
html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
page = soup.find('p', {'class': 'infos'}).text
number_of_pages = str(page).split()[0]

counter = 10
results = []
while True:
    raw = soup.find_all('a', {'class': 'fancybox fancybox.iframe'})
    for i in raw:
        results.append({
            'name': str(i.text).strip(),
            'url': 'https://www.whed.net/' + str(i.attrs['href']).strip(),
            'country': country
        })
    print(f'{len(results)}/{number_of_pages}')

    if counter >= int(number_of_pages):
        break

    counter += 10
    driver.find_element(By.LINK_TEXT, "Next page").click()
    time.sleep(0.5)
    soup = BeautifulSoup(driver.page_source, 'html.parser')

driver.quit()
print(results)
You can use Selenium to scrape the data. The following code will help you scrape the university names for "United States of America (all)". Similarly, you can scrape other countries by looping over them or by entering the name manually. If you need the fields of study for every university, you can scrape each university's href with bs4 and then its fields of study from the detail page (see the sketch after the code).
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.select import Select

driver = webdriver.Chrome(r"chromedriver.exe")
url = "https://www.whed.net/results_institutions.php"
driver.get(url)
time.sleep(1)

select = Select(driver.find_element(By.ID, "Chp1"))
select.select_by_visible_text("United States of America (all)")
time.sleep(1)

driver.find_element(By.XPATH, "//input[@value='Go']").click()
time.sleep(1)

html = driver.page_source
soup = BeautifulSoup(html, 'html.parser')
page = soup.find('p', {'class': 'infos'}).text
number_of_pages = str(page).split()[0]

counter = 10
while counter < int(number_of_pages):
    raw = soup.find_all('div', {'class': 'details'})
    for i in raw:
        i = (str(i.text).lstrip())
        i = i.replace("\n", "")
        i = i.replace("\r", "")
        i = i.replace("\t", "")
        print(i)
    next_page = driver.find_element(By.LINK_TEXT, "Next page").click()
    counter += 10

driver.quit()
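For the fields of study, here is a rough sketch of a follow-up step, to be run before driver.quit() and while soup still holds a results page. It reuses the 'details' divs from the loop above plus the 'libelle'/'contenu' classes and the 'Fields of study:' label that the question's own detail-page code relies on, so treat those selectors and the URL joining as assumptions about the WHED pages rather than verified facts:

from urllib.parse import urljoin

# collect the detail-page links from the 'details' blocks on the current results page
detail_links = [a['href'] for div in soup.find_all('div', {'class': 'details'})
                          for a in div.find_all('a', href=True)]

for href in detail_links[:5]:  # limit to a few detail pages while testing
    driver.get(urljoin('https://www.whed.net/', href))
    time.sleep(1)
    detail_soup = BeautifulSoup(driver.page_source, 'html.parser')
    labels = detail_soup.find_all(class_='libelle')
    contents = detail_soup.find_all(class_='contenu')
    for label, content in zip(labels, contents):
        if label.get_text(strip=True) == 'Fields of study:':
            print(content.get_text(" ", strip=True))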
I was able to scrape the first page of eBay sold items, so I attempted the pagination, here's what I have:
import time
import requests
import pandas as pd
from selenium import webdriver as wd
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException

ebay_url = 'https://www.ebay.com/sch/i.html?_from=R40&_nkw=oakley+sunglasses&_sacat=0&Brand=Oakley&rt=nc&LH_Sold=1&LH_Complete=1&_ipg=200&_oaa=1&_fsrp=1&_dcat=79720'

# Load in html
html = requests.get(ebay_url)
# print(html.text)

driver = wd.Chrome(executable_path=r'/Users/mburley/Downloads/chromedriver')
driver.maximize_window()  # Maximizes window
driver.implicitly_wait(30)  # Gives an implicit wait for 30 seconds
driver.get(ebay_url)

wait = WebDriverWait(driver, 20)  # Makes driver wait 20 seconds

sold_date = []
title = []
price = []
i = 1

## Loop here to get multiple pages
next_page = True
while next_page:
    try:
        # for item in all_items
        for item in driver.find_elements(By.XPATH, "//div[contains(@class,'title--tagblock')]/span[@class='POSITIVE']"):
            try:
                # Get Sale Date of item and update 'data'
                sold_date.append(item.text)
            except NoSuchElementException:
                # Element not found
                sold_date.append(None)
            try:
                # Get title of each item and update 'data'
                title.append(driver.find_element_by_xpath(f"(//div[contains(@class,'title--tagblock')]/span[@class='POSITIVE']/ancestor::div[contains(@class,'tag')]/following-sibling::a/h3)[{i}]").text)
            except NoSuchElementException:
                # Element not found
                title.append(None)
            try:
                # Get price of each item and update 'data'
                price.append(item.find_element_by_xpath(f"(//div[contains(@class,'title--tagblock')]/span[@class='POSITIVE']/ancestor::div[contains(@class,'tag')]/following-sibling::div[contains(@class,'details')]/descendant::span[@class='POSITIVE'])[{i}]").text)
            except NoSuchElementException:
                # Element not found
                price.append(None)
            i = i + 1

        # Print results of scraped data on page
        print(sold_date)
        print(title)
        print(price)

        data = {
            'Sold_date': sold_date,
            'title': title,
            'price': price
        }

        # Load Next Page by clicking button
        button = driver.find_element_by_name('pagination__next icon-link')
        button.click()
        print("Clicked on Next Page!")
        time.sleep(1)
    except:
        print("Done!")
        next_page = False

df = pd.DataFrame.from_dict(data)
df.to_csv('out_two.csv', index=0)
After I had the code to scrape page 1, I added:
... code ...

## Loop here to get multiple pages
next_page = True
while next_page:
    try:
        ... code to scrape page 1 ...

        # Load Next Page by clicking button
        button = driver.find_element_by_name('pagination__next icon-link')
        button.click()
        print("Clicked on Next Page!")
        time.sleep(1)
    except:
        print("Done!")
        next_page = False
Which unfortunately makes the code scrape the first page, then search for the next-page "button", fail to find it, and exit after printing "Done!". I don't know a lot about scraping, so I tried to follow an online example. Can anyone help? Thanks!
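One guess at what is going wrong, offered as a sketch rather than a confirmed fix: find_element_by_name looks up the HTML name attribute, but 'pagination__next icon-link' looks like a pair of CSS classes, so locating the button by CSS selector (and catching only NoSuchElementException instead of a bare except, which also hides real errors) may behave better. The a.pagination__next selector is an assumption based on the class string in the question, not verified against eBay's current markup:

from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException

# locate the next-page control by its CSS class rather than by name attribute
try:
    button = driver.find_element(By.CSS_SELECTOR, "a.pagination__next")
    button.click()
    print("Clicked on Next Page!")
except NoSuchElementException:
    print("Done!")
    next_page = False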
I am trying to make a scraping application for Hants.gov.uk, and right now I am only clicking through the pages instead of scraping them. When it gets to the last row on page 1 it just stops, so what I did was make it click the "Next Page" button, but first it has to go back to the original URL. It clicks page 2, but after page 2 is scraped it doesn't go to page 3; it just restarts page 2.
Can somebody help me fix this issue?
Code:
import time
import config  # Don't worry about this. This is an external file to make a DB
import urllib.request
from bs4 import BeautifulSoup
from selenium import webdriver

url = "https://planning.hants.gov.uk/SearchResults.aspx?RecentDecisions=True"

driver = webdriver.Chrome(executable_path=r"C:\Users\Goten\Desktop\chromedriver.exe")
driver.get(url)

driver.find_element_by_id("mainContentPlaceHolder_btnAccept").click()

def start():
    elements = driver.find_elements_by_css_selector(".searchResult a")
    links = [link.get_attribute("href") for link in elements]
    result = []
    for link in links:
        if link not in result:
            result.append(link)
        else:
            driver.get(link)
            goUrl = urllib.request.urlopen(link)
            soup = BeautifulSoup(goUrl.read(), "html.parser")
            #table = soup.find_element_by_id("table", {"class": "applicationDetails"})
            for i in range(20):
                pass  # Don't worry about all this commented code, it isn't relevant right now
                #table = soup.find_element_by_id("table", {"class": "applicationDetails"})
                #print(table.text)
                # div = soup.select("div.applicationDetails")
                # getDiv = div[i].split(":")[1].get_text()
                # log = open("log.txt", "a")
                # log.write(getDiv + "\n")
            #log.write("\n")

start()
driver.get(url)

for i in range(5):
    driver.find_element_by_id("ctl00_mainContentPlaceHolder_lvResults_bottomPager_ctl02_NextButton").click()
    url = driver.current_url
    start()
    driver.get(url)

driver.close()
try this:
import time
# import config # Don't worry about this. This is an external file to make a DB
import urllib.request
from bs4 import BeautifulSoup
from selenium import webdriver

url = "https://planning.hants.gov.uk/SearchResults.aspx?RecentDecisions=True"

driver = webdriver.Chrome()
driver.get(url)

driver.find_element_by_id("mainContentPlaceHolder_btnAccept").click()

result = []

def start():
    elements = driver.find_elements_by_css_selector(".searchResult a")
    links = [link.get_attribute("href") for link in elements]
    result.extend(links)

def start2():
    for link in result:
        # if link not in result:
        #     result.append(link)
        # else:
        driver.get(link)
        goUrl = urllib.request.urlopen(link)
        soup = BeautifulSoup(goUrl.read(), "html.parser")
        #table = soup.find_element_by_id("table", {"class": "applicationDetails"})
        for i in range(20):
            pass  # Don't worry about all this commented code, it isn't relevant right now
            #table = soup.find_element_by_id("table", {"class": "applicationDetails"})
            #print(table.text)
            # div = soup.select("div.applicationDetails")
            # getDiv = div[i].split(":")[1].get_text()
            # log = open("log.txt", "a")
            # log.write(getDiv + "\n")
        #log.write("\n")

while True:
    start()
    element = driver.find_element_by_class_name('rdpPageNext')
    try:
        check = element.get_attribute('onclick')
        if check != "return false;":
            element.click()
        else:
            break
    except:
        break

print(result)
start2()

driver.get(url)
As per the URL https://planning.hants.gov.uk/SearchResults.aspx?RecentDecisions=True, to click through all the pages you can use the following solution:
Code Block:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC

options = Options()
options.add_argument("start-maximized")
options.add_argument("disable-infobars")
options.add_argument("--disable-extensions")
driver = webdriver.Chrome(chrome_options=options, executable_path=r'C:\Utility\BrowserDrivers\chromedriver.exe')
driver.get('https://planning.hants.gov.uk/SearchResults.aspx?RecentDecisions=True')
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.ID, "mainContentPlaceHolder_btnAccept"))).click()
numLinks = len(WebDriverWait(driver, 20).until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, "div#ctl00_mainContentPlaceHolder_lvResults_topPager div.rdpWrap.rdpNumPart>a"))))
print(numLinks)
for i in range(numLinks):
    print("Perform your scraping here on page {}".format(str(i+1)))
    WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//div[@id='ctl00_mainContentPlaceHolder_lvResults_topPager']//div[@class='rdpWrap rdpNumPart']//a[@class='rdpCurrentPage']/span//following::span[1]"))).click()
driver.quit()
Console Output:
8
Perform your scraping here on page 1
Perform your scraping here on page 2
Perform your scraping here on page 3
Perform your scraping here on page 4
Perform your scraping here on page 5
Perform your scraping here on page 6
Perform your scraping here on page 7
Perform your scraping here on page 8
Hi @Feitan Portor, you have written the code absolutely perfectly. The only reason you are redirected back to the first page is that you set url = driver.current_url in the last for loop, where the URL stays static and only the JavaScript instigates the next click event. So just remove url = driver.current_url and driver.get(url),
and you are good to go; I have tested it myself.
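In other words, the final loop from the question could be reduced to something like this (a sketch of the suggested change, keeping the same element id and start() function from the question):

# click "Next" five times, scraping each page, without re-loading the URL in between
for i in range(5):
    driver.find_element_by_id("ctl00_mainContentPlaceHolder_lvResults_bottomPager_ctl02_NextButton").click()
    start()  # scrape the page we just landed on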
Also, to get the current page that your scraper is on, just add this part inside the for loop, so you will know where your scraper is:
ss = driver.find_element_by_class_name('rdpCurrentPage').text
print(ss)
Hope this solves your confusion