Selenium Python - skip an iteration if a web element is not present
I'm trying to fetch data from https://b2b.baidu.com/ after entering a keyword in a search field on the website. I want to skip an iteration if an element is not present on the first page.
I know this can work seamlessly, but I'm still a novice and can't figure out what I'm doing wrong at the moment. Your help will be greatly appreciated.
Here is what I've done:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
import time
import pandas as pd
website = 'https://b2b.baidu.com/'
path = "C:/Users/ICT/chromedriver.exe"
driver = webdriver.Chrome(path)
driver.get(website)
driver.implicitly_wait(4)
wait = WebDriverWait(driver, 10)
driver.maximize_window()
# the search terms, which contain location and keyword, come from a dataframe in another file
from baidu_locations import location_key_row
from baidu_locations import location_data_col
from baidu_locations import key_data_col
for i in range(1, 6):
website = []
rep_name = []
contact = []
location = []
keyword = []
business_name = []
# Input location and keyword
enter_query = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "input[placeholder='我要采购…']")))
enter_query.clear()
enter_query.send_keys(location_key_row[i-1])
location_query = location_data_col[i-1]
location.append(location_query)
    keyword_query = key_data_col[i-1]
keyword.append(keyword_query)
search_type = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "li[class='search-type']")))
search_type.click()
    # If the company_url element is not available, I want to go back, enter the next enter_query, and continue the iteration.
try:
company_url = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'div > div:nth-child(1) > div > div > div > div.title-container > span > span.name > a')))
website.append(company_url.get_property('href'))
        first_location = wait.until(EC.element_to_be_clickable((By.XPATH, '(//span[@class="title link"])[1]')))
first_location.click()
driver.switch_to.window(driver.window_handles[1])
name = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div[class='shop-contact-warp shop-contact-vertical-warp'] div[class='top'] div span[class='show-name']")))
business_name.append(name.text)
#print(reps)
representative = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "div.shop-index-new-right> div > div.top > div:nth-child(1) > div > div.text > p.sub-text")))
rep_name.append(representative.text)
phone_option = wait.until(EC.element_to_be_clickable((By.XPATH, "//span[contains(text(),'查看电话')]")))
phone_option.click()
popup_contact = driver.window_handles[1]
driver.switch_to.window(popup_contact)
phone_number = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, 'p[class="phone"]')))
contact.append(phone_number.text)
#print(contact_no)
time.sleep(2)
return_button = wait.until(EC.element_to_be_clickable((By.XPATH, "//span[contains(text(),'返回')]")))
return_button.click()
driver.close()
driver.switch_to.window(driver.window_handles[0])
except:
continue
df = pd.DataFrame({'Location': location, 'Keyword': keyword, 'Name': business_name, 'Representative': rep_name, 'Phone': contact, 'Link': website})
So if the company_url element is present on the first page, I want to click on it, go to the new tab, copy the data on that page, return to the first tab, and repeat the process.
If the company_url element is not present, I want to skip that iteration and input the next search term enter_query from the specified range.
I want to fetch the data for each enter_query where the company_url element is present and save it in a dataframe.
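In other words, the round trip for each result is: click the link, switch to the new tab, copy the data, close the tab, and switch back (a minimal sketch of the tab handling already used in the code above):
first_location.click()                             # opens the company page in a new tab
driver.switch_to.window(driver.window_handles[1])  # work in the new tab
# ... copy the data here ...
driver.close()                                     # close the company tab
driver.switch_to.window(driver.window_handles[0])  # return to the results tab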
This code block seems to only fetch one row of data no matter the range I set.
Thank you for your help. Kindly let me know if my question is unclear or if you have any questions.
Well, I guess you only want to loop under specific conditions. In that case, why not increment the iterator only when your conditions are satisfied?
Hope the below can be of help.
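The core idea is a while loop that only advances the index when the body succeeds (a minimal sketch; process is a hypothetical stand-in for the per-query scraping work):
i = 1
while i < 6:
    try:
        process(i)  # hypothetical: run the query and scrape results for index i
    except Exception:
        continue    # an element was missing: loop again without advancing i
    i += 1          # only reached when everything above succeeded
Here is the full version of your code with this change: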
import time
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
website = "https://b2b.baidu.com/"
path = "C:/Users/ICT/chromedriver.exe"
driver = webdriver.Chrome(path)
driver.get(website)
driver.implicitly_wait(4)
wait = WebDriverWait(driver, 10)
driver.maximize_window()
# the search terms, which contain location and keyword, come from a dataframe in another file
from baidu_locations import key_data_col, location_data_col, location_key_row
# ------------- added -------------
i = index_from = 1
index_to = 6
# ---------------------------------
# ------------------ modified ------------------
while i < index_to:
# ----------------------------------------------
website = []
rep_name = []
contact = []
location = []
keyword = []
business_name = []
# Input location and keyword
enter_query = wait.until(
EC.element_to_be_clickable((By.CSS_SELECTOR, "input[placeholder='我要采购…']"))
)
enter_query.clear()
enter_query.send_keys(location_key_row[i - 1])
location_query = location_data_col[i - 1]
location.append(location_query)
    keyword_query = key_data_col[i - 1]
keyword.append(keyword_query)
search_type = wait.until(
EC.element_to_be_clickable((By.CSS_SELECTOR, "li[class='search-type']"))
)
search_type.click()
# ------------------ modified ------------------
try:
company_url = wait.until(
EC.element_to_be_clickable(
(
By.CSS_SELECTOR,
"div > div:nth-child(1) > div > div > div > div.title-container > span > span.name > a",
)
)
)
except:
continue
try:
# ----------------------------------------------
website.append(company_url.get_property("href"))
first_location = wait.until(
            EC.element_to_be_clickable((By.XPATH, '(//span[@class="title link"])[1]'))
)
first_location.click()
driver.switch_to.window(driver.window_handles[1])
name = wait.until(
EC.element_to_be_clickable(
(
By.CSS_SELECTOR,
"div[class='shop-contact-warp shop-contact-vertical-warp'] div[class='top'] div span[class='show-name']",
)
)
)
business_name.append(name.text)
# print(reps)
representative = wait.until(
EC.element_to_be_clickable(
(
By.CSS_SELECTOR,
"div.shop-index-new-right> div > div.top > div:nth-child(1) > div > div.text > p.sub-text",
)
)
)
rep_name.append(representative.text)
phone_option = wait.until(
EC.element_to_be_clickable((By.XPATH, "//span[contains(text(),'查看电话')]"))
)
phone_option.click()
popup_contact = driver.window_handles[1]
driver.switch_to.window(popup_contact)
phone_number = wait.until(
EC.element_to_be_clickable((By.CSS_SELECTOR, 'p[class="phone"]'))
)
contact.append(phone_number.text)
# print(contact_no)
time.sleep(2)
return_button = wait.until(
EC.element_to_be_clickable((By.XPATH, "//span[contains(text(),'返回')]"))
)
return_button.click()
driver.close()
driver.switch_to.window(driver.window_handles[0])
# ------------- added -------------
# No problem here
i += 1
# ---------------------------------
except:
continue
df = pd.DataFrame(
{
"Location": location,
"Keyword": keyword,
"Name": business_name,
"Representative": rep_name,
"Phone": contact,
"Link": website,
}
)
I am trying to paginate through this site and scrape each record's table details.
https://www.cyprusbar.org/CypriotAdvocateMembersPage.aspx
I need to click each details box, get directed to a new window, and do the same for the other records on each page, then paginate. Here is my Selenium code:
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.chrome.options import Options
PATH = 'chromedriver.exe'
options = Options()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--lang=en")
driver = webdriver.Chrome(executable_path=PATH, options=options)
driver.maximize_window()
driver.get('https://www.cyprusbar.org/CypriotAdvocateMembersPage.aspx')
driver.find_element_by_xpath('//*[@id="Div1"]/input').click()
def wait(locator, id):
element = WebDriverWait(driver, 50).until(
EC.presence_of_all_elements_located((locator, id))
)
return element
DATA = []
name = '//*[@id="ctl00_ContentPlaceHolder1_TxtName_I"]'
postal = '//*[@id="ctl00_ContentPlaceHolder1_TxtPostalCode_I"]'
fax = '//*[@id="ctl00_ContentPlaceHolder1_TxtFax_I"]'
province = '//*[@id="ctl00_ContentPlaceHolder1_TxtDistrict_I"]'
email = '//*[@id="ctl00_ContentPlaceHolder1_TxtEmail_I"]'
address = '//*[@id="ctl00_ContentPlaceHolder1_TxtAddress_I"]'
phone = '//*[@id="ctl00_ContentPlaceHolder1_TxtPhone_I"]'
courtroom = '//*[@id="ctl00_ContentPlaceHolder1_TxtCourtBox_I"]'
webpage = '//*[@id="ctl00_ContentPlaceHolder1_TxtUrl_I"]'
details = ['Postal Code', 'Fax', 'Calendar Province', 'Email', 'Address', 'Phone', 'Courtroom', 'Webpage']
def gotopage(page):
for p in range(page-1):
next_page = driver.find_element_by_class_name('dxWeb_pNext_Material')
action = ActionChains(driver)
action.click(next_page)
action.perform()
time.sleep(4)
def each_page(page, new):
global DATA
curr = 0
while curr < 80:
if page > 1 and new:
gotopage(page)
action = ActionChains(driver)
        action.move_to_element(driver.find_element_by_xpath('//*[@id="ctl00_ContentPlaceHolder1_LawyersGrid_DXPagerBottom_PSI"]')).click()
action.perform()
action.send_keys(Keys.ARROW_UP, Keys.RETURN)
action.perform()
time.sleep(17)
data = {}
action = ActionChains(driver)
detail_list = wait(By.CLASS_NAME, 'dxb-hbc')
try:
action.click(detail_list[curr])
action.perform()
except IndexError:
print(curr)
driver.back()
gotopage(page)
data['Name'] = wait(By.XPATH, name)[0].get_attribute('value')
for i, d in enumerate([postal, fax, province, email, address, phone, courtroom, webpage]):
info = driver.find_element_by_xpath(d).get_attribute(('value'))
data[details[i]] = info
DATA.append(data)
curr += 1
driver.back()
print('============SCRAPING===============')
page = 1
new=True
while page <= 50:
try:
each_page(page, new)
page += 1
except Exception as err:
print(err)
print(page)
The problem here is that this is incredibly slow, because each time you call
driver.back()
it goes back to page 1, and the script then has to navigate all the way back to the page it was on.
Is there any way I can achieve this with something like BeautifulSoup?
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
Path = "C:\Program Files (x86)\chromedriver.exe"
driver = webdriver.Chrome(Path)
driver.get("https://www.emag.ro/")
search_bar = driver.find_element_by_id("searchboxTrigger")
search_bar.send_keys("laptopuri")
search_bar.send_keys(Keys.RETURN)
main = None
try:
main = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "main-container"))
)
print("Page loaded,main retrived succesfully")
except:
driver.quit()
items = main.find_element_by_id("card_grid")
products = items.find_elements_by_css_selector("div.card-item.js-product-data")
count = 0
for product in products:
raw_name = WebDriverWait(product, 10).until(
EC.presence_of_element_located((By.CSS_SELECTOR, "h2.card-body.product-title-zone"))
).text
raw_price = WebDriverWait(product, 10).until(
EC.presence_of_element_located((By.CLASS_NAME, "product-new-price"))
)
#Parsing the product name
raw_name = raw_name.replace("Laptop", "")
raw_name = raw_name.strip()
if raw_name.startswith("Apple"):
sEnd = raw_name.find(",")
else:
sEnd = raw_name.find("cu") - 1
product_name = raw_name[:sEnd]
#Parsing the product price
raw_price = raw_price.text[:raw_price.text.find(" ")]
print(raw_price)
count += 1
print(f"{count} results returned")
driver.quit()
The code works perfectly fine sometimes, but sometimes I get the error:
Please note I am new at this, so an explanation would be very appreciated. I just learned how to use Selenium, and the reason I transitioned from BeautifulSoup is its lack of a wait facility; and now, when trying to use that, I get this error SOMETIMES.
See this:
driver = webdriver.Chrome(Path)
and how you have used it here:
try:
main = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.ID, "main-container"))
)
print("Page loaded,main retrived succesfully")
If you pay attention, you will see that you are using WebDriverWait(driver, 10), passing the driver reference.
But here
raw_name = WebDriverWait(product, 10).until(
EC.presence_of_element_located((By.CSS_SELECTOR, "h2.card-body.product-title-zone"))
).text
you are passing product to WebDriverWait, which is wrong; you should pass the driver reference here, like:
raw_name = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.CSS_SELECTOR, "h2.card-body.product-title-zone"))
).text
This should help you get past this issue.
Also, make the same change here:
raw_price = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.CLASS_NAME, "product-new-price"))
)
This is what Selenium has internally:
class WebDriverWait(object):
def __init__(self, driver, timeout, poll_frequency=POLL_FREQUENCY, ignored_exceptions=None):
"""Constructor, takes a WebDriver instance and timeout in seconds.
Update 1:
driver = webdriver.Chrome(driver_path)
driver.maximize_window()
driver.implicitly_wait(50)
driver.get("https://www.emag.ro/")
wait = WebDriverWait(driver, 10)
wait.until(EC.element_to_be_clickable((By.XPATH, "//i[contains(#class,'close')]/parent::button[#class='close']"))).click()
ActionChains(driver).move_to_element(wait.until(EC.visibility_of_element_located((By.XPATH, "//button[contains(#class,'js-accept')]")))).click().perform()
search_bar = driver.find_element_by_id("searchboxTrigger")
search_bar.send_keys("laptopuri")
search_bar.send_keys(Keys.RETURN)
main = None
try:
main = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, "main-container")))
print("Page loaded,main retrived succesfully")
except:
driver.quit()
items = main.find_element_by_id("card_grid")
products = driver.find_elements_by_css_selector("div.card-item.js-product-data")
count = 0
for product in products:
raw_name = product.find_element_by_css_selector("h2.card-body.product-title-zone a").text
print(raw_name)
raw_price = product.find_element_by_css_selector("p.product-new-price").text
print(raw_price)
#Parsing the product name
# raw_name = raw_name.replace("Laptop", "").strip()
# if raw_name.startswith("Apple"):
# sEnd = raw_name.find(",")
# else:
# sEnd = raw_name.find("cu") - 1
# product_name = raw_name[:sEnd]
#Parsing the product price
# raw_price = raw_price[raw_price.find(" ")]
# print(raw_price)
# count += 1
#print(f"{count} results returned")
driver.quit()
Trying to get the tyres' details from this page. https://eurawheels.com/fr/catalogue/BBS
links = driver.find_elements_by_xpath('//div[@class="col-xs-1 col-md-3"]//a')
parent_window = driver.current_window_handle
x = 0
for j in range(len(links)):
driver.execute_script('window.open(arguments[0]);', links[j])
#scraping here
if x == 0:
driver.close()
driver.switch_to.window(parent_window)
x += 1
else:
driver.back()
driver.refresh() #refresh page
        tyres = WebDriverWait(driver, 25).until(EC.visibility_of_all_elements_located((By.XPATH, '//div[@class="card-body text-center"]//a'))) #redefine links
time.sleep(4)
It works for 10 links, but then the links go stale. I cannot figure out what needs to be changed. Any help is welcome.
You need to scroll each element into view before executing driver.execute_script('window.open(arguments[0]);', links[j]), since not all the elements are initially loaded on the page.
So your code should look like the following:
from selenium.webdriver.common.action_chains import ActionChains
actions = ActionChains(driver)
links = driver.find_elements_by_xpath('//div[@class="col-xs-1 col-md-3"]//a')
parent_window = driver.current_window_handle
x = 0
for j in range(len(links)):
    actions.move_to_element(links[j]).perform()
driver.execute_script('window.open(arguments[0]);', links[j])
#scraping here
if x == 0:
driver.close()
driver.switch_to.window(parent_window)
x += 1
else:
driver.back()
driver.refresh() #refresh page
        tyres = WebDriverWait(driver, 25).until(EC.visibility_of_all_elements_located((By.XPATH, '//div[@class="card-body text-center"]//a'))) #redefine links
time.sleep(4)
Try this:
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
link = 'https://eurawheels.com/fr/catalogue/BBS'
with webdriver.Chrome() as driver:
wait = WebDriverWait(driver,15)
driver.get(link)
linklist = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR,".card-body > a")))
for i,elem in enumerate(linklist):
linklist[i].click()
wait.until(EC.invisibility_of_element_located((By.CSS_SELECTOR,".spinner-border[role='status']")))
time.sleep(2) #if you kick out this delay, your script will run very fast but you may end up getting same results multiple times.
item = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,"h3"))).text
print(item)
wait.until(EC.presence_of_element_located((By.CSS_SELECTOR,"h1.modal-title + button[class='close'][data-dismiss='modal']"))).click()
driver.back()
import csv
import time
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from csv import reader
from selenium.webdriver.common.action_chains import ActionChains
from selenium.common.exceptions import NoSuchElementException
chrome_options = Options()
scroll = 5
chrome_options.add_experimental_option("useAutomationExtension", False)
chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
header_added = False
header_added1 = False
url = "url"
driver = webdriver.Chrome(executable_path='C:/chromedriver.exe', options=chrome_options)
driver.maximize_window()
driver.get(url)
time.sleep(3)
search_city = input("Enter the city :")
res_n = input("Enter the Restaurant's name :")
search = driver.find_element_by_xpath('//input[@name="location"]').send_keys(search_city)
time.sleep(2)
driver.find_element_by_xpath('//*[#id="root"]/div[1]/div[1]/div/div[1]/div[1]/div/div[2]/div/div[3]/div[1]/span[2]').click()
time.sleep(3)
driver.find_element_by_xpath('/html/body/div[1]/div[1]/header/div/div/ul/li[5]/div/a/span[1]').click()
time.sleep(1)
search_res = driver.find_element_by_class_name('_2BJMh').send_keys(res_n.lower())
time.sleep(5)
driver.find_element_by_class_name('_2BJMh').send_keys(Keys.RETURN)
time.sleep(5)
try:
driver.find_element_by_class_name('_3FR5S').click()
time.sleep(5)
except:
print("restaurant not open")
driver.quit()
html = driver.find_element_by_tag_name('html')
def get_items():
global header_added
global item_dvs
cats = driver.find_elements_by_class_name('D_TFT')
cats[1].click()
time.sleep(3)
item_dvs = driver.find_elements_by_class_name('_2wg_t')
for div in item_dvs:
name = div.find_element_by_class_name('styles_itemNameText__3bcKX')
print(name.text)
price = div.find_element_by_class_name('rupee')
print(price.text)
if div.find_elements_by_class_name('styles_itemDesc__MTsVd'):
desc = div.find_element_by_class_name('styles_itemDesc__MTsVd').text
else:
desc = None
if div.find_element_by_css_selector('div._1C1Fl._23qjy'):
element = div.find_element_by_css_selector('div._1C1Fl._23qjy')
print("found")
driver.execute_script("arguments[0].scrollIntoView();", element)
add = div.find_element_by_css_selector('._1RPOp')
driver.execute_script("arguments[0].click();", add)
time.sleep(1)
add_ons = driver.find_element_by_class_name('_3UzO2').text
print(add_ons)
driver.find_element_by_css_selector('#modal-placeholder > div:nth-child(3) > div > div._1Kr-y._3EeZR > div > div._1EZLh > div > button').click()
else:
add_ons = None
dict1 = {'Item Name': name.text, "Price": price.text, "Add Ons :": add_ons, "Description": desc}
with open(f'{search_city}_{res_n}.csv', 'a+', encoding='utf-8-sig') as f:
w = csv.DictWriter(f, dict1.keys())
if not header_added:
w.writeheader()
header_added = True
w.writerow(dict1)
get_items()
The is_cust block keeps running over and over again, opening the same element, while the rest of the code moves on to the next divs. What is wrong here?
XPath is bidirectional (a lookup run from an element can still match nodes outside that element), which is probably the cause here.
Try this code using a CSS selector:
for div in item_dvs:
#Do Something
try:
is_cust = div.find_element_by_css_selector('._1C1Fl._23qjy')
print("found")
except NoSuchElementException:
continue
driver.execute_script("arguments[0].scrollIntoView();", is_cust)
add = div.find_element_by_css_selector('._1RPOp')
driver.execute_script("arguments[0].click();", add)
time.sleep(1)
    # Not sure why you had driver instead of div for this one; suspect it should be div
    add_ons = div.find_element_by_class_name('_26cJ9').text
div.find_element_by_css_selector('#modal-placeholder > div:nth-child(3) > div > div._1Kr-y._3EeZR > div > div._1EZLh > div > button').click()
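If you would rather keep XPath, anchor the expression to the element with a leading dot so the search stays inside that div (a sketch; the class names are taken from the question and may change):
for div in item_dvs:
    try:
        # the leading "." makes the XPath relative to div, not the whole document
        is_cust = div.find_element_by_xpath('.//div[contains(@class, "_1C1Fl") and contains(@class, "_23qjy")]')
    except NoSuchElementException:
        continue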
UPDATE
From your updated code, you are using a lot of hardcoded sleeps. I suggest using WebDriverWait with expected_conditions.
More info here: Wait from Selenium
Imports needed:
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
Code to be added post driver creation:
wait_time = 5
wait = WebDriverWait(driver, wait_time)
Instead of using sleep like this:
time.sleep(5)
driver.find_element_by_class_name('_2BJMh').send_keys(Keys.RETURN)
time.sleep(5)
Use:
wait.until(EC.presence_of_element_located((By.CLASS_NAME, '_2BJMh'))).send_keys(res_n.lower())
Don't gather the element twice; use find_elements_by_* and then validate the length:
descs = wait.until(EC.presence_of_all_elements_located((By.CLASS_NAME, 'styles_itemDesc__MTsVd')))
if len(descs) > 0:
desc = descs[0].text
else:
desc = None
I'm scraping an e-commerce website, Lazada, using Selenium and bs4. I managed to scrape the 1st page, but I am unable to iterate to the next page. What I'm trying to achieve is to scrape all the pages of the categories I've selected.
Here is what I've tried:
# Run Chrome in incognito mode
option = webdriver.ChromeOptions()
option.add_argument('--incognito')
driver = webdriver.Chrome(executable_path='chromedriver', chrome_options=option)
driver.get('https://www.lazada.com.my/')
driver.maximize_window()
# Select category item #
element = driver.find_elements_by_class_name('card-categories-li-content')[0]
webdriver.ActionChains(driver).move_to_element(element).click(element).perform()
t = 10
try:
WebDriverWait(driver,t).until(EC.visibility_of_element_located((By.ID,"a2o4k.searchlistcategory.0.i0.460b6883jV3Y0q")))
except TimeoutException:
print('Page Refresh!')
driver.refresh()
element = driver.find_elements_by_class_name('card-categories-li-content')[0]
webdriver.ActionChains(driver).move_to_element(element).click(element).perform()
print('Page Load!')
#Soup and select element
def getData(np):
soup = bs(driver.page_source, "lxml")
product_containers = soup.findAll("div", class_='c2prKC')
for p in product_containers:
title = (p.find(class_='c16H9d').text)#title
selling_price = (p.find(class_='c13VH6').text)#selling price
try:
original_price=(p.find("del", class_='c13VH6').text)#original price
except:
original_price = "-1"
if p.find("i", class_='ic-dynamic-badge ic-dynamic-badge-freeShipping ic-dynamic-group-2'):
freeShipping = 1
else:
freeShipping = 0
try:
discount = (p.find("span", class_='c1hkC1').text)
except:
discount ="-1"
if p.find(("div", {'class':['c16H9d']})):
url = "https:"+(p.find("a").get("href"))
else:
url = "-1"
nextpage_elements = driver.find_elements_by_class_name('ant-pagination-next')[0]
np=webdriver.ActionChains(driver).move_to_element(nextpage_elements).click(nextpage_elements).perform()
print("- -"*30)
toSave = [title,selling_price,original_price,freeShipping,discount,url]
print(toSave)
writerows(toSave,filename)
getData(np)
The problem might be that the driver is trying to click the button before the element is even loaded correctly.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Chrome(PATH, chrome_options=option)
# use this code after driver initialization
# this is make the driver wait 5 seconds for the page to load.
driver.implicitly_wait(5)
url = "https://www.lazada.com.ph/catalog/?q=phone&_keyori=ss&from=input&spm=a2o4l.home.search.go.239e359dTYxZXo"
driver.get(url)
next_page_path = "//ul[#class='ant-pagination ']//li[#class=' ant-pagination-next']"
# the following code will wait 5 seconds for
# element to become clickable
# and then try clicking the element.
try:
next_page = WebDriverWait(driver, 5).until(
EC.element_to_be_clickable((By.XPATH, next_page_path)))
next_page.click()
except Exception as e:
print(e)
EDIT 1
I changed the code to make the driver wait for the element to become clickable. You can put this code inside a while loop to iterate over multiple pages and break the loop if the button is not found or is not clickable, as sketched below.
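A rough sketch of that loop (getData is a hypothetical stand-in for your scraping function):
while True:
    getData()  # hypothetical: scrape the products on the current page
    try:
        next_page = WebDriverWait(driver, 5).until(
            EC.element_to_be_clickable((By.XPATH, next_page_path)))
        next_page.click()
    except Exception:
        break  # no clickable "next" button left, so this was the last page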