The web url is
CCB bank login page
# Bug fix: XPath attribute tests use '@', not '#' (the '#' is a paste artifact).
driver.find_element_by_xpath("//span[@class='data_money']")
but I can't find any element.
I've tried switching to the frame before finding the element by
driver.switch_to_frame  # NOTE(review): bare attribute reference — this line does nothing; call driver.switch_to.frame(<name or index>) instead
but I still can't find any element. Can anyone help me resolve this issue?
My code is as below:
# NOTE(review): indentation reconstructed — the paste flattened the block structure.
driver = webdriver.Ie()
driver.maximize_window()
driver.implicitly_wait(8)
driver.get(CCB)
time.sleep(2)
# Bug fix: the bare attribute access `driver.switch_to_frame` did nothing.
# The supported API is driver.switch_to.frame(...); index 0 selects the
# first frame on the page.
driver.switch_to.frame(0)
driver.find_element_by_name("USERID").send_keys(user_id)
driver.find_element_by_name("LOGPASS").send_keys(password)
driver.find_element_by_id("loginButton").click()
time.sleep(5)
# Raw string so the backslashes in the Windows path are taken literally.
dm_ret = dm.FindPic(0, 0, 2000, 2000, r"d:\Test_Code\Talk_later.bmp", "303030", 0.9, 0, intX, intY)
if dm_ret[1] > 0 and dm_ret[2] > 0:
    print("PIC found")
    dm.moveto(dm_ret[1] + 24, dm_ret[2] + 12)
    dm.leftclick()
else:
    print("PIC not found")
time.sleep(1)
# Bug fix: the original line mixed quote styles ('…' opened, "…" closed) and
# used '#' where XPath needs '@' for an attribute test.
driver.find_element_by_xpath("//span[@class='data_money']")
Related
from selenium.webdriver import Chrome
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import chromedriver_autoinstaller
chromedriver_autoinstaller.install()
TYPES = ['user', 'verified_audience', 'top_critics']
TYPE = TYPES[2]
URL = 'https://www.rottentomatoes.com/m/dunkirk_2017/reviews'
PAGES = 2

driver = Chrome()
driver.get(URL)
data_reviews = []
while PAGES != 0:
    wait = WebDriverWait(driver, 30)
    reviews = wait.until(lambda _driver: _driver.find_elements(
        By.CSS_SELECTOR, '.review_table_row'))
    # Extract the text NOW, while the elements are still attached to the
    # current page; reading .text after the next page has loaded raises
    # StaleElementReferenceException.
    for review in reviews:
        if TYPE == 'top_critics':
            critic_name_el = review.find_element(
                By.CSS_SELECTOR, '[data-qa=review-critic-link]')
            critic_review_text_el = review.find_element(
                By.CSS_SELECTOR, '[data-qa=review-text]')
            data_reviews.append(critic_name_el.text)
    try:
        # An enabled "next" button. find_element raises NoSuchElementException
        # when it is absent, so the original falsy check on its result was
        # dead code and has been removed.
        next_button_el = driver.find_element(
            By.CSS_SELECTOR, '[data-qa=next-btn]:not([disabled=disabled])'
        )
        next_button_el.click()  # load the next batch of reviews
        # Bug fix: wait for the old page to be torn down before re-querying,
        # otherwise the next loop iteration collects soon-to-be-stale elements.
        wait.until(EC.staleness_of(reviews[0]))
        PAGES -= 1
    except Exception:
        # No clickable "next" button (or the wait timed out): stop paginating.
        PAGES = 0
        driver.quit()
Here, a Rotten Tomatoes review page is being opened and the reviews are being scraped, but when the next button is clicked and the new reviews are about to be scraped, this error pops up... I am guessing that the new reviews have not been loaded and trying to access them is causing the problem. I tried driver.implicitly_wait but that doesn't work either.
The error originates from line 33, data_reviews.append(critic_name_el.text)
By clicking a next page button next_button_el the new page is being loaded but this process takes some time while your Selenium code continues instantly after that click so probably on this line reviews = wait.until(lambda _driver: _driver.find_elements(By.CSS_SELECTOR, '.review_table_row')) it collects the elements on the old page but then the page is being refreshed so some of these elements critic_name_el collected after that (still on the old page) is no more there since the old page is refreshed.
To make your code working you need to introduce a short delay after clicking the next page button, as following:
import time  # Bug fix: time.sleep is used below but was never imported in this snippet

data_reviews = []
while PAGES != 0:
    wait = WebDriverWait(driver, 30)
    reviews = wait.until(lambda _driver: _driver.find_elements(
        By.CSS_SELECTOR, '.review_table_row'))
    # Extracting review data
    for review in reviews:
        if TYPE == 'top_critics':
            critic_name_el = review.find_element(
                By.CSS_SELECTOR, '[data-qa=review-critic-link]')
            critic_review_text_el = review.find_element(
                By.CSS_SELECTOR, '[data-qa=review-text]')
            data_reviews.append(critic_name_el.text)
    try:
        next_button_el = driver.find_element(
            By.CSS_SELECTOR, '[data-qa=next-btn]:not([disabled=disabled])'
        )
        next_button_el.click()  # refresh new reviews
        PAGES -= 1
        # Give the new page time to load so the next find_elements call does
        # not pick up elements from the page that is being replaced.
        time.sleep(2)
    except Exception:
        # find_element raises when no enabled "next" button exists, so
        # pagination ends here; the original's falsy check on the returned
        # element was dead code and has been removed.
        PAGES = 0
        driver.quit()
Also I'd suggest to wait for elements visibility, not just presence here:
reviews = wait.until(lambda _driver: _driver.find_elements(By.CSS_SELECTOR, '.review_table_row'))
Also you need to understand that driver.implicitly_wait does not introduce any actual pause. It just sets the timeout for the find_element and find_elements methods.
I'm fairly new with Selenium and I've been running a couple of very small web scraping projects.
When I try to click on this element through the .click() function I keep getting "Element not interactable"
The html section I'm trying to interact is this:
<a class="hawk-iconBefore hawk-styleCheckbox hawk-styleList" data-options="{"name":"finish","value":"Foil"}" href="https://starcitygames.com/search/?card_name=Glimmervoid&finish=Foil" rel="nofollow"><span class="hawk-selectionInner">Foil <span class="hawk-facetCount">(5)</span></span></a>
And my python code looks like this:
from selenium import webdriver
from selenium.webdriver.common.by import By

url = 'https://starcitygames.com/'
card_name = 'Fatal Push'
expansion_name = 'Double Masters'
foil = True
card_price = 0

browser_options = webdriver.ChromeOptions()
# Bug fix: Chrome expects the flag with leading dashes; "headless" alone is ignored.
browser_options.add_argument("--headless")
browser = webdriver.Chrome(options=browser_options)
browser.get(url)
browser.implicitly_wait(0.2)  # NOTE(review): 0.2 s is a very short implicit wait — confirm intended
browser.maximize_window()
print(card_name)


def get_card_price():
    """Search the store for `card_name`, optionally select the foil facet,
    and set/return the price of the listing whose text contains
    `expansion_name`."""
    global card_price
    print("Finding card...")
    browser.find_element(By.CSS_SELECTOR, "[name='search_query']").send_keys(card_name)
    search_button = browser.find_element(By.CLASS_NAME, "search-submit")
    search_button.click()
    if foil:
        print("Checking if Foil...")
        # NOTE(review): absolute XPaths are brittle; prefer an id/attribute-based locator.
        foil_select = browser.find_element(By.XPATH, "/html/body/div/div[1]/main/aside/div[2]/div[2]/div/div[5]/div/ul/li[1]/a")
        try:
            foil_select.click()
            print("It's Foil")
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
            print("Element not interactable")
    cards = browser.find_elements(By.CLASS_NAME, "hawk-results-item")
    for card in cards:
        c = card.text
        price = card.find_element(By.CSS_SELECTOR, "div[class='hawk-results-item__options-table-cell hawk-results-item__options-table-cell--price childAttributes']")
        if expansion_name in c:
            card_price = price.text
    return card_price


get_card_price()
print("Fetching card price...")
print(card_price)
browser.quit()
Every other part returns the info I need, but when the condition foil is true it jumps to the exception because the element is not interactable.
I have tried accessing it with a CSS selector and with the regular XPath. I saw another answer suggesting that using the full XPath would fix the issue, but it didn't work.
What could I do?
So I figured out how to fetch the href for the element I wanted and it was as simple as just getting that and then telling my code to go to that page and execute the rest of the code:
That's how it looks now:
if foil:
    print("Checking if Foil...")
    try:
        # Bug fix: XPath attribute tests use '@', not '#' (paste artifact).
        foil_select = browser.find_element(By.XPATH, '//*[@id="hawkfacet_finish"]/li[1]/a')
        link = foil_select.get_attribute("href")
        print("It's Foil")
        # Navigate straight to the facet's href instead of clicking the
        # non-interactable anchor.
        browser.get(link)
    except Exception:
        # Narrowed from a bare `except:` so interrupts still propagate.
        print("Element not interactable")
else:
    foil_select = browser.find_element(By.XPATH, '//*[@id="hawkfacet_finish"]/li[2]/a')
    link = foil_select.get_attribute("href")
    print("It's not foil")
    browser.get(link)
Now to move on with the next step. Thanks everyone!
This
browser_options.add_argument("headless")
should be
browser_options.add_argument("--headless")
You need to scroll to each card first before grabbing the price.
Below is the sample code:
driver.maximize_window()
wait = WebDriverWait(driver, 20)

url = 'https://starcitygames.com/'
card_name = 'Fatal Push'
expansion_name = 'Double Masters'
foil = True
card_price = 0

# browser_options = webdriver.ChromeOptions()
# browser_options.add_argument("headless")
# browser = webdriver.Chrome(options=browser_options)
driver.get(url)
driver.implicitly_wait(0.2)
driver.maximize_window()
print(card_name)


def get_card_price():
    """Search for `card_name`, optionally apply the facet link, then scroll
    through the result cards and return the price of the one whose text
    mentions `expansion_name`."""
    global card_price
    print("Finding card...")
    wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "input[name='search_query']"))).send_keys(card_name)
    search_button = wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "search-submit")))
    search_button.click()
    if foil:
        print("Checking if Foil...")
        # NOTE(review): this targets the rarity facet matching 'Rare' while the
        # surrounding text talks about the Foil finish — confirm the selector.
        foil_select = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "ul#hawkfacet_rarity li a[data-options*='Rare']")))
        try:
            foil_select.click()
            print("It's Foil")
        except Exception:
            print("Element not interactable")
    time.sleep(5)
    # Bug fix: '@', not '#', for XPath attribute tests (paste artifact).
    cards = wait.until(EC.presence_of_all_elements_located((By.XPATH, "//div[@class='hawk-results-item']")))
    for card in cards:
        # Prices only render once the card has been scrolled into the viewport.
        driver.execute_script("arguments[0].scrollIntoView(true);", card)
        c = card.get_attribute('innerText')
        print(c)
        price = card.find_element(By.XPATH, ".//descendant::div[contains(@class, 'price childAttributes')]")
        print(price.text)
        if expansion_name in c:
            card_price = price.text
    return card_price


get_card_price()
print("Fetching card price...")
print(card_price)
Output:
Fatal Push
Finding card...
Checking if Foil...
It's Foil
Fatal Push (Borderless)
Double Masters - Variants
Near Mint -
English
$14.99
QTY: 0
NOTIFY ME
$14.99
Fatal Push (Borderless)
Double Masters - Variants (Foil)
Near Mint -
English
$14.99
QTY: 3
Add to cart
$14.99
Fetching card price...
$14.99
Process finished with exit code 0
This is the link
https://www.unibet.eu/betting/sports/filter/football/matches
Using selenium driver, I access this link. This is what we have on the page
The actual task for me is to click on each of the match link. I found all those matches by
elems = driver.find_elements_by_class_name('eb700')  # NOTE(review): legacy Selenium 3 API; Selenium 4 uses driver.find_elements(By.CLASS_NAME, 'eb700')
When i did this
for elem in elems:
    # Bug fix: a stray bare `elements` expression (a NameError at runtime)
    # has been removed from the loop body.
    elem.click()
    time.sleep(2)
    # Going back invalidates every element in `elems`: the next iteration's
    # click raises StaleElementReferenceException.
    driver.execute_script("window.history.go(-1)")
    time.sleep(2)
The first time it clicked, loaded new page, went to previous page and then gave the following error
StaleElementReferenceException: Message: stale element reference: element is not attached to the page document
I also tried getting the href attribute from the elem, but it gave None. Is it possible to open the page in a new tab instead of clicking the elem?
You can retry clicking the element, since after navigation it is no longer present in the DOM.
Code :
driver = webdriver.Chrome("C:\\Users\\**\\Inc\\Desktop\\Selenium+Python\\chromedriver.exe")
driver.maximize_window()
wait = WebDriverWait(driver, 30)
driver.get("https://www.unibet.eu/betting/sports/filter/football/matches")
wait.until(EC.element_to_be_clickable((By.PARTIAL_LINK_TEXT, "OK"))).click()
sleep(2)
# Bug fix: '@', not '#', for XPath attribute tests (paste artifact).
elements = driver.find_elements(By.XPATH, "//div[contains(@class,'_')]/div[@data-test-name='accordionLevel1']")
element_len = len(elements)
print(element_len)

counter = 0
while counter < element_len:
    # Retry the click up to twice: the first attempt can hit an element that
    # went stale after navigating back to the listing page.
    attempts = 0
    while attempts < 2:
        try:
            ActionChains(driver).move_to_element(elements[counter]).click().perform()
        except Exception:
            # Narrowed from a bare `except:`; swallow stale/intercepted-click
            # errors and retry once.
            pass
        attempts = attempts + 1
        sleep(2)
    # driver.execute_script("window.history.go(-1)")  # maybe get team name
    # using //div[@data-test-name='teamName'] xpath
    sleep(2)
    # driver.refresh()
    sleep(2)
    counter = counter + 1
Since you move to next page, the elements no longer exists in DOM. So, you will get Stale Element exception.
What you can do is, when coming back to the same page, get all the links again (elems) and use a while loop instead of a for loop.
elems = driver.find_elements_by_class_name('eb700')
i = 0
while i < len(elems):
    elems[i].click()
    time.sleep(2)
    driver.execute_script("window.history.go(-1)")
    time.sleep(2)
    # Re-fetch after navigating back: the old references are stale.
    elems = driver.find_elements_by_class_name('eb700')
    # Bug fix: Python has no `i++` operator; increment explicitly.
    i += 1
Other solution is to remain on same page and save all href attributes in a list and then use driver.navigate to open each match link.
matchLinks = []
elems = driver.find_elements_by_class_name('eb700')
for elem in elems:
    # Bug fix: the original append was missing its closing parenthesis.
    matchLinks.append(elem.get_attribute('href'))

for match in matchLinks:
    driver.get(match)
    # do whatever you want to do on match page.
I'm writing a script to scrape product names from a website, filtered by brands. Some search results may contain more than one page, and this is where the problem comes in. I'm able to scrape the first page but when the script clicks on the next page the error message selenium.common.exceptions.StaleElementReferenceException: Message: stale element reference: element is not attached to the page document shows. Below is my code:
def scrape():
    """Scrape result titles across (up to) two result pages."""
    resultList = []
    currentPage = 1
    while currentPage <= 2:
        titleResults = WebDriverWait(driver, 10).until(
            EC.visibility_of_all_elements_located((By.CSS_SELECTOR, 'h4.mt-0')))
        # Bug fix: read .text while the elements are still attached to the
        # page; after clicking "Next" the stored WebElements go stale and
        # reading them raises StaleElementReferenceException.
        resultList.extend(el.text for el in titleResults)
        # Bug fix: '@', not '#', for XPath attribute tests (paste artifact).
        checkNextPage = WebDriverWait(driver, 30).until(
            EC.visibility_of_all_elements_located((By.XPATH, "//div/nav/ul/li/a[@aria-label='Next']")))
        for cnp in checkNextPage:
            nextPageNumber = int(cnp.get_attribute("data-page"))
        currentPage += 1
        driver.find_element_by_xpath("//div/nav/ul/li/a[@aria-label='Next']").click()
    for result in resultList:
        print("Result: {}".format(result))
I think the error got triggered when .click() was called. I've done a lot of searching on the internet before resorting to posting this question here because either I don't understand the solutions from other articles/posts or they don't apply to my case.
A stale element is an old element — one that is no longer attached to the DOM.
I think the error is caused by last line.
You should extract elements text before the elements become unavailable.
def scrape():
    """Scrape result titles across (up to) two pages, extracting the text
    before navigating so the elements cannot go stale."""
    resultList = []
    currentPage = 1
    while currentPage <= 2:
        titleResults = WebDriverWait(driver, 10).until(
            EC.visibility_of_all_elements_located((By.CSS_SELECTOR, 'h4.mt-0')))
        # Bug fix: the original marked this comment with '//', which is a
        # SyntaxError in Python. Extract the text while the elements are live.
        results_text = [el.text for el in titleResults]
        resultList.extend(results_text)
        # '@', not '#', for XPath attribute tests (paste artifact).
        checkNextPage = WebDriverWait(driver, 30).until(
            EC.visibility_of_all_elements_located((By.XPATH, "//div/nav/ul/li/a[@aria-label='Next']")))
        for cnp in checkNextPage:
            nextPageNumber = int(cnp.get_attribute("data-page"))
        currentPage += 1
        driver.find_element_by_xpath("//div/nav/ul/li/a[@aria-label='Next']").click()
    print("Result: {}".format(resultList))
I want to do a google search and collect the links to all hits so that I can click those links and extract data from them after collecting all links. How can I get the link from every hit?
I've tried several solutions like using a for loop and a while True statement. I'll show some examples of the code below. I either get no data at all or I get only data (links) from 1 webpage. Can someone please help me figure out how to iterate over every page of the google search and get all the links so I can continue scraping those pages? I'm new to using Selenium so I'm sorry if the code doesn't make much sense, I've really confused myself with this one.
driver.get('https://www.google.com')
search = driver.find_element_by_name('q')
search.send_keys('condition')
sleep(0.5)
search.send_keys(Keys.RETURN)
sleep(0.5)
# NOTE(review): first attempt, as posted. Indentation was lost in the paste,
# and this `try:` has no matching `except`/`finally` — a SyntaxError as-is.
while True:
try:
urls = driver.find_elements_by_class_name('iUh30')
for url in urls  # NOTE(review): missing trailing ':' — SyntaxError as posted
urls = [url.text for url in urls]
sleep(0.5)
element = driver.find_element_by_id('pnnext')
driver.execute_script("return arguments[0].scrollIntoView();", element)
sleep(0.5)
element.click()
# NOTE(review): the block below duplicates the pagination logic above verbatim.
urls = driver.find_elements_by_class_name('iUh30')
urls = [url.text for url in urls]
sleep(0.5)
element = driver.find_element_by_id('pnnext')
driver.execute_script("return arguments[0].scrollIntoView();", element)
sleep(0.5)
element.click()
# NOTE(review): second attempt. find_element_by_id returns a single WebElement
# (or raises NoSuchElementException), so len() on it fails — find_elements
# (plural) returns a list that can be length-checked, as the answer below does.
while True:
next_page_btn = driver.find_element_by_id('pnnext')
if len(next_page_btn) <1:
print("no more pages left")
break
else:
urls = driver.find_elements_by_class_name('iUh30')
urls = [url.text for url in urls]
sleep(0.5)
element = driver.find_element_by_id('pnnext')
driver.execute_script("return arguments[0].scrollIntoView();", element)
sleep(0.5)
element.click()
I expect a list of all urls from the google search that can be opened by Selenium so Selenium can get data from those pages.
I only get a list of urls from one page. The next step (scraping those pages) is working fine. But due to this restriction I only get 10 results while I'd like to see all results.
Try the following code. I have changed it a bit. Hope this helps.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions

driver = webdriver.Chrome()
driver.get('https://www.google.com')
search = driver.find_element_by_name('q')
search.send_keys('condition')
search.submit()

while True:
    # find_elements (plural) returns a list, so an absent "next" button
    # yields an empty list instead of raising NoSuchElementException.
    # Bug fix: '@', not '#', in the XPath attribute tests (paste artifact).
    next_page_btn = driver.find_elements_by_xpath("//a[@id='pnnext']")
    if len(next_page_btn) < 1:
        print("no more pages left")
        break
    else:
        urls = driver.find_elements_by_xpath("//*[@class='iUh30']")
        urls = [url.text for url in urls]
        print(urls)
        # Wait until the button is clickable, scroll it into view, then click.
        element = WebDriverWait(driver, 5).until(expected_conditions.element_to_be_clickable((By.ID, 'pnnext')))
        driver.execute_script("return arguments[0].scrollIntoView();", element)
        element.click()
OutPut :
['https://dictionary.cambridge.org/dictionary/english/condition', 'https://www.thesaurus.com/browse/condition', 'https://en.oxforddictionaries.com/definition/condition', 'https://www.dictionary.com/browse/condition', 'https://www.merriam-webster.com/dictionary/condition', 'https://www.collinsdictionary.com/dictionary/english/condition', 'https://en.wiktionary.org/wiki/condition', 'www.businessdictionary.com/definition/condition.html', 'https://en.wikipedia.org/wiki/Condition', 'https://www.definitions.net/definition/condition', '', '', '', '']
['https://www.thefreedictionary.com/condition', 'https://www.thefreedictionary.com/conditions', 'https://www.yourdictionary.com/condition', 'https://www.foxnews.com/.../woman-battling-rare-suicide-disease-says-chronic-pain-con...', 'https://youngminds.org.uk/find-help/conditions/', 'www.road.is/travel-info/road-conditions-and-weather/', 'https://roll20.net/compendium/dnd5e/Conditions', 'https://www.home-assistant.io/docs/scripts/conditions/', 'https://www.bhf.org.uk/informationsupport/conditions', 'https://www.gov.uk/driving-medical-conditions']
['https://immi.homeaffairs.gov.au/visas/already-have.../check-visa-details-and-condition...', 'https://www.d20pfsrd.com/gamemastering/conditions/', 'https://www.ofgem.gov.uk/licences-industry-codes-and.../licence-conditions', 'https://www.healthychildren.org/English/health-issues/conditions/Pages/default.aspx', 'https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html', 'https://www.ofcom.org.uk/phones-telecoms.../general-conditions-of-entitlement', 'https://www.rnib.org.uk/eye-health/eye-conditions', 'https://www.mdt.mt.gov/travinfo/map/mtmap_frame.html', 'https://www.mayoclinic.org/diseases-conditions', 'https://www.w3schools.com/python/python_conditions.asp']
['https://www.tremblant.ca/mountain-village/mountain-report', 'https://www.equibase.com/static/horsemen/horsemenareaCB.html', 'https://www.abebooks.com/books/rarebooks/...guide/.../guide-book-conditions.shtml', 'https://nces.ed.gov/programs/coe/', 'https://www.cdc.gov/wtc/conditions.html', 'https://snowcrows.com/raids/builds/engineer/engineer/condition/']
['https://www.millenniumassessment.org/en/Condition.html', 'https://ghr.nlm.nih.gov/condition', 'horsemen.ustrotting.com/conditions.cfm', 'https://lb.511ia.org/ialb/', 'https://www.nps.gov/deva/planyourvisit/conditions.htm', 'https://www.allaboutvision.com/conditions/', 'https://www.spine-health.com/conditions', 'https://www.tripcheck.com/', 'https://hb.511.nebraska.gov/', 'https://www.gamblingcommission.gov.uk/.../licence-conditions-and-codes-of-practice....']
['https://sports.yahoo.com/andrew-bogut-credits-beer-improved-022043569.html', 'https://ant.apache.org/manual/Tasks/conditions.html', 'https://www.disability-benefits-help.org/disabling-conditions', 'https://www.planningportal.co.uk/info/200126/applications/60/consent_types/12', 'https://www.leafly.com/news/.../qualifying-conditions-for-medical-marijuana-by-state', 'https://www.hhs.gov/healthcare/about-the-aca/pre-existing-conditions/index.html', 'https://books.google.co.uk/books?id=tRcHAAAAQAAJ', 'www.onr.org.uk/documents/licence-condition-handbook.pdf', 'https://books.google.co.uk/books?id=S0sGAAAAQAAJ']
['https://books.google.co.uk/books?id=KSjLDvXH6iUC', 'https://www.arcgis.com/apps/Viewer/index.html?appid...', 'https://www.trappfamily.com/trail-conditions.htm', 'https://books.google.co.uk/books?id=n_g0AQAAMAAJ', 'https://books.google.co.uk/books?isbn=1492586277', 'https://books.google.co.uk/books?id=JDjQ2-HV3l8C', 'https://www.newsshopper.co.uk/.../17529825.teenager-no-longer-in-critical-condition...', 'https://nbcpalmsprings.com/.../bicyclist-who-collided-with-minivan-hospitalized-in-cri...']
['https://www.stuff.co.nz/.../4yearold-christchurch-terrorist-attack-victim-in-serious-but-...', 'https://www.shropshirestar.com/.../woman-in-serious-condition-after-fall-from-motor...', 'https://www.expressandstar.com/.../woman-in-serious-condition-after-fall-from-motor...', 'https://www.independent.ie/.../toddler-rushed-to-hospital-in-serious-condition-after-hit...', 'https://www.nhsinform.scot/illnesses-and-conditions/ears-nose-and-throat/vertigo', 'https://www.rochdaleonline.co.uk/.../teenage-cyclist-in-serious-condition-after-collisio...', 'https://www.irishexaminer.com/.../baby-of-woman-found-dead-in-cumh-in-critical-cond...', 'https://touch.nihe.gov.uk/index/corporate/housing.../house_condition_survey.htm', 'https://www.nami.org/Learn-More/Mental-Health-Conditions', 'https://www.weny.com/.../update-woman-in-critical-but-stable-condition-after-being-s...']