How can I make Selenium scroll only in the left quadrant?
When I use the command below, it zooms the map instead, which is not my intention: I want to scrape the links of the companies listed in the left column.
driver.execute_script("window.scrollBy(0, 200)")
You need to find the scrollable div element, and then you can apply JavaScript to it as follows:
element = wait.until(EC.presence_of_element_located((By.XPATH, "//div[@role='main']//div[contains(@aria-label,'lanchonet')]")))
driver.execute_script("arguments[0].scroll(0, arguments[0].scrollHeight);", element)
The code above works for me.
The entire code is:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 5)
url = "https://www.google.com.br/maps/search/lanchonete,/#-27.0027727,-48.6293259,15z"
driver.get(url)
element = wait.until(EC.presence_of_element_located((By.XPATH, "//div[@role='main']//div[contains(@aria-label,'lanchonet')]")))
driver.execute_script("arguments[0].scroll(0, arguments[0].scrollHeight);", element)
You can, of course, scroll by other amounts, not only by the entire height.
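For example, a minimal sketch that scrolls the same results panel down by 300 pixels instead of all the way to the bottom (element is the panel located above; the amount is arbitrary):
# scroll the located results panel down by 300 px instead of to its full height
driver.execute_script("arguments[0].scrollBy(0, 300);", element)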
I need to scrape this site. How can I choose a certain element from the 'Licensee Name' dropdown list?
I tried the following code, but it's not working (I also tried using the class attribute, but that doesn't work either):
input_ = driver.find_element(By.XPATH, "//input[@class='select2-search__field']")
input_.send_keys("1x2 NETWORK MALTA LTD")
After clicking the drop-down, you need to type the text into the input field, then click the search result that appears, and finally click the Search button.
The following code works:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 10)
url = "https://www.mga.org.mt/licences/"
driver.get(url)
wait.until(EC.frame_to_be_available_and_switch_to_it((By.XPATH, "//iframe[contains(@src,'//mgali')]")))
wait.until(EC.element_to_be_clickable((By.XPATH, "//span[@id='select2-ListBox3-container']"))).click()
wait.until(EC.element_to_be_clickable((By.CLASS_NAME, "select2-search__field"))).send_keys("1x2 NETWORK MALTA LTD")
wait.until(EC.element_to_be_clickable((By.XPATH, "//li[contains(#class,'select2-results__option')][contains(.,'NETWORK MALTA')]"))).click()
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '[type="submit"]'))).click()
I'm using Selenium to try to scrape a listing of products on this website:
https://www.zonacriativa.com.br/harry-potter
However, I'm having trouble getting the full listing of products. The page lists 116 products, yet only a few are shown at a time. If I want to see the rest, I need to click the "Carregar mais Produtos" (load more products) button at the bottom a few times to get the full listing.
I'm having trouble locating this button, as it doesn't have an id and its class is a huge string. I've tried several things, like the examples below, but they don't seem to work. Any suggestions?
driver.find_element("xpath", "//button[text()='Carregar mais Produtos']").click()
driver.find_element("css selector", ".vtex-button__label.flex.items-center.justify-center.h-100.ph5").click()
driver.find_element(By.CLASS_NAME, "vtex-button.bw1.ba.fw5.v-mid.relative.pa0.lh-solid.br2.min-h-small.t-action--small.bg-action-primary.b--action-primary.c-on-action-primary.hover-bg-action-primary.hover-b--action-primary.hover-c-on-action-primary.pointer").click()
The element you are trying to click is initially outside the visible screen, so you can't click it. Also, that XPath doesn't locate the element, at least for me.
What you need to do is scroll the page down until that button becomes visible and clickable, and then click it.
The following code clicks that button once:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 5)
url = "https://www.zonacriativa.com.br/harry-potter"
driver.get(url)
while True:
    try:
        wait.until(EC.element_to_be_clickable((By.XPATH, "//div[contains(@class,'buttonShowMore')]//button"))).click()
        break
    except:
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
The above code can easily be modified to scroll and click that button until we reach the last page, where the button is no longer present:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 5)
url = "https://www.zonacriativa.com.br/harry-potter"
driver.get(url)
while driver.find_elements(By.XPATH, "//div[contains(@class,'buttonShowMore')]//button"):
    try:
        wait.until(EC.element_to_be_clickable((By.XPATH, "//div[contains(@class,'buttonShowMore')]//button"))).click()
    except:
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
I am trying to scrape articles from this website. I managed to do the login part, but when I try to click on the search button and send the values I get a timeout error. I tried running Selenium with the start-maximized option and noticed the page doesn't seem to load.
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="search__input"]')))
input_text = elem.find_element(by=By.XPATH, value='//*[@id="search__input"]').click()
input_text.send_keys("Anthony Albanese")
print(input_text.get_attribute('value'))
I have tried get_attribute('innerHTML') and got the search button's HTML, but I still need to send the keys, so that doesn't seem to be of any use. This is the error I got (screenshot). What should I do to send in the search terms?
There are 2 elements on that page matching the //*[@id="search__input"] XPath locator, while you need the second one.
You have to use a unique locator.
This should work better:
text_input = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//div[@class='header__search']//*[@id='search__input']")))
text_input.click()
text_input.send_keys("Anthony Albanese")
This can also be done with CSS Selectors. They are shorter in this case:
text_input = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CSS_SELECTOR, ".header__search #search__input")))
text_input.click()
text_input.send_keys("Anthony Albanese")
UPD
This is the code I used, exactly according to what I wrote above:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 20)
url = "https://www.theaustralian.com.au/"
driver.get(url)
text_input = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, ".header__search #search__input")))
text_input.click()
text_input.send_keys("Anthony Albanese")
After the code above runs, the search input on the page is filled with the text.
And if you add an Enter key press to the search input, as follows:
from selenium import webdriver
from selenium.webdriver import Keys
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 20)
url = "https://www.theaustralian.com.au/"
driver.get(url)
text_input = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, ".header__search #search__input")))
text_input.click()
text_input.send_keys("Anthony Albanese" + Keys.ENTER)
the web page will finally show the search results.
I would like to extract the text that appears on mouse hover over an element on the website https://idsc.cidadessustentaveis.org.br/rankings. A particular example of text of interest is “Erradicação da pobreza Pontuação: 44,47”, which appears when hovering over the first bar in the “Desempenho por ODS” column. I have tried the code below, but it returns the blank text ‘’.
from selenium import webdriver
driver = webdriver.Firefox()
driver.get("https://idsc.cidadessustentaveis.org.br/rankings")
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
wait = WebDriverWait(driver, 20)
desired_elem = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '.SdgPerformanceBar__Block-sc-1yl1q71-2.fBQLcJ')))
I printed an attribute of the extracted element, which confirmed that I had successfully reached the targeted element.
print(desired_elem.get_attribute('outerHTML'))
Which returned:
<div style="width:2.62%" class="SdgPerformanceBar__Block-sc-1yl1q71-2 fBQLcJ"></div>
Note that by inspecting the element in Firefox I found that the element has no innerHTML.
I then tried to extract the text using desired_elem.text, but I got a blank ‘’.
I also tried the code below, which returned a blank as well.
from selenium.webdriver.common.action_chains import ActionChains
elem = driver.find_element(By.CSS_SELECTOR, '.SdgPerformanceBar__Block-sc-1yl1q71-2.fBQLcJ');
actions = ActionChains(driver)
actions.move_to_element(elem)
actions.move_to_element(elem).perform()
Calling elem.text returned ''
You were close to the solution.
When hovering over those elements, tooltips appear.
These tooltips show different text depending on which element you hover over.
Here I used your code and just added reading the tooltips:
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 10)
actions = ActionChains(driver)
tooltip1 = "div[role='tooltip'] .MuiTypography-root.MuiTypography-body1"
tooltip2 = "div[role='tooltip'] .MuiTypography-root.MuiTypography-body2"
url = "https://idsc.cidadessustentaveis.org.br/rankings"
driver.get(url)
desired_elem = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '.SdgPerformanceBar__Block-sc-1yl1q71-2.fBQLcJ')))
actions.move_to_element(desired_elem).perform()
tt1_text = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, tooltip1))).text
tt2_text = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, tooltip2))).text
print(tt1_text)
print(tt2_text)
The output is:
Erradicação da pobreza
Pontuação: 44,47
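If you need the tooltip text for every bar rather than only the first one, you can iterate over the bars with the same locators. This is just a rough sketch: the auto-generated class names may change, the selector matches every bar on the page, and a short pause between hovers may be needed so the tooltip has time to refresh:
import time
# hover each performance bar and read the tooltip that appears for it
bars = driver.find_elements(By.CSS_SELECTOR, "[class*='SdgPerformanceBar__Block']")
for bar in bars:
    actions.move_to_element(bar).perform()
    time.sleep(0.5)  # let the tooltip update before reading it
    title = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, tooltip1))).text
    score = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, tooltip2))).text
    print(title, score)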
I am trying to scrape this URL, dealing with a Download button, and I am having a problem: the last line gives an ElementClickInterceptedException.
My actual goal is to download the CSV file.
The code:
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.chrome.options import Options
from ipykernel import kernelapp as app
import time
options = webdriver.ChromeOptions()
driver_path = 'C:\\Users\\Idener\\Downloads\\chromedriver_win32\\chromedriver.exe'
driver = webdriver.Chrome(driver_path, options=options)
url = "https://pubchem.ncbi.nlm.nih.gov/compound/2078"
driver.get(url)
wait = WebDriverWait(driver, 5)
wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="NIOSH-Toxicity-Data"]/div[1]/div/div/a'))).click()
wait = WebDriverWait(driver, 10)
wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="Download"]'))).click()
The element you are trying to click is initially outside the visible viewport, so you first need to scroll the page and only then click that element.
Clicking the first element opens a new tab, and the second element you want to click is there, in that new tab. So, you need to switch to the new tab to access it.
There is no need to define wait = WebDriverWait(driver, 10) a second time.
The following code is working:
import time
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)
wait = WebDriverWait(driver, 10)
url = "https://pubchem.ncbi.nlm.nih.gov/compound/2078#section=Toxicity"
driver.get(url)
element = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, "#NIOSH-Toxicity-Data a[title*='Open']")))
element.location_once_scrolled_into_view
time.sleep(1)
element.click()
driver.switch_to.window(driver.window_handles[1])
wait.until(EC.element_to_be_clickable((By.ID, "Download"))).click()
It does not download the file, it only opens the download dialog.
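If the click only opens the browser's download dialog, you can usually tell Chrome to save files automatically to a chosen directory instead of prompting. A minimal sketch of the options setup, assuming an example download folder (adjust both paths to your machine):
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
# configure Chrome to download files without showing the save dialog
prefs = {
    "download.default_directory": r"C:\downloads",  # example target folder
    "download.prompt_for_download": False,
    "safebrowsing.enabled": True,
}
options = Options()
options.add_argument("start-maximized")
options.add_experimental_option("prefs", prefs)
webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(options=options, service=webdriver_service)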