I'm learning to scrape with selenium, but I'm having trouble connecting to this site 'http://www.festo.com/cat/it_it/products_VUVG_S?CurrentPartNo=8043720'
it does not load the content of the site
I would like to learn how to connect to this site to request images and data
my code is simple because I'm learning, I looked for ways to make the connection but without success
from selenium import webdriver
from selenium.webdriver.firefox.firefox_profile import FirefoxProfile
import time
ff_profile = FirefoxProfile()
ff_profile.set_preference("general.useragent.override", "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.85 Safari/537.36")
driver = webdriver.Firefox(firefox_profile = ff_profile)
driver.get('http://www.festo.com/cat/it_it/products_VUVG_S?CurrentPartNo=8043720')
time.sleep(5)
campo_busca = driver.find_elements_by_id('of132')
print(campo_busca)
As the desired element is within an <iframe>, to extract the src attribute of the desired element you have to:
Induce WebDriverWait for the desired frame to be available and switch to it.
Induce WebDriverWait for the desired visibility_of_element_located().
You can use the following Locator Strategies:
driver = webdriver.Firefox(executable_path=r'C:\Utility\BrowserDrivers\geckodriver.exe')
driver.get('http://www.festo.com/cat/it_it/products_VUVG_S?CurrentPartNo=8043720')
WebDriverWait(driver, 20).until(EC.frame_to_be_available_and_switch_to_it((By.XPATH, "//iframe[@id='CamosIFId' and @name='CamosIF']")))
print(WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//img[@id='of132']"))).get_attribute("src"))
However, as @google mentioned in one of the comments, it seems the browsing experience is better with ChromeDriver / Chrome, and you can use the following solution:
options = webdriver.ChromeOptions()
options.add_argument("start-maximized")
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)
driver = webdriver.Chrome(options=options, executable_path=r'C:\Utility\BrowserDrivers\chromedriver.exe')
driver.get('http://www.festo.com/cat/it_it/products_VUVG_S?CurrentPartNo=8043720')
WebDriverWait(driver, 20).until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR, "iframe#CamosIFId[name='CamosIF']")))
print(WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "img#of132"))).get_attribute("src"))
Note: You have to add the following imports:
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
Console Output:
https://www.festo.com/cfp/camosHtml/i?SIG=0020e295a546f45d9acb6844231fd8ff31ca817a_64_64.png
Here you can find a relevant discussion on Ways to deal with #document under iframe
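For completeness, here is a minimal hedged sketch of working inside the frame and then switching back to the top-level document. It reuses the iframe and image ids from the answer above and assumes geckodriver is reachable on PATH:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Firefox()
driver.get('http://www.festo.com/cat/it_it/products_VUVG_S?CurrentPartNo=8043720')

# Switch into the iframe (id taken from the solution above)
WebDriverWait(driver, 20).until(
    EC.frame_to_be_available_and_switch_to_it((By.ID, "CamosIFId")))

# Work inside the frame, e.g. read the image src
src = WebDriverWait(driver, 20).until(
    EC.visibility_of_element_located((By.ID, "of132"))).get_attribute("src")
print(src)

# Switch back to the top-level document before touching elements outside the frame
driver.switch_to.default_content()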
Try this (more information here):
from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
import time

FIREFOX_DRIVER_PATH = "your_geckodriver_path"
firefox_options = FirefoxOptions()
firefox_options.headless = True
# set options as per requirement for firefox
firefox_options.add_argument("--no-sandbox")
firefox_options.add_argument("--disable-setuid-sandbox")
firefox_options.add_argument('--disable-dev-shm-usage')
firefox_options.add_argument("--window-size=1920,1080")
driver = webdriver.Firefox(firefox_options=firefox_options, executable_path=FIREFOX_DRIVER_PATH)
driver.get('http://www.festo.com/cat/it_it/products_VUVG_S?CurrentPartNo=8043720')
time.sleep(5)
campo_busca = driver.find_elements_by_id('of132')
print(campo_busca)
Download the driver from this link, place it in a folder, copy the complete path, and paste it below:
from selenium import webdriver
from selenium.webdriver.firefox.options import Options as FirefoxOptions
import time

FIREFOX_DRIVER_PATH = "driver_path"
firefox_options = FirefoxOptions()
# only if you don't want to see the GUI; otherwise set it to False or comment this line out
firefox_options.headless = True
driver = webdriver.Firefox(firefox_options=firefox_options, executable_path=FIREFOX_DRIVER_PATH)
driver.get('http://www.festo.com/cat/it_it/products_VUVG_S?CurrentPartNo=8043720')
time.sleep(3)
campo_busca = driver.find_elements_by_id('of132')
print(campo_busca)
I try to extract data from the website below, but when Selenium clicks the "Search" button (the last step of the code), an error is returned; it seems to be blocked by the server. (It is totally alright when I access the website manually, but when I use the automated Chrome browser, the attached error message is returned when I click the "Search" button.) How should I get around this?
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import time
ser = Service(r"C:\Users\shekc\Documents\chromedriver.exe")
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)
options.add_argument("--Referer=https://www.dahsing.com/jsp/fundPlatform/index_e.jsp")
options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36")
driver = webdriver.Chrome(options=options, service=ser)
url = "https://www.dahsing.com/jsp/fundPlatform/risk_warning_e.jsp"
driver.get(url)
time.sleep(3)
# click "Agree"
driver.find_element(By.LINK_TEXT,"Agree").click()
driver.switch_to.default_content()
driver.switch_to.frame(1)
# return the number of fund houses
from selenium.webdriver.support.ui import Select
fund_house_select = Select(driver.find_element(By.XPATH, '//*[@id="mainContent_ddlFundHouse"]'))
FH_No = len(fund_house_select.options)
# select "all per page"
page_select = Select(driver.find_element(By.XPATH, '//*[@id="mainContent_ddlPageNumber"]'))
page_select.select_by_index(len(page_select.options) - 1)
fund_house_select = Select(driver.find_element(By.XPATH, '//*[@id="mainContent_ddlFundHouse"]'))
fund_house_select.select_by_index(1)
FH_name = fund_house_select.first_selected_option.text
# click "Search"
driver.find_element(By.LINK_TEXT,"Search").click()
The code clicks on an element that may or may not exist on the page and then needs to click on all elements of the same class:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.firefox.options import Options
import time
my_user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.84 Safari/537.36'
options = Options()
options.set_preference("general.useragent.override", my_user_agent)
options.page_load_strategy = 'eager'
options.add_argument('--headless')
driver = webdriver.Firefox(options=options)
driver.get("https://int.soccerway.com/matches/2022/07/23/")
try:
    WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.XPATH, "//a[contains(@class,'tbl-read-more-btn')]")))
    driver.find_element(by=By.XPATH, value="//a[contains(@class,'tbl-read-more-btn')]").click()
    time.sleep(0.1)
except:
    pass

WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.XPATH, "//tr[contains(@class,'group-head')]")))
for btn in driver.find_elements(by=By.XPATH, value="//tr[contains(@class,'group-head')]"):
    btn.click()
    time.sleep(0.1)
But this takes 90 seconds to run; when I remove the time.sleep it drops to 65 seconds, but without it I notice that at seemingly random times some of the elements that should be clicked are skipped.
Is there any way to do the same job but click all the elements at the same time to speed up the process?
Buttons to click (visual example):
Expected result after the clicks (the boxes open):
In order to speed up the process you can click on all the competition-link elements one by one in a sequence using either of the following Locator Strategies (a JavaScript-based alternative that fires all the clicks at once is sketched after these snippets):
Using CSS_SELECTOR:
for ele in driver.find_elements(By.CSS_SELECTOR, "tr.group-head.clickable th.competition-link>a"):
    ele.click()
Using XPATH:
for ele in driver.find_elements(By.XPATH, "//tr[@class='group-head clickable ']//th[@class='competition-link']/a"):
    ele.click()
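If you want to trigger all the clicks in a single call instead of looping in Python, here is a hedged sketch using execute_script with the same CSS selector as above; whether the page reacts to these synthetic JavaScript clicks exactly like native clicks is an assumption to verify:
# Click every expandable group header in one JavaScript round trip
driver.execute_script("""
    document.querySelectorAll("tr.group-head.clickable th.competition-link > a")
            .forEach(el => el.click());
""")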
This is my Python code to log in to Google:
from seleniumwire.undetected_chromedriver.v2 import Chrome, ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pathlib import Path
def execute_authorization(url, email, password):
    # Create empty profile
    Path("./chrome_profile").mkdir(parents=True, exist_ok=True)
    Path('./chrome_profile/First Run').touch()
    options = {}
    chrome_options = ChromeOptions()
    chrome_options.add_argument('--disable-gpu')
    chrome_options.add_argument('--incognito')
    chrome_options.add_argument('--disable-dev-shm-usage')
    chrome_options.add_argument('--user-data-dir=./chrome_profile/')
    user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36'
    chrome_options.add_argument('user-agent={0}'.format(user_agent))
    chrome_options.add_argument('--headless')
    browser = Chrome(seleniumwire_options=options, options=chrome_options)
    wait = WebDriverWait(browser, 10)
    browser.execute_script("return navigator.userAgent")
    browser.get(url)
    wait.until(EC.visibility_of_element_located((By.XPATH, '//*[@id="identifierId"]')))
    browser.find_element_by_xpath('//*[@id="identifierId"]').send_keys(email)
    wait.until(EC.visibility_of_element_located((By.XPATH, '//*[@id="identifierNext"]/div/button')))
    browser.find_element_by_xpath('//*[@id="identifierNext"]/div/button').click()
    wait.until(EC.visibility_of_element_located((By.XPATH, '//*[@id="password"]/div[1]/div/div[1]/input')))
    browser.find_element_by_xpath('//*[@id="password"]/div[1]/div/div[1]/input').send_keys(password)
    wait.until(EC.visibility_of_element_located((By.XPATH, '//*[@id="passwordNext"]/div/button')))
    browser.find_element_by_xpath('//*[@id="passwordNext"]/div/button').click()
    wait_for_correct_current_url(wait)
    return browser.current_url
In non headless mode everything works fine.
In headless mode, after entering the email, I got the message (shown in the screenshot) that the browser is not safe, so the user-agent approach above did not help.
I also tried the solutions proposed in the post "google login working in without headless but not with headless mode", with no success.
Any other proposals?
When headless mode is activated, the Navigator.Webdriver flag is set to true, which indicates that the browser is controlled by automation tools. The code below worked for me.
chrome_options.add_argument('--disable-blink-features=AutomationControlled')
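For context, here is a minimal sketch of how that argument can be wired into a plain headless Chrome setup. The extra navigator.webdriver masking via CDP is an additional, unverified tweak and not part of the original answer:
from selenium import webdriver

chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-blink-features=AutomationControlled')

driver = webdriver.Chrome(options=chrome_options)

# Optionally hide navigator.webdriver before any page script runs
driver.execute_cdp_cmd(
    "Page.addScriptToEvaluateOnNewDocument",
    {"source": "Object.defineProperty(navigator, 'webdriver', {get: () => undefined})"},
)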
This blog has some other options you could try.
https://piprogramming.org/articles/How-to-make-Selenium-undetectable-and-stealth--7-Ways-to-hide-your-Bot-Automation-from-Detection-0000000017.html
I have a problem with the headless mode of Selenium when logging in to Instagram. I found a standard code snippet for Selenium headless mode, but it fails to find a particular element on the webpage (like the username field on the Instagram main page). The code works fine locally on my PC, but when it is deployed on Heroku it shows the error. The error log is below the code.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import random as rd
import os
import schedule
def job():
    try:
        user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36"
        options = webdriver.ChromeOptions()
        options.headless = True
        options.add_argument(f'user-agent={user_agent}')
        options.add_argument("--window-size=1920,1080")
        options.add_argument('--ignore-certificate-errors')
        options.add_argument('--allow-running-insecure-content')
        options.add_argument("--disable-extensions")
        options.add_argument("--proxy-server='direct://'")
        options.add_argument("--proxy-bypass-list=*")
        options.add_argument("--start-maximized")
        options.add_argument('--disable-gpu')
        options.add_argument('--disable-dev-shm-usage')
        options.add_argument('--no-sandbox')
        options.binary_location = os.environ.get("GOOGLE_CHROME_BINARY")
        CHROMEDRIVER_PATH = os.environ.get("CHROMEDRIVER_PATH")
        wd = webdriver.Chrome(executable_path=CHROMEDRIVER_PATH, chrome_options=options)
        wd.get('https://instagram.com')
        time.sleep(rd.uniform(9, 11))
        username = os.environ.get("INSTAGRAM_USER")
        password = os.environ.get("INSTAGRAM_PASSWORD")
        time.sleep(rd.uniform(2.5, 3.5))
        wd.find_element_by_name('username').send_keys(username)
        time.sleep(rd.uniform(0.95, 1.45))
        wd.find_element_by_name('password').send_keys(password + Keys.ENTER)
        time.sleep(rd.uniform(6, 8))
        wd.get('https://instagram.com')
        time.sleep(rd.uniform(2.5, 3.5))
        print("SUCCESS")
        wd.quit()
    except Exception as e:
        print(e)

schedule.every(3).minutes.do(job)

while True:
    schedule.run_pending()
    time.sleep(10)
Error:
Message: no such element: Unable to locate element: {"method":"css selector","selector":"[name="username"]"}
(Session info: headless chrome=88.0.4324.96)
To send a character sequence within the username field on Instagram you can use either of the following Locator Strategies:
Using css_selector:
driver.find_element(By.CSS_SELECTOR, "input[name='username']").send_keys("Artem")
Using xpath:
driver.find_element(By.XPATH, "//input[@name='username']").send_keys("Artem")
However, the username field on Instagram is a ReactJS enabled element, so ideally, to send a character sequence to the element you need to induce WebDriverWait for the element_to_be_clickable() and you can use either of the following Locator Strategies:
Using CSS_SELECTOR:
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "input[name='username']"))).send_keys("Artem")
Using XPATH:
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//input[@name='username']"))).send_keys("Artem")
Note: You have to add the following imports:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
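As a usage example, here is a hedged sketch of the full sign-in step built from the same waits. It reuses the wd, username, password and Keys names from the question's code, and the 'password' field name also comes from that code; Instagram's markup may change:
wait = WebDriverWait(wd, 20)
# Wait for each field to be clickable, then type into it
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "input[name='username']"))).send_keys(username)
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "input[name='password']"))).send_keys(password + Keys.ENTER)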
When I run the code below, I get selenium.common.exceptions.NoSuchElementException: Message: no such element: Unable to locate element: {"method":"xpath","selector":"//*[@id="i0"]/input"}
(Session info: chrome=83.0.4103.97).
However, in my terminal I can access the required element:
>>> steam_pressure_field = browser.find_element_by_xpath('//*[@id="i0"]/input')
>>> steam_pressure_field.get_attribute('value')
'0'
As you can see in the commented-out part of the code, I have tried waiting with a delay of 3 seconds to see if that made a difference. However, after doing this several times, the page stopped loading when I used the delay (maybe some anti-bot feature?).
So I'm trying to understand what is going on here and how I can successfully access the fields I require. Is there something I am missing, please?
BTW, the div IDs don't seem to be dynamic.
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36'
options = webdriver.ChromeOptions()
options.add_argument(f'user-agent={user_agent}')
browser = webdriver.Chrome(options=options)
url = 'https://www.tlv.com/global/US/calculator/superheated-steam-table.html?advanced=off'
# delay = 3
# try:
# wait = WebDriverWait(browser, delay)
# wait.until(EC.presence_of_element_located((By.ID, "body")))
# browser.get(url)
# print("Page is ready")
# except TimeoutException:
# print("Loading took too much time")
browser.get(url)
steam_pressure_field = browser.find_element_by_xpath('//*[@id="i0"]/input')
print(steam_pressure_field.get_attribute('value'))
To print the value 0 you have to induce WebDriverWait for the visibility_of_element_located() and you can use either of the following Locator Strategies:
Using CSS_SELECTOR:
driver.get('https://www.tlv.com/global/US/calculator/superheated-steam-table.html?advanced=off')
print(WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.CSS_SELECTOR, "div.inputPanel>div.Controlpanel input.inputText"))).get_attribute("value"))
Using XPATH:
driver.get('https://www.tlv.com/global/US/calculator/superheated-steam-table.html?advanced=off')
print(WebDriverWait(driver, 20).until(EC.visibility_of_element_located((By.XPATH, "//div[@class='InputControlLabel' and text()='Steam Pressure']//following-sibling::input[1]"))).get_attribute("value"))
Console Output:
0
Note: You have to add the following imports:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
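Putting the pieces together, here is a self-contained sketch of the CSS_SELECTOR variant above (it assumes chromedriver is reachable, e.g. on PATH):
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get('https://www.tlv.com/global/US/calculator/superheated-steam-table.html?advanced=off')
# Wait until the Steam Pressure input is visible, then read its value
value = WebDriverWait(driver, 20).until(
    EC.visibility_of_element_located((By.CSS_SELECTOR, "div.inputPanel>div.Controlpanel input.inputText"))
).get_attribute("value")
print(value)  # 0, per the console output above
driver.quit()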