I am trying to use Selenium to download an Excel file from a website. I am not sure why the code isn't letting me download it. I get exit code 0, so everything appears to have run successfully, but I am not seeing the file in my Downloads folder.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def scrape_mclellan_website():
    url = 'https://www.mcoscillator.com/market_breadth_data/'
    s = Service(ChromeDriverManager().install())
    op = webdriver.ChromeOptions()
    op.add_argument('headless')
    driver = webdriver.Chrome(service=s)
    driver.get(url)
    download_link = driver.find_element(by=By.XPATH, value='//*[@id="data_table"]/a[1]/img')
    download_link.click()

scrape_mclellan_website()
How can I fix this?
Wait until the element you are trying to click is present, and click the <a>, not the <img>:
download_link = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="data_table"]/a[1]')))
download_link.click()
Set your preferences for the download folder and make sure the window opens at a reasonable size:
prefs = {'download.default_directory':'ENTER PATH TO DOWNLOAD FOLDER'}
options = webdriver.ChromeOptions()
options.add_argument("--window-size=1920,1080")
options.add_argument("--start-maximized")
options.add_argument("--headless")
options.add_experimental_option("prefs",prefs)
Example (Selenium 4)
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
service = Service(executable_path='ENTER YOUR PATH TO CHROMEDRIVER')
prefs = {'download.default_directory':'ENTER PATH TO DOWNLOAD FOLDER'}
options = webdriver.ChromeOptions()
options.add_argument("--window-size=1920,1080")
options.add_argument("--start-maximized")
options.add_argument("--headless")
options.add_experimental_option("prefs",prefs)
driver = webdriver.Chrome(service=service, options=options)
driver.get('https://www.mcoscillator.com/market_breadth_data/')
download_link = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="data_table"]/a[1]')))
download_link.click()
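If the file still does not show up when running headless, note that older headless Chrome builds block downloads by default. A common workaround (sketched here, assuming a Chromium-based driver where this CDP command is available) is to allow downloads explicitly before clicking the link:

# Tell headless Chrome to allow downloads and where to put them (path is a placeholder).
driver.execute_cdp_cmd(
    "Page.setDownloadBehavior",
    {"behavior": "allow", "downloadPath": "ENTER PATH TO DOWNLOAD FOLDER"},
)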
I have written code that logs in to a webpage and tries to download a file. After logging in and before downloading, it needs to select a particular field from a drop-down and then select a particular tab to download the file. Can anyone please advise what could be done here?
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(service=webdriver_service, options=options)
wait = WebDriverWait(driver, 20)
url = "products.markit.com/home/login.jsp"
driver.get(url)
wait.until(EC.element_to_be_clickable((By.NAME, "username"))).send_keys("Admin")
wait.until(EC.element_to_be_clickable((By.NAME, "password"))).send_keys("admin123")
wait.until(EC.element_to_be_clickable((By.TAG_NAME, "button"))).click()
Screenshots (inspect element) are attached for the first selection (Pricing Data - Loan), the Past Loan Market section, and the Download tab.
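Without the page's real HTML it is hard to give exact locators, but the usual pattern is an explicit wait plus Selenium's Select helper for the drop-down, then another wait for the tab and the download control. A rough sketch continuing the snippet above, with placeholder locators (the ID and link texts below are assumptions, not taken from the site):

from selenium.webdriver.support.select import Select

# Placeholder locator: replace with the real drop-down element on the page.
dropdown_el = wait.until(EC.element_to_be_clickable((By.ID, "productSelect")))
Select(dropdown_el).select_by_visible_text("Pricing Data - Loan")

# Placeholder locators for the tab and the download control.
wait.until(EC.element_to_be_clickable((By.LINK_TEXT, "Past Loan Market"))).click()
wait.until(EC.element_to_be_clickable((By.LINK_TEXT, "Download"))).click()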
I would like to auto-click through the website and search for some information, and I tried:
import selenium
import pandas as pd
import numpy as np
import platform
import time
import random
from os import getcwd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--disable-notification")
options.add_argument("--disable-infobars")
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--remote-debugging-port=9230")
#options.add_argument("--headless")
url = 'https://vip.stock.finance.sina.com.cn/mkt/#hs_z'
driver = webdriver.Chrome(options=options)
driver.get(url)
w = WebDriverWait(driver, 10)
w.until(EC.presence_of_element_located((By.XPATH, '//*[@id="inputSuggest"]')))
driver.find_element('xpath', '//*[@id="inputSuggest"]').clear()
driver.find_element('xpath', '//*[@id="inputSuggest"]').send_keys('sz111973'))
driver.find_element('xpath', '//*[@id="SSForm"]/input[3]').click()
But somehow the site does not perform the search and keeps loading, or the browser just closes quickly after the key is typed into the search bar.
Any help will be appreciated! Thanks.
There are several issues here:
To prevent the site from loading for a very long time, you can use the eager pageLoadStrategy.
There is a redundant ) at the end of this line: driver.find_element('xpath', '//*[@id="inputSuggest"]').send_keys('sz111973'))
The following code works as expected:
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
caps = DesiredCapabilities().CHROME
caps["pageLoadStrategy"] = "eager"
webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(service=webdriver_service, options=options, desired_capabilities=caps)
url = 'https://vip.stock.finance.sina.com.cn/mkt/#hs_z'
driver.get(url)
wait = WebDriverWait(driver, 20)
search_input = wait.until(EC.element_to_be_clickable((By.ID, 'inputSuggest')))
search_input.clear()
search_input.send_keys('sz111973')
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#inputSuggest +input'))).click()
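As a side note, desired_capabilities is deprecated in Selenium 4 (and removed in later 4.x releases); the same eager strategy can be set directly on the options object, roughly like this:

options = Options()
options.add_argument("start-maximized")
options.page_load_strategy = "eager"   # replaces the pageLoadStrategy capability
driver = webdriver.Chrome(service=webdriver_service, options=options)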
I made a script that visits a page, logs in, and then gets a download link from the page.
The script works fine on my local Windows machine, but it is not working on an Amazon EC2 instance (Ubuntu).
The code is as below:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from configparser import ConfigParser

dir_chrome_driver = "c:/selenium/driver/chromedriver.exe"
parser = ConfigParser()
option = webdriver.chrome.options.Options()
url = "https://ams.amazon.com/webpublisher/analytics/requested_downloads"
option.add_argument('--user-agent="Chrome/102.0.5005.115"')
option.add_argument("--headless")
option.add_argument('--no-sandbox')
driver = webdriver.Chrome(executable_path=dir_chrome_driver, options=option)
# driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=option)
driver.get(url)
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, '#ap_email')))
driver.find_element(By.ID, "ap_email").send_keys(USER_ID)
driver.find_element(By.ID, "ap_password").send_keys(USER_PASSWORD)
driver.find_element(By.ID, "signInSubmit").click()
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, '.download-link')))
download_link = driver.find_element(By.CSS_SELECTOR, ".download-link")
It gives me an error:
  File "aps.py", line 46, in <module>
    WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, '.download-link')))
  File "/home/ubuntu/.local/lib/python3.8/site-packages/selenium/webdriver/support/wait.py", line 90, in until
    raise TimeoutException(message, screen, stacktrace)
selenium.common.exceptions.TimeoutException
I added a fixed-time wait between the click and the WebDriverWait, like below:
driver.find_element(By.ID, "signInSubmit").click()
time.sleep(30)
WebDriverWait(driver, 30).until(EC.presence_of_element_located((By.CSS_SELECTOR, '.download-link')))
It worked for a while, but it stopped working again today.
I tried changing the wait time, but the driver is still stuck on the login page.
Please advise me if there is any possible cause or solution.
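One thing that can help narrow this down is dumping what the headless browser actually sees right after submitting the login form, for example:

driver.find_element(By.ID, "signInSubmit").click()
print(driver.current_url)                  # check whether the login actually succeeded
driver.save_screenshot("after_login.png")  # look for a captcha/OTP page in the image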
You need a different setup for Selenium on Ubuntu/Debian:
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
chrome_options = Options()
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--headless")
webdriver_service = Service("chromedriver/chromedriver") ## path to where you saved chromedriver binary
browser = webdriver.Chrome(service=webdriver_service, options=chrome_options)
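On a headless Linux instance it usually also helps to set an explicit window size and to stop Chrome from relying on the small /dev/shm partition; optional extra flags to add before creating the browser:

chrome_options.add_argument("--window-size=1920,1080")   # headless default viewport is quite small
chrome_options.add_argument("--disable-dev-shm-usage")   # avoid /dev/shm exhaustion on small EC2 instances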
I was trying to download a file in Google Chrome using Selenium. The code below was working fine, but somehow it doesn't work anymore. Any ideas?
import os.path
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import Select
RAWDATA_URL = 'https://oui.doleta.gov/unemploy/DataDownloads.asp'
options = webdriver.ChromeOptions()
prefs = {'download.default_directory' : SAVE_PATH, "download.prompt_for_download": False}
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(executable_path = DRIVE_PATH, chrome_options = options)
driver.get(RAWDATA_URL)
time.sleep(5)
The XPath below was just copied from the HTML, so it should be correct:
driver.find_element_by_xpath("//*[@id='main']/table[38]/tbody/tr[2]/td[5]/a").click()
I also tried the get method:
driver.get("https://oui.doleta.gov/unemploy/csv/ar9047.csv")
I was expecting the CSV file to download successfully, but Google Chrome just tells me "Failed - Download error".
UPDATE: The question above is a simplified version of my project, which actually has two steps: first scraping the data from one page, and then navigating to another page to download the CSV data.
import datetime
import pandas as pd
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
SUMMARY_URL = "https://oui.doleta.gov/unemploy/reemploy.asp"
RAWDATA_URL = 'https://oui.doleta.gov/unemploy/DataDownloads.asp'
REEMPLOYMENT_QTR = '09/30/2018'
options = webdriver.ChromeOptions()
prefs = {'download.default_directory' : SAVE_PATH, "download.prompt_for_download": False}
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(executable_path = DRIVE_PATH, chrome_options = options)
First Step:
driver.get(SUMMARY_URL)
time.sleep(5)
select = Select(driver.find_element_by_id('qtr'))
select.select_by_value(REEMPLOYMENT_QTR)
driver.find_element_by_xpath("//input[#name='submit'][#type='submit']").click()
re_table = driver.find_element_by_xpath("//*[@id='content']/table")
state = []
value = []
for re in re_table.find_elements_by_tag_name('tr'):
    c = 0
    for ele in re.find_elements_by_tag_name('td'):
        if c == 0:
            state.append(ele.text.encode('utf8'))
            c += 1
        else:
            value.append(ele.text.encode('utf8'))
reemployment = pd.DataFrame({'state' : state, AS_OF_DATE : value})
reemployment = reemployment[['state', AS_OF_DATE]]
Second Step (my original question):
driver.execute_script("window.open('');")
time.sleep(5)
driver.switch_to.window(driver.window_handles[1])
time.sleep(5)
driver.get(RAWDATA_URL)
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//th[text()='ETA 9047']//following::table[1]//tr/td/a[@title='Data']"))).click()
My problem was that the save path for the default download directory had an issue:
it was 'C:/Users/...' but should have been 'C:\Users\...' (written with escaped backslashes in the code), like below:
chrome_options = webdriver.ChromeOptions()
prefs = {
    'download.default_directory': 'C:\\Users\\<username>\\Documents\\test\\',
    "download.prompt_for_download": False,
    "download.directory_upgrade": True,
    "safebrowsing_for_trusted_sources_enabled": False,
    "safebrowsing.enabled": False
}
chrome_options.add_experimental_option('prefs', prefs)
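Even with these prefs, the click returns before Chrome has finished writing the file. A small helper that polls the download folder (a sketch, with the folder and file suffix as placeholders) can confirm the download actually landed:

import os
import time

def wait_for_download(folder, suffix=".csv", timeout=60):
    """Poll the download folder until a finished file with the given suffix appears."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        names = os.listdir(folder)
        finished = any(n.endswith(suffix) for n in names)
        in_progress = any(n.endswith(".crdownload") for n in names)  # Chrome's partial-download extension
        if finished and not in_progress:
            return True
        time.sleep(1)
    return False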
Presumably you are trying to invoke click() on the element with the text Data within the ETA 9047 section. To achieve that, you have to induce WebDriverWait for element_to_be_clickable(), and you can use the following locator strategy:
Using XPATH:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("start-maximized")
driver = webdriver.Chrome(options=chrome_options, executable_path=r'C:\Utility\BrowserDrivers\chromedriver.exe')
driver.get("https://oui.doleta.gov/unemploy/DataDownloads.asp")
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//th[text()='ETA 9047']//following::table[1]//tr/td/a[@title='Data']"))).click()
PS: Ensure that you are using Selenium v3.141.59 with ChromeDriver / Chrome v76.0
I need some help.
There is a URL: https://www.inipec.gov.it/cerca-pec/-/pecs/companies.
I need to click the reCAPTCHA checkbox.
My code looks like this:
import os, urllib.request, requests, datetime, time, random, ssl, json, codecs, csv, urllib
from urllib.request import Request, urlopen
from urllib.request import urlretrieve
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.chrome.options import Options
chromedriver = "chromedriver"
os.environ["webdriver.chrome.driver"] = chromedriver
chrome_options = webdriver.ChromeOptions()
driver = webdriver.Chrome(executable_path=chromedriver, chrome_options=chrome_options)
driver.get("https://www.inipec.gov.it/cerca-pec/-/pecs/companies")
driver.switch_to_default_content()
element = driver.find_elements_by_css_selector('iframe')[1]
driver.switch_to_frame(element)
driver.find_elements_by_xpath('//*[@id="recaptcha-anchor"]/div[1]').click()
During the execution, there is an error:
driver.find_elements_by_xpath('//*[@id="recaptcha-anchor"]/div[1]').click()
AttributeError: 'list' object has no attribute 'click'
Please help me fix it.
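As a side note on the error itself: find_elements_by_xpath (plural) returns a list, so calling .click() on it raises the AttributeError; the singular form returns a single WebElement that can be clicked:

# Singular find_element_by_xpath returns one WebElement, which supports .click()
driver.find_element_by_xpath('//*[@id="recaptcha-anchor"]/div[1]').click()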
Solution update (11-Feb-2020)
Using the following set of binaries:
Selenium v3.141.0
ChromeDriver v80.0
Chrome Version 80.0
You can use the following updated block of code as a solution:
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = webdriver.ChromeOptions()
options.add_argument("start-maximized")
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)
driver = webdriver.Chrome(options=options, executable_path=r'C:\WebDrivers\chromedriver.exe')
driver.get("https://www.inipec.gov.it/cerca-pec/-/pecs/companies")
WebDriverWait(driver, 10).until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR,"iframe[name^='a-'][src^='https://www.google.com/recaptcha/api2/anchor?']")))
WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//span[@id='recaptcha-anchor']"))).click()
Original solution
To invoke click() on the reCAPTCHA checkbox within https://www.inipec.gov.it/cerca-pec/-/pecs/companies you need to:
Induce WebDriverWait for the desired frame to be available and switch to it.
Induce WebDriverWait for the desired element to be clickable.
You can use the following solution:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
options = webdriver.ChromeOptions()
options.add_argument("start-maximized")
options.add_argument('disable-infobars')
driver = webdriver.Chrome(executable_path=r'C:\WebDrivers\chromedriver.exe', chrome_options=options)
driver.get("https://www.inipec.gov.it/cerca-pec/-/pecs/companies")
WebDriverWait(driver, 10).until(EC.frame_to_be_available_and_switch_to_it((By.CSS_SELECTOR,"iframe[name^='a-'][src^='https://www.google.com/recaptcha/api2/anchor?']")))
WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//span[@class='recaptcha-checkbox goog-inline-block recaptcha-checkbox-unchecked rc-anchor-checkbox']/div[@class='recaptcha-checkbox-checkmark']"))).click()
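If you need to interact with the rest of the form after ticking the checkbox, remember that the driver is still focused on the reCAPTCHA iframe, so switch back to the main document first:

# Return from the reCAPTCHA iframe to the top-level document before locating other elements.
driver.switch_to.default_content()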
I resolved this; you can try it with your landing website URL.
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.common.exceptions import SessionNotCreatedException
options = webdriver.ChromeOptions()
prefs = {"download.default_directory": download_dir}
options.add_experimental_option("prefs", prefs)
options.add_argument("--no-sandbox")
driver = webdriver.Chrome("/usr/bin/chromedriver", chrome_options = options)
driver.get("https://www.google.com/recaptcha/api2/demo")
driver.maximize_window()
price = driver.find_element_by_xpath("//div[@class='g-recaptcha']")
price_content = price.get_attribute('innerHTML')
start = str(price_content).find(";k=")+len(";k=")
end = str(price_content).find("&co")
driver.implicitly_wait(20)
driver.execute_script("document.getElementById('g-recaptcha-response').style.display = '';")
recaptcha_text_area = driver.find_element_by_id("g-recaptcha-response")
recaptcha_text_area.clear()
recaptcha_text_area.send_keys(price_content[start:end])
#.....................................................................................
button = driver.find_element_by_id("recaptcha-demo-submit")
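The snippet stops after locating the submit button; presumably the remaining step is simply to click it once the response textarea has been populated:

# Submit the demo form after filling g-recaptcha-response.
button.click()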