How to specify the folder when downloading a file with selenium python - python

Here's my code:
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
PATH = "driver\chromedriver.exe"
options = webdriver.ChromeOptions()
p = {"download.default_directory": "C:\\Users", "safebrowsing.enabled":"false"}
options.add_experimental_option("prefs", p)
driver = webdriver.Chrome(options=options, executable_path=PATH)
url = 'https://www.mergermarket.com/homepage'
driver.get(url)
download = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="__next"]/div/div/div[1]/div/div[2]/div[1]/div/div/div[2]/button[1]')))
download.click()
I did some research, and almost everyone recommends using download.default_directory with ChromeOptions. I did that, as you can see above, but it didn't work. Well, the code runs, but my file is downloaded to the default Downloads folder and not to the path I specified in my code.

You can copy the automatically downloaded file to another path with the os or shutil library in Python, for example as sketched below.
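A minimal sketch of that idea using only the standard library; the download folder, target folder, and file name below are placeholders you would replace with your own:
import os
import shutil
# Hypothetical paths and file name; adjust to your environment.
downloads_dir = os.path.join(os.path.expanduser("~"), "Downloads")
target_dir = r"C:\Users\me\exports"
file_name = "export.xlsx"
os.makedirs(target_dir, exist_ok=True)
# Move the finished download from the default Downloads folder to the target folder.
shutil.move(os.path.join(downloads_dir, file_name), os.path.join(target_dir, file_name))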

Related

website search bar doesn't work for python selenium

I would like to auto-click the website and search for some information, and I tried:
import selenium
import pandas as pd
import numpy as np
import platform
import time
import random
from os import getcwd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
options = webdriver.ChromeOptions()
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--disable-notification")
options.add_argument("--disable-infobars")
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--remote-debugging-port=9230")
#options.add_argument("--headless")
url = 'https://vip.stock.finance.sina.com.cn/mkt/#hs_z'
driver = webdriver.Chrome(options=options)
driver.get(url)
w = WebDriverWait(driver, 10)
w.until(EC.presence_of_element_located((By.XPATH, '//*[@id="inputSuggest"]')))
driver.find_element('xpath', '//*[@id="inputSuggest"]').clear()
driver.find_element('xpath', '//*[@id="inputSuggest"]').send_keys('sz111973'))
driver.find_element('xpath', '//*[@id="SSForm"]/input[3]').click()
But somehow the website cannot search and keeps loading, or the browser just closes quickly after it types the key into the search bar.
Any help will be appreciated! Thanks.
There are several issues here:
To prevent the site from loading for a very long time, you can use the eager pageLoadStrategy.
There is a redundant ) at the end of this line: driver.find_element('xpath', '//*[@id="inputSuggest"]').send_keys('sz111973'))
The following code works correctly:
from selenium import webdriver
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
options = Options()
options.add_argument("start-maximized")
caps = DesiredCapabilities().CHROME
caps["pageLoadStrategy"] = "eager"
webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(service=webdriver_service, options=options, desired_capabilities=caps,)
url = 'https://vip.stock.finance.sina.com.cn/mkt/#hs_z'
driver.get(url)
wait = WebDriverWait(driver, 20)
input = wait.until(EC.element_to_be_clickable((By.ID, 'inputSuggest')))
input.clear()
input.send_keys('sz111973')
wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#inputSuggest +input'))).click()
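If you are on a newer Selenium 4 release where the desired_capabilities argument is no longer accepted, the same eager strategy can be set on the options object instead. A minimal sketch, assuming Selenium Manager (or a chromedriver already on PATH) resolves the driver:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
options = Options()
options.add_argument("start-maximized")
# 'eager' hands control back once the DOM is ready, without waiting for all resources.
options.page_load_strategy = "eager"
driver = webdriver.Chrome(options=options)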

Why isn't selenium successfully clicking my download link?

I am trying to use selenium to download an Excel file from a website. I am not sure why the code isn't downloading it. I get exit code 0, so everything ran successfully, but I am not seeing the file in my downloads.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def scrape_mclellan_website():
    url = 'https://www.mcoscillator.com/market_breadth_data/'
    s = Service(ChromeDriverManager().install())
    op = webdriver.ChromeOptions()
    op.add_argument('headless')
    driver = webdriver.Chrome(service=s)
    driver.get(url)
    download_link = driver.find_element(by=By.XPATH, value='//*[@id="data_table"]/a[1]/img')
    download_link.click()
scrape_mclellan_website()
How to fix?
Wait until the element you try to click is present and click the <a> not the <img>:
download_link = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="data_table"]/a[1]')))
download_link.click()
Set your preferences for the download folder and make sure the window opens at the right size:
prefs = {'download.default_directory':'ENTER PATH TO DOWNLOAD FOLDER'}
options = webdriver.ChromeOptions()
options.add_argument("--window-size=1920,1080")
options.add_argument("--start-maximized")
options.add_argument("--headless")
options.add_experimental_option("prefs",prefs)
Example (selenium 4)
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
service = Service(executable_path='ENTER YOUR PATH TO CHROMEDRIVER')
prefs = {'download.default_directory':'ENTER PATH TO DOWNLOAD FOLDER'}
options = webdriver.ChromeOptions()
options.add_argument("--window-size=1920,1080")
options.add_argument("--start-maximized")
options.add_argument("--headless")
options.add_experimental_option("prefs",prefs)
driver = webdriver.Chrome(service=service, options=options)
driver.get('https://www.mcoscillator.com/market_breadth_data/')
download_link = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="data_table"]/a[1]')))
download_link.click()
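One more thing to check: if the script or the driver quits right after the click, Chrome may not have finished writing the file yet, which also looks like "exit code 0 but no file". A small sketch (not from the original answer) that polls the download folder before quitting; the folder path and file suffix are placeholders:
import os
import time
def wait_for_download(folder, suffix='.xlsx', timeout=60):
    # Poll the folder until a finished file with the suffix appears and no .crdownload remains.
    end = time.time() + timeout
    while time.time() < end:
        files = os.listdir(folder)
        if any(f.endswith(suffix) for f in files) and not any(f.endswith('.crdownload') for f in files):
            return True
        time.sleep(1)
    return False
# Example: call after download_link.click(), with the same folder used in prefs.
# wait_for_download('ENTER PATH TO DOWNLOAD FOLDER')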

Dropdown menu not clicking in python selenium

import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.remote import webelement
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import TimeoutException, NoSuchElementException
import time
url = "https://www.mrporter.com/en-gb/mens/product/nike/shoes/low-top-sneakers/space-hippie-04-recycled-stretch-knit-sneakers/19971654707345242"
PATH = r'C:\Program Files (x86)\chromedriver.exe'
browser = webdriver.Chrome(PATH)
browser.get(url)
element_dropdown = browser.find_element_by_class_name("CombinedSelect11__field CombinedSelect11__field--selectableOption CombinedSelect11__field--nativeSelect")
select = Select(element_dropdown)
try:
    select.select_by_visible_text("8")
except NoSuchElementException:
    print("the item doesn't exist")
I am trying to locate the dropdown menu on the page linked in my code. Once the dropdown box is located, I want to select a size 8 by visible text. However, whatever I try, it still doesn't work.
You can try using an explicit wait and then perform your operation. Note that instead of Select, the code below waits for and clicks the visible dropdown (a div) and then the li option for size 8, since the visible size picker is not a plain select element. Please take a look at the code below, which I wrote to replicate your scenario. It's working fine for me. Do let me know if you face any problems.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
options = webdriver.ChromeOptions()
options.add_argument("start-maximized")
driver = webdriver.Chrome(options=options)
driver.get(
'https://www.mrporter.com/en-gb/mens/product/nike/shoes/low-top-sneakers/space-hippie-04-recycled-stretch-knit-'
'sneakers/19971654707345242')
wait = WebDriverWait(driver, 30)
wait.until(EC.visibility_of_element_located((By.XPATH, '//div[text()="Select a size"]'))).click()
wait.until(EC.visibility_of_element_located((By.XPATH, '//li[#data-value="8"]'))).click()

How to click on download icon on chrome browser using python selenium

I want to download files by clicking on Download icon on Chrome browser.
I tried several ways, like XPath and CSS selectors, but it didn't work. Please let me know if there is any solution for this using Python 3.x and Selenium.
Below is the code that I have tried:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
import time
class TEAutomation:
    def automateTask(self):
        chromeOptions = Options()
        chromeOptions.add_experimental_option("prefs", {"download.default_directory": "/home/vishal/Documents/PythonProgram/"})
        baseUrl = "https://www.te.com/commerce/DocumentDelivery/DDEController?Action=showdoc&DocId=Customer+Drawing%7F160743%7FM2%7Fpdf%7FEnglish%7FENG_CD_160743_M2.pdf%7F160743-1"
        driver = webdriver.Chrome(executable_path="/home/vishal/PycharmProjects/VSProgramming/drivers/chromedriver", chrome_options=chromeOptions)
        driver.maximize_window()
        driver.get(baseUrl)
        driver.implicitly_wait(10)
        driver.find_element(By.XPATH, '//*[@id="download"]').click()
        #driver.find_element(By.CSS_SELECTOR, '#download').click()
        time.sleep(5)
        driver.quit()
molexAuto = TEAutomation()
molexAuto.automateTask()
Thank you in advance.
Maybe the element is still not loaded when you try to click it; try waiting for it with WebDriverWait. I don't have Chrome here, so you will have to test this yourself:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time

class TEAutomation:
    def automateTask(self):
        chromeOptions = Options()
        prefs = {
            "download.default_directory": "/home/vishal/Documents/PythonProgram/",
            # Download PDFs instead of opening them in Chrome's built-in viewer.
            "plugins.always_open_pdf_externally": True
        }
        chromeOptions.add_experimental_option("prefs", prefs)
        baseUrl = "https://www.te.com/commerce/DocumentDelivery/DDEController?Action=showdoc&DocId=Customer+Drawing%7F160743%7FM2%7Fpdf%7FEnglish%7FENG_CD_160743_M2.pdf%7F160743-1"
        driver = webdriver.Chrome(executable_path="/home/vishal/PycharmProjects/VSProgramming/drivers/chromedriver", chrome_options=chromeOptions)
        driver.implicitly_wait(10)
        driver.maximize_window()
        # With the pref above, navigating to the PDF URL triggers the download directly.
        driver.get(baseUrl)
        time.sleep(5)
        driver.quit()

molexAuto = TEAutomation()
molexAuto.automateTask()
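If you do want to click the download icon explicitly, as the question attempts, a hedged sketch using the WebDriverWait already imported above could be inserted before driver.quit() in automateTask; the //*[@id="download"] locator is taken from the question and may need adjusting:
wait = WebDriverWait(driver, 10)
# Wait for the download icon to become clickable before clicking it.
wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="download"]'))).click()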

Python - Can't download file using selenium chromewebdriver - 'Failed - Download error'

I was trying to download a file in Google Chrome using selenium. The code below was working fine, but somehow it doesn't work anymore. Any ideas?
import os.path
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import Select
RAWDATA_URL = 'https://oui.doleta.gov/unemploy/DataDownloads.asp'
options = webdriver.ChromeOptions()
prefs = {'download.default_directory' : SAVE_PATH, "download.prompt_for_download": False}
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(executable_path = DRIVE_PATH, chrome_options = options)
driver.get(RAWDATA_URL)
time.sleep(5)
The XPath below is copied from the HTML, so it should be correct:
driver.find_element_by_xpath("//*[@id='main']/table[38]/tbody/tr[2]/td[5]/a").click()
I also tried the get method:
driver.get("https://oui.doleta.gov/unemploy/csv/ar9047.csv")
I was expecting the csv file to download successfully, but Google Chrome just tells me "Failed - Download error".
UPDATE: The question above is a simplified version. There are actually two steps in my project: first downloading the data from one site, and then navigating to another page to download the csv data.
import datetime
import time
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
SUMMARY_URL = "https://oui.doleta.gov/unemploy/reemploy.asp"
RAWDATA_URL = 'https://oui.doleta.gov/unemploy/DataDownloads.asp'
REEMPLOYMENT_QTR = '09/30/2018'
options = webdriver.ChromeOptions()
prefs = {'download.default_directory' : SAVE_PATH, "download.prompt_for_download": False}
options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(executable_path = DRIVE_PATH, chrome_options = options)
First Step:
driver.get(SUMMARY_URL)
time.sleep(5)
select = Select(driver.find_element_by_id('qtr'))
select.select_by_value(REEMPLOYMENT_QTR)
driver.find_element_by_xpath("//input[#name='submit'][#type='submit']").click()
re_table = driver.find_element_by_xpath("//*[#id='content']/table")
state = []
value = []
for re in re_table.find_elements_by_tag_name('tr'):
    c = 0
    for ele in re.find_elements_by_tag_name('td'):
        if c == 0:
            state.append(ele.text.encode('utf8'))
            c += 1
        else:
            value.append(ele.text.encode('utf8'))
reemployment = pd.DataFrame({'state' : state, AS_OF_DATE : value})
reemployment = reemployment[['state', AS_OF_DATE]]
Second Step (my original question):
driver.execute_script("window.open('');")
time.sleep(5)
driver.switch_to.window(driver.window_handles[1])
time.sleep(5)
driver.get(RAWDATA_URL)
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//th[text()='ETA 9047']//following::table[1]//tr/td/a[@title='Data']"))).click()
My problem was that the save path for the default download directory had an issue:
it was 'C:/Users/...' but should have been 'C:\Users\...', like below:
chrome_options = webdriver.ChromeOptions()
prefs = {
    'download.default_directory': 'C:\\Users\\<username>\\Documents\\test\\',
    "download.prompt_for_download": False,
    "download.directory_upgrade": True,
    "safebrowsing_for_trusted_sources_enabled": False,
    "safebrowsing.enabled": False
}
chrome_options.add_experimental_option('prefs', prefs)
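A hedged alternative to hand-writing the backslashes: build the path with os.path, which produces the OS-native separators, and pass that to prefs (the folder below is a placeholder):
import os
# Hypothetical folder; abspath yields backslash-separated paths on Windows.
download_dir = os.path.abspath(os.path.join(os.path.expanduser('~'), 'Documents', 'test'))
prefs = {
    'download.default_directory': download_dir,
    "download.prompt_for_download": False,
}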
Presumably you are trying to invoke click() on the element with the text Data in the ETA 9047 section. To achieve that, you have to induce WebDriverWait for element_to_be_clickable(), and you can use the following locator strategy:
Using XPATH:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("start-maximized")
driver = webdriver.Chrome(options=chrome_options, executable_path=r'C:\Utility\BrowserDrivers\chromedriver.exe')
driver.get("https://oui.doleta.gov/unemploy/DataDownloads.asp")
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//th[text()='ETA 9047']//following::table[1]//tr/td/a[@title='Data']"))).click()
PS: Ensure that you are using Selenium v3.141.59 with ChromeDriver / Chrome v76.0
