Use Selenium to click JavaScript image in Python

I am trying to use Selenium to click on a JavaScript image on a webpage, but can't figure out how to do it.
The code I have so far is:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
driver = webdriver.Firefox()
driver.get("http://explorer.natureserve.org/servlet/NatureServe?init=Ecol")
assert "NatureServe" in driver.title
#elem = driver.find_element_by_name("")
SciName = driver.find_element_by_name('nameSpec')
SciName.send_keys(names)
mouse = webdriver.ActionChains(driver)
element = driver.find_element_by_name('nameCriteriaForm')
mouse.move_to_element(span_element).click().perform()
The last two lines are clearly wrong. What I want to select is located in the page source as:
<img src=" http://explorer.natureserve.org/images/search_now.gif" width="77" height="17" border="0" align="absmiddle">
It is located multiple times on the page and links to a gif.

The image is just the submit button for the search form, so instead of clicking it you can call submit() on the text input:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
driver = webdriver.Firefox()
driver.get("http://explorer.natureserve.org/servlet/NatureServe?init=Ecol")
SciName = driver.find_element_by_name('nameSpec')
SciName.send_keys(names)
SciName.submit()
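If you do need to click the image itself rather than submit the form, you could locate it by its src attribute instead. A sketch using the same legacy find_element_by_xpath API as the rest of this code; note that find_element returns only the first match, and the question says the image appears multiple times:
# Sketch: click the search image directly by matching its src attribute.
button = driver.find_element_by_xpath("//img[contains(@src, 'search_now.gif')]")
button.click()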

Related

Can't scroll down in web page

Using Python and Selenium, I can't scroll down on this page, https://melkemun.com/, to reveal and load more posts. I tried:
driver.execute_script("window.scrollTo(0,document.body.scrollHeight);")
This page only uses body to crop the displayed area; the element that actually scrolls is
<div class="scroll-body" onscroll="loadMore(event)">
This works for me in the JavaScript console (DevTools in Firefox):
item = document.getElementsByClassName('scroll-body')[0];
item.scrollTo(0, item.scrollHeight);
so in Selenium you may need
driver.execute_script("item=document.getElementsByClassName('scroll-body')[0];item.scrollTo(0,item.scrollHeight);")
The page uses a Shadow Root, and this code works only with Chrome (Selenium 4.4.0):
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
#from webdriver_manager.firefox import GeckoDriverManager
import time
url = 'https://melkemun.com/'
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
#driver = webdriver.Firefox(service=Service(GeckoDriverManager().install()))
driver.get(url)
print('sleep')
time.sleep(5)
shadow = driver.find_element(By.ID, 'stting').shadow_root
#print(shadow)
# select city
shadow.find_elements(By.CLASS_NAME, "btn-city")[0].click()
print('sleep')
time.sleep(5)
for i in range(5):
    print('scrolling:', i)
    #driver.execute_script("window.scrollTo(0,document.body.scrollHeight);")
    driver.execute_script('item=document.getElementsByClassName("scroll-body")[0];item.scrollTo(0,item.scrollHeight);')
    print('sleep')
    time.sleep(5)
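If the fixed time.sleep(5) calls feel fragile, one alternative is to keep scrolling until the number of loaded posts stops growing. This is an untested sketch; the '.post' selector is a guess and would need to be adapted to the page's real markup:
# Sketch: scroll until no new posts load ('.post' is a hypothetical selector).
last_count = 0
while True:
    driver.execute_script('item=document.getElementsByClassName("scroll-body")[0];item.scrollTo(0,item.scrollHeight);')
    time.sleep(2)  # give loadMore() a moment to fetch items
    posts = driver.find_elements(By.CSS_SELECTOR, '.post')
    if len(posts) == last_count:
        break
    last_count = len(posts)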

Selenium 'send_keys' does not work for YouTube

I'm new to Selenium and I'm trying to create my first automation test: open the Chrome browser, open YouTube, and enter a word in the search bar.
The browser opens and YouTube loads, but no word is ever entered.
This is my code:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome('/Users/mariabiriulina/Desktop/chromedriver')
driver.get('https://youtube.com/')
searchbox = driver.find_element(By.XPATH, '//*[@id="search"]')
searchbox.click()
searchbox.send_keys('Selenium')
It seems there is more than one element on the page with the id search, so we need to target the input element with that id specifically.
The code below, with that change, should work:
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
driver = webdriver.Chrome('/Users/mariabiriulina/Desktop/chromedriver')
driver.get('https://youtube.com/')
searchbox = driver.find_element(By.XPATH, '//input[@id="search"]')
searchbox.click()
searchbox.send_keys('Selenium')
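If the click still races the page's own scripts, wrapping the lookup in an explicit wait is usually more reliable than assuming the element is ready. A minimal sketch using the same XPath:
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
# Wait up to 10 seconds for the search input to be clickable before typing.
searchbox = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//input[@id="search"]')))
searchbox.send_keys('Selenium')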

Scraping flex-element Selenium Python

I am trying to scrape some tennis statistics starting from 01-01-2019.
For this I try to scrape the following webpage with selenium: https://www.sofascore.com/de/tennis/2019-01-01
When I click on the first match manually the container on the right side changes and shows the statistics.
This is what I want to access automatically.
When I try to click on the element with selenium it redirects me to another page.
Can anyone tell me why it is not just showing the same content as by manually clicking and how I can solve this issue?
Here is my code:
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait as wait
from selenium.webdriver.support import expected_conditions as EC
import time
options = Options()
options.binary_location = "C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe"
browser = webdriver.Chrome(chrome_options = options)
url = 'https://www.sofascore.com/de/tennis/2019-01-01'
browser.get(url)
browser.maximize_window()
xpath = '/html/body/div[1]/main/div/div[2]/div/div[3]/div[2]/div/div/div/div/div[2]/a/div'
browser.find_element_by_xpath(xpath).click()
time.sleep(2)
browser.close()
You can use the below XPath:
//div[contains(@class, 'Col-pm5mcz-')]//descendant::div[contains(@class, 'styles__StyledWidget-')]
and get its innerHTML using the get_attribute method.
Code :
from time import sleep
# Assumes a driver has already been created, e.g. driver = webdriver.Chrome()
url = "https://www.sofascore.com/de/tennis/2019-01-01"
driver.get(url)
xpath = '/html/body/div[1]/main/div/div[2]/div/div[3]/div[2]/div/div/div/div/div[2]/a/div'
driver.find_element_by_xpath(xpath).click()
sleep(2)
details = driver.find_element_by_xpath("//div[contains(@class, 'Col-pm5mcz-')]//descendant::div[contains(@class, 'styles__StyledWidget-')]").get_attribute('innerHTML')
print(details)
The XPath you are using is an absolute XPath: /html/body/div[1]/main/div/div[2]/div/div[3]/div[2]/div/div/div/div/div[2]/a/div
Try replacing it with a relative XPath.
See if this works
tableRows = driver.find_elements_by_xpath(".//div[@class='ReactVirtualized__Grid ReactVirtualized__List']//following::div/a[contains(@class,'EventCellstyles__Link')]")
for e in tableRows:
    e.click()
    # You can add a wait here for the statistics section to load
    driver.find_element_by_xpath(".//a[text()='Statistiken']").click()
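For the wait mentioned in the comment, an explicit wait on the statistics tab would look roughly like this, reusing the wait (WebDriverWait) and EC imports already present in the question's code:
# Sketch: wait until the statistics tab is clickable before clicking it.
wait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, ".//a[text()='Statistiken']"))).click()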

BeautifulSoup scraping from a web page already opened by Selenium

I would like to scrape a web page that Selenium reached by navigating from a different page.
I entered a search term into a website using Selenium, which landed me on a new page. My aim is to create soup from this new page. But the soup is being created from the previous page, where I entered my search term. Help, please!
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Firefox()
driver.get('http://www.ratestar.in/')
inputElement = driver.find_element_by_css_selector("#txtStock")
inputElement.send_keys('GM Breweries')
inputElement.send_keys(Keys.ENTER)
driver.wait.until(staleness_of('txtStock'))
source = driver.page_source
soup = BeautifulSoup(source)
You need to know the exact company name for your search. After using send_keys, you tried to check for the staleness of an element; I did not understand how that statement was supposed to work, so I added a WebDriverWait for an element of the new page instead.
The following works for me regarding the Selenium part, up to getting the page source:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
driver = webdriver.Firefox()
driver.get('http://www.ratestar.in/')
inputElement = driver.find_element_by_css_selector("#txtStock")
inputElement.send_keys('GM Breweries Ltd.')
inputElement.send_keys(Keys.ENTER)
company = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'lblCompany')))
source = driver.page_source
You should add exception handling.
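For example, the wait raises a TimeoutException if the element never appears; a minimal sketch of handling that around the wait above:
from selenium.common.exceptions import TimeoutException
try:
    company = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'lblCompany')))
except TimeoutException:
    # The results page never appeared; clean up and re-raise (or retry).
    driver.quit()
    raise
source = driver.page_source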
@Jens Dibbern has given a working solution, but it is not necessary to give the exact company name in the search. When you type a non-exact name, a drop-down pops up.
I have observed that until this drop-down is present, the enter key does not work. You can check this by going to the site, pasting the name, and pressing the enter key as fast as possible without waiting: nothing happens.
You can instead wait for this drop-down to be visible and then send the enter key. This also works perfectly. Note that this ends up selecting the first item in the drop-down if more than one is present.
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver = webdriver.Firefox()
driver.get('http://www.ratestar.in/')
inputElement = driver.find_element_by_css_selector("#txtStock")
inputElement.send_keys('GM Breweries')
drop_down=driver.find_element_by_css_selector("#listPlacementStock")
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, '#listPlacementStock:not([style*="display: none"])')))
inputElement.send_keys(Keys.ENTER)
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="CompanyLink"]')))
source = driver.page_source
soup = BeautifulSoup(source,'html.parser')
print(soup)
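Once the soup is built from the new page, you can query it like any parsed document; for instance, a sketch pulling out the company link that the wait above targets (assuming the element keeps the id CompanyLink on the results page):
# Sketch: extract the company link text from the parsed results page.
company_link = soup.find(id='CompanyLink')
if company_link is not None:
    print(company_link.get_text(strip=True))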

Unable to submit keys using selenium with python

Following is the code which I'm trying to run:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import os
import time
#Create a new firefox session
browser=webdriver.Firefox()
browser.maximize_window()
#navigate to app's homepage
browser.get('http://demo.magentocommerce.com/')
#get searchbox and clear and enter details.
browser.find_element_by_css_selector("a[href='/search']").click()
search=browser.find_element_by_class_name('search-input')
search.click()
time.sleep(5)
search.click()
search.send_keys('phones'+Keys.RETURN)
However, I'm unable to submit "phones" using send_keys.
Am I going wrong somewhere?
Secondly, is it possible to always use XPath to locate an element and not rely on id/class/CSS selectors, etc.?
The input element you are interested in has the search-query class name. To make it work without using hardcoded time.sleep() delays, use an Explicit Wait and wait for the search input element to be visible before sending keys to it. Working code:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
browser = webdriver.Firefox()
browser.maximize_window()
wait = WebDriverWait(browser, 10)
browser.get('http://demo.magentocommerce.com/')
browser.find_element_by_css_selector("a[href='/search']").click()
search = wait.until(EC.visibility_of_element_located((By.CLASS_NAME, "search-query")))
search.send_keys("phones" + Keys.RETURN)
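As for the second question: yes, anything locatable by id, class, or CSS selector can also be expressed as an XPath, although CSS selectors are generally easier to read. For example, the wait above could use an XPath locator instead:
# Equivalent locator written as an XPath instead of a class name.
search = wait.until(EC.visibility_of_element_located((By.XPATH, "//input[contains(@class, 'search-query')]")))
search.send_keys("phones" + Keys.RETURN)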
