"Submit is not a function" error in Python Selenium - python

I'm trying to solve a reCAPTCHA on a site using the 2captcha service, but it always returns this error:
selenium.common.exceptions.JavascriptException: Message: javascript error: document.getElementById(...).submit is not a function
My code:
# time, requests, By, driver, mail, page_url, API_KEY and data_sitekey
# are all defined earlier in the script.
try:
    time.sleep(0.3)
    driver.find_element(by=By.XPATH, value='//*[@id="email"]').send_keys(mail)
except:
    def Solver():
        driver.get(page_url)
        u1 = f"https://2captcha.com/in.php?key={API_KEY}&method=userrecaptcha&googlekey={data_sitekey}&pageurl={page_url}&json=1&invisible=1"
        r1 = requests.get(u1)
        print(r1.json())
        rid = r1.json().get("request")
        u2 = f"https://2captcha.com/res.php?key={API_KEY}&action=get&id={int(rid)}&json=1"
        time.sleep(5)
        while True:
            r2 = requests.get(u2)
            print(r2.json())
            if r2.json().get("status") == 1:
                form_token = r2.json().get("request")
                break
            time.sleep(5)
        write_token_js = f'document.getElementById("g-recaptcha-response").innerHTML="{form_token}";'
        submit_js = 'document.getElementById("g-recaptcha-response").submit();'
        driver.execute_script(write_token_js)
        time.sleep(3)
        driver.execute_script(submit_js)
        time.sleep(3)
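A likely cause, though nothing in the question confirms it: submit() is a method of <form> elements, and g-recaptcha-response is a <textarea>, so calling .submit() on it raises exactly this error. A sketch that submits the enclosing form instead (closest("form") assumes the textarea actually sits inside the form; note also that form.submit is shadowed if the form contains a control named "submit"):

# Sketch: write the token, then submit the form that contains the
# g-recaptcha-response textarea rather than the textarea itself.
write_token_js = f'document.getElementById("g-recaptcha-response").innerHTML="{form_token}";'
submit_js = 'document.getElementById("g-recaptcha-response").closest("form").submit();'
driver.execute_script(write_token_js)
driver.execute_script(submit_js)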


Reusing the same browser window in Selenium

I'm currently using Python and Selenium to poll my server for specific tasks to complete. To speed up the process, I have tried to do 2 things:
1. Use options.add_argument(f"user-data-dir={script_directory}\\profile") in the Chrome driver initialisation to avoid having to log in every time.
2. Reuse the same browser window instead of closing and then re-opening the browser every time.
Code:
#!/usr/bin/env python
import pathlib
import time
import urllib.parse

import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By

USER = "..."
PASS = "..."


def upload_to_server(link, redirect, unique_hash):
    try:
        requests.get(
            "https://www.example.com/crons.php?cronUploadToServer=1&link={0}&redirect={1}&hash={2}".format(
                link, redirect, unique_hash))
    except Exception as e:
        print(e)


def download_from_server():
    try:
        server = requests.get("https://www.example.com/crons.php?cronDownloadFromServer=1")
        return server.text.strip()
    except Exception as e:
        print(e)


# tear down chrome.
def tear_down(_driver):
    _driver.quit()
    _driver.close()


def check_for_tasks():
    if download_from_server() == "NO_TASKS":
        print("--> NO TASKS")
    else:
        # init the chrome driver.
        def init_driver(using_linux, proxy):
            script_directory = pathlib.Path().absolute()
            try:
                options = Options()
                options.headless = False
                options.add_argument('start-maximized')
                options.add_argument('--disable-popup-blocking')
                options.add_argument('--disable-notifications')
                options.add_argument('--log-level=3')
                options.add_argument('--ignore-certificate-errors')
                options.add_argument('--ignore-ssl-errors')
                options.add_argument(f"user-data-dir={script_directory}\\profile")
                options.add_experimental_option("excludeSwitches", ["enable-automation"])
                options.add_experimental_option("detach", True)
                prefs = {'profile.default_content_setting_values.notifications': 2}
                options.add_experimental_option('prefs', prefs)
                if proxy == "0.0.0.0:0":
                    print("--> PROXY DISABLED ...")
                else:
                    print("--> PROXY: " + str(proxy) + " ...")
                    options.add_argument('--proxy-server=%s' % proxy)
                if using_linux:
                    return webdriver.Chrome(options=options)
                else:
                    return webdriver.Chrome(options=options)
            except Exception as e:
                print(e)

        # create session.
        driver = init_driver(False, "0.0.0.0:00")
        # starting URL.
        driver.get('https://www.example.com/logon')

        # click recorded.
        def topcashback_click(_driver):
            try:
                _driver.get('https://www.example.com/Earn.aspx?mpurl=shein&mpID=17233')
                if "redirect.aspx?mpurl=shein" in _driver.current_url:
                    return _driver.current_url
                else:
                    return False
            except Exception as e:
                print(e)

        # already logged in check.
        if ">Account</span>" in driver.page_source:
            print("--> LOGGED IN (ALREADY) ...")
            driver.get('https://www.SITE.CO.UK/Earn.aspx?mpurl=shein&mpID=17233')
            try:
                server = download_from_server()
                data_from_server = server.split('|')
                link = topcashback_click(driver)
                print("--> LINK --> " + link)
                time.sleep(4)
                if link != driver.current_url:
                    print("--> LINK (REDIRECT) --> " + driver.current_url)
                    upload_to_server(urllib.parse.quote_plus(link),
                                     urllib.parse.quote_plus(
                                         driver.current_url.replace('https://www.example.com', data_from_server[0])),
                                     data_from_server[1])
                    # print(driver.current_url.replace('https://www.example.com', data_from_server[0]))
                    print("--> LINK UPLOADED TO THE DB ...")
                    # tear_down(driver)
            except Exception as e:
                print(e)
        else:
            # TopCashBack login for the first time.
            def topcashback_login(_driver):
                _driver.get('https://www.example.com/logon')
                # small sleep to let the page load.
                time.sleep(1)
                _driver.find_element(By.XPATH, '//*[@id="txtEmail"]').send_keys(USER)
                time.sleep(1)
                _driver.find_element(By.XPATH, '//*[@id="loginPasswordInput"]').send_keys(PASS)
                time.sleep(1)
                _driver.find_element(By.XPATH, '//*[@id="Loginbtn"]').click()
                time.sleep(5)
                if ">Account</span>" in _driver.page_source:
                    return True
                else:
                    return False

            def topcashback_click(_driver):
                try:
                    _driver.get('https://www.SITE.CO.UK/Earn.aspx?mpurl=shein&mpID=17233')
                    if "redirect.aspx?mpurl=shein" in _driver.current_url:
                        return _driver.current_url
                    else:
                        return False
                except Exception as e:
                    print(e)

            if topcashback_login(driver):
                try:
                    print("--> LOGGED IN ...")
                    server = download_from_server()
                    data_from_server = server.split('|')
                    link = topcashback_click(driver)
                    print("--> LINK --> " + link)
                    time.sleep(4)
                    if link != driver.current_url:
                        print("--> LINK (REDIRECT) --> " + driver.current_url)
                        upload_to_server(urllib.parse.quote_plus(link),
                                         urllib.parse.quote_plus(
                                             driver.current_url.replace('https://www.example.com',
                                                                        data_from_server[0])),
                                         data_from_server[1])
                        # print(driver.current_url.replace('https://www.example.com', data_from_server[0]))
                        print("--> LINK UPLOADED TO THE DB ...")
                        # tear_down(driver)
                except Exception as e:
                    print(e)
            else:
                print("--> ERROR --> DEBUG TIME ...")
                tear_down(driver)


if __name__ == "__main__":
    while True:
        check_for_tasks()
        time.sleep(2)
It's the 2nd one I'm having trouble with. Currently, with my code, I'm getting this error:
driver.get('https://www.example.com/logon')
AttributeError: 'NoneType' object has no attribute 'get'
I think this is because I'm not connecting to the first browser window; instead, a new one opens and fails immediately with the error above.
Is there a way to keep the first browser open and reuse it? Any help would be appreciated.
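A sketch of one way to get that reuse, under the assumption that init_driver is pulled out of check_for_tasks to module level and check_for_tasks is refactored to accept the driver as a parameter (neither is how the code above is structured today): create the browser once, pass the same instance into every iteration, and quit only when the script exits. This also removes the NoneType error, because a failed launch stops the script instead of handing a swallowed None back to driver.get().

# Sketch: one browser for the whole run, reused across iterations.
# Assumes init_driver is module-level and check_for_tasks(driver)
# takes the driver instead of creating its own (hypothetical refactor).
if __name__ == "__main__":
    driver = init_driver(False, "0.0.0.0:0")
    if driver is None:  # init_driver swallows exceptions and returns None
        raise SystemExit("--> DRIVER FAILED TO START")
    try:
        while True:
            check_for_tasks(driver)
            time.sleep(2)
    finally:
        driver.quit()  # tear down only once, at the very end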

Instaloader get_followers issue

So I wrote this code to get the list of followers on Instagram using the Instaloader library in Python:
login_name = 'beyondhelloworld'
target_profile = 'femindharamshi'
# OR
# import sys
# target_profile = sys.argv[1]  # pass in target profile as argument

from instaloader import Instaloader, Profile

loader = Instaloader()

# login
try:
    loader.load_session_from_file(login_name)
except FileNotFoundError:
    loader.context.log("Session file does not exist yet - Logging in.")
if not loader.context.is_logged_in:
    loader.interactive_login(login_name)
    loader.save_session_to_file()

profile = Profile.from_username(loader.context, target_profile)
followers = profile.get_followers()

loader.context.log()
loader.context.log('Profile {} has {} followers:'.format(profile.username, profile.followers))
loader.context.log()

for follower in followers:
    loader.context.log(follower.username, flush=True)
But I keep getting this error:
Loaded session from /Users/femindharamshi/.config/instaloader/session-beyondhelloworld.
Traceback (most recent call last):
File "/Users/femindharamshi/Documents/instaload/env/lib/python3.7/site-packages/instaloader/structures.py", line 597, in _obtain_metadata
self._node = metadata['entry_data']['ProfilePage'][0]['graphql']['user']
KeyError: 'graphql'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "il.py", line 20, in <module>
profile = Profile.from_username(loader.context, target_profile)
File "/Users/femindharamshi/Documents/instaload/env/lib/python3.7/site-packages/instaloader/structures.py", line 552, in from_username
profile._obtain_metadata() # to raise ProfileNotExistException now in case username is invalid
File "/Users/femindharamshi/Documents/instaload/env/lib/python3.7/site-packages/instaloader/structures.py", line 606, in _obtain_metadata
', '.join(similar_profiles[0:5]))) from err
instaloader.exceptions.ProfileNotExistsException: Profile femindharamshi does not exist.
The most similar profile is: femindharamshi.
How do I solve this issue?
The output says that profile "femindharamshi" does not exist, but that is exactly what my profile is. It also says:
The most similar profile is: femindharamshi.
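For what it's worth, and not stated in the thread: this KeyError: 'graphql' followed by a ProfileNotExistsException that names your own profile as "the most similar profile" typically means the installed Instaloader version no longer matches Instagram's page format, and upgrading the package (pip install --upgrade instaloader) is the commonly suggested fix. Below are two further snippets that fetch followers in different ways.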
import instaloader
import random
import os

dir_path_driver = os.getcwd()


def username_password():
    listusername = []
    with open("./username.txt", "r") as usernames:
        for username in usernames:
            listusername.append((username.rstrip("\n")).split(":"))
    if len(listusername) == 1:
        select = 0
    else:
        # randint's upper bound is inclusive, so use len - 1 to stay in range
        select = random.randint(0, len(listusername) - 1)
    return listusername[select][0], listusername[select][1]


def get_followers():
    L = instaloader.Instaloader()
    # Login or load session
    username, password = username_password()
    listfile = os.listdir(dir_path_driver + "/cookie")
    for i in listfile:
        if i != f"{username}":
            L.login(username, password)
            L.save_session_to_file(filename=dir_path_driver + "/cookie/" + f"{username}")
        else:
            L.load_session_from_file(filename=dir_path_driver + "/cookie/" + f"{username}", username=username)
    file = open("prada_followers.txt", "a+")
    profile = instaloader.Profile.from_username(L.context, "idinstagram")
    for followee in profile.get_followers():
        username = followee.username
        file.write(username + "\n")
    file.close()
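The last snippet drops Instaloader entirely and drives Instagram's web UI with Selenium instead: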
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep


class InstaBot:
    """InstaBot can login, can return unfollowers that don't
    follow you back.
    Object requires two args.
    'Username' & 'Password' """

    def __init__(self, username, pw):
        self.username = username
        self.pw = pw
        self.driver = webdriver.Chrome(executable_path='chromedriver.exe')
        self.base_url = "https://instagram.com"
        self.driver.get("{}".format(self.base_url))
        sleep(2)
        self.driver.maximize_window()
        self.login()

    def login(self):
        self.driver.find_element_by_xpath("//input[@name=\"username\"]") \
            .send_keys(self.username)
        self.driver.find_element_by_xpath("//input[@name=\"password\"]") \
            .send_keys(self.pw)
        self.driver.find_element_by_xpath("//button[@type=\"submit\"]") \
            .click()
        sleep(10)
        self.driver.find_element_by_xpath("//button[contains(text(), 'Not Now')]") \
            .click()
        sleep(2)

    def get_unfollowers(self):
        self.driver.find_element_by_xpath("//a[contains(@href, '/{}')]".format(self.username)) \
            .click()
        sleep(3)
        self.driver.find_element_by_xpath("//a[contains(@href, '/following')]") \
            .click()
        sleep(2)
        following = self._get_names()
        self.driver.find_element_by_xpath("//a[contains(@href, '/followers')]") \
            .click()
        sleep(2)
        followers = self._get_names()
        not_following_back = [user for user in following if user not in followers]
        return not_following_back
        ## suggetions = self.driver.find_element_by_xpath('//h4[contains(text(), Suggetions)]')
        ## self.driver.execute_script('arguments[0].scrollIntoView()', suggetions)

    def _get_names(self):
        scroll_box = self.driver.find_element_by_xpath("/html/body/div[4]/div/div[2]")
        last_ht, ht = 0, 1
        while last_ht != ht:
            last_ht = ht
            sleep(1)
            ht = self.driver.execute_script("""
                arguments[0].scrollTo(0, arguments[0].scrollHeight);
                return arguments[0].scrollHeight;
                """, scroll_box)
        links = scroll_box.find_elements_by_tag_name('a')
        names = [name.text for name in links if name.text != '']
        sleep(2)
        self.driver.find_element_by_xpath("/html/body/div[4]/div/div[1]/div/div[2]/button") \
            .click()
        return names

    def navigate_to_user(self, user):
        self.driver.get("{}/{}".format(self.base_url, user))

    def scroll_down(self):
        last_height = self.driver.execute_script("return document.body.scrollHeight")
        while True:
            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            sleep(2)
            new_height = self.driver.execute_script("return document.body.scrollHeight")
            if new_height == last_height:
                break
            last_height = new_height


my_bot = InstaBot(USERNAME, PASSWORD)  # your Instagram credentials
## unfollowers = my_bot.get_unfollowers()  # will return a list
my_bot.navigate_to_user("some_user_you_follow")  # will return your friend's followers list

requests_html stop website from redirecting

I am trying to scrape the following link https://9anime.to/watch/one-piece-dub.34r/r2wjlq using Python/requests_html.
My problem is that the page gets auto-redirected to the default server tab instead of the mp4upload tab; I'm trying to find a fix for this but can't figure it out.
Below is the code:
import re
import requests
import cloudscraper
from urllib import parse
from bs4 import BeautifulSoup
from requests_html import HTMLSession

base_url = 'https://9anime.to'


class nine_scraper:

    def get_ep_links(url):
        html = nine_scraper.get_html(url, True)
        servers = html.find('div', id='servers-container')
        if servers:
            results = []
            mp4upload_results = []
            mp4upload = servers.find('div', attrs={'data-id': '35'})
            mp4upload_eps = mp4upload.find_all('a', href=True)
            for ep in mp4upload_eps:
                x = (ep.get('href'), ep.text)
                mp4upload_results.append(x)
            for result in mp4upload_results:
                results.append(base_url + result[0])
            return results
        else:
            print('No servers found!!')

    def get_series_info(url):
        return

    def get_servers(html):
        return

    def find_download(url):
        html = nine_scraper.get_html(url, True)

    def search(query):
        if '&page=' in query:
            query = query.split('&page=')
            search_url = base_url + '/search?keyword=' + parse.quote(query[0]) + '&page=' + query[1]
        else:
            search_url = base_url + '/search?keyword=' + parse.quote(query)
        html = nine_scraper.get_html(search_url, False)
        film_list = html.find('div', class_='film-list')
        if film_list:
            results = []
            prev_page = html.find('a', class_='pull-left')
            next_page = html.find('a', class_='pull-right')
            films = film_list.find_all('div', class_='inner')
            for film in films:
                results.append((film.find('a', class_='name').text.strip(),
                                film.find('a', class_='name').get('href').strip()))
            if prev_page.get('href'):
                param = parse.urlsplit(base_url + '/' + prev_page.get('href')).query
                url = parse.unquote_plus(param.replace('keyword=', ''), encoding='utf-8')
                results.append(('Previous page', url))
            if next_page.get('href'):
                param = parse.urlsplit(base_url + '/' + next_page.get('href')).query
                url = parse.unquote_plus(param.replace('keyword=', ''), encoding='utf-8')
                results.append(('Next page', url))
            return results
        else:
            print('No results found!')

    def get_html(url, render_js=False):  # Load webpage and return its html
        try:
            if render_js:  # Page needs to render javascript, so use 'requests_html'
                session = HTMLSession()  # Make a GET request to the webpage
                resp = session.get(url, timeout=10)
                resp.raise_for_status()  # Raise an exception if response doesn't come back 200-400
                resp.html.render(timeout=10)  # Render the javascript
                html = BeautifulSoup(resp.html.html, 'html.parser')  # Parse the html with 'BeautifulSoup4'
                return html  # Return the parsed html
            else:  # Use 'cloudscraper' since we don't need to load any javascript
                c_scraper = cloudscraper.create_scraper()  # Make a GET request to the webpage
                resp = c_scraper.get(url)
                resp.raise_for_status()  # Raise an exception if response doesn't come back 200-400
                html = BeautifulSoup(resp.content, 'html.parser')  # Parse the html with 'BeautifulSoup4'
                return html  # Return the parsed html
        except requests.HTTPError as e:
            print(f'HTTP error occurred: {e}')
        except requests.ConnectionError as e:
            print(f'Connection Error occurred: {e}')
        except requests.Timeout as e:
            print(f'Timeout Error occurred: {e}')
        except requests.RequestException as e:
            print(f'General Error occurred: {e}')
        except Exception as e:
            print(f'Other error occurred: {e}')
        except KeyboardInterrupt:
            print("Someone closed the program")
import sys
from os import system, name

from scrapers import nine_scraper


def screen_clear():
    # for mac and linux (os.name is 'posix')
    if name == 'nt':
        _ = system('cls')
    else:
        _ = system('clear')


def main_menu():
    while True:
        screen_clear()
        print('------9anime downloader------\n[1] Search \n[2] Download \n[3] Exit\n-----------------------------\n')
        main_choice = input('Enter your choice [1-3] >')
        if main_choice == '1':
            search_menu()
            break
        elif main_choice == '2':
            continue
        elif main_choice == '3':
            screen_clear()
            sys.exit()
        else:
            continue


def search_menu(query=False):
    screen_clear()
    print('--------------9anime downloader/search--------------\n')
    if query:
        search_results = nine_scraper.search(query)
        results_menu(search_results)
    else:
        query = input('Please enter the name of the anime >')
        if query:
            search_results = nine_scraper.search(query)
            results_menu(search_results)


def results_menu(results):
    for num, result in enumerate(results, 1):
        title = result[0]
        link = result[1]
        if 'Previous page' not in title:
            if 'Next page' in title:
                n = True
                print('[N] ' + title)
            else:
                print(f'[{num}] {title}')
        else:
            p = True
            print('[P] ' + title)
    print('[M] Main menu')
    titles, links = map(list, zip(*results))
    while True:
        search_choice = input('Enter choice >')
        try:
            search_choice = int(search_choice)
            if 1 <= search_choice <= len(results) + 1:
                print(links[search_choice - 1])
                print(titles[search_choice - 1])
                ep_links = nine_scraper.get_ep_links(links[search_choice - 1])
                for link in ep_links:
                    print(link)
                    nine_scraper.find_download(link)
                # series_menu(links[search_choice - 1])
                break
        except ValueError:
            if search_choice.lower() == 'm':
                main_menu()
                break
            elif search_choice.lower() == 'p':
                if p:
                    url = links[-2]
                    search_menu(url)
                    break
                continue
            elif search_choice.lower() == 'n':
                if n:
                    url = links.pop()
                    search_menu(url)
                    break
                continue


def series_menu(url):
    info = nine_scraper.get_series_info()


main_menu()
I know it has to be some JavaScript that is redirecting the page, but I can't figure out what I need to do to stop that. Any help would be very appreciated!
Using requests_html you can set allow_redirects=False like this:
r = session.get(url, allow_redirects=False)
Now your request should go only to the requested URL.
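One caveat worth adding (an observation, not part of the answer above): allow_redirects=False only suppresses HTTP 3xx redirects. A redirect performed by JavaScript happens later, when .render() executes the page's scripts, so it is unaffected by this flag. A small sketch for checking which kind of redirect you are dealing with, using the same session object as above:

from requests_html import HTMLSession

session = HTMLSession()
r = session.get('https://9anime.to/watch/one-piece-dub.34r/r2wjlq',
                allow_redirects=False)
# If the server itself redirects, the target shows up in the headers.
if 300 <= r.status_code < 400:
    print('HTTP redirect to:', r.headers.get('Location'))
else:
    print('No HTTP redirect; any tab switch must be happening client-side in JS.')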

Python Selenium web scraper slows down when the internet is being used by another application

I built a Selenium web scraper (see below for code). It works fine and normally takes 4-6 seconds per loop. However, if I use a different web browser to do something else, say check my email, the web scraper slows down (sometimes taking up to a couple of minutes per loop), and it also takes a long time to load my email (or whatever else I am trying to do with the internet).
Is there something wrong with my scraper? Or is it just not possible to run a web scraper while also using the internet for other things? Or...
Thanks!
import re
import time
from pathlib import Path

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

# naics is assumed to be a pandas DataFrame of company data loaded earlier.
counter = 36386
options = Options()
options.set_headless(True)
driver = webdriver.Firefox(options=options, executable_path=r'C:\Users\jajacobs\Downloads\geckodriver.exe')
while counter <= 50000:
    start_time = time.time()
    try:
        driver.get("url goes here")
        timeout = 20
        inputElement = driver.find_element_by_name("naics_lookup[companyName]")
        inputElement.send_keys(naics.iloc[counter, 1])
        inputElement = driver.find_element_by_name("naics_lookup[city]")
        inputElement.send_keys(naics.iloc[counter, 3])
        inputElement = driver.find_element_by_name("naics_lookup[state]")
        inputElement.send_keys(naics.iloc[counter, 2])
        inputElement.submit()
        print('Looking for NAICS code of company number ', counter)
        try:
            element_present = EC.presence_of_element_located((By.CLASS_NAME, 'results'))
            WebDriverWait(driver, timeout).until(element_present)
            print("element is ready")
            try:
                data = driver.find_element_by_class_name('results').text
                naics.at[counter, 'naics'] = re.findall(r"\D(\d{6})\D", data)[0]
                print(re.findall(r"\D(\d{6})\D", data)[0])
            except:
                print("No NAICS code")
        except:
            print("element did not load")
        # checkpoints at which to save progress (renamed from 'list'
        # so the builtin isn't shadowed; duplicate 25000 removed)
        save_points = [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000,
                       11000, 12000, 13000, 14000, 15000, 16000, 17000, 18000, 19000, 20000,
                       21000, 22000, 23000, 24000, 25000, 26000, 27000, 28000, 29000, 30000,
                       31000, 32000, 33000, 34000, 35000, 36000, 37000, 38000, 39000, 40000,
                       41000, 42000, 43000, 44000, 45000, 46000, 47000, 48000, 49000, 50000]
        if counter in save_points:
            data_folder = Path('C:/Users/jajacobs/Documents/ipynb/')
            file_to_save = data_folder / ('naics' + str(counter) + '.csv')
            naics.to_csv(file_to_save)
        counter += 1
    except Exception as e:
        print(e)
    print("total time taken this loop: ", time.time() - start_time)
driver.close()
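One mitigation worth trying (an assumption, not something established in the thread): when another application is saturating the connection, driver.get() can block for a long time, so bounding the page load keeps a single slow iteration from stalling the whole loop. A sketch using Selenium's page-load timeout:

from selenium.common.exceptions import TimeoutException

driver.set_page_load_timeout(30)  # cap how long driver.get() may block
try:
    driver.get("url goes here")
except TimeoutException:
    # bandwidth contention made this load too slow; skip and move on
    print("page load exceeded 30s, skipping company number", counter)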

Selenium Python Automation

I have a table with multiple pages. I want to select, say, 5 elements from the table and click the checkboxes corresponding to them at a time. How is that possible through Selenium Python automation?
import json
import time
from datetime import datetime, timedelta

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException

# The methods below belong to the poster's page-object class
# (the class definition itself is not shown in the excerpt).


def __init__(self, env):
    self.driver = webdriver.Firefox()
    self.driver.maximize_window()
    if env == 'Staging':
        self.driver.get("https://serviceconsole-stg.tivo.com/serviceconsole/login.action")
    elif env == 'QE':
        self.driver.get("http://serviceconsolese01.tivo.com:9090/serviceconsole")
    else:
        print("Environment is not available", env)
        print("\n Supported Environments are Staging and QE")
        self.driver.quit()
        raise SystemExit("Program Exited")
    with open('config.json', 'r') as user_credentials:
        config = json.load(user_credentials)
    self.driver.find_element_by_id('username').send_keys(config['user']['name'])
    self.driver.find_element_by_id('password').send_keys(config['user']['password'])
    self.driver.find_element_by_id("signIn").click()
    try:
        self.driver.find_element_by_xpath('//*[@id="loginValidationError"]')
        print("Login Not successful")
        self.driver.quit()
        raise SystemExit("Program Exited")
    except NoSuchElementException:
        print("Login Successful")


def addnewlinearpackage(self, title, enddate_days_from_today):
    try:
        # Select Manage
        self.driver.find_element_by_xpath("//*[@id='configuration-tab']").click()
        # Creating new Linear Package
        self.driver.find_element_by_id("linearpublishing").click()
        self.driver.find_element_by_id("linpub").click()
        self.driver.find_element_by_id("addLinearPackage").click()
        self.driver.find_element_by_id("linearpackageTitle").send_keys(title)
        self.driver.find_element_by_id('tempPackageId').send_keys(
            datetime.strftime(datetime.now(), '%Y%m%d%H%M'))
        self.driver.find_element_by_id("inlineLinearPackageCheckbox").click()
        start_time = self.driver.find_element_by_id('startDate')
        execute = start_time.find_element_by_xpath("*//span[@class='fa fa-calendar']")
        self.driver.execute_script("arguments[0].click();", execute)
        time.sleep(7)
        end_time = self.driver.find_element_by_id('endDate')
        end_time.find_element_by_xpath("*//span[@class='fa fa-calendar']").click()
        end_date = (datetime.now() + timedelta(days=enddate_days_from_today)).strftime('%m/%d/%Y')
        self.driver.find_element_by_xpath("*//td[@data-day='" + end_date + "']").click()
        time.sleep(7)
    except NoSuchElementException as exp:
        print(exp)
        self.driver.quit()
        raise SystemExit("Program Exited")


def addlinearservice(self, serviceId):
    try:
        self.driver.find_element_by_id("linearServiceSection").click()
        time.sleep(10)
        self.driver.find_element_by_id("publishLinearPackageBtn").click()
        time.sleep(30)
        self.driver.find_element_by_class_name("sorting_1")
        linear_service_found = False
        # Searching existing linear service
        if linear_service_found == False:
            try:  # Search in first page
                self.driver.find_element_by_xpath(
                    "/html/body/div[4]/div/div/div[2]/div/div/div/div[2]/div[2]/div/ul/li[9]/a").click()
                if self.driver.find_element_by_link_text(serviceId).is_displayed():
                    self.driver.find_element_by_xpath(
                        "//a[contains(text(),'" + serviceId + "')]/following::td/input[@type='checkbox']").click()
                    linear_service_found = True
                    print("Linear service found")
            except NoSuchElementException:
                print("No such Element found in page 1")
        try:
            while linear_service_found == False:  # loop to navigate to next page till finding the service ID
                try:  # Search till last page is reached and next button is disabled
                    self.driver.find_element_by_xpath(
                        "//*[@id='associatedLinearServicesTable1_next']/../li[@class='paginate_button next disabled']")
                    print('No further Page available to search')
                    break
                except NoSuchElementException:
                    try:
                        self.driver.find_element_by_xpath(
                            '/html/body/div[4]/div/div/div[2]/div/div/div/div[2]/div[2]/div/ul/li[9]/a').click()
                        if self.driver.find_element_by_link_text(serviceId).is_displayed():
                            # click the checkbox of Service ID
                            self.driver.find_element_by_xpath(
                                "//a[contains(text(),'" + serviceId + "')]/following::td/input[@type='checkbox']").click()
                            linear_service_found = True
                            print("Linear Service found")
                            break
                    except NoSuchElementException:
                        print("No such Element found in current page")
        except NoSuchElementException:
            print("No such Element found")
        if linear_service_found == True:
            time.sleep(10)
            # Click on Save button
            self.driver.find_element_by_xpath('/html/body/div[4]/div/div/div[3]/button[1]').click()
            time.sleep(10)
    except NoSuchElementException as exp:
        print(exp)
        self.driver.quit()
        raise SystemExit("Program Exited")


def publish(self):
    try:
        self.driver.find_element_by_xpath('//button[contains(text(), "Publish")]').click()
        time.sleep(5)
        self.driver.find_element_by_xpath('//*[@id="confirmDialogOk"]').click()
        time.sleep(10)
        try:
            self.driver.find_element_by_xpath('//*[@id="appSuccessMsg"]')
            print("Linear Package Published Successfully")
        except NoSuchElementException:
            print("Linear Package NOT PUBLISHED.. check the Error Message in Service console webpage")
            time.sleep(60)
            self.driver.quit()
            raise SystemExit("Program Exited")
    except NoSuchElementException as exp:
        print(exp)
        self.driver.quit()
        raise SystemExit("Program Exited")


def exit(self):
    print("Exiting.....")
    time.sleep(5)
    self.driver.quit()
Please find the full code above. This code works only for selecting one element; I have to select multiple elements.
If the checkboxes have a common locator, you can use find_elements_by_xpath instead of find_element_by_xpath. This will return a list of WebElements, which you can then iterate over to click the boxes.
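A minimal sketch of that idea, reusing the question's own row locator; the //a[contains(text(), ...)] pattern and the five example service IDs are assumptions, since the table's real markup isn't shown:

# Sketch: for each wanted service ID, collect matching checkboxes with
# find_elements_by_xpath and tick the first one if it isn't already ticked.
wanted_ids = ["svc1", "svc2", "svc3", "svc4", "svc5"]  # hypothetical IDs
for service_id in wanted_ids:
    boxes = self.driver.find_elements_by_xpath(
        "//a[contains(text(),'" + service_id + "')]/following::td/input[@type='checkbox']")
    if boxes:
        if not boxes[0].is_selected():
            boxes[0].click()
    else:
        print("No checkbox found for", service_id)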
