With the recent announcement of Twitter charging for their API usage, I am trying to Tweet without it. But I am running into issues, from my Ubuntu box. Below is combination of various Python scripts found online.
The error I am running into is after successful login, I get
selenium.common.exceptions.ElementClickInterceptedException:
Message: element click intercepted: Element <div dir="ltr"
class="css-901oao r-18jsvk2 r-37j5jr r-a023e6 r-16dba41
r-rjixqe r-bcqeeo r-qvutc0">...</div> is not clickable at point (0, 1).
Other element would receive the click:
<header role="banner" class="css-1dbjc4n r-obd0qt r-16y2uox r-lrvibr r-1g40b8q">...</header>
Code
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
options = Options()
options.add_argument('--headless')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
userAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36"
options.add_argument(f'user-agent={userAgent}')
browser = webdriver.Chrome(service=Service(ChromeDriverManager().install()), options=options)
url = 'https://twitter.com/login'
# change this to your username and password
userUsername = 'user'
userPassword = 'pw'
def main():
browser.get(url)
browser.implicitly_wait(5)
user = browser.find_element("name", "text")
user.send_keys(userUsername)
user.send_keys(Keys.ENTER)
browser.implicitly_wait(2)
password = browser.find_element("name", "password")
password.send_keys(userPassword)
browser.implicitly_wait(5)
password.send_keys(Keys.ENTER)
browser.implicitly_wait(0)
button1 = browser.find_element(By.CSS_SELECTOR,'div[dir="ltr"]')
button1.click()
browser.implicitly_wait(5)
tweet = browser.find_element(By.CSS_SELECTOR,"br[data-text='true']")
tweet.click()
browser.implicitly_wait(1)
tweet.send_keys('Testing1234')
button2 = browser.find_element(By.CSS_SELECTOR,"div[data-testid='tweetButtonInline']")
button2.click()
browser.close()
main()
Instead of this locator,
button1 = browser.find_element(By.CSS_SELECTOR,'div[dir="ltr"]')
to click on post a tweet use below locator.
button1 = browser.find_element(By.CSS_SELECTOR,'a[aria-label="Tweet"]')
Please provide delay or explicit wait while interacting with the element.
Also need to change tweet message locator and Tweet button
tweet = browser.find_element(By.CSS_SELECTOR,'div[aria-label="Tweet text"]')
AND
button2 = browser.find_element(By.CSS_SELECTOR,"div[data-testid='tweetButton']")
Related
The error messageI try to extract data from the below website. But when the selenium click the "search" button (the last step of the code), error was returned, it seems blocked by the server (It is totally alright when I access the website manually. But when I use automated Chrome browser, the attached error message was returned when I click the "search" button). How should I get around this?
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import time
ser = Service(r"C:\Users\shekc\Documents\chromedriver.exe")
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)
options.add_argument("–Referer=https://www.dahsing.com/jsp/fundPlatform/index_e.jsp")
options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36")
driver = webdriver.Chrome(options=options, service=ser)
url = "https://www.dahsing.com/jsp/fundPlatform/risk_warning_e.jsp"
driver.get(url)
time.sleep(3)
# click "Agree"
driver.find_element(By.LINK_TEXT,"Agree").click()
driver.switch_to.default_content()
driver.switch_to.frame(1)
# return the # Fund house
from selenium.webdriver.support.ui import Select
Select =Select(driver.find_element(By.XPATH,'//*[#id="mainContent_ddlFundHouse"]'))
FH_No=len(Select.options)
# select " all per page"
from selenium.webdriver.support.ui import Select
Select =Select(driver.find_element(By.XPATH,'//*[#id="mainContent_ddlPageNumber"]'))
Select.select_by_index(len(Select.options)-1)
Select =Select(driver.find_element(By.XPATH,'//*[#id="mainContent_ddlFundHouse"]'))
Select.select_by_index(1)
FH_name=Select.first_selected_option.text
# click "Search"
driver.find_element(By.LINK_TEXT,"Search").click()
I get this weird access denied message when I try to login to the "offspring.co.uk" website. This denial message pops up right after clicking the login button. I heard something about the Akamai Bot-Protection on this website. Maybe this protection detects my automation. Does anyone know how to prevent this website access denial?
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver import ActionChains
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time
def call_Website():
# configurations
profile = webdriver.FirefoxProfile()
profile.accept_untrusted_certs = True
profile.set_preference("general.useragent.override","Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:92.0) Gecko/20100101 Firefox/92.0")
firefox_capabilities = webdriver.DesiredCapabilities.FIREFOX
firefox_capabilities['marionette'] = True
# start webdriver etc
browser = webdriver.Firefox(firefox_profile=profile, desired_capabilities=firefox_capabilities)
wait = WebDriverWait(browser,20)
action = ActionChains(browser)
###########checkig if proxy works, old snippet
try:
browser.get("https://httpbin.org/ip")
except:
browser.close()
print("proxy was not working")
##############################################
time.sleep(2)
browser.get('https://www.offspring.co.uk/view/secured/content/login')
time.sleep(2)
# accept cookie
browser.find_element_by_css_selector("#onetrust-accept-btn-handler").click()
time.sleep(1)
#choose currency
browser.find_element_by_css_selector("li.EUR:nth-child(2)").click()
#fills out username
username_form = browser.find_element_by_css_selector('#user')
action.move_to_element(username_form).pause(1).click().pause(0.5).send_keys('username')
#fills out password
password_form = browser.find_element_by_css_selector('#loginpassword')
action.pause(2).move_to_element(password_form).pause(1).click().pause(0.5).send_keys('password')
#clicks on login
Login_Btn = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'#loginButton')))
action.move_to_element(Login_Btn).pause(1).click().perform()
if __name__ == "__main__":
call_Website()
And here is the "Access Denied"-Page.
This my python code to login into Google
from seleniumwire.undetected_chromedriver.v2 import Chrome, ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from pathlib import Path
def execute_authorization(url, email, password):
# Create empty profile
Path("./chrome_profile").mkdir(parents=True, exist_ok=True)
Path('./chrome_profile/First Run').touch()
options = {}
chrome_options = ChromeOptions()
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--incognito')
chrome_options.add_argument('--disable-dev-shm-usage')
chrome_options.add_argument('--user-data-dir=./chrome_profile/')
user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36'
chrome_options.add_argument('user-agent={0}'.format(user_agent))
chrome_options.add_argument('--headless')
browser = Chrome(seleniumwire_options=options, options=chrome_options)
wait = WebDriverWait(browser, 10)
browser.execute_script("return navigator.userAgent")
browser.get(url)
wait.until(EC.visibility_of_element_located((By.XPATH, '//*[#id="identifierId"]')))
browser.find_element_by_xpath('//*[#id="identifierId"]').send_keys(email)
wait.until(EC.visibility_of_element_located((By.XPATH, '//*[#id="identifierNext"]/div/button')))
browser.find_element_by_xpath('//*[#id="identifierNext"]/div/button').click()
wait.until(EC.visibility_of_element_located((By.XPATH, '//*[#id="password"]/div[1]/div/div[1]/input')))
browser.find_element_by_xpath('//*[#id="password"]/div[1]/div/div[1]/input').send_keys(password)
wait.until(EC.visibility_of_element_located((By.XPATH, '//*[#id="passwordNext"]/div/button')))
browser.find_element_by_xpath('//*[#id="passwordNext"]/div/button').click()
wait_for_correct_current_url(wait)
return browser.current_url
In non headless mode everything works fine.
In headless mode after giving mail based on screenshot I got the message that browser is not safe. As above solution with agent did not help.
I also tried solutions proposed in post google login working in without headless but not with headless mode
with no success.
Any other proposals ?
When headless mode is activated, the Navigator.Webdriver flag is set to true, which indicates that the browser is controlled by automation tools. The code below worked for me.
chrome_options.add_argument('--disable-blink-features=AutomationControlled')
This blog has some other options you could try.
https://piprogramming.org/articles/How-to-make-Selenium-undetectable-and-stealth--7-Ways-to-hide-your-Bot-Automation-from-Detection-0000000017.html
I'm trying to build a bot for Nike.com.
I'm rotating user agent, automation blink is hidden and have done everything needed (Even using VPN).
URL : https://www.nike.com/ca/t/air-force-1-pixel-shoe-txmVNP/CK6649-100
Size:2
from selenium import webdriver
import time
from selenium.webdriver.common.keys import Keys
import requests
import sys
from selenium.webdriver.chrome.options import Options
from fake_useragent import UserAgent
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
options = webdriver.ChromeOptions()
options.add_experimental_option("excludeSwitches", ["enable-automation"])
options.add_experimental_option('useAutomationExtension', False)
ua=UserAgent()
userAgent=ua.random
options.add_argument("--log-level=3")
options.add_argument("--disable-blink-features=AutomationControlled")
options.add_argument(f'user-agent={userAgent}')
driver = webdriver.Chrome(options=options)
driver.minimize_window()
driver.execute_script("Object.defineProperty(navigator, 'webdriver', {get: () => undefined})")
driver.execute_cdp_cmd('Network.setUserAgentOverride', {"userAgent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.53 Safari/537.36'})
URL=input("Enter URL : ")
SIZE=input("ENter Size : ")
driver.maximize_window()
driver.get(URL)
time.sleep(5)
print("Starting again")
while(True):
## try:
s_size=driver.find_element_by_xpath('//*[#id="buyTools"]/div[1]/fieldset/div/div['+SIZE+']/label')
s_size.click()
time.sleep(1)
## try:
print('here')
time.sleep(5)
## WebDriverWait(driver,10).until(EC.element_to_be_clickable((By.XPATH,'//*[#id="floating-atc-wrapper"]/div/button[1]'))).click()
add_to_bag=driver.find_element_by_xpath('//*[#id="floating-atc-wrapper"]/div/button[1]')
time.sleep(3)
add_to_bag.click()
print('1')
break
time.sleep(1)
while(True):
try:
move_to_cart=driver.find_element_by_xpath('//*[#id="nav-cart"]/a/div/span')
move_to_cart.click()
break
except:
time.sleep(1)
This code is selecting the required size, and also Clicks on Add to Bag button with clicking animation on website but after that also nothing happens even when I manually click on Add To Bag button or reload website nothing happen.
The only way out is open link in new tab and do all things manually
Can anyone give me a workaround for this.
I think selenium is doing its work its getting blocked by website
I'm trying to enter this site to retrieve my bank account, first I tried with selenium, but only filled username (maybe because it has 2 forms):
from selenium import webdriver
driver = webdriver.Firefox()
driver.get("https://www.santandertotta.pt/pt_PT/Particulares.html")
user = driver.find_element_by_name("usr")
user.send_keys("user")
pas = driver.find_element_by_name("claveConsultiva")
pas.send_keys("password")
login = driver.find_element_by_id("login_button").click()
Then, I gone rambo mode :) trying figured out why I can't fill password space, and what are the hidden values of the form using requests, this is the code:
url = "https://www.particulares.santandertotta.pt/pagina/indice/0,,276_1_2,00.html"
user_agent = {"user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/..."}
session = requests.session()
r = session.get(url)
soup = BeautifulSoup(r.text, "html.parser")
data = {t['name']:t.get('value') for t in soup.find_all('input', attrs={'type': 'hidden'})}
print(data)
But just received an empty dict. What is the best approach for enter a site with login and scrape?
You cannot get access to Password field because it's not present on main page. To handle Password field you have to click Login button to get to Login page. Also you need to switch to iframe which contains authentication form
from selenium.webdriver.support.ui import WebDriverWait as wait
from selenium.webdriver.support import expected_conditions as EC
driver.get("https://www.santandertotta.pt/pt_PT/Particulares.html")
driver.find_element_by_xpath("//input[#title='Login de Particulares']").click()
wait(driver, 10).until(EC.frame_to_be_available_and_switch_to_it("ws"))
user = driver.find_element_by_name("identificacionUsuario")
user.send_keys("user")
pas = driver.find_element_by_name("claveConsultiva")
pas.send_keys("password")
pas.submit()
Once you access the url https://www.santandertotta.pt/pt_PT/Particulares.html first you have to click on the element with text as Login then only the the Nome and Password field appears but to access those fileds you have to switch to the frame with id as ws inducing WebDriverWait. Next to locate the element of Nome you have to induce WebDriverWait again as follows :
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
driver=webdriver.Firefox(executable_path=r'C:\Utility\BrowserDrivers\geckodriver.exe')
driver.get("https://www.santandertotta.pt/pt_PT/Particulares.html")
driver.find_element_by_xpath("//input[#class='ttAH_button03']").click()
WebDriverWait(driver, 20).until(EC.frame_to_be_available_and_switch_to_it((By.ID, "ws")))
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, "//input[#class='inputlong' and #id='identificacionUsuario']"))).send_keys("your_name")
driver.find_element_by_xpath("//input[#id='claveConsultiva' and #name='claveConsultiva']").send_keys("your_password")
driver.find_element_by_link_text("Entrar no NetBanco Particulares").click()
Here you can find a relevant discussion on Ways to deal with #document under iframe