I have a Google Form that I would like to fill in and submit automatically. Can you please help me? This is my attempt:
The link to the form is the one used in the code below. I was trying to use Selenium to interact with the page.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import random
# Declare the list of answers that can be chosen at random
answers = ["Option 1", "Option 2", "Option 3", "Option 4"]
# Declare the Google Form URL
url = "https://docs.google.com/forms/d/e/1FAIpQLSe8Wj3QAV0LPYGPODkNCK_JSfoacBWUX_3Uq7gTlJB9icBgGQ/viewform"
# Declare the number of emails to generate and fill into the form
num_emails = 100
for i in range(num_emails):
    # Create a fake email address for this iteration
    email = "test{}@example.com".format(i)
    # Start the browser and open the form URL
    driver = webdriver.Chrome()
    driver.get(url)
    email_field = driver.find_element_by_css_selector("input[name='entry.1170127610']")  # Replace "entry.987654321" with the id of the email field in your form
    email_field.send_keys(email)
    # Pick a random answer from the list of options
    answer_choice = random.choice(answers)
    answer = driver.find_element_by_xpath("//div[@data-value='{}']".format(answer_choice))
    answer.click()
    # Submit the form and close the browser
    submit = driver.find_element_by_xpath("//span[text()='Submit']")
    submit.click()
    driver.quit()
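Note that recent Selenium 4 releases removed the find_element_by_* helpers used above, so if that is what is installed, those calls will fail before anything else does. A minimal sketch of the same lookups with the By-based API (keeping the entry name, the data-value markup and the Submit label from the code above, all of which are assumptions about this particular form) would be:

from selenium import webdriver
from selenium.webdriver.common.by import By

url = "https://docs.google.com/forms/d/e/1FAIpQLSe8Wj3QAV0LPYGPODkNCK_JSfoacBWUX_3Uq7gTlJB9icBgGQ/viewform"
driver = webdriver.Chrome()
driver.get(url)

# email field, located by its entry name (form-specific value taken from the script above)
email_field = driver.find_element(By.CSS_SELECTOR, "input[name='entry.1170127610']")
email_field.send_keys("test0@example.com")

# one radio option, located by its data-value attribute (assumes the form renders options this way)
driver.find_element(By.XPATH, "//div[@data-value='Option 1']").click()

# submit button, located by its visible label
driver.find_element(By.XPATH, "//span[text()='Submit']").click()
driver.quit()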
I'm currently writing a script to crawl LinkedIn with a bot, but I'm running into a few problems. First, here's my code:
import selenium
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from bs4 import BeautifulSoup, NavigableString, Tag
from time import sleep
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
import csv
import time
import requests
print('- Importation des packages')
# Task 1: Login to Linkedin
driver = webdriver.Chrome(ChromeDriverManager().install())
# Task 1.1: Open Chrome and Access Linkedin login site
sleep(2)
url = 'https://www.linkedin.com/login'
driver.get(url)
print('Initialisation du chrome driver')
sleep(2)
# Task 1.2: Import username and password
credential = open(r"C:\credentials.txt")
line = credential.readlines()
username = line[0]
password = line[1]
print('Importation des id')
sleep(2)
# Task 1.2: Key in login credentials
email_field = driver.find_element(By.ID, 'username')
email_field.send_keys(username)
print('Email ok')
sleep(3)
password_field = driver.find_element(By.NAME, 'session_password')
password_field.send_keys(password)
print('Mdp ok')
sleep(2)
# Task 1.2: Click the Login button
signin_field = driver.find_element(By.XPATH, '//*[@id="organic-div"]/form/div[3]/button')
signin_field.click()
sleep(3)
print('- Task A: Connexion à Linkedin')
search_field = driver.find_element(By.XPATH, '//*[@id="global-nav-typeahead"]/input')
search_query = input('Type of profile to scrape ')
search_field.send_keys(search_query)
search_field.send_keys(Keys.RETURN)
print('TASK B OK')
def GetURL():
    page_source = BeautifulSoup(driver.page_source, features='lxml')
    profiles = driver.find_elements(By.XPATH, '//*[@id="main"]/div/div/div/ul/li/div/div/div[2]/div[1]/div[1]/div/span/span/a/span/span[1]')
    all_profile_URL = []
    for profile in profiles:
        #profile_ID = profiles.get_attribute('href')
        #profile_URL = "https://www.linkedin.com" + profile_ID
        profile_URL = profile.get_attribute('href')
        print(profile_URL)
        if profile not in all_profile_URL:
            all_profile_URL.append(profile_URL)
    return all_profile_URL

## Pagination
input_page = int(input('Nombre de pages à scraper: '))
URLs_all_page = []
for page in range(input_page):
    URLs_one_page = GetURL()
    sleep(2)
    driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')  # scroll to the end of the page
    sleep(3)
    next_button = driver.find_element(By.CLASS_NAME, "artdeco-button__text")
    driver.execute_script("arguments[0].click();", next_button)
    if URLs_one_page is not None:
        URLs_all_page = URLs_all_page + URLs_one_page
        print(URLs_all_page)
    else:
        print('variable stores a None value')
    sleep(2)
print(URLs_all_page)

# Scrape + store into a CSV
with open('nouvow.csv', 'w', newline='') as file_output:
    headers = ['Nom_prénom', 'Job', 'Location', 'URL']
    writer = csv.DictWriter(file_output, delimiter=',', lineterminator='\n', fieldnames=headers)
    writer.writeheader()
    for linkedin_URL in URLs_all_page:
        driver.get_all(linkedin_URL)
        print('- Accès au profile: ', linkedin_URL)
        page_source = BeautifulSoup(driver.page_source, "html.parser")
        info_div = page_source.find('div', {'class': 'flex-1 mr5'})
        info_loc = info_div.find_all('ul')
        name = info_loc[0].find('li').get_text().strip()  # strip the unneeded characters
        print('--- Nom: ', name)
        location = info_loc[1].find('li').get_text().strip()
        print('--- Localisation :', location)
        title = info_div.find('h2').get_text().strip()
        print('--- Job: ', title)
        writer.writerow({headers[0]: name, headers[1]: location, headers[2]: title, headers[3]: linkedin_URL})
        print('\n')
print('Ok final')
Here's my output:
Initialisation du chrome driver
Importation des id
Email ok
Mdp ok
- Task A: Connexion à LinkedinQuels type de profil souhaitez vous scraper?? fullstack blockchain
TASK B OK
Nombre de pages à scraper: 4
[]
None
None
None
None
None
None
None
None
None
None
[None, None, None, None, None, None, None, None, None, None]
So I think I couldn't locate the profiles and get their links because there is a problem with my XPath expression:
//*[@id="main"]/div/div/div/ul/li/div/div/div[2]/div[1]/div[1]/div/span/span/a/span/span[1]
Also, it doesn't click on the next button for pagination, and finally I get an error at this line of code:
driver.get_all(linkedin_URL)
I would really appreciate some help with that, thank you.
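For what it's worth, a minimal sketch of the two fixes suggested by the symptoms above - targeting the <a> elements themselves so that get_attribute('href') has something to return, and using driver.get() since WebDriver has no get_all() method - could look like this (it reuses the driver from the script above, and the XPath is only an assumption about LinkedIn's current search-results markup):

from selenium.webdriver.common.by import By

def GetURL():
    # select the result anchors themselves, not a <span> nested inside them,
    # so get_attribute('href') returns the profile URL instead of None
    # (this XPath is an assumption; adjust it to what you actually see in the page)
    anchors = driver.find_elements(By.XPATH, '//*[@id="main"]//ul/li//a[contains(@href, "/in/")]')
    all_profile_URL = []
    for a in anchors:
        profile_URL = a.get_attribute('href')
        if profile_URL and profile_URL not in all_profile_URL:
            all_profile_URL.append(profile_URL)
    return all_profile_URL

# later, when visiting each profile:
# driver.get(linkedin_URL)   # get(), not get_all(), which does not exist on WebDriver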
I am kind of new to Python and tried to make a script that automatically logs you in on Instagram. However, I was not happy with hard-coding the credentials in the script; I want to ask for them when running the script and then use them on the Instagram login page. This is the code I have so far:
from time import sleep
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from random import randint
username = input("Enter your username: ")
password = input("Enter your password: ")
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get('https://www.instagram.com/')
sleep (2)
driver.execute_script("""
var l = document.getElementsByClassName("pbNvD FrS-d sjcY5 gD9tr")[0];
l.parentNode.removeChild(l);
""")
driver.execute_script("""
var l = document.getElementsByClassName("RnEpo Yx5HN _4Yzd2")[0];
l.parentNode.removeChild(l);
""")
sleep (1)
username = driver.find_element_by_name('username')
username.send_keys('username')
password = driver.find_element_by_name('password')
password.send_keys('password')
button_login = driver.find_element_by_css_selector('#loginForm > div > div:nth-child(3) > button')
button_login.click()
sleep(randint(3,5))
notnow = driver.find_element_by_css_selector('#react-root > section > main > div > div > div > div > button')
notnow.click()
sleep(randint(3,5))
notificationnotnow = driver.find_element_by_xpath('/html/body/div[4]/div/div/div/div[3]/button[2]')
notificationnotnow.click()
Thanks in advance!
Why do you pass the strings 'username' and 'password' to send_keys instead of the variables username and password?
from time import sleep
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from random import randint
username = input("Enter your username: ")
password = input("Enter your password: ")
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.get('https://www.instagram.com/')
sleep (5)
username_input = driver.find_element_by_name('username')
username_input.send_keys(username)
password_input = driver.find_element_by_name('password')
password_input.send_keys(password)
button_login = driver.find_element_by_css_selector('#loginForm > div > div:nth-child(3) > button')
button_login.click()
sleep(randint(3,5))
notnow = driver.find_element_by_css_selector('#react-root > section > main > div > div > div > div > button')
notnow.click()
sleep(randint(3,5))
notificationnotnow = driver.find_element_by_xpath('/html/body/div[4]/div/div/div/div[3]/button[2]')
notificationnotnow.click()
The problem is right here
sleep (1)
username = driver.find_element_by_name('username')
username.send_keys('username')
password = driver.find_element_by_name('password')
password.send_keys('password')
You are passing the literal words 'username' and 'password' instead of passing the variables username and password. When you put them inside quotes they become strings. Note also that the located element should go into a differently named variable (e.g. username_field); if you reassign username to the element, the credential string read from input() is lost before send_keys is called.
You should try the following:
sleep(1)
username_field = driver.find_element_by_name('username')
username_field.send_keys(username)
password_field = driver.find_element_by_name('password')
password_field.send_keys(password)
So I am using Selenium for my web automation. I have an Excel sheet with my data entries. Right now I am able to fill the form from my Excel data, but it fills only the first entry and after that my program just stops. I have more than 150,000 entries. I want my program to keep running until it has filled the form for all the entries. How do I do that? Please help me!
My Entries in excel
My code
from sre_parse import State
from tkinter.tix import Select
from unicodedata import name
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import pandas as pd
import xlrd
import time
chrome_options = Options()
chrome_options.add_experimental_option("detach", True)
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()), chrome_options=chrome_options)
driver.get("https://ssg2021.in/citizenfeedback")
# reading the excel file
df = pd.read_excel('Rajnandgaon_List2.xlsx')
# looping through all the data
for i in df.index:
    time.sleep(3)
    # selecting state
    state_select = driver.find_element(By.XPATH, '//*[@id="State"]')
    drp1 = Select(state_select)
    drp1.select_by_visible_text('Chhattisgarh')
    time.sleep(2)
    # selecting district
    district_select = driver.find_element(By.XPATH, '//*[@id="District"]')
    drp2 = Select(district_select)
    drp2.select_by_visible_text('RAJNANDGAON')
    entry = df.loc[i]
    # entering the age
    age = driver.find_element(By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[1]/form/div[4]/div[1]/div/div/input')
    age.send_keys(str(entry['age']))
    # respondent name
    rs_name = driver.find_element(By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[1]/form/div[4]/div[2]/div/div[1]/div/input')
    rs_name.send_keys(entry['name'])
    # respondent mobile number
    rs_number = driver.find_element(By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[1]/form/div[4]/div[2]/div/div[2]/div/input')
    rs_number.send_keys(str(entry['mobile number']))
    # respondent gender
    gender = driver.find_element(By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[1]/form/div[4]/div[2]/div/div[3]/div/select')
    rs_gender = Select(gender)
    rs_gender.select_by_visible_text('Male')
    # submitting the form
    submit = driver.find_element(By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[1]/form/div[5]/input')
    submit.click()
    # second page
    # radio button 1
    radio_1 = driver.find_element(By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[2]/form/div[1]/div[1]/div[2]/div[1]/label[1]/input')
    radio_1.click()
    # radio button 2
    radio_1 = driver.find_element(By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[2]/form/div[1]/div[2]/div[2]/div[1]/label[1]')
    radio_1.click()
    # radio button 3
    radio_1 = driver.find_element(By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[2]/form/div[1]/div[3]/div[2]/div[1]/label[1]')
    radio_1.click()
    # radio button 4
    radio_1 = driver.find_element(By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[2]/form/div[1]/div[4]/div[2]/div[1]/label[1]')
    radio_1.click()
    # radio button 5
    radio_1 = driver.find_element(By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[2]/form/div[1]/div[5]/div[2]/div[1]/label[1]')
    radio_1.click()
    submit2 = driver.find_element(By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[2]/form/div[2]/input')
    submit2.click()
Your problem can be solved using the code below:
from sre_parse import State
from tkinter.tix import Select
from unicodedata import name
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import pandas as pd
import xlrd
import time
import openpyxl
# reading the excel file
ExcelPath = 'Excel_Path'
df = pd.read_excel(ExcelPath)
wb = openpyxl.load_workbook(ExcelPath)
ws = wb.worksheets[0]
maxrow = ws.max_row
# looping through all the data
for i in range(2, maxrow + 1):
    # Every time a new browser will open
    chrome_options = Options()
    chrome_options.add_experimental_option("detach", True)
    driver = webdriver.Chrome()
    driver.get("https://ssg2021.in/citizenfeedback")
    time.sleep(3)
    # selecting state
    state_select = driver.find_element(By.XPATH, '//*[@id="State"]')
    drp1 = Select(state_select)
    drp1.select_by_visible_text('Chhattisgarh')
    time.sleep(2)
    # selecting district
    district_select = driver.find_element(By.XPATH, '//*[@id="District"]')
    drp2 = Select(district_select)
    drp2.select_by_visible_text('RAJNANDGAON')
    entry = df.loc[i - 2]  # pandas rows are 0-based while the Excel data starts at row 2
    # entering the age
    age = driver.find_element(
        By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[1]/form/div[4]/div[1]/div/div/input')
    age.send_keys(str(entry['age']))
    # respondent name
    rs_name = driver.find_element(
        By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[1]/form/div[4]/div[2]/div/div[1]/div/input')
    rs_name.send_keys(entry['name'])
    # respondent mobile number
    rs_number = driver.find_element(
        By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[1]/form/div[4]/div[2]/div/div[2]/div/input')
    rs_number.send_keys(str(entry['mobile number']))
    # respondent gender
    gender = driver.find_element(
        By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[1]/form/div[4]/div[2]/div/div[3]/div/select')
    rs_gender = Select(gender)
    rs_gender.select_by_visible_text('Male')
    # submitting the form
    submit = driver.find_element(
        By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[1]/form/div[5]/input')
    submit.click()
    # second page
    # radio button 1
    radio_1 = driver.find_element(
        By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[2]/form/div[1]/div[1]/div[2]/div[1]/label[1]/input')
    radio_1.click()
    # radio button 2
    radio_1 = driver.find_element(
        By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[2]/form/div[1]/div[2]/div[2]/div[1]/label[1]')
    radio_1.click()
    # radio button 3
    radio_1 = driver.find_element(
        By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[2]/form/div[1]/div[3]/div[2]/div[1]/label[1]')
    radio_1.click()
    # radio button 4
    radio_1 = driver.find_element(
        By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[2]/form/div[1]/div[4]/div[2]/div[1]/label[1]')
    radio_1.click()
    # radio button 5
    radio_1 = driver.find_element(
        By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[2]/form/div[1]/div[5]/div[2]/div[1]/label[1]')
    radio_1.click()
    submit2 = driver.find_element(
        By.XPATH, '//*[@id="zed_user_form"]/div/div[1]/div[2]/div/div/div[2]/form/div[2]/input')
    submit2.click()
    # After clicking the submit button the browser will quit
    driver.quit()
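Opening and quitting a browser for every row works, but with more than 150,000 rows it will be very slow. An alternative sketch (reusing the imports and the df from the code above) keeps a single driver and simply reloads the form URL at the top of each iteration, so a fresh, empty form is presented for every entry:

driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
for i in df.index:
    driver.get("https://ssg2021.in/citizenfeedback")  # reload a fresh form for this row
    entry = df.loc[i]
    # ... fill state, district, age, name, number, gender and the radio buttons exactly as above ...
    # ... click both submit buttons, then loop back and reload the form for the next entry ...
driver.quit()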
I am scraping reactions to a post on a Facebook page. I can scrape all the information (comments, reactions, tags, ...), but when I want to put it in a DataFrame I get an error (arrays must all be same length), which is normal because sometimes one person leaves only a comment and another only a tag, so I end up with lists of different lengths. I think I could add a conditional if, but maybe there is a more optimized solution...
For example len(tag) = 2, len(usr) = 17, len(commentaire) = 12.
Thanks :)
#imports here
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
import requests
from bs4 import BeautifulSoup
import time
from time import sleep
from lxml import html
import logging as log
import pandas as pd
# chromedriver path and disabling FB's automatic notification pop-ups (anti-scrape)
chrome_options = webdriver.ChromeOptions()
prefs = {"profile.default_content_setting_values.notifications" : 2}
chrome_options.add_experimental_option("prefs",prefs)
driver = webdriver.Chrome('C:/Users/User/Downloads/chromedriver.exe',
chrome_options=chrome_options)
#open FB
driver.get("http://www.facebook.com")
print ("facebook page log ok")
sleep(1)
# locate the user and pass fields (css_selector)
username = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR,
"input[name='email']")))
password = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR,
"input[name='pass']")))
## locate the user and pass fields and click (xpath)
#username = driver.find_element(By.XPATH,"//input[contains(@id,'email')]")
#password = driver.find_element(By.XPATH,"//input[contains(@id,'pass')]")
usr=input('Enter Email Id:')
pwd=input('Enter Password:')
# enter the data
username.clear()
username.send_keys(usr)
print ("Email Id entered")
sleep(1)
password.clear()
password.send_keys(pwd)
print ("Pass entered")
# locate the log in button and click
button = WebDriverWait(driver, 2).until(EC.element_to_be_clickable((By.CSS_SELECTOR,
"button[type='submit']"))).click()
print("login Successfully...")
time.sleep(5)
post = 'https://mbasic.facebook.com/AJSTunisie/posts/6452145678144265'
#open the webpage
driver.get(post)
page = requests.get(post)
df_comm = pd.DataFrame(columns = ['post_url', 'user', 'commentaire', 'tag', 'user_url'])
page_count = 0
while True:
    # scrape the reactions
    tree = html.fromstring(driver.page_source)
    user = tree.xpath("//div[@class='eb']/div/h3/a/text()")
    commentaire = tree.xpath("//div[@class='eb']/div/div[1]/text()")
    tag = tree.xpath("//div[@class='eb']/div/div[1]/a/text()")
    user_url = tree.xpath("//div[@class='eb']/div/h3/a/@href")
    data = {'post_url': [post]*len(user), 'user': user, 'commentaire': commentaire, 'tag': tag,
            'user_url': user_url}
    df_comm = df_comm.append(pd.DataFrame(columns=df_comm.columns, data=data))
    # check if more reactions exist (whether "En afficher davantage" is present or not)
    next_link = tree.xpath("//div[@class='eb eu']/a/@href")
    if len(next_link) != 0:
        driver.find_element_by_xpath("//div[@class='eb eu']/a/@href").click()
        page_count = page_count + 1
    else:
        next_link = ''
        break
df_comm = df_comm.reset_index()
#df_comm.to_csv(path,index=False)
driver.close()
You should do it in a different way.
First you should find all the comments - elements with text, user, tag, etc. - and then use a for-loop to work with every comment separately. Inside the loop you should use a relative XPath (starting with .) to get only the information for that single comment. Then you can check whether the tag or another item is missing and put in some default value - e.g. an empty string.
This way every comment has all the values, so every row in the CSV has the same size.
This also resolves another problem - with the previous method you could get the tag of the second comment attached to the first comment, and you couldn't control it.
To make the code simpler I put every comment in a list of rows and later convert the whole list to a DataFrame.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from bs4 import BeautifulSoup
import time
from lxml import html
import logging as log
import pandas as pd
# chromedriver path and disabling FB's automatic notification pop-ups (anti-scrape)
chrome_options = webdriver.ChromeOptions()
prefs = {"profile.default_content_setting_values.notifications" : 2}
chrome_options.add_experimental_option("prefs",prefs)
#driver = webdriver.Chrome('C:/Users/User/Downloads/chromedriver.exe', chrome_options=chrome_options)
driver = webdriver.Chrome(chrome_options=chrome_options)
#open FB
driver.get("http://www.facebook.com")
print ("facebook page log ok")
time.sleep(1)
# locate the user and pass fields (css_selector)
username = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR,
"input[name='email']")))
password = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR,
"input[name='pass']")))
## locate the user and pass fields and click (xpath)
#username = driver.find_element(By.XPATH,"//input[contains(@id,'email')]")
#password = driver.find_element(By.XPATH,"//input[contains(@id,'pass')]")
usr = input('Enter Email Id:')
pwd = input('Enter Password:')
# enter the data
username.clear()
username.send_keys(usr)
print ("Email Id entered")
#time.sleep(1)
password.clear()
password.send_keys(pwd)
print ("Pass entered")
# locate the log in button and click
button = WebDriverWait(driver, 2).until(EC.element_to_be_clickable((By.CSS_SELECTOR,
"button[type='submit']"))).click()
print("login Successfully...")
time.sleep(5)
post_url = 'https://mbasic.facebook.com/AJSTunisie/posts/6452145678144265'
#open the webpage
driver.get(post_url)
all_rows = []
page_count = 0
while True:
    # scrape the reactions
    page_count += 1
    print('\n--- page:', page_count, '---\n')
    tree = html.fromstring(driver.page_source)
    # find all comments
    all_comments = tree.xpath("//div[@class='ec']/div")
    print('len(all_comments):', len(all_comments))
    # work with every comment separately
    for comment in all_comments:
        user = comment.xpath(".//h3/a/text()")  # relative xpath starting at `.`
        print('user:', user)
        user = user[0] if user else ""  # set default value
        print('user:', user)
        commentaire = comment.xpath(".//div[1]/text()")  # relative xpath starting at `.`
        print('commentaire:', commentaire)
        commentaire = commentaire[0] if commentaire else ""  # set default value
        print('commentaire:', commentaire)
        tag = comment.xpath(".//div[1]/a/text()")  # relative xpath starting at `.`
        print('tag:', tag)
        tag = tag[0] if tag else ""  # set default value
        print('tag:', tag)
        user_url = comment.xpath(".//h3/a/@href")  # relative xpath starting at `.`
        print('user_url:', user_url)
        user_url = user_url[0] if user_url else ""  # set default value
        print('user_url:', user_url)
        all_rows.append([post_url, user, commentaire, tag, user_url])
    # check if more reactions exist (whether "En afficher davantage" is present or not)
    next_link = driver.find_elements_by_xpath("//div[@class='ec es']/a")
    print('---')
    print('len(next_link):', len(next_link))
    if next_link:
        next_link[0].click()
        time.sleep(2)
    else:
        break
# - after loop -
df = pd.DataFrame(all_rows, columns=['post_url', 'user', 'commentaire', 'tag', 'user_url'])
print(df)
df.to_csv('output.csv', index=False)
#driver.close()
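A side benefit of collecting the rows in a plain list and building the DataFrame once at the end, as above, is that it avoids calling DataFrame.append inside the loop (the pattern used in the question), which is slow and has been deprecated and removed in recent pandas releases.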
I have tried many times to upload a photo to a Facebook post. When I read the Selenium documentation, all it says is:
Select the <input type="file"> element and call the send_keys() method passing the file path, either the path relative to the test script, or an absolute path.
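Taken on its own, that documented technique is only a few lines. A minimal sketch (the page URL, the file path and the CSS selector here are placeholders, not taken from Facebook):

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
driver.get("https://example.com/upload")  # placeholder page containing an <input type="file">

# locate the file input and pass it the path of the file to upload
file_input = driver.find_element(By.CSS_SELECTOR, "input[type='file']")
file_input.send_keys("/absolute/path/to/photo.jpg")  # absolute path, or relative to the script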
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
usr = "random@gmail.com"
pwd = "randompassword"
driver = webdriver.Firefox()
# or you can use Chrome(executable_path="/usr/bin/chromedriver")
driver.get("http://www.facebook.com/login")
assert "Facebook" in driver.title
elem = driver.find_element_by_id("email")
elem.send_keys(usr)
elem = driver.find_element_by_id("pass")
elem.send_keys(pwd)
elem.send_keys(Keys.RETURN)
elem = driver.find_element_by_css_selector("#u_0_y")
elem.send_keys("Hello Internet :) ")
driver.find_element_by_css_selector("._11b").click()
This works for me..
# imports needed by this snippet
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def main():
    # Your Facebook account user and password
    usr = "test.fb.post@gmail.com"
    pwd = "test123456789"
    # ipath (path of the image to upload) and message (text of the post) are assumed to be defined elsewhere
    grp = ['https://www.facebook.com/groups/grpid/', 'https://www.facebook.com/groups/grpid/',
           'https://www.facebook.com/groups/grpid/', 'https://www.facebook.com/groups/grpid/',
           'https://www.facebook.com/groups/grpid/', 'https://www.facebook.com/groups/grpid/',
           'https://www.facebook.com/groups/grpid/']
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_experimental_option("detach", True)
    chrome_options.add_argument("--disable-infobars")
    chrome_options.add_experimental_option("prefs", {
        "profile.default_content_setting_values.notifications": 2  # 1:allow, 2:block
    })
    driver = webdriver.Chrome(chrome_options=chrome_options)
    driver.implicitly_wait(15)  # seconds
    # Go to facebook.com
    driver.get("http://www.facebook.com")
    sleep(2)
    # Enter user email
    elem = driver.find_element_by_id("email")
    elem.send_keys(usr)
    # Enter user password
    elem = driver.find_element_by_id("pass")
    elem.send_keys(pwd)
    # Login
    elem.send_keys(Keys.RETURN)
    sleep(10)
    for group in grp:
        driver.get(group)
        try:
            try:
                commentr = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//*[@name='xhpc_message_text']")))
                commentr.click()
            except Exception:
                commentr = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//*[@loggingname='status_tab_selector']")))
                commentr.click()
            commentr = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, "//*[@class='_3u15']")))
            commentr.click()
            sleep(3)
            l = driver.find_elements_by_tag_name('input')
            sleep(1)
            for g in l:
                if g == driver.find_element_by_xpath("//input[@type='file'][@class='_n _5f0v']"):
                    sleep(1)
                    g.send_keys(ipath)
                    print('image loaded')
            sleep(10)
            driver.find_element_by_xpath("//*[@class='_1mf _1mj']").send_keys(message)
            sleep(1)
            buttons = driver.find_elements_by_tag_name("button")
            sleep(1)
            for button in buttons:
                if button.text == "Post":
                    sleep(5)
                    button.click()
            sleep(10)
        except Exception:
            print('image not posted in ' + group)
    driver.close()

if __name__ == '__main__':
    main()
Instead of using css_selector, try using xpath.
statuselement = driver.find_element_by_xpath("//*[@name='xhpc_message']").click()
driver.find_element_by_xpath("//*[@class='_3jk']").click()
l = driver.find_elements_by_tag_name('input')
ipath = r"C:\Users\Utente\Pictures\CoutureBeardsley.jpg"
for g in l:
    if g == driver.find_element_by_xpath("//input[@type='file'][@class='_n _5f0v']"):
        g.send_keys(ipath)
        print('image loaded')