Scroll Instagram followers/following list - Python

I want to get the Instagram followers/following user list.
Previously I used this Selenium Python script, and it was working:
scroll_box = browser.find_element_by_xpath("//div[@class='isgrP']")
sleep(5)
# height variables
last_ht, ht = 0, 1
while last_ht != ht:
    last_ht = ht
    sleep(2)
    # scroll down and return the new scroll height
    ht = browser.execute_script("""
        arguments[0].scrollTo(0, arguments[0].scrollHeight);
        return arguments[0].scrollHeight;""", scroll_box)
I've also tried this solution, but I still get an error: https://stackoverflow.com/a/54174682/11727107
Has something changed on Instagram's side? How should the scrolling be done now?
Thanks.

It works with this code (the dialog's class name has changed):
scroll_box = browser.find_element_by_xpath("//div[@class='_9-49']")
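For completeness, here is a minimal sketch of the same scroll loop with the updated selector. The '_9-49' class name comes from the answer above, and Instagram changes these obfuscated class names regularly, so treat it as an assumption that may need updating:

from time import sleep
from selenium import webdriver

browser = webdriver.Chrome()
# ... log in and open the followers dialog first ...

# assumed: the scrollable followers dialog still carries the '_9-49' class
scroll_box = browser.find_element_by_xpath("//div[@class='_9-49']")
last_ht, ht = 0, 1
while last_ht != ht:
    last_ht = ht
    sleep(2)
    # scroll to the bottom of the box and return its height;
    # the loop stops once the height no longer grows
    ht = browser.execute_script("""
        arguments[0].scrollTo(0, arguments[0].scrollHeight);
        return arguments[0].scrollHeight;""", scroll_box)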

Related

Instagram following box scrolling via Selenium

I have code for getting the followers of a user on Instagram, but I have some problems. First of all, sometimes it stops before reaching the bottom, so I cannot get all of the followers. Secondly, when I try to get a large number of followers, after a while the scrolling becomes very slow. How can I handle these two situations in Python?
followers_panel = self.driver.find_element(By.XPATH, '/html/body/div[2]/div/div/div/div[2]/div/div/div[1]/div/div[2]/div/div/div/div/div[2]/div/div/div[2]')
last_ht, ht = 0, 1
while last_ht != ht:
    last_ht = ht
    ht = self.driver.execute_script("""
        arguments[0].scrollTo(0, arguments[0].scrollHeight);
        return arguments[0].scrollHeight;""", followers_panel)
WebDriverWait(self.driver, 60).until(EC.invisibility_of_element_located((By.CSS_SELECTOR, "._ab8w._ab94._ab97._ab9f._ab9m._ab9p._abc0._abcm")))
WebDriverWait(self.driver, 45).until(EC.visibility_of_element_located((By.XPATH, '//div[@class="_aano"]')))
WebDriverWait(self.driver, 45).until(EC.visibility_of_element_located((By.XPATH, '//div[@class="_aanq"]')))
WebDriverWait(self.driver, 45).until(EC.visibility_of_element_located((By.XPATH, '//div[@class="_aano"]')))
list_of_followers = list(map(lambda x: x.text, self.driver.find_elements(By.XPATH, '//div[@class="_aano"]//a/span/div')))
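There is no answer recorded here, but one approach worth sketching (not from the original post; the '_aano' container class is carried over from the question and may differ on current Instagram markup) is to stop relying on scrollHeight alone: scroll the last loaded entry into view and wait until the number of loaded entries grows, which addresses both the early stop and the slowdown from fixed sleeps:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait

def collect_followers(driver, max_rounds=200):
    # assumed: the scrollable dialog body still uses the "_aano" class
    panel = driver.find_element(By.CSS_SELECTOR, "div._aano")
    seen = 0
    for _ in range(max_rounds):
        links = panel.find_elements(By.TAG_NAME, "a")
        if not links:
            break
        # bring the last loaded follower into view to trigger lazy loading
        driver.execute_script("arguments[0].scrollIntoView(true);", links[-1])
        try:
            # wait up to 10 s for more entries than we had before
            WebDriverWait(driver, 10).until(
                lambda d: len(panel.find_elements(By.TAG_NAME, "a")) > seen)
            seen = len(panel.find_elements(By.TAG_NAME, "a"))
        except Exception:
            break  # nothing new appeared, assume we reached the bottom
    return [a.text for a in panel.find_elements(By.TAG_NAME, "a") if a.text]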

Selenium Python - Is it possible to draw a circle around clicked GUI elements?

I am new to Selenium and I am wondering if I could draw a red circle around anything my Selenium script clicks before taking a screenshot of the page (I already know how to take the screenshot). I think that I could use:
ele.location
and
ele.size
to draw a circle, but I just do not know how. Any input is appreciated.
You can do the following. Let's say you have a button. You can select it as:
button = driver.find_element('xpath', '//button')
And then outline it
outline_style = driver.execute_script("return arguments[0].style.outline", button) # Get initial outline style
driver.execute_script("arguments[0].style.outline = '#f00 solid 5px';", button) # Change outline style
button.click()
driver.execute_script("arguments[0].style.outline = arguments[1];", button, outline_style) # Set back initial outline style
Yes, it's definitely not a circle, but if you just need to put some kind of visual focus on an element, you can use this approach.
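If you really want a circle, one option (a sketch, not part of the original answer) is to overlay an absolutely positioned div with a round border on top of the element, using the ele.location and ele.size values the question mentions; the helper name draw_circle below is hypothetical:

def draw_circle(driver, element, padding=10):
    # compute a square box centred on the element, padded on each side
    loc, size = element.location, element.size
    diameter = max(size['width'], size['height']) + 2 * padding
    left = loc['x'] + size['width'] // 2 - diameter // 2
    top = loc['y'] + size['height'] // 2 - diameter // 2
    driver.execute_script("""
        var ring = document.createElement('div');
        ring.style.position = 'absolute';
        ring.style.left = arguments[0] + 'px';
        ring.style.top = arguments[1] + 'px';
        ring.style.width = arguments[2] + 'px';
        ring.style.height = arguments[2] + 'px';
        ring.style.border = '3px solid red';
        ring.style.borderRadius = '50%';
        ring.style.pointerEvents = 'none';  // let clicks pass through the ring
        ring.style.zIndex = 2147483647;
        document.body.appendChild(ring);
        setTimeout(function () { ring.remove(); }, 2000);
    """, left, top, diameter)

Because the overlay uses pointer-events: none, the element underneath stays clickable, so you can draw the ring, take the screenshot, and then click as usual.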
Here is another solution using a canvas. In the code below, for this same page, we are trying to click on the bookmark button. Check the video: the element that we are trying to click is colored blue and then clicked.
So basically what we have done here is create a canvas on top of our page, get the location of the element, click on it to show the blue circle, and then click on the element after removing the canvas. It's important to remove the canvas before doing the actual click, as the canvas is overlaid on top of our page.
In the code below, all you need to change is the line element_to_click = driver.find_element(By.CSS_SELECTOR, '.js-bookmark-btn.s-btn.s-btn__unset.c-pointer.py4.js-gps-track') if you want to click on some other element on the page.
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
# REPLACE YOUR CHROME PATH HERE
chrome_path = r"C:\Users\hpoddar\Desktop\Tools\chromedriver_win32\chromedriver.exe"
def click_on_element(element):
    loc_dict = element.location
    circle_x, circle_y = loc_dict['x'] + element.size['width']//2, loc_dict['y'] + element.size['height']//2
    locX, locY = loc_dict['x'], loc_dict['y']
    createCircle = f'''
        document.body.appendChild(myCanvas);
        createCircle = function (event) {{
            var x = {circle_x};
            var y = {circle_y};
            ctx.fillStyle = "#2980b9";
            ctx.beginPath();
            ctx.arc(x, y, 10, 0, 2 * Math.PI);
            ctx.fill();
            ctx.closePath();
            setTimeout(function () {{ ctx.clearRect(0, 0, myCanvas.width, myCanvas.height) }}, 2000);
        }}
        myCanvas.addEventListener("click", createCircle, true);
    '''
    driver.execute_script(createCircle)
    elementFromPoint = f'''
        el = document.elementFromPoint({locX}, {locY});
        el.click();
    '''
    driver.execute_script(elementFromPoint)
    removeCanvasAndClick = f'''
        setTimeout(function () {{
            document.body.removeChild(myCanvas);
            myCanvas.removeEventListener('click', createCircle, true);
            el = document.elementFromPoint({locX}, {locY});
            el.click();
        }}, 1000);
    '''
    driver.execute_script(removeCanvasAndClick)
url = 'https://stackoverflow.com/questions/73197807/selenium-python-is-it-possible-to-draw-a-circle-around-clicked-gui-elements'
s = Service(chrome_path)
driver = webdriver.Chrome(service=s)
driver.get(url)
driver.maximize_window()
# Say, we want to click on bookmark
element_to_click = driver.find_element(By.CSS_SELECTOR, '.js-bookmark-btn.s-btn.s-btn__unset.c-pointer.py4.js-gps-track')
#Create canvas
canvas = '''
myCanvas = document.createElement('canvas');
myCanvas.id = 'canvas';
myCanvas.style.position = 'absolute';
myCanvas.style.left = "0px";
myCanvas.style.top = "0px";
myCanvas.width = window.innerWidth;
myCanvas.height = window.innerHeight;
ctx = myCanvas.getContext('2d');
'''
driver.execute_script(canvas)
# Circles the element that you want to click and then clicks on it
click_on_element(element_to_click)
Check out the YouTube video below for the run on this same page:
https://www.youtube.com/watch?v=6-Wz97xFPkI

Web scraping with Python

I tried to extract data from the site below, but I don't know how to build the XPath inside the for loop, since the row index has to be inserted into the XPath string. Could you do me a favor and help me?
Site: https://www150.statcan.gc.ca/n1/pub/71-607-x/2021004/imp-eng.htm?r1=(1)&r2=0&r3=0&r4=12&r5=0&r7=0&r8=2022-01-01&r9=2022-05-01
from selenium import webdriver
import pandas as pd
import time
driver = webdriver.Chrome(r'C:\Webdriver\chromedriver.exe')
driver.get('https://www150.statcan.gc.ca/n1/pub/71-607-x/71-607-x2021004-eng.htm')
time.sleep(2)
# finding the import button using its XPath
button = driver.find_element_by_xpath('//*[@id="cimt_import"]/p[1]/a')
# clicking on the button
button.click()
time.sleep(2)
# finding the button using its ID
button = driver.find_element_by_xpath('//*[@id="topic3s"]')
# clicking on the button
button.click()
time.sleep(2)
# finding the start year (2022) in the dropdown
element_drop_down_startYear = driver.find_element_by_xpath('//*[@id="fromYear"]/option[1]')
element_drop_down_startYear.click()
# finding the start month (January) in the dropdown
element_drop_down_startMonth = driver.find_element_by_xpath('//*[@id="fromMonth"]/option[1]')
element_drop_down_startMonth.click()
# finding the end year in the dropdown
element_drop_down_endYear = driver.find_element_by_xpath('//*[@id="toYear"]/option[1]')
element_drop_down_endYear.click()
# finding the end month in the dropdown
element_drop_down_endmonth = driver.find_element_by_xpath('//*[@id="toMonth"]/option[5]')
element_drop_down_endmonth.click()
# finding the specific chapter
element_drop_down_specificChapter = driver.find_element_by_xpath('//*[@id="report_hs"]/option[1]')
element_drop_down_specificChapter.click()
time.sleep(1)
# finding the specific commodity from the list
element_drop_down_specific_commodity = driver.find_element_by_xpath('//*[@id="report_hs"]/option[2]')
element_drop_down_specific_commodity.click()
# clicking the report button
element_drop_down_specific_button = driver.find_element_by_xpath('//*[@id="report"]/div[1]/div[3]/div[5]/p[2]/button')
element_drop_down_specific_button.click()
#--------------------------------------------------------------------
cel = 1
for cel in range(25):
    x = driver.find_element_by_xpath('//*[@id="report_table"]/tbody/tr[1]/td[2]/a')
    print(x)
    print("//*[@id="report_table"]/tbody/tr["+ cel +"]/td[4]")
    print("//*[@id="report_table"]/tbody/tr["+ cel +"]/td[7]")
    print("//*[@id="report_table"]/tbody/tr["+ cel +"]/td[8]/abbr")
    time.sleep(3)
You need to find the element before printing it; otherwise you're just printing a string. I think what you want to do is, on each iteration of the for loop, print the elements behind those selectors? If so, find the elements like this, then print them:
for i in range(1, 26):  # XPath row indices start at 1, not 0
    x = driver.find_element_by_xpath('//*[@id="report_table"]/tbody/tr[1]/td[2]/a')
    print(x)
    element_1 = driver.find_element_by_xpath(f'//*[@id="report_table"]/tbody/tr[{i}]/td[4]')
    element_2 = driver.find_element_by_xpath(f'//*[@id="report_table"]/tbody/tr[{i}]/td[7]')
    element_3 = driver.find_element_by_xpath(f'//*[@id="report_table"]/tbody/tr[{i}]/td[8]/abbr')
    print(element_1.text, element_2.text, element_3.text)
If you inspect the Network tab, you can see that the webpage is pulling the table data from
https://www150.statcan.gc.ca//t1/cimt/rest/getReport/(1)/0/0/12/0/150000/1/0/2022-01-01/2022-05-01
Scrape that JSON endpoint instead:
import requests
r = requests.get('https://www150.statcan.gc.ca//t1/cimt/rest/getReport/(1)/0/0/12/0/150000/1/0/2022-01-01/2022-05-01')
print(r.json())
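If you want the result as a table, a possible follow-up (a sketch; the exact shape of the returned JSON is not shown above, so the normalization step is an assumption) is to flatten the response with pandas:

import requests
import pandas as pd

url = ('https://www150.statcan.gc.ca//t1/cimt/rest/getReport/'
       '(1)/0/0/12/0/150000/1/0/2022-01-01/2022-05-01')
data = requests.get(url).json()

# json_normalize handles both a top-level list of records and a single dict;
# inspect the resulting columns to see which fields you actually need
df = pd.json_normalize(data)
print(df.head())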

How can I get Selenium to keep pulling down vertical scroll bar in element?

So I am trying to get Selenium to keep pulling down the vertical scroll bar in an element (see the "Vertical scrolling" screenshot).
This is the code I have been using, and it does work. However, when most of the list is loaded it suddenly stops and does not keep loading further followers on Instagram.
def _get_names(self):
    sleep(2)
    scroll_box = self.driver.find_element(By.XPATH, "/html/body/div[6]/div/div/div/div[3]")
    last_ht, ht = 0, 1
    while last_ht != ht:
        last_ht = ht
        sleep(1)
        ht = self.driver.execute_script("""
            arguments[0].scrollTo(0, arguments[0].scrollHeight);
            return arguments[0].scrollHeight;
        """, scroll_box)
    links = scroll_box.find_elements_by_tag_name('a')
    names = [name.text for name in links if name.text != '']
    # close button
    self.driver.find_element(By.XPATH, "/html/body/div[4]/div/div[1]/div/div[2]/button")\
        .click()
    return names
Which means it does not collect all the followers, because it stops scrolling too early.
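No answer is recorded here, but a common workaround (a sketch, not from the original post) is to tolerate a few rounds where the scroll height does not change before giving up, since Instagram sometimes pauses while fetching the next batch:

from time import sleep

def scroll_to_bottom(driver, scroll_box, patience=5):
    stalled = 0
    last_ht = -1
    while stalled < patience:
        ht = driver.execute_script(
            "arguments[0].scrollTo(0, arguments[0].scrollHeight);"
            "return arguments[0].scrollHeight;", scroll_box)
        if ht == last_ht:
            stalled += 1   # height unchanged: maybe still loading, wait a bit longer
            sleep(2)
        else:
            stalled = 0    # new content arrived, reset the counter
            last_ht = ht
            sleep(1)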

Why is scrolling interacting badly with my web scraping?

I'm trying to scrape all the corner betting odds for a given game at Skybet, but it looks like scrolling is messing things up in my loop. When I print section.text it looks like it's doing what I want, but then it clicks the wrong thing.
And when I don't scroll it will only click on the first few odds sections before the code just freezes.
Any help would be really appreciated thanks!
Also, I made the odds_sections refresh itself at each iteration because I thought that might be the problem.
driver = webdriver.Safari()
driver.get("https://m.skybet.com/football/competitions")
driver.maximize_window()

# click accept cookie
try:
    button_cookie = WebDriverWait(driver, 20).until(
        EC.element_to_be_clickable((By.XPATH, "//body/div[2]/div[1]/a[2]"))
    )
    button_cookie.click()
except:
    print("no cookie")

# find location of premier league
pl = driver.find_elements_by_class_name("split__title")
locate_pl = 0
link_name = pl[locate_pl].text
while link_name != "Premier League":
    locate_pl += 1
    link_name = pl[locate_pl].text
pl[locate_pl].click()
N = locate_pl + 1

# use N now to find pl matches
time.sleep(2)

# click on first match
button_match = driver.find_element_by_xpath("//div[@id='competitions']/ul[1]/li[{}]/div[1]/table[2]/tbody[1]/tr[2]/td[1]/a[1]".format(N))
teams = driver.find_element_by_xpath("//div[@id='competitions']/ul[1]/li[{}]/div[1]/table[2]/tbody[1]/tr[2]/td[1]/a[1]/b/span".format(N))
button_match.send_keys(Keys.ENTER)
time.sleep(2)

# find and click corners button
try:
    button_corners = WebDriverWait(driver, 10).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "_1ouz2ki")))
    #button_corners = driver.find_elements_by_class_name("_1ouz2ki")
except:
    print("no corners")
n = 0
link_name = button_corners[n].text
while link_name != "Corners":
    n += 1
    link_name = button_corners[n].text
button_corners[n].click()

# Now we will scrape all corner odds for this game.
odds_sections = driver.find_elements_by_class_name('_t0tx82')
N_sections = len(odds_sections)
c = 0
scroll_to = 35
# the issue is within this loop
while c <= N_sections:
    odds_sections = driver.find_elements_by_class_name('_t0tx82')
    section = odds_sections[c]
    print(section.text)
    section.click()
    time.sleep(2)
    section.click()
    c += 1
    driver.execute_script("window.scrollTo(0,{})".format(scroll_to))
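No answer is recorded here, but one pattern that often helps in this situation (a sketch, not from the original post; the '_t0tx82' class is taken from the question and may change) is to scroll each odds section into view just before clicking it and to re-locate the list after every click, instead of scrolling the window to a fixed offset:

from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

def expand_all_sections(driver):
    total = len(driver.find_elements(By.CLASS_NAME, "_t0tx82"))
    for i in range(total):
        # re-locate on every pass: clicking can re-render the page and
        # invalidate previously found elements
        sections = driver.find_elements(By.CLASS_NAME, "_t0tx82")
        if i >= len(sections):
            break
        section = sections[i]
        # centre the section in the viewport so nothing overlaps it when clicked
        driver.execute_script(
            "arguments[0].scrollIntoView({block: 'center'});", section)
        WebDriverWait(driver, 10).until(EC.element_to_be_clickable(section))
        print(section.text)
        section.click()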
