How can I scroll a web page using Selenium WebDriver in Python?

I am currently using Selenium WebDriver to parse through a Facebook user's friends page and extract all IDs from the AJAX script. But I need to scroll down to load all the friends. How can I scroll down in Selenium? I am using Python.

You can use
driver.execute_script("window.scrollTo(0, Y)")
where Y is the height (on a full HD monitor it's 1080). (Thanks to @lukeis.)
You can also use
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
to scroll to the bottom of the page.
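For context, a minimal runnable sketch of the same call (the URL is a stand-in):

from selenium import webdriver

driver = webdriver.Chrome()  # assumes chromedriver is on your PATH
driver.get("https://example.com")  # hypothetical page
# jump straight to the bottom of the document
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
driver.quit()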
If you want to scroll a page with infinite loading, like social-network pages (Facebook etc.), you can use this (thanks to @Cuong Tran):
import time

SCROLL_PAUSE_TIME = 0.5

# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")

while True:
    # Scroll down to bottom
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    # Wait to load page
    time.sleep(SCROLL_PAUSE_TIME)

    # Calculate new scroll height and compare with last scroll height
    new_height = driver.execute_script("return document.body.scrollHeight")
    if new_height == last_height:
        break
    last_height = new_height
Another method (thanks to Juanse) is to select an element and send it a page-down key (Java syntax):
label.sendKeys(Keys.PAGE_DOWN);
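The same idea in Python (note send_keys rather than sendKeys), assuming label is an element you have already located:

from selenium.webdriver.common.keys import Keys

# 'label' is any element located earlier, e.g. via driver.find_element
label.send_keys(Keys.PAGE_DOWN)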

If you want to scroll down to the bottom of an infinite page (like linkedin.com), you can use this code:
import time

SCROLL_PAUSE_TIME = 0.5

# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")

while True:
    # Scroll down to bottom
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    # Wait to load page
    time.sleep(SCROLL_PAUSE_TIME)

    # Calculate new scroll height and compare with last scroll height
    new_height = driver.execute_script("return document.body.scrollHeight")
    if new_height == last_height:
        break
    last_height = new_height
Reference: https://stackoverflow.com/a/28928684/1316860

You can use send_keys to simulate an END (or PAGE_DOWN) key press (which normally scrolls the page):
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
html = driver.find_element(By.TAG_NAME, 'html')
html.send_keys(Keys.END)

The same method as shown above; in Python you can just use
driver.execute_script("window.scrollTo(0, Y)")
(Y is the vertical position you want to scroll to)

element = driver.find_element_by_xpath("xpath of the li you are trying to access")
element.location_once_scrolled_into_view
This helped when I was trying to access an 'li' that was not visible.
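For illustration, a fuller sketch of this approach (the XPath is hypothetical):

# hypothetical XPath for an off-screen list item
element = driver.find_element_by_xpath("//ul/li[20]")
# reading this property scrolls the element into the viewport as a side effect
element.location_once_scrolled_into_view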

For my purpose, I wanted to scroll down more while keeping the window's current position in mind. My solution was similar, using window.scrollY:
driver.execute_script("window.scrollTo(0, window.scrollY + 200)")
which goes to the current Y scroll position + 200.
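This also lends itself to stepping through a page in fixed increments, e.g. (the step count and pause are arbitrary choices):

import time

for _ in range(10):  # number of steps is arbitrary
    driver.execute_script("window.scrollTo(0, window.scrollY + 200)")
    time.sleep(0.2)  # give lazily loaded content a moment to appear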

This is how you scroll down the webpage:
driver.execute_script("window.scrollTo(0, 1000);")

None of these answers worked for me, at least not for scrolling down a Facebook search results page, but after a lot of testing I found this solution:

while driver.find_element_by_tag_name('div'):
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    Divs = driver.find_element_by_tag_name('div').text
    if 'End of Results' in Divs:
        print('end')
        break
    else:
        continue

The easiest way I found to solve this problem was to select a label and then send:
label.send_keys(Keys.PAGE_DOWN)
Hope it works!

When working with YouTube, the floating elements give "0" as the scroll height, so rather than using "return document.body.scrollHeight" try "return document.documentElement.scrollHeight". Adjust the scroll pause time to your internet speed; otherwise the loop will run only once and then break.

import time

SCROLL_PAUSE_TIME = 1

# Get scroll height
# last_height = driver.execute_script("return document.body.scrollHeight")
# (this doesn't work here due to floating web elements on YouTube)
last_height = driver.execute_script("return document.documentElement.scrollHeight")

while True:
    # Scroll down to bottom
    driver.execute_script("window.scrollTo(0, document.documentElement.scrollHeight);")

    # Wait to load page
    time.sleep(SCROLL_PAUSE_TIME)

    # Calculate new scroll height and compare with last scroll height
    new_height = driver.execute_script("return document.documentElement.scrollHeight")
    if new_height == last_height:
        print("break")
        break
    last_height = new_height

For scroll-loading pages (e.g. Medium, Quora):

import time

last_height = driver.execute_script("return document.body.scrollHeight")
while True:
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight-1000);")
    # Wait for the page to load. Note that implicitly_wait() only sets a
    # timeout for element lookups; use time.sleep() for an actual pause.
    time.sleep(30)
    new_height = driver.execute_script("return document.body.scrollHeight")
    if new_height == last_height:
        break
    last_height = new_height

driver.quit()

Here's an example Selenium snippet you could use for this kind of task. It goes to the URL for YouTube search results on 'Enumerate python tutorial' and scrolls down until it finds the video with the title 'Enumerate python tutorial(2020).':
driver.get('https://www.youtube.com/results?search_query=enumerate+python')
target = driver.find_element_by_link_text('Enumerate python tutorial(2020).')
target.location_once_scrolled_into_view

This code scrolls to the bottom without requiring that you wait a fixed time on each iteration. It continually scrolls, then stops at the bottom (or on timeout):

from selenium import webdriver
import time

driver = webdriver.Chrome(executable_path='chromedriver.exe')
driver.get('https://example.com')

pre_scroll_height = driver.execute_script('return document.body.scrollHeight;')
run_time, max_run_time = 0, 1
while True:
    iteration_start = time.time()
    # Scroll webpage; the 100 allows for a more 'aggressive' scroll
    driver.execute_script('window.scrollTo(0, 100*document.body.scrollHeight);')

    post_scroll_height = driver.execute_script('return document.body.scrollHeight;')

    scrolled = post_scroll_height != pre_scroll_height
    timed_out = run_time >= max_run_time

    if scrolled:
        run_time = 0
        pre_scroll_height = post_scroll_height
    elif not timed_out:
        run_time += time.time() - iteration_start
    else:
        break

# closing the driver is optional
driver.close()

This is much faster than waiting 0.5-3 seconds for each response, when that response could take 0.1 seconds.

I was looking for a way of scrolling through a dynamic webpage and automatically stopping once the end of the page is reached, and found this thread.
The post by @Cuong Tran, with one main modification, was the answer I was looking for. I thought others might find the modification helpful (it has a pronounced effect on how the code works), hence this post.
The modification is to move the statement that captures the last page height inside the loop (so that each check compares against the previous page height).
So, the code below continuously scrolls down a dynamic webpage (.scrollTo()), stopping only when, for one iteration, the page height stays the same.
(There is another modification, where the break statement is inside another condition, in case the page 'sticks', which can be removed.)
import time

SCROLL_PAUSE_TIME = 0.5

while True:
    # Get scroll height
    ### This is the difference. Moving this *inside* the loop
    ### means that it checks if scrollTo is still scrolling
    last_height = driver.execute_script("return document.body.scrollHeight")

    # Scroll down to bottom
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    # Wait to load page
    time.sleep(SCROLL_PAUSE_TIME)

    # Calculate new scroll height and compare with last scroll height
    new_height = driver.execute_script("return document.body.scrollHeight")
    if new_height == last_height:
        # try again (can be removed)
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

        # Wait to load page
        time.sleep(SCROLL_PAUSE_TIME)

        # Calculate new scroll height and compare with last scroll height
        new_height = driver.execute_script("return document.body.scrollHeight")

        # check if the page height has remained the same
        if new_height == last_height:
            # if so, you are done
            break
        # if not, move on to the next loop
        else:
            last_height = new_height
            continue

You can use send_keys to simulate a PAGE_DOWN key press (which normally scrolls the page):
from selenium.webdriver.common.keys import Keys
html = driver.find_element_by_tag_name('html')
html.send_keys(Keys.PAGE_DOWN)

If you want to scroll within a particular view/frame (WebElement), all you need to do is replace "body" with the particular element you intend to scroll within. I get that element via "getElementById" in the example below:
self.driver.execute_script('window.scrollTo(0, document.getElementById("page-manager").scrollHeight);')
This is the case on YouTube, for example...
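For a genuinely scrollable inner container (e.g. a chat pane), you would set the element's own scrollTop rather than calling window.scrollTo; a sketch with a hypothetical selector:

import time

# hypothetical CSS selector for the scrollable container
sel = "div.scrollable-pane"
last_height = driver.execute_script(
    "return document.querySelector('%s').scrollHeight" % sel)
while True:
    # scroll the container itself, not the window
    driver.execute_script(
        "var el = document.querySelector('%s'); el.scrollTop = el.scrollHeight;" % sel)
    time.sleep(1)
    new_height = driver.execute_script(
        "return document.querySelector('%s').scrollHeight" % sel)
    if new_height == last_height:
        break
    last_height = new_height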

The scrollTo() approach didn't work in my case. This is what I used, and it worked fine:
driver.execute_script("document.getElementById('mydiv').scrollIntoView();")

According to the docs, the ActionChains class does the job:

from selenium import webdriver
from selenium.webdriver import ActionChains

driver = webdriver.Firefox()
action_chains = ActionChains(driver)
# signature: scroll(x, y, delta_x, delta_y, duration=0, origin='viewport')
action_chains.scroll(0, 0, 0, 500).perform()
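Recent Selenium 4 releases also provide scroll_by_amount and scroll_to_element on ActionChains, which are usually more convenient; a brief sketch:

from selenium.webdriver import ActionChains

# scroll the viewport down by 500 pixels
ActionChains(driver).scroll_by_amount(0, 500).perform()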

Insert this line:
driver.execute_script("window.scrollBy(0, 925)")

The loop using the "send keys" method of scrolling the page:

import time
from selenium.webdriver.common.keys import Keys

pre_scroll_height = driver.execute_script('return document.body.scrollHeight;')
while True:
    driver.find_element_by_tag_name('body').send_keys(Keys.END)
    time.sleep(5)
    post_scroll_height = driver.execute_script('return document.body.scrollHeight;')
    print(pre_scroll_height, post_scroll_height)
    if pre_scroll_height == post_scroll_height:
        break
    pre_scroll_height = post_scroll_height

Here is a method I wrote to slowly scroll down to a target element. You can pass it either the Y position of the element or a CSS selector. It scrolls much like we do via the mouse wheel. Once this method has been called, you can call it again with the same driver object but a new target element, and it will scroll up/down to wherever that element is.

import time
import random

def slow_scroll_to_element(self, driver, element_selector=None, target_yth_location=None):
    current_scroll_position = int(driver.execute_script("return window.scrollY"))
    if element_selector:
        target_yth_location = int(driver.execute_script(
            "return document.querySelector('{}').getBoundingClientRect()['top'] + window.scrollY".format(element_selector)))
    scrollSpeed = 100 if target_yth_location - current_scroll_position > 0 else -100

    def chunks(a, n):
        # split list a into n roughly equal chunks
        k, m = divmod(len(a), n)
        return (a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))

    for l in chunks(list(range(current_scroll_position, target_yth_location, scrollSpeed))
                    + [target_yth_location + (-scrollSpeed if scrollSpeed > 0 else scrollSpeed)], 3):
        for pos in l:
            driver.execute_script("window.scrollTo(0, " + str(pos) + ");")
            time.sleep(0.1)
        time.sleep(random.randint(1, 3))

driver.execute_script("document.getElementById('your ID Element').scrollIntoView();")
It works in my case.

Just a small variation on the solutions provided so far: sometimes in scraping you have to meet the following requirements:

- Keep scrolling step by step; otherwise, if you always jump straight to the bottom, some elements load only as containers/divs and their content never loads, because it was never visible.
- Allow enough time for content to load.
- It's not an infinite-scroll page; there is an end, and you have to identify when that end is reached.

Here is a simple implementation:

from time import sleep

def keep_scrolling_to_the_bottom():
    while True:
        previous_scrollY = my_web_driver.execute_script('return window.scrollY')
        my_web_driver.execute_script('window.scrollBy( 0, 230 )')
        sleep(0.4)
        if previous_scrollY == my_web_driver.execute_script('return window.scrollY'):
            print('job done, reached the bottom!')
            break

Tested and working on Windows 7 x64, Python 3.8.0, Selenium 4.1.3, Google Chrome 107.0.5304.107, on a property-rental website.

Scroll to an element: Find the element and scroll using this code.
scroll_element = driver.find_element(By.XPATH, "your element xpath")
driver.execute_script("arguments[0].scrollIntoView();", scroll_element)

Related

Selenium Not Locating ALL elements with specific class name

I'm creating a web crawler for Zillow in order to practice using Selenium. All I'm trying to do is get the price, address, and link for each home, but when I use find_elements_by_class_name() or find_elements_by_css_selector(), it only finds the first 9 elements when there are many more.
Normally my Selenium code works fine. Does anyone know why this occurs?
from selenium import webdriver
import time
zillow_url = "https://www.zillow.com/manhattan-new-york-ny/houses/?searchQueryState=%7B%22pagination%22%3A%7B%7D%2C%22usersSearchTerm%22%3A%22Manhattan%2C%20New%20York%2C%20NY%22%2C%22mapBounds%22%3A%7B%22west%22%3A-74.21047920019531%2C%22east%22%3A-73.73669379980468%2C%22south%22%3A40.626191262639644%2C%22north%22%3A40.933477919520115%7D%2C%22regionSelection%22%3A%5B%7B%22regionId%22%3A12530%2C%22regionType%22%3A17%7D%5D%2C%22isMapVisible%22%3Atrue%2C%22filterState%22%3A%7B%22ah%22%3A%7B%22value%22%3Atrue%7D%2C%22beds%22%3A%7B%22min%22%3A0%2C%22max%22%3A0%7D%2C%22price%22%3A%7B%22max%22%3A400000%7D%2C%22mp%22%3A%7B%22max%22%3A1300%7D%7D%2C%22isListVisible%22%3Atrue%2C%22mapZoom%22%3A11%7D"
address = "My chrome driver address"
driver = webdriver.Chrome(executable_path=address)
driver.get(zillow_url)
time.sleep(2)
prices = driver.find_elements_by_class_name("list-card-price")
addresses = driver.find_elements_by_class_name("list-card-addr")
links = driver.find_elements_by_class_name("list-card-link")
Try this.
from selenium import webdriver
import time
zillow_url = "https://www.zillow.com/manhattan-new-york-ny/houses/?searchQueryState=%7B%22pagination%22%3A%7B%7D%2C%22usersSearchTerm%22%3A%22Manhattan%2C%20New%20York%2C%20NY%22%2C%22mapBounds%22%3A%7B%22west%22%3A-74.21047920019531%2C%22east%22%3A-73.73669379980468%2C%22south%22%3A40.626191262639644%2C%22north%22%3A40.933477919520115%7D%2C%22regionSelection%22%3A%5B%7B%22regionId%22%3A12530%2C%22regionType%22%3A17%7D%5D%2C%22isMapVisible%22%3Atrue%2C%22filterState%22%3A%7B%22ah%22%3A%7B%22value%22%3Atrue%7D%2C%22beds%22%3A%7B%22min%22%3A0%2C%22max%22%3A0%7D%2C%22price%22%3A%7B%22max%22%3A400000%7D%2C%22mp%22%3A%7B%22max%22%3A1300%7D%7D%2C%22isListVisible%22%3Atrue%2C%22mapZoom%22%3A11%7D"
address = "My chrome driver address"
driver = webdriver.Chrome(executable_path=address)
driver.get(zillow_url)
prices = []
addresses = []
links = []
time.sleep(2)
SCROLL_PAUSE_TIME = 0.5
# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")
MAX_LISTINGS = 50  # set this to the number of houses you want to scrape
while len(prices) <= MAX_LISTINGS:
    # Scroll down to bottom
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    prices = driver.find_elements_by_class_name("list-card-price")
    addresses = driver.find_elements_by_class_name("list-card-addr")
    links = driver.find_elements_by_class_name("list-card-link")

    # Wait to load page
    time.sleep(SCROLL_PAUSE_TIME)

    # Calculate new scroll height and compare with last scroll height
    new_height = driver.execute_script("return document.body.scrollHeight")
    if new_height == last_height:
        break
    last_height = new_height

The loop condition is simply len(prices) <= the number of houses you want to scrape (MAX_LISTINGS above).

Fix: Lazy loading in Selenium Python

Python is selecting just 18 elements out of a possible 100 due to lazy loading: on page load only 18 are loaded, and on scrolling new elements are loaded while the previously loaded ones are removed. How can I fix this so that all 100 elements are stored in the persons list and each of them gets clicked?
DOM element structure of the page
d = 0
itr = 1
time.sleep(17)
while True:
    persons = browser.find_elements_by_xpath("//*[@class='i-edit mrs no-text-decoration ember-view']")
    print(len(persons))
    for i, person in zip(names, persons):
        time.sleep(4)
        persons = browser.find_elements_by_xpath("//*[@class='i-edit mrs no-text-decoration ember-view']")
        if d >= len(persons):
            break
        i = names[d]
        person = persons[d]
        browser.execute_script("arguments[0].scrollIntoView(true);", person)
        time.sleep(3)
        person.click()
To click all the elements on a page where new elements load after scrolling down and old elements disappear:
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

while True:
    # Get screen height upon page loading for the first time
    last_height = browser.execute_script("return document.body.scrollHeight")

    # Get the list of elements
    persons = browser.find_elements_by_xpath("//*[@class='i-edit mrs no-text-decoration ember-view']")

    # Loop through all elements and click on them
    for i in range(1, len(persons) + 1):
        # The page reloads after every click, so get a fresh reference to the
        # element each time to avoid a stale element exception
        linkXpath = "(//*[@class='i-edit mrs no-text-decoration ember-view'])[" + str(i) + "]"
        WebDriverWait(browser, 30).until(EC.element_to_be_clickable((By.XPATH, linkXpath))).click()

    # Scroll down to bottom
    browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(2)

    # Get new height of screen
    new_height = browser.execute_script("return document.body.scrollHeight")
    if new_height == last_height:
        break
    last_height = new_height

Scraping the names of the accounts who liked a post on Instagram, using Python and Selenium, but only 11 names returned

I am trying to scrape the names of the accounts who liked a post on Instagram, using Selenium and Python.
https://www.instagram.com/p/B57dJp3gIGw/
There was no error returned, and I scraped successfully, but only the top 11 names of those who liked the post were returned, while 40 people liked it. I am wondering what the reason is, and how I can fix it.
liker_list = []
likers = driver.find_elements_by_class_name("qyrsm")
for n in likers:
    # scrape the name of each liker
    liker = n.find_element_by_class_name("_4EzTm").get_attribute("textContent")
    liker_list.append(liker)
print(liker_list)

Here is the result (the liker_list):
['rycmtn', 'asat0oo', 'misswanderwolf', 'renkuga0202', 'na_na972', 'natsu_5550', 'hachibayinternational_inc', 'mi.kyoung.jeon', 'crane42195', 'michi___kusa', 'ankhcarpediem']
Most likely the page needs to be scrolled down to view all the likes and scrape them.
The chunk to scroll infinitely is below; you can add your scraping code inside the loop to collect the data while it scrolls.
import time

def scroll():
    SCROLL_PAUSE_TIME = 1

    # Get scroll height
    last_height = driver.execute_script("return document.body.scrollHeight")

    while True:
        # Scroll down to bottom
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

        # Wait to load page
        time.sleep(SCROLL_PAUSE_TIME)

        # Calculate new scroll height and compare with last scroll height
        new_height = driver.execute_script("return document.body.scrollHeight")
        if new_height == last_height:
            break
        last_height = new_height

scroll()
Add your own code after the time.sleep(SCROLL_PAUSE_TIME) line.
Hope this helps.
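Combined with the scraping code from the question, that could look roughly like this (the class names are the ones from the question and may well be stale by now):

import time

def scroll_and_scrape(driver, pause=1):
    liker_list = []
    last_height = driver.execute_script("return document.body.scrollHeight")
    while True:
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(pause)
        # scrape whatever is currently visible, skipping duplicates
        for n in driver.find_elements_by_class_name("qyrsm"):
            liker = n.find_element_by_class_name("_4EzTm").get_attribute("textContent")
            if liker not in liker_list:
                liker_list.append(liker)
        new_height = driver.execute_script("return document.body.scrollHeight")
        if new_height == last_height:
            break
        last_height = new_height
    return liker_list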

How to scrape in the form of a table so lists become even

I am using Selenium WebDriver (Python 3.0) to scrape data off this website. All the data is scraped correctly; however, it is scraped as flat lists, meaning there are 127 teams and odds but 129 hrefs. Unfortunately, this means each href is not beside the correct team and odds in the Excel file. Is there a way to get around this?
I have attached screenshots, and my code is below. Is there a way to adjust this to scrape as a table, so that it knows not to scrape an href where no team and odds are visible? I am migrating from WinAutomation, which had this feature, to Selenium.
https://ibb.co/kMC0mk - Picture showing why the Href is not beside Team and odds
https://ibb.co/hh4rsQ - What the Excel looks like.
import time
from selenium import webdriver

driver = webdriver.Chrome(executable_path=r'C:\ad\chromedriver.exe')
driver.set_window_size(1024, 600)
driver.maximize_window()
driver.get('https://www.bluebet.com.au/sports/Soccer/100')

SCROLL_PAUSE_TIME = 0.5

# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")

while True:
    # Scroll down to bottom
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    # Wait to load page
    time.sleep(SCROLL_PAUSE_TIME)

    # Calculate new scroll height and compare with last scroll height
    new_height = driver.execute_script("return document.body.scrollHeight")
    if new_height == last_height:
        break
    last_height = new_height

time.sleep(15)

# Odds
langs = driver.find_elements_by_css_selector(".table-grid__row:nth-child(1) .headline-wrap")
for lang in langs:
    print(lang.text)

time.sleep(10)

# Link
langs = driver.find_elements_by_css_selector("div.ctr--epsilon.soft > a[href*='/sports/Soccer/']")
for lang in langs:
    print(lang.get_attribute('href'))

time.sleep(10)

# Team
langs = driver.find_elements_by_css_selector(".table-grid__row:nth-child(1) .place-bet__odds")
for lang in langs:
    print(lang.text)
I agree with @ChellWheatly; I couldn't find a way to do this with CSS.
Try this XPath selector to scrape only the hrefs that have content:
//a/ancestor::div[contains(@class, 'table-grid')]/preceding-sibling::div[contains(@class, 'ctr--epsilon')]//a
(I've tested this on the real page with this Chrome extension.)
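In Python that selector could be dropped into the existing script roughly like this (an untested sketch against the live page):

links = driver.find_elements_by_xpath(
    "//a/ancestor::div[contains(@class, 'table-grid')]"
    "/preceding-sibling::div[contains(@class, 'ctr--epsilon')]//a")
for link in links:
    print(link.get_attribute('href'))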

How to check if further `scroll down` is not possible using Selenium

I am using Selenium + Python to scrape a page which has infinite scroll (basically, scroll until at most the first 500 results are shown).
Using the code below, I am able to scroll to the bottom of the page. Now I want to stop when further scrolling doesn't fetch any content (say the page only has 200 results; I don't want to keep scrolling on the assumption of a 500-result maximum).
driver = webdriver.Firefox()
driver.get(url)
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
I tried accessing window.pageYOffset, but it always comes back as None.
I'm using Selenium with Chrome, not Firefox, but the following worked for me:

- capture the page height before scrolling down;
- scroll down using a key press;
- capture the page height after scrolling down;
- if the page height was the same before and after scrolling, stop scrolling.

My code looks like this:
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys

driver = webdriver.Chrome()
driver.get("https://www.yourTargetURL.com")  # placeholder URL

reached_page_end = False
last_height = driver.execute_script("return document.body.scrollHeight")

while not reached_page_end:
    driver.find_element_by_xpath('//body').send_keys(Keys.END)
    time.sleep(2)
    new_height = driver.execute_script("return document.body.scrollHeight")
    if last_height == new_height:
        reached_page_end = True
    else:
        last_height = new_height

driver.quit()
You can check document.body.scrollTop before and after each scroll attempt; if there is no more data to fetch, the value will stay the same:
distanceToTop = driver.execute_script("return document.body.scrollTop;")
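A sketch of that check in a loop (the pause length is arbitrary; note that on standards-mode pages document.documentElement.scrollTop may be the value that actually changes):

import time

while True:
    before = driver.execute_script("return document.body.scrollTop;")
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    time.sleep(1)
    after = driver.execute_script("return document.body.scrollTop;")
    if before == after:
        break  # nothing more to fetch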
Just in case someone is using Playwright: this code snippet is very similar to the ATJ answer above.

import time
from playwright.sync_api import sync_playwright

def run(playwright):
    page = playwright.chromium.launch(headless=False).new_page()
    page.goto("URL")

    reached_end = False
    last_height = page.evaluate("() => document.body.scrollHeight")  # e.g. scrollHeight: 5879

    while not reached_end:
        page.keyboard.press("End")
        time.sleep(2)
        new_height = page.evaluate("() => document.body.scrollHeight")
        if new_height == last_height:
            reached_end = True
        else:
            last_height = new_height

    page.close()

with sync_playwright() as playwright:
    run(playwright)
We can use a hard count while scrolling, and as soon as we reach that max count we break out of the loop (Java):

int b = 0;
boolean x = true;
while (x) {
    WebElement button = null;
    try {
        button = driver.findElement(By.xpath("//*[@id='vjs_video_3']/div[7]/div[1]/button[1]"));
        x = false;
    } catch (Exception ex) {
        JavascriptExecutor js = (JavascriptExecutor) driver;
        js.executeScript("javascript:window.scrollBy(50, 80)");
        try {
            Thread.sleep(500);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
        js.executeScript("javascript:window.scrollBy(50, 50)");
        b++;
        System.out.println("\n" + b);
        if (b > 50) {
            System.out.println("out!");
            break;
        }
        // js.executeScript("javascript:window.scrollBy(50, 180)");
        // Thread.sleep(1000);
        // js.executeScript("javascript:window.scrollBy(50, 150)"); // button is missing
    }
}
