There is a web page that I want to run my scraping script on. However, because the page loads additional content as you scroll down, I need to add a function to my script that scrolls the web page all the way to the bottom before the scraping runs.
In an attempt to achieve this, please find my entire script below, which seems to stop at a height of 5287.
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import csv
import pandas as pd

# Initialize a Chrome browser
driver = webdriver.Chrome("C:.............chromedriver_win32/chromedriver.exe")

# Go to the page we want to scrape
driver.get('https://icodrops.com/category/ended-ico/')

# Open csv file to write in
csv_file = open('icodrops_ended_icos.csv', 'w')
writer = csv.writer(csv_file)
writer.writerow(['Project_Name', 'Interest', 'Category', 'Received', 'Goal', 'End_Date', 'Ticker'])

page_url = 'https://icodrops.com/category/ended-ico/'

# Although only one page to scrape - need to scroll to the bottom to pull all data
lastHeight = driver.execute_script("return document.documentElement.scrollHeight")
print('lastHeight', lastHeight)

while True:
    driver.execute_script(f"window.scrollTo(0, {lastHeight});")
    time.sleep(15)
    #height = driver.execute_script("return document.documentElement.scrollHeight")
    newHeight = driver.execute_script("return document.documentElement.scrollHeight")
    print('newHeight', newHeight)
    if newHeight == lastHeight:
        break
    lastHeight = newHeight

    try:
        # print the url that we are scraping
        print('Scraping this url:' + page_url)
        # Extract a list object where each element of the list is a row in the table
        rows = driver.find_elements_by_xpath('//div[@class="col-md-12 col-12 a_ico"]')
        # Extract detail in columns from each row
        for row in rows:
            # Initialize a dictionary for each row
            row_dict = {}
            # Use relative xpaths to locate desired data
            project_name = row.find_element_by_xpath('.//div[@class="ico-row"]/div[2]/h3/a').text
            interest = row.find_element_by_xpath('.//div[@class="interest"]').text
            category = row.find_element_by_xpath('.//div[@class="categ_type"]').text
            received = row.find_element_by_xpath('.//div[@id="new_column_categ_invisted"]/span').text
            goal = row.find_element_by_xpath('.//div[@id="categ_desctop"]').text
            end_date = row.find_element_by_xpath('.//div[@class="date"]').text
            ticker = row.find_element_by_xpath('.//div[@id="t_tikcer"]').text
            # Add extracted data to the dictionary
            row_dict['project_name'] = project_name
            row_dict['interest'] = interest
            row_dict['category'] = category
            row_dict['received'] = received
            row_dict['goal'] = goal
            row_dict['end_date'] = end_date
            row_dict['ticker'] = ticker
            writer.writerow(row_dict.values())
    except Exception as e:
        print(e)
        csv_file.close()
        driver.close()
        break
Without being able to scroll to the bottom of the page, my script will only scrape data from the initial view, which constitutes only about 10% of what is available.
I always use the piece of code below to scroll to the bottom, and I have never seen it fail.
driver.execute_script("var scrollingElement = (document.scrollingElement || document.body);scrollingElement.scrollTop = scrollingElement.scrollHeight;")
So, your effective code will be:
while True:
    driver.execute_script("var scrollingElement = (document.scrollingElement || document.body);scrollingElement.scrollTop = scrollingElement.scrollHeight;")
    height = driver.execute_script("return document.documentElement.scrollHeight")
    newHeight = driver.execute_script("window.scrollTo(0, " + str(height) + ");")
    time.sleep(15)
    if newHeight == lastHeight:
        break
    lastHeight = newHeight
If you use print() to see the values in those variables, you will see that scrollTo gives None, so you can't use it to get newHeight.
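You can confirm this with a quick check (a minimal sketch; it assumes driver already has a page loaded):

result = driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
print(result)   # None - the script has no return statement, so execute_script() gives None

height = driver.execute_script("return document.body.scrollHeight")
print(height)   # an integer - you have to 'return' the value explicitly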
Minimal working code.
I tested it on the page http://quotes.toscrape.com/scroll, which was created for learning scraping.
from selenium import webdriver
import time

url = 'http://quotes.toscrape.com/scroll'

driver = webdriver.Firefox()
driver.get(url)

lastHeight = driver.execute_script("return document.documentElement.scrollHeight")
print('lastHeight', lastHeight)

while True:
    driver.execute_script(f"window.scrollTo(0, {lastHeight});")
    time.sleep(1)
    newHeight = driver.execute_script("return document.documentElement.scrollHeight")
    print('newHeight', newHeight)
    if newHeight == lastHeight:
        break
    lastHeight = newHeight
BTW:
I found a Stack Overflow answer from 2015 which uses the same method, but with document.body instead of document.documentElement:
How can I scroll a web page using selenium webdriver in python?
So if this code works for you, then this question could be closed as a duplicate.
https://www.forrent.com/apartment-community-profile/1012635
I am trying to parse a web page, such as this one. Selenium returns some of the content of this page, but not all of it. For example, "Professionally Managed by: B & A Associates" is on the web page, but it's not returned in the variable 'content' in the script. Any idea why that is and how to solve this problem?
driver = webdriver.Firefox(executable_path='/home/yliu/repos/funnel_objects/listing_sites/geckodriver')
try:
    driver.set_page_load_timeout(20)
    driver.get(url)
    #WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, "contactHeading")))
    WebDriverWait(driver, 40)
    html = driver.page_source
    content = BeautifulSoup(html, "lxml")
    driver.quit()
    return content
except TimeoutException:
    print('time out from contact')
    return None
That content is a lazy-loaded component; it is only rendered once you have scrolled down to it. So you need a script that scrolls down to the bottom of the page. See the code below.
driver = webdriver.Firefox(executable_path='/home/yliu/repos/funnel_objects/listing_sites/geckodriver')
try:
    driver.set_page_load_timeout(20)
    driver.get(url)
    #WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.ID, "contactHeading")))
    #WebDriverWait(driver, 40)

    SCROLL_PAUSE_TIME = 0.5
    SCROLL_LENGTH = 200
    page_height = int(driver.execute_script("return document.body.scrollHeight"))
    scrollPosition = 0
    while scrollPosition < page_height:
        scrollPosition = scrollPosition + SCROLL_LENGTH
        driver.execute_script("window.scrollTo(0, " + str(scrollPosition) + ");")
        time.sleep(SCROLL_PAUSE_TIME)

    html = driver.page_source
    content = BeautifulSoup(html, "lxml")
    driver.quit()
    return content
except TimeoutException:
    print('time out from contact')
    return None
I am currently using Selenium WebDriver to parse through a Facebook user's friends page and extract all the IDs from the AJAX script. But I need to scroll down to get all the friends. How can I scroll down in Selenium? I am using Python.
You can use
driver.execute_script("window.scrollTo(0, Y)")
where Y is the height (on a full HD monitor it's 1080). (Thanks to @lukeis.)
You can also use
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
to scroll to the bottom of the page.
If you want to scroll down a page with infinite loading, like social network pages, Facebook, etc., use this (thanks to @Cuong Tran):
SCROLL_PAUSE_TIME = 0.5

# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")

while True:
    # Scroll down to bottom
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    # Wait to load page
    time.sleep(SCROLL_PAUSE_TIME)

    # Calculate new scroll height and compare with last scroll height
    new_height = driver.execute_script("return document.body.scrollHeight")
    if new_height == last_height:
        break
    last_height = new_height
Another method (thanks to Juanse) is to select an element and send it a PAGE_DOWN key, shown here in Java syntax:
label.sendKeys(Keys.PAGE_DOWN);
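A Python equivalent would look like the sketch below (it assumes label is a WebElement you have already located, e.g. with driver.find_element):

from selenium.webdriver.common.keys import Keys

# 'label' is any element you located beforehand; sending PAGE_DOWN to it scrolls the page
label.send_keys(Keys.PAGE_DOWN)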
If you want to scroll down to the bottom of an infinite page (like linkedin.com), you can use this code:
SCROLL_PAUSE_TIME = 0.5

# Get scroll height
last_height = driver.execute_script("return document.body.scrollHeight")

while True:
    # Scroll down to bottom
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    # Wait to load page
    time.sleep(SCROLL_PAUSE_TIME)

    # Calculate new scroll height and compare with last scroll height
    new_height = driver.execute_script("return document.body.scrollHeight")
    if new_height == last_height:
        break
    last_height = new_height
Reference: https://stackoverflow.com/a/28928684/1316860
You can use send_keys to simulate an END (or PAGE_DOWN) key press (which normally scrolls the page):
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
html = driver.find_element(By.TAG_NAME, 'html')
html.send_keys(Keys.END)
same method as shown here:
In Python you can just use
driver.execute_script("window.scrollTo(0, Y)")
(Y is the vertical position you want to scroll to)
element = driver.find_element_by_xpath("xpath of the li you are trying to access")
element.location_once_scrolled_into_view
This helped when I was trying to access an 'li' that was not visible.
For my purposes, I wanted to scroll down further while keeping the window's current position in mind. My solution was similar and used window.scrollY:
driver.execute_script("window.scrollTo(0, window.scrollY + 200)")
which scrolls to the current Y scroll position + 200.
This is how you scroll down the webpage:
driver.execute_script("window.scrollTo(0, 1000);")
None of these answers worked for me, at least not for scrolling down a Facebook search results page, but after a lot of testing I found this solution:
while driver.find_element_by_tag_name('div'):
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    Divs = driver.find_element_by_tag_name('div').text
    if 'End of Results' in Divs:
        print('end')
        break
    else:
        continue
The easiest way I found to solve that problem was to select a label and then send (Java syntax):
label.sendKeys(Keys.PAGE_DOWN);
Hope it works!
When working with YouTube, the floating elements give the value "0" as the scroll height, so rather than using "return document.body.scrollHeight", try using "return document.documentElement.scrollHeight" instead.
Adjust the scroll pause time to suit your internet speed; otherwise it will run only once and then break.
SCROLL_PAUSE_TIME = 1

# Get scroll height
"""last_height = driver.execute_script("return document.body.scrollHeight")
this doesn't work due to floating web elements on youtube
"""
last_height = driver.execute_script("return document.documentElement.scrollHeight")

while True:
    # Scroll down to bottom
    driver.execute_script("window.scrollTo(0, document.documentElement.scrollHeight);")

    # Wait to load page
    time.sleep(SCROLL_PAUSE_TIME)

    # Calculate new scroll height and compare with last scroll height
    new_height = driver.execute_script("return document.documentElement.scrollHeight")
    if new_height == last_height:
        print("break")
        break
    last_height = new_height
For scroll-loading pages, for example Medium, Quora, etc.:
last_height = driver.execute_script("return document.body.scrollHeight")

while True:
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight-1000);")
    # Wait to load the page.
    driver.implicitly_wait(30)  # seconds
    new_height = driver.execute_script("return document.body.scrollHeight")

    if new_height == last_height:
        break
    last_height = new_height

# sleep for 30s
driver.implicitly_wait(30)  # seconds
driver.quit()
Here's an example Selenium code snippet that you could use for this kind of purpose. It goes to the URL of the YouTube search results for 'Enumerate python tutorial' and scrolls down until it finds the video with the title 'Enumerate python tutorial(2020).'
driver.get('https://www.youtube.com/results?search_query=enumerate+python')
target = driver.find_element_by_link_text('Enumerate python tutorial(2020).')
target.location_once_scrolled_into_view
This code scrolls to the bottom but doesn't require that you wait each time. It will continually scroll, and then stop at the bottom (or time out).
from selenium import webdriver
import time

driver = webdriver.Chrome(executable_path='chromedriver.exe')
driver.get('https://example.com')

pre_scroll_height = driver.execute_script('return document.body.scrollHeight;')
run_time, max_run_time = 0, 1
while True:
    iteration_start = time.time()
    # Scroll webpage, the 100 allows for a more 'aggressive' scroll
    driver.execute_script('window.scrollTo(0, 100*document.body.scrollHeight);')

    post_scroll_height = driver.execute_script('return document.body.scrollHeight;')

    scrolled = post_scroll_height != pre_scroll_height
    timed_out = run_time >= max_run_time

    if scrolled:
        run_time = 0
        pre_scroll_height = post_scroll_height
    elif not scrolled and not timed_out:
        run_time += time.time() - iteration_start
    elif not scrolled and timed_out:
        break

# closing the driver is optional
driver.close()
This is much faster than waiting 0.5-3 seconds for a response each time, when that response might take only 0.1 seconds.
I was looking for a way of scrolling through a dynamic webpage, and automatically stopping once the end of the page is reached, and found this thread.
The post by @Cuong Tran, with one main modification, was the answer that I was looking for. I thought that others might find the modification helpful (it has a pronounced effect on how the code works), hence this post.
The modification is to move the statement that captures the last page height inside the loop (so that each check is comparing to the previous page height).
So, the code below:
Continuously scrolls down a dynamic webpage (.scrollTo()), only stopping when, for one iteration, the page height stays the same.
(There is another modification, where the break statement is inside another condition (in case the page 'sticks') which can be removed).
SCROLL_PAUSE_TIME = 0.5

while True:
    # Get scroll height
    ### This is the difference. Moving this *inside* the loop
    ### means that it checks if scrollTo is still scrolling
    last_height = driver.execute_script("return document.body.scrollHeight")

    # Scroll down to bottom
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    # Wait to load page
    time.sleep(SCROLL_PAUSE_TIME)

    # Calculate new scroll height and compare with last scroll height
    new_height = driver.execute_script("return document.body.scrollHeight")

    if new_height == last_height:
        # try again (can be removed)
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

        # Wait to load page
        time.sleep(SCROLL_PAUSE_TIME)

        # Calculate new scroll height and compare with last scroll height
        new_height = driver.execute_script("return document.body.scrollHeight")

        # check if the page height has remained the same
        if new_height == last_height:
            # if so, you are done
            break
        # if not, move on to the next loop
        else:
            last_height = new_height
            continue
You can use send_keys to simulate a PAGE_DOWN key press (which normally scrolls the page):
from selenium.webdriver.common.keys import Keys
html = driver.find_element_by_tag_name('html')
html.send_keys(Keys.PAGE_DOWN)
If you want to scroll within a particular view/frame (WebElement), all you need to do is replace "body" with the particular element that you intend to scroll within. I get that element via "getElementById" in the example below:
self.driver.execute_script('window.scrollTo(0, document.getElementById("page-manager").scrollHeight);')
This is the case on YouTube, for example.
The scrollTo() function doesn't work anymore. This is what I used instead, and it worked fine:
driver.execute_script("document.getElementById('mydiv').scrollIntoView();")
According to the docs, the class ActionChains does the job:
from selenium import webdriver
from selenium.webdriver import ActionChains

driver = webdriver.Firefox()
action_chains = ActionChains(driver)
# signature: scroll(x, y, delta_x, delta_y, duration=0, origin='viewport')
# example call (values are illustrative): scroll the wheel 500 px down
action_chains.scroll(0, 0, 0, 500).perform()
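Newer Selenium 4 releases also expose dedicated wheel-scroll helpers. Below is a minimal sketch, assuming Selenium 4.2+ where scroll_by_amount is available, reusing the driver created above:

# Scroll the mouse wheel by 0 px horizontally and 500 px vertically from the viewport origin
ActionChains(driver).scroll_by_amount(0, 500).perform()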
Insert this line:
driver.execute_script("window.scrollBy(0, 925)", "")
The loop using the "send keys" method of scrolling the page:
pre_scroll_height = driver.execute_script('return document.body.scrollHeight;')
while True:
    driver.find_element_by_tag_name('body').send_keys(Keys.END)
    time.sleep(5)
    post_scroll_height = driver.execute_script('return document.body.scrollHeight;')
    print(pre_scroll_height, post_scroll_height)
    if pre_scroll_height == post_scroll_height:
        break
    pre_scroll_height = post_scroll_height
Here is a method I wrote to slowly scroll down to a target element.
You can pass it either the Y position of the element or the element's CSS selector.
It scrolls exactly the way we do with a mouse wheel.
Once this method has been called, you can call it again with the same driver object but a new target element; it will then scroll up or down to wherever that element is.
import time
import random

def slow_scroll_to_element(self, driver, element_selector=None, target_yth_location=None):
    current_scroll_position = int(driver.execute_script("return window.scrollY"))
    if element_selector:
        target_yth_location = int(driver.execute_script(
            "return document.querySelector('{}').getBoundingClientRect()['top'] + window.scrollY".format(element_selector)))
    scrollSpeed = 100 if target_yth_location - current_scroll_position > 0 else -100

    # split the intermediate scroll positions into 3 chunks, pausing briefly between chunks
    def chunks(a, n):
        k, m = divmod(len(a), n)
        return (a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))

    for l in list(chunks(list(range(current_scroll_position, target_yth_location, scrollSpeed))
                         + list([target_yth_location + (-scrollSpeed if scrollSpeed > 0 else scrollSpeed)]), 3)):
        for pos in l:
            driver.execute_script("window.scrollTo(0, " + str(pos) + ");")
            time.sleep(0.1)
        time.sleep(random.randint(1, 3))
driver.execute_script("document.getElementById('your ID Element').scrollIntoView();")
It works for my case.
Just a small variation of the solutions provided so far: sometimes in scraping you have to meet the following requirements:
Keep scrolling step by step. Otherwise, if you always jump straight to the bottom, some elements are loaded only as containers/divs and their content is never loaded, because they were never visible;
Allow enough time for content to be loaded;
It's not an infinite scroll page, there is an end and you have to identify when the end is reached;
Here is a simple implementation:
from time import sleep

def keep_scrolling_to_the_bottom():
    while True:
        previous_scrollY = my_web_driver.execute_script('return window.scrollY')
        my_web_driver.execute_script('window.scrollBy(0, 230)')
        sleep(0.4)
        if previous_scrollY == my_web_driver.execute_script('return window.scrollY'):
            print('job done, reached the bottom!')
            break
Tested and working on Windows 7 x64, Python 3.8.0, selenium 4.1.3, Google Chrome 107.0.5304.107, website for property rent.
Scroll to an element: find the element and then scroll to it using this code:
scroll_element = driver.find_element(By.XPATH, "your element xpath")
driver.execute_script("arguments[0].scrollIntoView();", scroll_element)