Scrapy crawl every link after authentication - python

Introduction
Since my main crawler is more or less finished, I now need to redo a crawler which only crawls a whole domain for links; I need this for my work.
The spider which crawls every link should run once per month.
I'm running Scrapy 2.4.0 and my OS is Ubuntu Server 18.04 LTS.
Problem
The website I have to crawl changed its privacy settings, so you have to be logged in before you can see the products, which is why my link crawler won't work anymore.
I already managed to log in and scrape all my stuff, but the start_urls were given in a CSV file.
Code
import scrapy
from ..items import DuifItem
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.http import FormRequest, Request
from scrapy_splash import SplashRequest


class DuifLinkSpider(CrawlSpider):
    name = 'duiflink'
    allowed_domains = ['duif.nl']
    login_page = 'https://www.duif.nl/login'
    start_urls = ['https://www.duif.nl']
    custom_settings = {'FEED_EXPORT_FIELDS': ['Link']}

    def start_requests(self):
        yield SplashRequest(
            url=self.login_page,
            callback=self.parse_login,
            args={'wait': 3},
            dont_filter=True
        )

    rules = (
        Rule(LinkExtractor(deny='https://www.duif.nl/nl/'), callback='parse_login', follow=True),
    )

    def parse_login(self, response):
        return FormRequest.from_response(
            response,
            formid='login-form',
            formdata={
                'username': 'not real',
                'password': 'login data'},
            clickdata={'type': 'submit'},
            callback=self.after_login)

    def after_login(self, response):
        accview = response.xpath('//ul[@class="nav navbar-nav navbar-secondary navbar-right"]//a/@href')[13]
        if accview:
            print('success')
        else:
            print(':(')

        for url in self.start_urls:
            yield response.follow(url=url, callback=self.search_links)

    def search_links(self, response):
        link = response.xpath('//ul[@class="nav navbar-nav navbar-secondary navbar-right"]/li/a/@href').get()
        for a in link:
            link = response.url
            yield response.follow(url=link, callback=self.parse_page)

    def parse_page(self, response):
        productpage = response.xpath('//div[@class="product-details col-md-12"]')
        if not productpage:
            print('No productlink', response.url)

        for a in productpage:
            items = DuifItem()
            items['Link'] = response.url
            yield items
Unfortunately I can't provide a dummy account where you could try the login yourself, because it's a B2B service website.
I suspect that my "def search_links" is wrong (a rough sketch of the intended step follows the outline below).
My planned structure is:
visit login_page and pass my login credentials
check via XPath whether I'm logged in, i.e. whether the logout button is present
if logged in, print 'success'
starting from the links found by the XPath expression, follow every link
on every visited link, check via an XPath expression whether a specific container is present, so the spider knows whether it is a product page or not
if it is a product page, save the visited link; if not, take the next link
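A rough sketch of what that search_links step could look like, assuming the goal is simply to follow every link on the page (the product-page callback comes from the code above, everything else is illustrative):
def search_links(self, response):
    # getall() returns every matching href as a list of strings,
    # whereas .get() only returns the first match
    for href in response.xpath('//a/@href').getall():
        # response.follow resolves relative URLs against response.url,
        # so no manual 'https://www.duif.nl' prefix is needed
        yield response.follow(href, callback=self.parse_page)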
Console output
As you can see, the authentication is working, but it won't do anything afterwards.
Update
I reworked my code a little bit:
import scrapy
from ..items import DuifItem
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.http import FormRequest, Request
from scrapy_splash import SplashRequest


class DuifLinkSpider(CrawlSpider):
    name = 'duiflink'
    allowed_domains = ['duif.nl']
    login_page = 'https://www.duif.nl/login'
    start_urls = ['https://www.duif.nl/']
    custom_settings = {'FEED_EXPORT_FIELDS': ['Link']}

    def start_requests(self):
        yield SplashRequest(
            url=self.login_page,
            callback=self.parse_login,
            args={'wait': 3},
            dont_filter=True
        )

    rules = (
        Rule(LinkExtractor(), callback='parse_login', follow=True),
    )

    def parse_login(self, response):
        return FormRequest.from_response(
            response,
            formid='login-form',
            formdata={
                'username': 'not real',
                'password': 'login data'},
            clickdata={'type': 'submit'},
            callback=self.after_login)

    def after_login(self, response):
        accview = response.xpath('//ul[@class="nav navbar-nav navbar-secondary navbar-right"]//a/@href')[13]
        if accview:
            print('success')
        else:
            print(':(')

        for url in self.start_urls:
            yield response.follow(url=url, callback=self.search_links, dont_filter=True)

    def search_links(self, response):
        # link = response.xpath('//ul[@class="nav navbar-nav navbar-secondary navbar-right"]/li/a/@href')
        link = response.xpath('//a/@href')
        for a in link:
            link = a.get()
            link = 'https://www.duif.nl' + link if link else link
            yield response.follow(url=link, callback=self.parse_page, dont_filter=True)

    def parse_page(self, response):
        productpage = response.xpath('//div[@class="product-details col-md-12"]')
        if not productpage:
            print('No productlink', response.url)

        for a in productpage:
            items = DuifItem()
            items['Link'] = response.url
            yield items
Now I know that I am definitely logged in, but it doesn't follow the "sub" links, even though I thought that if I used response.xpath('//a/@href') it would automatically search the whole DOM for every link.
Below is my new console output.

After you log in, you go back to parsing your start URL. Scrapy filters out duplicate requests by default, so in your case it stops there. You can avoid this by passing dont_filter=True in your request, like this:
yield response.follow(url=url, callback=self.search_links, dont_filter=True)
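In context, the fix could look roughly like this (a minimal sketch based on the after_login method from the question; the logout-link check is simplified and only meant as an illustration):
def after_login(self, response):
    # crude login check: assume a logout link is only present for logged-in users
    logged_in = response.xpath('//a[contains(@href, "logout")]/@href').get()
    print('success' if logged_in else ':(')

    for url in self.start_urls:
        # the start URL was already requested before the login flow,
        # so without dont_filter=True the dupe filter drops this request
        yield response.follow(url, callback=self.search_links, dont_filter=True)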

Related

Scrapy to login and then grab data from Weibo

I am still trying to use Scrapy to collect data from pages on Weibo that require login to access.
I now understand that I need to use Scrapy FormRequest to get the login cookie. I have updated my spider to try to make it do this, but it still isn't working.
Can anybody tell me what I am doing wrong?
import scrapy


class LoginSpider(scrapy.Spider):
    name = 'WB'

    def start_requests(self):
        return [
            scrapy.Request("https://www.weibo.com/u/2247704362/home?wvr=5&lf=reg", callback=self.parse_item)
        ]

    def parse_item(self, response):
        return scrapy.FormRequest.from_response(response, formdata={'user': 'user', 'pass': 'pass'}, callback=self.parse)

    def parse(self, response):
        print(response.body)
When I run this spider, Scrapy redirects from the URL under start_requests and then returns the following error:
ValueError: No element found in <200 https://passport.weibo.com/visitor/visitor?entry=miniblog&a=enter&url=https%3A%2F%2Fweibo.com%2Fu%2F2247704362%2Fhome%3Fwvr%3D5%26lf%3Dreg&domain=.weibo.com&ua=php-sso_sdk_client-0.6.28&_rand=1585243156.3952>
Does that mean I need to get the spider to look for something other than form data on the original page? How do I tell it to look for the cookie?
I have also tried a spider like the one below, based on this post.
import scrapy


class LoginSpider(scrapy.Spider):
    name = 'WB'
    login_url = "https://www.weibo.com/overseas"
    test_url = 'https://www.weibo.com/u/2247704362/'

    def start_requests(self):
        yield scrapy.Request(url=self.login_url, callback=self.parse_login)

    def parse_login(self, response):
        return scrapy.FormRequest.from_response(response, formid="W_login_form", formdata={"loginname": "XXXXX", "password": "XXXXX"}, callback=self.start_crawl)

    def start_crawl(self, response):
        # scrapy.Request instead of the bare Request, which was never imported
        yield scrapy.Request(self.test_url, callback=self.parse_item)

    def parse_item(self, response):
        print("Test URL " + response.url)
But it still doesn't work, giving the error:
ValueError: No element found in <200 https://www.weibo.com/overseas>
I would really appreciate any help anybody can offer, as this is kind of beyond my range of knowledge.

Scrapy spider outputs empty CSV file

This is my first question here and I'm learning how to code by myself so please bear with me.
I'm working on a final CS50 project for which I'm trying to build a website that aggregates online Spanish courses from edx.org and maybe other open online course websites. I'm using the Scrapy framework to scrape the filtered results of Spanish courses on edx.org. Here is my first Scrapy spider, with which I'm trying to visit each course's link and then get its name (after I get the code right, I'll also get the description, course URL and more).
from scrapy.item import Field, Item
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.loader import ItemLoader


class Course_item(Item):
    name = Field()
    # description = Field()
    # img_url = Field()


class Course_spider(CrawlSpider):
    name = 'CourseSpider'
    allowed_domains = ['https://www.edx.org/']
    start_urls = ['https://www.edx.org/course/?language=Spanish']
    rules = (Rule(LinkExtractor(allow=r'/course'), callback='parse_item', follow='True'),)

    def parse_item(self, response):
        item = ItemLoader(Course_item, response)
        item.add_xpath('name', '//*[@id="course-intro-heading"]/text()')
        yield item.load_item()
When I run the spider with "scrapy runspider edxSpider.py -o edx.csv -t csv" I get an empty CSV file, and I also think it is not getting to the right Spanish course results.
Basically I want to visit each course from this link (edX Spanish courses) and get the name, description, provider, page URL and image URL.
Any ideas about what the problem might be?
You can't get edX content with a simple request: it uses JavaScript rendering to load the course elements dynamically, so CrawlSpider won't work in this case, because you need to find specific elements inside the response body in order to generate a new Request that will get what you need.
The real request (to get the URLs of the courses) is this one, but you need to generate it from the previous response body (although you could also just visit it directly and get the correct data).
So, to generate the real request, you need data that is inside a script tag:
from scrapy import Spider
import re
import json


class Course_spider(Spider):
    name = 'CourseSpider'
    allowed_domains = ['edx.org']
    start_urls = ['https://www.edx.org/course/?language=Spanish']

    def parse(self, response):
        script_text = response.xpath('//script[contains(text(), "Drupal.settings")]').extract_first()
        parseable_json_data = re.search(r'Drupal.settings, ({.+})', script_text).group(1)
        json_data = json.loads(parseable_json_data)
        ...
Now you have what you need in json_data and only need to create the URL string.
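For illustration only, the continuation inside parse() could look roughly like the snippet below; the exact keys inside json_data are not shown in the answer, so 'course_ids' and parse_course are hypothetical names:
        # hypothetical continuation inside parse(); the real key names depend
        # on what Drupal.settings actually contains ('course_ids' is a guess)
        for course_id in json_data.get('course_ids', []):
            url = 'https://www.edx.org/api/catalog/v2/courses/' + course_id
            yield response.follow(url, callback=self.parse_course)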
This page uses JavaScript to get data from the server and add it to the page.
It uses URLs like
https://www.edx.org/api/catalog/v2/courses/course-v1:IDBx+IDB33x+3T2017
The last part is the course's ID, which you can find in the HTML:
<main id="course-info-page" data-course-id="course-v1:IDBx+IDB33x+3T2017">
Code
from scrapy.http import Request
from scrapy.item import Field, Item
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.loader import ItemLoader
import json


class Course_spider(CrawlSpider):
    name = 'CourseSpider'
    allowed_domains = ['www.edx.org']
    start_urls = ['https://www.edx.org/course/?language=Spanish']
    rules = (Rule(LinkExtractor(allow=r'/course'), callback='parse_item', follow='True'),)

    def parse_item(self, response):
        print('parse_item url:', response.url)
        course_id = response.xpath('//*[@id="course-info-page"]/@data-course-id').extract_first()
        if course_id:
            url = 'https://www.edx.org/api/catalog/v2/courses/' + course_id
            yield Request(url, callback=self.parse_json)

    def parse_json(self, response):
        print('parse_json url:', response.url)
        item = json.loads(response.body)
        return item


from scrapy.crawler import CrawlerProcess

c = CrawlerProcess({
    'USER_AGENT': 'Mozilla/5.0',
    'FEED_FORMAT': 'csv',  # csv, json, xml
    'FEED_URI': 'output.csv',
})
c.crawl(Course_spider)
c.start()
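Another answer takes a different route and skips the HTML course pages entirely, querying the edX catalog search API directly and paginating through its JSON results: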
from scrapy.http import Request
from scrapy import Spider
import json


class edx_scraper(Spider):
    name = "edxScraper"
    start_urls = [
        'https://www.edx.org/api/v1/catalog/search?selected_facets[]=content_type_exact%3Acourserun&selected_facets[]=language_exact%3ASpanish&page=1&page_size=9&partner=edx&hidden=0&content_type[]=courserun&content_type[]=program&featured_course_ids=course-v1%3AHarvardX+CS50B+Business%2Ccourse-v1%3AMicrosoft+DAT206x+1T2018%2Ccourse-v1%3ALinuxFoundationX+LFS171x+3T2017%2Ccourse-v1%3AHarvardX+HDS2825x+1T2018%2Ccourse-v1%3AMITx+6.00.1x+2T2017_2%2Ccourse-v1%3AWageningenX+NUTR101x+1T2018&featured_programs_uuids=452d5bbb-00a4-4cc9-99d7-d7dd43c2bece%2Cbef7201a-6f97-40ad-ad17-d5ea8be1eec8%2C9b729425-b524-4344-baaa-107abdee62c6%2Cfb8c5b14-f8d2-4ae1-a3ec-c7d4d6363e26%2Ca9cbdeb6-5fc0-44ef-97f7-9ed605a149db%2Cf977e7e8-6376-400f-aec6-84dcdb7e9c73'
    ]

    def parse(self, response):
        data = json.loads(response.text)
        for course in data['objects']['results']:
            url = 'https://www.edx.org/api/catalog/v2/courses/' + course['key']
            yield response.follow(url, self.course_parse)

        # follow the pagination link if the API provides one
        if data['objects'].get('next') is not None:
            yield response.follow(data['objects']['next'], self.parse)

    def course_parse(self, response):
        course = json.loads(response.text)
        yield {
            'name': course['title'],
            'effort': course['effort'],
        }

Scrapy crawler not processing XHR Request

My spider is only crawling the first 10 pages, so I am assuming it is not triggering the "load more" button through the Request.
I am scraping this website: http://www.t3.com/reviews.
My spider code:
import scrapy
from scrapy.conf import settings
from scrapy.http import Request
from scrapy.selector import Selector
from reviews.items import ReviewItem


class T3Spider(scrapy.Spider):
    name = "t3"  # spider name to call in terminal
    allowed_domains = ['t3.com']  # the domain where the spider is allowed to crawl
    start_urls = ['http://www.t3.com/reviews']  # url from which the spider will start crawling

    def parse(self, response):
        sel = Selector(response)
        review_links = sel.xpath('//div[@id="content"]//div/div/a/@href').extract()
        for link in review_links:
            yield Request(url="http://www.t3.com" + link, callback=self.parse_review)

        # if there is a load-more button:
        if sel.xpath('//*[@class="load-more"]'):
            req = Request(url=r'http://www\.t3\.com/more/reviews/latest/\d+',
                          headers={"Referer": "http://www.t3.com/reviews", "X-Requested-With": "XMLHttpRequest"},
                          callback=self.parse)
            yield req
        else:
            return

    def parse_review(self, response):
        pass  # all my scraped item fields
What am I doing wrong? Sorry, but I am quite new to Scrapy. Thanks for your time, patience and help.
If you inspect the "Load More" button, you will not find any indication of how the link to load more reviews is constructed. The idea behind it is rather simple: the number after http://www.t3.com/more/reviews/latest/ suspiciously looks like the timestamp of the last loaded article. Here is how you can get it:
import calendar
from dateutil.parser import parse

import scrapy
from scrapy.http import Request


class T3Spider(scrapy.Spider):
    name = "t3"
    allowed_domains = ['t3.com']
    start_urls = ['http://www.t3.com/reviews']

    def parse(self, response):
        reviews = response.css('div.listingResult')
        for review in reviews:
            link = review.xpath("a/@href").extract()[0]
            yield Request(url="http://www.t3.com" + link, callback=self.parse_review)

        # TODO: handle exceptions here
        # extract the review date
        time = reviews[-1].xpath(".//time/@datetime").extract()[0]

        # convert a date into a timestamp
        timestamp = calendar.timegm(parse(time).timetuple())

        url = 'http://www.t3.com/more/reviews/latest/%d' % timestamp
        req = Request(url=url,
                      headers={"Referer": "http://www.t3.com/reviews", "X-Requested-With": "XMLHttpRequest"},
                      callback=self.parse)
        yield req

    def parse_review(self, response):
        print(response.url)
Notes:
this requires the dateutil module to be installed
you should recheck the code and make sure you are getting all of the reviews without skipping any of them
you should somehow end this "Load more" loop (see the sketch below)
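The answer does not spell out a stopping rule, but one possible way to end it, sketched here as an assumption, is to stop scheduling the next /more/reviews/latest/<timestamp> request once a response contains no more reviews. This would go at the top of the parse method above:
    def parse(self, response):
        reviews = response.css('div.listingResult')
        if not reviews:
            # nothing left to load: do not schedule another "more" request
            return

        # ... follow the individual reviews and build the next
        # /more/reviews/latest/<timestamp> request as in the code above ...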

How to submit a form in scrapy?

I tried to use Scrapy to complete the login and collect my project commit count. Here is the code.
from scrapy.item import Item, Field
from scrapy.http import FormRequest
from scrapy.spider import Spider
from scrapy.utils.response import open_in_browser


class GitSpider(Spider):
    name = "github"
    allowed_domains = ["github.com"]
    start_urls = ["https://www.github.com/login"]

    def parse(self, response):
        formdata = {'login': 'username',
                    'password': 'password'}
        yield FormRequest.from_response(response,
                                        formdata=formdata,
                                        clickdata={'name': 'commit'},
                                        callback=self.parse1)

    def parse1(self, response):
        open_in_browser(response)
After running the code
scrapy runspider github.py
It should show me the result page of the form, which should be a failed login on the same page, since the username and password are fake. However, it shows me the search page. The log file is located in the pastebin.
How should the code be fixed? Thanks in advance.
Your problem is that FormRequest.from_response() uses a different form, the "search form", but you wanted it to use the "log in form" instead. Provide a formnumber argument:
yield FormRequest.from_response(response,
                                formnumber=1,
                                formdata=formdata,
                                clickdata={'name': 'commit'},
                                callback=self.parse1)
Here is what I see opened in the browser after applying the change (using a "fake" user):
Solution using webdriver.
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
import time

from scrapy.contrib.spiders import CrawlSpider


class GitSpider(CrawlSpider):
    name = "gitscrape"
    allowed_domains = ["github.com"]
    start_urls = ["https://www.github.com/login"]

    def __init__(self):
        self.driver = webdriver.Firefox()

    def parse(self, response):
        self.driver.get(response.url)
        login_form = self.driver.find_element_by_name('login')
        password_form = self.driver.find_element_by_name('password')
        commit = self.driver.find_element_by_name('commit')
        login_form.send_keys("yourlogin")
        password_form.send_keys("yourpassword")
        actions = ActionChains(self.driver)
        actions.click(commit)
        actions.perform()
        # by this point you are logged in to github and have access
        # to all data in the main menu
        time.sleep(3)
        self.driver.close()
Using the "formname" argument also works:
yield FormRequest.from_response(response,
                                formname='Login',
                                formdata=formdata,
                                clickdata={'name': 'commit'},
                                callback=self.parse1)

Scrapy callback after redirect

I have a very basic Scrapy spider which grabs URLs from a file and then downloads them. The only problem is that some of them get redirected to a slightly modified URL within the same domain. I want to get them in my callback function using response.meta, and it works on normal URLs, but when a URL is redirected the callback doesn't seem to get called. How can I fix it?
Here's my code.
from scrapy.contrib.spiders import CrawlSpider
from scrapy import log
from scrapy import Request


class DmozSpider(CrawlSpider):
    name = "dmoz"
    handle_httpstatus_list = [302]
    allowed_domains = ["http://www.exmaple.net/"]

    f = open("C:\\python27\\1a.csv", 'r')
    url = 'http://www.exmaple.net/Query?indx='
    start_urls = [url + row for row in f.readlines()]

    def parse(self, response):
        print(response.meta.get('redirect_urls', [response.url]))
        print(response.status)
        print(response.headers.get('Location'))
I've also tried something like this:
def parse(self, response):
    return Request(response.url, meta={'dont_redirect': True, 'handle_httpstatus_list': [302]}, callback=self.parse_my_url)

def parse_my_url(self, response):
    print(response.status)
    print(response.headers.get('Location'))
And it doesn't work either.
By default, Scrapy follows redirects. If you don't want requests to be redirected, you can do it like this: use the start_requests method and add the flags to the request meta.
def start_requests(self):
    requests = [Request(self.url + u,
                        meta={'handle_httpstatus_list': [302],
                              'dont_redirect': True},
                        callback=self.parse)
                for u in self.start_urls]
    return requests
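If the goal is then to follow the redirect manually and still know the original URL in the final callback, a sketch along these lines could work inside the spider above (the 'original_url' meta key and parse_my_url are just illustrative names; Location comes back as bytes in Python 3):
def parse(self, response):
    # with dont_redirect the 302 response itself reaches this callback,
    # so the redirect target is available in the Location header
    if response.status == 302:
        location = response.headers.get('Location', b'').decode('utf-8')
        if location:
            # follow the redirect manually, remembering where we came from
            yield Request(response.urljoin(location),
                          meta={'original_url': response.url},
                          callback=self.parse_my_url)

def parse_my_url(self, response):
    print(response.url, 'was redirected from', response.meta.get('original_url'))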
