How to get a video URL from an iframe? - Python

I want to get the URL of a video (.mp4) from an iframe using Python (or Rust); it doesn't matter which library. For example, I have:
<iframe src="https://spinning.allohalive.com/?kp=1332827&token=b51bdfc8af17dee996d3eae53726df" />
I really have no idea how to do this. Help me please! If you need some more information, just ask.
The code that I use to parse iframes from a website:
import scrapy

from cimber.models.website import Website


class KinokradSpider(scrapy.Spider):
    name = "kinokrad"
    start_urls = [Website.Kinokrad.value]

    def __init__(self):
        self.pages_count = 1

    def parse(self, response):
        pages_count = self.get_pages_count(response)
        if self.pages_count <= pages_count:
            for film in response.css("div.shorposterbox"):
                film_url = film.css("div.postertitle").css("a").attrib["href"]
                yield scrapy.Request(film_url, callback=self.parse_film)

            next_page = f"{Website.Kinokrad.value}/page/{self.pages_count}"
            if next_page is not None:
                yield response.follow(next_page, callback=self.parse)

            self.pages_count += 1

    def parse_film(self, response):
        name = response.css("div.fallsttitle").css("h1::text").get().strip()
        players = []
        for player in response.css("iframe::attr(src)").extract():
            players.append(player)

        yield {
            "name": name,
            "players": players
        }

    def get_pages_count(self, response) -> int:
        links = response.css("div.navcent").css("a")
        last_link = links[len(links) - 1].attrib["href"]
        return int(last_link.split("/page/")[1].replace("/", "").strip())
I've been trying for 2 weeks, and finally I'm asking this question on Stack Overflow. First I used BeautifulSoup (bs4), then Selenium, and now Scrapy. I have a fairly large codebase that automatically parses iframes, but I need the mp4 URL. I've already tried the solutions on Stack Overflow, but they don't work, so please don't remove my question.
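One approach (not from the original post) is to follow each iframe src with another request and look for a direct .mp4 link in the player page that comes back. Everything below is a hedged sketch: the <source> selector and the regex are assumptions about how the allohalive player embeds the file, and if the player builds the URL in JavaScript you would instead need to replay its API/XHR calls or render the page with a headless browser.

import re

import scrapy


class PlayerMp4Spider(scrapy.Spider):
    """Hypothetical follow-up to parse_film: visit each iframe src and try to find an .mp4 URL."""
    name = "player_mp4"

    def parse_film(self, response):
        name = response.css("div.fallsttitle h1::text").get().strip()
        for player_url in response.css("iframe::attr(src)").getall():
            # Follow the embedded player page, carrying the film name along.
            yield response.follow(player_url, callback=self.parse_player, cb_kwargs={"name": name})

    def parse_player(self, response, name):
        # Assumption: the player page exposes the file in a <video><source src="..."> tag
        # or inside an inline script such as file:"https://.../video.mp4".
        mp4 = response.css("video source::attr(src)").get()
        if not mp4:
            match = re.search(r'["\'](https?://[^"\']+\.mp4[^"\']*)["\']', response.text)
            mp4 = match.group(1) if match else None
        yield {"name": name, "mp4": mp4}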

Related

I am trying to scrape a featured image using Scrapy in Python but it's giving 'None', even though I have attempted 3 to 4 methods

I am trying to scrape the featured image using Scrapy in Python, but it's giving 'None'. In fact I have attempted 3 to 4 methods, but they are not working. Can anyone please tell me why none of my code gives the source link of the image? Thanks in advance.
Here is the Code.
class NewsSpider(scrapy.Spider):
    name = "cruisefever"

    def start_requests(self):
        url = input("Enter the article url: ")
        yield scrapy.Request(url, callback=self.parse_dir_contents)

    def parse_dir_contents(self, response):
        Feature_Image = response.xpath('//*[@id="td_uid_2_634abd2257025"]/div/div[1]/div/div[8]/div/p[1]/img/@data-src').extract()[0]
        #Feature_Image = response.xpath('//*[@id="td_uid_2_634abd2257025"]/div/div[1]/div/div[8]/div/p[1]/img/@data-img-url').extract()[0]
        #Feature_Image = response.xpath('//*[@id="td_uid_2_634abd2257025"]/div/div[1]/div/div[8]/div/p[1]/img/@src').extract()[0]
        #Feature_Image = [i.strip() for i in response.css('img[class*="alignnone size-full wp-image-39164 entered lazyloaded"] ::attr(src)').getall()][0]
        yield {
            'Feature_Image': Feature_Image,
        }
Here is the website link https://cruisefever.net/carnival-cruise-lines-oldest-ship-sailing-final-cruise/
You can scrape the featured image using this XPath:
class NewsSpider(scrapy.Spider):
    name = "cruisefever"

    def start_requests(self):
        url = input("Enter the article url: ")
        yield scrapy.Request(url, callback=self.parse_dir_contents)

    def parse_dir_contents(self, response):
        image_tag = response.xpath('//div[@id="tdb-autoload-article"]/div/div/article/div/div/div/div/div/div//img')[1]
        Feature_Image = image_tag.attrib['src']
        yield {
            'Feature_Image': Feature_Image,
        }
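A less brittle alternative (not part of the original answer, and an assumption about the page) is to read the Open Graph meta tag that WordPress-based sites like this one usually point at the featured image; check the page HTML first to confirm it is present.

    def parse_dir_contents(self, response):
        # Assumption: the article exposes its featured image via <meta property="og:image" content="...">.
        Feature_Image = response.xpath('//meta[@property="og:image"]/@content').get()
        yield {'Feature_Image': Feature_Image}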

struggling with Scrapy

I'm new to Scrapy and I'm struggling a little with a special case.
Here is the scenario:
I want to scrape a website where there is a list of books.
httpx://...bookshop.../archive is the page where the first 10 books are listed.
Then I want to get the information (name, date, author) for every book in the list. I have to go to another page for each book:
httpx://...bookshop.../book/{random_string}
So there are two types of request:
One for refreshing the list of books.
Another one for getting the book information.
But new books can be added to the list at any time,
so I would like to refresh the list every minute,
and I also want to delay all requests by 5 seconds.
Here is my basic solution, but it only works for one "loop":
First I set the delay in settings.py:
DOWNLOAD_DELAY = 5
Then the code of my spider:
import time

import scrapy
from scrapy.loader import ItemLoader


class bookshopScraper(scrapy.Spider):
    name = "bookshop"
    url = "httpx://...bookshop.../archive"
    history = []
    last_refresh = 0

    def start_requests(self):
        self.last_refresh = time.time()
        yield scrapy.Request(url=self.url, callback=self.parse)

    def parse(self, response):
        page = response.url.split("/")[3]
        if page == 'archive':
            return self.parse_archive(response)
        else:
            return self.parse_book(response)

    def parse_archive(self, response):
        links = response.css('SOME CSS ').extract()
        for link in links:
            if link not in self.history:
                self.history.append(link)
                yield scrapy.Request(url="httpx://...bookshop.../book/" + link, callback=self.parse)

        if len(self.history) > 10:
            n = len(self.history) - 10
            self.history = self.history[-n:]

    def parse_book(self, response):
        """
        Load Item
        """
Now I would like to do something like:
if time.time() > self.last_refresh + 80:
    self.last_refresh = time.time()
    return scrapy.Request(url=self.url, callback=self.parse, dont_filter=True)
But I really don't know how to implement this.
PS: I want the same Scrapy instance to run all the time without stopping.
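A sketch of one way to implement this (not from the original post): connect to the spider_idle signal, which Scrapy keeps firing while the request queue is empty, keep the spider alive with DontCloseSpider, and only re-schedule the archive page once a minute has passed. The archive URL placeholder is kept from the question, and the existing DOWNLOAD_DELAY = 5 still applies to every request.

import time

import scrapy
from scrapy import signals
from scrapy.exceptions import DontCloseSpider


class BookshopRefreshSpider(scrapy.Spider):
    """Sketch: keep the spider running and refresh the archive roughly every minute."""
    name = "bookshop_refresh"
    archive_url = "httpx://...bookshop.../archive"  # placeholder kept from the question

    @classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        spider = super().from_crawler(crawler, *args, **kwargs)
        # spider_idle fires (repeatedly) whenever there are no pending requests left.
        crawler.signals.connect(spider.spider_idle, signal=signals.spider_idle)
        return spider

    def start_requests(self):
        self.last_refresh = time.time()
        yield scrapy.Request(self.archive_url, callback=self.parse_archive, dont_filter=True)

    def spider_idle(self, spider):
        if time.time() - self.last_refresh >= 60:
            self.last_refresh = time.time()
            # Note: on Scrapy >= 2.10 the spider argument to engine.crawl() is dropped.
            self.crawler.engine.crawl(
                scrapy.Request(self.archive_url, callback=self.parse_archive, dont_filter=True),
                spider,
            )
        # Prevent the spider from closing while we wait for the next refresh window.
        raise DontCloseSpider

    def parse_archive(self, response):
        # Same link extraction / parse_book logic as in the question.
        pass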

IMDB scrapy get all movie data

I am working on a class project and trying to get all IMDB movie data (titles, budgets, etc.) up until 2016. I adapted the code from https://github.com/alexwhb/IMDB-spider/blob/master/tutorial/spiders/spider.py.
My thought is: for i in range(1874, 2016) (since 1874 is the earliest year shown on http://www.imdb.com/year/), direct the program to the corresponding year's page and grab the data from that URL.
But the problem is, each page for each year only shows 50 movies, so after crawling those 50 movies, how can I move on to the next page? And after crawling each year, how can I move on to the next year? This is my code for the URL-parsing part so far, but it is only able to crawl 50 movies for a particular year.
class tutorialSpider(scrapy.Spider):
    name = "tutorial"
    allowed_domains = ["imdb.com"]
    start_urls = ["http://www.imdb.com/search/title?year=2014,2014&title_type=feature&sort=moviemeter,asc"]

    def parse(self, response):
        for sel in response.xpath("//*[@class='results']/tr/td[3]"):
            item = MovieItem()
            item['Title'] = sel.xpath('a/text()').extract()[0]
            item['MianPageUrl'] = "http://imdb.com" + sel.xpath('a/@href').extract()[0]
            request = scrapy.Request(item['MianPageUrl'], callback=self.parseMovieDetails)
            request.meta['item'] = item
            yield request
You can use CrawlSpiders to simplify your task. As you'll see below, start_requests dynamically generates the list of URLs while parse_page only extracts the movies to crawl. Finding and following the 'Next' link is done by the rules attribute.
I agree with @Padraic Cunningham that hard-coding values is not a great idea. I've added spider arguments so that you can call:
scrapy crawl imdb -a start=1950 -a end=1980 (the scraper will default to 1874-2016 if it doesn't get any arguments).
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from imdbyear.items import MovieItem


class IMDBSpider(CrawlSpider):
    name = 'imdb'
    rules = (
        # extract links at the bottom of the page. note that there are 'Prev' and 'Next'
        # links, so a bit of additional filtering is needed
        Rule(LinkExtractor(restrict_xpaths=('//*[@id="right"]/span/a')),
             process_links=lambda links: filter(lambda l: 'Next' in l.text, links),
             callback='parse_page',
             follow=True),
    )

    def __init__(self, start=None, end=None, *args, **kwargs):
        super(IMDBSpider, self).__init__(*args, **kwargs)
        self.start_year = int(start) if start else 1874
        self.end_year = int(end) if end else 2016

    # generate start_urls dynamically
    def start_requests(self):
        for year in range(self.start_year, self.end_year + 1):
            yield scrapy.Request('http://www.imdb.com/search/title?year=%d,%d&title_type=feature&sort=moviemeter,asc' % (year, year))

    def parse_page(self, response):
        for sel in response.xpath("//*[@class='results']/tr/td[3]"):
            item = MovieItem()
            item['Title'] = sel.xpath('a/text()').extract()[0]
            # note -- you had 'MianPageUrl' as your scrapy field name. I would recommend fixing this typo
            # (you will need to change it in items.py as well)
            item['MainPageUrl'] = "http://imdb.com" + sel.xpath('a/@href').extract()[0]
            request = scrapy.Request(item['MainPageUrl'], callback=self.parseMovieDetails)
            request.meta['item'] = item
            yield request

    # make sure that the dynamically generated start_urls are parsed as well
    parse_start_url = parse_page

    # do your magic
    def parseMovieDetails(self, response):
        pass
You can use the piece of code below to follow the next page:
# 'a.lister-page-next.next-page::attr(href)' is the selector to get the next page link
next_page = response.css('a.lister-page-next.next-page::attr(href)').extract_first()
if next_page is not None:
    next_page = response.urljoin(next_page)  # joins current and next page url
    yield scrapy.Request(next_page, callback=self.parse)  # calls parse function again when crawled to next page
I figured out a very dumb way to solve this. I put all the links in start_urls. A better solution would be very much appreciated!
class tutorialSpider(scrapy.Spider):
    name = "tutorial"
    allowed_domains = ["imdb.com"]
    start_urls = []
    for i in xrange(1874, 2017):
        for j in xrange(1, 11501, 50):
            # since the largest number of movies for a year to have is 11,400 (2016)
            start_url = "http://www.imdb.com/search/title?sort=moviemeter,asc&start=" + str(j) + "&title_type=feature&year=" + str(i) + "," + str(i)
            start_urls.append(start_url)

    def parse(self, response):
        for sel in response.xpath("//*[@class='results']/tr/td[3]"):
            item = MovieItem()
            item['Title'] = sel.xpath('a/text()').extract()[0]
            item['MianPageUrl'] = "http://imdb.com" + sel.xpath('a/@href').extract()[0]
            request = scrapy.Request(item['MianPageUrl'], callback=self.parseMovieDetails)
            request.meta['item'] = item
            yield request
The code that @Greg Sadetsky provided needs some minor changes. Well, only one change, in the first line of the parse_page method.
Just change the XPath in the for loop from:
response.xpath("//*[@class='results']/tr/td[3]"):
to
response.xpath("//*[contains(@class,'lister-item-content')]/h3"):
This worked like a charm for me!
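For context, with that change the loop at the top of parse_page from the earlier answer would read as follows (a sketch; the rest of the method is unchanged):

    def parse_page(self, response):
        # row selector updated for IMDB's newer "lister" markup
        for sel in response.xpath("//*[contains(@class,'lister-item-content')]/h3"):
            item = MovieItem()
            item['Title'] = sel.xpath('a/text()').extract()[0]
            item['MainPageUrl'] = "http://imdb.com" + sel.xpath('a/@href').extract()[0]
            request = scrapy.Request(item['MainPageUrl'], callback=self.parseMovieDetails)
            request.meta['item'] = item
            yield request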

Crawling hidden data using Scrapy

I just started crawling. I'm trying to crawl questions and answers from the website http://www.indiabix.com/verbal-ability/spotting-errors/ by downloading content with the Scrapy framework and Python 2.7. I noticed that if you view its source, the answer for every question should be in the b tag, but it's not:
<div class="div-spacer">
<p><span class="ib-green"><b>Answer:</b></span> Option <b class="jq-hdnakqb"></b></p>
<p><span class="ib-green"><b>Explanation:</b></span></p>
<p> No answer description available for this question. <b>Let us discuss</b>. </p>
If we inspect the element on the webpage we can see the correct answer as text between the tags (Answer: Option A or B, etc.) for each question, but the HTML source code doesn't contain it.
To get the text within the b tag I've tried around 15 queries using XPath.
I've written the most likely 4-5 queries as comments in the code below.
import scrapy
import urllib
import json
from errors1.items import Errors1Item


class Errors1Spider(scrapy.Spider):
    name = "errors1"
    start_urls = ["http://www.indiabix.com/verbal-ability/spotting-errors/"]

    def parse(self, response):
        i = 0
        y = 0
        j = json.loads(json.dumps(response.xpath('//td[contains(@id, "tdOption")]/text()').extract()))
        x = json.loads(json.dumps(response.xpath('//div[@class="div-spacer"]/p[3]/text()').extract()))

        #to get correct answer
        #response.xpath('//div[@class = "div-spacer"]/p/b/text()').extract()
        #response.xpath('//div[@class = "div-spacer"]/p[1]/b/text()').extract()
        #response.xpath('//div[@class = "div-spacer"]/p//text()').extract()
        #response.xpath('//b[@class = "jq-hdnakqb"]/text()').extract()
        #response.xpath('string(//div[@class = "div-spacer"]/p/b/text())').extract()

        while i < len(j) and y < len(x):
            item = Errors1Item()
            item['optionA'] = j[i]
            i += 1
            item['optionB'] = j[i]
            i += 1
            item['optionC'] = j[i]
            i += 1
            item['optionD'] = j[i]
            i += 1
            item['explanation'] = x[y]
            y += 1
            yield item
Can someone please help me get the answer content from that webpage?
Thanks
From what I understand, there is JavaScript logic involved in setting the correct option value.
What helped me solve it is the scrapyjs middleware, which uses Splash, a browser-as-a-service. Skipping the installation and configuration, here is the spider that I've executed:
# -*- coding: utf-8 -*-
import scrapy


class IndiaBixSpider(scrapy.Spider):
    name = "indiabix"
    allowed_domains = ["www.indiabix.com"]
    start_urls = ["http://www.indiabix.com/verbal-ability/spotting-errors/"]

    def start_requests(self):
        for url in self.start_urls:
            yield scrapy.Request(url, meta={
                'splash': {
                    'endpoint': 'render.html',
                    'args': {'wait': 0.5}
                }
            })

    def parse(self, response):
        for question in response.css("div.bix-div-container"):
            answer = question.xpath(".//input[starts-with(@id, 'hdnAnswer')]/@value").extract()
            print answer
And here is what I've got on the console (correct answers):
[u'A']
[u'C']
[u'A']
[u'C']
[u'C']
[u'C']
[u'B']
[u'A']
[u'D']
[u'C']
[u'B']
[u'B']
[u'A']
[u'B']
[u'B']
See also:
https://stackoverflow.com/a/30378765/771848
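The answer skips installation and configuration; for reference, here is a minimal settings.py sketch using the current scrapy-splash package (the successor to scrapyjs), assuming a Splash instance is listening on localhost:8050:

# settings.py -- sketch; requires `pip install scrapy-splash` and a running Splash
# instance, e.g. `docker run -p 8050:8050 scrapinghub/splash`
SPLASH_URL = 'http://localhost:8050'

DOWNLOADER_MIDDLEWARES = {
    'scrapy_splash.SplashCookiesMiddleware': 723,
    'scrapy_splash.SplashMiddleware': 725,
    'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
SPIDER_MIDDLEWARES = {
    'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'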

Scrapy spider get information that is inside of links

I have written a spider that can take the information from this page and follow "Next page" links. For now, the spider only takes the information that I'm showing in the following structure.
The structure of the page is something like this
Title 1
URL 1 ---------> If you click you go to one page with more information
Location 1
Title 2
URL 2 ---------> If you click you go to one page with more information
Location 2
Next page
What I want is for the spider to visit each URL link and get the full information. I suppose I must add another rule specifying that I want to do something like this.
The behaviour of the spider should be:
Go to URL1 (get info)
Go to URL2 (get info)
...
Next page
But I don't know how I can implement it. Can someone guide me?
Code of my Spider:
class BcnSpider(CrawlSpider):
    name = 'bcn'
    allowed_domains = ['guia.bcn.cat']
    start_urls = ['http://guia.bcn.cat/index.php?pg=search&q=*:*']

    rules = (
        Rule(
            SgmlLinkExtractor(
                allow=(re.escape("index.php")),
                restrict_xpaths=("//div[@class='paginador']")),
            callback="parse_item",
            follow=True),
    )

    def parse_item(self, response):
        self.log("parse_item")
        sel = Selector(response)
        sites = sel.xpath("//div[@id='llista-resultats']/div")
        items = []
        cont = 0
        for site in sites:
            item = BcnItem()
            item['id'] = cont
            item['title'] = u''.join(site.xpath('h3/a/text()').extract())
            item['url'] = u''.join(site.xpath('h3/a/@href').extract())
            item['when'] = u''.join(site.xpath('div[@class="dades"]/dl/dd[1]/text()').extract())
            item['where'] = u''.join(site.xpath('div[@class="dades"]/dl/dd[2]/span/a/text()').extract())
            item['street'] = u''.join(site.xpath('div[@class="dades"]/dl/dd[3]/span/text()').extract())
            item['phone'] = u''.join(site.xpath('div[@class="dades"]/dl/dd[4]/text()').extract())
            items.append(item)
            cont = cont + 1
        return items
EDIT: After searching the internet I found code with which I can do that.
First of all, I have to get all the links, then I have to call another parse method.
def parse(self, response):
    # Get all URLs
    yield Request(url=_url, callback=self.parse_details)

def parse_details(self, response):
    # Detailed information of each page
If you want to use Rules because the page has a paginator, you should change def parse to def parse_start_url and then call this method through the Rule. With these changes you make sure that parsing begins at parse_start_url, and the code would be something like this:
rules = (
    Rule(
        SgmlLinkExtractor(
            allow=(re.escape("index.php")),
            restrict_xpaths=("//div[@class='paginador']")),
        callback="parse_start_url",
        follow=True),
)

def parse_start_url(self, response):
    # Get all URLs
    yield Request(url=_url, callback=self.parse_details)

def parse_details(self, response):
    # Detailed information of each page
That's all, folks.
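A sketch of how those placeholders could be filled in, reusing the list selectors from the question; the fields extracted in parse_details are assumptions, since the detail-page markup isn't shown:

    def parse_start_url(self, response):
        # Follow every detail-page link found in the result list.
        for href in response.xpath("//div[@id='llista-resultats']/div/h3/a/@href").extract():
            yield Request(url=response.urljoin(href), callback=self.parse_details)

    def parse_details(self, response):
        item = BcnItem()
        item['title'] = u''.join(response.xpath('//h3/text()').extract())  # assumed: title repeated on the detail page
        item['url'] = response.url
        # ... extract the remaining fields once the detail markup is known ...
        yield item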
There is an easier way of achieving this. Click Next on your page, and read the new URL carefully:
http://guia.bcn.cat/index.php?pg=search&from=10&q=*:*&nr=10
By looking at the GET parameters in the URL (everything after the question mark), and with a bit of testing, we find that they mean:
from=10 - starting index
q=*:* - search query
nr=10 - number of items to display
This is how I would've done it:
Set nr=100 or higher (1000 may do as well; just be sure that there is no timeout).
Loop from from=0 to 34300. This is above the current number of entries; you may want to extract this value first.
Example code:
entries = 34246
step = 100
stop = entries - entries % step + step
for x in xrange(0, stop, step):
url = 'http://guia.bcn.cat/index.php?pg=search&from={}&q=*:*&nr={}'.format(x, step)
# Loop over all entries, and open links if needed
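Dropped into a Scrapy spider, that loop becomes a start_requests generator. A sketch, with entries hard-coded here even though (as noted above) it would be better to extract it from the first results page:

import scrapy


class BcnAllResultsSpider(scrapy.Spider):
    name = 'bcn_all'
    allowed_domains = ['guia.bcn.cat']

    def start_requests(self):
        entries = 34246  # assumed total; ideally scraped from the first results page
        step = 100
        stop = entries - entries % step + step
        for x in range(0, stop, step):
            url = 'http://guia.bcn.cat/index.php?pg=search&from={}&q=*:*&nr={}'.format(x, step)
            yield scrapy.Request(url, callback=self.parse)

    def parse(self, response):
        # Same per-result extraction as parse_item in the question.
        pass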
