How to make my crawler parse data from start page - python

I've written some code in Python to grab details from a torrent site. When I run it, the results are what I expected. The only problem with this crawler is that it skips the content of the first page (the pagination URLs start from 2), which I can't fix. Any help on this would be highly appreciated.
import requests
from lxml import html

page_link = "https://yts.ag/browse-movies"
b_link = "https://yts.ag"

def get_links(main_link):
    response = requests.get(main_link).text
    tree = html.fromstring(response)
    for item in tree.cssselect('ul.tsc_pagination a'):
        if "page" in item.attrib["href"]:
            movie_details(b_link + item.attrib["href"])

def movie_details(link):
    response = requests.get(link).text
    tree = html.fromstring(response)
    for titles in tree.cssselect("div.browse-movie-wrap"):
        title = titles.cssselect('div.browse-movie-bottom a.browse-movie-title')[0].text
        link = titles.cssselect('div.browse-movie-year')[0].text
        rating = titles.cssselect('figcaption.hidden-xs h4.rating')[0].text
        genre = titles.cssselect('figcaption.hidden-xs h4')[0].text
        genre1 = titles.cssselect('figcaption.hidden-xs h4')[1].text
        print(title, link, rating, genre, genre1)

get_links(page_link)

Why not just call the movie_details() function on the main_link before the loop?
def get_links(main_link):
    response = requests.get(main_link).text
    tree = html.fromstring(response)
    movie_details(main_link)
    for item in tree.cssselect('ul.tsc_pagination a'):
        if "page" in item.attrib["href"]:
            movie_details(b_link + item.attrib["href"])
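A slightly more defensive variant is also possible. This is only a sketch, reusing the movie_details() function and selectors from the question: it seeds a set with the start URL so page 1 is always scraped, and duplicate pagination hrefs are visited only once.
def get_links(main_link):
    # Seed the set with the landing page itself so page 1 is never skipped.
    pages = {main_link}
    tree = html.fromstring(requests.get(main_link).text)
    for item in tree.cssselect('ul.tsc_pagination a'):
        href = item.attrib.get("href", "")
        if "page" in href:
            # The set drops duplicate pagination links ("Next", numbered pages, etc.).
            pages.add(b_link + href)
    for page in pages:
        movie_details(page)
Note that a set does not preserve order, so the pages may be fetched out of sequence.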


Pulling p tags from multiple URLs

I've struggled with this for days and I'm not sure what the issue could be. Basically, I'm trying to extract the profile box data (picture below) for each link; going through the inspector, I thought I could pull the p tags to do so.
I'm new to this and trying to understand, but here's what I have thus far:
First, a script that (somewhat) successfully pulls the info for ONE link:
import requests
from bs4 import BeautifulSoup
# getting html
url = 'https://basketball.realgm.com/player/Darius-Adams/Summary/28720'
req = requests.get(url)
soup = BeautifulSoup(req.text, 'html.parser')
container = soup.find('div', attrs={'class', 'main-container'})
playerinfo = container.find_all('p')
print(playerinfo)
I then also have a script that pulls all of the href attributes from multiple pages:
from bs4 import BeautifulSoup
import requests

def get_links(url):
    links = []
    website = requests.get(url)
    website_text = website.text
    soup = BeautifulSoup(website_text)
    for link in soup.find_all('a'):
        links.append(link.get('href'))
    for link in links:
        print(link)
    print(len(links))

get_links('https://basketball.realgm.com/dleague/players/2022')
get_links('https://basketball.realgm.com/dleague/players/2021')
get_links('https://basketball.realgm.com/dleague/players/2020')
So basically, my goal is to combine these two and get one script that will pull all of the p tags from multiple URLs. I've been trying to do it, and I'm really not sure why this isn't working:
from bs4 import BeautifulSoup
import requests

def get_profile(url):
    profiles = []
    req = requests.get(url)
    soup = BeautifulSoup(req.text, 'html.parser')
    container = soup.find('div', attrs={'class', 'main-container'})
    for profile in container.find_all('a'):
        profiles.append(profile.get('p'))
    for profile in profiles:
        print(profile)

get_profile('https://basketball.realgm.com/player/Darius-Adams/Summary/28720')
get_profile('https://basketball.realgm.com/player/Marial-Shayok/Summary/26697')
Again, I'm really new to web scraping with Python, but any advice would be greatly appreciated. Ultimately, my end goal is to have a tool that can scrape this data in a clean way all at once (Player name, Current Team, Born, Birthplace, etc.). Maybe I'm doing it entirely wrong, but any guidance is welcome!
You need to combine your two scripts and make a request for each player. Try the following approach; it searches for <td> tags that have the data-th="Player" attribute:
import requests
from bs4 import BeautifulSoup

def get_links(url):
    data = []
    req_url = requests.get(url)
    soup = BeautifulSoup(req_url.content, "html.parser")
    for td in soup.find_all('td', {'data-th': 'Player'}):
        a_tag = td.a
        name = a_tag.text
        player_url = a_tag['href']
        print(f"Getting {name}")
        req_player_url = requests.get(f"https://basketball.realgm.com{player_url}")
        soup_player = BeautifulSoup(req_player_url.content, "html.parser")
        div_profile_box = soup_player.find("div", class_="profile-box")
        row = {"Name": name, "URL": player_url}
        for p in div_profile_box.find_all("p"):
            try:
                key, value = p.get_text(strip=True).split(':', 1)
                row[key.strip()] = value.strip()
            except:  # not all entries have values
                pass
        data.append(row)
    return data

urls = [
    'https://basketball.realgm.com/dleague/players/2022',
    'https://basketball.realgm.com/dleague/players/2021',
    'https://basketball.realgm.com/dleague/players/2020',
]

for url in urls:
    print(f"Getting: {url}")
    data = get_links(url)
    for entry in data:
        print(entry)
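If the end goal is one clean table of all players, the list of dicts returned by get_links() can be written straight to CSV. Here is a minimal sketch; the column names below are an assumption about which profile-box fields you want, so adjust them to the keys that actually appear:
import csv

# Hypothetical subset of columns; extra keys in each row are simply ignored.
fieldnames = ["Name", "URL", "Current Team", "Born", "Birthplace"]

all_rows = []
for url in urls:
    all_rows.extend(get_links(url))

with open("players.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction="ignore")
    writer.writeheader()
    writer.writerows(all_rows)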

Struggling to scrape a telegram public channel with BeautifulSoup

I'm practicing web scraping with BeautifulSoup, but I'm struggling to finish building a dictionary of the items I've scraped.
The target can be any Telegram public channel (web version), and I intend to collect, as part of the dictionary, the text message, timestamp, views and image URL (if one is attached to the post).
I've inspected the code for the 4 elements, but the one related to the image URL has no class or span, so I've ended up scraping it via regex. The other 3 elements are easily retrievable.
Let's go through it in parts:
Importing modules
from bs4 import BeautifulSoup
import requests
import re
Function to get the image URLs from the public channel
def pictures(url):
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'lxml')
    link = str(soup.find_all('a', class_='tgme_widget_message_photo_wrap'))  # converted to str in order to apply regex
    image_url = re.findall(r"https://cdn4.*.*.jpg", link)
    return image_url
Soup to get the text message, timestamp and views
url = "https://t.me/s/computer_science_and_programming"
picture_list = pictures(url)

channel = requests.get(url).text
soup = BeautifulSoup(channel, 'lxml')
tgpost = soup.find_all('div', class_='tgme_widget_message')

full_message = {}
for content in tgpost:
    full_message['views'] = content.find('span', class_='tgme_widget_message_views').text
    full_message['timestamp'] = content.find('time', class_='time').text
    full_message['text'] = content.find('div', class_='tgme_widget_message_text').text
    print(full_message)
I would really appreciate it if someone could help me. I'm new to Python and I don't know how to:
Check if the post contains an image and, if so, add it to the dictionary.
Print the dictionary including image_url as a key and the URL as its value for each post.
Thank you very much
I think you want something like this.
from bs4 import BeautifulSoup
import requests, re

url = "https://t.me/s/computer_science_and_programming"
channel = requests.get(url).text
soup = BeautifulSoup(channel, 'lxml')
tgpost = soup.find_all('div', class_='tgme_widget_message')

full_message = {}
for content in tgpost:
    full_message['views'] = content.find('span', class_='tgme_widget_message_views').text
    full_message['timestamp'] = content.find('time', class_='time').text
    full_message['text'] = content.find('div', class_='tgme_widget_message_text').text
    if content.find('a', class_='tgme_widget_message_photo_wrap') is not None:
        link = str(content.find('a', class_='tgme_widget_message_photo_wrap'))
        full_message['url_image'] = re.findall(r"https://cdn4.*.*.jpg", link)[0]
    elif 'url_image' in full_message:
        full_message.pop('url_image')
    print(full_message)
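One design note: the loop above reuses a single full_message dict, so only the last post survives once the loop ends. If you want to keep every post, build a fresh dict per iteration and collect them in a list. A small sketch along the same lines (like the answer above, it assumes every post has views, a timestamp and text):
posts = []
for content in tgpost:
    post = {
        'views': content.find('span', class_='tgme_widget_message_views').text,
        'timestamp': content.find('time', class_='time').text,
        'text': content.find('div', class_='tgme_widget_message_text').text,
    }
    photo = content.find('a', class_='tgme_widget_message_photo_wrap')
    if photo is not None:
        # Only add the key when the post actually carries an image.
        match = re.findall(r"https://cdn4.*\.jpg", str(photo))
        if match:
            post['url_image'] = match[0]
    posts.append(post)

print(posts)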

Web Scraping multiple pages of a web site?

My problem is that I am trying to scrape data from a site with multiple pages, each of which is a separate blog post, but the current code only scrapes the data from the URL I have set as the variable source.
source = requests.get('https://www.trenerklemen.si/objave/').text
I would like to scrape text from the other URLs, but one part is dynamic and I have no idea how to access it:
source = requests.get('https://www.trenerklemen.si/?????/').text
How would I get the ???? part if it changes? Thanks for the answers.
from bs4 import BeautifulSoup
import requests
source = requests.get('https://www.trenerklemen.si/objave/').text
soup = BeautifulSoup(source,'lxml')
article = soup.find('article')
headline = article.h2.text
print(headline)
summary = article.find('div', class_='post-content').p.text
print(summary)
video = article.find('iframe', class_ ='youtube-player')['src']
video_id = video.split('/')[4]
video_id = video_id.split('?')[0]
yt_link = f'https://youtube.com/watch?v={video_id}'
print(yt_link)
from bs4 import BeautifulSoup
import requests

mainLink = "https://www.trenerklemen.si"
ALL_links = ["https://www.trenerklemen.si/objave/"]
counter = 0

while counter < len(ALL_links):
    source = requests.get(ALL_links[counter]).text
    soup = BeautifulSoup(source, 'lxml')
    for link in soup.findAll('a'):
        LinkNow = str(link.get('href'))
        if len(LinkNow) > 0:
            if LinkNow not in ALL_links:
                if LinkNow[0] == '/':
                    ALL_links.append(mainLink + LinkNow)
                if mainLink in LinkNow:
                    ALL_links.append(LinkNow)
    try:
        article = soup.find('article')
        headline = article.h2.text
        print(headline)
        summary = article.find('div', class_='post-content').p.text
        print(summary)
        video = article.find('iframe', class_='youtube-player')['src']
        video_id = video.split('/')[4]
        video_id = video_id.split('?')[0]
        yt_link = f'https://youtube.com/watch?v={video_id}'
        print(yt_link)
    except Exception as e:
        print("Error: " + str(e))
    counter += 1
Websites usually provide sitemaps so that search engines can crawl their content. You can use the sitemap as the source of the links to scrape.
Sitemap for your website: https://www.trenerklemen.si/post-sitemap.xml
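A minimal sketch of that approach, assuming the sitemap is a standard XML file whose <loc> tags each hold one post URL, and reusing the parsing logic from the question:
from bs4 import BeautifulSoup
import requests

sitemap = requests.get('https://www.trenerklemen.si/post-sitemap.xml').text
# Sitemaps are plain XML; every post URL sits inside a <loc> tag.
sitemap_soup = BeautifulSoup(sitemap, 'xml')
post_links = [loc.text for loc in sitemap_soup.find_all('loc')]

for post_link in post_links:
    source = requests.get(post_link).text
    soup = BeautifulSoup(source, 'lxml')
    article = soup.find('article')
    if article and article.h2:
        print(article.h2.text)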

Trouble parsing product names out of some links with different depth

I've written a script in Python to reach the target page where each category has its available item names on a website. My script can get the product names from most of the links (generated by traversing category links and then subcategory links).
The script can parse the sub-category links revealed upon clicking the + sign located right next to each category, which are visible in the image below, and then parse all the product names from the target page. This is one such target page.
However, a few of the links do not have the same depth as the other links. For example this link and this one are different from usual links like this one.
How can I get all the product names from all the links irrespective of their different depth?
This is what I've tried so far:
import requests
from urllib.parse import urljoin
from bs4 import BeautifulSoup

link = "https://www.courts.com.sg/"
res = requests.get(link)
soup = BeautifulSoup(res.text, "lxml")

for item in soup.select(".nav-dropdown li a"):
    if "#" in item.get("href"): continue  # kick out invalid links
    newlink = urljoin(link, item.get("href"))
    req = requests.get(newlink)
    sauce = BeautifulSoup(req.text, "lxml")
    for elem in sauce.select(".product-item-info .product-item-link"):
        print(elem.get_text(strip=True))
How to find target links:
The site has six main product categories. Products that belong to a subcategory can also be found in a main category (for example, the products in /furniture/furniture/tables can also be found in /furniture), so you only have to collect products from the main categories. You could get the category links from the main page, but it'd be easier to use the sitemap.
url = 'https://www.courts.com.sg/sitemap/'
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
cats = soup.select('li.level-0.category > a')[:6]
links = [i['href'] for i in cats]
As you've mentioned, there are some links that have a different structure, like this one: /televisions. But if you click the View All Products link on that page, you will be redirected to /tv-entertainment/vision/television. So, you can get all the /televisions products from /tv-entertainment. Similarly, the products in links to brands can be found in the main categories. For example, the /asus products can be found in /computing-mobile and other categories.
The code below collects products from all the main categories, so it should collect all the products on the site.
from bs4 import BeautifulSoup
import requests

url = 'https://www.courts.com.sg/sitemap/'
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')

cats = soup.select('li.level-0.category > a')[:6]
links = [i['href'] for i in cats]

products = []
for link in links:
    link += '?product_list_limit=24'
    while link:
        r = requests.get(link)
        soup = BeautifulSoup(r.text, 'html.parser')
        link = (soup.select_one('a.action.next') or {}).get('href')
        for elem in soup.select(".product-item-info .product-item-link"):
            product = elem.get_text(strip=True)
            products += [product]
            print(product)
I've increased the number of products per page to 24, but still this code takes a lot of time, as it collects products from all main categories and their pagination links. However, we could make it much faster with the use of threads.
from bs4 import BeautifulSoup
import requests
from threading import Thread, Lock
from urllib.parse import urlparse, parse_qs

lock = Lock()
threads = 10
products = []

def get_products(link, products):
    soup = BeautifulSoup(requests.get(link).text, 'html.parser')
    tags = soup.select(".product-item-info .product-item-link")
    with lock:
        products += [tag.get_text(strip=True) for tag in tags]
        print('page:', link, 'items:', len(tags))

url = 'https://www.courts.com.sg/sitemap/'
soup = BeautifulSoup(requests.get(url).text, 'html.parser')
cats = soup.select('li.level-0.category > a')[:6]
links = [i['href'] for i in cats]

for link in links:
    link += '?product_list_limit=24'
    soup = BeautifulSoup(requests.get(link).text, 'html.parser')
    last_page = soup.select_one('a.page.last')['href']
    last_page = int(parse_qs(urlparse(last_page).query)['p'][0])

    threads_list = []
    for i in range(1, last_page + 1):
        page = '{}&p={}'.format(link, i)
        thread = Thread(target=get_products, args=(page, products))
        thread.start()
        threads_list += [thread]
        if i % threads == 0 or i == last_page:
            for t in threads_list:
                t.join()

print(len(products))
print('\n'.join(products))
This code collects 18,466 products from 773 pages in about 5 minutes. I'm using 10 threads because I don't want to stress the server too much, but you could use more (most servers can handle 20 threads easily).
I would recommend starting your scrape from the site's pages sitemap, found here. If they were to add products, they would likely show up there as well.
Since your main issue is finding the links, here is a generator that will find all of the category and sub-category links using the sitemap krflol pointed out in his solution:
from bs4 import BeautifulSoup
import requests

def category_urls():
    response = requests.get('https://www.courts.com.sg/sitemap')
    html_soup = BeautifulSoup(response.text, features='html.parser')
    categories_sitemap = html_soup.find(attrs={'class': 'xsitemap-categories'})
    for category_a_tag in categories_sitemap.find_all('a'):
        yield category_a_tag.attrs['href']
And to find the product names, simply scrape each of the yielded category_urls.
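For example, reusing the product selector from the question (just a sketch; it only reads the first page of each category, so pagination would need to be added as in the earlier answer):
for category_url in category_urls():
    response = requests.get(category_url)
    soup = BeautifulSoup(response.text, features='html.parser')
    for product in soup.select(".product-item-info .product-item-link"):
        print(product.get_text(strip=True))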
I looked at the website and found that all the product categories are listed at the bottom left of the main page https://www.courts.com.sg/. After clicking one of these, you land on the front page of that particular category, where you have to click All Products to get the full listing.
The following is the code as a whole:
import requests
from bs4 import BeautifulSoup

def parser():
    parsing_list = []
    url = 'https://www.courts.com.sg/'
    source_code = requests.get(url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text, "html.parser")
    ul = soup.find('footer', {'class': 'page-footer'}).find('ul')
    for l in ul.find_all('li'):
        nextlink = url + l.find('a').get('href')
        response = requests.get(nextlink)
        inner_soup = BeautifulSoup(response.text, "html.parser")
        parsing_list.append(url + inner_soup.find('div', {'class': 'category-static-links ng-scope'}).find('a').get('href'))
    return parsing_list
This function returns a list of the "All Products" page URLs for every category, including the ones your code didn't scrape.
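A possible follow-up, reusing the product selector from the question, that walks the returned list and prints the product names (a sketch; pagination on each "All Products" page is not handled):
for category_link in parser():
    res = requests.get(category_link)
    soup = BeautifulSoup(res.text, "html.parser")
    for product in soup.select(".product-item-info .product-item-link"):
        print(product.get_text(strip=True))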

Webscraping multiple pages from a site gives just the home page contents?

I am trying to scrape the pages of the site https://www.rithmschool.com/blog. Although the contents of the first page are scraped correctly, the problem with my code is that the same content is scraped for all the other pages of the site as well. Below is the code.
Can anyone please help me fix it?
My code
import requests
from bs4 import BeautifulSoup
from csv import writer

html = requests.get('https://www.rithmschool.com/blog')
soup = BeautifulSoup(html.text, 'html.parser')
articles = soup.find_all('article')

with open('scraped_rithm.csv', 'w') as f:
    data = writer(f)
    data.writerow(['Title', 'Link', 'Date'])

    for article in articles:
        title = article.find('a').get_text()
        link = article.find('a')['href']
        date = article.find('time')['datetime']
        data.writerow([title, link, date])

    spans = soup.find_all('span', {'class': 'page'})
    for span in spans:
        if span.find('a'):
            urls = ((span.find('a')['href'])).split(',')
            for url in urls:
                nw_urls = (f"https://www.rithmschool.com{url}")
                print(nw_urls)
                nw_response = requests.get(nw_urls)
                nw_soup = BeautifulSoup(nw_response.text, 'html.parser')
                articles = soup.find_all('article')
                for article in articles:
                    title = article.find('a').get_text()
                    link = article.find('a')['href']
                    date = article.find('time')['datetime']
                    data.writerow([title, link, date])
You are still using the old soup object after creating a new nw_soup object
Try changing this from
nw_soup = BeautifulSoup(nw_response.text,'html.parser')
articles = soup.find_all('article')
to
nw_soup = BeautifulSoup(nw_response.text,'html.parser')
articles = nw_soup.find_all('article')
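Put together, the pagination part of the loop would look roughly like this. It is a sketch of a fragment that slots back inside the with open(...) block from the question; the .split(',') from the original is dropped because each span holds a single href.
spans = soup.find_all('span', {'class': 'page'})
for span in spans:
    if span.find('a'):
        url = span.find('a')['href']
        nw_url = f"https://www.rithmschool.com{url}"
        nw_response = requests.get(nw_url)
        nw_soup = BeautifulSoup(nw_response.text, 'html.parser')
        # Search the page that was just fetched, not the original soup.
        for article in nw_soup.find_all('article'):
            title = article.find('a').get_text()
            link = article.find('a')['href']
            date = article.find('time')['datetime']
            data.writerow([title, link, date])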
