Get user_id and rating from web-scraped pictures - Python

I have a dataframe built by web scraping all the pages of the animeka website:
import pandas as pd
import requests
from bs4 import BeautifulSoup

titles, studios, genres, durations = [], [], [], []
for page_no in range(1, 467):
    url = 'http://www.animeka.com/animes/~_{}.html'.format(page_no)
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    for table in soup.find_all('table', class_='animesindex'):
        td = table.find_all('td', class_='animestxt')
        titles.append(td[1].text.split(':')[1])
        studios.append(td[3].text.split(':')[1])
        genres.append(td[4].text.split(':')[1])
        durations.append(td[6].text.split(':')[1])

headers = ['Title', 'Studio', 'Genres', 'Duration']
df = pd.DataFrame(dict(zip(headers, [titles, studios, genres, durations])))
df = df.rename(columns={'Duration': 'duration', 'Genres': 'genre', 'Studio': 'studio', 'Title': 'titre'})
I would also like to get the user_id and the rating each user gave for every anime, but that information is rendered as a picture in the "detail" subsection and I do not know how to gather it.
This is the markup of the picture where the rating is:
<img src="/animes/13498.png" width="400" height="100" alt="graph">

You can use the find_previous method to find tags and strings that come before a particular tag in the document.
td[1].find_previous('td')
So, if you wanted to extract the name of the image, you'd try this:
td[1].find_previous('td').img['src'].split('/')[-1]
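Note that the ratings themselves are drawn inside the image, so the HTML only exposes the image file name (which appears to be the anime's numeric id); the individual user_id/rating values cannot be read from the markup itself. Tying this into the listing loop from the question, a minimal sketch (assuming each animesindex table has the graph image in a preceding td, as in the snippet above):

image_names = []
for table in soup.find_all('table', class_='animesindex'):
    td = table.find_all('td', class_='animestxt')
    # walk back to the td containing the <img> and keep just the file name,
    # e.g. '13498.png'
    image_names.append(td[1].find_previous('td').img['src'].split('/')[-1])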

Related

Using Beautiful Soup to find a phone number for a company name and address

I have a script which scrapes a website for the name, region and province of companies in Spain. There is another link within the HTML which takes you to a page that contains the phone number, but when I try to scrape it, it prints "None". Is there a way for the script to automatically move to that page, scrape the number and match it with the company row?
import requests
from googlesearch import search
from bs4 import BeautifulSoup

for page in range(1, 65):
    url = "https://www.expansion.com/empresas-de/ganaderia/granjas-en-general/{page}.html".format(page=page)
    page = requests.get(url)
    soup = BeautifulSoup(page.content, "html.parser")
    lists = soup.select("div#simulacion_tabla ul")
    # scrape the list
    for lis in lists:
        title = lis.find('li', class_="col1").text
        location = lis.find('li', class_="col2").text
        province = lis.find('li', class_="col3").text
        link = lis.find('href', class_="col1")
        info = [title, location, province, link]
        print(info)
Alternatively, is there a way to do it with the googlesearch library?
Many thanks
The first URL is "https://www.expansion.com/empresas-de/ganaderia/granjas-en-general/index.html", not
"https://www.expansion.com/empresas-de/ganaderia/granjas-en-general/1.html".
That is why your script returns no output.
You can try it like this:
import requests
# from googlesearch import search
from bs4 import BeautifulSoup

baseurl = ["https://www.expansion.com/empresas-de/ganaderia/granjas-en-general/index.html"]
urls = [f'https://www.expansion.com/empresas-de/ganaderia/granjas-en-general/{i}.html' for i in range(2, 5)]
allurls = baseurl + urls
print(allurls)
for url in allurls:
    page = requests.get(url)
    soup = BeautifulSoup(page.content, "html.parser")
    lists = soup.select("div#simulacion_tabla ul")
    # scrape the list
    for lis in lists:
        title = lis.find('li', class_="col1").text
        location = lis.find('li', class_="col2").text
        province = lis.find('li', class_="col3").text
        link = lis.select("li.col1 a")[0]['href']
        info = [title, location, province, link]
        print(info)
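If you would rather keep each company's fields together than print them, here is a small sketch (my own extension, reusing the same selectors) that collects the rows into a pandas DataFrame:

import pandas as pd

rows = []
for url in allurls:
    page = requests.get(url)
    soup = BeautifulSoup(page.content, "html.parser")
    for lis in soup.select("div#simulacion_tabla ul"):
        # one dict per company keeps title, location, province and link matched
        rows.append({
            'title': lis.find('li', class_="col1").text,
            'location': lis.find('li', class_="col2").text,
            'province': lis.find('li', class_="col3").text,
            'link': lis.select("li.col1 a")[0]['href'],
        })
df = pd.DataFrame(rows)
print(df)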

How to list out all the h2, h3, and p tags then create a dataframe to store them

I was given a website to scrape all of the key items from.
But the output I got covers only one item using BeautifulSoup4, so I wonder if I need to use something like soup.find_all to extract all the key items into a list from the website.
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re

url = 'https://realpython.github.io/fake-jobs/'
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
column = soup.find(class_=re.compile('columns is-multiline'))
print(column.prettify())
position = column.h2.text
company = column.h3.text
city_state = column.find_all('p')[-2].text
print(position, company, city_state)
Thank you.
Try this:
import pandas as pd
from urllib.request import urlopen
from bs4 import BeautifulSoup

url = 'https://realpython.github.io/fake-jobs/'
html = urlopen(url)
soup = BeautifulSoup(html, 'html.parser')
positions = [pos.text for pos in soup.find_all('h2')]
companies = [com.text for com in soup.find_all('h3')]
city_state0 = []
city_state1 = []
for p in soup.find_all('p', {'class': 'location'}):
    city_state0.append(p.text.split(',')[0].strip())
    city_state1.append(p.text.split(',')[1].strip())
df = pd.DataFrame({
    'city_state1': city_state0,
    'city_state2': city_state1,
    'companies': companies,
    'positions': positions
})
print(df)
You need to use find_all to get all the elements, like so; find only gets the first element.
titles = soup.find_all('h2', class_='title is-5')
companies = soup.find_all('h3', class_='subtitle is-6 company')
locations = soup.find_all('p', class_='location')
# loop over locations and extract the city and state
for location in locations:
    city = location.text.split(', ')[0]
    state = location.text.split(', ')[1]
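To line those up row by row, a short sketch in the same spirit as the first answer (assuming every location is formatted as "City, State"):

import pandas as pd

rows = []
for title, company, location in zip(titles, companies, locations):
    city, state = location.text.split(', ')
    rows.append({'position': title.text, 'company': company.text,
                 'city': city, 'state': state})
df = pd.DataFrame(rows)
print(df)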

Problem with For Loop in Python BeautifulSoup web scraping

I'm a beginner with Python and trying to learn with a BeautifulSoup web scraping project.
I'm looking to scrape the record item title, the URL of the item and the purchase date from this URL, and export them to a CSV.
I made great progress scraping the title and URL but just cannot figure out how to correctly code the purchase date in my for loop (the purchase_date variable below).
What's currently happening is that the purchase-date column in the CSV file (the p_date header) just shows blank cells with no text; there is no error message, just no data getting put into the CSV. Any guidance is much appreciated.
Thank you!!
import requests
from requests import get
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np

headers = {"Accept-Language": "en-US, en;q=0.5"}
url = "https://www.popsike.com/php/quicksearch.php?searchtext=metal+-signed+-promo+-beatles+-zeppelin+-acetate+-test+-sinatra&sortord=aprice&pagenum=1&incldescr=1&sprice=100&eprice=&endfrom=2020&endthru=2020&bidsfrom=&bidsthru=&layout=&flabel=&fcatno="
results = requests.get(url, headers=headers)
soup = BeautifulSoup(results.text, "html.parser")
title = []
date = []
URL = []
record_div = soup.find_all('div', class_='col-md-7 add-desc-box')
for container in record_div:
    description = container.a.text
    title.append(description)
    link = container.find('a')
    URL.append(link.get('href'))
    purchase_date = container.find('span', class_='info-row').text
    date.append(purchase_date)
test_data = pd.DataFrame({
    'record_description': title,
    'link': URL,
    'p_date': date
})
test_data['link'] = test_data['link'].str.replace('../', 'https://www.popsike.com/', 1)
print(test_data)
test_data.to_csv('popaaron.csv')
I suggest changing the parser type (this uses the html5lib package, which must be installed separately):
soup = BeautifulSoup(results.text, "html5lib")
And fixing the search expression for the purchase date:
purchase_date = container.select('span.date > b')[0].text.strip(' \t\n\r')
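Applied to the question's loop, a sketch of the corrected body (assuming html5lib is installed, e.g. pip install html5lib):

soup = BeautifulSoup(results.text, "html5lib")
record_div = soup.find_all('div', class_='col-md-7 add-desc-box')
for container in record_div:
    title.append(container.a.text)
    URL.append(container.find('a').get('href'))
    # the purchase date is the <b> tag inside <span class="date">
    date.append(container.select('span.date > b')[0].text.strip(' \t\n\r'))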

Python Data Scraping with Beautiful Soup - getting Data from within a href

I'm pretty new to Python and getting to know Beautiful Soup.
So I have this problem: I need to get data from an event company, specifically contact data. They have a main table with all the participant names and their locations, but to get the contact data (phone, email) you need to click each company name in the table, which opens a new window with all the additional information. I'm looking for a way to get that info from the href and combine it with the data in the main table.
So far I can get the table and all the hrefs:
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen
test_url = "https://standconstruction.messe-duesseldorf.de/vis/v1/en/hallindex/1.09?oid=2656&lang=2"
test_data = urlopen(test_url)
test_html = test_data.read()
test_data.close()
page_soup = soup(test_html, "html.parser")
test_table = page_soup.findAll("div", {"class": "exh-table-col"})
print(test_table)
As a result I get the whole table and this kind of info (example of one row), including the Name, the Address and the href:
<a class="flush" href="/vis/v1/en/exhibitors/aluminium2020.2661781?oid=2656&lang=2">
<h2 class="exh-table-item__name" itemprop="name">Aerospace Engineering Equipment (Suzhou) Co LTD</h2>
</a>
</div>, <div class="exh-table-col exh-table-col--address">
<span class=""><i class="fa fa-map-marker"></i> <span class="link-fix--text">Hall 9 / G57</span></span>
That's where my problem starts: I have no idea how to get the additional data from the href and combine it with the main data.
I would be very thankful for any possible solution, or at least a tip on where I can find one.
Updating the question:
I need a table which contains the information of the following columns:
1. Name; 2. Hall; 3. PDF; 4. Phone; 5. Email.
If you collect the data by hand, to get the Phone and Email you need to click the appropriate link for them to show.
I wanted to know if there is a way to extract the Phone and Email from those links and add them to the first three columns using Python.
import requests
from bs4 import BeautifulSoup
import pandas as pd
from time import sleep

params = {
    "oid": "2656",
    "lang": "2"
}

def main(url):
    with requests.Session() as req:
        r = req.get(url, params=params)
        soup = BeautifulSoup(r.content, 'html.parser')
        target = soup.select("div.exh-table-item")
        names = [name.h2.text for name in target]
        hall = [hall.span.text.strip() for hall in target]
        pdf = [pdf.select_one("a.color--darkest")['href'] for pdf in target]
        # url[:46] is the scheme + host part of the index URL
        links = [f"{url[:46]}{link.a['href']}" for link in target]
        phones = []
        emails = []
        for num, link in enumerate(links):
            print(f"Extracting {num + 1} of {len(links)}")
            r = req.get(link)
            soup = BeautifulSoup(r.content, 'html.parser')
            goal = soup.select_one("div[class^=push--bottom]")
            try:
                phone = goal.select_one("span[itemprop=telephone]").text
            except:
                phone = "N/A"
            try:
                email = goal.select_one("a[itemprop=email]").text
            except:
                email = "N/A"
            emails.append(email)
            phones.append(phone)
            sleep(1)  # pause between detail-page requests
        df = pd.DataFrame(list(zip(names, hall, pdf, phones, emails)), columns=[
            "Name", "Hall", "PDF", "Phone", "Email"])
        print(df)
        df.to_csv("data.csv", index=False)

main("https://standconstruction.messe-duesseldorf.de/vis/v1/en/hallindex/1.09")

Scraping multiple paginated links with BeautifulSoup and Requests

Python Beginner here. I'm trying to scrape all products from one category on dabs.com. I've managed to scrape all products on a given page, but I'm having trouble iterating over all the paginated links.
Right now, I've tried to isolate all the pagination buttons with the span class="page-list", but even that isn't working. Ideally, I would like to make the crawler keep clicking next until it has scraped all products on all pages. How can I do this?
Really appreciate any input
from bs4 import BeautifulSoup
import requests

base_url = "http://www.dabs.com"
page_array = []

def get_pages():
    html = requests.get(base_url)
    soup = BeautifulSoup(html.content, "html.parser")
    page_list = soup.findAll('span', class_="page-list")
    pages = page_list[0].findAll('a')
    for page in pages:
        page_array.append(page.get('href'))

def scrape_page(page):
    html = requests.get(page)
    soup = BeautifulSoup(html.content, "html.parser")
    Product_table = soup.findAll("table")
    Products = Product_table[0].findAll("tr")
    if len(soup.findAll('tr')) > 0:
        Products = Products[1:]  # skip the header row
    for row in Products:
        cells = row.find_all('td')
        data = {
            'description': cells[0].get_text(),
            'price': cells[1].get_text()
        }
        print(data)

get_pages()
[scrape_page(base_url + page) for page in page_array]
Their next page button has a title of "Next", so you could do something like this:
import requests
from bs4 import BeautifulSoup as bs

url = 'http://www.dabs.com/category/computing/11001/'
base_url = 'http://www.dabs.com'
r = requests.get(url)
soup = bs(r.text, 'html.parser')
elm = soup.find('a', {'title': 'Next'})
next_page_link = base_url + elm['href']
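To keep following "Next" until the last page, a minimal loop sketch (assuming the link is absent on the final page):

import requests
from bs4 import BeautifulSoup as bs

base_url = 'http://www.dabs.com'
url = 'http://www.dabs.com/category/computing/11001/'
while url:
    r = requests.get(url)
    soup = bs(r.text, 'html.parser')
    # ... scrape this page's product table here ...
    elm = soup.find('a', {'title': 'Next'})
    url = base_url + elm['href'] if elm else None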
Hope that helps.
