Saving and scraping multiple pages with BeautifulSoup and pandas - python

I tested my code in a Jupyter notebook with this code
...
rname = soup.find('p', 'con_tx')
#rnamelis = rname.findAll('p')
rname
from urllib.request import urljoin
story=[]
#review_text = lis[0].find('p').getText()
#list_soup =soup.find_all('p', 'con_tx')
story=rname.getText()
story
and it worked well.
(result) '전 여친에 ...'
But when I tried to scrape multiple pages
from bs4 import BeautifulSoup
from urllib.request import urlopen
from urllib.request import urljoin
import pandas as pd
import numpy as np
import requests
base_url = 'https://movie.naver.com/movie/bi/mi/basic.nhn?code='
pages =['177374','164102']
url = base_url + pages[0]
story = []
for n in pages:
    # Create url
    url = base_url + n
    # Parse data using BS
    print('Downloading page %s...' % url)
    res = requests.get(url)
    res.raise_for_status()
    html = urlopen(url)
    soup = BeautifulSoup(html, "html.parser")
    #print(soup.find('p', 'con_tx'))
    rname = soup.find('p', 'con_tx')
    story = rname.getText()

data = {story}
df = pd.DataFrame(data)
df.head()
df.to_csv('./moviestory.csv', sep=',', encoding='EUC-KR')
An error message came out.
ValueError: DataFrame constructor not properly called!
How do I fix my code?
[Image: crawling area]

Not sure what you are trying to do, but one thing I'm noticing is you are overwriting your dataframe each time. Also not sure why you initialise story as a list and then wrap it in a set: data = {story} is a set literal, and passing a set to pd.DataFrame is what raises "DataFrame constructor not properly called!".
from bs4 import BeautifulSoup
import pandas as pd
import requests

base_url = 'https://movie.naver.com/movie/bi/mi/basic.nhn?code='
pages = ['177374','164102']

df = pd.DataFrame()
for n in pages:
    # Create url
    url = base_url + n
    # Parse data using BS
    print('Downloading page %s...' % url)
    res = requests.get(url)
    soup = BeautifulSoup(res.text, "html.parser")
    rname = soup.find('p', 'con_tx')
    story = rname.getText()
    data = [story]
    df = df.append(pd.DataFrame(data), sort=True).reset_index(drop=True)

df.to_csv('./moviestory.csv', sep=',', encoding='EUC-KR')
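Note that DataFrame.append has since been deprecated and removed in pandas 2.0. A minimal sketch of the same loop that collects rows in a plain list and builds the DataFrame once at the end (same URLs and selector as above; untested against the live site):

import requests
import pandas as pd
from bs4 import BeautifulSoup

base_url = 'https://movie.naver.com/movie/bi/mi/basic.nhn?code='
pages = ['177374', '164102']

stories = []
for n in pages:
    res = requests.get(base_url + n)
    res.raise_for_status()
    soup = BeautifulSoup(res.text, 'html.parser')
    rname = soup.find('p', 'con_tx')
    if rname:  # skip pages where the synopsis paragraph is missing
        stories.append({'code': n, 'story': rname.get_text()})

# one DataFrame built in a single call instead of repeated append()
df = pd.DataFrame(stories)
df.to_csv('./moviestory.csv', sep=',', encoding='EUC-KR', index=False)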

Related

How do I scrape data from URLs in a python-scraped list of URLs?

I'm trying to use BeautifulSoup4 in Orange to scrape data from a list of URLs scraped from that same website.
I have managed to scrape the data from a single page when I set the URL manually.
from urllib.request import urlopen
from bs4 import BeautifulSoup
import requests
import csv
import re
url = "https://data.ushja.org/awards-standings/zone-points.aspx?year=2021&zone=1&section=1901"
req = requests.get(url)
soup = BeautifulSoup(req.text, "html.parser")
rank = soup.find("table", class_="table-standings-body")
for child in rank.children:
    print(url, child)
and I have been able to scrape the list of URLs I need
from urllib.request import urlopen
from bs4 import BeautifulSoup
import requests
import csv
import re
url = "https://data.ushja.org/awards-standings/zones.aspx?year=2021&zone=1"
req = requests.get(url)
soup = BeautifulSoup(req.text, "html.parser")
rank = soup.find("table", class_="table-standings-body")
link = soup.find('div',class_='contentSection')
url_list = link.find('a').get('href')
for url_list in link.find_all('a'):
    print(url_list.get('href'))
But so far I haven't been able to combine both to scrape the data from that URL list. Can I do that only by nesting for loops, and if so, how? Or how can I do it?
I am sorry if this is a stupid question, but I only started trying Python and web scraping yesterday and I have not been able to figure this out by consulting similar-ish topics.
Try:
import requests
import pandas as pd
from bs4 import BeautifulSoup
url = "https://data.ushja.org/awards-standings/zones.aspx?year=2021&zone=1"
req = requests.get(url)
soup = BeautifulSoup(req.text, "html.parser")
# get all links
url_list = []
for a in soup.find("div", class_="contentSection").find_all("a"):
    url_list.append(a["href"].replace("§", "&sect"))

# get all data from URLs
all_data = []
for url in url_list:
    print(url)

    req = requests.get(url)
    soup = BeautifulSoup(req.text, "html.parser")

    h2 = soup.h2
    sub = h2.find_next("p")

    for tr in soup.select("tr:has(td)"):
        all_data.append(
            [
                h2.get_text(strip=True),
                sub.get_text(strip=True),
                *[td.get_text(strip=True) for td in tr.select("td")],
            ]
        )

# save data to CSV
df = pd.DataFrame(
    all_data,
    columns=[
        "title",
        "sub_title",
        "Rank",
        "Horse / Owner",
        "Points",
        "Total Comps",
    ],
)
print(df)
df.to_csv("data.csv", index=None)
This traverses all URLs and saves all data to data.csv.
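A note on the .replace("§", "&sect") call, which is easy to misread: the raw hrefs contain &section=..., and HTML parsers decode the legacy entity &sect even without a trailing semicolon, turning the links into ...§ion=...; the replace restores the original query string. A quick standard-library demonstration of the effect:

from html import unescape

raw = "zone-points.aspx?year=2021&zone=1&section=1901"
print(unescape(raw))                        # ...&zone=1§ion=1901  ('&sect' was decoded to '§')
print(unescape(raw).replace("§", "&sect"))  # ...&zone=1&section=1901  (original parameter restored)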

Iterate Over URLs Using BeautifulSoup

I have written some code to gather URLs for each race course from https://www.horseracing.net/racecards. I have also written some code to scrape data from each race course page.
Each bit of code works as it should but I am having trouble creating a for loop to loop through all the race course URLs.
Here's the code to scrape the course URLs:
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
todays_racecard_url = 'https://www.horseracing.net/racecards'
base_url = "https://www.horseracing.net"
reqs = requests.get(todays_racecard_url)
content = reqs.text
soup = BeautifulSoup(content, 'html.parser')
course_urls = []
for h in soup.findAll('h3'):
    a = h.find('a')
    try:
        if 'href' in a.attrs:
            card_url = urljoin(base_url, a.get('href'))
            course_urls.append(card_url)
    except:
        pass

for card_url in course_urls:
    print(card_url)
And here's the code to scrape the pages:
import requests
from requests import get
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
url = "https://www.horseracing.net/racecards/fontwell/13-05-21"
results = requests.get(url)
soup = BeautifulSoup(results.text, "html.parser")
date = []
course = []
time = []
runner = []
tips = []
tipsters = []
runner_div = soup.find_all('div', class_='row-cell-right')
for container in runner_div:
    runner_name = container.h5.a.text
    runner.append(runner_name)
    tips_no = container.find('span', class_='tip-text number-tip').text if container.find('span', class_='tip-text number-tip') else ''
    tips.append(tips_no)
    tipster_names = container.find('span', class_='pointers-text currency-text').text if container.find('span', class_='pointers-text currency-text') else ''
    tipsters.append(tipster_names)
newspaper_tips = pd.DataFrame({
'Runners': runner,
'Tips': tips,
'Tipsters': tipsters,
})
newspaper_tips['Tipsters'] = newspaper_tips['Tipsters'].str.replace(' - ', '')
newspaper_tips.to_csv('NewspaperTips.csv', mode='a', header=False, index=False)
How do I join them to get the result I'm looking for?
It could be combined as follows:
import pandas as pd
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
todays_racecard_url = 'https://www.horseracing.net/racecards'
base_url = "https://www.horseracing.net"
req = requests.get(todays_racecard_url)
soup_racecard = BeautifulSoup(req.content, 'html.parser')
df = pd.DataFrame(columns=['Runners', 'Tips', 'Tipsters'])
for h in soup_racecard.find_all('h3'):
    a = h.find('a', href=True)  # only find tags with href present
    if a:
        url = urljoin(base_url, a['href'])
        print(url)
        results = requests.get(url)
        soup_url = BeautifulSoup(results.text, "html.parser")
        for container in soup_url.find_all('div', class_='row-cell-right'):
            runner_name = container.h5.a.text
            tips_no = container.find('span', class_='tip-text number-tip').text if container.find('span', class_='tip-text number-tip') else ''
            tipster_names = container.find('span', class_='pointers-text currency-text').text if container.find('span', class_='pointers-text currency-text') else ''
            row = [runner_name, tips_no, tipster_names]
            df.loc[len(df)] = row  # append the new row

df['Tipsters'] = df['Tipsters'].str.replace(' - ', '')
df.to_csv('NewspaperTips.csv', index=False)
Giving you a CSV starting:
Runners,Tips,Tipsters
Ajrad,2,NEWMARKET
Royal Tribute,1,The Times
Time Interval,1,Daily Mirror
Hemsworth,1,Daily Express
Ancient Times,,
Final Watch,,
Hala Joud,,
May Night,1,The Star
Tell'Em Nowt,,
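A side note, not from the original answer: growing the frame with df.loc[len(df)] = row reallocates on every insert, which gets slow for larger scrapes. A common alternative is to collect plain rows and build the DataFrame once after the loops, along these lines (the sample rows reuse values from the CSV above):

import pandas as pd

rows = []
# inside the scraping loops, append plain dicts instead of writing df.loc[len(df)] = row
rows.append({'Runners': 'Ajrad', 'Tips': '2', 'Tipsters': 'NEWMARKET'})
rows.append({'Runners': 'Royal Tribute', 'Tips': '1', 'Tipsters': 'The Times'})

# build the DataFrame once after the loops finish
df = pd.DataFrame(rows, columns=['Runners', 'Tips', 'Tipsters'])
print(df)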

Finding Table Class Python

I am having trouble finding a table while web scraping using python/Beautiful Soup
import requests
from bs4 import BeautifulSoup
url = 'https://www.espn.com/nba/player/gamelog/_/id/3907387/ben-simmons'
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
t = soup.find_all('table' , class_='Table Table--align-right')
This returns an empty list.
You can use pandas read_html to read all the tables on the page into a list of DataFrames and pick the one you need from that, as below:
import requests
import pandas as pd
url = 'https://www.espn.com/nba/player/gamelog/_/id/3907387/ben-simmons'
html = requests.get(url).content
df_list = pd.read_html(html)
df = df_list[3]
print(df)
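As an aside, not part of the original answer: when class_ is given a string containing a space, BeautifulSoup matches it against the tag's full class attribute, so the search can come back empty if the table carries extra classes or lists them in a different order, which is one plausible reason for the empty result above. A more forgiving sketch uses a CSS selector, which matches tags having both classes regardless of order or extras:

import requests
from bs4 import BeautifulSoup

url = 'https://www.espn.com/nba/player/gamelog/_/id/3907387/ben-simmons'
# a browser-like User-Agent sometimes helps with sites that block default clients
page = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
soup = BeautifulSoup(page.content, 'html.parser')

# select() matches any table carrying both classes, in any order
tables = soup.select('table.Table.Table--align-right')
print(len(tables))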

Unable to scrape this site. How to scrape data from this site?

I am not able to scrape data from this site.
I tried the same approach with other sites and it works fine there...
from bs4 import BeautifulSoup
from urllib.request import urlopen
response = urlopen("https://www.daraz.com.np/catalog/?spm=a2a0e.searchlistcategory.search.2.3eac4b8amQJ0zd&q=samsung%20m20&_keyori=ss&from=suggest_normal&sugg=samsung%20m20_1_1")
html = response.read()
parsed_html = BeautifulSoup(html, "html.parser")
containers = parsed_html.find_all("div", {"class" : "c2prKC"})
print(len(containers))
It looks like the page is rendered by JavaScript after loading. You can use Selenium to render the page and Beautiful Soup to get the elements.
from bs4 import BeautifulSoup
from selenium import webdriver
import time
driver = webdriver.Chrome()
driver.get("https://www.daraz.com.np/catalog/?spm=a2a0e.searchlistcategory.search.2.3eac4b8amQJ0zd&q=samsung%20m20&_keyori=ss&from=suggest_normal&sugg=samsung%20m20_1_1")
time.sleep(5)
html = driver.page_source
parsed_html = BeautifulSoup(html, "html.parser")
containers = parsed_html.find_all("div", {"class" : "c2prKC"})
print(len(containers))
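A small refinement on the Selenium approach, not from the original answer: an explicit wait is usually more reliable than a fixed time.sleep, since it returns as soon as the content appears and raises if it never does (the c2prKC class name is taken from the snippet above and may have changed on the site):

from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
driver.get("https://www.daraz.com.np/catalog/?spm=a2a0e.searchlistcategory.search.2.3eac4b8amQJ0zd&q=samsung%20m20&_keyori=ss&from=suggest_normal&sugg=samsung%20m20_1_1")

# wait up to 15 seconds for at least one product card to be present
WebDriverWait(driver, 15).until(
    EC.presence_of_element_located((By.CLASS_NAME, "c2prKC"))
)

parsed_html = BeautifulSoup(driver.page_source, "html.parser")
containers = parsed_html.find_all("div", {"class": "c2prKC"})
print(len(containers))
driver.quit()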
The info you want is in a script tag. You can use a regex or loop over the script tags to get the right string to parse as JSON (with a small amendment):
import requests
import json
from bs4 import BeautifulSoup as bs
import pandas as pd
headers = {
    'User-Agent': 'Mozilla/5.0'
}
res = requests.get('https://www.daraz.com.np/catalog/?spm=a2a0e.searchlistcategory.search.2.3eac4b8amQJ0zd&q=samsung%20m20&_keyori=ss&from=suggest_normal&sugg=samsung%20m20_1_1', headers = headers)
soup = bs(res.content, 'lxml')
for script in soup.select('script'):
    if 'window.pageData=' in script.text:
        script = script.text.replace('window.pageData=','')
        break

items = json.loads(script)['mods']['listItems']

results = []
for item in items:
    #print(item)
    #extract other info you want
    row = [item['name'], item['priceShow'], item['productUrl'], item['ratingScore']]
    results.append(row)
df = pd.DataFrame(results, columns = ['Name', 'Price', 'ProductUrl', 'Rating'])
print(df.head())
Regex version:
import requests
import re
import json
from bs4 import BeautifulSoup as bs
import pandas as pd

headers = {
    'User-Agent': 'Mozilla/5.0'
}
res = requests.get('https://www.daraz.com.np/catalog/?spm=a2a0e.searchlistcategory.search.2.3eac4b8amQJ0zd&q=samsung%20m20&_keyori=ss&from=suggest_normal&sugg=samsung%20m20_1_1', headers = headers)
soup = bs(res.content, 'lxml')
r = re.compile(r'window.pageData=(.*)')
data = soup.find('script', text=r).text
script = r.findall(data)[0]
items = json.loads(script)['mods']['listItems']

results = []
for item in items:
    row = [item['name'], item['priceShow'], item['productUrl'], item['ratingScore']]
    results.append(row)
df = pd.DataFrame(results, columns = ['Name', 'Price', 'ProductUrl', 'Rating'])
print(df.head())
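A third variant follows the same idea, pulling individual fields from the first list item; here containerSearch is assumed to hold the page's script tags, since the original snippet did not define it: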
import requests
import json
from bs4 import BeautifulSoup as bs
import pandas as pd

headers = {
    'User-Agent': 'Mozilla/5.0'
}
res = requests.get('https://www.daraz.com.np/catalog/?q=camera&_keyori=ss&from=input&spm=a2a0e.searchlist.search.go.71a64360Kgxf1m', headers = headers)
soup = bs(res.content, 'lxml')

# containerSearch was not defined in the original snippet; the page's script tags are presumably what was meant
containerSearch = soup.find_all('script')

scriptData = ''
for d in containerSearch:
    if 'window.pageData=' in str(d):
        scriptData = str(d).replace('window.pageData=','')
        break
scriptData = scriptData.replace('<script>','')
scriptData = scriptData.replace('</script>','')

items = json.loads(scriptData)
name = items['mods']['listItems'][0]['name']
image = items['mods']['listItems'][0]['image']
price = items['mods']['listItems'][0]['price']
priceShow = items['mods']['listItems'][0]['priceShow']
ratingScore = items['mods']['listItems'][0]['ratingScore']
productUrl = items['mods']['listItems'][0]['productUrl']
print(name)
print(price)

I want to crawl data from pages 1 to 10 of a website automatically. How can I do it?

import requests
from bs4 import BeautifulSoup
My_Url = "http://questions.consumercomplaints.in/page/2"
Data = requests.get(My_Url)
Soup = BeautifulSoup(Data.content)
head_id = Soup.find_all({"div":"href"})
len(head_id)
for i in head_id:
    print i.text
With the code above I scraped the (reviews/complaints) from page 2 of the site.
How do I automatically crawl the data from all the pages (e.g. http://questions.consumercomplaints.in/page/3)?
Why not surround your code with a ranged for loop?
import requests
from bs4 import BeautifulSoup
for i in range(3,11):
    My_Url = "http://questions.consumercomplaints.in/page/" + str(i)
    Data = requests.get(My_Url)
    Soup = BeautifulSoup(Data.content)
    head_id = Soup.find_all({"div":"href"})
    len(head_id)
    for i in head_id:
        print i.text
Have a look at how the range function works in the Python documentation.
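Two small notes on the snippet above: the print statements are Python 2 syntax, and range(3, 11) covers pages 3 through 10, so to start from page 1 you would use range(1, 11). A Python 3 sketch of the same loop, keeping the original find_all arguments (which I have not verified against the site):

import requests
from bs4 import BeautifulSoup

for page in range(1, 11):  # pages 1 through 10
    url = "http://questions.consumercomplaints.in/page/" + str(page)
    data = requests.get(url)
    soup = BeautifulSoup(data.content, "html.parser")
    head_id = soup.find_all({"div": "href"})  # selector kept from the original post
    print(url, len(head_id))
    for item in head_id:
        print(item.text)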
