Python script - scraping data but not looping and bringing back all results

I am new to Python and need some help.
See the py script below: it brings back information for only one entry, but I want it to bring back all items that come up on that URL, including those on the pages not shown. What needs changing in the code below to do that?
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as Soup

my_url = 'https://www.newegg.com/global/uk/Product/ProductList.aspx?Submit=ENE&DEPA=0&Order=BESTMATCH&Description=graphics+card&N=-1&isNodeId=1'

uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()

page_soup = Soup(page_html, 'html.parser')
containers = page_soup.findAll('div', {'class': 'item-container'})

for container in containers:
    brand = container.div.div.a.img['title']
    title_container = container.findAll('a', {'class': 'item-title'})
    product_name = title_container[0].text
    price_container = container.findAll('li', {'class': 'price-current'})
    Price = price_container[0].text.strip()

print("brand: " + brand)
print("product_name: " + product_name)
print("Price: " + Price)

Related

Python web scrape - not displaying all containers

page_soup.findAll doesn't seem to get all the containers. Running len(containers) shows I have 12 containers, but the script is only pulling info from one. Can someone please help? I'm trying to get info for all 12 containers.
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup

my_url = 'https://www.newegg.com/Video-Cards-Video-Devices/Category/ID-38?Tpk=graphics%20card'

uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()

page_soup = soup(page_html, "html.parser")
containers = page_soup.findAll("div", {"class": "item-container"})

for container in containers:
    brand = container.img["title"]
    title_container = container.findAll("a", {"class": "item-title"})
    product_name = title_container[0].text
    shipping_container = container.findAll("li", {"class": "price-ship"})
    shipping = shipping_container[0].text.strip()

print("brand: " + brand)
print("product_name: " + product_name)
print("shipping : " + shipping)
Your code looks good and it is getting all 12 containers, but you are printing only the last one.
In order to print all of them, move the last three print lines inside the for loop, like this:
for container in containers:
    brand = container.img["title"]
    title_container = container.findAll("a", {"class": "item-title"})
    product_name = title_container[0].text
    shipping_container = container.findAll("li", {"class": "price-ship"})
    shipping = shipping_container[0].text.strip()
    print("brand: " + brand)
    print("product_name: " + product_name)
    print("shipping : " + shipping)

Python Web Scraping with Multiple URLs + merging data

What I'm trying to do is:
Take multiple URLs.
Take the h2 text from every URL.
Merge the h2 texts and then write them to a CSV.
In this code, I only take one URL and grab its h2 text:
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq

page_url = "https://example.com/ekonomi/20200108/"
# i am trying to do | urls = ['https://example.com/ekonomi/20200114/', 'https://example.com/ekonomi/20200113/', 'https://example.com/ekonomi/20200112/', 'https://example.com/ekonomi/20200111/']

uClient = uReq(page_url)
page_soup = soup(uClient.read(), "html.parser")
uClient.close()

# finds each product from the store page
containers = page_soup.findAll("div", {"class": "b-plainlist__info"})

out_filename = "output.csv"
headers = "title\n"

f = open(out_filename, "w")
f.write(headers)

for container in containers:
    title = container.h2.get_text()
    f.write(title.replace(",", " ") + "\n")
f.close()  # Close the file
Provided your iteration through the containers is correct, this should work:
You want to iterate through the urls. Each url will grab the title and append it to a list. Then just build a DataFrame from that list and write it to CSV with Pandas:
from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
import pandas as pd

urls = ['https://example.com/ekonomi/20200114/', 'https://example.com/ekonomi/20200113/', 'https://example.com/ekonomi/20200112/', 'https://example.com/ekonomi/20200111/']

titles = []
for page_url in urls:
    uClient = uReq(page_url)
    page_soup = soup(uClient.read(), "html.parser")
    uClient.close()
    # finds each product from the store page
    containers = page_soup.findAll("div", {"class": "b-plainlist__info"})
    for container in containers:
        titles.append(container.h2.get_text())

df = pd.DataFrame(titles, columns=['title'])
df.to_csv("output.csv", index=False)

Why does my scraping script return empty results

I am practicing here, and my goal is to retrieve this data from the page in the url variable:
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup

url = "https://www.newegg.com/global/bg-en/PS4-Accessories/SubCategory/ID-3142"

# opening connection, grabbing the page
uClient = uReq(url)
page_html = uClient.read()
uClient.close()

# html parser
page_soup = soup(page_html, "html.parser")

# grabs each product
containers = page_soup.findAll("div", {"class": "item-container"})

for container in containers:
    brand = container.select("div.item-info")[0].a.img["title"]
    name = container.findAll("a", {"class": "item-title"})[0].text.strip()
    shipping = container.findAll("li", {"class": "price-ship"})[0].text.strip()
    print("brand " + brand)
    print("name " + name)
    print("shipping " + shipping)
Not much more I can say about it :) It's as simple as that, but I still can't figure out why no data is retrieved. I'll be thankful for any advice!
You are invoking the find_all method with the wrong arguments.
You should use the class_ keyword argument, as described in the documentation here:
https://www.crummy.com/software/BeautifulSoup/bs4/doc/#searching-by-css-class
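For illustration, here is a minimal sketch of that suggestion applied to the question's loop (find_all and class_ are the documented BeautifulSoup names; the URL and selectors are taken from the question):

from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup

url = "https://www.newegg.com/global/bg-en/PS4-Accessories/SubCategory/ID-3142"
page_soup = soup(uReq(url).read(), "html.parser")

# class_ avoids the clash with Python's reserved word `class`
containers = page_soup.find_all("div", class_="item-container")
for container in containers:
    name = container.find("a", class_="item-title").text.strip()
    shipping = container.find("li", class_="price-ship").text.strip()
    print("name " + name)
    print("shipping " + shipping)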

Python Web Scraper issue

I'm new to programming and trying to learn by building some small side projects. I have this code and it is working, but I'm having an issue with the CSV formatting when it pulls all the information. It started adding weird spaces after I added price to be pulled as well. If I comment out price and remove it from the write, it works fine, but I can't figure out why I get the weird spaces when I add it back.
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup

my_url = "https://www.newegg.com/Product/ProductList.aspx?Submit=ENE&N=-1&IsNodeId=1&Description=graphics%20card&bop=And&PageSize=12&order=BESTMATCH"

# Opening up connection, grabbing the page
uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()

# html parsing
page_soup = soup(page_html, "html.parser")

# grabs each product
containers = page_soup.findAll("div", {"class": "item-container"})

filename = "products.csv"
f = open(filename, "w")
headers = "brand, product_name, shipping, price\n"
f.write(headers)

for container in containers:
    brand = container.div.div.a.img["title"]
    title_container = container.findAll("a", {"class": "item-title"})
    product_name = title_container[0].text
    shipping_container = container.findAll("li", {"class": "price-ship"})
    shipping = shipping_container[0].text.strip()
    price_container = container.findAll("li", {"class": "price-current"})
    price = price_container[0].text.strip()
    print("brand: " + brand)
    print("product_name: " + product_name)
    print("Price: " + price)
    print("shipping: " + shipping)
    f.write(brand + "," + product_name.replace(",", "|") + "," + shipping + "," + price + "\n")
f.close()
You can write to a csv file the way I've shown below. The output it produces should serve the purpose. Check out the csv module documentation for clarity.
import csv
from urllib.request import urlopen
from bs4 import BeautifulSoup

my_url = "https://www.newegg.com/Product/ProductList.aspx?Submit=ENE&N=-1&IsNodeId=1&Description=graphics%20card&bop=And&PageSize=12&order=BESTMATCH"

page_html = urlopen(my_url).read()
page_soup = BeautifulSoup(page_html, "lxml")

with open("outputfile.csv", "w", newline="") as infile:
    writer = csv.writer(infile)
    writer.writerow(["brand", "product_name", "shipping", "price"])
    for container in page_soup.findAll("div", {"class": "item-container"}):
        brand = container.find(class_="item-brand").img.get("title")
        product_name = container.find("a", {"class": "item-title"}).get_text(strip=True).replace(",", "|")
        shipping = container.find("li", {"class": "price-ship"}).get_text(strip=True)
        price = container.find("li", {"class": "price-current"}).get_text(strip=True).replace("|", "")
        writer.writerow([brand, product_name, shipping, price])
You're getting the newlines and spam characters because that is the data you're getting back from BS4: it isn't a product of the writing process. This is because you're grabbing all the text in the list item, and there's a lot going on in there. Looking at the page, if you'd rather just get the price, you can concatenate the text of the strong tag within the list item with the text of the sup tag, e.g. price = price_container[0].find("strong").text + price_container[0].find("sup").text. That ensures you're only picking out the data you need; see the sketch below.
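A minimal sketch of that suggestion dropped into the question's loop (names follow the question's code; only the price handling changes):

for container in containers:
    price_container = container.findAll("li", {"class": "price-current"})
    # keep only the integer part (<strong>) plus the decimals (<sup>),
    # skipping the promo text and newlines that caused the weird spaces
    price = price_container[0].find("strong").text + price_container[0].find("sup").text
    print("Price: " + price)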

Scraping multiple pages with beautifulsoup4 using python 3.6.3

I am trying to loop through multiple pages and my code doesn't extract anything. I am kind of new to scraping, so bear with me. I made a container so I can target each listing, and a variable to target the anchor tag you would press to go to the next page. I would really appreciate any help I could get. Thanks.
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup

for page in range(0, 25):
    file = "breakfeast_chicago.csv"
    f = open(file, "w")
    Headers = "Nambusiness_name, business_address, business_city, business_region, business_phone_number\n"
    f.write(Headers)

    my_url = 'https://www.yellowpages.com/search?search_terms=Stores&geo_location_terms=Chicago%2C%20IL&page={}'.format(page)
    uClient = uReq(my_url)
    page_html = uClient.read()
    uClient.close()

    # html parsing
    page_soup = soup(page_html, "html.parser")

    # grabs each listing
    containers = page_soup.findAll("div", {"class": "result"})
    new = page_soup.findAll("a", {"class": "next ajax-page"})
    for i in new:
        try:
            for container in containers:
                b_name = i.find("container.h2.span.text").get_text()
                b_addr = i.find("container.p.span.text").get_text()
                city_container = container.findAll("span", {"class": "locality"})
                b_city = i.find("city_container[0].text ").get_text()
                region_container = container.findAll("span", {"itemprop": "postalCode"})
                b_reg = i.find("region_container[0].text").get_text()
                phone_container = container.findAll("div", {"itemprop": "telephone"})
                b_phone = i.find("phone_container[0].text").get_text()
                print(b_name, b_addr, b_city, b_reg, b_phone)
                f.write(b_name + "," + b_addr + "," + b_city.replace(",", "|") + "," + b_reg + "," + b_phone + "\n")
        except AttributeError:
            pass
    f.close()
If using BS4, try find_all.
Try dropping into a trace using import pdb; pdb.set_trace() and debug what is being selected in the for loop.
Also, some content may be hidden if it is loaded via JavaScript.
Each anchor tag or href for "clicking" is just another network request, and if you plan to follow the links, consider slowing down the rate of requests so you don't get blocked. A sketch of both suggestions follows.
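A hypothetical sketch, using the question's URL and an arbitrary two-second delay (pdb.set_trace and time.sleep are standard-library calls):

import pdb
import time
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup

my_url = 'https://www.yellowpages.com/search?search_terms=Stores&geo_location_terms=Chicago%2C%20IL&page={}'

for page in range(0, 3):
    uClient = uReq(my_url.format(page))
    page_soup = soup(uClient.read(), "html.parser")
    uClient.close()

    containers = page_soup.findAll("div", {"class": "result"})
    pdb.set_trace()  # drops into the debugger: inspect len(containers), containers[0], etc.

    time.sleep(2)  # pause between requests so the site is less likely to block you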
You can try the script below. It traverses different pages through pagination and collects the name and phone number from each container.
import requests
from bs4 import BeautifulSoup

my_url = "https://www.yellowpages.com/search?search_terms=Stores&geo_location_terms=Chicago%2C%20IL&page={}"

for link in [my_url.format(page) for page in range(1, 5)]:
    res = requests.get(link)
    soup = BeautifulSoup(res.text, "lxml")
    for item in soup.select(".info"):
        try:
            name = item.select(".business-name [itemprop='name']")[0].text
        except Exception:
            name = ""
        try:
            phone = item.select("[itemprop='telephone']")[0].text
        except Exception:
            phone = ""
        print(name, phone)