Writing different columns of a Pandas DataFrame in one row? - python

I have scraped a website to extract shoe and clothing prices, their image IDs, image URLs and some other features. I succeeded in writing the DataFrame to a CSV file, but I realized that every feature is written to a different row, while they should all be gathered in one row. I have shown a sample of the output from my CSV file below.
Any suggestions on how to change the code?
from bs4 import BeautifulSoup
import requests
import re
import csv
import pandas as pd
import os
import urllib.request

df = pd.DataFrame(columns=['PostID', 'Description', 'Kind', 'Price', 'ImageID', 'ImageURL'])

def scraping():
    global h, df
    with open("/home/user/Documents/file.txt") as f:
        urls = f.readlines()
    urls = [s.strip('\n') for s in urls]
    code_list = []
    for url in urls:
        code = url.split('/')[-1]
        code_list.append(code)
        df = df.append({'PostID': code}, ignore_index=True)
        soup = BeautifulSoup(requests.get(url).text, 'html.parser')  # fetch and parse the post page
        for br in soup.find_all("br"):
            br.replace_with("\n")
        try:
            description = soup.find('div', attrs={'class': 'ui fluid card post-description'}).find('div', attrs={'class': 'content'})
            print(description.text)
            df = df.append({'Description': description.text}, ignore_index=True)
            item_list = []
            items = soup.find_all('span', attrs={'class': 'item__title'})
            for i in items:
                item_list.append(i.text)
            item_list.pop(0)
            value_list = []
            values = soup.find_all('div', attrs={'class': 'value'})
            for v in values:
                value_list.append(v.text)
            my_dictionary = {}
            for i in range(1, 3):
                my_dictionary[item_list[i]] = value_list[i]
            df = df.append({'Kind': my_dictionary['نوع آگهی']}, ignore_index=True)
            df = df.append({'Price': my_dictionary['قیمت']}, ignore_index=True)
            imageresult = []
            path = '/home/user/images'
            images = soup.find_all('img')
            for img in images:
                imgID = img.get('src').split('/')[-1]
                df = df.append({'ImageID': imgID}, ignore_index=True)
                df = df.append({'ImageURL': img.get('src')}, ignore_index=True)
                urllib.request.urlretrieve(img.get('src'), os.path.join(path, os.path.basename(img.get('src'))))
                print(imgID + img.get('src'))
            else:
                break
        except:
            print("your URL is invalid: " + url)

scraping()
df.to_csv('divartest14.csv', index=False, encoding='utf-8')
PostID     Description    Kind      Price   ImageID
QXZ5RjZj
           adidas shoes
                          feminine
                                    100$
                                            QXZ5RjZj.jpg

That will continue to happen because each append call adds a separate row: every time you pass a single-item dict with ignore_index=True, that one value gets put in its own row. I'd suggest passing all of the items you want in one row in a single dictionary, i.e.:
df = df.append({'c1': 1, 'c2': 2, 'c3': 3}, ignore_index=True)  # ...and so on for the remaining columns
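A minimal, self-contained sketch of that idea with placeholder values standing in for the scraped fields (the values and URL below are made up for illustration): build one dict holding every column for a post, then append it once so all the values land on the same row.
import pandas as pd

df = pd.DataFrame(columns=['PostID', 'Description', 'Kind', 'Price', 'ImageID', 'ImageURL'])

# placeholder values in place of the scraped fields
row = {'PostID': 'QXZ5RjZj',
       'Description': 'adidas shoes',
       'Kind': 'feminine',
       'Price': '100$',
       'ImageID': 'QXZ5RjZj.jpg',
       'ImageURL': 'https://example.com/QXZ5RjZj.jpg'}

# one dict containing every column -> one row in the DataFrame
df = df.append(row, ignore_index=True)
print(df)
Note that DataFrame.append was removed in pandas 2.0; on newer versions the equivalent is pd.concat([df, pd.DataFrame([row])], ignore_index=True).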

Related

How to scrape all values of AJAX search table with PYTHON?

I am trying to scrape the CPU Specs Database at TechPowerUp.
I have found that the table updates using AJAX, and I created the following code:
import requests
from bs4 import BeautifulSoup
import csv
import string

cpus = []
base = 'https://www.techpowerup.com/cpu-specs/?ajaxsrch='
letters = list(string.ascii_lowercase)
letters.extend(range(0, 10))
for i in letters:
    URL = base + str(i)
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, 'html.parser')
    with open('cpu2.csv', mode='a') as cpu_csv:
        headers = ['name', 'family', 'socket', 'release']
        writer = csv.DictWriter(cpu_csv, fieldnames=headers, lineterminator='\n')
        for tr in soup.find_all('tr')[1:]:
            tds = tr.find_all('td')
            if tds[0].text.strip() not in cpus:
                writer.writerow({'name': tds[0].text.strip(), 'family': tds[1].text.strip(), 'socket': tds[4].text.strip(), 'release': tds[8].text.strip()})
                cpus.append(tds[0].text.strip())
            else:
                print("duplicate")
This code works in that it loops through A-Z and then 0-9 and populates a CSV file while ignoring duplicates; however, I'm only scraping ~600 results when there are over 2,000 entries.
I believe this may be due to a limit on the items returned for each AJAX search request, so not all entries are discovered. Is there a different approach to fetch all results?
Thanks
import pandas as pd
import string

items = string.digits + string.ascii_lowercase

def main(url):
    data = []
    for item in items:
        print(f"{item}")
        df = pd.read_html(url.format(item))[0]
        df = df[["Name", "Codename", "Socket", "Released"]]
        data.append(df)
    data = pd.concat(data)
    data.drop_duplicates(subset='Name', keep="first", inplace=True)
    data.to_csv("data.csv", index=False)

main("https://www.techpowerup.com/cpu-specs/?ajaxsrch={}")
Total output is 596 rows after removing duplicates by the Name column.
The easiest way to get the table data is with pandas: read the data into a DataFrame and then export it to CSV.
Code:
import string
import pandas as pd

base = 'https://www.techpowerup.com/cpu-specs/?ajaxsrch='
letters = list(string.ascii_lowercase)
letters.extend(range(0, 10))
df = pd.DataFrame()
for i in letters:
    URL = base + str(i)
    df1 = pd.read_html(URL)[0]
    df = df.append(df1, ignore_index=True)

print(df[['Name', 'Codename', 'Socket', 'Released']])  # This will give you 1739 records

# If you want to delete duplicates, use this
df.drop_duplicates(subset='Name', keep='first', inplace=True)
print(df[['Name', 'Codename', 'Socket', 'Released']])  # This will give you 595 records

# Export to a CSV file
df[['Name', 'Codename', 'Socket', 'Released']].to_csv("cpu_csv.csv", index=False)

Saving multiple data frames from loop

I have been searching for a solution to my problem, but all the answers I find use print() at the end rather than saving the data frames as I would like.
Below I have (almost) functioning code that prints 3 separate tables. How do I save these three tables in 3 separate data frames named matches_october, matches_november and matches_december?
The last line of my code does not work the way I want it to. I hope it is clear what I would like the code to do (save a data frame at the end of each of the 3 rounds of the loop).
import pandas as pd
import requests
from bs4 import BeautifulSoup

base_url = 'https://www.basketball-reference.com/leagues/NBA_2019_games-'
valid_pages = ['october', 'november', 'december']
end = '.html'
for i in valid_pages:
    url = '{}{}{}'.format(base_url, i, end)
    res = requests.get(url)
    soup = BeautifulSoup(res.content, 'lxml')
    table = soup.find_all('table')[0]
    df = pd.read_html(str(table))
    print(df)
    matches + valid_pages = df[0]
You could special-case it, but that's not very robust (and it's rather ugly):
if i == 'october':
    matches_october = pd.read_html(str(table))
elif i == 'november':
    matches_november = pd.read_html(str(table))
# ...and so on for each month
A more elegant solution is to use a dictionary. Before the loop, declare matches = {}. Then, in each iteration:
matches[i] = pd.read_html(str(table))
Then you can access the October matches DataFrame via matches['october'].
You can't compose variable names using +; try using a dict instead:
import pandas as pd
import requests
from bs4 import BeautifulSoup

matches = {}  # create an empty dict
base_url = 'https://www.basketball-reference.com/leagues/NBA_2019_games-'
valid_pages = ['october', 'november', 'december']
end = '.html'
for i in valid_pages:
    url = '{}{}{}'.format(base_url, i, end)
    res = requests.get(url)
    soup = BeautifulSoup(res.content, 'lxml')
    table = soup.find_all('table')[0]
    df = pd.read_html(str(table))
    print(df)
    matches[i] = df[0]  # store it in the dict
Thanks guys. That worked! :)
import pandas as pd
import requests
from bs4 import BeautifulSoup

matches = {}  # create an empty dict
base_url = 'https://www.basketball-reference.com/leagues/NBA_2019_games-'
valid_pages = ['october', 'november', 'december']
end = '.html'
for i in valid_pages:
    url = '{}{}{}'.format(base_url, i, end)
    res = requests.get(url)
    soup = BeautifulSoup(res.content, 'lxml')
    table = soup.find_all('table')[0]
    df = pd.read_html(str(table))
    matches[i] = df[0]  # store it in the dict

matches_october = matches['october']

Web-Scraping Python, Indexing Issue for DataFrame

I'm working on a web scraper for Spotify Charts to extract the top 200 daily songs each day. I have done everything to extract the data I'm interested in, including rank, artist, track title, and stream numbers. What I'm stuck on is putting everything into a DataFrame to export as a CSV to Excel. Right now when I print my DataFrame, it is treating each cycle as 1 row with 4 columns as opposed to 200 rows with 4 columns.
I'm not sure what the issue is, as I've tried just about everything and looked into it as much as I could. I know something is wrong with the indexing, because each "what should be a row" has the same first "0" index, when they should run sequentially from 0 to 199. Also, the column names for my DataFrame keep repeating after each "what should be a row", so I know there is definitely an issue there.
import requests
from bs4 import BeautifulSoup
from datetime import date, timedelta
from time import time
from time import sleep
from random import randint
import pandas as pd
import numpy as np

base_url = 'https://spotifycharts.com/regional/global/daily/'
r = requests.get(base_url)
soup = BeautifulSoup(r.text, 'html.parser')
chart = soup.find('table', {'class': 'chart-table'})
tbody = chart.find('tbody')
for tr in tbody.find_all('tr'):
    rank_text = []
    rank_text_elem = tr.find('td', {'class': 'chart-table-position'})
    for item in rank_text_elem:
        rank_text = []
        rank_text.append(item)
    artist_text = []
    artist_text_elem = tr.find('td', {'class': 'chart-table-track'}).find_all('span')
    for item in artist_text_elem:
        artist_text = []
        artist_text.append(item.text.replace('by ', '').strip())
    title_text = []
    title_text_elem = tr.find('td', {'class': 'chart-table-track'}).find_all('strong')
    for item in title_text_elem:
        title_text = []
        title_text.append(item.text)
    streams_text = []
    streams_text_elem = tr.find('td', {'class': 'chart-table-streams'})
    for item in streams_text_elem:
        streams_text = []
        streams_text.append(item)
    # creating dataframe to store 4 variables
    list_of_data = list(zip(rank_text, artist_text, title_text, streams_text))
    df = pd.DataFrame(list_of_data, columns=['Rank', 'Artist', 'Title', 'Streams'])
    print(df)
Basically, I'm trying to create a dataframe that holds 4 variables in each row, for 200 rows, for each date of the Spotify global charts. Please ignore some of the modules and libraries I've included at the top; they are used for iterating through each page of the historical data based on dynamic URLs, which I have already figured out. Any help is greatly appreciated! Thank you!
Before the for loop I create the list all_rows.
Inside the for loop I add a list with a single row of data to all_rows.
After the for loop I use all_rows to create the DataFrame.
import requests
from bs4 import BeautifulSoup
import pandas as pd

base_url = 'https://spotifycharts.com/regional/global/daily/'
r = requests.get(base_url)
soup = BeautifulSoup(r.text, 'html.parser')
chart = soup.find('table', {'class': 'chart-table'})
tbody = chart.find('tbody')

all_rows = []
for tr in tbody.find_all('tr'):
    rank_text = tr.find('td', {'class': 'chart-table-position'}).text
    artist_text = tr.find('td', {'class': 'chart-table-track'}).find('span').text
    artist_text = artist_text.replace('by ', '').strip()
    title_text = tr.find('td', {'class': 'chart-table-track'}).find('strong').text
    streams_text = tr.find('td', {'class': 'chart-table-streams'}).text
    all_rows.append([rank_text, artist_text, title_text, streams_text])

# after `for` loop
df = pd.DataFrame(all_rows, columns=['Rank', 'Artist', 'Title', 'Streams'])
print(df.head())
You could use pandas and requests
import pandas as pd
import requests
headers = {'User-Agent': 'Mozilla/5.0'}
url ='https://spotifycharts.com/regional/global/daily/'
r = requests.get(url, headers = headers).content
table = pd.read_html(r)[0] #transfer html to pandas
table.dropna(axis = 1, how = 'all', inplace = True) #drop nan column
table[['Title','Artist']] = table['Unnamed: 3'].str.split(' by ',expand=True) #split title artist strings into two columns
del table['Unnamed: 3'] #remove combined column
table = table[['Track', 'Artist','Title', 'Unnamed: 4']] #re-order cols
table.columns= ['Rank', 'Artist','Title', 'Streams'] #rename cols
print(table)

Extraction from multiple html files (same kind of data) and storing the results in a single dataframe

I want to extract specific data from multiple html files (having the same arrangement) and store it in a single dataframe. The script I wrote (see below) extracts information only from the last file in the folder.
import pandas as pd
from bs4 import BeautifulSoup
import glob
import os.path

counter = 0
dir_path = r"/home"
for file_name in glob.glob(os.path.join(dir_path, "*.html")):
    with open(file_name) as html_file:
        soup = BeautifulSoup(html_file)
        counter = counter + 1
        text = []
        vessel_name = []
        port = []
        voy_no = []
        for p_tag in soup.find_all('p', {'class': 'MsoNormal'}):
            text.append(p_tag.text)
        text1 = [el.replace('\xa0', ' ') for el in text]
        a = text1[2].split(":")[1]
        vessel_name.append(list(a.split()))
        b = text1[4].split(":")[1]
        voy_no.append(b.split())
        c = text1[5].split(":")[1]
        port.append(c.split())
dict_for_df = {'Vessel_Name': vessel_name, 'Voy_No': voy_no, 'Port': port}
final_df = pd.DataFrame.append(dict_for_df)
To make it work, create an empty dataframe at the beginning
final_df = pd.DataFrame()
and add to that inside the loop
final_df = final_df.append(dict_for_df)
The full code is:
import pandas as pd
from bs4 import BeautifulSoup
import glob
import os.path

counter = 0
final_df = pd.DataFrame()
dir_path = r"/home"
for file_name in glob.glob(os.path.join(dir_path, "*.html")):
    with open(file_name) as html_file:
        soup = BeautifulSoup(html_file)
        counter = counter + 1
        text = []
        vessel_name = []
        port = []
        voy_no = []
        for p_tag in soup.find_all('p', {'class': 'MsoNormal'}):
            text.append(p_tag.text)
        text1 = [el.replace('\xa0', ' ') for el in text]
        a = text1[2].split(":")[1]
        vessel_name.append(list(a.split()))
        b = text1[4].split(":")[1]
        voy_no.append(b.split())
        c = text1[5].split(":")[1]
        port.append(c.split())
        dict_for_df = {'Vessel_Name': vessel_name, 'Voy_No': voy_no, 'Port': port}
        final_df = final_df.append(dict_for_df)
Note that this is not the best way to do it. If your data fits in memory, it is better to collect all the data in a list of dictionaries and create the dataframe once at the end. For a small dataset of 1,000 lines or less the approach above should be good enough.
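A minimal sketch of that collect-then-build pattern, reusing the same parsing idea as above (the extracted values here are placeholders, not real data):
import glob
import os.path
import pandas as pd
from bs4 import BeautifulSoup

rows = []  # one dict per file
dir_path = r"/home"
for file_name in glob.glob(os.path.join(dir_path, "*.html")):
    with open(file_name) as html_file:
        soup = BeautifulSoup(html_file)
        # ... extract vessel name, voyage number and port from soup as above ...
        rows.append({'Vessel_Name': 'placeholder',
                     'Voy_No': 'placeholder',
                     'Port': 'placeholder'})

# build the DataFrame once, after the loop
final_df = pd.DataFrame(rows)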

Creating Large DataFrame from smaller DataFrames

I am having an issue with the structure of the data as I get it off the PGA website. I have trouble putting the data into a dataframe and merging it so that I can use the dataframe for analysis later. The dimensions of the scraped data are never right, and I get a different error each time I run the code that I can't seem to reconcile.
I have tried merging and concatenating dataframes but nothing seems to work. Any help is appreciated.
I would really like my dataframe to contain the individual statistics from the separate pages, but on the same row as the other data, keyed by year and PLAYER NAME.
import csv
from urllib.request import urlopen
from bs4 import BeautifulSoup
import datetime
import socket
import urllib.error
import pandas as pd
import urllib
import sqlalchemy
import numpy as np
import functools

base = 'http://www.pgatour.com/'
inn = 'stats/stat'
end = '.html'
years = ['2017', '2016']

alpha = []
# all pages with links to tables
urls = ['http://www.pgatour.com/stats.html',
        'http://www.pgatour.com/stats/categories.ROTT_INQ.html',
        'http://www.pgatour.com/stats/categories.RAPP_INQ.html',
        'http://www.pgatour.com/stats/categories.RARG_INQ.html',
        'http://www.pgatour.com/stats/categories.RPUT_INQ.html',
        'http://www.pgatour.com/stats/categories.RSCR_INQ.html',
        'http://www.pgatour.com/stats/categories.RSTR_INQ.html',
        'http://www.pgatour.com/stats/categories.RMNY_INQ.html',
        'http://www.pgatour.com/stats/categories.RPTS_INQ.html']
for i in urls:
    data = urlopen(i)
    soup = BeautifulSoup(data, "html.parser")
    for link in soup.find_all('a'):
        if link.has_attr('href'):
            alpha.append(base + link['href'][17:])  # may need adjusting

# data links
beta = []
for i in alpha:
    if inn in i:
        beta.append(i)

gamma = []
for i in beta:
    if i not in gamma:
        gamma.append(i)

jan = []
for i in gamma:
    try:
        data = urlopen(i)
        soup = BeautifulSoup(data, "html.parser")
        for table in soup.find_all('section', {'class': 'module-statistics-off-the-tee-details'}):
            for j in table.find_all('h3'):
                y = j.get_text().replace(" ", "").replace("-", "").replace(":", "").replace(">", "").replace("<", "").replace(")", "").replace("(", "").replace("=", "").replace("+", "")
                jan.append([i, str(y + '.csv')])
                print([i, str(y + '.csv')])
    except Exception as e:
        print(e)
        pass

# my problem starts here
# using urls list so that I can find the error faster
urls = [['http://www.pgatour.com/stats/stat.02356.html', 'd'],
        ['http://www.pgatour.com/stats/stat.02568.html', 'f'],
        ['http://www.pgatour.com/stats/stat.111.html', 'r']]
list = []
master = pd.DataFrame()
# jan = [['http://www.pgatour.com/stats/stat.02356.html', 'Last15EventsScoring.csv']]
# make a list with url and title name and cleaned csv name
# write to csv
row_sp = []
rows_sp = []
title1 = []
title = []
for i in urls:
    try:
        for y in years:
            data = urlopen(i[0][:-4] + y + end)
            soup = BeautifulSoup(data, "html.parser")
            data1 = urlopen(i[0])
            soup1 = BeautifulSoup(data1, "html.parser")
            for table in soup1.find_all('table', {'id': 'statsTable'}):
                title.append('year')
                for k in table.find_all('tr'):
                    for n in k.find_all('th'):
                        title1.append(n.get_text())
                    for l in title1:
                        if l not in title:
                            title.append(l)
                rows_sp.append(title)
            for table in soup.find_all('table', {'id': 'statsTable'}):
                for h in table.find_all('tr'):
                    row_sp = [y]
                    for j in h.find_all('td'):
                        row_sp.append(j.get_text().replace(" ", "").replace("\n", "").replace("\xa0", " "))
                    rows_sp.append(row_sp)
            df = pd.DataFrame(rows_sp)
            df.columns = title
            df.drop(df.index[1], inplace=True)
            print(df)
            list.append(df)
    except Exception as e:
        print(e)
        pass

df_merge = functools.reduce(lambda left, right: pd.merge(left, right, on=['year', 'PLAYER NAME'], how='outer'), list)
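For reference, a minimal sketch of the merge pattern that last line relies on, with small hypothetical frames standing in for the scraped stat tables (all column names other than the join keys are made up):
import functools
import pandas as pd

# hypothetical per-stat tables; in the real code these come from the scraped pages
driving = pd.DataFrame({'year': ['2017', '2017'],
                        'PLAYER NAME': ['Player A', 'Player B'],
                        'AVG. DISTANCE': [305.2, 298.7]})
putting = pd.DataFrame({'year': ['2017', '2017'],
                        'PLAYER NAME': ['Player A', 'Player B'],
                        'PUTTS PER ROUND': [28.1, 29.4]})

frames = [driving, putting]
# outer-merge every frame on the shared keys so each player/year ends up on one row
df_merge = functools.reduce(
    lambda left, right: pd.merge(left, right, on=['year', 'PLAYER NAME'], how='outer'),
    frames)
print(df_merge)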
