Extract data from web page - python

I have a script to extract data from here: http://espn.go.com/nba/statistics/player/_/stat/scoring-per-48-minutes/
Part of obtaining the data in the script looks like this:
pts_start = data.find('">',mpg_end) + 2
pts_end = data.find('<',pts_start)
store.append(data[pts_start:pts_end])
mf_start = data.find(' >',pts_end) + 2
mf_end = data.find('<',mf_start)
store.append(data[mf_start:mf_end])
fg_start = data.find(' >',mf_end) + 2
fg_end = data.find('<',fg_start)
store.append(data[fg_start:fg_end])
I see that the names like fg and pts correspond to the table headlines, but I don't understand why certain ones are abbreviated in the script.
I want to modify the script to obtain the headlines on this table: http://espn.go.com/nba/statistics/player/_/stat/rebounds. I tried doing this by just plugging in the names as they appear at the top of the table but the resulting CSV file had missing information.
Full code:
import os
import csv
import time
import urllib2

uri = 'http://espn.go.com/nba/statistics/player/_/stat/scoring-per-48-minutes'

def get_data():
    try:
        req = urllib2.Request(uri)
        response = urllib2.urlopen(req, timeout=600)
        content = response.read()
        return content
    except Exception, e:
        print "\n[!] Error: " + str(e)
        print ''
        return False

def extract(data,rk):
    print '\n[+] Extracting data.'
    start = 0
    while True:
        store = [rk]
        if data.find('nba/player/',start) == -1:
            break
        with open("data.csv", "ab") as fcsv:
            main = data.find('nba/player/',start)
            name_start = data.find('>',main) + 1
            name_end = data.find('<',name_start)
            store.append(data[name_start:name_end])
            team_start = data.find('">',name_end) + 2
            team_end = data.find('<',team_start)
            store.append(data[team_start:team_end])
            gp_start = data.find(' >',team_end) + 2
            gp_end = data.find('<',gp_start)
            store.append(data[gp_start:gp_end])
            mpg_start = data.find(' >',gp_end) + 2
            mpg_end = data.find('<',mpg_start)
            store.append(data[mpg_start:mpg_end])
            pts_start = data.find('">',mpg_end) + 2
            pts_end = data.find('<',pts_start)
            store.append(data[pts_start:pts_end])
            mf_start = data.find(' >',pts_end) + 2
            mf_end = data.find('<',mf_start)
            store.append(data[mf_start:mf_end])
            fg_start = data.find(' >',mf_end) + 2
            fg_end = data.find('<',fg_start)
            store.append(data[fg_start:fg_end])
            m3_start = data.find(' >',fg_end) + 2
            m3_end = data.find('<',m3_start)
            store.append(data[m3_start:m3_end])
            p3_start = data.find(' >',m3_end) + 2
            p3_end = data.find('<',p3_start)
            store.append(data[p3_start:p3_end])
            ft_start = data.find(' >',p3_end) + 2
            ft_end = data.find('<',ft_start)
            store.append(data[ft_start:ft_end])
            ftp_start = data.find(' >',ft_end) + 2
            ftp_end = data.find('<',ftp_start)
            store.append(data[ftp_start:ftp_end])
            start = name_end
            rk = rk + 1
            csv.writer(fcsv).writerow(store)
            fcsv.close()

def main():
    print "\n[+] Initializing..."
    if not os.path.exists("data.csv"):
        with open("data.csv", "ab") as fcsv:
            csv.writer(fcsv).writerow(["RK","PLAYER","TEAM","GP", "MPG","PTS","FGM-FGA","FG%","3PM-3PA","3P%","FTM-FTA","FT%"])
            fcsv.close()
    rk = 1
    global uri
    while True:
        time.sleep(1)
        start = 0
        print "\n[+] Getting data, please wait."
        data = get_data()
        if not data:
            break
        extract(data,rk)
        print "\n[+] Preparing for next page."
        time.sleep(1.5)
        rk = rk + 40
        if rk > 300:
            print "\n[+] All Done !\n"
            break
        uri = 'http://espn.go.com/nba/statistics/player/_/stat/scoring-per-48-minutes/sort/avg48Points/count/' + str(rk)

if __name__ == '__main__':
    main()
I specifically want to know how to grab the info based on the headlines, like TEAM, GP, MPG, PTS, FGM-FGA, FG%, 3PM-3PA, 3P%, FTM-FTA and FT%.
So the script shouldn't need to change much beyond things like pts or mpg in pts_start = data.find('">',mpg_end) + 2.
I don't understand why I can't just use the headline name as it is shown in the table for certain ones. For example, instead of FTM-FTA, the script uses ft.

Extracting HTML data is rather easy with BeautifulSoup. The following example should give you the idea; it is not a complete solution to your problem, but you can easily extend it.
from bs4 import BeautifulSoup
import urllib2

def get_html_page_dom(url):
    response = urllib2.urlopen(url)
    html_doc = response.read()
    return BeautifulSoup(html_doc, 'html5lib')

def extract_rows(dom):
    table_rows = dom.select('.mod-content tbody tr')
    for tr in table_rows:
        # skip headers
        klass = tr.get('class')
        if klass is not None and 'colhead' in klass:
            continue
        tds = tr.select('td')
        yield {'RK': tds[0].string,
               'PLAYER': tds[1].select('a')[0].string,
               'TEAM': tds[2].string,
               'GP': tds[3].string
               # you can fetch the rest of the indexes for the corresponding headers
               }

if __name__ == '__main__':
    dom = get_html_page_dom('http://espn.go.com/nba/statistics/player/_/stat/scoring-per-48-minutes/')
    for data in extract_rows(dom):
        print(data)
You can simply run it and see the result ;).
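If you also want the full rows written to a CSV with the headers from your original script, a sketch along these lines should work; the column order is assumed to match the table on the page, so double-check the indexes against the actual headings:
import csv

HEADERS = ['RK', 'PLAYER', 'TEAM', 'GP', 'MPG', 'PTS', 'FGM-FGA', 'FG%',
           '3PM-3PA', '3P%', 'FTM-FTA', 'FT%']

def extract_full_rows(dom):
    # Same row selection as extract_rows() above, but zip every cell
    # with the header list instead of naming each column by hand.
    for tr in dom.select('.mod-content tbody tr'):
        klass = tr.get('class')
        if klass is not None and 'colhead' in klass:
            continue
        cells = [td.get_text(strip=True) for td in tr.select('td')]
        if len(cells) == len(HEADERS):
            yield dict(zip(HEADERS, cells))

def write_csv(rows, filename='data.csv'):
    with open(filename, 'wb') as fcsv:
        writer = csv.writer(fcsv)
        writer.writerow(HEADERS)
        for row in rows:
            writer.writerow([row[h] for h in HEADERS])

# write_csv(extract_full_rows(dom))
The same idea carries over to the rebounds page from your question: point get_html_page_dom() at that URL and replace HEADERS with that table's headings.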

Related

Scraping data, appending to list, and making it a dataframe

I am trying to scrape a bunch of baseball statistics and get all that data into separate DataFrames so I can use it for my project. I am able to get all of the data, but I am having trouble figuring out how to store it in variables and slice it accordingly.
def parse_row(rows):
    return [str(x.string) for x in rows.find_all('td')]

def soop(url):
    page = requests.get(url)
    text = soup(page.text, features = 'lxml')
    row = text.find_all('tr')
    data = [parse_row(rows) for rows in row]
    df = pd.DataFrame(data)
    df = df.dropna()
    if dp_num in url:
        df.columns = dp_col
    elif sb_num in url:
        df.columns = sb_col
    elif hr_num in url:
        df.columns = hr_col
    elif obp_num in url:
        df.columns = obp_col
    elif b2_num in url:
        df.columns = b2_col
    elif b3_num in url:
        df.columns = b3_col
    elif era_num in url:
        df.columns = era_col
    elif fld_num in url:
        df.columns = fld_col
    else:
        print('error')
    return(df)

# ncaa scraping function
def scrape(id_num):
    loop = 1
    page_num = 2
    page_numii = 2
    page_numiii = 2
    url = 'https://www.ncaa.com/stats/baseball/d1/current/team/' + id_num
    dii_url = 'https://www.ncaa.com/stats/baseball/d2/current/team/' + id_num
    diii_url = 'https://www.ncaa.com/stats/baseball/d3/current/team/' + id_num
    while loop == 1:  # first di page
        df = soop(url)
        loop += 1
        print(df)
    while loop <= 6:  # number of remaining di pages
        df = soop(url + '/p' + str(page_num))
        page_num += 1
        loop += 1
        print(df)
    while loop == 7:  # first d2 page
        df = soop(dii_url)
        loop += 1
        print(df)
    while loop <= 11:  # remaining d2 pages
        df = soop(dii_url + '/p' + str(page_numii))
        page_numii += 1
        loop += 1
        print(df)
    while loop == 12:  # first diii page
        df = soop(diii_url)
        loop += 1
        print(df)
    while loop < 20:  # remaining d3 pages
        df = soop(diii_url + '/p' + str(page_numiii))
        page_numiii += 1
        loop += 1
        print(df)
All of the code works and I get no errors, but I would like to store the data it prints out in variables instead of printing it, and then have those as separate DataFrames for each stat page I scraped. I have no clue where to start doing that; I have seen on here that maybe I should try appending it to a list? I am a statistics major in college and I am pretty new to programming. Any help is appreciated.
To store dataframes into variables, you would have to construct a list or dictionary to store the dataframes.
With that being said, I probably wouldn't store the tables into variables, but rather write to a database or csv files so that you have the data locally available. Otherwise you'd have to run the scrape every time to get the data. Pandas can handle that for you (as well as parse the tables with .read_html()).
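To make the dictionary idea concrete, here is a minimal sketch that reuses the soop() helper and the url variable from your scrape() function and keeps each page in a dict instead of printing it (the keys are just labels I made up):
import pandas as pd

# Collect one DataFrame per page instead of printing it.
frames = {}
frames['d1_page1'] = soop(url)
for page_num in range(2, 7):
    frames['d1_page' + str(page_num)] = soop(url + '/p' + str(page_num))

# Or stack all the pages of one stat into a single DataFrame:
d1_all = pd.concat(frames.values(), ignore_index=True)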
I'm not sure exactly what data you want or how you want it (I'm also surprised not to see an API here to get that data), but this will grab it and store it into folders with the structure of:
-data
    -d1
        -INDIVIDUAL STATISTICS
            csv files ...
        -TEAM STATISTICS
            csv files ...
    -d2
        -INDIVIDUAL STATISTICS
            csv files ...
        -TEAM STATISTICS
            csv files ...
    -d3
        -INDIVIDUAL STATISTICS
            csv files ...
        -TEAM STATISTICS
            csv files ...
Code:
import requests
from bs4 import BeautifulSoup
import pandas as pd
import os

statsIds_dict = {}
for division in [1,2,3]:
    statsIds_dict[f'd{division}'] = {}
    url = f'https://www.ncaa.com/stats/baseball/d{division}/'
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    statsIds = soup.find_all('div', {'class':'stats-header__filter'})
    for each in statsIds:
        statsType = each.text.split('\n')[0]
        statsIds_dict[f'd{division}'][statsType] = {}
        options = each.find_all('option')
        for option in options:
            if option['value']:
                statsIds_dict[f'd{division}'][statsType][option.text] = 'https://www.ncaa.com' + option['value']

for division, v1 in statsIds_dict.items():
    for statsType, v2 in v1.items():
        for statTitle, link in v2.items():
            response = requests.get(link)
            soup = BeautifulSoup(response.text, 'html.parser')
            try:
                totPages = int(soup.find('ul', {'class':'stats-pager'}).find_all('li')[-2].text)
            except:
                totPages = 1
            df = pd.read_html(link)[0]
            print(link)
            for page in range(2, totPages+1):
                temp_df = pd.read_html(link + f'/p{page}')[0]
                print(link + f'/p{page}')
                df = df.append(temp_df).reset_index(drop=True)
            path = f'data/{division}/{statsType}'
            # Check whether the specified path exists or not
            isExist = os.path.exists(path)
            if not isExist:
                # Create a new directory because it does not exist
                os.makedirs(path)
                print(f"The data/{division}/{statsType} directory is created!")
            df.to_csv(f'data/{division}/{statsType}/{division}_{statsType}_{statTitle}.csv' , index=False)
            print(f'Saved: {division} {statsType} {statTitle}')
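Once the files are on disk you can load any of them back without re-scraping, for example (the glob pattern assumes the folder names come out exactly as shown in the tree above):
import glob
import pandas as pd

# One DataFrame per saved d1 individual-statistics file, keyed by its path.
saved = {path: pd.read_csv(path)
         for path in glob.glob('data/d1/INDIVIDUAL STATISTICS/*.csv')}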

JSONDecodeError: Expecting value: line 1 column 1 (char 0) when using Pushift API to scrape Reddit Data

import pandas as pd
import requests
import json
import datetime
import csv

def get_pushshift_data(after, before, sub):
    url = 'https://api.pushshift.io/reddit/search/submission/?&after=' + str(after) + '&before='+ str(before) + '&subreddit='+ str(sub) + '&sort=asc&sort_type=created_utc&size=400'
    print(url)
    r = requests.get(url).json()
    # data = json.loads(r.text, strict=False)
    return r['data']

def collect_subData(subm):
    subData = list() #list to store data points
    title = subm['title']
    url = subm['url']
    try:
        flair = subm['link_flair_text']
    except KeyError:
        flair = "NaN"
    try:
        # returns the body of the posts
        body = subm['selftext']
    except KeyError:
        body = ''
    author = subm['author']
    subId = subm['id']
    score = subm['score']
    created = datetime.datetime.fromtimestamp(subm['created_utc']) #1520561700.0
    numComms = subm['num_comments']
    permalink = subm['permalink']
    subData.append((subId,title,body,url,author,score,created,numComms,permalink,flair))
    subStats[subId] = subData

def update_subFile():
    upload_count = 0
    location = "subreddit_data_uncleaned/"
    print("Input filename of submission file, please add .csv")
    filename = input()
    file = location + filename
    with open(file, 'w', newline='', encoding='utf-8') as file:
        a = csv.writer(file, delimiter=',')
        headers = ["Post ID","Title","Body","Url","Author","Score","Publish Date","Total No. of Comments","Permalink","Flair"]
        a.writerow(headers)
        for sub in subStats:
            a.writerow(subStats[sub][0])
            upload_count+=1
        print(str(upload_count) + " submissions have been uploaded into a csv file")

# global dictionary to hold 'subData'
subStats = {}
# tracks no. of submissions
subCount = 0
# Subreddit to query
sub = 'politics'
# Unix timestamp of date to crawl from.
before = int(datetime.datetime(2021,5,17,0,0).timestamp())
after = int(datetime.datetime(2014,1,1,0,0).timestamp())

data = get_pushshift_data(after, before, sub)
while len(data) > 0:
    for submission in data:
        collect_subData(submission)
        subCount+=1
    # Calls getPushshiftData() with the created date of the last submission
    print(len(data))
    print(str(datetime.datetime.fromtimestamp(data[-1]['created_utc'])))
    after = data[-1]['created_utc']
    data = get_pushshift_data(after, before, sub)
print(len(data))
update_subFile()
The first call to get_pushshift_data(after, before, sub) scrapes the data without any error. But when the function is called again inside the while loop, with a different int value for the after variable, the program fails with JSONDecodeError: Expecting value: line 1 column 1 (char 0).
(Screenshots of the run and of the error traceback were attached to the original post.)
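That error usually means the response body was empty or not JSON at all (a rate-limit page, a 5xx error, and so on), so .json() has nothing to parse. A diagnostic you could drop into get_pushshift_data() to see what is actually coming back; this is a sketch, not behaviour specific to Pushshift that I can guarantee:
def get_pushshift_data(after, before, sub):
    url = ('https://api.pushshift.io/reddit/search/submission/'
           '?&after=' + str(after) + '&before=' + str(before) +
           '&subreddit=' + str(sub) + '&sort=asc&sort_type=created_utc&size=400')
    print(url)
    r = requests.get(url)
    # Look at the raw response before trying to decode it as JSON.
    if r.status_code != 200 or not r.text.strip():
        print('Bad response:', r.status_code, repr(r.text[:200]))
        return []
    return r.json()['data']
Returning an empty list here also lets the while len(data) > 0 loop stop cleanly instead of crashing mid-run.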

Split data into multiple worksheets

Since I am going to create a number of dataframes that I know won't fit inside a single Google worksheet (because of the limit on columns), I want to split the data across multiple worksheets. I'm using set_with_dataframe() and defining which worksheet each dataframe gets imported to, so my first thought was to create and define several worksheets and then use the same method. The problem is that I don't know how to "split" the data once there are no more columns left in the first worksheet (and then the second, the third, and so on...).
I'm quite new to working with Python and I have been stuck on this for days, so any kind of help would be appreciated.
My code looks like this:
import gspread
from gspread_dataframe import get_as_dataframe, set_with_dataframe
from google.oauth2 import service_account
from google.auth.transport.requests import AuthorizedSession
from bs4 import BeautifulSoup
import pandas as pd
import requests
import traceback
import os

class DataScraper():
    def __init__(self, sheets):
        self.data_worksheet = sheets.data_worksheet
        self.total_urls = 0
        self.urls = self.getAllUrls(sheets.url_worksheet)

    def getAllUrls(self, urlWorkSheet):
        urls = urlWorkSheet.get_all_values()
        finalUrls = []
        for r in urls:
            # Get all urls
            modifiedUrls = [d for d in r[:14] if "https://" in d]
            if len(modifiedUrls) != 0:
                self.total_urls += len(modifiedUrls)
                finalUrls.append(modifiedUrls)
        return finalUrls

    def StartScrape(self):
        current_column_count = 1
        last_data_frame_max_width = 0
        current_element = 0
        for urlRow in self.urls:
            current_row_count = 1
            for url in urlRow:
                current_element += 1
                error = False
                page = requests.get(url)
                soup = BeautifulSoup(page.content, 'html.parser')
                try:
                    page = requests.get(url)
                    soup = BeautifulSoup(page.content, 'html.parser')
                    labels = []
                    results = []
                    tbl = soup.find('table')
                    for tr in tbl.findAll('tr'):
                        headers = [th.text.strip() for th in tr.findAll('th')]
                        data = [td.text.strip() for td in tr.findAll('td')]
                        labels.append(headers)
                        results.append(data)
                    final_results = []
                    for final_labels, final_data in zip(labels, results):
                        final_results.append({'Labels': final_labels, 'Data': final_data})
                    df = pd.DataFrame(final_results)
                    df['Labels'] = df['Labels'].str[0]
                    df['Data'] = df['Data'].str[0]
                    indexNames = df[df['Labels'] == 'Links'].index
                    df.drop(indexNames , inplace=True)
                    set_with_dataframe(self.data_worksheet, df, col=current_column_count, row=current_row_count, include_column_header=False)
                    current_row_count += df.shape[0]+2
                    if df.shape[1] > last_data_frame_max_width:
                        last_data_frame_max_width = df.shape[1]
                except Exception:
                    error = True
                finally:
                    print(f"Processed page {current_element}/{self.total_urls} with status: {'success' if not error else 'error'}")
            current_column_count += last_data_frame_max_width+5
            last_data_frame_max_width = 0

class Sheets():
    def __init__(self, filename, key):
        self.filename = filename
        self.key = key
        self.data_worksheet = None
        self.url_worksheet = None
        self.getSheets(self.getCredentials())

    def getCredentials(self):
        # sep = seperator
        _ = os.path.normpath(__file__).split(os.sep)
        _.insert(1, "/")
        credentials = service_account.Credentials.from_service_account_file(os.path.join(os.path.join(*_[0:-1]), self.filename))
        return credentials.with_scopes( ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive'])

    def getSheets(self, scoped_credentials):
        gc = gspread.Client(auth=scoped_credentials)
        gc.session = AuthorizedSession(scoped_credentials)
        spreadsheet_key = gc.open_by_key(self.key)
        # Get sheet with data import
        self.data_worksheet = spreadsheet_key.worksheet("Data")
        # Get list with url's
        self.url_worksheet = url_worksheet = spreadsheet_key.worksheet("Felix Copy")

# Get sheets
sheets = Sheets("credentials.json", "key_id")
# Start scraping
scraper = DataScraper(sheets)
scraper.StartScrape()
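For the splitting described in the question, one possible approach (a sketch, not a drop-in fix): keep a list of data worksheets, track the current column position, and move on to the next worksheet whenever the next dataframe would run past the sheet's column limit. The MAX_COLS value and the worksheet names below are assumptions:
from gspread_dataframe import set_with_dataframe

MAX_COLS = 18278  # assumed per-sheet column limit; check the current Google Sheets limits

def write_block(worksheets, ws_index, col, row, df):
    # If this dataframe would not fit in the remaining columns,
    # switch to the next worksheet and start again at column 1.
    if col + df.shape[1] - 1 > MAX_COLS:
        ws_index += 1
        col = 1
    set_with_dataframe(worksheets[ws_index], df, col=col, row=row,
                       include_column_header=False)
    return ws_index, col

# e.g. worksheets = [spreadsheet_key.worksheet("Data"), spreadsheet_key.worksheet("Data 2")]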

I can't figure out why I get a blank output file

import csv
import requests
import re
from bs4 import BeautifulSoup
import sys

reload(sys)
sys.setdefaultencoding('utf8')

#CREATE CSV FILE
outfile = open("./output.csv", "wb")
writer = csv.writer(outfile)

#IMPORT MATCHES
import csv
with open('matches.csv', 'rb') as f:
    reader = csv.reader(f)
    matches = list(reader)

for id in matches:
    id = str(id)
    id = re.sub("[^0-9]","",id)
    url = 'http://www.virtualpronetwork.com/apps/fvpaa/matches/match_report/' + id
    print (url)
    response = requests.get(url)
    html = response.content
    soup = BeautifulSoup(html)

    #GET TEAMS AND SCORES
    score = soup.findAll("div",{"class":"col-md-5 center"})
    team_home = score[0]
    team_home = str(team_home)
    team_home = re.search('title="(.*)" />',team_home)
    team_home = team_home.group(1)
    team_away = score[1]
    team_away = str(team_away)
    team_away = re.search('title="(.*)" />',team_away)
    team_away = team_away.group(1)
    goals_home = score[2]
    goals_home = str(goals_home)
    goals_home = re.sub('</h2></div>','',goals_home)
    goals_home = re.sub('<div class="col-md-5 center"><h2>','',goals_home)
    goals_away = score[3]
    goals_away = str(goals_away)
    goals_away = re.sub('</h2></div>','',goals_away)
    goals_away = re.sub('<div class="col-md-5 center"><h2>','',goals_away)

    #GET HOME STATS
    tables = soup.findChildren('table')
    stats_home = tables[0]
    list_of_rows_home = []
    for row in stats_home.findChildren('tr')[1:]:
        list_of_cells = []
        for cell in row.findChildren('td')[0]:
            text = cell.text
            list_of_cells.append(text)
        for cell in row.findChildren('td')[1]:
            text = cell.text
            list_of_cells.append(text)
        for cell in row.findChildren('td')[2:]:
            list_of_cells.append(cell)
        list_of_rows_home.append(list_of_cells)
    for i in range(len(list_of_rows_home)):
        row = list_of_rows_home[i]
        cell = list_of_rows_home[i][2]
        cell = str(cell)
        goal = re.findall('goal',cell)
        goal = goal.count('goal')
        goal = goal / 2
        assist = re.findall('assist',cell)
        assist = assist.count('assist')
        assist = assist / 2
        motm = re.findall('motm',cell)
        motm = motm.count('motm')
        row.append(goal)
        row.append(assist)
        row.append(motm)
    for row in list_of_rows_home:
        del row[2]
    for i in range(len(list_of_rows_home)):
        row = list_of_rows_home[i]
        row.append(team_home)
        row.append(goals_home)
        row.append(team_away)
        row.append(goals_away)

    #GET AWAY STATS
    stats_away = tables[1]
    list_of_rows_away = []
    for row in stats_away.findChildren('tr')[1:]:
        list_of_cells = []
        for cell in row.findChildren('td')[0]:
            text = cell.text
            list_of_cells.append(text)
        for cell in row.findChildren('td')[1]:
            text = cell.text
            list_of_cells.append(text)
        for cell in row.findChildren('td')[2:]:
            list_of_cells.append(cell)
        list_of_rows_away.append(list_of_cells)
    for i in range(len(list_of_rows_away)):
        row = list_of_rows_away[i]
        cell = list_of_rows_away[i][2]
        cell = str(cell)
        goal = re.findall('goal',cell)
        goal = goal.count('goal')
        goal = goal / 2
        assist = re.findall('assist',cell)
        assist = assist.count('assist')
        assist = assist / 2
        motm = re.findall('motm',cell)
        motm = motm.count('motm')
        row.append(goal)
        row.append(assist)
        row.append(motm)
    for row in list_of_rows_away:
        del row[2]
    for i in range(len(list_of_rows_away)):
        row = list_of_rows_away[i]
        row.append(team_away)
        row.append(goals_away)
        row.append(team_home)
        row.append(goals_home)

    #COMPILE INTO ONE TABLE
    list_of_rows = list_of_rows_home + list_of_rows_away

    #WRITE TO CSV
    writer.writerows(list_of_rows)
My input file is a basic Excel file with the match ids all lined up in column one. When the script creates the output file, it is blank, and I am not getting any error messages either.
The issue is in your regex search, so perhaps change it to:
team_home = re.search('title="(.*)"',team_home)
team_home = team_home.group(1)
Alternative:
team_home = re.search('title="(.*)"/>',team_home)
team_home = team_home.group(1)
The /> is not needed. With the space left in, the title="..." part never matches, so re.search() returns None, calling .group(1) on it raises an AttributeError, and the script stops. If you want to include />, remove the space from your pattern, since the space is ultimately what kills it.
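You can see the difference interactively; the sample tag below only illustrates the shape of the markup, it is not copied from the site:
import re

sample = '<img src="logo.png" title="FC Example"/>'

print(re.search('title="(.*)" />', sample))           # None -- no space before /> in the markup
print(re.search('title="(.*)"', sample).group(1))     # FC Example
print(re.search('title="(.*)"/>', sample).group(1))   # FC Example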

Python - Variable being printed over string

I am using Python 2.7 and I have a problem that I haven't encountered before: when I print a certain string and then a variable on the same line, the variable is printed over the string. E.g. the script contains print 'IP Rating = ', ipRating and the output in the command prompt is 'IP20ating = '. I have no idea why this is happening. I have the same pattern of code for various variables and strings in the same script and they all come out as expected. I have tried renaming the variable and changing the string but there is still no difference. Has anybody encountered this error before, or any ideas why this might be happening? I can post the code if requested.
Many thanks :)
EDIT
Here is the code. I know I may have repeated myself a few times and there are unnecessary libraries in there, but the way I work is to import every library I might need and then remove the unnecessary code at the end.
from bs4 import BeautifulSoup as Soup
from bs4 import BeautifulSoup
from urllib import urlopen
import webbrowser
import httplib
import urllib2
import urllib
import string
import mylib
import xlrd
import glob
import xlwt
import bs4
import sys
import os
import re

print '\nStarting Web Search'

found = False
while found == False:
    excelFile = "F:\\len\\web sheets completed\\csv formatted\\imported\\re-imported\\Import Corrections\\saxby web spreadsheet.xls"
    try:
        inFi = xlrd.open_workbook(excelFile)
        found = True
    except IOError:
        print 'File not found.'

inFi = xlrd.open_workbook(excelFile)
inWS = inFi.sheet_by_index(0)
headers = mylib.getHeader(inWS)
supplyHead = mylib.findHeader('Supplier Part Ref', headers)

saxbeginurl = "http://www.saxbylighting.com/index.php?pg=search&ser="
badLink = "index.php?pg=search&ser=10180&next=0"
resLink = "http://www.saxbylighting.com/images/ProductImages/Zoomed/"

overCount = 0
for t in range(524,534):
    projection = 0
    ipRating = 0
    diameter = 0
    width = 0
    weight = 0
    length = 0
    height = 0
    i = 0
    w = 0
    l = 0
    h = 0
    d = 0
    p = 0
    x = 0
    iP = 0
    wei = 0

    imgStock = str(inWS.cell(t, supplyHead).value.encode('latin-1'))
    overCount = overCount + 1
    print '\n',imgStock

    if imgStock == '3TRAWI':
        url = 'http://www.saxbylighting.com/index.php?pg=details&prod=53'
    elif imgStock == '10313':
        url = 'http://www.saxbylighting.com/index.php?pg=details&prod=204'
    else:
        url = saxbeginurl + imgStock

    html_page = urllib2.urlopen(url)
    soup = BeautifulSoup(html_page)
    img_tags = soup.find_all("img")
    the_image_tag = soup.find("img", src='/images/dhl_logo.png')

    try:
        for dataSheet in soup.find('div',{'class':'panes'}):
            #print dataSheet, ' -- ', str(i)
            i = i + 1
            if i == 4:
                reqData = str(dataSheet).split('<img', 1)[0]
                first_Data = reqData.replace('<br/>','\n')
                second_Data = first_Data.replace('<b>','')
                third_Data = second_Data.replace('</b>','')
                fourth_Data = third_Data.replace(':',': ')
                dataList = fourth_Data.split('\n')
                #print dataList
                for information in dataList:
                    if 'Weight' in dataList[wei]:
                        pre_Weight = dataList[wei]
                        sec_weight = str(pre_Weight).replace('Weight :','')
                        weight = sec_weight.replace(' ','')
                    wei += 1
                    if 'IP' in dataList[iP]:
                        ipRating = str(dataList[iP])
                    iP += 1
                for product_Dimensions in dataList:
                    if 'Product dimensions :' in dataList[x]:
                        #print dataList[x]
                        dimensionList = str(dataList[x]).replace('mm','mm:')
                        #print dimensionList
                        prelim_Dimensions = dimensionList.replace('Product dimensions :','')
                        first_Dimensions = prelim_Dimensions.replace('cm','0mm')
                        sec_Dimensions = first_Dimensions.replace(' ',' ')
                        third_Dimensions = sec_Dimensions.strip()
                        dimenList = third_Dimensions.split('mm:')
                        #print dimenList
                        for project in dimenList:
                            if 'Proj' in dimenList[p]:
                                pre_pro = str(dimenList[p]).replace('Proj','')
                                sec_pro = pre_pro.replace(':','')
                                thro_pro = sec_pro.replace(' ','')
                                projection = thro_pro
                            elif p == len(dimenList):
                                print 'Projection not found'
                            p += 1
                        for diamet in dimenList:
                            if 'dia' in dimenList[d]:
                                pre_dia = str(dimenList[d]).replace('dia','')
                                sec_dia = pre_dia.replace(':','')
                                third_dia = sec_dia.replace(' ','')
                                diameter = third_dia
                            elif d == len(dimenList):
                                print 'Diameter not found'
                            d += 1
                        for heig in dimenList:
                            if 'H:' in dimenList[h]:
                                pre_hei = str(dimenList[h]).replace('H','')
                                sec_hei = pre_hei.replace(':','')
                                third_hei = sec_hei.replace(' ','')
                                height = third_hei
                            elif h == len(dimenList):
                                print 'Height not found'
                            h += 1
                        for lent in dimenList:
                            if 'L:' in dimenList[l]:
                                pre_leng = str(dimenList[l]).replace('L','')
                                sec_leng = pre_leng.replace(':','')
                                third_leng = sec_leng.replace(' ','')
                                length = third_leng
                            elif l == len(dimenList):
                                print 'Length not found'
                            l += 1
                        for wid in dimenList:
                            if 'W:' in dimenList[w]:
                                pre_wid = str(dimenList[w]).replace('W','')
                                sec_wid = pre_wid.replace(':','')
                                third_wid = sec_wid.replace(' ','')
                                width = third_wid
                            elif w == len(dimenList):
                                print 'Width not found'
                            w += 1
                    x += 1
                print 'IP Rating = ', ipRating
                print 'Weight = ', weight
                print 'Projection = ', projection, 'mm'
                print 'Diameter = ',diameter, 'mm'
                print 'Length = ',length, 'mm'
                print 'Height = ',height, 'mm'
                print 'Width = ',width, 'mm'
    except TypeError:
        print 'Type Error... skipping this product and carrying on.'
Here is an example of the output:
IP44ating =
Weight = .51KGS
Projection = 35 mm
Diameter = 0 mm
Length = 0 mm
Height = 90 mm
Width = 120 mm
I strongly suspect that the value of ipRating that you think is IP20 is actually \rIP20; that is, there is a stray carriage return character (0x0D) at the start of the variable. The carriage return moves the print position back to the start of the line, and the variable then overwrites what you printed before.
You can test whether this is the problem by adding the line:
ipRating = ipRating.replace("\r", "")
before your print statement.
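To see the effect in isolation, this tiny snippet reproduces the symptom (Python 2, like the script above):
ipRating = '\rIP20'                                # value with a stray carriage return at the front
print 'IP Rating = ', ipRating                     # displays: IP20ating =
print 'IP Rating = ', ipRating.replace('\r', '')   # displays: IP Rating =  IP20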
This is the proper way to do what you're doing.
print('IP Rating = %s' % ipRating)
or
print('IP Rating = %d' % ipRating)
That is just one example from all the print statements you have at the end of your code.
If you're putting a string variable in print, use %s; for an integer, use %d. If you have any more questions just ask.
