CSV file missing some data on write in Python

I am creating a Python program using Scrapy that crawls a given domain and, when it finds PDFs, scans them for information (location of the PDF, number of pages, image count, field count, tagged or not, etc.) and places all of this into a CSV file.
It downloads all the PDFs just fine, but when I open the CSV file, only a fraction of the downloaded files appear in it. I'm not sure what I am doing wrong. I thought perhaps I was not properly closing the file once opened, but I'm not sure that's the problem. Code is below:
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy.http import Request
import urllib.parse as urlparse
import os.path
import validators
import csv
from ..info import isTagged
from ..get_metadata import get_data, count_images
from ..fieldCount import getFieldCount


class PdfspiderSpider(CrawlSpider):
    name = 'pdfspider'
    allowed_domain = input('Enter the domain name of the website to be crawled (domain of https://google.com is "google"): ')
    allowed_domains = [allowed_domain]

    # need domain to name the folder the pdfs will be put into
    global domain
    domain = allowed_domains[0]

    global start
    start = input('Enter the url of the page you wish to start the crawl on (include http/https): ')
    start_urls = [start]

    global base_path
    base_path = input('Where do you wish to save the folder containing the pdfs?: ')

    rules = (
        Rule(LinkExtractor(), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        base_url = start
        for a in response.xpath('//a[@href]/@href'):
            link = a.extract()
            if link.endswith('.pdf'):
                link = urlparse.urljoin(base_url, link)
                yield Request(link, callback=self.save_pdf)

    def create_csv(self):
        header = ['location', 'title', 'author', '# of pages', 'tagged?', 'field count', 'image count']
        filename = base_path + '/' + domain + '/' + domain + '.csv'
        f = open(filename, 'x')
        writer = csv.writer(f)
        writer.writerow(header)
        f.close()

    def save_pdf(self, response):
        url = response.url
        if response.status == 200:
            save_dir = base_path + '/' + domain
            isExist = os.path.exists(save_dir)
            if not isExist:
                # Create a new directory because it does not exist
                os.makedirs(save_dir)
            csvFile = domain + '.csv'
            csvPath = save_dir + '/' + csvFile
            csvPathExist = os.path.exists(csvPath)
            if not csvPathExist:
                self.create_csv()
            file = response.url.split('/')[-1]
            full_path = os.path.join(save_dir, file)
            with open(full_path, 'wb') as f:
                f.write(response.body)
                is_tagged = isTagged(full_path)
                metaData = get_data(full_path)
                fieldCount = getFieldCount(full_path)
                imageCount = count_images(full_path)
                row = [url, metaData[0], metaData[1], metaData[2], is_tagged, fieldCount, imageCount]
                self.add_to_csv(row)
            f.close()
        else:
            print(f"Failed to load pdf: {url}")

    def add_to_csv(self, row):
        filename = base_path + '/' + domain + '/' + domain + '.csv'
        f = open(filename, 'a', newline='')
        writer = csv.writer(f)
        writer.writerow(row)
        f.close()
So I think it's the add_to_csv function that's the problem, but I can't figure out why. Any help would be appreciated.

The issue is in how you call the self.add_to_csv(row) method inside the save_pdf() method: right after that call you close the file, which is what leads to incomplete information being written to the CSV. What you can do is put your code in a try/except clause and close all files in a finally block.
There is nothing wrong with the logic in the add_to_csv() method itself.
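For illustration, here is a minimal, self-contained sketch of that advice (not the asker's exact method; the path and the example row are made up): the CSV handle is closed in a finally block, so a failure while writing cannot leave the file half-handled:

import csv

def append_row(csv_path, row):
    # Open in append mode and guarantee the handle is closed, even if writerow() raises.
    f = open(csv_path, 'a', newline='')
    try:
        csv.writer(f).writerow(row)
    finally:
        f.close()  # always runs, so buffered data is flushed to disk

# Hypothetical usage mirroring the spider's row layout:
append_row('example.csv', ['https://example.com/a.pdf', 'title', 'author', 3, True, 0, 2])

The same shape (or a with block) applies to the PDF write and the metadata calls in save_pdf().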

Related

How to know the cause of assertion errors in Python?

I am compiling a script for adding a custom property to PDF files using PdfMerger() in PyPDF2. It worked fine for almost all the files except a few, and the error occurs in some function inside PdfMerger. I don't understand what exactly is causing this error or how to rectify it. Here is the entire program; I'm not sure whether giving only a snippet would be helpful.
import os
import pandas as pd
from PyPDF2 import PdfReader, PdfMerger

df = pd.read_excel('S:\\USERS\\VINTON\\F001A - Item Master (Stock and Cost)- 270001.xlsx')
folder_path = "U:\\BMP"
pdf_files = [os.path.splitext(f)[0] for f in os.listdir(folder_path) if f.endswith('.pdf')]

for EachFile in pdf_files:
    search_value = EachFile
    print(EachFile)
    search_result = df[df['Item Number 02'] == search_value]
    # Find the corresponding value in the "Name" column of the same w
    if not search_result.empty:
        print("Found in JDE")
        Revision = search_result['Rev'].values[0]
        Description = search_result['Item Description 01'].values[0]
        FileName = "U:\\BMP\\" + search_value + ".pdf"
        # Get the file from BMP Folder
        file_in = open(FileName, 'rb')
        pdf_reader = PdfReader(file_in)
        if pdf_reader.is_encrypted:
            print("Encrypted")
            continue
        metadata = pdf_reader.metadata
        # Adding entire existing file to the new file created
        pdf_merger = PdfMerger()
        pdf_merger.append(file_in)
        pdf_merger.add_metadata({
            '/Revision': Revision,
            '/Description': Description
        })
        file_out = open("S:\\USERS\\VINTON\\BMP-Rev\\" + search_value ".pdf", 'wb')
        pdf_merger.write(file_out)
        file_in.close()
        file_out.close()

print("All Done!!")
I cannot figure out how to overcome the assertion errors because the error occurs several layers below this simplified syntax.
There is "+" sign missing in this line before ".pdf"
file_out = open("S:\USERS\VINTON\BMP-Rev\" + search_value ".pdf", 'wb')
try this:
file_out = open("S:\USERS\VINTON\BMP-Rev\" + search_value + ".pdf", 'wb')
hope it works
Use try and except statements when reading or merging PDF files so the exception message is printed if an operation fails. It's always good practice to surface errors and exceptions when working with files or memory during development.
import os
import pandas as pd
from PyPDF2 import PdfReader, PdfMerger

df = pd.read_excel('S:\\USERS\\VINTON\\F001A - Item Master (Stock and Cost)- 270001.xlsx')
folder_path = "U:\\BMP"
pdf_files = [os.path.splitext(f)[0] for f in os.listdir(folder_path) if f.endswith('.pdf')]

for EachFile in pdf_files:
    search_value = EachFile
    print(EachFile)
    search_result = df[df['Item Number 02'] == search_value]
    # Find the corresponding value in the "Name" column of the same w
    if not search_result.empty:
        print("Found in JDE")
        Revision = search_result['Rev'].values[0]
        Description = search_result['Item Description 01'].values[0]
        FileName = "U:\\BMP\\" + search_value + ".pdf"
        # Get the file from BMP Folder
        file_in = open(FileName, 'rb')
        try:
            pdf_reader = PdfReader(file_in)
            if pdf_reader.is_encrypted:
                print("Encrypted")
                continue
            metadata = pdf_reader.metadata
            # Adding entire existing file to the new file created
            pdf_merger = PdfMerger()
            pdf_merger.append(file_in)
            pdf_merger.add_metadata({
                '/Revision': Revision,
                '/Description': Description
            })
        except Exception as e:
            print(e)
            continue  # skip files that PdfReader/PdfMerger cannot handle
        file_out = open("S:\\USERS\\VINTON\\BMP-Rev\\" + search_value + ".pdf", 'wb')
        pdf_merger.write(file_out)
        file_in.close()
        file_out.close()

print("All Done!!")

Name each downloaded file using the corresponding name from another file

I have created a script to download multiple images. I have another file (linkVars.py) that contains the URLs of the images to download. This script imports the linkVars.py file, then reads one URL at a time, downloads the image from that URL, and writes it into a file named {file_name}.jpg.
Below is the code for the lines explained above:
import requests
import linksVars as lV  # file with urls

def download_url(url):
    # Creating a function
    print(f"\nDownloading from: ", url)
    file_name_start_pos = url.rfind("=") + 1  # naming image by using text in url
    name_from_url = url[file_name_start_pos:]
    file_name = name_from_url
    r = requests.get(url, stream=True)
    if r.status_code == requests.codes.ok:
        # Opening the image file to write data in it
        with open(f'{file_name}.jpg', 'wb') as f:
            for data in r:
                f.write(data)
Now, I have multiple names written in name_file.txt (an external file). As I download an image, I want file_name in {file_name}.jpg to be taken from one name in name_file.txt. Then, as the code starts to download the next file, the next name in name_file.txt should be assigned to {file_name}.jpg. If someone could help me, I would be grateful!
Below is the complete code:
import requests
import linksVars as lV

def download_url(url):
    print(f"\nDownloading from: ", url)
    file_name_start_pos = url.rfind("=") + 1
    name_from_url = url[file_name_start_pos:]
    file_name = name_from_url
    r = requests.get(url, stream=True)
    if r.status_code == requests.codes.ok:
        with open(f'{file_name}.jpg', 'wb') as f:
            for data in r:
                f.write(data)

links = lV.List1
try:
    for listLinks in links:
        download_url(listLinks)
except(KeyboardInterrupt):
    print("\n\n===> Script ended by USER! <===")
Try this:
import requests
import linksVars as lV  # Importing file with URLs stored in variables
import nameVars as nV   # Importing file with names stored in variables

links = lV.List1   # List1 is the list of URLs stored in variables
names = nV.Name1   # Name1 is the list of names stored in variables

# This function will download an image from the URL and name it from Name1
def download_url(url, names):
    print(f"\nDownloading from: ", url)
    file_name_start_pos = url.rfind("v=") + 1  # It will find "v=" in the given URL
    name_from_url = url[file_name_start_pos:]
    file_name = names
    r = requests.get(url, stream=True)
    if r.status_code == requests.codes.ok:
        with open(f'{file_name}.jpg', 'wb') as f:  # Downloaded file will be opened and named
            for data in r:
                f.write(data)

try:
    for listLinks, listNames in zip(links, names):  # "For loop" will use two arguments
        download_url(listLinks, listNames)
except(KeyboardInterrupt):
    print("\n\n===> Script ended by USER! <===")

How to download images from a text file and name them using the IDs in the text file

I have a text file that contains several IDs and a link associated with each ID; the link points to an image. Now I want to download each image using its link and name the downloaded image with the corresponding ID. How do I do this in Python?
Below is a sample of the first lines in the text file:
000000001,[image_link]
import os.path
import urllib.request

links = open('/content/photos/photos.txt', 'r')
for link in links:
    link = link.strip()
    name = link.rsplit('/', 1)[-1]
    filename = os.path.join('downloads', name)
    if not os.path.isfile(filename):
        print('Downloading: ' + filename)
        try:
            urllib.request.urlretrieve(link, filename)
        except Exception as inst:
            print(inst)
            print(' Encountered unknown error. Continuing.')
Here is the solution.
import os.path
import urllib.request

links = open('./photos.txt', 'r')
for link in links:
    link = link.strip()
    name, link = link.rsplit(',', -1)  # name of file
    filename = os.path.join('downloads', name)
    if not os.path.isfile(filename):
        print('Downloading: ' + filename)
        try:
            urllib.request.urlretrieve(link, filename)
        except Exception as inst:
            print(inst)
            print(' Encountered unknown error. Continuing.')
Just update the split, and make sure the save path ('downloads') already exists.
I guess you have two problems here:
The way you are splitting the line: since the line is comma-separated, it should be
name, link = line.split(',')
Your downloads directory doesn't exist yet; you should create it with
os.makedirs('downloads', exist_ok=True)
Here is my code example:
import os
import urllib.request

images_dir = 'downloads'
os.makedirs(images_dir, exist_ok=True)

with open('/content/photos/photos.txt', 'r') as f:
    lines = f.readlines()

for line in lines:
    name, link = line.strip().split(',')  # strip the newline so the URL is clean
    filename = os.path.join(images_dir, name + '.jpg')
    if not os.path.isfile(filename):
        print('Downloading: ' + filename)
        try:
            urllib.request.urlretrieve(link, filename)
        except Exception as inst:
            print(inst)

Get Python code to persist after an IndexError

I am querying an API from a website. The API will be down for maintenance from time to time, and there may also be no data available to query at times. I have written the code to keep forcing the program to query the API even after an error; however, it doesn't seem to be working.
The following is the code:
import threading
import json
import urllib
from urllib.parse import urlparse
import httplib2 as http  # External library
import datetime
import pyodbc as db
import os
import gzip
import csv
import shutil

def task():
    # Authentication parameters
    headers = {'AccountKey': 'secret',
               'accept': 'application/json'}  # this is by default

    # API parameters
    uri = 'http://somewebsite.com/'  # Resource URL
    path = '/something/TrafficIncidents?'

    # Build query string & specify type of API call
    target = urlparse(uri + path)
    print(target.geturl())
    method = 'GET'
    body = ''

    # Get handle to http
    h = http.Http()

    # Obtain results
    response, content = h.request(target.geturl(), method, body, headers)
    api_call_time = datetime.datetime.now()

    filename = "traffic_incidents_" + str(datetime.datetime.today().strftime('%Y-%m-%d'))
    createHeader = 1
    if os.path.exists(filename + '.csv'):
        csvFile = open(filename + '.csv', 'a')
        createHeader = 0
    else:
        # compress previous day's file
        prev_filename = "traffic_incidents_" + (datetime.datetime.today() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
        if os.path.exists(prev_filename + '.csv'):
            with open(prev_filename + '.csv', 'rb') as f_in, gzip.open(prev_filename + '.csv.gz', 'wb') as f_out:
                shutil.copyfileobj(f_in, f_out)
            os.remove(prev_filename + '.csv')
        # create new csv file for writing
        csvFile = open(filename + '.csv', 'w')

    # Parse JSON to print
    jsonObj = json.loads(content)
    print(json.dumps(jsonObj, sort_keys=True, indent=4))
    with open("traffic_incidents.json", "w") as outfile:
        # Saving jsonObj["d"]
        json.dump(jsonObj, outfile, sort_keys=True, indent=4, ensure_ascii=False)

    for i in range(len(jsonObj["value"])):
        jsonObj["value"][i]["IncidentTime"] = jsonObj["value"][i]["Message"].split(' ', 1)[0]
        jsonObj["value"][i]["Message"] = jsonObj["value"][i]["Message"].split(' ', 1)[1]
        jsonObj["value"][i]["ApiCallTime"] = api_call_time

    # Save to csv file
    header = jsonObj["value"][0].keys()
    csvwriter = csv.writer(csvFile, lineterminator='\n')
    if createHeader == 1:
        csvwriter.writerow(header)
    for i in range(len(jsonObj["value"])):
        csvwriter.writerow(jsonObj["value"][i].values())
    csvFile.close()

    t = threading.Timer(120, task)
    t.start()

while True:
    try:
        task()
    except IndexError:
        pass
    else:
        break
I get the following error and the program stops:
"header = jsonObj["value"][0].keys()
IndexError: list index out of range"
I would like the program to keep running even after the IndexError has occurred.
How can I edit the code to achieve that?
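One possible approach (a sketch, under the assumption that the IndexError comes from jsonObj["value"] being empty when the API returns no incidents): guard against the empty list inside task() itself and reschedule the timer either way, so one empty poll does not stop future polls. The fetch/parse lines are elided and a placeholder jsonObj stands in for the real response:

import threading

def task():
    # ... fetch and parse as in the question ...
    jsonObj = {"value": []}  # placeholder; in the real code this comes from json.loads(content)

    # Guard: only touch jsonObj["value"][0] when the list is non-empty,
    # so the IndexError cannot be raised in the first place.
    if jsonObj["value"]:
        header = jsonObj["value"][0].keys()
        # ... write the header and rows to the CSV as in the question ...
    else:
        print("No data returned; skipping this poll.")

    # Reschedule regardless of whether data arrived.
    threading.Timer(120, task).start()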

Iterate a piece of code over a set of files

I have written a piece of code which lets me extract the table from a file named 195775.html and save the output in a text file. Now I need to iterate this code over all 20,000 files I have in the same directory. In addition, I also want the output to be tagged with the respective file names, i.e. each file should have a column (in the table) which takes the filename as its value. Also, I want the output text files to be named after the input files (i.e. the names should match).
Here is my code:
import urllib2
import os
import time
import traceback
from bs4 import BeautifulSoup

outfile = open('C:/Users/Manvendra/Dropbox/Python/195775.txt', 'wb')
rfile = open('C:/Users/Manvendra/Dropbox/PRI/Data/AP/195775.html')
rsoup = BeautifulSoup(rfile)
nodes = rsoup.find('div', {'class': 'frmhdtitle'})
if nodes != None:
    #print "div present"
    x = nodes.findNext('table')
    if x != None:
        #print "table present"
        y = x.find('tbody')
        if y != None:
            #print "tbody present"
            z = y.findAll('tr')
            if z != None:
                #print "tr present"
                for wx in z[1:]:
                    num = wx.find('td').get_text()
                    print num
                    name = wx.find('td').findNext('td').get_text()
                    print name
                    age = wx.find('td').findNext('td').findNext('td').get_text()
                    print age
                    caste = wx.find('td').findNext('td').findNext('td').findNext('td').get_text()
                    print caste
                    gender = wx.find('td').findNext('td').findNext('td').findNext('td').findNext('td').get_text()
                    print gender
                    quali = wx.find('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').get_text()
                    print quali
                    occu = wx.find('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').get_text()
                    print occu
                    #email = wx.find('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').get_text()
                    #print email
                    #ward = wx.find('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').get_text()
                    #print ward
                    resr = wx.find('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').findNext('td').get_text()
                    print resr
                    outfile.write(str(num) + "\t" + str(name) + "\t" + str(age) + "\t" + str(caste) + "\t" + str(quali) + "\t" + str(occu) + "\t" + str(resr) + str(infile) + "\n")
outfile.close()
Put your code into a separate function and call it for each html file in the directory:
#!/usr/bin/env python2
import os
from glob import glob

dest_dir = 'C:/Users/Manvendra/Dropbox/Python'
for html_filename in glob('C:/Users/Manvendra/Dropbox/PRI/Data/AP/*.html'):
    basename = os.path.splitext(os.path.basename(html_filename))[0]
    with open(html_filename, 'rb') as html_file, \
         open(os.path.join(dest_dir, basename + '.txt'), 'wb') as csv_file:
        html2csv(html_file, csv_file)
where html2csv() is:
import csv
import logging
from bs4 import BeautifulSoup

log = logging.getLogger(__name__)

def html2csv(html_file, csv_file):
    writerow = csv.writer(csv_file, dialect=csv.excel_tab).writerow
    div = BeautifulSoup(html_file).find('div', 'frmhdtitle')
    try:
        rows = div.find_next('table').tbody.find_all('tr')[1:]
    except AttributeError:
        log.warning("No info in %s file", html_file.name)
    else:
        for tr in rows:
            writerow([td.get_text().encode('utf-8')
                      for td in tr.find_all('td')[:8]] + [html_file.name])
Note: the findNext('td') method in your code searches the HTML document without any regard for element boundaries, i.e. it may find a td that belongs to a different row or even a different table, as long as it appears later in the document. I rewrote the loop assuming that you want eight adjacent <td> elements in each row.
Do something like this:
files = os.listdir(directoryPath)
for file in files:
    # *your code*
Note that if you want to open the files, you need to open the full path: directoryPath + "/" + file.
Regarding the tags and output filenames: file is now a variable which contains the name of the file you are currently processing, so do with it what you want.
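As a small sketch of that listdir approach (paths taken from the question; the table-extraction logic itself is left as a comment):

import os

directory_path = 'C:/Users/Manvendra/Dropbox/PRI/Data/AP'  # input folder from the question
output_dir = 'C:/Users/Manvendra/Dropbox/Python'           # output folder from the question

for name in os.listdir(directory_path):
    if not name.endswith('.html'):
        continue
    in_path = os.path.join(directory_path, name)  # full path is needed for open()
    out_path = os.path.join(output_dir, os.path.splitext(name)[0] + '.txt')
    with open(in_path) as html_file, open(out_path, 'w') as out_file:
        # run the table-extraction code here, writing each row plus `name` as the extra column
        pass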
