I was trying to make a prophecy bot that generates prophecies from rap and country lyrics, but I keep hitting an error on the generation. The specific error is below. I am pulling the lyrics from a couple thousand line text files divided by individual artist.
The error appears about 80% of the time and with the same position for each artist. I have tried utf8, cp452, and just straight binary but none of them seem to work. I have pasted my code below.
import random
import linecache
artists = ["astronautalis","common","garth brooks","johnny cash","marty robbins","mf doom","nas","willie nelson"]
#def file_len(fname):
# with open(fname) as f:
# for i, l in enumerate(f):
# pass
#return i + 1
def prophecy(futuresize):
if futuresize == "Large":
for x in range(8):
print(x)
randartist = artists[random.randint(0,7)]
print(randartist)
fiellen = 'lyrics/' + randartist + '.txt'
print(fiellen)
file = open('lyrics/' + randartist + '.txt', 'r', encoding='cp1252', errors='ignore')
print(file)
count = 0
count = len(open(fiellen).readlines())
line_to_read = random.randint(0,(count - 1))
line = linecache.getline(fiellen, line_to_read)
print(line)
file.close()
prophecy("Large")
Here are the current files I am working with as well.
Lyric Files
Thanks again for your help everyone!
Related
import os, re
config_file = "jsm_gyro_config.txt"
#fptr = open(config, "w")
#text = "demo text"
#fptr.write(text)
#fptr.close()
file = open(config_file, 'r')
file-read = file.read()
for line in file-read:
if re.search(userinput, file-read):
x = re.search(userinput, file-read)
# iteminputted is what the user wants to replace
iteminputted = "ref"
startpostion = x.span[1] + 3
endpostion = startposition + len(iteminputted)
# Find out how to write to a specific location in a file that will finish this off
else:
print("Item not found")
This is what i've tried and here is my thought process as always any help is appreatated and please make it understandable for an idiot :(
To begin with, you should not use - in your variable declarations as it is actually an operator and will always be treated as such. It will attempt to subtract.
Here is the same code with that fixed and also with the input
import os, re
config_file = "jsm_gyro_config.txt"
#fptr = open(config, "w")
#text = "demo text"
#fptr.write(text)
#fptr.close()
file = open(config_file, 'r')
file_read = file.read()
file.close() # You should always close your files.
for line in file_read:
if re.search(userinput, file_read):
x = re.search(userinput, file_read)
# iteminputted is what the user wants to replace
iteminputted = input("Input what you would like to replace > ")
startpostion = x.span[1] + 3
endpostion = startposition + len(iteminputted)
# Find out how to write to a specific location in a file that will finish this off
else:
print("Item not found")
However your question is very unclear, I did the best I could.
I am trying to download few videos from list of links.
Every line in text file is one link.
When I try to download all videos in loop, only first one is working.
Videos are from 60 - 100 MB.
Loop continues afterwards, but files are empty.
Thank you for help.
def download():
name = 'video'
a = 1
with open('download.txt') as f:
lines = f.readlines()
for line in lines:
url = line
response = requests.get(url, stream=True)
name = name + str(a)
filename = name + '.mp4'
with open(filename, 'wb') as f:
f.write(response.content)
a = a + 1
def download():
name = 'video'
a = 1
with open('download.txt') as f:
lines = f.readlines()
for line in lines:
urllib.request.urlretrieve(line.strip(), name + str(a) + ".mpg")
a += 1
This code worked for me.
Depending on your purpose, you may want to account for security, robustness (what happens if one download fails?), performance (concurrency?).
My code attempts to create a folder which then downloads a pdf to the corresponding folder. In my current code the variable and counter "i" keeps track of which folder to download to but it seems to not be updating for some reason. At the end of the else if statement I want the variable i to increase by 1. Not understanding what the issue is here, I'm fairly new to python and if a similar situation was coded in java I know this would work just file but not sure why it's not working in python.
import re
import os
import urllib
f = open("newfile.txt")
suffix = '.pdf'
for line in f:
i = 0
folderopt = str(i)
if suffix in line:
print('download')
url = line.rstrip('\n')
pdfname = url.split('LTN',1)[1]
print ('download to:'+'/Users/user/Desktop/PDF/'+folderopt+'/'+pdfname)
urllib.urlretrieve(url,'/Users/user/Desktop/PDF/'+folderopt+'/'+pdfname)
elif line>i:
filename = line.rstrip('\n')
print ('code:'+filename)
os.mkdir('/Users/user/Desktop/PDF/'+filename)
global i
i = i+1
f.close
EDIT: I put the variable outside of the for loop still getting this
IOError: [Errno 2] No such file or directory: '/Users/user/Desktop/PDF/0/20160412398.pdf'
the i count has not increased even though the folder /Users/user/Desktop/PDF/1 was created.
changed elif statement to
elif int(line.rstrip('\n'))>i
still not working
You set i to 0 in the loop,try this:
import re
import os
import urllib
f = open("newfile.txt")
suffix = '.pdf'
i = 0
for line in f:
folderopt = str(i)
if suffix in line:
print('download')
url = line.rstrip('\n')
pdfname = url.split('LTN',1)[1]
print ('download to:'+'/Users/user/Desktop/PDF/'+folderopt+'/'+pdfname)
urllib.urlretrieve(url,'/Users/user/Desktop/PDF/'+folderopt+'/'+pdfname)
elif line>i:
filename = line.rstrip('\n')
print ('code:'+filename)
os.mkdir('/Users/user/Desktop/PDF/'+filename)
global i
i+= 1
f.close
Thats because the first line in your loop sets i to 0.
Try
i = 0
for line in f:
instead of
for line in f:
i = 0
EDIT:
Also, the i+=1 should be outside the elif condition, like so
elif:
...
i+=1
not
elif:
...
i+=1
I'm converting text directly to epub and I'm having a problem automatically splitting the HTML book file into separate header/chapter files. At the moment, the code below partially works but only creates every other chapter file. So half the header/chapter files are missing from the output. Here is the code:
def splitHeaderstoFiles(fpath):
infp = open(fpath, 'rt', encoding=('utf-8'))
for line in infp:
# format and split headers to files
if '<h1' in line:
#-----------format header file names and other stuff ------------#
# create a new file for the header/chapter section
path = os.getcwd() + os.sep + header
with open(path, 'wt', encoding=('utf-8')) as outfp:
# write html top meta headers
outfp = addMetaHeaders(outfp)
# add the header
outfp = outfp.write(line)
# add the chapter/header bodytext
for line in infp:
if '<h1' not in line:
outfp.write(line)
else:
outfp.write('</body>\n</html>')
break
else:
continue
infp.close()
The problem occurs in the second 'for loop' at the bottom of the code, when I look for the next h1 tag to stop the split. I cannot use seek() or tell() to rewind or move back one line so the program can find the next header/chapter on the next iteration. Apparently you cannot use these in python in a for loop containing an implicit iter or next object in operation. Just gives a 'can't do non-zero cur-relative seeks' error.
I've also tried the while line != ' ' + readline() combination in the code which also gives the same error as above.
Does anyone know an easy way to split HTML headers/chapters of varying lengths into separate files in python? Are there any special python modules(such as pickles) that could help make this task easier?
I'm using Python 3.4
My grateful thanks in advance for any solutions to this problem...
I ran into similar problem a while ago, here is a simplified solution:
from itertools import count
chapter_number = count(1)
output_file = open('000-intro.html', 'wb')
with open('index.html', 'rt') as input_file:
for line in input_file:
if '<h1' in line:
output_file.close()
output_file = open('{:03}-chapter'.format(next(chapter_number)), 'wb')
output_file.write(line)
output_file.close()
In this approach, the first block of text leading to the first h1 block is written into 000-intro.html, the first chapter will be written into 001-chapter.html and so on. Please modify it to taste.
The solution is a simple one: Upon encountering the h1 tag, close the last output file and open a new one.
You are looping over your input file twice, which is likely causing your problems:
for line in infp:
...
with open(path, 'wt', encoding=('utf-8')) as outfp:
...
for line in infp:
...
Each for is going to have it's own iterator, so you are going to loop over the file many times.
You might try transforming your for loop into a while so you're not using two different iterators:
while infp:
line = infp.readline()
if '<h1' in line:
with open(...) as outfp:
while infp:
line = infp.readline()
if '<h1' in line:
break
outfp.writeline(...)
Alternatively, you may wish to use an HTML parser (i.e., BeautifulSoup). Then you can do something like what is described here: https://stackoverflow.com/a/8735688/65295.
Update from comment - essentially, read the entire file all at once so you can freely move back or forward as necessary. This probably won't be a performance issue unless you have a really really big file (or very little memory).
lines = infp.readlines() # read the entire file
i = 0
while i < len(lines):
if '<h1' in lines[i]:
with open(...) as outfp:
j = i + 1
while j < len(lines):
if '<h1' in lines[j]:
break
outfp.writeline(lines[j])
# line j has an <h1>, set i to j so we detect the it at the
# top of the next loop iteration.
i = j
else:
i += 1
I eventually found the answer to the above problem. The code below does alot more that just get the file header. It also simultaneously loads two parallel list arrays with formatted file name data(with extension) and pure header name data respectively so I can use these lists to fill in the and formatted filename extension in these html files within a while loop in one hit. The code now works well and is shown below.
def splitHeaderstoFiles(dir, inpath):
count = 1
t_count = 0
out_path = ''
header = ''
write_bodytext = False
file_path_names = []
pure_header_names = []
inpath = dir + os.sep + inpath
with open(inpath, 'rt', encoding=('utf-8')) as infp:
for line in infp:
if '<h1' in line:
#strip html tags, convert to start caps
p = re.compile(r'<.*?>')
header = p.sub('', line)
header = capwords(header)
line_save = header
# Add 0 for count below 10
if count < 10:
header = '0' + str(count) + '_' + header
else:
header = str(count) + '_' + header
# remove all spaces + add extension in header
header = header.replace(' ', '_')
header = header + '.xhtml'
count = count + 1
#create two parallel lists used later
out_path = dir + os.sep + header
outfp = open(out_path, 'wt', encoding=('utf-8'))
file_path_names.insert(t_count, out_path)
pure_header_names.insert(t_count, line_save)
t_count = t_count + 1
# Add html meta headers and write it
outfp = addMainHeaders(outfp)
outfp.write(line)
write_bodytext = True
# add header bodytext
elif write_bodytext == True:
outfp.write(line)
# now add html titles and close the html tails on all files
max_num_files = len(file_path_names)
tmp = dir + os.sep + 'temp1.tmp'
i = 0
while i < max_num_files:
outfp = open(tmp, 'wt', encoding=('utf-8'))
infp = open(file_path_names[i], 'rt', encoding=('utf-8'))
for line in infp:
if '<title>' in line:
line = line.strip(' ')
line = line.replace('<title></title>', '<title>' + pure_header_names[i] + '</title>')
outfp.write(line)
else:
outfp.write(line)
# add the html tail
if '</body>' in line or '</html>' in line:
pass
else:
outfp.write(' </body>' + '\n</html>')
# clean up
infp.close()
outfp.close()
shutil.copy2(tmp, file_path_names[i])
os.remove(tmp)
i = i + 1
# now rename just the title page
if os.path.isfile(file_path_names[0]):
title_page_name = file_path_names[0]
new_title_page_name = dir + os.sep + '01_Title.xhtml'
os.rename(title_page_name, new_title_page_name)
file_path_names[0] = '01_Title.xhtml'
else:
logmsg27(DEBUG_FLAG)
os._exit(0)
# xhtml file is no longer needed
if os.path.isfile(inpath):
os.remove(inpath)
# returned list values are also used
# later to create epub opf and ncx files
return(file_path_names, pure_header_names)
#Hai Vu and #Seth -- Thanks for all your help.
I am having problems deleting a specific line/entry within a text file. With the code I have the top line in the file is deleted no matter what line number I select to delete.
def erase():
contents = {}
f = open('members.txt', 'a')
f.close()
f = open('members.txt', 'r')
index = 0
for line in f:
index = index + 1
contents[index] = line
print ("{0:3d}) {1}".format(index,line))
f.close()
total = index
entry = input("Enter number to be deleted")
f = open('members.txt', 'w')
index = 0
for index in range(1,total):
index = index + 1
if index != entry:
f.write(contents[index])
Try this:
import sys
import os
def erase(file):
assert os.path.isfile(file)
with open(file, 'r') as f:
content = f.read().split("\n")
#print content
entry = input("Enter number to be deleted:")
assert entry >= 0 and entry < len(content)
new_file = content[:entry] + content[entry+1:]
#print new_file
with open(file,'w') as f:
f.write("\n".join(new_file))
if __name__ == '__main__':
erase(sys.argv[1])
As already noted you were starting the range from 1 which is incorrect. List slicing which I used in new_file = content[:entry] + content[entry+1:] makes the code more readable and it is an approach less prone to similar errors.
Also you seem to open and close the input file at the beginning for no reason. Also you should use with if possible when doing operations with files.
Finally I used the join and split to simplify the code so you don't need a for loop to process the lines of the file.