execfile issue with the filename argument in Python

I have two Python scripts: one is a match script, the other a regex script. I want to run matchParser.py first and then regexParser.py. The regexParser script should know the name of the output file that matchParser wrote and keep working with it. I hope I could explain it clearly. I have tried a lot, but unfortunately without success.
My output error: TypeError: coercing to Unicode: need string or buffer, file found
matchParser.py
import glob

# raw string so the backslashes in the Windows path are not treated as escapes
intfiles = glob.glob(r"C:\Users\x\Desktop\x\*.csv")
header_saved = False
with open('outputdre121.csv', 'wb') as fout:
    for filename in intfiles:
        with open(filename) as fin:
            header = next(fin)
            if not header_saved:
                fout.write(header)
                header_saved = True
            for line in fin:
                fout.write(line)
print "start next script: regexParser.py"
execfile("regexParser.py")
regexParser.py
import re
import matchParser

# here the filename comes from matchParser.py
lines = [line.strip() for line in open(matchParser.fout)]
with open('outputregexdre13.csv', "w") as output_csv:
    for result in lines:
        match = re.search(r'((\s\d+?[1-9])!?[ ])', result)
        if match:
            output_csv.write(match.group(0) + '\n')
thanks!

I have found the solution.
It is very simple, actually: I use the .name attribute of matchParser.fout:
lines = [line.strip() for line in open(matchParser.fout.name)]
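As an alternative (only a sketch, not from the original scripts), the output filename could be kept in a plain module-level variable so that regexParser.py does not depend on the file object at all; OUTPUT_FILE here is a hypothetical name:

# matchParser.py (sketch)
OUTPUT_FILE = 'outputdre121.csv'   # hypothetical module-level constant
with open(OUTPUT_FILE, 'wb') as fout:
    ...  # write the merged CSV as above

# regexParser.py (sketch)
import matchParser
lines = [line.strip() for line in open(matchParser.OUTPUT_FILE)]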

Related

How to replace a line that has a specific text

I want to search for a particular text and replace the line if the text is present in that line.
In this code I replace line 125, but want to replace dynamically according to the text:
file = open("config.ini", "r")
lines = file.readlines()
lines[125] = "minimum_value_gain = 0.01" + '\n'
f.writelines(lines)
f.close()
How do I make it so that if a line has:
minimum_value_gain =
then replace that line with:
minimum_value_gain = 0.01
There is no reason to manually parse a config.ini file as text. You should use configparser to make things much simpler. This library reads the file for you and essentially converts it to a dict, so processing the data is much easier. For your task you can do something like:
import configparser

config = configparser.ConfigParser()
config.read("config.ini")
for section in config:
    if config.has_option(section, "minimum_value_gain"):
        config.set(section, "minimum_value_gain", "0.01")
with open("config.ini", 'w') as f:
    config.write(f)
Since you are replacing the complete line, an if statement will do the trick for you; there is no need to replace text within the line.
# updated: make sure one line doesn't get both values
file = open("config.ini", "r")
lines = file.readlines()
file.close()
newlines = []
for line in lines:
    if "minimum_value_gain" in line:
        line = "minimum_value_gain = 0.01" + '\n'
    if "score_threshold" in line:
        line = "Values you want to add" + '\n'
    newlines.append(line)
file = open("config.ini", "w")
file.writelines(newlines)
file.close()
A little messy and not optimized, but it gets the job done: it first reads all the lines, then truncates the file and rewrites it, replacing every line that starts with the search text.
def replace_in_file(filename: str, search_text: str, string_to_add: str) -> None:
    with open(filename, "r+") as file_to_write:
        lines = file_to_write.readlines()
        file_to_write.seek(0)
        file_to_write.truncate()
        for line in lines:
            if line.startswith(search_text):
                line = line.rstrip("\n") + string_to_add + "\n"
            file_to_write.write(line)

replace_in_file("sdf.txt", "minimum_value_gain", " = 0.01")
You can also use Python's re (regex) library.
Here is an example.
It is better not to read and write the same file at once; that is not good practice. Write to a different file and then rename it.
import re

pattern = 'minimum_value_gain'
string_to_replace = 'minimum_value_gain = 0.01\n'

file = open("config.ini", "r")
fileout = open("new_config.ini", "w")
lines = file.readlines()
newlines = [string_to_replace if re.match(pattern, line) else line for line in lines]
file.close()
fileout.writelines(newlines)
fileout.close()
You can rename the file afterwards:
import os
os.remove("config.ini")
os.rename("new_config.ini", "config.ini")
Set the string you would like to look for (match_string = 'example').
Have an empty list, output_list.
Use with open(x, y) as z: (this will automatically close the file after completion).
Use for line in file.readlines() to run through each line of the file.
The if statement appends your replacement line if match_string is in the line, else it just appends the original line.
NOTE: All variables can be any name that is not reserved (don't call something just 'list').
match_string = 'example'
output_list = []
with open("config.ini", "r") as file:
    for line in file.readlines():
        if match_string in line:
            output_list.append('minimum_value_gain = 0.01\n')
        else:
            output_list.append(line)
Maybe not ideal for a first introduction to Python (or the most readable), but I would have done it as follows:
with open('config.ini', 'r') as in_file:
    out_file = ['minimum_value_gain = 0.01\n' if 'example' in line else line for line in in_file.readlines()]
To replace a specific text in a string
a = 'My name is Zano'
b = a.replace('Zano', 'Zimmer')

Check if a variable string exist in a text file

So guys, I'm trying to make a password generator but I'm having this trouble.
First, the code I use for tests:
idTest= "TEST"
passwrd= str(random.randint(11, 99))
if not os.path.exists('Senhas.txt'):
txtFileW = open('Senhas.txt', 'w')
txtFileW.writelines(f'{idTest}: {passwrd}\n')
txtFileW.close()
else:
txtFileA = open('Senhas.txt', 'a')
txtFileA.write(f'{idTest}: {passwrd}\n')
txtFileA.close()
print(f'{idTest}: {passwrd}')
Well, what I'm expecting is something like this:
else:
    with open('Senhas.txt', 'r+') as opened:
        opened.read()
        for lines in opened:
            if something == idTest:
                lines.replace(f'{something}', f'{idTest}')
            else:
                break
    txtFileA = open('Senhas.txt', 'a')
    txtFileA.write(f'{idTest}: {passwrd}\n')
    txtFileA.close()
print(f'{idTest}: {passwrd}')
I've searched for it, but all I've found are ways that split it into two files (which doesn't fit my project) or that use "static" strings, which doesn't work for me either.
You can use the fileinput module to update the file in place.
import fileinput

with fileinput.input(files=('Senhas.txt',), inplace=True) as f:
    for line in f:
        if line.startswith(idTest + ':'):
            print(f'{idTest}: {passwrd}')
        else:
            print(line, end='')

How to add for loop in python?

I'm creating new files from originally existing ones in the mdp folder by changing a couple of lines in those files using Python. I need to do this for 1000 files. Can anyone suggest a for loop that reads all the files, changes them, and creates the new ones in one go?
This way I have to change the number that follows 'md_' in the path, which is tedious because there are 1000 files.
I tried using str() but there was a 'could not read file' error.
fin = open("/home/abc/xyz/mdp/md_1.mdp", "rt")
fout = open("/home/abc/xyz/middle/md_1.mdp", "wt")
for line in fin:
fout.write(line.replace('integrator = md', 'integrator
= md-vv'))
fin = open("/home/abc/xyz/middle/md_1.mdp", "rt")
fout = open("/home/abc/xyz/mdb/md_1.mdp", "wt")
for line in fin:
fout.write(line.replace('dt = 0.001', 'dt
= -0.001'))
fin.close()
fout.close()
os.listdir(path) is your friend:
import os

sourcedir = "/home/abc/xyz/mdp"
destdir = "/home/abc/xyz/middle"

for filename in os.listdir(sourcedir):
    if not filename.endswith(".mdp"):
        continue
    source = os.path.join(sourcedir, filename)
    dest = os.path.join(destdir, filename)
    # with open(xxx) as varname makes sure the file(s)
    # will be closed whatever happens in the 'with' block
    # NB text mode is the default, and so is read mode
    with open(source) as fin, open(dest, "w") as fout:
        # python files are iterable... avoids reading
        # the whole file in memory at once
        for line in fin:
            # will only work for those exact strings,
            # you may want to use regexps if the number of
            # whitespaces varies etc
            line = line.replace("dt = 0.001", "dt = -0.001")
            line = line.replace('integrator = md', 'integrator = md-vv')
            fout.write(line)
Assuming you want to edit all files that are located in the mdp folder you could do something like this.
import os

dir = "/home/abc/xyz/mdp/"
for filename in os.listdir(dir):
    with open(dir + filename, "r+") as file:
        text = file.read()
        text = text.replace("dt = 0.001", "dt = -0.001")
        file.seek(0)
        file.write(text)
        file.truncate()
This will go through every file and change it using str.replace().
If there are other files in the mdp folder that you do not want to edit, you could use an if-statement to check for the correct file name. Add something like this around the with open statement (see the sketch below):
if filename.startswith("md_"):
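For completeness, a minimal sketch of how that check might wrap the loop body from the answer above (same assumed directory path, not part of the original answer):

import os

dir = "/home/abc/xyz/mdp/"
for filename in os.listdir(dir):
    # only touch the files whose names start with "md_"
    if filename.startswith("md_"):
        with open(dir + filename, "r+") as file:
            text = file.read()
            text = text.replace("dt = 0.001", "dt = -0.001")
            file.seek(0)
            file.write(text)
            file.truncate()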

How to search and replace text in a file?

How do I search and replace text in a file using Python 3?
Here is my code:
import os
import sys
import fileinput

print ("Text to search for:")
textToSearch = input( "> " )
print ("Text to replace it with:")
textToReplace = input( "> " )
print ("File to perform Search-Replace on:")
fileToSearch = input( "> " )
#fileToSearch = 'D:\dummy1.txt'

tempFile = open( fileToSearch, 'r+' )
for line in fileinput.input( fileToSearch ):
    if textToSearch in line :
        print('Match Found')
    else:
        print('Match Not Found!!')
    tempFile.write( line.replace( textToSearch, textToReplace ) )
tempFile.close()

input( '\n\n Press Enter to exit...' )
Input file:
hi this is abcd hi this is abcd
This is dummy text file.
This is how search and replace works abcd
When I search for and replace 'ram' with 'abcd' in the above input file, it works like a charm. But when I do it vice versa, i.e. replacing 'abcd' with 'ram', some junk characters are left at the end.
Replacing 'abcd' by 'ram'
hi this is ram hi this is ram
This is dummy text file.
This is how search and replace works rambcd
As pointed out by michaelb958, you cannot replace in place with data of a different length because this will put the rest of the sections out of place. I disagree with the other posters suggesting you read from one file and write to another. Instead, I would read the file into memory, fix the data up, and then write it out to the same file in a separate step.
# Read in the file
with open('file.txt', 'r') as file:
    filedata = file.read()

# Replace the target string
filedata = filedata.replace('abcd', 'ram')

# Write the file out again
with open('file.txt', 'w') as file:
    file.write(filedata)
This works well unless you've got a massive file that is too big to load into memory in one go, or you are concerned about potential data loss if the process is interrupted during the second step, in which you write data back to the file.
fileinput already supports inplace editing. It redirects stdout to the file in this case:
#!/usr/bin/env python3
import fileinput

with fileinput.FileInput(filename, inplace=True, backup='.bak') as file:
    for line in file:
        print(line.replace(text_to_search, replacement_text), end='')
As Jack Aidley had posted and J.F. Sebastian pointed out, this code will not work:
# Read in the file
filedata = None
with file = open('file.txt', 'r') :
    filedata = file.read()

# Replace the target string
filedata.replace('ram', 'abcd')

# Write the file out again
with file = open('file.txt', 'w') :
    file.write(filedata)
But this code WILL work (I've tested it):
f = open(filein,'r')
filedata = f.read()
f.close()
newdata = filedata.replace("old data","new data")
f = open(fileout,'w')
f.write(newdata)
f.close()
Using this method, filein and fileout can be the same file, because Python 3.3 will overwrite the file upon opening for write.
You can do the replacement like this
f1 = open('file1.txt', 'r')
f2 = open('file2.txt', 'w')
for line in f1:
    f2.write(line.replace('old_text', 'new_text'))
f1.close()
f2.close()
You can also use pathlib.
from pathlib2 import Path  # pathlib2 is the backport; on Python 3 use "from pathlib import Path"

path = Path(file_to_search)
text = path.read_text()
text = text.replace(text_to_search, replacement_text)
path.write_text(text)
(pip install python-util)
from pyutil import filereplace
filereplace("somefile.txt","abcd","ram")
Will replace all occurrences of "abcd" with "ram".
The function also supports regex by specifying regex=True
from pyutil import filereplace
filereplace("somefile.txt","\\w+","ram",regex=True)
Disclaimer: I'm the author (https://github.com/MisterL2/python-util)
Open the file in read mode, read the file into a string, replace the text as intended, and close the file. Then open the file again in write mode and, finally, write the replaced text back to the same file.
try:
    with open(file_name, "r") as text_file:
        texts = text_file.read()
    texts = texts.replace("to_replace", "replace_string")
    with open(file_name, "w") as text_file:
        text_file.write(texts)
except FileNotFoundError as f:
    print("Could not find the file you are trying to read.")
Late answer, but this is what I use to find and replace inside a text file:
with open("test.txt") as r:
text = r.read().replace("THIS", "THAT")
with open("test.txt", "w") as w:
w.write(text)
With a single with block, you can search and replace your text:
with open('file.txt', 'r+') as f:
    filedata = f.read()
    filedata = filedata.replace('abc', 'xyz')
    f.seek(0)       # go back to the start before truncating and rewriting
    f.truncate()
    f.write(filedata)
Your problem stems from reading from and writing to the same file. Rather than opening fileToSearch for writing, open an actual temporary file and then after you're done and have closed tempFile, use os.rename to move the new file over fileToSearch.
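A minimal sketch of that approach, assuming the same textToSearch, textToReplace and fileToSearch variables as in the question; it uses tempfile.NamedTemporaryFile and os.replace (an atomic rename on the same filesystem, Python 3.3+):

import os
import tempfile

# write the replaced content to a temporary file next to the original
with open(fileToSearch) as src, tempfile.NamedTemporaryFile(
        'w', dir=os.path.dirname(os.path.abspath(fileToSearch)), delete=False) as dst:
    for line in src:
        dst.write(line.replace(textToSearch, textToReplace))

# move the finished temporary file over the original
os.replace(dst.name, fileToSearch)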
My variant, replacing one word at a time across the entire file.
I read it into memory.
import os
import sys

def replace_word(infile, old_word, new_word):
    if not os.path.isfile(infile):
        print("Error on replace_word, not a regular file: " + infile)
        sys.exit(1)

    f1 = open(infile, 'r').read()
    f2 = open(infile, 'w')
    m = f1.replace(old_word, new_word)
    f2.write(m)
    f2.close()
Using re.subn it is possible to have more control over the substitution process, such as matching a word split over two lines or doing a case-(in)sensitive match. Furthermore, it returns the number of matches, which can be used to avoid wasting resources if the string is not found.
import re

file = ...  # path to file
# these can also be raw strings / regexes
textToSearch = r'Ha.*O'   # here an example with a regex
textToReplace = 'hallo'

# read and replace
with open(file, 'r') as fd:
    # sample case-insensitive find-and-replace
    text, counter = re.subn(textToSearch, textToReplace, fd.read(), flags=re.I)

# check if there is at least one match
if counter > 0:
    # edit the file
    with open(file, 'w') as fd:
        fd.write(text)

# summary result
print(f'{counter} occurrences of "{textToSearch}" were replaced with "{textToReplace}".')
Some regex:
add the re.I flag, short form of re.IGNORECASE, for a case-insensitive match
for a multi-line replacement use re.subn(r'\n*'.join(textToSearch), textToReplace, fd.read()); depending on the data, '\n{,1}' may fit better. Notice that in this case textToSearch must be a plain string, not a regex (see the sketch below).
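A small illustration of that trick (my example strings, not from the answer): joining the characters of the search word with r'\n*' lets the pattern match even when the word is broken across a line break.

import re

textToSearch = 'Hallo'
textToReplace = 'hello'

# 'H\n*a\n*l\n*l\n*o' tolerates newlines between the letters
pattern = r'\n*'.join(textToSearch)

text, counter = re.subn(pattern, textToReplace, 'Hal\nlo world')
print(text, counter)   # -> hello world 1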
Besides the answers already mentioned, here is an explanation of why you have some random characters at the end:
You are opening the file in r+ mode, not w mode. The key difference is that w mode clears the contents of the file as soon as you open it, whereas r+ doesn't.
This means that if your file content is "123456789" and you write "www" to it, you get "www456789". It overwrites the characters with the new input, but leaves any remaining input untouched.
You can clear a section of the file contents by using truncate(<startPosition>), but you are probably best off saving the updated file content to a string first, then seeking back to the start, truncating, and writing it all at once.
Or you can use my library :D
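To illustrate the r+ behaviour described above, a small sketch (the file name and contents are just examples):

with open('numbers.txt', 'w') as f:       # start with known content
    f.write('123456789')

with open('numbers.txt', 'r+') as f:
    f.write('www')                        # overwrites in place, no truncation

print(open('numbers.txt').read())         # -> www456789

# the safer pattern: read, modify, seek back, truncate, rewrite
with open('numbers.txt', 'r+') as f:
    data = f.read().replace('www', 'X')
    f.seek(0)
    f.truncate()
    f.write(data)

print(open('numbers.txt').read())         # -> X456789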
I had the same issue. The problem is that when you load a .txt file into a variable this way, you end up using it like an array of strings while it is actually an array of characters.
swapString = []
with open(filepath) as f:
    s = f.read()
    for each in s:
        swapString.append(str(each).replace('this', 'that'))
    s = swapString
print(s)
I tried this and used readlines instead of read
with open('dummy.txt', 'r') as file:
    lines = file.readlines()
print(f'before removal {lines}')
for i in lines[:]:
    lines.remove(i)
print(f'After removal {lines}')
with open('dummy.txt', 'w+') as f:
    for i in lines:
        f.write(i)
You can use sed or awk or grep from Python (with some restrictions). Here is a very simple example; it changes Banana to BananaToothpaste in the file. You can edit and use it. (I tested it and it worked. Note: if you are testing under Windows you should install the "sed" command and set the path first.)
import os
file="a.txt"
oldtext="Banana"
newtext=" BananaToothpaste"
os.system('sed -i "s/{}/{}/g" {}'.format(oldtext,newtext,file))
#print(f'sed -i "s/{oldtext}/{newtext}/g" {file}')
print('This command was applied: sed -i "s/{}/{}/g" {}'.format(oldtext,newtext,file))
if you want to see results on the file directly apply: "type" for windows/ "cat" for linux:
####FOR WINDOWS:
os.popen("type " + file).read()
####FOR LINUX:
os.popen("cat " + file).read()
I have done this:
#!/usr/bin/env python3
import fileinput
import os

Dir = input("Source directory: ")
os.chdir(Dir)
Filelist = os.listdir()
print('File list: ', Filelist)
NomeFile = input("Insert file name: ")
CarOr = input("Text to search: ")
CarNew = input("New text: ")
with fileinput.FileInput(NomeFile, inplace=True, backup='.bak') as file:
    for line in file:
        print(line.replace(CarOr, CarNew), end='')
I modified Jayram Singh's post slightly in order to replace every instance of a '!' character with a number which I wanted to increment with each instance. Thought it might be helpful to someone who wants to modify a character that occurs more than once per line and wants to iterate. Hope that helps someone. P.S. I'm very new to coding, so apologies if my post is inappropriate in any way, but this worked for me.
f1 = open('file1.txt', 'r')
f2 = open('file2.txt', 'w')
n = 1

# if word == '!' replace it with [n] & increment n; else append the same
# word to file2
for line in f1:
    for word in line:
        if word == '!':
            f2.write(word.replace('!', f'[{n}]'))
            n += 1
        else:
            f2.write(word)
f1.close()
f2.close()
def word_replace(filename, old, new):
    c = 0
    with open(filename, 'r+', encoding='utf-8') as f:
        a = f.read()
        b = a.split()
        for i in range(0, len(b)):
            if b[i] == old:
                c = c + 1
        old = old.center(len(old) + 2)
        new = new.center(len(new) + 2)
        d = a.replace(old, new, c)
        f.truncate(0)
        f.seek(0)
        f.write(d)
    print('All words have been replaced!!!')
I have worked this out as an exercise for a course: open a file, find and replace a string, and write to a new file.
class Letter:
    def __init__(self):
        with open("./Input/Names/invited_names.txt", "r") as file:
            # read the list of names
            list_names = [line.rstrip() for line in file]
        with open("./Input/Letters/starting_letter.docx", "r") as f:
            # read letter
            file_source = f.read()
        for name in list_names:
            with open(f"./Output/ReadyToSend/LetterTo{name}.docx", "w") as f:
                # replace [name] with name of the list in the file
                replace_string = file_source.replace('[name]', name)
                # write to a new file
                f.write(replace_string)

brief = Letter()
Like so:
def find_and_replace(file, word, replacement):
    with open(file, 'r+') as f:
        text = f.read()
        f.seek(0)      # go back to the start so the write replaces instead of appending
        f.truncate()
        f.write(text.replace(word, replacement))
def findReplace(find, replace):
    import os
    src = os.path.join(os.getcwd(), os.pardir)
    for path, dirs, files in os.walk(os.path.abspath(src)):
        for name in files:
            if name.endswith('.py'):
                filepath = os.path.join(path, name)
                with open(filepath) as f:
                    s = f.read()
                s = s.replace(find, replace)
                with open(filepath, "w") as f:
                    f.write(s)

file crawler OSError

I want this to recursively open the next file listed in a manually supplied file; it is the first word listed in that file.
The current error message is:
OSError: [Errno 22] Invalid argument: 'file1.txt\n'.
This is my current code:
import os

def crawl(fname):
    infile = open(fname, 'r')
    if os.stat(fname)[6] == 0:
        return "Visiting {}".format(fname)
        infile.close()
    else:
        print ("Visiting {}".format(fname))
        lines = infile.read().splitlines()
        nextfile = lines[0].strip()
        for line in lines:
            crawl(nextfile)
Try:
import os

def crawl(fname):
    with open(fname, "r") as infile:
        print("Visiting {}".format(fname))
        if os.stat(fname).st_size:
            lines = infile.read().splitlines()
            for line in lines:
                crawl(line)
I'm pretty sure the problem is that you're getting a newline at the end of the filename you are reading from the first file. You can easily fix it, by using the strip method to remove the newline:
nextfile = lines[0].strip()
