I have files in a directory that contain many lines like this:
cd /oasis/projects/nsf/ets100/
oconv /oasis/projects/nsf/ets100/rla
I would like to insert "sky/" so that the second line reads like this:
oconv /oasis/projects/nsf/sky/ets100/rla
What is the best way to do this?
I know that the beginning of the code will be something like this:
import os

path = r"c:\test"
for root, subFolders, files in os.walk(path):
    for file in files:
        with open(os.path.join(root, file), 'r') as fRead:
            for line in fRead:
                if line.startswith("oconv"):
                    # ... modify the line here
                    pass
You can do something like this:

import os

def replace_line(file):
    with open(os.path.abspath(file), 'r') as file_obj:
        data = file_obj.readlines()
    for index, line in enumerate(data):
        if line.startswith('oconv'):
            data[index] = line.replace('/nsf/', '/nsf/sky/')
    with open(os.path.abspath(file), 'w') as file_obj:
        file_obj.writelines(data)

path = r"c:\test"
for root, subFolders, files in os.walk(path):
    for file in files:
        replace_line(os.path.join(root, file))  # pass the full path, not just the filename
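If you prefer, the standard-library fileinput module can rewrite files in place. This is only a sketch of the same idea, not part of the original answer; the list of target paths is the same one os.walk would produce:

import fileinput
import os

path = r"c:\test"
targets = [os.path.join(root, name)
           for root, subFolders, files in os.walk(path)
           for name in files]

# inplace=True redirects print() back into each file being processed
for line in fileinput.input(targets, inplace=True):
    if line.startswith('oconv'):
        line = line.replace('/nsf/', '/nsf/sky/')
    print(line, end='')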
You can use a regex to update the string:

import os
import re

path = r"c:\test"
for root, subFolders, files in os.walk(path):
    for file in files:
        with open(os.path.join(root, file), 'r+') as fh:  # open with 'r+' to read and write
            content = fh.read()
            # insert "sky/" after "nsf/" on every oconv line
            new_content = re.sub(r'oconv /oasis/projects/nsf/(.*)',
                                 r'oconv /oasis/projects/nsf/sky/\1', content)
            fh.seek(0)
            fh.truncate()
            fh.write(new_content)
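As a quick sanity check (purely illustrative), the substitution behaves as expected on the sample line from the question:

import re

line = "oconv /oasis/projects/nsf/ets100/rla"
print(re.sub(r'oconv /oasis/projects/nsf/(.*)',
             r'oconv /oasis/projects/nsf/sky/\1', line))
# prints: oconv /oasis/projects/nsf/sky/ets100/rla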
I have text files in multiple folders (the folder names are the category/label names). I want to generate a CSV file (a dataset) that also has a column containing the label (folder name) for that category of text.
import csv
import os

folder = os.path.dirname("/home/jaideep/Desktop/folder/ML DS/Csv/Datasets/")
folder_list = os.listdir(folder)

with open("/home/jaideep/Desktop/folder/ML DS/Csv/data.csv", "w") as outfile:
    writer = csv.writer(outfile)
    writer.writerow(['Label', 'Email', 'Message'])
    for f in folder_list:
        file_list = os.listdir(folder + "/" + f + "/")
        print(file_list)
        for file in file_list:
            with open(file, "r") as infile:
                contents = infile.read()
                outfile.write(f + ',')
                outfile.write(contents)
But I'm getting this error:

    File "/home/jaideep/Desktop/folder/ML DS/Csv/Main.py", line 15, in <module>
        with open(file, "r") as infile:
    FileNotFoundError: [Errno 2] No such file or directory: 'file2.txt'
I know similar questions have been asked before, but I couldn't find a solution to my issue. Any help would be appreciated, thanks.
os.listdir only lists the filenames inside a directory, so you need to reconstruct the full path yourself.
You may also want to check out glob for this kind of task; a small glob-based sketch follows the corrected code below.
This version should solve your issue:
import csv
import os

folder = os.path.dirname("/home/jaideep/Desktop/folder/ML DS/Csv/Datasets/")
folder_list = os.listdir(folder)

with open("/home/jaideep/Desktop/folder/ML DS/Csv/data.csv", "w") as outfile:
    writer = csv.writer(outfile)
    writer.writerow(['Label', 'Email', 'Message'])
    for f in folder_list:
        file_list = os.listdir(os.path.join(folder, f))
        print(file_list)
        for file in file_list:
            # reconstruct the full path before opening the file
            with open(os.path.join(folder, f, file), "r") as infile:
                contents = infile.read()
                outfile.write(f + ',')
                outfile.write(contents)
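For reference, glob can also collect the full file paths directly. Here is a rough sketch of that approach; it is not part of the original answer, and the output file name data_glob.csv is just a placeholder:

import csv
import glob
import os

folder = "/home/jaideep/Desktop/folder/ML DS/Csv/Datasets"

with open("/home/jaideep/Desktop/folder/ML DS/Csv/data_glob.csv", "w") as outfile:
    writer = csv.writer(outfile)
    writer.writerow(['Label', 'Email', 'Message'])
    # '*/*' matches every file one level below the dataset folder,
    # and glob returns full paths, so no manual path reconstruction is needed
    for path in glob.glob(os.path.join(folder, '*', '*')):
        label = os.path.basename(os.path.dirname(path))  # the folder name is the label
        with open(path, 'r') as infile:
            outfile.write(label + ',')
            outfile.write(infile.read())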
I have a folder consisting of 7 subfolders, each with several text files inside. I intend to read through them and write each of those nested text files into a single file called ZebraAllRaw.txt. In the end, there must be only one file containing all the text that existed in each of those 7 subfolders.
This is the function I have written:
import os

def CombineFiles(folder):
    with open('D:/ZebraAllRaw.txt', 'a', encoding="utf-8") as OutFile:
        for root, dirs, files in os.walk(folder, topdown=False):
            for filename in files:
                file_path = os.path.join(root, filename)
                with open(file_path, 'r', encoding="utf-8") as f:
                    content = f.read()
                    new_content = content.replace('\n', '')
                    OutFile.write(new_content + "\n")
However, it seems that all the content is written into the new file 9 times, as if the files had been read more often than expected.
Make sure you don't append the output of different runs: with mode 'a', every run adds the full content again. I only replaced the append file mode with write in the open call:
import os

def CombineFiles(folder):
    with open('D:/ZebraAllRaw.txt', 'w', encoding="utf-8") as OutFile:  # mode "w", not "a"
        for root, dirs, files in os.walk(folder, topdown=False):
            for filename in files:
                file_path = os.path.join(root, filename)
                with open(file_path, 'r', encoding="utf-8") as f:
                    content = f.read()
                    new_content = content.replace('\n', '')
                    OutFile.write(new_content + "\n")
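A call would then look something like this (the folder path is only a placeholder for wherever the subfolders actually live):

# Hypothetical example call; point it at the directory that contains the subfolders.
CombineFiles('D:/ZebraCorpus')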
I am trying to write a script that counts the lines of code in a project. The problem is that it doesn't find all the files. The script looks like this:
import os

root = r"C:\Users\username\data\projects\test"
allLines = []

for path, subdirs, files in os.walk(root):
    for name in files:
        filepath = os.path.join(path, name)
        if not filepath.endswith(('.cs', '.vb')):
            break
        with open(filepath) as f:
            lines = f.read().splitlines()
            for line in lines:
                allLines.append(line)

print(len(allLines))
What's wrong with the code?
In your case the issue is the break: if a file doesn't end with .cs or .vb, you skip the remaining files of that directory entirely. You need to change it to continue, as follows:
import os

root = r"C:\Users\frank\Daten\Projekte\CS\mpv.net"
allLines = []

for path, subdirs, files in os.walk(root):
    for name in files:
        filepath = os.path.join(path, name)
        if not filepath.endswith(('.cs', '.vb')):
            continue  # skip only this file, not the rest of the directory
        with open(filepath) as f:
            lines = f.read().splitlines()
            for line in lines:
                allLines.append(line)

print(len(allLines))
This code can also be improved a bit, counting the lines directly instead of collecting them in a list:

import os

root = r"C:\Users\frank\Daten\Projekte\CS\mpv.net"
allLines = 0

for path, subdirs, files in os.walk(root):
    for name in files:
        filepath = os.path.join(path, name)
        if not filepath.endswith(('.cs', '.vb')):
            continue
        with open(filepath) as f:
            allLines += len(f.read().splitlines())

print(allLines)
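For comparison, pathlib can do the same count with rglob. This is only a sketch of an alternative, using the root path from the question, and is not part of the original answer:

from pathlib import Path

root = Path(r"C:\Users\username\data\projects\test")

# rglob('*') walks the tree recursively; suffix filtering replaces the endswith check
total = sum(
    len(p.read_text(errors="ignore").splitlines())
    for p in root.rglob('*')
    if p.suffix in ('.cs', '.vb')
)
print(total)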
I have a folder with multiple files like so:
1980
1981
1982
Each of these files contains some text. I want to loop through the files, apply some operation to each one, save the edited file to another folder, and then move on to the next file. The result would be the original folder plus another folder holding an edited version of each file, like so:
1980_filtered
1981_filtered
1982_filtered
Is it possible to do this?
Currently I have some code that loops through the files in a folder, applies some filtering to each file, and then saves all the edits into one massive file. Here is my code:
import os

input_location = 'C:/Users/User/Desktop/mini_mouse'
output_location = 'C:/Users/User/Desktop/filter_mini_mouse/mouse'

for root, dir, files in os.walk(input_location):
    for file in files:
        os.chdir(input_location)
        with open(file, 'r') as f, open('NLTK-stop-word-list', 'r') as f2:
            mouse_file = f.read().split()  # reads file and splits it into a list
            stopwords = f2.read().split()
            x = (' '.join(i for i in mouse_file if i.lower() not in (x.lower() for x in stopwords)))
        with open(output_location, 'a') as output_file:
            output_file.write(x)
Any help would be greatly appreciated!
You need to specify what each new file should be called. Python has good string-formatting methods for this, and your desired file names are easy to build inside the loop:
import os

input_location = 'C:/Users/User/Desktop/mini_mouse'
output_location = 'C:/Users/User/Desktop/filter_mini_mouse/mouse'

for root, dir, files in os.walk(input_location):
    for file in files:
        new_file = "{}_filtered.txt".format(file)
        os.chdir(input_location)
        with open(file, 'r') as f, open('NLTK-stop-word-list', 'r') as f2:
            mouse_file = f.read().split()
            stopwords = f2.read().split()
            x = (' '.join(i for i in mouse_file if i.lower() not in (x.lower() for x in stopwords)))
        with open(output_location + '/' + new_file, 'w') as output_file:  # Changed 'append' to 'write'
            output_file.write(x)
If you're on Python 3.6 or newer, you can use f-strings instead:

new_file = f"{file}_filtered.txt"

and

with open(f"{output_location}/{new_file}", 'w') as output_file:
    output_file.write(x)
First of all, you should open the NLTK-stop-word-list only once, so I moved it outside of your loops. Second, os.chdir() is redundant: you can use os.path.join() to build the current file path (and to construct the new file path):
import os

input_location = 'C:/Users/User/Desktop/mini_mouse'
output_location = 'C:/Users/User/Desktop/filter_mini_mouse/'
stop_words_path = 'C:/Users/User/Desktop/NLTK-stop-word-list.txt'

with open(stop_words_path, 'r') as stop_words:
    stopwords = stop_words.read().split()  # read the stop-word list once; a second read() would return an empty string

for root, dirs, files in os.walk(input_location):
    for name in files:
        file_path = os.path.join(root, name)
        with open(file_path, 'r') as f:
            mouse_file = f.read().split()  # reads file and splits it into a list
        x = (' '.join(i for i in mouse_file if i.lower() not in (x.lower() for x in stopwords)))
        new_file_path = os.path.join(output_location, name) + '_filtered'
        with open(new_file_path, 'a') as output_file:
            output_file.write(x)
P.S.: I took the liberty of changing some of your variable names, since they shadow Python's built-in names ('file' and 'dir'). If you run __builtins__.__dict__.keys() you will see them there (in Python 3, 'dir' is still a built-in, while 'file' was one in Python 2).
I am attempting to move PDF files from sub-directories in a folder. The code below works and moves every PDF it finds. I would like to move only the PDF files that match a number from a text file, which I read with this code:
with open('LIST.txt', 'r') as f:
    myNames = [line.strip() for line in f]
print(myNames)
Full code:
import os
import shutil

with open('LIST.txt', 'r') as f:
    myNames = [line.strip() for line in f]
print(myNames)

dir_src = r"C:\Users\user\Desktop\oldfolder"
dir_dst = r"C:\Users\user\Desktop\newfolder"

for dirpath, dirs, files in os.walk(dir_src):
    for file in files:
        if file.endswith(".pdf"):
            shutil.copy(os.path.join(dirpath, file), dir_dst)
Example of the text file's content:
111111
111112
111113
111114
First, create a set instead of a list here so lookup will be faster:
myNames = {line.strip() for line in f}
Then for the filter, I assume that myNames must match the basename (minus extension) of your file(s). So instead of:
if file.endswith(".pdf"):
    shutil.copy(os.path.join(dirpath, file), dir_dst)
check the extension and if the basename minus extension belongs to your previously created set:
bn, ext = os.path.splitext(file)
if ext == ".pdf" and bn in myNames:
    shutil.copy(os.path.join(dirpath, file), dir_dst)
If instead you need to match filenames that merely contain one of the strings in myNames as a substring, a plain membership test is not enough; you can do:
if ext == ".pdf" and any(s in file for s in myNames):