I have a dataset with 5 folders, each containing 100 .txt files. In the code below I loop through every file and remove certain words listed in my StopWords.txt file.
After removing the words I append the output to a single file (filteredtext.txt), but I want the output to mirror my dataset's structure exactly (5 folders, each with 100 .txt files).
This is my code.
import re
import os

# insert stopwords files
stopwordfile = open("StopWords.txt", encoding='utf-8')

# Use this to read file content as a stream:
readstopword = stopwordfile.read()
stop_words = readstopword.split()

# file path to dataset
for path, _, files in os.walk("sinhala-set1"):
    for file_name in files:
        filepath = os.path.join(path, file_name)
        print(f"Checking --> {filepath}")
        file1 = open(filepath, encoding='utf-8')
        # Use this to read file content as a stream:
        line = file1.read()
        words = line.split()

        for r in words:
            if not r in stop_words:
                appendFile = open('filteredtext.txt', 'a', encoding='utf-8')
                appendFile.write(" " + r)
                appendFile.close()
You are appending to the same file because you open it in append mode every time: appendFile = open('filteredtext.txt', 'a', encoding='utf-8'). If you want a separate output file for each input file, open a different file like this:

output_file = open('output_' + file_name, 'w', encoding='utf-8')
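If you also want the output to mirror the dataset's layout (5 folders with 100 filtered files each) rather than one big file, you can rebuild each file's relative path under a separate output root. A minimal sketch, assuming an output directory named filtered-set1 (the name is just an example) and the stop_words list from your code:

import os

input_root = "sinhala-set1"
output_root = "filtered-set1"  # hypothetical output directory

for path, _, files in os.walk(input_root):
    for file_name in files:
        # Recreate the same sub-folder layout under the output root
        rel_dir = os.path.relpath(path, input_root)
        out_dir = os.path.join(output_root, rel_dir)
        os.makedirs(out_dir, exist_ok=True)

        with open(os.path.join(path, file_name), encoding='utf-8') as f:
            words = f.read().split()
        filtered = ' '.join(w for w in words if w not in stop_words)

        with open(os.path.join(out_dir, file_name), 'w', encoding='utf-8') as out:
            out.write(filtered)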
I am trying to extract only the numbers from a particular row of 10 different text files. As output, I want those numbers collected into a list. I'm a new learner and would appreciate your help.
I tried this, but it doesn't work:
import os
import sys, re

line_number = 69
path = r'C:\Users\arpan\OneDrive\Desktop\New folder'

for filename in os.listdir(path):
    with open(os.path.join(path, filename), 'r') as f:
        #print (filename)
        file = open(filename)
        all_lines_variable = file.readlines()
        sys.stdout = open("output", "a")  # print output file
        print(filename, all_lines_variable[line_number])
        sys.stdout.close()
You can try this script: it extracts the line at index 69 from every file in the folder and writes the numbers found on it, one line per file, to output.txt:
import os
import re

line_number = 69
path = r'C:\Users\arpan\OneDrive\Desktop\New folder'

with open('output.txt', 'w') as f_out:
    for file in os.listdir(path):
        with open(os.path.join(path, file), 'r') as f_in:
            lines = f_in.readlines()
            print(' '.join(re.findall(r'\d+', lines[line_number])), file=f_out)
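Since you said you want the numbers collected as a list, here is a small variation of the same idea; this sketch gathers one combined Python list of ints across all files instead of writing to a file:

import os
import re

line_number = 69
path = r'C:\Users\arpan\OneDrive\Desktop\New folder'

numbers = []  # collected across all files
for file in os.listdir(path):
    with open(os.path.join(path, file), 'r') as f_in:
        lines = f_in.readlines()
        # Extract every digit run on the target line and keep them as ints
        numbers.extend(int(n) for n in re.findall(r'\d+', lines[line_number]))

print(numbers)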
I have a folder that contains 5 different folders, where each folder has 50 email documents belonging to a particular topic (so there are a total of 5 topics/classes).
Train two classifiers – a Decision Tree and an SVC (with linear kernel). Report micro-averaged and macro-averaged F1-scores of 10-fold cross-validation. You may need to preprocess the data, prune the decision tree, and find good C values for the SVC.
Can you help me with a table containing the micro-averaged and macro-averaged F1-scores?
I tried putting the mails from each folder into one txt file, but the decision tree step still fails and I'm unable to get the results.
Should I put all the files in a given folder into one text file?
import glob

path = ("C:/Users/*******/DS Assign/toclassify/cwx/*")
files = glob.glob(path)
#print(files)
with open("C:/Users/*******/DS Assign/toclassify/cwx.txt", "w") as outfile:
    for f in files:
        with open(f) as infile:
            for line in infile:
                outfile.write(line)

path = ("C:/Users/*******/DS Assign/toclassify/ra/*")
files = glob.glob(path)
#print(files)
with open("C:/Users/*******/DS Assign/toclassify/ra.txt", "w") as outfile:
    for f in files:
        with open(f) as infile:
            for line in infile:
                outfile.write(line)

path = ("C:/Users/*******/DS Assign/toclassify/rsh/*")
files = glob.glob(path)
#print(files)
with open("C:/Users/*******/DS Assign/toclassify/rsh.txt", "w") as outfile:
    for f in files:
        with open(f) as infile:
            for line in infile:
                outfile.write(line)

path = ("C:/Users/*******/DS Assign/toclassify/src/*")
files = glob.glob(path)
#print(files)
with open("C:/Users/*******/DS Assign/toclassify/src.txt", "w") as outfile:
    for f in files:
        with open(f) as infile:
            for line in infile:
                outfile.write(line)

path = ("C:/Users/*******/DS Assign/toclassify/tpm/*")
files = glob.glob(path)
#print(files)
with open("C:/Users/*******/DS Assign/toclassify/tpm.txt", "w") as outfile:
    for f in files:
        with open(f) as infile:
            for line in infile:
                outfile.write(line)
import os
import pandas as pd

data_dir = os.path.join('.', 'data')
data_ids = []
data_txt = []

# Create a helper function to read the data from a particular folder and file
def get_data(file_name, folder_dir):
    file_path = os.path.join(folder_dir, file_name)
    return open(file_path, 'r').read()

# Loop through each folder in the data directory
for folder in os.listdir(data_dir):
    # Create the folder directory from the data directory
    folder_dir = os.path.join(data_dir, folder)
    # Store the IDs of each file in the particular folder directory into a list
    data_ids += os.listdir(folder_dir)
    # Using list comprehension to create a list of the text contained in each file
    # for a particular ID in the folder directory
    data_txt += [get_data(data_id, folder_dir) for data_id in os.listdir(folder_dir)]

# Store into a Pandas dataframe for easy integration into modelling packages
df = pd.DataFrame({
    'id': data_ids,
    'text': data_txt
})
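For the modelling step you also need a class label per document; the folder name serves naturally as that label. A minimal sketch of the cross-validation, assuming scikit-learn is available and that a data_labels list was collected alongside data_ids in the loop above (one folder name per file):

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import cross_val_score
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier

# Assumed extension of the loop above, inside the `for folder ...` body:
#     data_labels += [folder] * len(os.listdir(folder_dir))
df['label'] = data_labels

# Turn the raw text into TF-IDF features
X = TfidfVectorizer(stop_words='english').fit_transform(df['text'])
y = df['label']

# max_depth and C are placeholder values to tune, not recommendations
for name, clf in [('Decision Tree', DecisionTreeClassifier(max_depth=20)),
                  ('SVC (linear)', LinearSVC(C=1.0))]:
    micro = cross_val_score(clf, X, y, cv=10, scoring='f1_micro').mean()
    macro = cross_val_score(clf, X, y, cv=10, scoring='f1_macro').mean()
    print('{}: micro-F1 = {:.3f}, macro-F1 = {:.3f}'.format(name, micro, macro))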
I have a folder consisting of 7 subfolders, each containing several text files. I intend to read through them and write each of those nested text files into a single file called ZebraAllRaw.txt. In the end, there must be only one file containing all the text that existed in each of those 7 subfolders.
This is the function I have written:
def CombineFiles(folder):
    with open('D:/ZebraAllRaw.txt', 'a', encoding="utf-8") as OutFile:
        for root, dirs, files in os.walk(folder, topdown=False):
            for filename in files:
                file_path = os.path.join(root, filename)
                with open(file_path, 'r', encoding="utf-8") as f:
                    content = f.read()
                    new_content = content.replace('\n', '')
                    OutFile.write(new_content + "\n")
However, it seems that all the content is written into the new file 9 times, as if the files had been read more often than expected.
Make sure you don't append output from different runs: the file is opened in append mode ('a'), so every run of the function adds another full copy of the content to ZebraAllRaw.txt.
I only replaced the append file mode with write in the open call:
def CombineFiles(folder):
    with open('D:/ZebraAllRaw.txt', 'w', encoding="utf-8") as OutFile:  # mode "w", not "a"
        for root, dirs, files in os.walk(folder, topdown=False):
            for filename in files:
                file_path = os.path.join(root, filename)
                with open(file_path, 'r', encoding="utf-8") as f:
                    content = f.read()
                    new_content = content.replace('\n', '')
                    OutFile.write(new_content + "\n")
I have a folder with multiple files like so:
1980
1981
1982
In each of these files is some text. I want to loop through each of these files, do some operation to each one, save the edited file to another folder, and move on to the next file. The result would be that I have the original folder and then another folder with the edited version of each file in it, like so:
1980_filtered
1981_filtered
1982_filtered
Is it possible to do this?
Currently I have some code that loops through the files in a folder, does some filtering to each file and then saves all the edits of each file into one massive file. Here is my code:
import os

input_location = 'C:/Users/User/Desktop/mini_mouse'
output_location = 'C:/Users/User/Desktop/filter_mini_mouse/mouse'

for root, dir, files in os.walk(input_location):
    for file in files:
        os.chdir(input_location)
        with open(file, 'r') as f, open('NLTK-stop-word-list', 'r') as f2:
            mouse_file = f.read().split()  # reads file and splits it into a list
            stopwords = f2.read().split()
            x = (' '.join(i for i in mouse_file if i.lower() not in (x.lower() for x in stopwords)))
        with open(output_location, 'a') as output_file:
            output_file.write(x)
Any help would be greatly appreciated!
You need to specify what each new file is called. Python has some good string-formatting methods for this, and fortunately your desired new file names are easy to build in a loop:
import os

input_location = 'C:/Users/User/Desktop/mini_mouse'
output_location = 'C:/Users/User/Desktop/filter_mini_mouse/mouse'

for root, dir, files in os.walk(input_location):
    for file in files:
        new_file = "{}_filtered.txt".format(file)
        os.chdir(input_location)
        with open(file, 'r') as f, open('NLTK-stop-word-list', 'r') as f2:
            mouse_file = f.read().split()
            stopwords = f2.read().split()
            x = (' '.join(i for i in mouse_file if i.lower() not in (x.lower() for x in stopwords)))
        with open(output_location + '/' + new_file, 'w') as output_file:  # Changed 'append' to 'write'
            output_file.write(x)
If you're on Python 3.6 or later, you can use f-strings:
new_file = f"{file}_filtered.txt"
and
with open(f"{output_location}/{new_file}", 'w') as output_file:
    output_file.write(x)
First of all, you should open the NLTK-stop-word-list only once, so I moved it outside of your loops. Second, os.chdir() is redundant; you can use os.path.join() to get your current file path (and to construct your new file path):
import os

input_location = 'C:/Users/User/Desktop/mini_mouse'
output_location = 'C:/Users/User/Desktop/filter_mini_mouse/'
stop_words_path = 'C:/Users/User/Desktop/NLTK-stop-word-list.txt'

# Read the stop-word list once; calling read() inside the loop would
# return an empty string for every file after the first
with open(stop_words_path, 'r') as stop_words:
    stopwords = stop_words.read().split()

for root, dirs, files in os.walk(input_location):
    for name in files:
        file_path = os.path.join(root, name)
        with open(file_path, 'r') as f:
            mouse_file = f.read().split()  # reads file and splits it into a list
        x = ' '.join(i for i in mouse_file if i.lower() not in (s.lower() for s in stopwords))
        new_file_path = os.path.join(output_location, name) + '_filtered'
        with open(new_file_path, 'w') as output_file:  # 'w' so reruns don't append duplicates
            output_file.write(x)
P.S.: I took the liberty of changing some of your variable names, as they shadow Python built-in names ('file' and 'dir'). If you run __builtins__.__dict__.keys() you'll see them there.
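For what it's worth, the same idea also fits compactly with pathlib; a sketch under the same folder-layout assumptions as above:

from pathlib import Path

input_location = Path('C:/Users/User/Desktop/mini_mouse')
output_location = Path('C:/Users/User/Desktop/filter_mini_mouse')
stop_words_path = Path('C:/Users/User/Desktop/NLTK-stop-word-list.txt')

# Lower-case the stop words once, into a set for fast membership tests
stopwords = {w.lower() for w in stop_words_path.read_text().split()}

output_location.mkdir(parents=True, exist_ok=True)
for path in input_location.rglob('*'):
    if path.is_file():
        words = path.read_text().split()
        filtered = ' '.join(w for w in words if w.lower() not in stopwords)
        (output_location / (path.name + '_filtered')).write_text(filtered)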
I have many files ('*.pl-pl'). My script has to find each of these files and merge them into one xlsx file using openpyxl.
Now I want to rebuild those files; I want to reconstruct the same files as the originals.
But there is a problem after writing
(the content variable contains the content of one file, read from one Excel cell):
with open(path, 'w') as f:
    f.write(content.encode('utf-8'))
So now I check whether the original files are the same as the new files. The text in those files seems to be the same, but there are small differences in size. When I use the WinDiff application to compare them, it finds some tuples that are different, but says they differ only in blanks.
Could you give me advice on how to rebuild those files so they are identical to the originals?
Or is this approach correct?
Note: I am rebuilding them to be sure that the encoding etc. will stay the same, because the merged Excel file will be used for translation, and the translated files then have to be rebuilt in place of the originals.
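One likely source of the whitespace-only differences is newline handling: '\n'.join(lines) never emits a trailing newline, so a file that originally ended with '\n' is rebuilt one byte shorter, and any '\r\n' endings are normalized by text mode. A small sketch of a byte-level comparison that makes such differences visible (a checking aid, not part of the pipeline above):

def files_identical(original_path, rebuilt_path):
    # Compare raw bytes so newline style ('\r\n' vs '\n'),
    # trailing newlines and encoding differences all show up
    with open(original_path, 'rb') as a, open(rebuilt_path, 'rb') as b:
        return a.read() == b.read()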
Here is the code. It walks the directory and prints all file names and contents into one temporary file. Then it creates an Excel file: the 1st column is the path (to be able to reconstruct the directory tree) and the 2nd column contains the content of the file, with newlines replaced by '*=*'.
def print_to_file():
    import os
    for root, dirs, files in os.walk("OriginalDir"):
        for file in files:
            text = []
            if file.endswith(".pl-pl"):
                abs_path = os.path.join(root, file)
                with open(abs_path) as f:
                    for line in f:
                        text.append(line.strip('\n'))
                mLib.printToFile('files.mdoc', abs_path + '::' + '*=*'.join(text))  # '*=*' represents '\n'
def write_it():
    from openpyxl import Workbook
    import xlsxwriter

    file = 'files.mdoc'
    workbook = Workbook()
    worksheet = workbook.worksheets[0]
    worksheet.title = "Translate"
    i = 0
    with open(file) as f:
        classes = set()
        for line in f:
            i += 1
            splitted = line.strip('\n').split('::')
            name = splitted[0]
            text = splitted[1].split('*=*')
            text = [x.encode('string-escape') for x in text]
            worksheet.cell('B{}'.format(i)).style.alignment.wrap_text = True
            worksheet.cell('B{}'.format(i)).value = splitted[1]
            worksheet.cell('A{}'.format(i)).value = splitted[0]
    workbook.save('wrap_text1.xlsx')
import openpyxl

def rebuild():
    wb = openpyxl.load_workbook('wrap_text1.xlsx')
    ws = wb.worksheets[0]
    row_count = ws.get_highest_row()
    for i in xrange(1, row_count + 1):
        dir_file = ws.cell('A{}'.format(i)).value
        content = ws.cell('B{}'.format(i)).value
        remake(dir_file, content)
import os
import re

def remake(path, content):
    content = re.sub('\*=\*', '\n', content)
    result = ''
    splt = path.split('\\')
    file = splt[-1]
    for dir in splt[:-1]:
        result += dir + '/'
        # print result
        if not os.path.isdir(result):
            # print result
            os.mkdir(result)
    with open(path, 'w') as f:
        f.write(content.encode('utf-8'))
# print_to_file()  # print to temp file - paths and contents separated by '::'
# write_it()       # write it into the excel file
# rebuild()        # reconstruct the directory