How to replace text in the multiple svg files using python - python

I need to alter the text in the multiple svg files please help me out from this.
for filename in os.listdir(dirname):
filename = dirname+filename
#print(filename)
#for filename in pprint(dirlist):
if filename.endswith(".svg"):
with open(filename) as f:
data = f.read()
data = data.replace('NS', 'NSSS')
f.close()
f = open(/files/file.svg "wt")
f.write(data)
f.close()

import os
import re
for filename in os.listdir(dirname):
if filename.endswith(".svg"):
filename = os.path.join(dirname, filename)
with open(filename) as f:
data = f.read()
data = re.sub(r"NS", r"NSSS", data)
with open('/files/file.svg', 'w') as f:
f.write(data)

Related

Go through files in given directory with python, read each file line by line and remove first and last string in the line and save updated file

So I have some .txt files inside of directory. Each .txt file contains some paths like:
'C:\d\folder\project\folder\Folder1\Folder2\Folder3\Module.c'
'C:\d\folder\project\folder\Folder1\Folder2\Folder3\Module2.c'
'C:\d\folder\project\folder\Folder1\Folder2\Folder3\Module3.c'
I need just some small function that will go through each line of each file inside of a dir and remove there ', so only clear path is left like:
C:\d\folder\project\folder\Folder1\Folder2\Folder3\Module.c
C:\d\folder\project\folder\Folder1\Folder2\Folder3\Module2.c
C:\d\folder\project\folder\Folder1\Folder2\Folder3\Module3.c
My code at the moment is:
for filename in files:
with open(filename, 'r') as file:
content = file.read().split('\n')
for line in content:
if line.startswith('')and line.endswith(''):
remove('')
Please assist!
SOLUTION:
I have managed to find a solution with a bit different approach:
for filename in files:
f = open(filename, 'rt')
filedata = f.read()
filedata = filedata.replace("'","")
f.close()
f = open(filename, 'wt')
f.write(filedata)
f.close()
Thanks!
python has a hirarchy to strings ', ", "" and so on so you can wrap a uptick into quotes for a split. Since we have the first element '' before the tick the second is your path
line.split("'")[1]
Edit: If i understood you correctly you want this
for filename in files:
paths = []
with open(filename, 'r') as file:
content = file.read().split('\n')
for line in content:
paths.append(line.split("'")[1])
file.close()
with open(filename, 'w') as file:
file.writelines(paths)
file.close()
Soo I just did bit different approach and managed to find a solution:
for filename in files:
f = open(filename, 'rt')
filedata = f.read()
filedata = filedata.replace("'","")
f.close()
f = open(filename, 'wt')
f.write(filedata)
f.close()
Thanks guys anyway!

Concatenate all files that map values in the same key

I have a dictionnary that group different pattern :
dico_cluster={'cluster_1': ['CUX2', 'CUX1'], 'cluster_2': ['RFX3', 'RFX2'],'cluster_3': ['REST']}
Then I have files in a folder :
"/path/to/test/files/CUX1.txt"
"/path/to/test/files/CUX2.txt"
"/path/to/test/files/RFX3.txt"
"/path/to/test/files/RFX2.txt"
"/path/to/test/files/REST.txt"
"/path/to/test/files/ZEB.txt"
"/path/to/test/files/TEST.txt"
I'm trying to concatenate the files that are in the same cluster. The output file name should be the name of pattern join by underscore "_"
I tried this :
filenames = glob.glob('/path/to/test/files/*.txt')
for clee in dico_cluster.keys():
fname='_'.join(dico_cluster[clee])
outfilename ='/path/to/test/outfiles/'+ fname + ".txt"
for file in filenames:
tf_file=file.split('/')[-1].split('.')[0]
if tf_file in dico_cluster[clee]:
with open(outfilename, 'wb') as outfile:
for filename in filenames:
if filename == outfilename:
# don't want to copy the output into the output
continue
with open(filename, 'rb') as readfile:
shutil.copyfileobj(readfile, outfile)
But it's not working. I'm just concatenating all the files.
I want to cat the file that are in the same cluster.
I would recommend to use os package, it's easier to use.
If I understood your problem I would try to do this by loading the whole content of your files before writing it.
import os
for clee in dico_cluster.keys():
my_clusters =list(set(dico_cluster[clee]))
fname = "_".join(my_clusters)
data = list()
outfilename = os.path.join("/path/to/test/outfiles", fname + ".txt")
for file in filenames:
tmp_dict = dict()
tf_file = os.path.basename(file).split(".")[0]
if tf_file in my_clusters:
with open(file, 'rb') as f1:
data.extend([elm for elm in f1.readlines()])
with open(outfilename, "wb") as _output_file:
for elm in data:
_output_file.write(elm)

How to get number only values from a specific row from different text file

I am trying to get only numbers from a particular row from 10 different text files. As an output, I want those numbers appended as a list. I'm a new learner. I would appreciate your help.
tried this one but not working
import os
import sys,re
line_number=69
path = r'C:\Users\arpan\OneDrive\Desktop\New folder'
for filename in os.listdir(path):
with open(os.path.join(path, filename), 'r') as f:
#print (filename)
file = open(filename)
all_lines_variable = file.readlines()
sys.stdout = open("output", "a") #print output file
print(filename, all_lines_variable[line_number])
sys.stdout.close()
You can try this script, it will extract from all files line number 69 and then appends it to output.txt file:
import os
import re
line_number=69
path = r'C:\Users\arpan\OneDrive\Desktop\New folder'
with open('output.txt', 'w') as f_out:
for file in os.listdir(path):
with open(os.path.join(path, file), 'r') as f_in:
lines = f_in.readlines()
print(' '.join(re.findall(r'\d+', lines[line_number])), file=f_out)

How to show filenames in csv format

I am fetching file names from my local file directory and there are 411 items there but after printing it shows me file names and I am not able to count them in python and second problem is by saving in CSV format, it doesn't show me all the file names? Where is my mistake?
import os
FOLDER_PATH = '/home/bilal/Documents/Books/English/'
def listDir(dir):
fileNames = os.listdir(dir)
for fileName in fileNames:
my_file = open('my_file.csv', 'w')
my_file.write(fileName)
# print('File Name: ' + fileName)
if __name__ == '__main__':
listDir(FOLDER_PATH)
import glob
path = r"/home/bilal/Documents/Books/English/*.csv"
my_file = open('my_file.csv', 'w')
fileList = list(map(os.path.basename, glob.glob(path)))
for filename in fileList:
print(filename)
my_file.write(filename)
my_file.close()
or
import glob
path = r"/home/bilal/Documents/Books/English/*.csv"
with open('my_file.csv', 'w') as my_file:
fileList = list(map(os.path.basename, glob.glob(path)))
for filename in fileList:
print(filename)
my_file.write(filename)

batch process text to csv using python

I need some help with converting a number of text files to csv files. All my text files are in one folder and I want to convert them to csv files into another folder. The names of individual files should remain the same. Below is the script I got so far...converting an individual file works fine but to work on all the files within a folder is where I am stuck. Any help will be appreciated.
import csv
import os
directory = raw_input("INPUT Folder:")
output = raw_input("OUTPUT Folder")
txt_files = directory
csv_files = output
try:
for txt_file in txt_files:
in_txt = csv.reader(open(txt_file, "rb"), delimiter = '=')
for csv_file in csv_files:
out_csv = csv.writer(open(csv_file, 'wb'))
out_csv.writerows(in_txt)
except:
print ()
glob.glob() is perfectly suited for the task. Also, use with context manager when working with files:
import csv
import glob
import os
directory = raw_input("INPUT Folder:")
output = raw_input("OUTPUT Folder:")
txt_files = os.path.join(directory, '*.txt')
for txt_file in glob.glob(txt_files):
with open(txt_file, "rb") as input_file:
in_txt = csv.reader(input_file, delimiter='=')
filename = os.path.splitext(os.path.basename(txt_file))[0] + '.csv'
with open(os.path.join(output, filename), 'wb') as output_file:
out_csv = csv.writer(output_file)
out_csv.writerows(in_txt)

Categories