Search string and replace value - python

Here is my code,
import os, os.path
import collections
import sys
import re
DIR_DAT = "dat"
DIR_OUTPUT = "output"
filenames = []
data = []
#in case if output folder doesn't exist
if not os.path.exists(DIR_OUTPUT):
os.makedirs(DIR_OUTPUT)
input_file = 'axcfgpasww-from-server.dat'
element = sys.argv[1]
output_value = sys.argv[2]
with open(input_file) as infile, open('axcfgpasww-modified.dat', "w") as outfile:
if element in open(input_file).read():
regex = re.findall("\s*([\S\s]+)", element)
outfile.write(regex[0])
print(regex[0])
else:
print('No match found')
The input_file :
CMD_VERS=2
CMD_TRNS=O
CMD_REINIT=N
CMD_ORDER=MAJECR
CMD_COMM=2590552
NUM_COMM:nNN0.7=2590552
I execute my script this way : modify_file.py NUM_COMM:nNN0.7 Hello world !
So if NUM_COMM:nNN0.7 exists in the file, it writes "NUM_COMM:nNN0.7" in a new axcfgpasww-modified.dat file.
But what I want to do, is execute my command as written above. And the result is the input file, with only the new value.
So my output file would be :
CMD_VERS=2
CMD_TRNS=O
CMD_REINIT=N
CMD_ORDER=MAJECR
CMD_COMM=2590552
NUM_COMM:nNN0.7=Hello world !
Can anyone help me on this ?
Thanks !

I have made some refactoring to your original code, and made it produce the output you seek,
import os, os.path
import collections
import sys
import re
DIR_DAT = "dat"
DIR_OUTPUT = "output"
filenames = []
data = []
found = False
#in case if output folder doesn't exist
if not os.path.exists(DIR_OUTPUT):
os.makedirs(DIR_OUTPUT)
input_file = 'axcfgpasww-from-server.dat'
element = sys.argv[1]
output_value = sys.argv[2]
with open(input_file) as infile:
for line in infile.readlines():
if element in line:
old_value = line.split("=")[1]
data.append(line.replace(old_value, output_value))
found = True
else:
data.append(line)
if not found:
print('No match found')
with open(input_file, 'w') as outfile:
for line in data:
outfile.write(line)
output:
CMD_VERS=2
CMD_TRNS=O
CMD_REINIT=N
CMD_ORDER=MAJECR
CMD_COMM=2590552
NUM_COMM:nNN0.7=Hello World!
Hope this helps

Related

Python looping through files and saving content to dict

I have the following code:
import os
import json
import ipaddress
iplist = []
ipiflist = []
mydict = {}
for filename in os.listdir('data/'):
with open(os.path.join('data/', filename), 'r') as f:
data = json.load(f)
mydict.update(data)
print(mydict)
In the data directory there are several JSON files that I open in this loop.
I update the dict in every loop and for this reason I get the following output:
{'ipif_1001': '10.10.160.129', 'ipif_1002': '10.10.160.142', 'ipif_1003': '10.10.160.169', 'ipif_1004': '10.10.160.173', 'ipif_3334': '10.10.160.194', 'IpIf3337': '10.10.160.126'}
{'ipif_1001': '10.10.160.129', 'ipif_1002': '10.10.160.142', 'ipif_1003': '10.10.160.170', 'ipif_1004': '10.10.160.174', 'ipif_3334': '10.10.160.194', 'IpIf3337': '10.10.160.126', 'ipif_1005': '10.10.160.178', 'ipif_1006': '10.10.160.182'}
{'ipif_1001': '10.10.160.129', 'ipif_1002': '10.10.160.142', 'ipif_1003': '10.10.160.170', 'ipif_1004': '10.10.160.174', 'ipif_3334': '10.10.160.194', 'IpIf3337': '10.10.160.126', 'ipif_1005': '10.10.160.178', 'ipif_1006': '10.10.160.182', 'IpIf1001': '10.10.160.138', 'IpIf1002': '10.10.160.141', 'IpIf1003': '10.10.160.153', 'IpIf1006': '10.10.160.181', 'IpIf_CPEDCN': '10.10.160.241', 'IpIf_DCNMgt': '10.10.191.253', 'ipif1164': '10.10.160.166', 'IpIf1010': '10.10.170.1'}
I only need the summarized output from the last loop. How can I only access this?
Thanks for your help
The for loop in python has an else statement, which will only be executed when the loop was successful. Thus there you can plot your last resut?
for filename in os.listdir('data/'):
with open(os.path.join('data/', filename), 'r') as f:
data = json.load(f)
mydict.update(data)
else:
print(mydict)
import os
import json
import ipaddress
iplist = []
ipiflist = []
mydict = {}
list = os.listdir('data/')
for filename in os.listdir('data/'):
with open(os.path.join('data/', filename), 'r') as f:
data = json.load(f)
if list[list.count-1] == filename: #check last filename in the directory with the current filename in the loop
mydict.update(data)
print(mydict)
Try it like this

How to pass a file as an argument in python

I am having some issues passing an argument in a python script to take a specific file like a csv, txt, or xml
I am reviewing python and would like some feedback on why I don't see any output after running the following command: ./my_script some3455.csv
#!/usr/bin/python
import sys
import csv
import xml.etree.ElementTree as ET
FILE = str(sys.argv[1])
def run_files():
if FILE == '*.csv'
run_csv()
elif FILE == '*.txt'
run_txt()
else
run_xml()
def run_csv():
csv_file = csv.register_dialect('dialect', delimiter = '|')
with open(FILE, 'r') as file:
reader = csv.reader(file, dialect='dialect')
for row in reader:
print(row)
def run_txt():
with open(FILE, 'r') as file:
txt_contents = file.read()
print(txt_contents)
def run_xml():
tree = ET.parse(FILE)
root = tree.getroot()
for child in root.findall('Attributes')
car = child.find('Car').text
color = child.find('Color').text
print(car, color)
I have tried to pass it as without the FILE but works just for one and the other file types doesn't get identify.
You need to use fnmatch and not == to compare a string with a glob pattern:
import fnmatch
def run_files():
if fnmatch.fnmatch(FILE, '*.csv'):
run_csv()
elif fnmatch.fnmatch(FILE, '*.txt'):
run_txt()
else:
run_xml()

How to open file, convert string and write to new file

I am trying to open a text file, remove certain words that have a ] after them, and then write the new contents to a new file. With the following code, new_content contains what I need, and a new file is created, but it's empty. I cannot figure out why. I've tried indenting differently and passing in an encoding type, with no luck. Any help greatly appreciated.
import glob
import os
import nltk, re, pprint
from nltk import word_tokenize, sent_tokenize
import pandas
import string
import collections
path = "/pathtofiles"
for file in glob.glob(os.path.join(path, '*.txt')):
if file.endswith(".txt"):
f = open(file, 'r')
flines = f.readlines()
for line in flines:
content = line.split()
for word in content:
if word.endswith(']'):
content.remove(word)
new_content = ' '.join(content)
f2 = open((file.rsplit( ".", 1 )[ 0 ] ) + "_preprocessed.txt", "w")
f2.write(new_content)
f.close
This should work #firefly. Happy to answer questions if you have them.
import glob
import os
path = "/pathtofiles"
for file in glob.glob(os.path.join(path, '*.txt')):
if file.endswith(".txt"):
with open(file, 'r') as f:
flines = f.readlines()
new_content = []
for line in flines:
content = line.split()
new_content_line = []
for word in content:
if not word.endswith(']'):
new_content_line.append(word)
new_content.append(' '.join(new_content_line))
f2 = open((file.rsplit( ".", 1 )[ 0 ] ) + "_preprocessed.txt", "w")
f2.write('\n'.join(new_content))
f.close
f2.close

Python: Issue when trying to read and write multiple files

This script reads and writes all the individual html files in a directory. The script reiterates, highlight and write the output.The issue is, after highlighting the last instance of the search item, the script removes all the remaining contents after the last search instance in the output of each file. Any help here is appreciated.
import os
import sys
import re
source = raw_input("Enter the source files path:")
listfiles = os.listdir(source)
for f in listfiles:
filepath = os.path.join(source+'\\'+f)
infile = open(filepath, 'r+')
source_content = infile.read()
color = ('red')
regex = re.compile(r"(\b in \b)|(\b be \b)|(\b by \b)|(\b user \b)|(\bmay\b)|(\bmight\b)|(\bwill\b)|(\b's\b)|(\bdon't\b)|(\bdoesn't\b)|(\bwon't\b)|(\bsupport\b)|(\bcan't\b)|(\bkill\b)|(\betc\b)|(\b NA \b)|(\bfollow\b)|(\bhang\b)|(\bbelow\b)", re.I)
i = 0; output = ""
for m in regex.finditer(source_content):
output += "".join([source_content[i:m.start()],
"<strong><span style='color:%s'>" % color[0:],
source_content[m.start():m.end()],
"</span></strong>"])
i = m.end()
outfile = open(filepath, 'w')
outfile.seek(0, 2)
outfile.write(output)
print "\nProcess Completed!\n"
infile.close()
outfile.close()
raw_input()
After your for loop is over, you need to include whatever is left after the last match:
...
i = m.end()
output += source_content[i:]) # Here's the end of your file
outfile = open(filepath, 'w')
...

python parse file

I have a file with username and emails, in this format :
pete,pbellyer#gmail.com
I want to only keep the email, so i thought about using a regex like this :
import re,sys
Mailfile = sys.argv[1]
file = open(Mailfile, "r")
for MAIL in file.readlines():
tmp = re.split("\n+", MAIL)
m = re.match( ',(.+)', MAIL)
m.group(0)
But then I don't know how to store the result in a file.
I always get the last email address in the new file.
What would be the best way to store the results in a file ?
Thanks!
import sys
infile, outfile = sys.argv[1], sys.argv[2]
with open(infile) as inf, open(outfile,"w") as outf:
line_words = (line.split(',') for line in inf)
outf.writelines(words[1].strip() + '\n' for words in line_words if len(words)>1)
You could use the csv module (since your data looks comma-separated, at least in your example):
import sys
import csv
with open('mail_addresses.txt', 'w') as outfile:
for row in csv.reader(open(sys.argv[1], 'rb')):
outfile.write("%s\n" % row[1])
Try something like this:
import sys
Mailfile = sys.argv[1]
Outfile = sys.argv[2]
try:
in_file = open(Mailfile, 'r')
out_file = open(Outfile, 'a')
for mail in in_file.readlines():
address = mail.split(',')[1].strip()
out_file.write(address+',') #if you want to use commas to seperate the files, else use something like \n to write a new line.
finally:
in_file.close()
out_file.close()

Categories