I'm trying to remove a key/value pair if the key contains 'empty' values.
I have tried the following dictionary comprehension and tried doing it in long form, but it doesn't seem to actually do anything and I get no errors.
def get_Otherfiles():
regs = ["(.*)((U|u)ser(.*))(\s=\s\W\w+\W)", "(.*)((U|u)ser(.*))(\s=\s\w+)", "(.*)((P|p)ass(.*))\s=\s(\W(.*)\W)", "(.*)((P|p)ass(.*))(\s=\s\W\w+\W)"]
combined = "(" + ")|(".join(regs) + ")"
cred_results = []
creds = []
un_matched = []
filesfound = []
d = {}
for root, dirs, files in os.walk(dir):
for filename in files:
if filename.endswith(('.bat', '.vbs', '.ps', '.txt')):
readfile = open(os.path.join(root, filename), "r")
d.setdefault(filename, [])
for line in readfile:
m = re.match(combined, line)
if m:
d[filename].append(m.group(0).rstrip())
else:
pass
result = d.copy()
result.update((k, v) for k, v in d.iteritems() if v is not None)
print result
Current output:
{'debug.txt': [], 'logonscript1.vbs': ['strUser = "guytom"', 'strPassword = "P#ssw0rd1"'], 'logonscript2.bat': ['strUsername = "guytom2"', 'strPass = "SECRETPASSWORD"']}
As you can see I have entries with empty values. I'd like to remove these before printing the data.
In this part of your code:
d.setdefault(filename, [])
for line in readfile:
m = re.match(combined, line)
if m:
d[filename].append(m.group(0).rstrip())
else:
pass
You always add filename as a key to the dictionary, even if you don't subsequently add anything to the resulting list. Try
for line in read file:
m = re.match(combined, line)
if m:
d.setdefault(filename, []).append(m.group(0).rstrip())
which will only initialize d[filename] to an empty list if it is actually necessary to have something on which to call append.
result = dict((k, v) for k, v in d.iteritems() if v is not None)
update wont remove entries ... it will only add or change
a = {"1":2}
a.update({"2":7})
print a # contains both "1" and "2" keys
Looking at the first matching group in your regex, (.*), if the regex matches but there are no characters to match, group(0) is "", not None. So, you can filter there.
result.update((k, v) for k, v in d.iteritems() if not v)
But you can also have your regex do that part for you. Change that first group to (.+) and you won't have empty values to filter out.
EDIT
Instead of removing empty values at the end, you can avoid adding them to the dict altogether.
def get_Otherfiles():
# fixes: make it a raw string so that \s works right and
# tighten up filtering, ... (U|u) should probably be [Uu] ...
regs = ["(.+)\s*((U|u)ser(.*))(\s=\s\W\w+\W)", "(.*)((U|u)ser(.*))(\s=\s\w+)", "(.*)((P|p)ass(.*))\s=\s(\W(.*)\W)", "(.*)((P|p)ass(.*))(\s=\s\W\w+\W)"]
combined = "(" + ")|(".join(regs) + ")"
cred_results = []
creds = []
un_matched = []
filesfound = []
d = {}
for root, dirs, files in os.walk(dir):
for filename in files:
if filename.endswith(('.bat', '.vbs', '.ps', '.txt')):
readfile = open(os.path.join(root, filename), "r")
# assuming you want to aggregate matching file names...
content_list = d.get(filename, [])
content_orig_len = len(content_list)
for line in readfile:
m = re.match(combined, line)
if m:
content_list.append(m.group(0))
if len(content_list) > content_orig_len:
d[filename] = content_list
Related
I am trying to solve the problem (for example)
I have a name file
VOG00001
VOG00002
VOG00004
and database file
VOG00001!962!834!Xu!sp|O31936|YOPB_BACSU_Putative_antitoxin_YopB
VOG00002!206!17!Xh!sp|Q5UPJ9|YL122_MIMIV_Putative_ankyrin_repeat_protein_L12
VOG00003!1284!960!Xr!sp|O22001|VXIS_BPMD2_Excisionase
VOG00004!353!304!Xu!sp|P03795|Y28_BPT7_Protein_2.8
VOG00005!253!60!Xu!REFSEQ_hypothetical_protein
I need to extract rows from the database that match the words in the file names
results:
VOG00001!962!834!Xu!sp|O31936|YOPB_BACSU_Putative_antitoxin_YopB
VOG00002!206!17!Xh!sp|Q5UPJ9|YL122_MIMIV_Putative_ankyrin_repeat_protein_L12
VOG00004!353!304!Xu!sp|P03795|Y28_BPT7_Protein_2.8
def log(x, y):
output = open('output.txt', 'a')
output.write(x + y)
output.close
def main():
i = 0
nfile = 'input/' + input('Enter file with names: ')
dfile = 'input/' + input('Enter file with data: ')
names = list(open(nfile, 'r'))
data = list(open(dfile, 'r'))
while i != len(data):
line = data[i]
if 'VOG' in line:
line1 = line.replace("!*" , "")
if line1 in names:
log(line, data[i + 1])
i += 1
return(0)
main()
I want to trim the unnecessary and compare with the list of names
line1 = line.replace("!*" , "")
The best way to do this is to create a dictionary based on the database file. The database file uses '!' to delimit the key so we just split the strings to find the key and associated the entire line with that key (the dictionary value). Then iterate over the "names" file and do lookups in the dictionary.
names = input('Enter path to names file: ')
db = input('Enter path to database file: ')
with open(names) as n, open(db) as d:
dict_ = {}
for line in map(str.strip, d):
key, *_ = line.split('!')
dict_[key] = line
for name in map(str.strip, n):
if (v := dict_.get(name)):
print(v)
I want to define a function, that reads a table of a textfile as a dictionary and than use it for returning specific values. The keys are chemical symbols (like "He" for Helium,...). The values return their specific atom masses.
I don't understand, what I have to do...
The first five lines of the textfile read:
H,1.008
He,4.0026
Li,6.94
Be,9.0122
B,10.81
Here are my attempts: (I don't know where to place the parameter key so that I can define it)
def read_masses():
atom_masses = {}
with open["average_mass.csv") as f:
for line in f:
(key, value) = line.split(",")
atom_masses[key] = value
return(value)
m = read_masses("average_mass.csv)
print(m["N"]) #for the mass of nitrogen ```
once return has called, the code below it doesn't execute. What you need to return is the atom_masses not value and you have to place it outside the for loop
def read_masses(file):
atom_masses = {}
with open(file) as f:
for line in f:
(key, value) = line.split(",")
atom_masses[key] = value
return (atom_masses)
m = read_masses("average_mass.csv")
print(m["H"])
>>> 1.008
Try:
def read_masses(name):
data = {}
with open(name, "r") as f_in:
for line in map(str.strip, f_in):
if line == "":
continue
a, b = map(str.strip, line.split(",", maxsplit=1))
data[a] = float(b)
return data
m = read_masses("your_file.txt")
print(m.get("He"))
Prints:
4.0026
What i'm trying to do is grab some text from a folder split it into words, count the words, sort it into a list and write it into a file. All is well except, instead of splitting into words, it splits the text into letters and counts them. Seems like a easy fix but i have no clue what i'm doing so... thanks in advance
import os
import os.path
import string
prefix_path = ("C:/Users/User/Desktop/Python/sampleTexts")
files = [f for f in os.listdir(prefix_path) if f.endswith(".txt")]
files.sort()
files = [os.path.join(prefix_path,name) for name in files]
textOut = open("texthere.txt", "w", encoding="utf-8")
def readText(file):
for i in file:
with open(i, "r", encoding= "utf-8") as f:
textin = f.read()
first_dict= dict()
for i in textin:
i = i.strip()
i = i.lower()
i = i.translate(i.maketrans("","", string.punctuation))
words = i.split()
for word in words:
if word in first_dict:
first_dict[word] = first_dict[word] + 1
else:
first_dict[word] = 1
sorted_dict = sorted(first_dict.items(), key= lambda x: x[1], reverse=True)
for key, val in sorted_dict:
print(key," :", val)
for key, val in sorted_dict:
textOut.write(key + " :" + str(val) + "\n")
textOut.close()
readText(files)
f.read() will you give a string of the entire text file such that when you iterate over it for i in textin you are iterating over each character. What you probably want is
for line in f.readlines():
for word in line.split():
blah
I want to write an INI file with duplicate options,ie:
[test]
foo = value1
foo = value2
xxx = yyy
With ConfigParser.set only the last value is writed.
config = ConfigParser.ConfigParser()
config.read('example.cfg')
config.add_section('test')
config.set('test', service['foo'], service['value1'])
config.set('test', service['foo'], service['value2'])
config.set('test', service['xxx'], service['yyy'])
The result is:
[test]
foo = value2
xxx = yyy
Is there any way?
It looks like it isn't possible in a simple way. The default way ConfigParser stores is with dict's, i.e. one value per unique key.
In a similar question Python's ConfigParser unique keys per section the suggestions are to go with:
CongfigObj
Patched version of epydoc
i have a simple custom .ini parser in python (built for another project), which uses a list to store values but only if they are not in key=value format. if key=value then last key will be held since these are stored in a dictionary
The parser can also parse nested sections like:
[SECTION1][SECTION2]
key1=value1
; etc..
The code is below, it is easy to modify to store key/value in list instead of dictionary or even detect multiple key and rename to avoid collisions (e.g key, key$1 second key with same key value key and so on). use/modify as needed
##
#
# Simple .ini Parser for Python 2.x, 3.x
#
##
import re
class Ini_Parser():
"""Simple .ini parser for Python"""
NL = None
ACTUAL = {
'\\n' : "\n",
'\\t' : "\t",
'\\v' : "\v",
'\\f' : "\f"
}
def parseStr(s, q):
_self = Ini_Parser
endq = s.find(q, 1)
quoted = s[1:endq]
rem = s[endq+1:].strip()
for c,actual in _self.ACTUAL.items():
quoted = ( actual ).join( quoted.split( c ) )
quoted = ( '\\' ).join( quoted.split( '\\\\' ) )
return quoted, rem
def fromString(s, keysList=True, rootSection='_'):
_self = Ini_Parser
comments = [';', '#']
if rootSection: rootSection = str(rootSection)
else: rootSection = '_'
if not _self.NL:
_self.NL = re.compile(r'\n\r|\r\n|\r|\n')
sections = {}
currentSection = str(rootSection)
if keysList:
sections[currentSection] = { '__list__' : [] }
else:
sections[currentSection] = { }
currentRoot = sections
# parse the lines
lines = re.split(_self.NL, str(s))
# parse it line-by-line
for line in lines:
# strip the line of extra spaces
line = line.strip()
lenline = len(line)
# comment or empty line, skip it
if not lenline or (line[0] in comments): continue
linestartswith = line[0]
# section line
if '['==linestartswith:
SECTION = True
# parse any sub-sections
while '['==linestartswith:
if SECTION:
currentRoot = sections
else:
currentRoot = currentRoot[currentSection]
SECTION = False
endsection = line.find(']', 1)
currentSection = line[1:endsection]
if currentSection not in currentRoot:
if keysList:
currentRoot[currentSection] = { '__list__' : [] }
else:
currentRoot[currentSection] = { }
# has sub-section ??
line = line[endsection+1:].strip()
if not len(line): break
linestartswith = line[0]
# key-value pairs
else:
# quoted string
if '"'==linestartswith or "'"==linestartswith:
key, line = _self.parseStr(line, linestartswith)
# key-value pair
if line.find('=', 0)>-1:
line = line.split('=')
line.pop(0)
value = "=".join(line).strip()
valuestartswith = value[0]
# quoted value
if '"'==valuestartswith or "'"==valuestartswith:
value, rem = _self.parseStr(value, valuestartswith)
currentRoot[currentSection][key] = value
# single value
else:
if keysList:
currentRoot[currentSection]['__list__'].append(key)
else:
currentRoot[currentSection][key] = True
# un-quoted string
else:
line = line.split('=')
key = line.pop(0).strip()
# single value
if 1>len(line):
if keysList:
currentRoot[currentSection]['__list__'].append(key)
else:
currentRoot[currentSection][key] = True
# key-value pair
else:
value = "=".join(line).strip()
valuestartswith = value[0]
# quoted value
if '"'==valuestartswith or "'"==valuestartswith:
value, rem = _self.parseStr(value, valuestartswith)
currentRoot[currentSection][key] = value
return sections
def fromFile(filename, keysList=True, rootSection='_'):
s = ''
with open(filename, 'r') as f: s = f.read()
return Ini_Parser.fromString(s, keysList, rootSection)
def walk(o, key=None, top='', q='', EOL="\n"):
s = ''
if len(o):
o = dict(o)
if key: keys = [key]
else: keys = o.keys()
for section in keys:
keyvals = o[section]
if not len(keyvals): continue
s += str(top) + "[" + str(section) + "]" + EOL
if ('__list__' in keyvals) and len(keyvals['__list__']):
# only values as a list
s += q + (q+EOL+q).join(keyvals['__list__']) + q + EOL
del keyvals['__list__']
if len(keyvals):
for k,v in keyvals.items():
if not len(v): continue
if isinstance(v, dict) or isinstance(v, list):
# sub-section
s += Ini_Parser.walk(keyvals, k, top + "[" + str(section) + "]", q, EOL)
else:
# key-value pair
s += q+k+q+ '=' +q+v+q + EOL
s += EOL
return s
def toString(o, rootSection='_', quote=False, EOL="\n"):
s = ''
if rootSection: root = str(rootSection)
else: root = '_'
if quote: q = '"'
else: q = ''
# dump the root section first, if exists
if root in o:
section = dict(o[root])
llist = None
if '__list__' in section:
llist = section['__list__']
if llist and isinstance(llist, list) and len(llist):
s += q + (q+EOL+q).join(llist) + q + EOL
del section['__list__']
for k,v in section.items():
if not len(v): continue
s += q+k+q+ '=' +q+v+q + EOL
s += EOL
del o[root]
# walk the sections and sub-sections, if any
s += Ini_Parser.walk(o, None, '', q, EOL)
return s
def toFile(filename, o, rootSection='_', quote=False, EOL="\n"):
with open(filename, 'w') as f:
f.write( Ini_Parser.toString(o, rootSection, quote, EOL) )
# for use with 'import *'
__all__ = [ 'Ini_Parser' ]
I want to return a dictionary that a file contains. What I have is this code:
def read_report(filename):
new_report = {}
input_filename = open(filename)
for line in input_filename:
lines = line[:-1]
new_report.append(lines)
input_filename.close()
return new_report
It says I can't append to a dictionary. So how would I go with adding lines from the file into the dictionary? Let's say my filename is this:
shorts: a, b, c, d
longs: a, b, c, d
mosts: a
count: 11
avglen: 1.0
a 5
b 3
c 2
d 1
I'm assuming the last lines of your files (the ones that don't contain :) are to be ignored.
from collections import defaultdict
d = defaultdict(list)
with open('somefile.txt') as f:
for line in f:
if ':' in line:
key, val = line.split(':')
d[key.strip()] += val.rstrip().split(',')
def read_line(filename):
list = []
new_report = {}
file_name = open(filename)
for i in file_name:
list.append(i[:-1])
for i in range(len(list)):
new_report[i] = list[i]
file_name.close()
return new_report
if you rewrite your input file to have uniform lines like the first and the second, you could try this:
EDIT: modified code to support also lines with space separator instead of colon (:)
def read_report(filename):
new_report = {}
f = open(filename)
for line in f:
if line.count(':') == 1:
key, value = line.split(':')
else:
key, value = line.split(' ')
new_report[key] = value.split(',')
f.close()
return new_report