Counting how many specific items exist in a YAML file? - python

I'm attempting to find out how many "usernames" exist. Currently there are two, and I can loop over users to get this, but that feels clunky. Is there a way to get how many usernames exist in users?
import yaml

with open('file.yaml', 'r') as f:
    file = yaml.safe_load(f)
    # count number of usernames in users...?
file.yaml:
host: "example.com"
timeout: 60
work:
-
processes: 1
users:
-
username: "me"
-
username: "notme"

If you want to get counts from your specific structure:
sum([len(x["users"]) for x in d["work"]])
For a general solution, you could do something like:
f = open("test.yaml")
d = yaml.safe_load(f)
# d is now a dict - {'host': 'example.com', 'work': [{'processes': 1, 'users': [{'username': 'me'}, {'username': 'notme'}]}], 'timeout': 60}
def yaml_count(d, s):
    c = 0
    if isinstance(d, dict):
        for k, v in d.items():
            if k == s:
                c += 1
            c += yaml_count(v, s)
    elif isinstance(d, list):
        for item in d:
            c += yaml_count(item, s)
    return c

yaml_count(d, "username")  # returns 2

Related

string indices must be integers in JSON

I am getting an error in the JSON I created: string indices must be integers. I read some topics on Stack Overflow about this issue, but it is not clear to me what I have to change.
The issue is with items = json_data['items']
My code:
import json

formattedUrl = ["https://stackoverflow.com/", "https://en.wikipedia.org/wiki/Main_Page"]
displayLink = ["https://stackoverflow.com/", "https://en.wikipedia.org/wiki/Main_Page"]
htmlFormattedUrl = ["https://stackoverflow.com/", "https://en.wikipedia.org/wiki/Main_Page"]
title = ["Stackoverflow", "Wikipedia, the free encyclopedia"]
htmlTitle = ["Stackoverflow", "Wikipedia, the free encyclopedia"]
snippet = ["Stack Overflow is the largest", "Main page"]
#
keys = ['formattedUrl', 'displayLink', 'htmlFormattedUrl', 'title', 'htmlTitle', 'snippet']
items = [dict(zip(keys, [u, t, d, aa, ab, ac])) for u, t, d, aa, ab, ac in
         zip(formattedUrl, displayLink, htmlFormattedUrl, title, htmlTitle, snippet)]
d = {
    'items': items
}
json_data = json.dumps(d, indent=4)

# queries
if has_result == 1:
    # print "results"
    result = []
    results = []
    items = json_data['items']  # raises: string indices must be integers
Try this. You need to use json.loads to bring it into the right format (i.e., a dictionary):
>>> data = json.loads(json_data)
>>> data['items']  # now returns the list of item dicts
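For context, a small sketch of why the original line fails, using a cut-down version of the question's d: json.dumps returns JSON text (a str), and indexing a str with 'items' is exactly what raises the TypeError; json.loads parses the text back into a dict.
import json

d = {'items': [{'title': 'Stackoverflow'}, {'title': 'Wikipedia, the free encyclopedia'}]}
json_data = json.dumps(d, indent=4)

print(type(json_data))            # <class 'str'> in Python 3 -- JSON text, not a dict
# json_data['items']              # would raise: TypeError: string indices must be integers

data = json.loads(json_data)      # parse the JSON text back into Python objects
print(type(data))                 # <class 'dict'>
print(data['items'][0]['title'])  # Stackoverflow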

How to add the value from list of tuples

I am extracting from the log file and printing it using the code below:
for line in data:
    g = re.findall(r'([\d.]+).*?(GET|POST|PUT|DELETE)', line)
    print(g)
[('1.1.1.1', 'PUT')]
[('2.2.2.2', 'GET')]
[('1.1.1.1', 'PUT')]
[('2.2.2.2', 'POST')]
How can I add up the counts to get output like this?
1.1.1.1: PUT = 2
2.2.2.2: GET = 1, POST = 1
You could use a dictionary to count:
# initialize the count dict
count_dict = dict()
for line in data:
    g = re.findall(r'([\d.]+).*?(GET|POST|PUT|DELETE)', line)
    for tup in g:
        # get the count for tuple tup; if we don't have it yet,
        # use 0 (second argument to .get)
        num = count_dict.get(tup, 0)
        # increase the count and write it back
        count_dict[tup] = num + 1

# now iterate over the key (tuple) - value (count) pairs
# and print the result
for tup, count in count_dict.items():
    print(tup, count)
OK, I have to admit this doesn't give the exact output you want, but from it you can get there in a similar manner:
out_dict = dict()
for (comma_string, request_type), count in count_dict.items():
    out_str = out_dict.get(comma_string, '')
    sep = '' if out_str == '' else ', '
    out_str = f'{out_str}{sep}{request_type} = {count}'
    out_dict[comma_string] = out_str

for tup, out_str in out_dict.items():
    print(tup, out_str)
From your data that outputs:
1.1.1.1 PUT = 2
2.2.2.2 GET = 1, POST = 1
I would look towards Counter.
from collections import Counter

results = []
for line in data:
    g = re.findall(r'([\d.]+).*?(GET|POST|PUT|DELETE)', line)
    results.append(g[0])

ip_list = set(result[0] for result in results)
for ip in ip_list:
    print(ip, Counter(result[1] for result in results if result[0] == ip))
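This prints Counter objects rather than the exact format asked for. A minimal sketch along the same lines (assuming data again holds the raw log lines) that counts the (ip, method) pairs with a single Counter and then formats them per IP:
import re
from collections import Counter, defaultdict

# count every (ip, method) pair across all lines
pair_counts = Counter()
for line in data:
    pair_counts.update(re.findall(r'([\d.]+).*?(GET|POST|PUT|DELETE)', line))

# group the per-pair counts by IP and print "ip: METHOD = n, METHOD = n"
per_ip = defaultdict(list)
for (ip, method), count in pair_counts.items():
    per_ip[ip].append('{} = {}'.format(method, count))
for ip, parts in per_ip.items():
    print('{}: {}'.format(ip, ', '.join(parts)))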
You can use collections.defaultdict.
Ex:
from collections import defaultdict

result = defaultdict(list)
for line in data:
    for ip, method in re.findall(r'([\d.]+).*?(GET|POST|PUT|DELETE)', line):
        result[ip].append(method)

for k, v in result.items():
    temp = ""
    for i in set(v):
        temp += " {} = {}".format(i, v.count(i))
    print("{}{}".format(k, temp))
from collections import Counter

x = [[('1.1.1.1', 'PUT')], [('2.2.2.2', 'GET')], [('1.1.1.1', 'PUT')], [('2.2.2.2', 'POST')]]

# step 1: convert x into a dict
m = {}
for i in x:
    a, b = i[0]
    if a not in m.keys():
        m[a] = [b]
    else:
        x = m[a]
        x.append(b)
        m[a] = x
print('new dict is {}'.format(m))

# step 2: count frequency
m_values = list(m.values())
yy = []
for i in m_values:
    x = []
    k = list(Counter(i).keys())
    v = list(Counter(i).values())
    for i in range(len(k)):
        x.append(k[i] + '=' + str(v[i]))
    yy.append(x)

# step 3: update the values of the dict
m_keys = list(m.keys())
n = len(m_keys)
for i in range(n):
    m[m_keys[i]] = yy[i]
print("final dict is{}".format(m))
Output is
new dict is {'1.1.1.1': ['PUT', 'PUT'], '2.2.2.2': ['GET', 'POST']}
final dict is{'1.1.1.1': ['PUT=2'], '2.2.2.2': ['GET=1', 'POST=1']}
Without dependencies and using a dict for counting, in a very basic way. Given the data_set:
data_set = [[('1.1.1.1', 'PUT')],
            [('2.2.2.2', 'GET')],
            [('2.2.2.2', 'POST')],
            [('1.1.1.1', 'PUT')]]
Initialize the variables (manually, just a few verbs), then iterate over the data:
counter = {'PUT': 0, 'GET': 0, 'POST': 0, 'DELETE': 0}
res = {}
for data in data_set:
    ip, verb = data[0]
    if ip not in res:
        res[ip] = dict(counter)  # copy, so each IP gets its own counters
    res[ip][verb] += 1
print(res)
#=> {'1.1.1.1': {'PUT': 2, 'GET': 0, 'POST': 0, 'DELETE': 0}, '2.2.2.2': {'PUT': 0, 'GET': 1, 'POST': 1, 'DELETE': 0}}
You still need to format the output to better fit your needs.

create a list of list of parameters from a file

Hi, I am trying to create a list of parameters from a file.
The final result should be something like
param=[[field],[units],[height],[site]]
The problem is that the information is split across lines, and some of the parameters do not have all the information.
#info in the file
[field1]
unit=m/s
height=70.4
site=site1
[field2]
height=20.6
site=site2
[field3]
units=m
...
so I would like to fill in all the fields in such a way that, if the information is missing, it assigns 0 or ''.
Final result in the example:
param={field1:'m/s',70.4,'site1',field2:'',20.6,site2, field3:'m',0,''}
I know how to create a dictionary from a list of lists, but not how to set default values ('' for the string values and 0 for the numeric ones) in case some values are missing.
Thanks
You could group using a defaultdict:
from collections import defaultdict

with open("test.txt") as f:
    d = defaultdict(list)
    for line in map(str.rstrip, f):
        if line.startswith("["):
            d["fields"].append(line.strip("[]"))
        else:
            k, v = line.split("=")
            d[k].append(v)
Input:
[field1]
unit=m/s
height=70.4
site=site1
[field2]
height=20.6
site=site2
[field3]
unit=m
height=6.0
site=site3
Output:
defaultdict(<type 'list'>, {'fields': ['field1', 'field2', 'field3'],
'site': ['site1', 'site2', 'site3'], 'unit': ['m/s', 'm'],
'height': ['70.4', '20.6', '6.0']})
If you actually want to group by field, you can use itertools.groupby grouping on lines that start with [:
from itertools import groupby

with open("test.txt") as f:
    grps, d = groupby(map(str.rstrip, f), key=lambda x: x.startswith("[")), {}
    for k, v in grps:
        if k:
            k, v = next(v).strip("[]"), list(next(grps)[1])
            d[k] = v
print(d)
Output:
{'field2': ['height=20.6', 'site=site2'],
'field3': ['unit=m', 'height=6.0', 'site=site3'],
'field1': ['unit=m/s', 'height=70.4', 'site=site1']}
Each k is a line starting with [; we then call next on the grouper object to get all the lines up to the next line starting with [ or the EOF.
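To also fill in defaults as the question asks ('' for a missing unit or site, 0 for a missing height), a small follow-on sketch working from the per-field dict d built above; the key names 'unit', 'height' and 'site' are assumed to match the file:
# turn each field's ['key=value', ...] list into a dict, then read the
# expected keys back out with a default when they are missing
defaults = [('unit', ''), ('height', 0), ('site', '')]
param = [list(d)]  # first row: the field names
for name, default in defaults:
    row = []
    for field in d:
        info = dict(pair.split('=') for pair in d[field])
        row.append(info.get(name, default))
    param.append(row)
print(param)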
This would fill in the missing information.
f = open('file.txt', 'r')
field, units, height, site = [], [], [], []
param = [field, units, height, site]
lines = f.readlines()
i = 0
while True:
    try:
        line1 = lines[i].rstrip()
        if line1.startswith('['):
            field.append(line1.strip('[]'))
        else:
            field.append(0)
            i -= 1
    except:
        field.append(0)
    try:
        line2 = lines[i+1].rstrip()
        if line2.startswith('unit') or line2.startswith('units'):
            units.append(line2.split('=')[-1])
        else:
            units.append('')
            i -= 1
    except:
        units.append('')
    try:
        line3 = lines[i+2].rstrip()
        if line3.startswith('height'):
            height.append(line3.split('=')[-1])
        else:
            height.append(0)
            i -= 1
    except:
        height.append(0)
    try:
        line4 = lines[i+3].rstrip()
        if line4.startswith('site'):
            site.append(line4.split('=')[-1])
        else:
            site.append('')
    except:
        site.append('')
        break
    i += 4
Output:
param:
[['field1', 'field2', 'field3'],
['m/s', '', 'm'],
['70.4', '20.6', 0],
['site1', 'site2', '']]

Dot notation to JSON in Python

I receive data from the Loggly service in dot notation, but to put data back in, it must be in JSON.
Hence, I need to convert:
{'json.message.status.time':50, 'json.message.code.response':80, 'json.time':100}
Into:
{'message': {'code': {'response': 80}, 'status': {'time': 50}}, 'time': 100}
I have put together a function to do so, but I wonder if there is a more direct and simpler way to accomplish the same result.
import re

def dot_to_json(a):
    # Create root for JSON tree structure
    resp = {}
    for k, v in a.items():
        # eliminate json. (if metric comes from another type, it will keep its root)
        k = re.sub(r'\bjson.\b', '', k)
        if '.' in k:
            # Field has a dot
            r = resp
            s = ''
            k2 = k.split('.')
            l = len(k2)
            count = 0
            t = {}
            for f in k2:
                count += 1
                if f not in resp.keys():
                    r[f] = {}
                r = r[f]
                if count < l:
                    s += "['" + f + "']"
                else:
                    s = "resp%s" % s
                    t = eval(s)
                    # Assign value to the last branch
                    t[f] = v
        else:
            r2 = resp
            if k not in resp.keys():
                r2[k] = {}
            r2[k] = v
    return resp
You can turn the path into dictionary access with:
from functools import reduce  # reduce is a builtin in Python 2

def dot_to_json(a):
    output = {}
    for key, value in a.items():
        path = key.split('.')
        if path[0] == 'json':
            path = path[1:]
        target = reduce(lambda d, k: d.setdefault(k, {}), path[:-1], output)
        target[path[-1]] = value
    return output
This takes the key as a path, ignoring the first json part. With reduce() you can walk the elements of path (except for the last one) and fetch the nested dictionary with it.
Essentially you start at output and for each element in path fetch the value and use that value as the input for the next iteration. Here dict.setdefault() is used to default to a new empty dictionary each time a key doesn't yet exist. For path[:-1] equal to ['foo', 'bar', 'baz'] this comes down to the call output.setdefault('foo', {}).setdefault('bar', {}).setdefault('baz', {}), only more compact and supporting arbitrary length paths.
The innermost dictionary is then used to set the value with the last element of the path as the key.
Demo:
>>> from functools import reduce
>>> def dot_to_json(a):
...     output = {}
...     for key, value in a.items():
...         path = key.split('.')[1:]  # ignore the json. prefix
...         target = reduce(lambda d, k: d.setdefault(k, {}), path[:-1], output)
...         target[path[-1]] = value
...     return output
...
>>> dot_to_json({'json.message.status.time': 50, 'json.message.code.response': 80, 'json.time': 100})
{'message': {'status': {'time': 50}, 'code': {'response': 80}}, 'time': 100}
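Since the data has to go back to Loggly as JSON text rather than a Python dict, a small follow-on sketch using the standard-library json module:
import json

nested = dot_to_json({'json.message.status.time': 50,
                      'json.message.code.response': 80,
                      'json.time': 100})
payload = json.dumps(nested)  # a JSON string, ready to send back
print(payload)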

Split dictionary key and list of values from dict

I want to split keys and values and display the dictionary result in the format mentioned below. I'm reading a file, splitting the data into a list, and later moving it into a dictionary.
Please help me get to the result.
INPUT FILE - commands.txt
login url=http://demo.url.net username=test#url.net password=mytester
create-folder foldername=demo
select-folder foldername=test123
logout
Expected result format
print result_dict
"0": {
"login": [
{
"url": "http://demo.url.net",
"username": "test#url.net",
"password": "mytester"
}
]
},
"1": {
"create-folder": {
"foldername": "demo"
}
},
"2": {
"select-folder": {
"foldername": "test-folder"
}
},
"3": {
"logout": {}
}
CODE
file = os.path.abspath('catalog/commands.txt')
list_output = [f.rstrip().split() for f in open(file).readlines()]
print list_output

counter = 0
for data in list_output:
    csvdata[counter] = data[0:]
    counter = counter + 1
print csvdata

for key, val in csvdata.iteritems():
    for item in val:
        if '=' in item:
            key, value = item.split("=")
            result[key] = value
print result
As a function:
from collections import defaultdict
from itertools import count

def read_file(file_path):
    result = defaultdict(dict)
    item = count()
    with open(file_path) as f:
        for line in f:
            if not line:
                continue
            parts = line.split()
            result[next(item)][parts[0]] = dict(p.split('=') for p in parts[1:])
    return dict(result)
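A usage sketch, assuming commands.txt sits next to the script:
result_dict = read_file('commands.txt')
print(result_dict[0])  # {'login': {'url': 'http://demo.url.net', 'username': 'test#url.net', 'password': 'mytester'}}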
Better example and explanation:
s = """
login url=http://demo.url.net username=test#url.net password=mytester
create-folder foldername=demo
select-folder foldername=test123
logout
"""
from collections import defaultdict
from itertools import count
result_dict = defaultdict(dict)
item = count()
# pretend you opened the file and are reading it line by line
for line in s.splitlines():
if not line:
continue # skip empty lines
parts = line.split()
result_dict[next(item)][parts[0]] = dict(p.split('=') for p in parts[1:])
With pretty print:
>>> pprint(dict(result_dict))
{0: {'login': {'password': 'mytester',
               'url': 'http://demo.url.net',
               'username': 'test#url.net'}},
 1: {'create-folder': {'foldername': 'demo'}},
 2: {'select-folder': {'foldername': 'test123'}},
 3: {'logout': {}}}
lines = ["login url=http://demo.url.net username=test#url.net password=mytester",
"create-folder foldername=demo",
"select-folder foldername=test123",
"logout"]
result = {}
for no, line in enumerate(lines):
values = line.split()
pairs = [v.split('=') for v in values[1:]]
result[str(no)] = {values[0]: [dict(pairs)] if len(pairs) > 1 else dict(pairs)}
import pprint
pprint.pprint(result)
Output:
{'0': {'login': [{'password': 'mytester',
                  'url': 'http://demo.url.net',
                  'username': 'test#url.net'}]},
 '1': {'create-folder': {'foldername': 'demo'}},
 '2': {'select-folder': {'foldername': 'test123'}},
 '3': {'logout': {}}}
But are you sure you need the extra list inside the login value? If not, just change [dict(pairs)] if len(pairs) > 1 else dict(pairs) to dict(pairs).
r = dict()
f = open('commands.txt')
for i, line in enumerate(f.readlines()):
    r[str(i)] = dict()
    actions = line.split()
    list_actions = {}
    for action in actions[1:]:
        if "=" in action:
            k, v = action.split('=')
            list_actions[k] = v
    if len(actions[1:]) > 1:
        r[str(i)][actions[0]] = [list_actions]
    else:
        r[str(i)][actions[0]] = list_actions
print r
That should work.
