I have a SCIM search request body like this:
{
  "schemas": ["urn:ietf:params:scim:api:messages:2.0:SearchRequest"],
  "attributes": ["displayName", "userName"],
  "excludedAttributes": ["emails"],
  "filter": "displayName sw \"smith\"",
  "startIndex": 1,
  "count": 10,
  "sortBy": "userName",
  "sortOrder": "ascending"
}
All of the above attributes are optional, except for the "schemas" attribute.
Because all the attributes are optional, I have to construct the query accordingly.
Below is my code for handling this; as you can see, the many conditions make the code look untidy.
data = request.get_json()
a = {}
full_data = []
attributes = data.get('attributes', [])
sortby = data.get('sortBy', None)
sortorder = data.get('sortOrder', None)
if not attributes:
    pass
else:
    for i in attributes:
        if i not in a:
            a[i] = 1
excludedAttributes = data.get('excludedAttributes', [])
if not excludedAttributes:
    pass
else:
    for i in excludedAttributes:
        if i not in a:
            a[i] = 0
if not a and not sortby:
    result = mongo.db.test.find({})
if a and not sortby:
    result = mongo.db.test.find({}, a)
if not a and sortby and not sortorder:
    result = mongo.db.test.find({}).sort([(sortby, flask_pymongo.ASCENDING)])
if a and sortby and not sortorder:
    result = mongo.db.test.find({}, a).sort([(sortby, flask_pymongo.ASCENDING)])
if not a and sortby and sortorder == 'ascending':
    result = mongo.db.test.find({}).sort([(sortby, flask_pymongo.ASCENDING)])
if a and sortby and sortorder == 'ascending':
    result = mongo.db.test.find({}, a).sort([(sortby, flask_pymongo.ASCENDING)])
if not a and sortby and sortorder == 'descending':
    result = mongo.db.test.find({}).sort([(sortby, flask_pymongo.DESCENDING)])
if a and sortby and sortorder == 'descending':
    result = mongo.db.test.find({}, a).sort([(sortby, flask_pymongo.DESCENDING)])
for i in result:
    full_data.append(i)
resp = jsonify(json.loads(dumps(full_data)))
return resp
If I include pagination as well, even more conditions will pile up.
How do I construct these queries effectively?
data = request.get_json()
a = {}
full_data = []
attributes = data.get('attributes', [])
sortby = data.get('sortBy', None)
sortorder = data.get('sortOrder', None)
if not attributes:
    pass
else:
    for i in attributes:
        if i not in a:
            a[i] = 1
excludedAttributes = data.get('excludedAttributes', [])
if not excludedAttributes:
    pass
else:
    for i in excludedAttributes:
        if i not in a:
            a[i] = 0

# Build the cursor once, with a projection only if one was requested.
result = mongo.db.test.find({})
if a:
    result = mongo.db.test.find({}, a)

# Apply the sort only when sortBy is present; default to ascending.
if sortby:
    if sortorder == "descending":
        sortorder = flask_pymongo.DESCENDING
    else:
        sortorder = flask_pymongo.ASCENDING
    result = result.sort([(sortby, sortorder)])

for i in result:
    full_data.append(i)
resp = jsonify(json.loads(dumps(full_data)))
return resp
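For pagination, the same pattern extends without piling up more branches: SCIM's startIndex is 1-based (RFC 7644), so it maps to skip(startIndex - 1), and count maps to limit(). A minimal sketch to apply just before iterating the cursor (the start_index/count variable names are mine):

# Sketch: apply SCIM pagination to the cursor built above, before iterating it.
start_index = data.get('startIndex', 1)
count = data.get('count')
result = result.skip(max(start_index - 1, 0))
if count:
    result = result.limit(count)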
Folks,
I am trying to parse a log file into JSON format.
I have a lot of logs; here is one of them.
How can I parse this?
03:02:03.113 [info] ext_ref = BANK24AOS_cl_reqmarketcreditorderstate_6M8I1NT8JKYD_1591844522410384_4SGA08M8KIXQ reqid = 1253166 type = INREQ channel = BANK24AOS sid = msid_1591844511335516_KRRNBSLH2FS duration = 703.991 req_uri = marketcredit/order/state login = 77012221122 req_type = cl_req req_headers = {"accept-encoding":"gzip","connection":"close","host":"test-mobileapp-api.bank.kz","user-agent":"okhttp/4.4.1","x-forwarded-for":"212.154.169.134","x-real-ip":"212.154.169.134"} req_body = {"$sid":"msid_1591844511335516_KRRNBSLH2FS","$sid":"msid_1591844511335516_KRRNBSLH2FS","app":"bank","app_version":"2.3.2","channel":"aos","colvir_token":"GExPR0lOX1BBU1NXT1JEX0NMRUFSVEVYVFNzrzh4Thk1+MjDKWl/dDu1fQPsJ6gGLSanBp41yLRv","colvir_commercial_id":"-1","colvir_id":"000120.335980","openway_commercial_id":"6247520","openway_id":"6196360","$lang":"ru","ekb_id":"923243","inn":"990830221722","login":"77012221122","bank24_id":"262"} resp_body = {"task_id":"","status":"success","data":{"state":"init","applications":[{"status":"init","id":"123db561-34a3-4a8d-9fa7-03ed6377b44f","name":"Sulpak","amount":101000,"items":[{"name":"Switch CISCO x24","price":100000,"count":1,"amount":100000}]}],"segment":{"range":{"min":6,"max":36,"step":1},"payment_day":{"max":28,"min":1}}}}
Into this type of JSON, or any other format (but I guess JSON is the best one):
{
  "time": "03:02:03.113",
  "class_req": "info",
  "ext_ref": "BANK24AOS_cl_reqmarketcreditorderstate_6M8I1NT8JKYD_1591844522410384_4SGA08M8KIXQ",
  "reqid": "1253166",
  "type": "INREQ",
  "channel": "BANK24AOS",
  "sid": "msid_1591844511335516_KRRNBSLH2FS",
  "duration": "703.991",
  "req_uri": "marketcredit/order/state",
  "login": "77012221122",
  "req_type": "cl_req",
  "req_headers": {
    "accept-encoding": "gzip",
    "connection": "close",
    "host": "test-mobileapp-api.bank.kz",
    "user-agent": "okhttp/4.4.1",
    "x-forwarded-for": "212.154.169.134",
    "x-real-ip": "212.154.169.134"
  },
  "req_body": {
    "$sid": "msid_1591844511335516_KRRNBSLH2FS",
    "$sid": "msid_1591844511335516_KRRNBSLH2FS",
    "app": "bank",
    "app_version": "2.3.2",
    "channel": "aos",
    "colvir_token": "GExPR0lOX1BBU1NXT1JEX0NMRUFSVEVYVFNzrzh4Thk1+MjDKWl/dDu1fQPsJ6gGLSanBp41yLRv",
    "colvir_commercial_id": "-1",
    "colvir_id": "000120.335980",
    "openway_commercial_id": "6247520",
    "openway_id": "6196360",
    "$lang": "ru",
    "ekb_id": "923243",
    "inn": "990830221722",
    "login": "77012221122",
    "bank24_id": "262"
  },
  "resp_body": {
    "task_id": "",
    "status": "success",
    "data": {
      "state": "init",
      "applications": [
        {
          "status": "init",
          "id": "123db561-34a3-4a8d-9fa7-03ed6377b44f",
          "name": "Sulpak",
          "amount": 101000,
          "items": [
            {
              "name": "Switch CISCO x24",
              "price": 100000,
              "count": 1,
              "amount": 100000
            }
          ]
        }
      ],
      "segment": {
        "range": {
          "min": 6,
          "max": 36,
          "step": 1
        },
        "payment_day": {
          "max": 28,
          "min": 1
        }
      }
    }
  }
}
I am trying to split the whole text first, but then I run into another problem: matching keys to values based on the '=' sign. Also, some keys may have empty values. For example:
type = INREQ channel = sid = duration = 1.333 (to tell that a value is empty, you have to pay attention to the number of spaces; usually there is one space between the previous value and the next key). So this example should look like this:
{
  "type": "INREQ",
  "channel": "",
  "sid": "",
  "duration": "1.333"
}
Thanks in advance!
One thing to note: the duplicate "$sid" key in req_body collapses to a single entry once the value is parsed with json.loads.
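For illustration (this snippet is mine, not part of the answer), Python's json module keeps the last value when a key repeats:

import json

# Duplicate keys: json.loads silently keeps the last occurrence.
print(json.loads('{"$sid": "a", "$sid": "b"}'))  # {'$sid': 'b'}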
import re
import json

text = """03:02:03.113 [info] ext_ref = reqid = 1253166 type = INREQ channel = BANK24AOS sid = msid_1591844511335516_KRRNBSLH2FS duration = 703.991 req_uri = marketcredit/order/state login = 77012221122 req_type = cl_req req_headers = {"accept-encoding":"gzip","connection":"close","host":"test-mobileapp-api.bank.kz","user-agent":"okhttp/4.4.1","x-forwarded-for":"212.154.169.134","x-real-ip":"212.154.169.134"} req_body = {"$sid":"msid_1591844511335516_KRRNBSLH2FS","$sid":"msid_1591844511335516_KRRNBSLH2FS","app":"bank","app_version":"2.3.2","channel":"aos","colvir_token":"GExPR0lOX1BBU1NXT1JEX0NMRUFSVEVYVFNzrzh4Thk1+MjDKWl/dDu1fQPsJ6gGLSanBp41yLRv","colvir_commercial_id":"-1","colvir_id":"000120.335980","openway_commercial_id":"6247520","openway_id":"6196360","$lang":"ru","ekb_id":"923243","inn":"990830221722","login":"77012221122","bank24_id":"262"} resp_body = {"task_id":"","status":"success","data":{"state":"init","applications":[{"status":"init","id":"123db561-34a3-4a8d-9fa7-03ed6377b44f","name":"Sulpak","amount":101000,"items":[{"name":"Switch CISCO x24","price":100000,"count":1,"amount":100000}]}],"segment":{"range":{"min":6,"max":36,"step":1},"payment_day":{"max":28,"min":1}}}}"""

# Turn the leading "03:02:03.113 [info]" prefix into two more "key = value" pairs.
index1 = text.index('[')
index2 = text.index(']')
new_text = 'time = ' + text[:index1-1] + ' class_req = ' + text[index1+1:index2] + text[index2+1:]

# Try the more specific alternatives first: "key = {json} ", "key = {json}<end>",
# then "key = value" where the value is not itself the next key, and finally a
# bare "key = " as the fallback for keys with empty values.
lst = re.findall(r'\S+ = \{.*?\} |\S+ = \{.*?\}$|\S+ = (?!\S+ = )\S+ ?|\S+ = ', new_text)

res = {}
for item in lst:
    key, equal, value = item.partition('=')
    key, value = key.strip(), value.strip()
    if value.startswith('{'):
        try:
            value = json.loads(value)
        except ValueError:
            print(value)
    res[key] = value
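From here, res can be dumped back out as the JSON structure the question asks for; a small usage example (json is already imported above):

# Pretty-print the reassembled record as JSON.
print(json.dumps(res, indent=2, ensure_ascii=False))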
You can try regular expressions in Python.
Here is what I wrote; it works for your problem.
For convenience I removed the part of the string before "ext_ref"; you can truncate the raw string directly.
import re
import json

string = 'ext_ref = BANK24AOS_cl_reqmarketcreditorderstate_6M8I1NT8JKYD_1591844522410384_4SGA08M8KIXQ reqid = 1253166 type = INREQ channel = BANK24AOS sid = msid_1591844511335516_KRRNBSLH2FS duration = 703.991 req_uri = marketcredit/order/state login = 77012221122 req_type = cl_req req_headers = {"accept-encoding":"gzip","connection":"close","host":"test-mobileapp-api.bank.kz","user-agent":"okhttp/4.4.1","x-forwarded-for":"212.154.169.134","x-real-ip":"212.154.169.134"} req_body = {"$sid":"msid_1591844511335516_KRRNBSLH2FS","$sid":"msid_1591844511335516_KRRNBSLH2FS","app":"bank","app_version":"2.3.2","channel":"aos","colvir_token":"GExPR0lOX1BBU1NXT1JEX0NMRUFSVEVYVFNzrzh4Thk1+MjDKWl/dDu1fQPsJ6gGLSanBp41yLRv","colvir_commercial_id":"-1","colvir_id":"000120.335980","openway_commercial_id":"6247520","openway_id":"6196360","$lang":"ru","ekb_id":"923243","inn":"990830221722","login":"77012221122","bank24_id":"262"} resp_body = {"task_id":"","status":"success","data":{"state":"init","applications":[{"status":"init","id":"123db561-34a3-4a8d-9fa7-03ed6377b44f","name":"Sulpak","amount":101000,"items":[{"name":"Switch CISCO x24","price":100000,"count":1,"amount":100000}]}],"segment":{"range":{"min":6,"max":36,"step":1},"payment_day":{"max":28,"min":1}}}}'

position = re.search("req_headers", string)        # position of req_headers
resp_body_pos = re.search("resp_body", string)

resp_body = string[resp_body_pos.span()[0]:]
res1 = {}
res1.setdefault(resp_body.split("=")[0], resp_body.split("=")[1])
print(res1)

before = string[:position.span()[0]]
after = string[position.span()[0]:resp_body_pos.span()[0]]  # between req_headers and resp_body (resp_body is handled separately above)

res2 = re.findall(r"(\S+) = (\S+)", before)
print(res2)
res3 = re.findall(r"(\S+) = ({.*?})", after)
print(res3)

# res1: dict with the resp_body content
# res2: list of (key, value) tuples for the content before req_headers
# res3: list of (key, json_string) tuples for the rest (req_headers and req_body)
Now you can do whatever you want with the data (e.g., transform each part into JSON).
Hope this is helpful.
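If you want a single dict out of the three partial results above, here is a minimal sketch of the merging step (the merging code is mine, not part of the answer; it assumes the embedded values are valid JSON):

# Merge res1/res2/res3 into one dict and parse the embedded JSON strings.
merged = dict(res2)                               # plain "key = value" pairs
for key, raw in res3:                             # req_headers and req_body
    merged[key] = json.loads(raw)
resp_key, raw_resp = list(res1.items())[0]        # single 'resp_body' entry
merged[resp_key.strip()] = json.loads(raw_resp.strip())

print(json.dumps(merged, indent=2, ensure_ascii=False))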
I am trying to read through a csv file in the following format:
number,alphabet
1,a
2,b
3,c
2,b
1,a
My code to create a dictionary:
import csv

alpha = open('alpha.csv', 'r')
csv_alpha = csv.reader(alpha)
alpha_file = {row[0]: row[1] for row in csv_alpha}
OUTPUT:
alpha_file = { 1:'a', 2:'b', 3:'c' }
Looking at the file, 1 and 2 have duplicate entries.
How can I possibly change my output to:
alpha_file = { 1:'a', 1:'a', 2:'b', 2:'b', 3:'c' }
Language: Python
Use a list to hold each key's values.
import csv

alpha = open('alpha.csv', 'r')
csv_alpha = csv.reader(alpha)
alpha_file = dict()
for row in csv_alpha:
    if row[0] in alpha_file:
        alpha_file[row[0]].append(row[1])
    else:
        alpha_file[row[0]] = [row[1]]
the output will be like:
{ 1:['a','a'],2:['b','b'], 3:['c'] }
To list each key repeated by its number of occurrences, use a for loop:
d = {1: ['a', 'a'], 2: ['b', 'b'], 3: ['c']}
amount = []
for key, value in d.items():
    amount += [key] * len(value)
print(amount)
The output looks like:
[1, 1, 2, 2, 3]
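As an aside, collections.defaultdict shortens both steps (note that csv.reader yields strings, so the keys come back as '1', '2', '3'); a sketch that also skips the header row:

import csv
from collections import defaultdict

alpha_file = defaultdict(list)
with open('alpha.csv', 'r') as alpha:
    reader = csv.reader(alpha)
    next(reader)                                   # skip the "number,alphabet" header row
    for number, letter in reader:
        alpha_file[number].append(letter)

print(dict(alpha_file))                            # {'1': ['a', 'a'], '2': ['b', 'b'], '3': ['c']}
print({k: len(v) for k, v in alpha_file.items()})  # occurrences per key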
I have a JSON file with objects and a text file with several groups (each group has 5 numbers, and I store them in lists this way: the first number of each group goes into list 1, the second number of each group into list 2, etc.). I basically have to match each object of the JSON with each group I created. The problem is that I am getting only the last element of the JSON as the result. The groups from the text file are created correctly.
This is my code:
import json

NUM_LIST = 5
index = 0

def report(a, b, c, d, e, index):
    json_file = 'json_global.json'
    json_data = open(json_file)
    data = json.load(json_data)
    i = 0
    index = 0
    item = 0
    cmd = " "
    ind = 0
    for node in data:
        for i in range(0, 5):
            item = data[i]['item']
            cmd = data[i]['command']
            index += 1
        print item, cmd, a, b, c, d, e

f = open("Output.txt", "r")
lines = [line.rstrip() for line in f if line != "\n"]
NUM_LISTS = 5
groups = [[] for i in range(NUM_LISTS)]
listIndex = 0
for line in lines:
    if "Transactions/Sec for Group" not in line:
        groups[listIndex].append(float(line))
        listIndex += 1
        if listIndex == NUM_LISTS:
            listIndex = 0

value0 = groups[0]
value1 = groups[1]
value2 = groups[2]
value3 = groups[3]
value4 = groups[4]
for i in range(0, 5):
    a = value0[i]
    b = value1[i]
    c = value2[i]
    d = value3[i]
    e = value4[i]
    i += 1
    report(a, b, c, d, e, index)
The JSON file looks like:
[
  {
    "item": 1,
    "command": "AA"
  },
  {
    "item": 2,
    "command": "BB"
  },
  {
    "item": 3,
    "command": "CC"
  },
  {
    "item": 4,
    "command": "DD"
  },
  {
    "item": 5,
    "command": "EE"
  }
]
The text file looks like this:
Transactions/Sec for Group = AA\CODE1\KK
1011.5032
2444.8864
2646.6893
2740.8531
2683.8178
Transactions/Sec for Group = BB\CODE1\KK
993.2360
2652.8784
3020.2740
2956.5260
3015.5910
Transactions/Sec for Group = CC\CODE1\KK
1179.5766
3271.5700
4588.2059
4174.6358
4452.6785
Transactions/Sec for Group = DD\CODE1\KK
1112.2567
3147.1466
4014.8404
3913.3806
3939.0626
Transactions/Sec for Group = EE\CODE1\KK
1205.8499
3364.8987
4401.1702
4747.4354
4765.7614
The logic in the body of the program works fine. The groups come out OK, but instead of getting items 1 to 5 from the JSON file, everything appears with item number 5 and command EE. Instead it should show items 1, 2, 3, 4, 5 with their commands.
My list 1 will have the numbers: 1011.5032, 993.2360, 1179.5766, 1112.2567, 1205.8499.
My list 2 will have the numbers: 2444.8864, 2652.8784, 3271.5700, 3147.1466, and so on.
The Python version I'm using is 2.6.
Based on your explanation it's hard to tell what you're trying to do -- do you mean the nested loop below? The inner loop executes 5 times, but in every iteration it overwrites the previous values for item and cmd.
for node in data:
    for i in range(0, 5):
        item = data[i]['item']
        cmd = data[i]['command']
        index += 1
Try printing the values each time the inner loop executes:
for node in data:
    for i in range(0, 5):
        item = data[i]['item']
        cmd = data[i]['command']
        print item, cmd
        index += 1
I think this code is your problem:
for node in data:
    for i in range(0, 5):
        item = data[i]['item']
        cmd = data[i]['command']
Item will always be "5" and command will always be "EE" after this executes. Perhaps your indents are off for the code beneath it, and that code is supposed to be within the loop?
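If the goal is one line per JSON entry paired with the i-th value of each group, here is a minimal Python 2 sketch (it assumes data and value0..value4 are built exactly as in the question's code):

# Iterate the JSON entries directly instead of overwriting item/cmd in a nested loop.
for i, node in enumerate(data):
    item = node['item']
    cmd = node['command']
    print item, cmd, value0[i], value1[i], value2[i], value3[i], value4[i]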
Using the Python/Thrift interface, I am trying to insert a SuperColumn, just like the Comments example in "WTF is a SuperColumn?".
I've gotten as far as creating the SuperColumn and figured out that I should use batch_mutate to insert it, but I don't know how to create the Mutation and set the key and SuperColumn type.
keyspace = "Keyspace1"
col1 = Column(name = "commenter", value = "J Doe", timestamp = time.time())
col2 = Column(name = "email", value = "jdoe#example.com", timestamp = time.time())
sc = SuperColumn(name = str(uuid.uuidl()), [col1, col2])
# i am guessing the missing code goes here
mutation = Mutation(column_or_supercolumn = sc?)
client.batch_mutate(keyspace, mutation, ConsistencyLevel.ZERO)
I would use pycassa or something to make life easier, but something like:
keyspace = "Keyspace1"
tableName = "Super1"
key = "jdoe"

col1 = Column(name = "commenter", value = "J Doe", timestamp = time.time())
col2 = Column(name = "email", value = "jdoe@example.com", timestamp = time.time())

newData = [Mutation(ColumnOrSuperColumn(None,
                                        SuperColumn(str(uuid.uuid1()),
                                                    [col1, col2])))]
dataMap = {key: {tableName: newData}}

client.batch_mutate(keyspace=keyspace,
                    mutation_map=dataMap,
                    consistency_level=ConsistencyLevel.ZERO)
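For comparison, here is a rough sketch of the pycassa route mentioned above, assuming a super column family Super1 in Keyspace1 with a TimeUUID comparator and a pycassa 1.x-style API (the exact calls may differ between versions):

import uuid
from pycassa.pool import ConnectionPool
from pycassa.columnfamily import ColumnFamily

pool = ConnectionPool('Keyspace1')
comments = ColumnFamily(pool, 'Super1')

# For a super column family, insert() takes {super_column_name: {column: value}}.
comments.insert('jdoe', {uuid.uuid1(): {'commenter': 'J Doe',
                                        'email': 'jdoe@example.com'}})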