Python equivalent of PHP http_build_query

Here is the PHP code that I want to write in Python.
<?php
$json = '{
"targeting": [
{
"country": {
"allow": [
"US",
"DE"
]
},
"region" : {
"allow" : {
"US" : [
33
],
"DE" : [
10383
]
}
},
"city": {
"allow": {
"US": [
57
],
"DE": [
3324
]
}
},
"os": {
"allow": [
{
"name": "Android",
"comparison": "GTE",
"version": "2.3.1"
},
{
"name": "Apple TV Software",
"comparison": "EQ",
"version": "4.4"
},
{
"name": "Windows",
"comparison": "EQ",
"version": "Vista"
}
]
},
"isp" : {
"allow" : {
"US" : [
"Att"
],
"DE" : [
"Telekom"
]
}
},
"ip": {
"allow": [
"11.12.13.0-17.18.19.22",
"6.0.0.0",
"10.0.0.0-10.0.0.2",
"11.0.0.0/24"
]
},
"device_type": [
"mobile"
],
"browser": {
"allow": [
"Yandex.Browser for iOS",
"SlimBrowser",
"Edge Mobile"
]
},
"brand": {
"allow": [
"Smartbook Entertainment",
"Walton",
"PIPO"
]
},
"sub": {
"allow": {
"1": [
"A",
"B"
]
},
"deny": {
"2": [
"C",
"D"
]
},
"deny_groups": [
{
"1": ""
},
{
"1": "X",
"2": "Y"
}
]
},
"connection": [
"wi-fi",
"cellular"
],
"block_proxy": true,
"affiliate_id": [
1
],
"url": "http://test-url.com"
}
]
}';
$arr = json_decode($json);
$postData = http_build_query($arr);
//POST SomeURLhere
echo urldecode($postData);
What I need is to send this JSON in this format:
targeting[0][country][allow][]=TR
targeting[0][os][allow][][name]=iOS
targeting[1][country][allow][]=DE
targeting[1][os][allow][][name]=iOS
I guess I need to figure out how to use http_build_query in Python.

Referring to this answer, I found the following solution:
from collections.abc import MutableMapping
from urllib.parse import urlencode, unquote

def flatten(dictionary, parent_key=False, separator='.', separator_suffix=''):
    """
    Turn a nested dictionary into a flattened dictionary
    :param dictionary: The dictionary to flatten
    :param parent_key: The string to prepend to dictionary's keys
    :param separator: The string used to separate flattened keys
    :param separator_suffix: The string appended after each flattened key segment
    :return: A flattened dictionary
    """
    items = []
    for key, value in dictionary.items():
        new_key = str(parent_key) + separator + key + separator_suffix if parent_key else key
        if isinstance(value, MutableMapping):
            items.extend(flatten(value, new_key, separator, separator_suffix).items())
        elif isinstance(value, (list, tuple)):
            for k, v in enumerate(value):
                items.extend(flatten({str(k): v}, new_key, separator, separator_suffix).items())
        else:
            items.append((new_key, value))
    return dict(items)

req = {'check': 'command',
       'parameters': ({'parameter': '1', 'description': '2'},
                      {'parameter': '3', 'description': '4'})}
req = flatten(req, False, '[', ']')
query = urlencode(req)
query_parsed = unquote(query)
print(query)
print(query_parsed)
And the outputs:
check=command&parameters%5B0%5D%5Bparameter%5D=1&parameters%5B0%5D%5Bdescription%5D=2&parameters%5B1%5D%5Bparameter%5D=3&parameters%5B1%5D%5Bdescription%5D=4
check=command&parameters[0][parameter]=1&parameters[0][description]=2&parameters[1][parameter]=3&parameters[1][description]=4
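For completeness, here is a minimal sketch of how the flattened dictionary could then be POSTed. It assumes the requests library is installed; the URL is a placeholder, not something from the question:

import requests  # assumed to be available

# req is the flattened dict produced by flatten(...) above;
# requests percent-encodes the bracketed keys, just like urlencode does
response = requests.post("http://example.com/endpoint", data=req)
print(response.status_code)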


How to update values in a nested dictionary?

I have 2 dictionaries:
data = {
"filter":
{
"and":
[
{
"or":
[
{
"and":
[
{"category": "profile", "key": "languages", "operator": "IN", "value": "EN"},
{"category": "skill", "key": "26366", "value": 100, "operator": "EQ"},
],
},
],
},
{"or": [{"category": "skill", "key": "45165", "operator": "NE"}]},
{"or": [{"category": "skill", "key": "48834", "value": 80, "operator": "GT"}]},
{"or": [{"category": "profile", "key": "gender", "operator": "EQ", "value": "FEMALE"}]},
],
},
}
new_val = {'26366': '11616', '45165': '11613', '48834': '11618'}
I want to update values in the "data" dictionary with the values from the "new_val" dictionary,
so that 26366 (in the "data" dict) becomes 11616 (from the "new_val" dict), 45165 becomes 11613, and 48834 becomes 11618.
The nesting of the "data" dictionary can vary (both deeper and shallower).
The key in the "data" dictionary can also differ; it is not always "key", it can be "skill_id", "filter_id" and so on.
And get this result:
{
"filter":
{
"and":
[
{
"or":
[
{
"and":
[
{"category": "profile", "key": "languages", "operator": "IN", "value": "EN"},
{"category": "skill", "key": "11616", "value": 100, "operator": "EQ"},
],
},
],
},
{"or": [{"category": "skill", "key": "11613", "operator": "NE"}]},
{"or": [{"category": "skill", "key": "11618", "value": 80, "operator": "GT"}]},
{"or": [{"category": "profile", "key": "gender", "operator": "EQ", "value": "FEMALE"}]},
],
},
}
To return an updated dict without modifying the old one:
def updated_in_depth(d, replace):
    if isinstance(d, dict):
        return {k: updated_in_depth(v, replace)
                for k, v in d.items()}
    elif isinstance(d, list):
        return [updated_in_depth(x, replace) for x in d]
    else:
        return replace.get(d, d)
Testing with your data and new_val:
>>> updated_in_depth(data, new_val)
{'filter': {'and': [{'or': [{'and': [
{'category': 'profile', 'key': 'languages', 'operator': 'IN', 'value': 'EN'},
{'category': 'skill', 'key': '11616', 'value': 100, 'operator': 'EQ'}]}]},
{'or': [{'category': 'skill', 'key': '11613', 'operator': 'NE'}]},
{'or': [{'category': 'skill', 'key': '11618', 'value': 80, 'operator': 'GT'}]},
{'or': [{'category': 'profile', 'key': 'gender', 'operator': 'EQ', 'value': 'FEMALE'}]}]}}
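To keep working with the updated structure afterwards, simply rebind the name, for example:

data = updated_in_depth(data, new_val)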
Use something like this to change a single known entry directly:
data['filter']['and'][0]['or'][0]['and'][1]['key'] = '11616'
To search for the keys recursively you can do:
from copy import deepcopy

def replace(d, new_vals):
    if isinstance(d, dict):
        # replace key (if there's a match):
        if "key" in d:
            d["key"] = new_vals.get(d["key"], d["key"])
        for v in d.values():
            replace(v, new_vals)
    elif isinstance(d, list):
        for v in d:
            replace(v, new_vals)

new_data = deepcopy(data)
replace(new_data, new_val)
print(new_data)
Prints:
{
"filter": {
"and": [
{
"or": [
{
"and": [
{
"category": "profile",
"key": "languages",
"operator": "IN",
"value": "EN",
},
{
"category": "skill",
"key": "11616",
"value": 100,
"operator": "EQ",
},
]
}
]
},
{"or": [{"category": "skill", "key": "11613", "operator": "NE"}]},
{
"or": [
{
"category": "skill",
"key": "11618",
"value": 80,
"operator": "GT",
}
]
},
{
"or": [
{
"category": "profile",
"key": "gender",
"operator": "EQ",
"value": "FEMALE",
}
]
},
]
}
}
If you don't need a copy of the data, you can omit the deepcopy:
replace(data, new_val)
print(data)
You can build a recursive function like this:
def walk_dict(d):
    if isinstance(d, list):
        for item in d:
            walk_dict(item)
    elif isinstance(d, dict):
        if 'key' in d and d['key'] in new_val:
            d['key'] = new_val[d['key']]
        for k, v in d.items():
            walk_dict(v)

walk_dict(data)
print(data)
As many have advised, a recursive function will do the trick:
def a(d):
    if isinstance(d, dict):    # if dictionary, apply a to all values
        d = {k: a(d[k]) for k in d.keys()}
        return d
    elif isinstance(d, list):  # if list, apply to all elements
        return [a(x) for x in d]
    else:                      # apply to d directly (it is a number, a string or a bool)
        return new_val[d] if d in new_val else d
When a is called, it checks the type of the variable d:
if d is a list, it applies a to each element of the list and returns the updated list;
if d is a dict, it applies a to all values and returns the updated dict;
otherwise, it returns the mapped new value if the old one is found among the new_val keys, or d unchanged.
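A minimal usage sketch, using the data and new_val dicts from the question:

updated = a(data)
# the skill keys have been remapped, e.g.:
print(updated['filter']['and'][0]['or'][0]['and'][1]['key'])  # '11616'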
data = {
"filter":
{
"and":
[
{
"or":
[
{
"and":
[
{"category": "profile", "key": "languages", "operator": "IN", "value": "EN"},
{"category": "skill", "key": "11616", "value": 100, "operator": "EQ"},
],
},
],
},
{"or": [{"category": "skill", "key": "11613", "operator": "NE"}]},
{"or": [{"category": "skill", "key": "11618", "value": 80, "operator": "GT"}]},
{"or": [{"category": "profile", "key": "gender", "operator": "EQ", "value": "FEMALE"}]},
],
},
}
class Replace:
    def __init__(self, data):
        self.data = data

    def start(self, d):
        data = self.data

        def replace(data):
            if type(data) == list:
                for v in data:
                    replace(v)
            if type(data) == dict:
                for k, v in data.items():
                    if type(v) in (dict, list):
                        replace(v)
                    if type(v) == str:
                        if v in d:
                            data[k] = d[v]

        replace(data)
        return data

new_data = Replace(data).start({'26366': '11616',
                                '45165': '11613',
                                '48834': '11618'})
print(new_data)

Python: Extracting nested values from a JSON string using pandas

Below is a nested json I am using:
{
"9": {
"uid": "9",
"name": "pedro",
"mail": "pedro#pedro.com",
"roles": [
"authenticated",
"administrator"
],
"user_status": "1"
},
"10": {
"uid": "10",
"name": "Rosa",
"mail": "rosa#rosa.com",
"roles": [
"authenticated",
"administrator"
],
"user_status": "1"
},
"11": {
"uid": "11",
"name": "Tania",
"mail": "tania#tania.com",
"roles": [
"authenticated",
"administrator"
],
"user_status": "1"
}
}
Each top-level key is different from the rest. I need to extract the information under each of those keys, e.g. uid, name, mail, etc., but I am not interested in the key id itself (9, 10, 11). Is there any way to achieve this without hard-coding the key ids in the code?
Below is what I’ve attempted thus far:
import json
outputuids = {
"9": {
"uid": "9",
"name": "pedro",
"mail": "pedro#pedro.com",
"roles": [
"authenticated",
"administrator"
],
"user_status": "1"
},
"10": {
"uid": "10",
"name": "Rosa",
"mail": "rosa#rosa.com",
"roles": [
"authenticated",
"administrator"
],
"user_status": "1"
},
"11": {
"uid": "11",
"name": "Tania",
"mail": "tania#tania.com",
"roles": [
"authenticated",
"administrator"
],
"user_status": "1"
}
}
data1 = json.loads(outputuids)
for i in data1:
    fuid = data1['9']['uid']
    fname = data1['9']['name']
    print(fuid + fname)
Pandas is overkill for this task. You can iterate over outputuids.values() to avoid having to explicitly refer to the keys of the dictionary:
result = []
keys_to_retain = {"uid", "name", "mail"}
for val in outputuids.values():
    result.append({k: v for k, v in val.items() if k in keys_to_retain})
print(result)
This outputs:
[
{'uid': '9', 'name': 'pedro', 'mail': 'pedro#pedro.com'},
{'uid': '10', 'name': 'Rosa', 'mail': 'rosa#rosa.com'},
{'uid': '11', 'name': 'Tania', 'mail': 'tania#tania.com'}
]
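That said, since the question title mentions pandas: if you do want a DataFrame, a minimal sketch (assuming pandas is installed) could look like this:

import pandas as pd

# the dict keys ("9", "10", "11") become the index and are simply ignored afterwards
df = pd.DataFrame.from_dict(outputuids, orient="index")
print(df[["uid", "name", "mail"]].to_dict("records"))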

Remove Dictionary from list when a condition is met with python

I have the following list:
[
{
"name": "Book1",
"details": [
{
"id": 30278752,
"isbn": " 1594634025",
"average_rating": " 3.92"
}
]
},
{
"name": "Book2",
"details": [
{
"isbn": " 1501173219",
"average_rating": "4.33 "
}
]
}
]
I want to delete the whole dictionary when there is no id in it. How can I do this, please?
I tried this solution:
final = filter(lambda x: x['id'] in details, var)
But this is not working for me.
You can use a list comprehension and do something like:
myList = [
{
"name": "Book1",
"details": [
{
"id": 30278752,
"isbn": " 1594634025",
"average_rating": " 3.92"
}
]
},
{
"name": "Book2",
"details": [
{
"isbn": " 1501173219",
"average_rating": "4.33 "
}
]
}
]
[x for x in myList if "id" in x["details"][0]]
Output
[{'details': [{'average_rating': ' 3.92',
'id': 30278752,
'isbn': ' 1594634025'}],
'name': 'Book1'}]
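If "details" can be missing, empty, or contain more than one dict, a slightly more defensive variant along the same lines (a sketch) is:

final = [book for book in myList
         if any("id" in detail for detail in book.get("details", []))]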

Parsing through Nested Json/dict in Python

I am dealing with a nasty bit of JSON. I am using json.load to read it from a file and have it stored as a dict, printed below. In Python, how would I go about getting a list of just the "dimensions" values starting after "false_value" (as the first dimension value is not actually a value I want)?
I tried a somewhat hacky way, but I feel someone may have a perspective on how to do this in a more elegant fashion.
Goal: make a list of all the dimension values (excluding the first), such as ('100', '121', ...).
{
"reports": [
{
"columnHeader": {
"dimensions": [
"ga:clientId"
],
"metricHeader": {
"metricHeaderEntries": [
{
"name": "blah",
"type": "INTEGER"
}
]
}
},
"data": {
"rows": [
{
"dimensions": [
"false_value"
],
"metrics": [
{
"values": [
"2"
]
}
]
},
{
"dimensions": [
"100"
],
"metrics": [
{
"values": [
"2"
]
}
]
},
{
"dimensions": [
"121"
],
"metrics": [
{
"values": [
"1"
]
}
]
},
{
"dimensions": [
"1212"
],
"metrics": [
{
"values": [
"1"
]
}
]
}
],
"totals": [
{
"values": [
"10497"
]
}
],
"rowCount": 9028,
"minimums": [
{
"values": [
"0"
]
}
],
"maximums": [
{
"values": [
"9"
]
}
],
"isDataGolden": true
},
"nextPageToken": "1000"
}
]
}
First, you should put your JSON object in a more readable textual form. Use something like Black to clean up the spacing.
Then just traverse the keys till you find the value you need; this post will help you.
You should end up with something like this:
dimensions = [row["dimensions"][0] for row in json["reports"][0]["data"]["rows"][1:]]
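If you prefer to filter by content rather than by position, here is a small variation as a sketch; the function name numeric_dimensions and the parameter report_response are made up for illustration, not from the question:

def numeric_dimensions(report_response):
    """Collect dimension values that are purely numeric, skipping entries like 'false_value'."""
    rows = report_response["reports"][0]["data"]["rows"]
    return [row["dimensions"][0] for row in rows if row["dimensions"][0].isdigit()]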
Using a recursive function to find values with two conditions:
the parent key was "dimensions";
take only the numeric values.
Code:
def find_dims(d, inside=False, results=None):
    '''
    Recursive processing of structure
    inside = True when parent was "dimensions"
    '''
    if results is None:
        results = []
    if isinstance(d, dict):
        for k, v in d.items():
            find_dims(v, k == "dimensions" or inside, results)
    elif isinstance(d, list):
        for k in d:
            find_dims(k, inside, results)
    else:
        if inside and d.isdigit():
            # inside dimensions with a number
            results.append(int(d))
    return results
Test
OP Dictionary (changed true to True):
d = {
"reports": [
{
"columnHeader": {
"dimensions": [
"ga:clientId"
],
"metricHeader": {
"metricHeaderEntries": [
{
"name": "blah",
"type": "INTEGER"
}
]
}
},
"data": {
"rows": [
{
"dimensions": [
"false_value"
],
"metrics": [
{
"values": [
"2"
]
}
]
},
{
"dimensions": [
"100"
],
"metrics": [
{
"values": [
"2"
]
}
]
},
{
"dimensions": [
"121"
],
"metrics": [
{
"values": [
"1"
]
}
]
},
{
"dimensions": [
"1212"
],
"metrics": [
{
"values": [
"1"
]
}
]
}, ],
"totals": [
{
"values": [
"10497"
]
}
],
"rowCount": 9028,
"minimums": [
{
"values": [
"0"
]
}
],
"maximums": [
{
"values": [
"9"
]
}
],
"isDataGolden": True
},
"nextPageToken": "1000"
}
]
}
print(find_dims(d)) # Output: [100, 121, 1212]
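If you want the values back as strings to match the stated goal ('100', '121', ...), you can convert after the fact, for example:

string_dims = [str(n) for n in find_dims(d)]  # ['100', '121', '1212']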
As stated in the comments, you can just use a simple recursive function, for example:
all_dimensions = []
search_key = 'dimensions'
def searchDimensions(data):
    if isinstance(data, dict):
        for (key, sub_data) in data.items():
            if key == search_key: all_dimensions.extend(sub_data)
            else: all_dimensions.extend(searchDimensions(sub_data))
    elif isinstance(data, list):
        for sub_data in data:
            all_dimensions.extend(searchDimensions(sub_data))
    return []

searchDimensions(example)  # 'example' here stands for the parsed JSON dict from the question (d above)
false_value_index = all_dimensions.index('false_value') + 1
output = all_dimensions[false_value_index:]
print(output)
>>> ['100', '121', '1212']
And then filter out the values that you don't want (e.g. everything up to and including false_value).

Convert float string to float in json

I have a JSON file (test.json) with the data below. I have around 10,000 records. I need to convert each value from string to float and write the result to a new file (test1.json). How can I do this in Python?
{
"name":"test001",
"cat":"test",
"loc":"x loc",
"ings":[
{
"name":"rrrrrr",
"value":"13.0"
},
{
"name":"hhhh",
"value":"18.0"
}
],
"nums":[
{
"name":"kkkk",
"value":"82.05"
},
{
"name":"uuuuu",
"value":"53.55"
}
]
},
{
"name":"test002",
"cat":"test1",
"loc":"y loc",
"ings":[
{
"name":"trtrtr",
"value":"11.0"
},
{
"name":"wewew",
"value":"19.0"
}
],
"nums":[
{
"name":"iuyt",
"value":"122.05"
},
{
"name":"oiui",
"value":"15.5"
}
]
}
The resulting JSON file (test1.json) should look like the one below:
{
"name":"test001",
"cat":"test",
"loc":"x loc",
"ings":[
{
"name":"rrrrrr",
"value":13.0
},
{
"name":"hhhh",
"value":18.0
}
],
"nums":[
{
"name":"kkkk",
"value":82.05
},
{
"name":"uuuuu",
"value":53.55
}
]
},
{
"name":"test002",
"cat":"test1",
"loc":"y loc",
"ings":[
{
"name":"trtrtr",
"value":11.0
},
{
"name":"wewew",
"value":19.0
}
],
"nums":[
{
"name":"iuyt",
"value":122.05
},
{
"name":"oiui",
"value":15.5
}
]
}
You can provide an object_hook to the json.loads method, which will allow you to modify any object (dict) found within the JSON:
import json
json_data = """
[{
"name":"test001",
"cat":"test",
"loc":"x loc",
"ings":[
{
"name":"rrrrrr",
"value":"13.0"
},
{
"name":"hhhh",
"value":"18.0"
}
],
"nums":[
{
"name":"kkkk",
"value":"82.05"
},
{
"name":"uuuuu",
"value":"53.55"
}
]
},
{
"name":"test002",
"cat":"test1",
"loc":"y loc",
"ings":[
{
"name":"trtrtr",
"value":"11.0"
},
{
"name":"wewew",
"value":"19.0"
}
],
"nums":[
{
"name":"iuyt",
"value":"122.05"
},
{
"name":"oiui",
"value":"15.5"
}
]
}]
"""
def as_float(obj):
    """Checks each dict passed to this function if it contains the key "value"

    Args:
        obj (dict): The object to decode

    Returns:
        dict: The new dictionary with changes if necessary
    """
    if "value" in obj:
        obj["value"] = float(obj["value"])
    return obj

if __name__ == '__main__':
    l = json.loads(json_data, object_hook=as_float)
    print(json.dumps(l, indent=4))
This results in what you want:
[
{
"loc": "x loc",
"ings": [
{
"name": "rrrrrr",
"value": 13.0
},
{
"name": "hhhh",
"value": 18.0
}
],
"name": "test001",
"nums": [
{
"name": "kkkk",
"value": 82.05
},
{
"name": "uuuuu",
"value": 53.55
}
],
"cat": "test"
},
{
"loc": "y loc",
"ings": [
{
"name": "trtrtr",
"value": 11.0
},
{
"name": "wewew",
"value": 19.0
}
],
"name": "test002",
"nums": [
{
"name": "iuyt",
"value": 122.05
},
{
"name": "oiui",
"value": 15.5
}
],
"cat": "test1"
}
]
To write to a file instead:
with open("out.json", "w+") as out:
json.dump(l, out, indent=4)
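Putting it together for the file names from the question, a minimal end-to-end sketch (assuming test.json contains a valid JSON array such as the one shown in json_data above):

import json

def as_float(obj):
    # convert the "value" entry of every decoded object to a float
    if "value" in obj:
        obj["value"] = float(obj["value"])
    return obj

with open("test.json") as src:
    records = json.load(src, object_hook=as_float)

with open("test1.json", "w") as dst:
    json.dump(records, dst, indent=4)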
You would need to recursively traverse the data and convert anything that looks like a float to a float:
def fix_floats(data):
    if isinstance(data, list):
        iterator = enumerate(data)
    elif isinstance(data, dict):
        iterator = data.items()
    else:
        raise TypeError("can only traverse list or dict")
    for i, value in iterator:
        if isinstance(value, (list, dict)):
            fix_floats(value)
        elif isinstance(value, str):
            try:
                data[i] = float(value)
            except ValueError:
                pass
It should do the trick:
my_data = [
    {"name": "rrrrrr",
     "value": "13.0"},
    {"name": "hhhh",
     "value": "18.0"},
]
fix_floats(my_data)
>>> my_data
[{'name': 'rrrrrr', 'value': 13.0}, {'name': 'hhhh', 'value': 18.0}]
If you have a single, flat key-value object, you can iterate over its values, which may be alphabetical or numerical strings, and check each one with str.isnumeric():
d = {'a': '100', 'b': '200', 'c': '300', 'd': 'four_hundred', 'e': '500'}
dict_parse = {k: int(v) if v.isnumeric() else v for k, v in d.items()}
>>> dict_parse
{'a': 100, 'b': 200, 'c': 300, 'd': 'four_hundred', 'e': 500}
When dealing with float numbers, amend the condition to strip the decimal point first; you can apply the same principle to negative numbers:
d = {'a': '10.0', 'b': '20.12', 'c': '300.3', 'd': 'four_hundred', 'e': '500'}
dict_parse = {k: float(v) if v.replace(".", "").isnumeric() else v for k, v in d.items()}
>>> dict_parse
{'a': 10.0, 'b': 20.12, 'c': 300.3, 'd': 'four_hundred', 'e': 500.0}
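For values that may also be negative or otherwise awkward to detect with isnumeric(), a more forgiving approach is to simply attempt the conversion; this is a sketch using a hypothetical helper named to_number:

def to_number(value):
    """Return value converted to float when possible, otherwise unchanged."""
    try:
        return float(value)
    except (TypeError, ValueError):
        return value

d = {'a': '10.0', 'b': '-20.12', 'c': 'four_hundred'}
parsed = {k: to_number(v) for k, v in d.items()}
# parsed == {'a': 10.0, 'b': -20.12, 'c': 'four_hundred'}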
