In a Django application, I want to render values from a dictionary in a result.html template:
<tbody>
{% for element in products%}
<tr>
<td>{{ element['q0']['Results'][0]['Name'] }}</td>
</tr>
{% endfor %}
</tbody>
But it raises a TemplateSyntaxError, Could not parse the remainder: '['q0']['Results'][0]['Name']' from 'element['q0']['Results'][0]['Name']'. Traceback:
return render(request, 'todo/result.html', {'products': top_products})
File "C:\Python36\lib\site-packages\django\shortcuts.py", line 19, in render
content = loader.render_to_string(template_name, context, request, using=using)
File "C:\Python36\lib\site-packages\django\template\loader.py", line 61, in render_to_string
template = get_template(template_name, using=using)
File "C:\Python36\lib\site-packages\django\template\loader.py", line 15, in get_template
return engine.get_template(template_name)
File "C:\Python36\lib\site-packages\django\template\backends\django.py", line 34, in get_template
return Template(self.engine.get_template(template_name), self)
File "C:\Python36\lib\site-packages\django\template\engine.py", line 143, in get_template
template, origin = self.find_template(template_name)
File "C:\Python36\lib\site-packages\django\template\engine.py", line 125, in find_template
template = loader.get_template(name, skip=skip)
File "C:\Python36\lib\site-packages\django\template\loaders\base.py", line 30, in get_template
contents, origin, origin.template_name, self.engine,
File "C:\Python36\lib\site-packages\django\template\base.py", line 155, in __init__
self.nodelist = self.compile_nodelist()
File "C:\Python36\lib\site-packages\django\template\base.py", line 193, in compile_nodelist
return parser.parse()
File "C:\Python36\lib\site-packages\django\template\base.py", line 478, in parse
raise self.error(token, e)
File "C:\Python36\lib\site-packages\django\template\base.py", line 476, in parse
compiled_result = compile_func(self, token)
File "C:\Python36\lib\site-packages\django\template\defaulttags.py", line 814, in do_for
nodelist_loop = parser.parse(('empty', 'endfor',))
File "C:\Python36\lib\site-packages\django\template\base.py", line 449, in parse
raise self.error(token, e)
File "C:\Python36\lib\site-packages\django\template\base.py", line 447, in parse
filter_expression = self.compile_filter(token.contents)
File "C:\Python36\lib\site-packages\django\template\base.py", line 563, in compile_filter
return FilterExpression(token, self)
File "C:\Python36\lib\site-packages\django\template\base.py", line 663, in __init__
"from '%s'" % (token[upto:], token))
django.template.exceptions.TemplateSyntaxError: Could not parse the remainder: '['q0']['Results'][0]['Name']' from 'element['q0']['Results'][0]['Name']'
The context is sent from views.py:
def getmatch(request):
    # ... code that builds top_products ...
    print(top_products[0])
    return render(request, 'todo/result.html', {'products': top_products})
Here is an example of a product, top_products[0]:
{
"q1": {
"Id": "q1",
"Limit": 20,
"Offset": 0,
"TotalResults": 0,
"Locale": "fr_FR",
"Results": [],
"Includes": {},
"HasE rrors": false,
"Errors": []
},
"q0": {
"Id": "q0",
"Limit": 10,
"Offset": 0,
"TotalResults": 1,
"Locale": "fr_FR",
"Results": [
{
"EANs": [
"8011003827336"
],
"Description": "L’aur a divine d’une femme habillée d’une essence éblouissante et sensuelle…\nEros pour Femme est le mythe signé Versace, qui déclenche la passion débordante d’Eros au pre mier regard.\n\nMais qui séduit qui ?\nEros pour Femme est une invitation à s’abandonner au désir, en osmose avec les forces de la nature apaisée.\n\nAudacieuse, cré ative et sensuelle, comme seule peut l’être la maison Versace, cette Eau de Toilette révèle une aura radieuse et une séduction irrésistible.",
"ImageUrl": "https://w ww.sephora.fr/dw/image/v2/BCVW_PRD/on/demandware.static/-/Sites-masterCatalog_Sephora/default/dw99b648b2/images/hi-res/SKU/SKU_5/359845_swatch.jpg?sw=250&sh=250&sm=f it",
"Name": "Eros pour Femme - Eau de Toilette",
"Id": "P2615007",
"CategoryId": "parfum_719097",
"BrandExternalId": "versace_c45bfd",
"Brand": {
"Id": "versace_c45b fd",
"Name": "VERSACE"
},
"Active": true,
"ProductPageUrl": "https://www.sephora.fr/p/eros-pour-femme---eau-de-toilette-359845.html",
"Disabled": false,
"ISBNs": [],
"FamilyIds": [],
"UPCs": [],
"StoryIds": [],
"ModelNumbers": [],
"Attributes": {},
"QuestionIds": [],
"AttributesOrder": [],
"ReviewIds": [],
"ManufacturerPartNumber s": [],
"QAStatistics": {
"QuestionHelpfulVoteCount": 0,
"FirstAnswerTime": "None",
"LastQuestionAnswerTime": "None",
"FirstQuestionTime": "None",
"FeaturedAnswerCount": 0,
"LastAnswerTime": "None",
"TagDistribution": {},
"ContextDataDistribution": {},
"TotalAnswerCount": 0,
"FeaturedQuestionCount": 0,
"LastQuestionTime": "None",
"Question NotHelpfulVoteCount": 0,
"BestAnswerCount": 0,
"TagDistributionOrder": [],
"AnswerHelpfulVoteCount": 0,
"HelpfulVoteCount": 0,
"AnswerNotHelpfulVoteCount": 0,
"Total QuestionCount": 0,
"ContextDataDistributionOrder": []
},
"TotalQuestionCount": 0,
"TotalAnswerCount": 0,
"ReviewStatistics": {
"ContextDataDistributionOrder": [
"Gender ",
"Age",
"Eyes",
"Skin",
"loyalty"
],
"ContextDataDistribution": {
"Gender": {
"Id": "Gender",
"Values": [
{
"Count": 7,
"Value": "Female"
}
]
},
"Age": {
"Id": "Age",
"Valu es": [
{
"Count": 1,
"Value": "13to17"
},
{
"Count": 2,
"Value": "18to24"
},
{
"Count": 1,
"Value": "25to34"
},
{
"Count": 1,
"Value": "35to44"
},
{
"Count": 1,
"Value": "45to 54"
},
{
"Count": 1,
"Value": "plus54"
}
]
},
"Eyes": {
"Id": "Eyes",
"Values": [
{
"Count": 2,
"Value": "Marrons"
},
{
"Count": 3,
"Value": "Bleus"
},
{
"Count": 1,
"Value": "N oirs"
}
]
},
"Skin": {
"Id": "Skin",
"Values": [
{
"Count": 1,
"Value": "Normale"
},
{
"Count": 2,
"Value": "Seche"
},
{
"Count": 2,
"Value": "Mixte"
},
{
"Count": 1,
"Value": " Deshydratee"
}
]
},
"loyalty": {
"Id": "loyalty",
"Values": [
{
"Count": 2,
"Value": "Yes--Im-a-VIB"
},
{
"Count": 2,
"Value": "Yes--Im-a-VIB-Rouge"
},
{
"Count": 2,
"Value": "No"
}
]
}
},
"AverageOverallRating": 4.428571428571429,
"NotHelpfulVoteCount": 1,
"FeaturedReviewCount": 0,
"NotRecommendedCount": 1,
"HelpfulVoteCount": 19,
"RatingDis tribution": [
{
"RatingValue": 5,
"Count": 5
},
{
"RatingValue": 2,
"Count": 1
},
{
"RatingValue": 4,
"Count": 1
}
],
"RecommendedCount": 5,
"RatingsOnlyReviewCount": 0,
"To talReviewCount": 7,
"FirstSubmissionTime": "2017-05-28T22:46:00.000+00:00",
"LastSubmissionTime": "2020-03-21T19:01:26.000+00:00",
"SecondaryRatingsAveragesOrder": [],
"SecondaryRatingsAverages": {},
"OverallRatingRange": 5,
"TagDistributionOrder": [],
"TagDistribution": {}
},
"TotalReviewCount": 7,
"FilteredQAStatistics": {
"Ques tionHelpfulVoteCount": 0,
"FirstAnswerTime": "None",
"LastQuestionAnswerTime": "None",
"FirstQuestionTime": "None",
"FeaturedAnswerCount": 0,
"LastAnswerTime": "None",
"TagD istribution": {},
"ContextDataDistribution": {},
"TotalAnswerCount": 0,
"FeaturedQuestionCount": 0,
"LastQuestionTime": "None",
"QuestionNotHelpfulVoteCount": 0,
"Best AnswerCount": 0,
"TagDistributionOrder": [],
"AnswerHelpfulVoteCount": 0,
"HelpfulVoteCount": 0,
"AnswerNotHelpfulVoteCount": 0,
"TotalQuestionCount": 0,
"ContextDat aDistributionOrder": []
},
"FilteredReviewStatistics": {
"ContextDataDistributionOrder": [
"Gender",
"Age",
"Eyes",
"Skin",
"loyalty"
],
"ContextDataDistribution": {
"Gen der": {
"Id": "Gender",
"Values": [
{
"Count": 7,
"Value": "Female"
}
]
},
"Age": {
"Id": "Age",
"Values": [
{
"Count": 1,
"Value": "13to17"
},
{
"Count": 2,
"Value": "18to24"
},
{
"Count": 1,
"Value": "25to34"
},
{
"Count": 1,
"Value": "35to44"
},
{
"Count": 1,
"Value": "45to54"
},
{
"Count": 1,
"Value": "plus54"
}
]
},
"Eyes": {
"Id": "Eyes",
"Value s": [
{
"Count": 2,
"Value": "Marrons"
},
{
"Count": 3,
"Value": "Bleus"
},
{
"Count": 1,
"Value": "Noirs"
}
]
},
"Skin": {
"Id": "Skin",
"Values": [
{
"Count": 1,
"Value": "Nor male"
},
{
"Count": 2,
"Value": "Seche"
},
{
"Count": 2,
"Value": "Mixte"
},
{
"Count": 1,
"Value": "Deshydratee"
}
]
},
"loyalty": {
"Id": "loyalty",
"Values": [
{
"Count": 2,
"Value": "Yes--Im-a-VIB"
},
{
"Count": 2,
"Value": "Yes--Im-a-VIB-Rouge"
},
{
"Count": 2,
"Value": "No"
}
]
}
},
"AverageOverallRating": 4.428571428571429,
"NotHelpfulVoteCo unt": 1,
"FeaturedReviewCount": 0,
"NotRecommendedCount": 1,
"HelpfulVoteCount": 19,
"RatingDistribution": [
{
"RatingValue": 5,
"Count": 5
},
{
"RatingValue": 2,
"Count ": 1
},
{
"RatingValue": 4,
"Count": 1
}
],
"RecommendedCount": 5,
"RatingsOnlyReviewCount": 0,
"TotalReviewCount": 7,
"FirstSubmissionTime": "2017-05-28T22:46:00.000+00 :00",
"LastSubmissionTime": "2020-03-21T19:01:26.000+00:00",
"SecondaryRatingsAveragesOrder": [],
"SecondaryRatingsAverages": {},
"OverallRatingRange": 5,
"TagDistri butionOrder": [],
"TagDistribution": {}
}
}
],
"Includes": {},
"HasErrors": false,
"Errors": []
},
"d": {
"attributs": {
"Doux": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Délicat": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Elegant": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Mature": {
"claimed_benefit": 0,
" perceived_benefit": 0
},
"Sexy": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Féminin": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Frais": {
"claimed_ benefit": 0,
"perceived_benefit": 0.14285714285714285
},
"Classe": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Mou": {
"claimed_benefit": 0,
"perceived_benefit": 0.14285714285714285
},
"Décontracté": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Comme les autres": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Jeu ne femme": {
"claimed_benefit": 1,
"perceived_benefit": 0.14285714285714285
},
"charmant": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Gai": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Propre": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Eté": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Rafraîchissant ": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Chaud": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Masculin": {
"claimed_benefit": 0,
"perceived_benefit ": 0
},
"Fiable": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Mystérieux": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Furtif": {
"claimed_benefit": 0,
"perceived_benefit": 0.14285714285714285
},
"Fort": {
"claimed_benefit": 0,
"perceived_benefit": 0.14285714285714285
},
"Hivernal": {
"claimed_benefit": 0,
"perceived_ benefit": 0
},
"Herbacé": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Plantes": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Big brands": {
"claimed_be nefit": 0,
"perceived_benefit": 0
},
"Luxueux": {
"claimed_benefit": 0,
"perceived_benefit": 0
},
"Connu": {
"claimed_benefit": 0,
"perceived_benefit": 0.2857142857142857
},
"A la mode": {
"claimed_benefit": 0,
"perceived_benefit": 0
}
}
},
"total": 0
}
Django's template language does not support square-bracket subscripting; it uses dot lookups instead, which try dictionary keys, attributes/methods and numeric indices in turn. So the expression should be: {{ element.q0.Results.0.Name }}
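Applied to the loop from the question, the template becomes:
<tbody>
{% for element in products %}
<tr>
<td>{{ element.q0.Results.0.Name }}</td>
</tr>
{% endfor %}
</tbody>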
Related question:
movies={
'actors':{'prabhas':{'knownAs':'Darling', 'awards':{'nandi':1, 'cinemaa':1, 'siima':1},'remuneration':100, 'hits':{'industry':2, 'super':3,'flops':8}, 'age':41, 'height':6.1, 'mStatus':'single','sRate':'35%'},
'pavan':{'knownAs':'Power Star', 'awards':{'nandi':2, 'cinemaa':2, 'siima':5}, 'hits':{'industry':2, 'super':7,'flops':16}, 'age':48, 'height':5.9, 'mStatus':'married','sRate':'37%','remuneration':50},
},
'actress':{
'tamanna':{'knownAs':'Milky Beauty', 'awards':{'nandi':0, 'cinemaa':1, 'siima':1}, 'remuneration':10, 'hits':{'industry':1, 'super':7,'flops':11}, 'age':28, 'height':5.9, 'mStatus':'single', 'sRate':'40%'},
'rashmika':{'knownAs':'Butter Milky Beauty', 'awards':{'nandi':0, 'cinemaa':0, 'siima':2}, 'remuneration':12,'hits':{'industry':0, 'super':4,'flops':2}, 'age':36, 'height':5.9, 'mStatus':'single', 'sRate':'30%'},
    },
}
1. What is the total number of Nandi Awards won by the actors?
2. What is the success rate of Prince?
3. What is the name of Prince?
You can answer the first question with this:
import jmespath
movies={
"actors": {
"prabhas": {
"knownAs": "Darling",
"awards": {
"nandi": 1,
"cinemaa": 1,
"siima": 1
},
"remuneration": 100,
"hits": {
"industry": 2,
"super": 3,
"flops": 8
},
"age": 41,
"height": 6.1,
"mStatus": "single",
"sRate": "35%"
},
"pavan": {
"knownAs": "Power Star",
"awards": {
"nandi": 2,
"cinemaa": 2,
"siima": 5
},
"hits": {
"industry": 2,
"super": 7,
"flops": 16
},
"age": 48,
"height": 5.9,
"mStatus": "married",
"sRate": "37%",
"remuneration": 50
}
},
"actress": {
"tamanna": {
"knownAs": "Milky Beauty",
"awards": {
"nandi": 0,
"cinemaa": 1,
"siima": 1
},
"remuneration": 10,
"hits": {
"industry": 1,
"super": 7,
"flops": 11
},
"age": 28,
"height": 5.9,
"mStatus": "single",
"sRate": "40%"
},
"rashmika": {
"knownAs": "Butter Milky Beauty",
"awards": {
"nandi": 0,
"cinemaa": 0,
"siima": 2
},
"remuneration": 12,
"hits": {
"industry": 0,
"super": 4,
"flops": 2
},
"age": 36,
"height": 5.9,
"mStatus": "single",
"sRate": "30%"
}
}
}
total_nandies_by_actors = sum(jmespath.search('[]',jmespath.search('actors.*.*.nandi',movies)))
But there is no Prince in the data you've provided, so questions 2 and 3 cannot be answered from it.
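If you substitute an actor that does exist for the hypothetical Prince (prabhas, for example), the same kind of JMESPath expressions answer questions 2 and 3:
import jmespath

# question 2, with prabhas standing in for Prince
success_rate = jmespath.search('actors.prabhas.sRate', movies)    # '35%'

# question 3, same substitution
known_as = jmespath.search('actors.prabhas.knownAs', movies)      # 'Darling'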
I have the following Dataframe with MultiIndex rows in pandas.
time available_slots status
month day
1 1 10:00:00 1 AVAILABLE
1 12:00:00 1 AVAILABLE
1 14:00:00 1 AVAILABLE
1 16:00:00 1 AVAILABLE
1 18:00:00 1 AVAILABLE
2 10:00:00 1 AVAILABLE
... ... ... ...
2 28 12:00:00 1 AVAILABLE
28 14:00:00 1 AVAILABLE
28 16:00:00 1 AVAILABLE
28 18:00:00 1 AVAILABLE
28 20:00:00 1 AVAILABLE
And I need to transform it into a hierarchical nested JSON like this:
[
{
"month": 1,
"days": [
{
"day": 1,
"slots": [
{
"time": "10:00:00",
"available_slots": 1,
"status": "AVAILABLE"
},
{
"time": "12:00:00",
"available_slots": 1,
"status": "AVAILABLE"
},
...
]
},
{
"day": 2,
"slots": [
...
]
}
]
},
{
"month": 2,
"days":[
{
"day": 1,
"slots": [
...
]
}
]
},
...
]
Unfortunately, it is not as easy as doing df.to_json(orient="index").
Does anyone know if there is a method in pandas to perform this kind of transformation, or how I could iterate over the DataFrame to build the final object?
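(For reference, a small DataFrame with this shape can be built as below to try the answers; the exact values are an assumption based on the excerpt above.)
import pandas as pd

index = pd.MultiIndex.from_tuples(
    [(1, 1), (1, 1), (1, 2), (2, 28), (2, 28)], names=["month", "day"]
)
df = pd.DataFrame(
    {
        "time": ["10:00:00", "12:00:00", "10:00:00", "12:00:00", "14:00:00"],
        "available_slots": [1, 1, 1, 1, 1],
        "status": ["AVAILABLE"] * 5,
    },
    index=index,
)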
Here's one way. Basically repeated groupby + apply(to_dict) + reset_index until we get the desired shape:
out = (df.groupby(level=[0, 1])
         .apply(lambda x: x.to_dict('records'))
         .reset_index()
         .rename(columns={0: 'slots'})
         .groupby('month')
         .apply(lambda x: x[['day', 'slots']].to_dict('records'))
         .reset_index()
         .rename(columns={0: 'days'})
         .to_json(orient='records', indent=True)
      )
Output:
[
{
"month":1,
"days":[
{
"day":1,
"slots":[
{
"time":"10:00:00",
"available_slots":1,
"status":"AVAILABLE"
},
{
"time":"12:00:00",
"available_slots":1,
"status":"AVAILABLE"
},
{
"time":"14:00:00",
"available_slots":1,
"status":"AVAILABLE"
},
{
"time":"16:00:00",
"available_slots":1,
"status":"AVAILABLE"
},
{
"time":"18:00:00",
"available_slots":1,
"status":"AVAILABLE"
}
]
},
{
"day":2,
"slots":[
{
"time":"10:00:00",
"available_slots":1,
"status":"AVAILABLE"
}
]
}
]
},
{
"month":2,
"days":[
{
"day":28,
"slots":[
{
"time":"12:00:00",
"available_slots":1,
"status":"AVAILABLE"
},
{
"time":"14:00:00",
"available_slots":1,
"status":"AVAILABLE"
},
{
"time":"16:00:00",
"available_slots":1,
"status":"AVAILABLE"
},
{
"time":"18:00:00",
"available_slots":1,
"status":"AVAILABLE"
},
{
"time":"20:00:00",
"available_slots":1,
"status":"AVAILABLE"
}
]
}
]
}
]
You can use a double loop for each level of your index:
data = []
for month, df1 in df.groupby(level=0):
    data.append({'month': month, 'days': []})
    for day, df2 in df1.groupby(level=1):
        data[-1]['days'].append({'day': day, 'slots': df2.to_dict('records')})
Output:
import json
print(json.dumps(data, indent=2))
[
{
"month": 1,
"days": [
{
"day": 1,
"slots": [
{
"time": "10:00:00",
"available_slots": 1,
"status": "AVAILABLE"
},
{
"time": "12:00:00",
"available_slots": 1,
"status": "AVAILABLE"
},
{
"time": "14:00:00",
"available_slots": 1,
"status": "AVAILABLE"
},
{
"time": "16:00:00",
"available_slots": 1,
"status": "AVAILABLE"
},
{
"time": "18:00:00",
"available_slots": 1,
"status": "AVAILABLE"
}
]
},
{
"day": 2,
"slots": [
{
"time": "10:00:00",
"available_slots": 1,
"status": "AVAILABLE"
}
]
}
]
},
{
"month": 2,
"days": [
{
"day": 28,
"slots": [
{
"time": "12:00:00",
"available_slots": 1,
"status": "AVAILABLE"
},
{
"time": "14:00:00",
"available_slots": 1,
"status": "AVAILABLE"
},
{
"time": "18:00:00",
"available_slots": 1,
"status": "AVAILABLE"
},
{
"time": "20:00:00",
"available_slots": 1,
"status": "AVAILABLE"
}
]
}
]
}
]
I have a LinkedIn dataset of follower statistics in the following JSON (many key-value pairs removed for readability). Each key has a different number of nested key-value pairs.
Can somebody help convert this to CSV output using Python?
{
"paging": { "start": 0, "count": 10, "links": [] },
"elements": [
{
"followerCountsByAssociationType": [
{
"followerCounts": {
"organicFollowerCount": 2775,
"paidFollowerCount": 0
}
},
{
"followerCounts": {
"organicFollowerCount": 13,
"paidFollowerCount": 0
},
"associationType": "EMPLOYEE"
}
],
"followerCountsByRegion": [
{
"region": "urn:li:region:7312",
"followerCounts": {
"organicFollowerCount": 2,
"paidFollowerCount": 0
}
},
{
"region": "urn:li:region:6981",
"followerCounts": {
"organicFollowerCount": 2,
"paidFollowerCount": 0
}
},
{
"region": "urn:li:region:620",
"followerCounts": {
"organicFollowerCount": 2,
"paidFollowerCount": 0
}
}
],
"followerCountsBySeniority": [
{
"followerCounts": {
"organicFollowerCount": 12,
"paidFollowerCount": 0
},
"seniority": "urn:li:seniority:8"
},
{
"followerCounts": {
"organicFollowerCount": 5,
"paidFollowerCount": 0
},
"seniority": "urn:li:seniority:9"
},
{
"followerCounts": {
"organicFollowerCount": 2,
"paidFollowerCount": 0
},
"seniority": "urn:li:seniority:1"
}
],
"followerCountsByIndustry": [
{
"followerCounts": {
"organicFollowerCount": 1,
"paidFollowerCount": 0
},
"industry": "urn:li:industry:51"
},
{
"followerCounts": {
"organicFollowerCount": 1,
"paidFollowerCount": 0
},
"industry": "urn:li:industry:74"
},
{
"followerCounts": {
"organicFollowerCount": 1,
"paidFollowerCount": 0
},
"industry": "urn:li:industry:77"
},
{
"followerCounts": {
"organicFollowerCount": 1,
"paidFollowerCount": 0
},
"industry": "urn:li:industry:78"
}
],
"followerCountsByFunction": [
{
"followerCounts": {
"organicFollowerCount": 3,
"paidFollowerCount": 0
},
"function": "urn:li:function:14"
},
{
"followerCounts": {
"organicFollowerCount": 3,
"paidFollowerCount": 0
},
"function": "urn:li:function:21"
},
{
"followerCounts": {
"organicFollowerCount": 2,
"paidFollowerCount": 0
},
"function": "urn:li:function:11"
},
{
"followerCounts": {
"organicFollowerCount": 2,
"paidFollowerCount": 0
},
"function": "urn:li:function:17"
},
{
"followerCounts": {
"organicFollowerCount": 2,
"paidFollowerCount": 0
},
"function": "urn:li:function:1"
}
],
"followerCountsByStaffCountRange": [
{
"followerCounts": {
"organicFollowerCount": 267,
"paidFollowerCount": 0
},
"staffCountRange": "SIZE_1001_TO_5000"
},
{
"followerCounts": {
"organicFollowerCount": 185,
"paidFollowerCount": 0
},
"staffCountRange": "SIZE_201_TO_500"
},
{
"followerCounts": {
"organicFollowerCount": 131,
"paidFollowerCount": 0
},
"staffCountRange": "SIZE_501_TO_1000"
},
{
"followerCounts": {
"organicFollowerCount": 81,
"paidFollowerCount": 0
},
"staffCountRange": "SIZE_5001_TO_10000"
},
{
"followerCounts": {
"organicFollowerCount": 74,
"paidFollowerCount": 0
},
"staffCountRange": "SIZE_2_TO_10"
},
{
"followerCounts": {
"organicFollowerCount": 10,
"paidFollowerCount": 0
},
"staffCountRange": "SIZE_1"
}
],
"followerCountsByCountry": [
{
"followerCounts": {
"organicFollowerCount": 1,
"paidFollowerCount": 0
},
"country": "urn:li:country:es"
},
{
"followerCounts": {
"organicFollowerCount": 1,
"paidFollowerCount": 0
},
"country": "urn:li:country:ph"
},
{
"followerCounts": {
"organicFollowerCount": 1,
"paidFollowerCount": 0
},
"country": "urn:li:country:ng"
}
],
"organizationalEntity": "urn:li:organization:28849398"
}
]
}
I tried using json_normalize(data['elements']), but that leaves the inner lists unflattened.
I am sure that there must be some parameter in json_normalize() that can simplify the inner nesting.
The desired output is as follows:
Columns:
  FollowerCountsByAssociation_organicFollowerCount, FollowerCountsByAssociation_paidFollowerCount, AssociationType,
  Region, Region_organicFollowerCount, Region_paidFollowerCount, ...
Example rows:
  2775, 0, Employee, urn:li:region:7312, 2, 0
  null, null, null, urn:li:region:6981, 2, 0
  .......And so on
I have only sketched a small part of the output; the columns continue for every entry in each list, and the columns that do not apply to a given row will be null.
Would appreciate any help possible! Thanks!
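A minimal sketch of what json_normalize can do here, assuming the JSON above has been loaded into data; record_path flattens one nested list at a time, so each followerCountsBy* list becomes its own frame that can be exported or merged:
import pandas as pd

# flatten the by-region list; sep="_" turns the nested followerCounts dict into columns
regions = pd.json_normalize(
    data["elements"],
    record_path="followerCountsByRegion",
    meta="organizationalEntity",
    sep="_",
)
# columns: region, followerCounts_organicFollowerCount,
#          followerCounts_paidFollowerCount, organizationalEntity
regions.to_csv("followers_by_region.csv", index=False)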
I have a big JSON file with a very complex structure.
You can look at it here: https://drive.google.com/file/d/1tBVJ2xYSCpTTUGPJegvAz2ZXbeN0bteX/view?usp=sharing
It contains more than 7 million lines, and I want to extract only the "text" field.
I have written Python code to extract all the values of the "text" key in the whole file, but it extracted only 12 values, while when I open the JSON file in Visual Studio I can see more than 19,000 values.
You can see the code here:
import json
import csv

with open("/Users/zahraa-maher/rasa-init-demo/venv/Tickie/external_data/frames2.json") as file:
    data = json.load(file)

fname = "outputText8.csv"
with open(fname, "w") as file:
    csv_file = csv.writer(file, lineterminator='\n')
    csv_file.writerow(["text"])
    for item in data[i]["turns"]:
        csv_file.writerow([item['text']])
Please take a look at the JSON file itself, as it is a very large file with a complex structure, so I cannot paste it all here.
This is also a part of the JSON file:
[
{
"user_id": "U22HTHYNP",
"turns": [
{
"text": "I'd like to book a trip to Atlantis from Caprica on Saturday, August 13, 2016 for 8 adults. I have a tight budget of 1700.",
"labels": {
"acts": [
{
"args": [
{
"val": "book",
"key": "intent"
}
],
"name": "inform"
},
{
"args": [
{
"val": "Atlantis",
"key": "dst_city"
},
{
"val": "Caprica",
"key": "or_city"
},
{
"val": "Saturday, August 13, 2016",
"key": "str_date"
},
{
"val": "8",
"key": "n_adults"
},
{
"val": "1700",
"key": "budget"
}
],
"name": "inform"
}
],
"acts_without_refs": [
{
"args": [
{
"val": "book",
"key": "intent"
}
],
"name": "inform"
},
{
"args": [
{
"val": "Atlantis",
"key": "dst_city"
},
{
"val": "Caprica",
"key": "or_city"
},
{
"val": "Saturday, August 13, 2016",
"key": "str_date"
},
{
"val": "8",
"key": "n_adults"
},
{
"val": "1700",
"key": "budget"
}
],
"name": "inform"
}
],
"active_frame": 1,
"frames": [
{
"info": {
"intent": [
{
"val": "book",
"negated": false
}
],
"budget": [
{
"val": "1700.0",
"negated": false
}
],
"dst_city": [
{
"val": "Atlantis",
"negated": false
}
],
"or_city": [
{
"val": "Caprica",
"negated": false
}
],
"str_date": [
{
"val": "august 13",
"negated": false
}
],
"n_adults": [
{
"val": "8",
"negated": false
}
]
},
"frame_id": 1,
"requests": [],
"frame_parent_id": null,
"binary_questions": [],
"compare_requests": []
}
]
},
"author": "user",
"timestamp": 1471272019730.0
},
{
"db": {
"result": [
[
{
"trip": {
"returning": {
"duration": {
"hours": 0,
"min": 51
},
"arrival": {
"hour": 10,
"year": 2016,
"day": 24,
"min": 51,
"month": 8
},
"departure": {
"hour": 10,
"year": 2016,
"day": 24,
"min": 0,
"month": 8
}
},
"seat": "ECONOMY",
"leaving": {
"duration": {
"hours": 0,
"min": 51
},
"arrival": {
"hour": 0,
"year": 2016,
"day": 16,
"min": 51,
"month": 8
},
"departure": {
"hour": 0,
"year": 2016,
"day": 16,
"min": 0,
"month": 8
}
},
"or_city": "Porto Alegre",
"duration_days": 9
},
"price": 2118.81,
"hotel": {
"gst_rating": 7.15,
"vicinity": [],
"name": "Scarlet Palms Resort",
"country": "Brazil",
"amenities": [
"FREE_BREAKFAST",
"FREE_PARKING",
"FREE_WIFI"
],
"dst_city": "Goiania",
"category": "3.5 star hotel"
}
},
{
"trip": {
"returning": {
"duration": {
"hours": 2,
"min": 37
},
"arrival": {
"hour": 12,
"year": 2016,
"day": 10,
"min": 37,
"month": 8
},
"departure": {
"hour": 10,
"year": 2016,
"day": 10,
"min": 0,
"month": 8
}
},
"seat": "ECONOMY",
"leaving": {
"duration": {
"hours": 2,
"min": 37
},
"arrival": {
"hour": 0,
"year": 2016,
"day": 4,
"min": 37,
"month": 8
},
"departure": {
"hour": 22,
"year": 2016,
"day": 3,
"min": 0,
"month": 8
}
},
"or_city": "Porto Alegre",
"duration_days": 7
},
"price": 2369.83,
"hotel": {
"gst_rating": 0,
"vicinity": [],
"name": "Sunway Hostel",
"country": "Argentina",
"amenities": [
"FREE_BREAKFAST",
"FREE_WIFI"
],
"dst_city": "Rosario",
"category": "2.0 star hotel"
}
},
{
"trip": {
"returning": {
"duration": {
"hours": 0,
"min": 51
},
"arrival": {
"hour": 10,
"year": 2016,
"day": 24,
"min": 51,
"month": 8
},
"departure": {
"hour": 10,
"year": 2016,
"day": 24,
"min": 0,
"month": 8
}
},
"seat": "BUSINESS",
"leaving": {
"duration": {
"hours": 0,
"min": 51
},
"arrival": {
"hour": 0,
"year": 2016,
"day": 16,
"min": 51,
"month": 8
},
"departure": {
"hour": 0,
"year": 2016,
"day": 16,
"min": 0,
"month": 8
}
},
"or_city": "Porto Alegre",
"duration_days": 9
},
"price": 2375.72,
"hotel": {
"gst_rating": 7.15,
"vicinity": [],
"name": "Scarlet Palms Resort",
"country": "Brazil",
"amenities": [
"FREE_BREAKFAST",
"FREE_PARKING",
"FREE_WIFI"
],
"dst_city": "Goiania",
"category": "3.5 star hotel"
}
},
{
"trip": {
"returning": {
"duration": {
"hours": 1,
"min": 30
},
"arrival": {
"hour": 11,
"year": 2016,
"day": 1,
"min": 30,
"month": 9
},
"departure": {
"hour": 10,
"year": 2016,
"day": 1,
"min": 0,
"month": 9
}
},
"seat": "BUSINESS",
"leaving": {
"duration": {
"hours": 1,
"min": 30
},
"arrival": {
"hour": 18,
"year": 2016,
"day": 19,
"min": 30,
"month": 8
},
"departure": {
"hour": 17,
"year": 2016,
"day": 19,
"min": 0,
"month": 8
}
},
"or_city": "Porto Alegre",
"duration_days": 13
},
"price": 2492.95,
"hotel": {
"gst_rating": 0,
"vicinity": [],
"name": "Hotel Mundo",
"country": "Brazil",
"amenities": [
"FREE_BREAKFAST",
"FREE_WIFI",
"FREE_PARKING"
],
"dst_city": "Manaus",
"category": "2.5 star hotel"
}
},
{
"trip": {
"returning": {
"duration": {
"hours": 0,
"min": 51
},
"arrival": {
"hour": 10,
"year": 2016,
"day": 31,
"min": 51,
"month": 8
},
"departure": {
"hour": 10,
"year": 2016,
"day": 31,
"min": 0,
"month": 8
}
},
"seat": "ECONOMY",
"leaving": {
"duration": {
"hours": 0,
"min": 51
},
"arrival": {
"hour": 19,
"year": 2016,
"day": 27,
"min": 51,
"month": 8
},
"departure": {
"hour": 19,
"year": 2016,
"day": 27,
"min": 0,
"month": 8
}
},
"or_city": "Porto Alegre",
"duration_days": 4
},
"price": 2538.0,
"hotel": {
"gst_rating": 8.22,
"vicinity": [],
"name": "The Glee",
"country": "Brazil",
"amenities": [
"FREE_BREAKFAST",
"FREE_WIFI"
],
"dst_city": "Recife",
"category": "4.0 star hotel"
}
}
],
[],
[],
[],
[],
[],
[]
],
"search": [
{
"ORIGIN_CITY": "Porto Alegre",
"PRICE_MIN": "2000",
"NUM_ADULTS": "2",
"timestamp": 1471271949.995,
"PRICE_MAX": "3000",
"ARE_DATES_FLEXIBLE": "true",
"NUM_CHILDREN": "5",
"START_TIME": "1470110400000",
"MAX_DURATION": 2592000000.0,
"DESTINATION_CITY": "Brazil",
"RESULT_LIMIT": "10",
"END_TIME": "1472616000000"
},
{
"ORIGIN_CITY": "Atlantis",
"NUM_ADULTS": "8",
"RESULT_LIMIT": "10",
"timestamp": 1471272148.124,
"PRICE_MAX": "1700",
"NUM_CHILDREN": "",
"ARE_DATES_FLEXIBLE": "true",
"START_TIME": "NaN",
"END_TIME": "NaN"
},
{
"ORIGIN_CITY": "Caprica",
"PRICE_MAX": "1700",
"NUM_ADULTS": "8",
"RESULT_LIMIT": "10",
"timestamp": 1471272189.07,
"DESTINATION_CITY": "Atlantis",
"NUM_CHILDREN": "",
"ARE_DATES_FLEXIBLE": "true",
"START_TIME": "1470715200000",
"END_TIME": "1472011200000"
},
{
"ORIGIN_CITY": "Caprica",
"PRICE_MAX": "1700",
"NUM_ADULTS": "8",
"RESULT_LIMIT": "10",
"timestamp": 1471272205.436,
"DESTINATION_CITY": "Atlantis",
"NUM_CHILDREN": "",
"ARE_DATES_FLEXIBLE": "true",
"START_TIME": "1470715200000",
"END_TIME": "1472011200000"
},
{
"ORIGIN_CITY": "Caprica",
"PRICE_MIN": "1700",
"NUM_ADULTS": "8",
"RESULT_LIMIT": "10",
"timestamp": 1471272278.72,
"DESTINATION_CITY": "Atlantis",
"NUM_CHILDREN": "",
"ARE_DATES_FLEXIBLE": "true",
"START_TIME": "1470715200000",
"END_TIME": "1472011200000"
},
{
"ORIGIN_CITY": "Caprica",
"PRICE_MIN": "1700",
"NUM_ADULTS": "8",
"RESULT_LIMIT": "10",
"timestamp": 1471272454.542,
"DESTINATION_CITY": "Atlantis",
"NUM_CHILDREN": "",
"ARE_DATES_FLEXIBLE": "true",
"START_TIME": "1471060800000",
"END_TIME": "1472011200000"
},
{
"ORIGIN_CITY": "Caprica",
"PRICE_MIN": "1700",
"NUM_ADULTS": "8",
"RESULT_LIMIT": "10",
"timestamp": 1471272466.008,
"DESTINATION_CITY": "Atlantis",
"NUM_CHILDREN": "",
"ARE_DATES_FLEXIBLE": "true",
"START_TIME": "1471060800000",
"END_TIME": "1472011200000"
}
]
},
How could it be modified to extract all the "text" values from the JSON file into a CSV file?
This is a potential solution using pandas:
import pandas as pd

# importing data
dj = pd.read_json("frames2.json")
dtext = dj[["user_id", "turns"]]

# saving text records in a list
list_ = []
for record in dtext["turns"].values:
    for r in record:
        list_.append(r["text"])

# exporting the csv
out = pd.Series(list_, name="text")
out.to_csv("text.csv")
It writes the extracted "text" values, one per row, to text.csv.
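If some turns in the full file have no "text" key (the excerpt above is truncated, so this is only an assumption), a .get guard avoids a KeyError:
for record in dtext["turns"].values:
    for r in record:
        list_.append(r.get("text", ""))  # empty string when a turn has no "text"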
Try:
import json
import csv

with open("/Users/zahraa-maher/rasa-init-demo/venv/Tickie/external_data/frames2.json") as file:
    data = json.load(file)

fname = "outputText8.csv"
with open(fname, "w") as file:
    csv_file = csv.writer(file, lineterminator='\n')
    csv_file.writerow(["text"])
    for item in data:                    # the top level of the file is a list
        for key, value in item.items():  # e.g. "user_id", "turns", ...
            pass                         # decide here which of the fields to save
Now it is up to you which of the fields you want to save; if you use a debugger you can see the keys and values.
I'm trying to get a specific value from a JSON object in Python. Before, I could use something like:
data['data']['data2']
to get the value associated with data2, but this is a little different; my JSON file is now more complex and looks like this:
{
"data": {
"playerStatSummaries": {
"playerStatSummarySet": [
{
"aggregatedStats": {
"stats": []
},
"leaves": 0,
"losses": 0,
"maxRating": 0,
"modifyDate": "/Date(1357567398182)/",
"playerStatSummaryType": "Unranked3x3",
"rating": 400,
"wins": 5
},
{
"aggregatedStats": {
"stats": []
},
"leaves": 0,
"losses": 0,
"maxRating": 0,
"modifyDate": "/Date(1357567398182)/",
"playerStatSummaryType": "AramUnranked6x6",
"rating": 400,
"wins": 0
},
{
"aggregatedStats": {
"stats": [
{
"statType": "TOTAL_CHAMPION_KILLS",
"value": 68
},
{
"statType": "TOTAL_ASSISTS",
"value": 116
},
{
"statType": "TOTAL_MINION_KILLS",
"value": 1854
},
{
"statType": "TOTAL_TURRETS_KILLED",
"value": 22
},
{
"statType": "TOTAL_NEUTRAL_MINIONS_KILLED",
"value": 359
}
]
},
"leaves": 0,
"losses": 5,
"maxRating": 1505,
"modifyDate": "/Date(1357261303440)/",
"playerStatSummaryType": "RankedSolo5x5",
"rating": 1505,
"wins": 9
},
{
"aggregatedStats": {
"stats": [
{
"statType": "TOTAL_CHAMPION_KILLS",
"value": 369
},
{
"statType": "TOTAL_ASSISTS",
"value": 535
},
{
"statType": "TOTAL_MINION_KILLS",
"value": 9917
},
{
"statType": "TOTAL_TURRETS_KILLED",
"value": 78
},
{
"statType": "TOTAL_NEUTRAL_MINIONS_KILLED",
"value": 1050
}
]
},
"leaves": 0,
"losses": 35,
"maxRating": 1266,
"modifyDate": "/Date(1323496849000)/",
"playerStatSummaryType": "RankedTeam5x5",
"rating": 1266,
"wins": 39
},
{
"aggregatedStats": {
"stats": [
{
"statType": "TOTAL_CHAMPION_KILLS",
"value": 29
},
{
"statType": "TOTAL_ASSISTS",
"value": 17
},
{
"statType": "TOTAL_MINION_KILLS",
"value": 176
},
{
"statType": "TOTAL_TURRETS_KILLED",
"value": 8
},
{
"statType": "TOTAL_NEUTRAL_MINIONS_KILLED",
"value": 12
}
]
},
"leaves": 0,
"losses": 0,
"maxRating": 1200,
"modifyDate": "/Date(1326521499000)/",
"playerStatSummaryType": "CoopVsAI",
"rating": 1200,
"wins": 2
},
{
"aggregatedStats": {
"stats": [
{
"statType": "TOTAL_CHAMPION_KILLS",
"value": 150
},
{
"statType": "TOTAL_ASSISTS",
"value": 184
},
{
"statType": "TOTAL_MINION_KILLS",
"value": 3549
},
{
"statType": "TOTAL_TURRETS_KILLED",
"value": 24
},
{
"statType": "TOTAL_NEUTRAL_MINIONS_KILLED",
"value": 224
}
]
},
"leaves": 0,
"losses": 17,
"maxRating": 0,
"modifyDate": "/Date(1350098520000)/",
"playerStatSummaryType": "RankedTeam3x3",
"rating": 1308,
"wins": 22
},
{
"aggregatedStats": {
"stats": [
{
"statType": "TOTAL_CHAMPION_KILLS",
"value": 15
},
{
"statType": "TOTAL_ASSISTS",
"value": 185
},
{
"statType": "TOTAL_MINION_KILLS",
"value": 250
},
{
"statType": "TOTAL_TURRETS_KILLED",
"value": 4
},
{
"statType": "TOTAL_NEUTRAL_MINIONS_KILLED",
"value": 15
}
]
},
"leaves": 0,
"losses": 3,
"maxRating": 1365,
"modifyDate": "/Date(1321778545000)/",
"playerStatSummaryType": "RankedPremade5x5",
"rating": 1365,
"wins": 8
},
{
"aggregatedStats": {
"stats": [
{
"statType": "TOTAL_CHAMPION_KILLS",
"value": 672
},
{
"statType": "AVERAGE_CHAMPIONS_KILLED",
"value": 9
},
{
"statType": "MAX_COMBAT_PLAYER_SCORE",
"value": 889
},
{
"statType": "AVERAGE_OBJECTIVE_PLAYER_SCORE",
"value": 771
},
{
"statType": "MAX_TEAM_OBJECTIVE",
"value": 2
},
{
"statType": "MAX_NODE_CAPTURE",
"value": 14
},
{
"statType": "MAX_OBJECTIVE_PLAYER_SCORE",
"value": 1424
},
{
"statType": "MAX_TOTAL_PLAYER_SCORE",
"value": 1950
},
{
"statType": "AVERAGE_NUM_DEATHS",
"value": 10
},
{
"statType": "TOTAL_DECAYER",
"value": 105
},
{
"statType": "TOTAL_ASSISTS",
"value": 931
},
{
"statType": "AVERAGE_NODE_NEUTRALIZE",
"value": 6
},
{
"statType": "AVERAGE_NODE_CAPTURE_ASSIST",
"value": 2
},
{
"statType": "MAX_NODE_CAPTURE_ASSIST",
"value": 5
},
{
"statType": "MAX_ASSISTS",
"value": 25
},
{
"statType": "AVERAGE_NODE_NEUTRALIZE_ASSIST",
"value": 1
},
{
"statType": "AVERAGE_TOTAL_PLAYER_SCORE",
"value": 1182
},
{
"statType": "MAX_NODE_NEUTRALIZE_ASSIST",
"value": 3
},
{
"statType": "AVERAGE_COMBAT_PLAYER_SCORE",
"value": 413
},
{
"statType": "AVERAGE_NODE_CAPTURE",
"value": 8
},
{
"statType": "MAX_CHAMPIONS_KILLED",
"value": 20
},
{
"statType": "TOTAL_NODE_NEUTRALIZE",
"value": 391
},
{
"statType": "AVERAGE_TEAM_OBJECTIVE",
"value": 1
},
{
"statType": "AVERAGE_ASSISTS",
"value": 11
},
{
"statType": "TOTAL_NODE_CAPTURE",
"value": 447
},
{
"statType": "MAX_NODE_NEUTRALIZE",
"value": 11
},
{
"statType": "MAX_NUM_DEATHS",
"value": 16
}
]
},
"leaves": 0,
"losses": 0,
"maxRating": 0,
"modifyDate": "/Date(1357567398182)/",
"playerStatSummaryType": "OdinUnranked",
"rating": 400,
"wins": 43
},
{
"aggregatedStats": {
"stats": []
},
"leaves": 0,
"losses": 0,
"maxRating": 0,
"modifyDate": "/Date(1357567398182)/",
"playerStatSummaryType": "AramUnranked2x2",
"rating": 400,
"wins": 0
},
{
"aggregatedStats": {
"stats": []
},
"leaves": 0,
"losses": 0,
"maxRating": 0,
"modifyDate": "/Date(1357567398182)/",
"playerStatSummaryType": "AramUnranked1x1",
"rating": 400,
"wins": 0
},
{
"aggregatedStats": {
"stats": []
},
"leaves": 0,
"losses": 0,
"maxRating": 0,
"modifyDate": "/Date(1357567398182)/",
"playerStatSummaryType": "AramUnranked3x3",
"rating": 400,
"wins": 0
},
{
"aggregatedStats": {
"stats": [
{
"statType": "TOTAL_CHAMPION_KILLS",
"value": 10269
},
{
"statType": "TOTAL_DECAYER",
"value": 0
},
{
"statType": "TOTAL_ASSISTS",
"value": 15722
},
{
"statType": "TOTAL_MINION_KILLS",
"value": 262793
},
{
"statType": "TOTAL_TURRETS_KILLED",
"value": 1954
},
{
"statType": "TOTAL_NEUTRAL_MINIONS_KILLED",
"value": 43898
},
{
"statType": "TOTAL_DEATHS_PER_SESSION",
"value": 1513
}
]
},
"leaves": 1,
"losses": 0,
"maxRating": 0,
"modifyDate": "/Date(1357567398182)/",
"playerStatSummaryType": "Unranked",
"rating": 400,
"wins": 1691
},
{
"aggregatedStats": {
"stats": []
},
"leaves": 0,
"losses": 0,
"maxRating": 0,
"modifyDate": "/Date(1357567398182)/",
"playerStatSummaryType": "AramUnranked5x5",
"rating": 400,
"wins": 0
}
]
},
"previousFirstWinOfDay": "/Date(1357489166306)/",
"userId": 55060
},
"success": true
}
As you can see this is really long. My question is: how would I grab only specific values from a certain playerStatSummarySet entry? For example, if I only wanted the rating value from the entry whose playerStatSummaryType is RankedSolo5x5, how would I do that?
Here's what I have going so far to get the data from the JSON file.
with open('data.txt', 'r') as f:
    data = json.load(f)
If you have to work with complex JSON objects, I suggest you take a look at jsonpath, which offers an XPath-like query language for JSON objects.
An example:
import jsonpath
import json

with open('/test.json', 'r') as f:
    data = json.load(f)

path = "$.[?(#.playerStatSummaryType == 'RankedSolo5x5')].rating"
jsonpath.jsonpath(data, path)
Output:
[1505]
Use a list comprehension
with open('data.txt', 'r') as f:
    data = json.load(f)

rating = [summary["rating"] for summary
          in data["data"]["playerStatSummaries"]["playerStatSummarySet"]
          if summary["playerStatSummaryType"] == "RankedSolo5x5"][0]
You can still do it, but you have to access the data structure properly. What json.load() is returning is a JSON object which is the same as a Python dictionary. This obj has a key named 'data' in it that is associated with another object-dictionary, etc down until you get to the 'playerStatSummaries' object which has a data member keyed with 'playerStatSummarySet' that is actually a Python list rather than another object-dictionary.
Here's an example of how to search through that list of summary sets and find a specific entry -- remembering that since this data item is a list rather than a dictionary object, you have to step through each of the entries in it to find the one you're looking for rather than just looking up its name.
import json

with open('data.txt', 'r') as f:
    jsonObj = json.load(f)

targetSummaryType = 'RankedSolo5x5'
for summarySet in jsonObj['data']['playerStatSummaries']['playerStatSummarySet']:
    if summarySet['playerStatSummaryType'] == targetSummaryType:
        print('max rating for {}: {}'.format(targetSummaryType,
                                             summarySet['maxRating']))
        break  # if you only expect there to be one
Output:
max rating for RankedSolo5x5: 1505
To figure out what was needed I found it useful to initially pprint() the whole jsonObj which made the structure very easy to see.
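For example (depth is optional; it just keeps the dump of a deeply nested object readable):
from pprint import pprint
pprint(jsonObj, depth=3)  # only print the top few levels of nesting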