So I have a list of dicts that looks like this:
[{
'field': {
'data': 'F1'
},
'value': F1Value1,
'date': datetime.datetime(2019, 3, 1, 0, 0)
}, {
'field': {
'data': 'F2'
},
'value': F2Value1,
'date': datetime.datetime(2019, 2, 5, 0, 0)
}, {
'field': {
'data': 'F2'
},
'value': F2Value2,
'date': datetime.datetime(2019, 2, 7, 0, 0)
}]
And I want an output that looks like this:
[
{
'F1': [
{
'value': F1Value1,
'date': datetime.datetime(2019, 3, 1, 0, 0)
}
]
},
{
'F2': [
{
'value': F2Value1,
'date': datetime.datetime(2019, 2, 5, 0, 0)
},
{
'value': F2Value2,
'date': datetime.datetime(2019, 2, 7, 0, 0)
},
]
}
]
That is, I want every field.data value to become a key, and to append the value and date of each item that belongs to the same field.
Note: I want to do this WITHOUT using a for loop (apart from the one loop to iterate through the list). I want to use Python dict/list methods like update() and append(), etc.
Any optimized solutions would be really helpful.
You could just iterate through the list of dicts and use defaultdict from collections to group the items under a unique key:
>>> from collections import defaultdict
>>> d = defaultdict(list)
>>>
>>> for items in x:
... d[items['field']['data']].append({
... 'value': items['value'],
... 'date': items['date']
... })
...
>>>
>>> import pprint
>>> pprint.pprint(x)
[{'date': datetime.datetime(2019, 3, 1, 0, 0),
'field': {'data': 'F1'},
'value': 'F1Value1'},
{'date': datetime.datetime(2019, 2, 5, 0, 0),
'field': {'data': 'F2'},
'value': 'F2Value1'},
{'date': datetime.datetime(2019, 2, 7, 0, 0),
'field': {'data': 'F2'},
'value': 'F2Value2'}]
>>>
>>> pprint.pprint(list(d.items()))
[('F1', [{'date': datetime.datetime(2019, 3, 1, 0, 0), 'value': 'F1Value1'}]),
('F2',
[{'date': datetime.datetime(2019, 2, 5, 0, 0), 'value': 'F2Value1'},
{'date': datetime.datetime(2019, 2, 7, 0, 0), 'value': 'F2Value2'}])]
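If you specifically want the list-of-single-key-dicts shape from the question rather than a plain dict, you can wrap each pair afterwards; a small sketch building on the d above:
>>> result = [{key: entries} for key, entries in d.items()]
>>> pprint.pprint(result)
[{'F1': [{'date': datetime.datetime(2019, 3, 1, 0, 0), 'value': 'F1Value1'}]},
 {'F2': [{'date': datetime.datetime(2019, 2, 5, 0, 0), 'value': 'F2Value1'},
         {'date': datetime.datetime(2019, 2, 7, 0, 0), 'value': 'F2Value2'}]}]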
Use itertools.groupby (note that groupby only groups consecutive items, so sort the list by the key first if it is not already ordered by field, as it is here):
from itertools import groupby
from pprint import pprint

# data is the input list of dicts from the question
result = [{key: [{k: v for k, v in element.items() if k != 'field'}
for element in group]}
for key, group in groupby(data, lambda element: element['field']['data'])]
pprint(result)
Output:
[{'F1': [{'date': datetime.datetime(2019, 3, 1, 0, 0), 'value': 'F1Value1'}]},
{'F2': [{'date': datetime.datetime(2019, 2, 5, 0, 0), 'value': 'F2Value1'},
{'date': datetime.datetime(2019, 2, 7, 0, 0), 'value': 'F2Value2'}]}]
Only using dict, list, and set comprehensions (this assumes the input list is bound to the name things; note it builds a single dict containing every key, wrapped in a list, rather than one single-key dict per field):
[
{
field_data :
[
{ k:v for k, v in thing.items() if k != 'field' }
for thing in things if thing['field']['data'] == field_data
]
for field_data in set(thing['field']['data'] for thing in things)
}
]
I need to fill a db with fake data. To do so I'm creating a list of objects. I generate 10 timestamps, and for each timestamp I create an object, add the timestamp and some randomly assigned data, then append it to the list 5 times, each time with a unique id ('tag'). This format is quite specific as I am trying to simulate what the real data will be like.
Every time I append a value, I run a print to show that the data is correct. Then when I am finished with the entire operation, I print the entire list to double check. The first print looks perfect, yet the second tells me that I have 10 instances of identical data: my tag value is the same for 5 instances, then jumps by 5, and repeats.
What am I missing?
from random import randrange
import pandas as pd

cultures = [
{
'name': 'Pink Oyster',
'scientific_name': 'Pleurotus djamor'
},
{
'name': 'Brown Oyster',
'scientific_name': 'Pleurotus ostreatus'
},
{
'name': 'Blue Oyster',
'scientific_name': 'Pleurotus Columbinus'
},
{
'name': 'Chestnut',
'scientific_name': 'Pholiota Adiposa'
},
{
'name': 'Shiitake',
'scientific_name': 'Lentinula edodes'
},
{
'name': "Lion's Mane",
'scientific_name': 'Hericium erinaceus'
},
]
times = pd.date_range(start="2022-01-01",end="2022-07-20", periods=10).to_pydatetime().tolist()
data = []
count = 0
for time in times:
raw = {}
for key, value in cultures[randrange(0,5)].items():
raw[key] = value
raw['generation'] = randrange(0, 10)
raw['stage'] = randrange(0, 3)
raw['user_id'] = 1
raw['created_at'] = time
raw['updated_at'] = time
for i in range(5):
raw['tag'] = count
count += 1
data.append(raw)
#shows my data has been appended correctly
print(data[-1])
#???????
print(data)
Example of the first print - the tag value increases by 1:
{'name': 'Blue Oyster', 'scientific_name': 'Pleurotus Columbinus', 'generation': 5, 'stage': 1, 'user_id': 1, 'created_at': datetime.datetime(2022, 1, 1, 0, 0), 'updated_at': datetime.datetime(2022, 1, 1, 0, 0), 'tag': 0}
{'name': 'Blue Oyster', 'scientific_name': 'Pleurotus Columbinus', 'generation': 5, 'stage': 1, 'user_id': 1, 'created_at': datetime.datetime(2022, 1, 1, 0, 0), 'updated_at': datetime.datetime(2022, 1, 1, 0, 0), 'tag': 1}
{'name': 'Blue Oyster', 'scientific_name': 'Pleurotus Columbinus', 'generation': 5, 'stage': 1, 'user_id': 1, 'created_at': datetime.datetime(2022, 1, 1, 0, 0), 'updated_at': datetime.datetime(2022, 1, 1, 0, 0), 'tag': 2}
{'name': 'Blue Oyster', 'scientific_name': 'Pleurotus Columbinus', 'generation': 5, 'stage': 1, 'user_id': 1, 'created_at': datetime.datetime(2022, 1, 1, 0, 0), 'updated_at': datetime.datetime(2022, 1, 1, 0, 0), 'tag': 3}
{'name': 'Blue Oyster', 'scientific_name': 'Pleurotus Columbinus', 'generation': 5, 'stage': 1, 'user_id': 1, 'created_at': datetime.datetime(2022, 1, 1, 0, 0), 'updated_at': datetime.datetime(2022, 1, 1, 0, 0), 'tag': 4}
Example of the second print - the tag is stuck at 4:
[{'name': 'Blue Oyster', 'scientific_name': 'Pleurotus Columbinus', 'generation': 5, 'stage': 1, 'user_id': 1, 'created_at': datetime.datetime(2022, 1, 1, 0, 0), 'updated_at': datetime.datetime(2022, 1, 1, 0, 0), 'tag': 4}, {'name': 'Blue Oyster', 'scientific_name': 'Pleurotus Columbinus', 'generation': 5, 'stage': 1, 'user_id': 1, 'created_at': datetime.datetime(2022, 1, 1, 0, 0), 'updated_at': datetime.datetime(2022, 1, 1, 0, 0), 'tag': 4}, {'name': 'Blue Oyster', 'scientific_name': 'Pleurotus Columbinus', 'generation': 5, 'stage': 1, 'user_id': 1, 'created_at': datetime.datetime(2022, 1, 1, 0, 0), 'updated_at': datetime.datetime(2022, 1, 1, 0, 0), 'tag': 4}, {'name': 'Blue Oyster', 'scientific_name': 'Pleurotus Columbinus', 'generation': 5, 'stage': 1, 'user_id': 1, 'created_at': datetime.datetime(2022, 1, 1, 0, 0), 'updated_at': datetime.datetime(2022, 1, 1, 0, 0), 'tag': 4}, {'name': 'Blue Oyster', 'scientific_name': 'Pleurotus Columbinus', 'generation': 5, 'stage': 1, 'user_id': 1, 'created_at': datetime.datetime(2022, 1, 1, 0, 0), 'updated_at': datetime.datetime(2022, 1, 1, 0, 0), 'tag': 4},
How am I changing the values of previously appended items when I change the value after appending the data?
In Python, variables hold references to objects, and list.append stores a reference, not a copy. So when you add raw to the list, you add a reference to the same dict; you then change its contents and append that same object again, which is why every entry ends up showing the last values written.
Instead of doing data.append(raw) try data.append(raw.copy()).
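A tiny sketch of this aliasing effect, independent of the code above:
a = {'x': 1}
lst = [a, a]             # two references to the same dict object
a['x'] = 2
print(lst)               # [{'x': 2}, {'x': 2}] - both entries reflect the change
print(lst[0] is lst[1])  # True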
You need a fresh copy of raw each time round the loop:
for i in range(5):
raw1 = raw.copy()
raw1['tag'] = count
count += 1
data.append(raw1)
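Since every value in raw here is immutable (strings, ints, datetimes), a shallow copy is enough; copy.deepcopy would only be needed if raw contained nested mutable values such as lists or dicts, for example:
import copy

# only if raw ever holds nested lists/dicts that must not be shared:
raw1 = copy.deepcopy(raw)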
I have a query:
from datetime import datetime
from bson import ObjectId

fromDate = '2021-10-01'
toDate = '2021-10-10'
dataType = 'location'
typeId = '60dd6d303da6c17209d5ef68'
workerType = 'Employee'
start_date = datetime.strptime(str(fromDate)+" 00:00:00", '%Y-%m-%d %H:%M:%S')
end_date = datetime.strptime(str(toDate)+" 00:00:00", '%Y-%m-%d %H:%M:%S')
if dataType == 'location':
found_location = prodLocationCollection.find_one({"_id":ObjectId(typeId)})
match_filter = {'Date':{"$gte":start_date,"$lte":end_date},
'Location':found_location["locationName"],
'locationId':{'$exists':True},
'workerType':workerType
}
elif dataType == 'user':
match_filter = {'Date':{"$gte":start_date,"$lte":end_date},
'employeeId':ObjectId(typeId),
'locationId':{'$exists':True},
'workerType':workerType
}
output = list(prodAttendanceCollection.aggregate([{'$match': match_filter},
{"$group":{'_id':{
'employeeId':'$employeeId',
'workerId':'$workerId',
'workerFullName':'$workerFullName'
},
'dailyPointsArray':{
'$push':{
'Date':'$Date',
'createdAs':'$createdAs',
'Points':'$shiftPoints'
}
},
'total_shift_points':{'$sum':'$shiftPoints'},
'total_duration':{'$sum':'$duration'},
}
},
{ '$lookup':
{
'from': "users",
'localField': "employeeId",
'foreignField': "_id",
'as': "userInfo"
}
}
]))
This gives the following output:
[{'_id': {'employeeId': ObjectId('60dd78184524e6c116e22a44'),
'workerId': '1008',
'workerFullName': 'RADHIKA GOTHIVREKAR'},
'dailyPointsArray': [{'Date': datetime.datetime(2021, 10, 1, 0, 0),
'createdAs': 'ABSENT',
'Points': None},
{'Date': datetime.datetime(2021, 10, 2, 0, 0),
'createdAs': 'ABSENT',
'Points': None},
{'Date': datetime.datetime(2021, 10, 3, 0, 0),
'createdAs': 'ABSENT',
'Points': None},
{'Date': datetime.datetime(2021, 10, 4, 0, 0),
'createdAs': 'ABSENT',
'Points': None},
{'Date': datetime.datetime(2021, 10, 5, 0, 0),
'createdAs': 'IN-TIME PUNCHED',
'Points': 0},
{'Date': datetime.datetime(2021, 10, 6, 0, 0),
'createdAs': 'FULL-TIME PUNCHED',
'Points': 1},
{'Date': datetime.datetime(2021, 10, 7, 0, 0),
'createdAs': 'FULL-TIME PUNCHED',
'Points': 1},
{'Date': datetime.datetime(2021, 10, 8, 0, 0),
'createdAs': 'FULL-TIME PUNCHED',
'Points': 1},
{'Date': datetime.datetime(2021, 10, 9, 0, 0),
'createdAs': 'FULL-TIME PUNCHED',
'Points': 1},
{'Date': datetime.datetime(2021, 10, 10, 0, 0),
'createdAs': 'ABSENT',
'Points': None}],
'total_shift_points': 4,
'total_duration': 0,
'userInfo': []},
{'_id': {'employeeId': ObjectId('60dd77e34524e6c116e1b27d'),
'workerId': '1365',
'workerFullName': 'HARISON NIKALJE'},
'dailyPointsArray': [{'Date': datetime.datetime(2021, 10, 1, 0, 0),
'createdAs': 'ABSENT',
'Points': None},
{'Date': datetime.datetime(2021, 10, 2, 0, 0),
'createdAs': 'ABSENT',
'Points': None},
{'Date': datetime.datetime(2021, 10, 3, 0, 0),
'createdAs': 'ABSENT',
'Points': None},
{'Date': datetime.datetime(2021, 10, 4, 0, 0),
'createdAs': 'IN-TIME PUNCHED',
'Points': 0},
{'Date': datetime.datetime(2021, 10, 5, 0, 0),
'createdAs': 'IN-TIME PUNCHED',
'Points': 0},
{'Date': datetime.datetime(2021, 10, 6, 0, 0),
'createdAs': 'IN-TIME PUNCHED',
'Points': 0},
{'Date': datetime.datetime(2021, 10, 7, 0, 0),
'createdAs': 'ABSENT',
'Points': None},
{'Date': datetime.datetime(2021, 10, 8, 0, 0),
'createdAs': 'ABSENT',
'Points': None},
{'Date': datetime.datetime(2021, 10, 9, 0, 0),
'createdAs': 'ABSENT',
'Points': None},
{'Date': datetime.datetime(2021, 10, 10, 0, 0),
'createdAs': 'ABSENT',
'Points': None}],
'total_shift_points': 0,
'total_duration': 0,
'userInfo': []}]
As you can see everything is working, but the $lookup part is returning an empty array ([]) in userInfo.
Here is sample data for attendance & user:
ATTENDANCE
{
"_id": {
"$oid": "60dd7d723fcb2eb7df248af3"
},
"workerId": "2073",
"workerFullName": "ARUN NAIR",
"workerType": "Employee",
"workerAadharCardNumber": "xxxxxxxxxx",
"Date": {
"$date": "2021-07-01T00:00:00.000Z"
},
"employeeId": {
"$oid": "60dd77c14524e6c116e1692c"
},
"Location": "HEAD OFFICE 2",
"sourceUnitType": null,
"duration": null,
"shiftPoints": null,
"createdAs": "ABSENT",
"ruleApplied": null,
"detections": [],
"locationId": {
"$oid": "60dd6d303da6c17209d5ef68"
},
"workerFaceRegistered": true
}
USER
{
"_id": {
"$oid": "60dd77c14524e6c116e1692c"
},
"workerFirstName": "ARUN",
"workerSurname": "NAIR",
"workerPhoneNumber": "xxxxxxxxxx",
"workerId": "2073",
"locationType": "HEAD OFFICE",
"locationName": "HEAD OFFICE 2",
"workerDesignation": "EXECUTIVE",
"workerDepartment": "SALES",
"workerAadharCardNumber": "xxxxxxxxxx",
"workerType": "Employee",
"workerEmail": "xxxxxxxxxxxx",
"workerStatus": "REGULAR",
"workerUsername": "2073",
"workerPassword": "$2b$12$wBrDRYgPbRxrLG8N9Wxs2eDxGVFauZMraosrkePnYWWpZmPE1ljY2",
"totalLoginAttemptLeft": 3,
"lastLocationId": {
"$oid": "60dd6d303da6c17209d5ef68"
},
"allowFencing": true,
"workerFaceRegistered": true,
"isActive": false
}
After the $group stage the original top-level fields are no longer present, so $employeeId does not exist and the lookup matches nothing.
Since employeeId was moved to _id.employeeId in the group stage, use localField: "_id.employeeId" in the lookup.
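A sketch of the corrected $lookup stage (collection and field names taken from the pipeline above); only localField changes:
{'$lookup': {
    'from': "users",
    'localField': "_id.employeeId",   # the grouped key now lives under _id
    'foreignField': "_id",
    'as': "userInfo"
}}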
I have a list of dictionaries that state a date as well as a price. It looks like this:
dict = [{'Date':datetime.datetime(2020, 6, 1, 0, 0), 'Price': 50}, {'Date':datetime.datetime(2020, 6, 1, 0, 0), 'Price': 12}, {'Date':datetime.datetime(2020, 6, 2, 0, 0), 'Price': 60}]
I'd like to create a new list of dictionaries that sum all the Price values that are on the same date. So the output would look like this:
output_dict = [{'Date':datetime.datetime(2020, 6, 1, 0, 0), 'Price': 62}, {'Date':datetime.datetime(2020, 6, 2, 0, 0), 'Price': 60}]
How could I achieve this?
You can use Counter from the collections module:
from collections import Counter
c = Counter()
for v in dict:
c[v['Date']] += v['Price']
output_dict = [{'Date': name, 'Price': count} for name, count in c.items()]
Output:
[{'Date': datetime.datetime(2020, 6, 1, 0, 0), 'Price': 62},
{'Date': datetime.datetime(2020, 6, 2, 0, 0), 'Price': 60}]
Or, using pandas:
You can use the pandas library to solve this. Install it with:
pip install pandas
Then the code would be:
import pandas as pd
output_dict = pd.DataFrame(dict).groupby('Date').agg(sum).to_dict()['Price']
Output:
{Timestamp('2020-06-01 00:00:00'): 62, Timestamp('2020-06-02 00:00:00'): 60}
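If you want the same list-of-dicts shape as output_dict rather than a plain dict, a small variation (still assuming the input list is named dict, as in the question):
output_list = (pd.DataFrame(dict)
                 .groupby('Date', as_index=False)['Price']
                 .sum()
                 .to_dict('records'))
# [{'Date': Timestamp('2020-06-01 00:00:00'), 'Price': 62},
#  {'Date': Timestamp('2020-06-02 00:00:00'), 'Price': 60}]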
Another solution using itertools.groupby (this relies on the list already being ordered by Date, which it is here):
import datetime
from itertools import groupby
dct = [{'Date':datetime.datetime(2020, 6, 1, 0, 0), 'Price': 50}, {'Date':datetime.datetime(2020, 6, 1, 0, 0), 'Price': 12}, {'Date':datetime.datetime(2020, 6, 2, 0, 0), 'Price': 60}]
out = []
for k, g in groupby(dct, lambda k: k['Date']):
out.append({'Date': k, 'Price': sum(v['Price'] for v in g)})
print(out)
Prints:
[{'Date': datetime.datetime(2020, 6, 1, 0, 0), 'Price': 62}, {'Date': datetime.datetime(2020, 6, 2, 0, 0), 'Price': 60}]
You can use itertools' groupby, although a defaultdict will likely be faster:
from itertools import groupby
from operator import itemgetter

# dicts is the input list of dicts from the question
# sort first so groupby sees equal dates consecutively
dicts = sorted(dicts, key=itemgetter("Date"))
# get the sum via itertools' groupby
result = [{"Date": key,
           "Price": sum(entry['Price'] for entry in value)}
          for key, value in
          groupby(dicts, key=itemgetter("Date"))]
print(result)
[{'Date': datetime.datetime(2020, 6, 1, 0, 0), 'Price': 62},
{'Date': datetime.datetime(2020, 6, 2, 0, 0), 'Price': 60}]
Using defaultdict
import datetime
from collections import defaultdict
dct = [{'Date': datetime.datetime(2020, 6, 1, 0, 0), 'Price': 50},
{'Date': datetime.datetime(2020, 6, 1, 0, 0), 'Price': 12},
{'Date': datetime.datetime(2020, 6, 2, 0, 0), 'Price': 60}]
sum_up = defaultdict(int)
for v in dct:
sum_up[v['Date']] += v['Price']
print([{"Date": k, "Price": v} for k, v in sum_up.items()])
[{'Date': datetime.datetime(2020, 6, 1, 0, 0), 'Price': 62}, {'Date': datetime.datetime(2020, 6, 2, 0, 0), 'Price': 60}]
This is a good use case for defaultdict; let's say our list of dicts is my_dict:
import datetime
my_dict = [{'Date':datetime.datetime(2020, 6, 1, 0, 0), 'Price': 50},
{'Date':datetime.datetime(2020, 6, 1, 0, 0), 'Price': 12},
{'Date':datetime.datetime(2020, 6, 2, 0, 0), 'Price': 60}]
We can accumulate prices using a defaultdict like so:
from collections import defaultdict
new_dict = defaultdict(int)
for dict_ in my_dict:
new_dict[dict_['Date']] += dict_['Price']
Then we just convert this dict back into a list of dicts:
my_dict = [{'Date': date, 'Price': price} for date, price in new_dict.items()]
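For the sample data above, printing the rebuilt list gives the same result as the other answers:
print(my_dict)
# [{'Date': datetime.datetime(2020, 6, 1, 0, 0), 'Price': 62},
#  {'Date': datetime.datetime(2020, 6, 2, 0, 0), 'Price': 60}]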
I am trying to group an array of datetime by hour and return the count of each hour.
This is my list containing many datetime objects. I tried to use a loop to count how many datetime objects share the same hour, but I could not find a way to get the count.
The other references on Stack Overflow all store the date as a column in pandas, which I do not want, because my datetimes are stored in a list.
I am hoping to get a list of hour_count objects that looks like this:
hour_count = [
{
"hour": datetime,
"count": 2
}
]
# code
from datetime import datetime

hours = [
datetime(2019, 1, 25, 1),
datetime(2019, 1, 25, 1),
datetime(2019, 1, 25, 2),
datetime(2019, 1, 25, 3),
datetime(2019, 1, 25, 4),
datetime(2019, 1, 25, 4)
]
existed = []
for hour in hours:
if hour.hour not in existed:
existed.append({
"hour": hour.hour,
"count": existed[hour.hour] + 1
})
The simplest thing without using pandas is to use collections.Counter:
from collections import Counter
counts = Counter(h.hour for h in hours)
print(counts)
#Counter({1: 2, 2: 1, 3: 1, 4: 2})
Now just reformat into your desired output using a list comprehension:
hour_count = [{"hour": h, "count": c} for h, c in counts.items()]
print(hour_count)
#[{'count': 2, 'hour': 1},
# {'count': 1, 'hour': 2},
# {'count': 1, 'hour': 3},
# {'count': 2, 'hour': 4}]
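If you want the "hour" key to be a datetime truncated to the hour (as in the hour_count example above) rather than just the integer hour, bucket on a truncated timestamp instead; a small sketch:
counts = Counter(h.replace(minute=0, second=0, microsecond=0) for h in hours)
hour_count = [{"hour": h, "count": c} for h, c in counts.items()]
# [{'hour': datetime.datetime(2019, 1, 25, 1, 0), 'count': 2},
#  {'hour': datetime.datetime(2019, 1, 25, 2, 0), 'count': 1},
#  {'hour': datetime.datetime(2019, 1, 25, 3, 0), 'count': 1},
#  {'hour': datetime.datetime(2019, 1, 25, 4, 0), 'count': 2}]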
You can use pandas' DatetimeIndex to extract the hour from each timestamp, and then numpy to count each unique hour.
import numpy as np
import pandas as pd
from pprint import pprint
hours = pd.DatetimeIndex(hours).hour
unique_hours, counts = np.unique(hours, return_counts=True)
hour_count = [{ "hour": hour, "count": count } for hour, count in zip(unique_hours, counts)]
pprint(hour_count)
Result
[{'count': 2, 'hour': 1},
{'count': 1, 'hour': 2},
{'count': 1, 'hour': 3},
{'count': 2, 'hour': 4}]
I'm trying to get the aggregation of 2 different lists, where each element is a dictionary with 2 entries, month and value.
So the first list looks like this:
[{
'patient_notes': 5,
'month': datetime.date(2017, 1, 1)
}, {
'patient_notes': 5,
'month': datetime.date(2017, 2, 1)
}, {
'patient_notes': 5,
'month': datetime.date(2017, 5, 1)
}, {
'patient_notes': 5,
'month': datetime.date(2017, 7, 1)
}, {
'patient_notes': 5,
'month': datetime.date(2017, 8, 1)
}, {
'patient_notes': 5,
'month': datetime.date(2017, 12, 1)
}]
Second list is:
[{
'employee_notes': 4,
'month': datetime.date(2017, 2, 1)
}, {
'employee_notes': 4,
'month': datetime.date(2017, 3, 1)
}, {
'employee_notes': 4,
'month': datetime.date(2017, 4, 1)
}, {
'employee_notes': 4,
'month': datetime.date(2017, 8, 1)
}, {
'employee_notes': 4,
'month': datetime.date(2017, 9, 1)
}, {
'employee_notes': 4,
'month': datetime.date(2017, 10, 1)
}, {
'employee_notes': 4,
'month': datetime.date(2017, 12, 1)
}]
So I need to build a new list that contains the sum of both lists per month, something like this:
[{
'total_messages': 14,
'month': '2017-01-01'
}, {
'total_messages': 14,
'month': '2017-02-01'
}, {
'total_messages': 14,
'month': '2017-03-01'
}, {
'total_messages': 14,
'month': '2017-04-01'
}, {
'total_messages': 14,
'month': '2017-05-01'
}, {
'total_messages': 14,
'month': '2017-06-01'
}, {
'total_messages': 14,
'month': '2017-07-01'
}, {
'total_messages': 14,
'month': '2017-08-01'
}, {
'total_messages': 14,
'month': '2017-09-01'
}, {
'total_messages': 14,
'month': '2017-10-01'
}, {
'total_messages': 14,
'month': '2017-11-01'
}, {
'total_messages': 14,
'month': '2017-12-01'
}]
I first tried with zip, but this only works if the two lists are the same size. Then I tried with itertools.izip_longest, but this has problems if the lists are the same size but have different months... I cannot simply aggregate those; I need to aggregate matching months only.
Counter is also great for this, but I cannot change the key names of the original lists... any ideas?
You can use defaultdict to create a counter. Go through each item in the first list and add the patient_notes value to the dictionary. Then go through the second list and add the employee_notes values.
Now you need to encode your new defaultdict back into a list in your desired format. You can use a list comprehension for that. I've sorted the list by month.
from collections import defaultdict
dd = defaultdict(int)
for d in my_list_1:
dd[d['month']] += d['patient_notes']
for d in my_list_2:
dd[d['month']] += d['employee_notes']
result = [{'total_messages': dd[k], 'month': k} for k in sorted(dd.keys())]
>>> result
[{'month': datetime.date(2017, 1, 1), 'total_messages': 5},
{'month': datetime.date(2017, 2, 1), 'total_messages': 9},
{'month': datetime.date(2017, 3, 1), 'total_messages': 4},
{'month': datetime.date(2017, 4, 1), 'total_messages': 4},
{'month': datetime.date(2017, 5, 1), 'total_messages': 5},
{'month': datetime.date(2017, 7, 1), 'total_messages': 5},
{'month': datetime.date(2017, 8, 1), 'total_messages': 9},
{'month': datetime.date(2017, 9, 1), 'total_messages': 4},
{'month': datetime.date(2017, 10, 1), 'total_messages': 4},
{'month': datetime.date(2017, 12, 1), 'total_messages': 9}]
from collections import defaultdict

d_dict = defaultdict(int)
# l1 and l2 are the two input lists; each dict lists the notes count first
# and the month second, so unpack the values in that order
for v, k in (i.values() for i in l1 + l2):
    d_dict[k] += v
[ {'month': i.strftime("%Y-%m-%d"), 'total_messages': j} for i, j in sorted(d_dict.items()) ]
Output:
[{'month': '2017-01-01', 'total_messages': 5},
{'month': '2017-02-01', 'total_messages': 9},
{'month': '2017-03-01', 'total_messages': 4},
{'month': '2017-04-01', 'total_messages': 4},
{'month': '2017-05-01', 'total_messages': 5},
{'month': '2017-07-01', 'total_messages': 5},
{'month': '2017-08-01', 'total_messages': 9},
{'month': '2017-09-01', 'total_messages': 4},
{'month': '2017-10-01', 'total_messages': 4},
{'month': '2017-12-01', 'total_messages': 9}]