Parsing JSON with Python to get specific value - python

I am trying to parse JSON with Python. I am trying to get the value of "login" which is michael for "type" which is "CreateEvent".
Here's my JSON:
[
{
"id": "7",
"type": "PushEvent",
"actor": {
"id": 5,
"login": "michael",
"display_login": "michael",
"gravatar_id": "",
"url": "https://ec2",
"avatar_url": "https://ec2"
},
"repo": {
"id": 2,
"name": "myorganization/puppet",
"url": "https://ec2"
},
"payload": {
"push_id": 5,
"size": 1,
"distinct_size": 1,
"ref": "refs/heads/dev",
"head": "5584d504f971",
"before": "e485f37ce935775846f33b",
"commits": [
{
"sha": "5584cd504f971",
"author": {
"email": "michael.conte#gmail.ca",
"name": "michael"
},
"message": "Create dev.pp",
"distinct": true,
"url": "https://ec2"
}
]
},
"public": true,
"created_at": "2018-02-20T16:15:57Z",
"org": {
"id": 6,
"login": "myorganization",
"gravatar_id": "",
"url": "https://ec2",
"avatar_url": "https://ec2"
}
},
{
"id": "6",
"type": "CreateEvent",
"actor": {
"id": 5,
"login": "michael",
"display_login": "michael",
"gravatar_id": "",
"url": "https://ec2",
"avatar_url": "https://ec2"
},
"repo": {
"id": 2,
"name": "myorganization/puppet",
"url": "https://ec2"
},
"payload": {
"ref": "dev",
"ref_type": "branch",
"master_branch": "master",
"description": null,
"pusher_type": "user"
},
"public": true,
"created_at": "2018-02-20T16:15:44Z",
"org": {
"id": 6,
"login": "myorganization",
"gravatar_id": "",
"url": "https://ec2",
"avatar_url": "https://ec2"
}
},
{
"id": "5",
"type": "PushEvent",
"actor": {
"id": 5,
"login": "michael",
"display_login": "michael",
"gravatar_id": "",
"url": "https://ec2",
"avatar_url": "https://ec2"
},
"repo": {
"id": 2,
"name": "myorganization/puppet",
"url": "https://ec2"
},
"payload": {
"push_id": 3,
"size": 1,
"distinct_size": 1,
"ref": "refs/heads/master",
"head": "e485f84b875846f33b",
"before": "f8bb87b952bfb4",
"commits": [
{
"sha": "e485f37ce6f33b",
"author": {
"email": "michael.conte#gmail.ca",
"name": "michael"
},
"message": "Create hello.pp",
"distinct": true,
"url": "https://ec2"
}
]
},
"public": true,
"created_at": "2018-02-20T15:48:42Z",
"org": {
"id": 6,
"login": "myorganization",
"gravatar_id": "",
"url": "https://ec2",
"avatar_url": "https://ec2"
}
},
{
"id": "4",
"type": "CreateEvent",
"actor": {
"id": 5,
"login": "michael",
"display_login": "michael",
"gravatar_id": "",
"url": "https://ec2",
"avatar_url": "https://ec2?"
},
"repo": {
"id": 2,
"name": "myorganization/puppet",
"url": "https://ec2"
},
"payload": {
"ref": "master",
"ref_type": "branch",
"master_branch": "master",
"description": null,
"pusher_type": "user"
},
"public": true,
"created_at": "2018-02-20T15:48:21Z",
"org": {
"id": 6,
"login": "myorganization",
"gravatar_id": "",
"url": "https://ec2",
"avatar_url": "https://ec2"
}
},
{
"id": "3",
"type": "CreateEvent",
"actor": {
"id": 5,
"login": "michael",
"display_login": "michael",
"gravatar_id": "",
"url": "https://ec2",
"avatar_url": "https://ec2"
},
"repo": {
"id": 2,
"name": "myorganization/puppet",
"url": "https://ec2"
},
"payload": {
"ref": null,
"ref_type": "repository",
"master_branch": "master",
"description": null,
"pusher_type": "user"
},
"public": true,
"created_at": "2018-02-20T15:48:05Z",
"org": {
"id": 6,
"login": "myorganization",
"gravatar_id": "",
"url": "https://ec2",
"avatar_url": "https://ec2"
}
}
]
Here's my code:
response = requests.get(url, headers=headers, verify=False)
name = response.json()
fname = (name['type']['actor']['login'])
print(fname)
When I run the above code, I get a type error.
TypeError: list indices must be integers or slices, not str.
What am I doing wrong? I am using Python3 for my code.

Try
fname = name[0]['payload']['commits'][0]['author']['name']
The name "michael" you are trying to get is inside the dictionary named "author", which is inside a single-item list stored under the "commits" key, which is inside the "payload" dictionary, which is inside the first element of the top-level list. Your original code raised the TypeError because the top-level JSON value is a list, so it must first be indexed with an integer (e.g. name[0]) before you can use string keys.
Check out the docs for more info on collection types: http://python-textbok.readthedocs.io/en/1.0/Collections.html

Related

How to get specific value from JSON response in Python

I have a response coming in as :
b'
{
"_items": [
{
"_id": "61a8dc29fab70adfacf59789",
"name": "CP",
"url": "",
"sd_subscriber_id": "",
"account_manager": "",
"contact_name": "",
"contact_email": "",
"phone": "",
"country": "other",
"is_enabled": true,
"company_type": null,
"monitoring_administrator": null,
"allowed_ip_list": null,
"expiry_date": null,
"original_creator": "6183d49420d13bc4e332281d",
"events_only": false,
"_created": "2021-12-02T14:46:01+0000",
"_updated": "2022-02-06T11:59:32+0000",
"_etag": "277e2a8667b650fe4ba56f4b9b44780f3992062a",
"archive_access": false,
"sections": {
"wire": true,
"agenda": true,
"news_api": true,
"monitoring": true
},
"_links": {
"self": {
"title": "Companie",
"href": "companies/61a8dc29fab70adfacf59789"
},
"related": {
"original_creator": {
"title": "User",
"href": "users/6183d49420d13bc4e332281d"
}
}
}
},
{
"_id": "635ac6b9b837aa06e8e94ea3",
"name": "Load Company No Exp",
"url": "",
"sd_subscriber_id": "",
"account_manager": "",
"contact_name": "",
"contact_email": "karapetyan.mikayel#gmail.com",
"phone": "6478934734",
"country": "",
"is_enabled": true,
"company_type": null,
"monitoring_administrator": null,
"allowed_ip_list": null,
"expiry_date": null,
"original_creator": "6298c949007f2fb1c968dfdf",
"events_only": false,
"_created": "2022-10-27T17:58:17+0000",
"_updated": "2022-10-27T18:03:17+0000",
"_etag": "9cb17d520b3ca9dc1c3326a1ccab8bbb5e7839f2",
"version_creator": "6183d49420d13bc4e332281d",
"_links": {
"self": {
"title": "Companie",
"href": "companies/635ac6b9b837aa06e8e94ea3"
},
"related": {
"original_creator": {
"title": "User",
"href": "users/6298c949007f2fb1c968dfdf"
},
"version_creator": {
"title": "User",
"href": "users/6183d49420d13bc4e332281d"
}
}
}
}
]
}
All I need is the "_items" part of it that is inside the []:
[
{
"_id": "61a8dc29fab70adfacf59789",
"name": "CP",
"url": "",
"sd_subscriber_id": "",
"account_manager": "",
"contact_name": "",
"contact_email": "",
"phone": "",
"country": "other",
"is_enabled": true,
"company_type": null,
"monitoring_administrator": null,
"allowed_ip_list": null,
"expiry_date": null,
"original_creator": "6183d49420d13bc4e332281d",
"events_only": false,
"_created": "2021-12-02T14:46:01+0000",
"_updated": "2022-02-06T11:59:32+0000",
"_etag": "277e2a8667b650fe4ba56f4b9b44780f3992062a",
"archive_access": false,
"sections": {
"wire": true,
"agenda": true,
"news_api": true,
"monitoring": true
},
"_links": {
"self": {
"title": "Companie",
"href": "companies/61a8dc29fab70adfacf59789"
},
"related": {
"original_creator": {
"title": "User",
"href": "users/6183d49420d13bc4e332281d"
}
}
}
},
{
"_id": "635ac6b9b837aa06e8e94ea3",
"name": "Load Company No Exp",
"url": "",
"sd_subscriber_id": "",
"account_manager": "",
"contact_name": "",
"contact_email": "karapetyan.mikayel#gmail.com",
"phone": "6478934734",
"country": "",
"is_enabled": true,
"company_type": null,
"monitoring_administrator": null,
"allowed_ip_list": null,
"expiry_date": null,
"original_creator": "6298c949007f2fb1c968dfdf",
"events_only": false,
"_created": "2022-10-27T17:58:17+0000",
"_updated": "2022-10-27T18:03:17+0000",
"_etag": "9cb17d520b3ca9dc1c3326a1ccab8bbb5e7839f2",
"version_creator": "6183d49420d13bc4e332281d",
"_links": {
"self": {
"title": "Companie",
"href": "companies/635ac6b9b837aa06e8e94ea3"
},
"related": {
"original_creator": {
"title": "User",
"href": "users/6298c949007f2fb1c968dfdf"
},
"version_creator": {
"title": "User",
"href": "users/6183d49420d13bc4e332281d"
}
}
}
}
]
How to get it.
I tried getting it as
temp = response['_items']
but it won't work, because the response is still a raw byte string rather than a dict.
Please help me out.
You need to convert the raw byte string to a Python dict first, assuming that you are using Python version 3.6+ and your response object is either a string or bytes:
import json
data = json.loads(response) # loads() decodes it to dict
temp = data['_items']  # index the decoded dict, not the raw response

What is the best way for me to iterate over this dataset to return all matching values from another key value pair if I match a separate key?

I want to be able to search through this list (see bottom of post) of dicts (I think that is what this particular arrangement is called) to search for an ['address'] that matches '0xd2'. If that match is found, I want to return/print all the corresponding ['id']s.
So in this case I would like to return:
632, 315, 432, 100
I'm able to extract individual values like this:
none = None
print(my_dict['result'][2]["id"])
432
I'm struggling with how to get a loop to do this properly.
{
"total": 4,
"page": 0,
"page_size": 100,
"result": [
{
"address": "0xd2",
"id": "632",
"amount": "1",
"name": "Avengers",
"group": "Marvel",
"uri": "https://google.com/",
"metadata": null,
"synced_at": "2022-05-26T22:52:34.113Z",
"last_sync": "2022-05-26T22:52:34.113Z"
},
{
"address": "0xd2",
"id": "315",
"amount": "1",
"name": "Avengers",
"group": "Marvel",
"uri": "https://google.com/",
"metadata": null,
"synced_at": "2022-05-26T22:52:34.113Z",
"last_sync": "2022-05-26T22:52:34.113Z"
},
{
"address": "0xd2",
"id": "432",
"amount": "1",
"name": "Avengers",
"group": "Marvel",
"uri": "https://google.com/",
"metadata": null,
"synced_at": "2022-05-26T22:52:34.113Z",
"last_sync": "2022-05-26T22:52:34.113Z"
},
{
"address": "0x44",
"id": "100",
"amount": "1",
"name": "Suicide Squad",
"group": "DC",
"uri": "https://google.com/",
"metadata": null,
"synced_at": "2022-05-26T22:52:34.113Z",
"last_sync": "2022-05-26T22:52:34.113Z"
}
],
"status": "SYNCED"
}
Welcome to StackOverflow.
You can try list comprehension:
[res["id"] for res in my_dict["result"] if res["address"] == "0xd2"]
If you'd like to use a for loop:
l = []
for res in my_dict["result"]:
if res["address"] == "0xd2":
l.append(res["id"])
You can use a list comprehension.
import json
json_string = """{
"total": 4,
"page": 0,
"page_size": 100,
"result": [
{
"address": "0xd2",
"id": "632",
"amount": "1",
"name": "Avengers",
"group": "Marvel",
"uri": "https://google.com/",
"metadata": null,
"synced_at": "2022-05-26T22:52:34.113Z",
"last_sync": "2022-05-26T22:52:34.113Z"
},
{
"address": "0xd2",
"id": "315",
"amount": "1",
"name": "Avengers",
"group": "Marvel",
"uri": "https://google.com/",
"metadata": null,
"synced_at": "2022-05-26T22:52:34.113Z",
"last_sync": "2022-05-26T22:52:34.113Z"
},
{
"address": "0xd2",
"id": "432",
"amount": "1",
"name": "Avengers",
"group": "Marvel",
"uri": "https://google.com/",
"metadata": null,
"synced_at": "2022-05-26T22:52:34.113Z",
"last_sync": "2022-05-26T22:52:34.113Z"
},
{
"address": "0x44",
"id": "100",
"amount": "1",
"name": "Suicide Squad",
"group": "DC",
"uri": "https://google.com/",
"metadata": null,
"synced_at": "2022-05-26T22:52:34.113Z",
"last_sync": "2022-05-26T22:52:34.113Z"
}
],
"status": "SYNCED"
}"""
json_dict = json.loads(json_string)
result = [elem['id'] for elem in json_dict['result'] if elem['address'] == '0xd2']
print(result)
Output:
['632', '315', '432']
This would store the associated ids in the list:
ids=[]
for r in dataset.get('result'):
if r.get('address')=='0xd2':
ids.append(r.get('id'))

How do I iterate through a nested list from Spotipy's playlist_items function and only print out the fields I'm interested in?

I am attempting to get all the track names and corresponding artist from a playlist using Spotipy. I am at the point where I am able to get a json dump of the fields, but I cannot figure out how to iterate through the dump and print out the fields I'm interested in.
Here is what I'm using to get all information about the playlist:
playlist = spotipy.playlist_items(playlist_id)
If I then print(json.dumps(playlist, indent=2)), then I get the following output:
{
"collaborative": false,
"description": "",
"external_urls": {
"spotify": "https://open.spotify.com/playlist/50uWPcNFdJElMVZWo0IebB"
},
"followers": {
"href": null,
"total": 0
},
"href": "https://api.spotify.com/v1/playlists/50uWPcNFdJElMVZWo0IebB?additional_types=track",
"id": "50uWPcNFdJElMVZWo0IebB",
"images": [
{
"height": 640,
"url": "https://i.scdn.co/image/ab67616d0000b273942a0c9ac8f1def7c8805044",
"width": 640
}
],
"name": "Spotipy Test",
"owner": {
"display_name": "username",
"external_urls": {
"spotify": "https://open.spotify.com/user/username"
},
"href": "https://api.spotify.com/v1/users/username",
"id": "username",
"type": "user",
"uri": "spotify:user:username"
},
"primary_color": null,
"public": false,
"snapshot_id": "Nyw0OTk4MDg1NDM3NzRlOWI2MGY2MDc2ZjMxNTE4OGFkZWQ5Y2QyZDdk",
"tracks": {
"href": "https://api.spotify.com/v1/playlists/50uWPcNFdJElMVZWo0IebB/tracks?offset=0&limit=100&additional_types=track",
"items": [
{
"added_at": "2021-11-17T07:32:59Z",
"added_by": {
"external_urls": {
"spotify": "https://open.spotify.com/user/username"
},
"href": "https://api.spotify.com/v1/users/username",
"id": "username",
"type": "user",
"uri": "spotify:user:username"
},
"is_local": false,
"primary_color": null,
"track": {
"album": {
"album_type": "album",
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/6fOMl44jA4Sp5b9PpYCkzz"
},
"href": "https://api.spotify.com/v1/artists/6fOMl44jA4Sp5b9PpYCkzz",
"id": "6fOMl44jA4Sp5b9PpYCkzz",
"name": "NF",
"type": "artist",
"uri": "spotify:artist:6fOMl44jA4Sp5b9PpYCkzz"
}
],
"available_markets": [],
"external_urls": {
"spotify": "https://open.spotify.com/album/46xdC4Qcvscfs3Ai2RIHcv"
},
"href": "https://api.spotify.com/v1/albums/46xdC4Qcvscfs3Ai2RIHcv",
"id": "46xdC4Qcvscfs3Ai2RIHcv",
"images": [
{
"height": 640,
"url": "https://i.scdn.co/image/ab67616d0000b273942a0c9ac8f1def7c8805044",
"width": 640
},
{
"height": 300,
"url": "https://i.scdn.co/image/ab67616d00001e02942a0c9ac8f1def7c8805044",
"width": 300
},
{
"height": 64,
"url": "https://i.scdn.co/image/ab67616d00004851942a0c9ac8f1def7c8805044",
"width": 64
}
],
"name": "The Search",
"release_date": "2019-07-26",
"release_date_precision": "day",
"total_tracks": 20,
"type": "album",
"uri": "spotify:album:46xdC4Qcvscfs3Ai2RIHcv"
},
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/6fOMl44jA4Sp5b9PpYCkzz"
},
"href": "https://api.spotify.com/v1/artists/6fOMl44jA4Sp5b9PpYCkzz",
"id": "6fOMl44jA4Sp5b9PpYCkzz",
"name": "NF",
"type": "artist",
"uri": "spotify:artist:6fOMl44jA4Sp5b9PpYCkzz"
}
],
"available_markets": [],
"disc_number": 1,
"duration_ms": 248053,
"episode": false,
"explicit": false,
"external_ids": {
"isrc": "USUM71907048"
},
"external_urls": {
"spotify": "https://open.spotify.com/track/3oLe5ZILASG8vU5dxIMfLY"
},
"href": "https://api.spotify.com/v1/tracks/3oLe5ZILASG8vU5dxIMfLY",
"id": "3oLe5ZILASG8vU5dxIMfLY",
"is_local": false,
"name": "The Search",
"popularity": 74,
"preview_url": "https://p.scdn.co/mp3-preview/c26b3dedc967f5009451a693ac275e50c6ecb53c?cid=1f51bd6bdbd94e34884f3ec17c0f68ed",
"track": true,
"track_number": 1,
"type": "track",
"uri": "spotify:track:3oLe5ZILASG8vU5dxIMfLY"
},
"video_thumbnail": {
"url": null
}
},
{
"added_at": "2021-11-17T07:33:24Z",
"added_by": {
"external_urls": {
"spotify": "https://open.spotify.com/user/username"
},
"href": "https://api.spotify.com/v1/users/username",
"id": "username",
"type": "user",
"uri": "spotify:user:username"
},
"is_local": false,
"primary_color": null,
"track": {
"album": {
"album_type": "album",
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/3uL4UpqShC4p2x1dJutoRW"
},
"href": "https://api.spotify.com/v1/artists/3uL4UpqShC4p2x1dJutoRW",
"id": "3uL4UpqShC4p2x1dJutoRW",
"name": "Ghostemane",
"type": "artist",
"uri": "spotify:artist:3uL4UpqShC4p2x1dJutoRW"
}
],
"available_markets": [],
"external_urls": {
"spotify": "https://open.spotify.com/album/4pzCKSWYNM2yUWrNvycC6e"
},
"href": "https://api.spotify.com/v1/albums/4pzCKSWYNM2yUWrNvycC6e",
"id": "4pzCKSWYNM2yUWrNvycC6e",
"images": [
{
"height": 640,
"url": "https://i.scdn.co/image/ab67616d0000b27347fa4895a04fbb517fd42070",
"width": 640
},
{
"height": 300,
"url": "https://i.scdn.co/image/ab67616d00001e0247fa4895a04fbb517fd42070",
"width": 300
},
{
"height": 64,
"url": "https://i.scdn.co/image/ab67616d0000485147fa4895a04fbb517fd42070",
"width": 64
}
],
"name": "Hexada",
"release_date": "2017-09-05",
"release_date_precision": "day",
"total_tracks": 10,
"type": "album",
"uri": "spotify:album:4pzCKSWYNM2yUWrNvycC6e"
},
"artists": [
{
"external_urls": {
"spotify": "https://open.spotify.com/artist/3uL4UpqShC4p2x1dJutoRW"
},
"href": "https://api.spotify.com/v1/artists/3uL4UpqShC4p2x1dJutoRW",
"id": "3uL4UpqShC4p2x1dJutoRW",
"name": "Ghostemane",
"type": "artist",
"uri": "spotify:artist:3uL4UpqShC4p2x1dJutoRW"
}
],
"available_markets": [],
"disc_number": 1,
"duration_ms": 124538,
"episode": false,
"explicit": true,
"external_ids": {
"isrc": "TCADG1741539"
},
"external_urls": {
"spotify": "https://open.spotify.com/track/2Vc6NJ9PW9gD9q343XFRKx"
},
"href": "https://api.spotify.com/v1/tracks/2Vc6NJ9PW9gD9q343XFRKx",
"id": "2Vc6NJ9PW9gD9q343XFRKx",
"is_local": false,
"name": "Mercury: Retrograde",
"popularity": 76,
"preview_url": "https://p.scdn.co/mp3-preview/7d4cb282d38d48d85e9faedb3ea5e74546281d07?cid=1f51bd6bdbd94e34884f3ec17c0f68ed",
"track": true,
"track_number": 4,
"type": "track",
"uri": "spotify:track:2Vc6NJ9PW9gD9q343XFRKx"
},
"video_thumbnail": {
"url": null
}
}
],
"limit": 100,
"next": null,
"offset": 0,
"previous": null,
"total": 2
},
"type": "playlist",
"uri": "spotify:playlist:50uWPcNFdJElMVZWo0IebB"
}
And this is only for two songs being in the playlist!
At this point my limited knowledge of python fails me, because I dont know how to proceed from here. All I am looking for is the track name and the corresponding artist.
So, how do I go through the massive nested list, and only print out the fields Im interested in?
You can use the fields variable to specify the information you want.
For example with
playlist = sp.playlist_items(playlist_id, fields='items.track.artists.name,items.track.name'),
you'll get only the artist name(s) and track name information back:
{"items": [{"track": {"artists": [{"name": "Avicii"}], "name": "Levels - Radio Edit"}}, {"track": {"artists": [{"name": "Avicii"}], "name": "Wake Me Up - Radio Edit"}}]}

append multiple JSON files together and output 1 Avro file using Python

I have a use case where I am required to append multiple JSON files and then convert them into 1 single Avro file. I have written the code below, which appends the JSON files together and then converts them into an Avro file. But the issue I am having is that the JSON file gets appended, but the entire JSON is enclosed in [] brackets, and so I get an error while converting it into an Avro file. I am trying to figure out how I can get rid of the [] from the first and the last line in the JSON file? Any help is appreciated.
The error I am getting is (snippet of the error, error is too long to paste : avro.io.AvroTypeException: The datum [{'event_type': 'uplink'.....}] is not an example of the schema
My code:
Laird.py
import avro.schema
from avro.datafile import DataFileReader, DataFileWriter
from avro.io import DatumReader, DatumWriter
from avro import schema, datafile, io
import json
from datetime import date
import glob
data = []
for f in glob.glob("*.txt"):
with open(f,) as infile:
data.append(json.load(infile))
# json.dumps(data)
with open("laird.json",'w') as outfile:
json.dump(data, outfile)
def json_to_avro():
fo = open("laird.json", "r")
data = fo.readlines()
final_header = []
final_rec = []
for header in data[0:1]:
header = header.strip("\n")
header = header.split(",")
final_header = header
for rec in data[1:]:
rec = rec.strip("\n")
rec = rec.split(" ")
rec = ' '.join(rec).split()
final_rec = rec
final_dict = dict(zip(final_header,final_rec))
# print(final_dict)
json_dumps = json.dumps(final_dict, ensure_ascii=False)
# print(json_dumps)
schema = avro.schema.parse(open("laird.avsc", "rb").read())
# print(schema)
writer = DataFileWriter(open("laird.avro", "wb"), DatumWriter(), schema)
with open("laird.json") as fp:
contents = json.load(fp)
print(contents)
writer.append(contents)
writer.close()
json_to_avro()
#Script to read/convert AVRO file to JSON
reader = DataFileReader(open("laird.avro", "rb"), DatumReader())
for user in reader:
print(user)
reader.close()
Schema: laird.avsc
{
"name": "MyClass",
"type": "record",
"namespace": "com.acme.avro",
"fields": [
{
"name": "event_type",
"type": "string"
},
{
"name": "event_data",
"type": {
"name": "event_data",
"type": "record",
"fields": [
{
"name": "device_id",
"type": "string"
},
{
"name": "user_id",
"type": "string"
},
{
"name": "payload",
"type": {
"type": "array",
"items": {
"name": "payload_record",
"type": "record",
"fields": [
{
"name": "name",
"type": "string"
},
{
"name": "sensor_id",
"type": "string"
},
{
"name": "type",
"type": "string"
},
{
"name": "unit",
"type": "string"
},
{
"name": "value",
"type": "float"
},
{
"name": "channel",
"type": "int"
},
{
"name": "timestamp",
"type": "long"
}
]
}
}
},
{
"name": "client_id",
"type": "string"
},
{
"name": "hardware_id",
"type": "string"
},
{
"name": "timestamp",
"type": "long"
},
{
"name": "application_id",
"type": "string"
},
{
"name": "device_type_id",
"type": "string"
}
]
}
},
{
"name": "company",
"type": {
"name": "company",
"type": "record",
"fields": [
{
"name": "id",
"type": "int"
},
{
"name": "address",
"type": "string"
},
{
"name": "city",
"type": "string"
},
{
"name": "country",
"type": "string"
},
{
"name": "created_at",
"type": "string"
},
{
"name": "industry",
"type": "string"
},
{
"name": "latitude",
"type": "float"
},
{
"name": "longitude",
"type": "float"
},
{
"name": "name",
"type": "string"
},
{
"name": "state",
"type": "string"
},
{
"name": "status",
"type": "int"
},
{
"name": "timezone",
"type": "string"
},
{
"name": "updated_at",
"type": "string"
},
{
"name": "user_id",
"type": "string"
},
{
"name": "zip",
"type": "string"
}
]
}
},
{
"name": "location",
"type": {
"name": "location",
"type": "record",
"fields": [
{
"name": "id",
"type": "int"
},
{
"name": "address",
"type": "string"
},
{
"name": "city",
"type": "string"
},
{
"name": "country",
"type": "string"
},
{
"name": "created_at",
"type": "string"
},
{
"name": "industry",
"type": "string"
},
{
"name": "latitude",
"type": "float"
},
{
"name": "longitude",
"type": "float"
},
{
"name": "name",
"type": "string"
},
{
"name": "state",
"type": "string"
},
{
"name": "status",
"type": "int"
},
{
"name": "timezone",
"type": "string"
},
{
"name": "updated_at",
"type": "string"
},
{
"name": "user_id",
"type": "string"
},
{
"name": "zip",
"type": "string"
},
{
"name": "company_id",
"type": "int"
}
]
}
},
{
"name": "device_type",
"type": {
"name": "device_type",
"type": "record",
"fields": [
{
"name": "id",
"type": "string"
},
{
"name": "application_id",
"type": "string"
},
{
"name": "category",
"type": "string"
},
{
"name": "codec",
"type": "string"
},
{
"name": "data_type",
"type": "string"
},
{
"name": "description",
"type": "string"
},
{
"name": "manufacturer",
"type": "string"
},
{
"name": "model",
"type": "string"
},
{
"name": "name",
"type": "string"
},
{
"name": "parent_constraint",
"type": "string"
},
{
"name": "proxy_handler",
"type": "string"
},
{
"name": "subcategory",
"type": "string"
},
{
"name": "transport_protocol",
"type": "string"
},
{
"name": "version",
"type": "string"
},
{
"name": "created_at",
"type": "string"
},
{
"name": "updated_at",
"type": "string"
}
]
}
},
{
"name": "device",
"type": {
"name": "device",
"type": "record",
"fields": [
{
"name": "id",
"type": "int"
},
{
"name": "thing_name",
"type": "string"
},
{
"name": "created_at",
"type": "string"
},
{
"name": "updated_at",
"type": "string"
},
{
"name": "status",
"type": "int"
}
]
}
}
]
}
Generated JSON File: laird.json
[{"event_type": "uplink", "event_data": {"device_id": "42934500-fcfb-11ea-9f13-d1d0271289a6", "user_id": "a5d78945-9f24-48a1-9107-5bee62bf007a", "payload": [{"name": "Humidity", "sensor_id": "42abaf00-fcfb-11ea-9c71-c517ac227ea5", "type": "rel_hum", "unit": "p", "value": 94.29, "channel": 4, "timestamp": 1605007797789}, {"name": "Temperature", "sensor_id": "42b0df20-fcfb-11ea-bf5c-d11ce3dbc1cb", "type": "temp", "unit": "c", "value": 21.64, "channel": 3, "timestamp": 1605007797789}, {"name": "Battery", "sensor_id": "42a98c20-fcfb-11ea-b4dd-cd2887a335f7", "type": "batt", "unit": "p", "value": 100, "channel": 5, "timestamp": 1605007797789}, {"name": "Local Backup", "sensor_id": "42b01bd0-fcfb-11ea-9f13-d1d0271289a6", "type": "digital_sensor", "unit": "d", "value": 1, "channel": 400, "timestamp": 1605007797789}, {"name": "RSSI", "sensor_id": "42b39e40-fcfb-11ea-bf5c-d11ce3dbc1cb", "type": "rssi", "unit": "dbm", "value": -53, "channel": 100, "timestamp": 1605007797789}, {"name": "SNR", "sensor_id": "", "type": "snr", "unit": "db", "value": 10.2, "channel": 101, "timestamp": 1605007797789}], "client_id": "b8468c50-baf0-11ea-a5e9-89c3b09de43a", "hardware_id": "0025ca0a0000e232", "timestamp": 1605007797789, "application_id": "shipcomwireless", "device_type_id": "70776630-e15e-11ea-a8c9-05cd631755a5"}, "company": {"id": 7696, "address": "9240 Kirby Dr", "city": "Houston", "country": "United States", "created_at": "2020-09-11T18:44:50Z", "industry": "[\"Health Care\"]", "latitude": 29.671324, "longitude": -95.415535, "name": "Harris Health System - Production", "state": "TX", "status": 0, "timezone": "America/Chicago", "updated_at": "2020-09-15T03:34:58Z", "user_id": "a5d78945-9f24-48a1-9107-5bee62bf007a", "zip": "77054"}, "location": {"id": 9153, "address": "9240 Kirby Dr", "city": "Houston", "country": "United States", "created_at": "2020-09-18T02:08:03Z", "industry": "[\"Health Care\"]", "latitude": 29.671324, "longitude": -95.415535, "name": "HHS Van Sensors", "state": 
"TX", "status": 0, "timezone": "America/Chicago", "updated_at": "2020-09-18T02:08:03Z", "user_id": "a5d78945-9f24-48a1-9107-5bee62bf007a", "zip": "77054", "company_id": 7696}, "device_type": {"id": "70776630-e15e-11ea-a8c9-05cd631755a5", "application_id": "", "category": "module", "codec": "lorawan.laird.rs1xx-backup", "data_type": "", "description": "Temp Sensor", "manufacturer": "Laird", "model": "RS1xx", "name": "Laird Temp & Humidity with Local Backup", "parent_constraint": "NOT_ALLOWED", "proxy_handler": "PrometheusClient", "subcategory": "lora", "transport_protocol": "lorawan", "version": "", "created_at": "2020-08-18T14:23:51Z", "updated_at": "2020-08-18T18:16:37Z"}, "device": {"id": 269231, "thing_name": "Van 18-1775 (Ambient)", "created_at": "2020-09-22T17:44:27Z", "updated_at": "2020-09-25T22:39:57Z", "status": 0}}, {"event_type": "uplink", "event_data": {"device_id": "7de32cf0-f9d2-11ea-b4dd-cd2887a335f7", "user_id": "a5d78945-9f24-48a1-9107-5bee62bf007a", "payload": [{"name": "Humidity", "sensor_id": "7dfbbe00-f9d2-11ea-9c71-c517ac227ea5", "type": "rel_hum", "unit": "p", "value": 0, "channel": 4, "timestamp": 1604697684139}, {"name": "Temperature", "sensor_id": "7dfb48d0-f9d2-11ea-9c71-c517ac227ea5", "type": "temp", "unit": "c", "value": -27.22, "channel": 3, "timestamp": 1604697684139}, {"name": "Battery", "sensor_id": "7dfa5e70-f9d2-11ea-bf5c-d11ce3dbc1cb", "type": "batt", "unit": "p", "value": 100, "channel": 5, "timestamp": 1604697684139}, {"name": "Local Backup", "sensor_id": "7dfb96f0-f9d2-11ea-b4dd-cd2887a335f7", "type": "digital_sensor", "unit": "d", "value": 1, "channel": 400, "timestamp": 1604697684139}, {"name": "RSSI", "sensor_id": "7dfc5a40-f9d2-11ea-b4dd-cd2887a335f7", "type": "rssi", "unit": "dbm", "value": -7, "channel": 100, "timestamp": 1604697684139}, {"name": "SNR", "sensor_id": "", "type": "snr", "unit": "db", "value": 10, "channel": 101, "timestamp": 1604697684139}], "client_id": "b8468c50-baf0-11ea-a5e9-89c3b09de43a", 
"hardware_id": "0025ca0a0000be6a", "timestamp": 1604697684139, "application_id": "shipcomwireless", "device_type_id": "70776630-e15e-11ea-a8c9-05cd631755a5"}, "company": {"id": 7696, "address": "9240 Kirby Dr", "city": "Houston", "country": "United States", "created_at": "2020-09-11T18:44:50Z", "industry": "[\"Health Care\"]", "latitude": 29.671324, "longitude": -95.415535, "name": "Harris Health System - Production", "state": "TX", "status": 0, "timezone": "America/Chicago", "updated_at": "2020-09-15T03:34:58Z", "user_id": "a5d78945-9f24-48a1-9107-5bee62bf007a", "zip": "77054"}, "location": {"id": 9080, "address": "9240 Kirby Dr", "city": "Houston", "country": "United States", "created_at": "2020-09-11T18:46:07Z", "industry": "[\"Health Care\"]", "latitude": 29.671324, "longitude": -95.415535, "name": "HHS Cooler Sensors", "state": "TX", "status": 0, "timezone": "America/Chicago", "updated_at": "2020-09-18T14:17:28Z", "user_id": "a5d78945-9f24-48a1-9107-5bee62bf007a", "zip": "77054", "company_id": 7696}, "device_type": {"id": "70776630-e15e-11ea-a8c9-05cd631755a5", "application_id": "", "category": "module", "codec": "lorawan.laird.rs1xx-backup", "data_type": "", "description": "Temp Sensor", "manufacturer": "Laird", "model": "RS1xx", "name": "Laird Temp & Humidity with Local Backup", "parent_constraint": "NOT_ALLOWED", "proxy_handler": "PrometheusClient", "subcategory": "lora", "transport_protocol": "lorawan", "version": "", "created_at": "2020-08-18T14:23:51Z", "updated_at": "2020-08-18T18:16:37Z"}, "device": {"id": 268369, "thing_name": "Cooler F-0201-AH", "created_at": "2020-09-18T17:15:04Z", "updated_at": "2020-09-25T22:39:57Z", "status": 0}}, {"event_type": "uplink", "event_data": {"device_id": "1c5c66f0-fcfb-11ea-8ae3-2ffdc909c57b", "user_id": "a5d78945-9f24-48a1-9107-5bee62bf007a", "payload": [{"name": "Humidity", "sensor_id": "1c7a4f30-fcfb-11ea-8ae3-2ffdc909c57b", "type": "rel_hum", "unit": "p", "value": 81.22, "channel": 4, "timestamp": 1605148608302}, 
{"name": "Temperature", "sensor_id": "1c793dc0-fcfb-11ea-bf5c-d11ce3dbc1cb", "type": "temp", "unit": "c", "value": 24.47, "channel": 3, "timestamp": 1605148608302}, {"name": "Battery", "sensor_id": "1c76a5b0-fcfb-11ea-bf5c-d11ce3dbc1cb", "type": "batt", "unit": "p", "value": 100, "channel": 5, "timestamp": 1605148608302}, {"name": "Local Backup", "sensor_id": "1c73e690-fcfb-11ea-9c71-c517ac227ea5", "type": "digital_sensor", "unit": "d", "value": 1, "channel": 400, "timestamp": 1605148608302}, {"name": "RSSI", "sensor_id": "1c780540-fcfb-11ea-b4dd-cd2887a335f7", "type": "rssi", "unit": "dbm", "value": -14, "channel": 100, "timestamp": 1605148608302}, {"name": "SNR", "sensor_id": "", "type": "snr", "unit": "db", "value": 8.8, "channel": 101, "timestamp": 1605148608302}], "client_id": "b8468c50-baf0-11ea-a5e9-89c3b09de43a", "hardware_id": "0025ca0a0000e1e3", "timestamp": 1605148608302, "application_id": "shipcomwireless", "device_type_id": "70776630-e15e-11ea-a8c9-05cd631755a5"}, "company": {"id": 7696, "address": "9240 Kirby Dr", "city": "Houston", "country": "United States", "created_at": "2020-09-11T18:44:50Z", "industry": "[\"Health Care\"]", "latitude": 29.671324, "longitude": -95.415535, "name": "Harris Health System - Production", "state": "TX", "status": 0, "timezone": "America/Chicago", "updated_at": "2020-09-15T03:34:58Z", "user_id": "a5d78945-9f24-48a1-9107-5bee62bf007a", "zip": "77054"}, "location": {"id": 9153, "address": "9240 Kirby Dr", "city": "Houston", "country": "United States", "created_at": "2020-09-18T02:08:03Z", "industry": "[\"Health Care\"]", "latitude": 29.671324, "longitude": -95.415535, "name": "HHS Van Sensors", "state": "TX", "status": 0, "timezone": "America/Chicago", "updated_at": "2020-09-18T02:08:03Z", "user_id": "a5d78945-9f24-48a1-9107-5bee62bf007a", "zip": "77054", "company_id": 7696}, "device_type": {"id": "70776630-e15e-11ea-a8c9-05cd631755a5", "application_id": "", "category": "module", "codec": "lorawan.laird.rs1xx-backup", 
"data_type": "", "description": "Temp Sensor", "manufacturer": "Laird", "model": "RS1xx", "name": "Laird Temp & Humidity with Local Backup", "parent_constraint": "NOT_ALLOWED", "proxy_handler": "PrometheusClient", "subcategory": "lora", "transport_protocol": "lorawan", "version": "", "created_at": "2020-08-18T14:23:51Z", "updated_at": "2020-08-18T18:16:37Z"}, "device": {"id": 269213, "thing_name": "Van 19-1800 (Ambient)", "created_at": "2020-09-22T17:43:23Z", "updated_at": "2020-09-25T22:39:56Z", "status": 0}}, {"event_type": "uplink", "event_data": {"device_id": "851fd480-f70e-11ea-9f13-d1d0271289a6", "user_id": "a5d78945-9f24-48a1-9107-5bee62bf007a", "payload": [{"name": "Humidity", "sensor_id": "85411820-f70e-11ea-8ae3-2ffdc909c57b", "type": "rel_hum", "unit": "p", "value": 49.52, "channel": 4, "timestamp": 1604558153188}, {"name": "Temperature", "sensor_id": "853f9180-f70e-11ea-9f13-d1d0271289a6", "type": "temp", "unit": "c", "value": 20.52, "channel": 3, "timestamp": 1604558153188}, {"name": "Battery", "sensor_id": "85429ec0-f70e-11ea-9621-a51b22d5dc1d", "type": "batt", "unit": "p", "value": 100, "channel": 5, "timestamp": 1604558153188}, {"name": "Local Backup", "sensor_id": "853f4360-f70e-11ea-9f13-d1d0271289a6", "type": "digital_sensor", "unit": "d", "value": 1, "channel": 400, "timestamp": 1604558153188}, {"name": "RSSI", "sensor_id": "8543b030-f70e-11ea-8ae3-2ffdc909c57b", "type": "rssi", "unit": "dbm", "value": -91, "channel": 100, "timestamp": 1604558153188}, {"name": "SNR", "sensor_id": "", "type": "snr", "unit": "db", "value": 8.5, "channel": 101, "timestamp": 1604558153188}], "client_id": "b8468c50-baf0-11ea-a5e9-89c3b09de43a", "hardware_id": "0025ca0a0000be5b", "timestamp": 1604558153188, "application_id": "shipcomwireless", "device_type_id": "70776630-e15e-11ea-a8c9-05cd631755a5"}, "company": {"id": 7696, "address": "9240 Kirby Dr", "city": "Houston", "country": "United States", "created_at": "2020-09-11T18:44:50Z", "industry": "[\"Health Care\"]", 
"latitude": 29.671324, "longitude": -95.415535, "name": "Harris Health System - Production", "state": "TX", "status": 0, "timezone": "America/Chicago", "updated_at": "2020-09-15T03:34:58Z", "user_id": "a5d78945-9f24-48a1-9107-5bee62bf007a", "zip": "77054"}, "location": {"id": 9080, "address": "9240 Kirby Dr", "city": "Houston", "country": "United States", "created_at": "2020-09-11T18:46:07Z", "industry": "[\"Health Care\"]", "latitude": 29.671324, "longitude": -95.415535, "name": "HHS Cooler Sensors", "state": "TX", "status": 0, "timezone": "America/Chicago", "updated_at": "2020-09-18T14:17:28Z", "user_id": "a5d78945-9f24-48a1-9107-5bee62bf007a", "zip": "77054", "company_id": 7696}, "device_type": {"id": "70776630-e15e-11ea-a8c9-05cd631755a5", "application_id": "", "category": "module", "codec": "lorawan.laird.rs1xx-backup", "data_type": "", "description": "Temp Sensor", "manufacturer": "Laird", "model": "RS1xx", "name": "Laird Temp & Humidity with Local Backup", "parent_constraint": "NOT_ALLOWED", "proxy_handler": "PrometheusClient", "subcategory": "lora", "transport_protocol": "lorawan", "version": "", "created_at": "2020-08-18T14:23:51Z", "updated_at": "2020-08-18T18:16:37Z"}, "device": {"id": 265040, "thing_name": "Cooler R-0306-PHAR", "created_at": "2020-09-15T04:47:12Z", "updated_at": "2020-09-25T22:39:54Z", "status": 0}}]
contents is a list of records, but writer.append expects a single record, so you need to iterate over your records and append them one by one.
You just need to change:
writer.append(contents)
to:
for record in contents:
writer.append(record)

Parsing JIRA webhook comments

I am trying to parse JIRA webhook comment section properly. Now, following code works well for the first comment:
data = request.json
jira_comment = data['issue']['fields']['comment']['comments'][0].get('body')
However, every time a new webhook is fired (for example, when a new comment is added), I can only receive the first comment in the JSON message. I need some way to distinguish each comment from the others, or to always parse only the last comment of the issue. What would be the best way to do this, and is it even possible, since the JIRA webhook JSON looks broken (multiple 'body' keys)?
See the JSON data below; to make it cleaner, use http://jsbeautifier.org/:
{
"timestamp": 1443024903340,
"webhookEvent": "jira:issue_updated",
"user": {
"self": "http://192.168.10.58:8080/rest/api/2/user?username=jhdoe",
"name": "jhdoe",
"key": "jhdoe",
"emailAddress": "john.doe#corp.com",
"avatarUrls": {
"48x48": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=48",
"24x24": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=24",
"16x16": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=16",
"32x32": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=32"
},
"displayName": "John Doe",
"active": true,
"timeZone": "Europe/Berlin"
},
"issue": {
"id": "10300",
"self": "http://192.168.10.58:8080/rest/api/2/issue/10300",
"key": "DEMO-6",
"fields": {
"issuetype": {
"self": "http://192.168.10.58:8080/rest/api/2/issuetype/3",
"id": "3",
"description": "A task that needs to be done.",
"iconUrl": "http://192.168.10.58:8080/images/icons/issuetypes/task.png",
"name": "Task",
"subtask": false
},
"components": [],
"timespent": null,
"timeoriginalestimate": null,
"description": "Hey",
"project": {
"self": "http://192.168.10.58:8080/rest/api/2/project/10000",
"id": "10000",
"key": "DEMO",
"name": "DEMO",
"avatarUrls": {
"48x48": "http://192.168.10.58:8080/secure/projectavatar?avatarId=10011",
"24x24": "http://192.168.10.58:8080/secure/projectavatar?size=small&avatarId=10011",
"16x16": "http://192.168.10.58:8080/secure/projectavatar?size=xsmall&avatarId=10011",
"32x32": "http://192.168.10.58:8080/secure/projectavatar?size=medium&avatarId=10011"
}
},
"fixVersions": [],
"aggregatetimespent": null,
"resolution": null,
"timetracking": {},
"customfield_10006": null,
"attachment": [],
"customfield_10009": "0|i0000f:",
"aggregatetimeestimate": null,
"resolutiondate": null,
"workratio": -1,
"summary": "Comment test",
"lastViewed": "2015-09-23T19:14:11.979+0300",
"watches": {
"self": "http://192.168.10.58:8080/rest/api/2/issue/DEMO-6/watchers",
"watchCount": 1,
"isWatching": true
},
"creator": {
"self": "http://192.168.10.58:8080/rest/api/2/user?username=jhdoe",
"name": "jhdoe",
"key": "jhdoe",
"emailAddress": "john.doe#corp.com",
"avatarUrls": {
"48x48": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=48",
"24x24": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=24",
"16x16": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=16",
"32x32": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=32"
},
"displayName": "John Doe",
"active": true,
"timeZone": "Europe/Berlin"
},
"subtasks": [],
"created": "2015-09-21T17:39:14.518+0300",
"reporter": {
"self": "http://192.168.10.58:8080/rest/api/2/user?username=jhdoe",
"name": "jhdoe",
"key": "jhdoe",
"emailAddress": "john.doe#corp.com",
"avatarUrls": {
"48x48": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=48",
"24x24": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=24",
"16x16": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=16",
"32x32": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=32"
},
"displayName": "John Doe",
"active": true,
"timeZone": "Europe/Berlin"
},
"customfield_10000": null,
"aggregateprogress": {
"progress": 0,
"total": 0
},
"priority": {
"self": "http://192.168.10.58:8080/rest/api/2/priority/2",
"iconUrl": "http://192.168.10.58:8080/images/icons/priorities/high.png",
"name": "High",
"id": "2"
},
"customfield_10001": null,
"customfield_10100": "1234",
"customfield_10200": null,
"labels": [],
"customfield_10004": null,
"environment": null,
"timeestimate": null,
"aggregatetimeoriginalestimate": null,
"versions": [],
"duedate": null,
"progress": {
"progress": 0,
"total": 0
},
"comment": {
"startAt": 0,
"maxResults": 3,
"total": 3,
"comments": [{
"self": "http://192.168.10.58:8080/rest/api/2/issue/10300/comment/10600",
"id": "10600",
"author": {
"self": "http://192.168.10.58:8080/rest/api/2/user?username=jhdoe",
"name": "jhdoe",
"key": "jhdoe",
"emailAddress": "john.doe#corp.com",
"avatarUrls": {
"48x48": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=48",
"24x24": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=24",
"16x16": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=16",
"32x32": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=32"
},
"displayName": "John Doe",
"active": true,
"timeZone": "Europe/Berlin"
},
"body": "Comment 1",
"updateAuthor": {
"self": "http://192.168.10.58:8080/rest/api/2/user?username=jhdoe",
"name": "jhdoe",
"key": "jhdoe",
"emailAddress": "john.doe#corp.com",
"avatarUrls": {
"48x48": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=48",
"24x24": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=24",
"16x16": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=16",
"32x32": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=32"
},
"displayName": "John Doe",
"active": true,
"timeZone": "Europe/Berlin"
},
"created": "2015-09-23T19:05:26.593+0300",
"updated": "2015-09-23T19:08:07.010+0300"
}, {
"self": "http://192.168.10.58:8080/rest/api/2/issue/10300/comment/10601",
"id": "10601",
"author": {
"self": "http://192.168.10.58:8080/rest/api/2/user?username=jhdoe",
"name": "jhdoe",
"key": "jhdoe",
"emailAddress": "john.doe#corp.com",
"avatarUrls": {
"48x48": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=48",
"24x24": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=24",
"16x16": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=16",
"32x32": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=32"
},
"displayName": "John Doe",
"active": true,
"timeZone": "Europe/Berlin"
},
"body": "Comment 2",
"updateAuthor": {
"self": "http://192.168.10.58:8080/rest/api/2/user?username=jhdoe",
"name": "jhdoe",
"key": "jhdoe",
"emailAddress": "john.doe#corp.com",
"avatarUrls": {
"48x48": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=48",
"24x24": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=24",
"16x16": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=16",
"32x32": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=32"
},
"displayName": "John Doe",
"active": true,
"timeZone": "Europe/Berlin"
},
"created": "2015-09-23T19:08:13.644+0300",
"updated": "2015-09-23T19:08:13.644+0300"
}, {
"self": "http://192.168.10.58:8080/rest/api/2/issue/10300/comment/10606",
"id": "10606",
"author": {
"self": "http://192.168.10.58:8080/rest/api/2/user?username=jhdoe",
"name": "jhdoe",
"key": "jhdoe",
"emailAddress": "john.doe#corp.com",
"avatarUrls": {
"48x48": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=48",
"24x24": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=24",
"16x16": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=16",
"32x32": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=32"
},
"displayName": "John Doe",
"active": true,
"timeZone": "Europe/Berlin"
},
"body": "Comment 3",
"updateAuthor": {
"self": "http://192.168.10.58:8080/rest/api/2/user?username=jhdoe",
"name": "jhdoe",
"key": "jhdoe",
"emailAddress": "john.doe#corp.com",
"avatarUrls": {
"48x48": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=48",
"24x24": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=24",
"16x16": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=16",
"32x32": "http://www.gravatar.com/avatar/f94b6b6c15d155f4d4b3b2c05509ab0a?d=mm&s=32"
},
"displayName": "John Doe",
"active": true,
"timeZone": "Europe/Berlin"
},
"created": "2015-09-23T19:13:24.836+0300",
"updated": "2015-09-23T19:14:04.464+0300"
}]
},
"issuelinks": [],
"votes": {
"self": "http://192.168.10.58:8080/rest/api/2/issue/DEMO-6/votes",
"votes": 0,
"hasVoted": false
},
"worklog": {
"startAt": 0,
"maxResults": 20,
"total": 0,
"worklogs": []
},
"assignee": null,
"updated": "2015-09-23T19:15:03.338+0300",
"status": {
"self": "http://192.168.10.58:8080/rest/api/2/status/10000",
"description": "",
"iconUrl": "http://192.168.10.58:8080/images/icons/subtask.gif",
"name": "To Do",
"id": "10000",
"statusCategory": {
"self": "http://192.168.10.58:8080/rest/api/2/statuscategory/2",
"id": 2,
"key": "new",
"colorName": "blue-gray",
"name": "To Do"
}
}
}
},
"changelog": {
"id": "10611",
"items": [{
"field": "Custom ID",
"fieldtype": "custom",
"from": null,
"fromString": null,
"to": null,
"toString": "1234"
}]
}
}
Well, the comment section in your JSON is a list of dicts, so you can always get the last item in the list and then call .get('body'):
jira_comment = data['issue']['fields']['comment']['comments'][-1].get('body')
This works because in Python, negative array indexes count backwards from the end of the array, so -1 gives you the last item.
Does your code work when there are no comments at all?
If it doesn't, consider checking that the length of the comments array is greater than 0, or just wrap the access in a try/except block.
When a comment is added, the request body should contain data['comment'], which holds the comment that triggered the issue update. You can compare data['comment']['created'] and data['comment']['updated'] to figure out whether the comment is new or has been edited.
see https://developer.atlassian.com/jiradev/jira-apis/webhooks

Categories