Twint to Kafka -> Don't receive any events in my topic - Python

I'm trying to load some tweets via Twint into a Kafka topic with the confluent-kafka[avro] AvroProducer. I don't get any errors, but my topic won't receive any events from Twint. I even get success messages when debugging (with try/except).
My Code:
import twint
import sys
import json
from time import sleep
from confluent_kafka import avro
from confluent_kafka.avro import AvroProducer

# Define Avro schemas
value_schema_str = """
{
  "namespace": "my.test",
  "name": "value",
  "type": "record",
  "fields": [
    { "name": "id", "type": "long" },
    { "name": "tweet", "type": "string" },
    { "name": "datetime", "type": "string" },
    { "name": "username", "type": "string" },
    { "name": "user_id", "type": "long" },
    { "name": "name", "type": "string" }
  ]
}
"""

key_schema_str = """
{
  "namespace": "my.test",
  "name": "key",
  "type": "record",
  "fields": [
    { "name": "name", "type": "string" }
  ]
}
"""

kafka_broker = 'host.docker.internal:9092'
schema_registry = 'http://host.docker.internal:8081'

value_schema = avro.loads(value_schema_str)
key_schema = avro.loads(key_schema_str)

producer = AvroProducer({
    'bootstrap.servers': kafka_broker,
    'schema.registry.url': schema_registry
}, default_key_schema=key_schema, default_value_schema=value_schema)

# Monkeypatch twint's JSON writer so every tweet is produced to Kafka
module = sys.modules["twint.storage.write"]

def Json(obj, config):
    tweet = obj.__dict__
    tweet_new = {}
    tweet_new['id'] = tweet['id']
    tweet_new['tweet'] = tweet['tweet']
    tweet_new['datetime'] = tweet['datetime']
    tweet_new['username'] = tweet['username']
    tweet_new['user_id'] = tweet['user_id']
    tweet_new['name'] = tweet['name']
    print(tweet_new)
    try:
        producer.produce(topic='tweets_test', key={"name": "Key"}, value=tweet_new)
    except Exception as e:
        print(f"Exception while producing record value - {tweet_new} to topic - tweets_test: {e}")
    else:
        print(f"Successfully producing record value - {tweet_new} to topic - tweets_test")
    try:
        producer.flush()
    except Exception as e:
        print(f"Exception while flushing record value - {tweet_new} to topic - tweets_test: {e}")
    else:
        print(f"Successfully flushed record value - {tweet_new} to topic - tweets_test")

module.Json = Json

c2 = twint.Config()
c2.Search = "corona OR regen OR \"stark regen\" OR \"sturm\" OR überschwemmung OR landunter OR #hagel OR #regen OR #sturm OR flut"
c2.Store_json = True
c2.Custom["user"] = ["id", "tweet", "user_id", "username", "hashtags"]
c2.User_full = True
c2.Output = "tweets.json"
c2.Since = '2019-05-20'
c2.Hide_output = True

twint.run.Search(c2)
When I run it, I get the following output:
{'id': 1513818741057937408, 'tweet': 'RKI: Bundesweite Sieben-Tage-Inzidenz steigt leicht auf 1087 | #corona #rki #test ', 'datetime': '2022-04-12 09:58:07 UTC', 'username': 'flashupde', 'user_id': 1179376986516606978, 'name': 'FLASH UP'}
Successfully producing record value - {'id': 1513818741057937408, 'tweet': 'RKI: Bundesweite Sieben-Tage-Inzidenz steigt leicht auf 1087 | #corona #rki #test ', 'datetime': '2022-04-12 09:58:07 UTC', 'username': 'flashupde', 'user_id': 1179376986516606978, 'name': 'FLASH UP'} to topic - tweets_test
Successfully flushed record value - {'id': 1513818741057937408, 'tweet': 'RKI: Bundesweite Sieben-Tage-Inzidenz steigt leicht auf 1087 | #corona #rki #test ', 'datetime': '2022-04-12 09:58:07 UTC', 'username': 'flashupde', 'user_id': 1179376986516606978, 'name': 'FLASH UP'} to topic - tweets_test
Any help on how to debug this better, or any other advice, would be awesome.
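One way a reader might narrow this down (a debugging sketch, not part of the original question; it assumes the same producer and topic names) is to attach a delivery callback. produce() only enqueues the message in a local buffer, so the call can "succeed" even when the broker never accepts the record; confluent-kafka's produce() takes a callback that reports the broker's per-message verdict:

def delivery_report(err, msg):
    # called once per message, after the broker accepts or rejects it
    if err is not None:
        print(f"Delivery failed: {err}")
    else:
        print(f"Delivered to {msg.topic()} [{msg.partition()}] at offset {msg.offset()}")

producer.produce(topic='tweets_test', key={"name": "Key"},
                 value=tweet_new, callback=delivery_report)
producer.flush()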

Related

Unable to append data to Json array object with desired output

My code:
import json
import re
from http.client import responses
import vt
import requests

with open('/home/asad/Downloads/ssh-log-parser/ok.txt', 'r') as file:
    file = file.read()

pattern = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
ips = pattern.findall(file)
unique_ips = list(set(ips))
# print(unique_ips)
# print(len(unique_ips))

headers = {
    "accept": "application/json",
    "x-apikey": "9765ba5d9fd52f5747dde5240606019f83d32758cb664abc63a43488aa42812d"
}

i = 0
url = "https://www.virustotal.com/api/v3/ip_addresses/"
# messages = []

# returns JSON object as a dictionary
f = open('formater.json')
data = json.load(f)
f.close()

no = 0
while i < len(unique_ips):
    furl = url + str(unique_ips[i])
    # response = requests.get(furl, headers=headers)
    # data_ = response.json()
    # print(data_)
    # messages = [data_['data']['attributes']['last_analysis_results']]
    messages = [data['data']['attributes']['last_analysis_results']]
    y = json.dumps(messages)
    y1 = json.loads(y)
    # print(y1)
    a = []
    r = []
    v = []
    cnt = 0
    store = len(y1[0])
    out_json_new = []
    out_json1 = {}
    for o in y1:
        for k, vv in o.items():
            a_ = vv['result']
            a.append(a_)
            r_ = vv['engine_name']
            r.append(r_)
            v_ = vv['category']
            v.append(v_)
    out_json = {
        "indicators": [{
            "value": str(unique_ips[i]),
            "type": 'ip',
        }]
    }
    out_json1 = {
        "providers": [{
            "provider": str(r),
            "verdict": str(a),
            "score": str(v)
        }]
    }
    out_json1['providers'].append(out_json1)
    i += 1
    print(out_json, out_json1)
# for aaa in a:
#     print(a[0])
This outputs:
{'indicators': [{'value': '192.91.72.201', 'type': 'ip'}]} {'providers': [{'provider': "['Bkav', 'CMC Threat Intelligence', 'CMC sarah ']", 'verdict': "['clean', 'legs', 'hate']", 'score': "['harmless', 'harmless', 'sarah']"}, {...}]}
{'indicators': [{'value': '192.91.72.101', 'type': 'ip'}]} {'providers': [{'provider': "['Bkav', 'CMC Threat Intelligence', 'CMC sarah ']", 'verdict': "['clean', 'legs', 'hate']", 'score': "['harmless', 'harmless', 'sarah']"}, {...}]}
I want to change the output to this format:
{
  "providers": [
    {
      "provider": "['Bkav']",
      "verdict": "['clean']",
      "score": "['harmless']"
    },
    {
      "provider": "['CMC Threat Intelligence']",
      "verdict": "['clean']",
      "score": "['harmless']"
    },
    {
      "provider": "['CMC sarah']",
      "verdict": "['hate']",
      "score": "['harmless']"
    }
  ]
}
My current code groups everything under one key (e.g. provider); instead, each provider should be appended one after another, as in the output above. I tried to use append logic, but it's not working as I intended. With the line
`out_json1['providers'].append(out_json1)`
the output is:
{'indicators': [{'value': '192.91.72.101', 'type': 'ip'}]} {'providers': [{'provider': "['Bkav', 'CMC Threat Intelligence', 'CMC sarah ']", 'verdict': "['clean', 'legs', 'hate']", 'score': "['harmless', 'harmless', 'sarah']"}, {...}]}
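For reference, a minimal sketch of one way to get the desired shape (assuming the formater.json structure shown under RELEVANT FILES below) is to build one dict per engine inside the loop, rather than accumulating every engine into the shared a/r/v lists:

results = data['data']['attributes']['last_analysis_results']
providers = []
for engine, details in results.items():
    # one object per engine, instead of one object holding three lists
    providers.append({
        "provider": details["engine_name"],
        "verdict": details["result"],
        "score": details["category"],
    })
out_json1 = {"providers": providers}

Note this yields plain strings ("Bkav") rather than the stringified single-item lists ("['Bkav']") in the target output; those brackets are usually just an artifact of calling str() on a list.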
RELEVANT FILES
In order to run the code, these files are required.
ok.txt
Aug 22 09:45:08 ip-170-32-23-64 sshd[1546]: Invalid user HPSupport from 192.91.72.201
Aug 22 09:45:08 ip-170-32-23-64 sshd[1546]: Invalid user HPSupport from 192.91.72.101
formater.json
{
  "data": {
    "attributes": {
      "country": "US",
      "last_analysis_stats": {
        "harmless": 86,
        "malicious": 0,
        "suspicious": 0,
        "undetected": 0,
        "timeout": 0
      },
      "last_analysis_results": {
        "Bkav": {
          "category": "harmless",
          "result": "clean",
          "method": "blacklist",
          "engine_name": "Bkav"
        },
        "CMC Threat Intelligence": {
          "category": "harmless",
          "result": "legs",
          "method": "blacklist",
          "engine_name": "CMC Threat Intelligence"
        },
        "CMC sarah ": {
          "category": "sarah",
          "result": "hate",
          "method": "you",
          "engine_name": "CMC sarah "
        }
      }
    }
  }
}

Retrieve specific values from JSON data and generate a CSV file

I am retrieving this data from an API and it comes in JSON format. I only need a certain portion of the data and want to ignore the rest. Please check my output CSV below for how the final CSV should look. From the results key, I need the id, unid, and userHierarchies fields of each result.
{
  "apiVersion": "3.0",
  "loggedInUser": {
    "id": "api@api.com",
    "unid": "192",
    "userHierarchies": [
      { "hierarchyField": "Project", "value": "Eli-f" },
      { "hierarchyField": "Division", "value": "DDD" },
      { "hierarchyField": "Site", "value": "RD02" },
      { "hierarchyField": "Company", "value": "Core" },
      { "hierarchyField": "Department", "value": "Operations" }
    ]
  },
  "results": [
    {
      "id": "Random_Company_57",
      "unid": "75",
      "userHierarchies": [
        { "hierarchyField": "Company", "value": "ABC Company" },
        { "hierarchyField": "Department", "value": "gfds" },
        { "hierarchyField": "Project", "value": "JKL-SDFGHJW" },
        { "hierarchyField": "Division", "value": "Silver RC" },
        { "hierarchyField": "Site", "value": "SQ06" }
      ],
      "preferredLanguage": "en-AU",
      "prefName": "Christmas Bells"
    },
    {
      "id": "xyz.abc@safe.net",
      "unid": "98",
      "userHierarchies": [
        { "hierarchyField": "Company", "value": "ABC Company" },
        { "hierarchyField": "Department", "value": "PUHJ" },
        { "hierarchyField": "Project", "value": "RPOJ-SDFGHJW" },
        { "hierarchyField": "Division", "value": "Silver RC" },
        { "hierarchyField": "Site", "value": "SQ06" }
      ],
      "preferredLanguage": "en-AU",
      "prefName": "Christmas Bells"
    }
  ]
}
My output CSV should look like this:
id,unid,hierarchyField,value
Random_Company_57,75,Company,ABC Company
Random_Company_57,75,Department,gfds
Random_Company_57,75,Project,JKL-SDFGHJW
Random_Company_57,75,Division,Silver RC
Random_Company_57,75,Site,SQ06
xyz.abc@safe.net,98,Company,ABC Company
xyz.abc@safe.net,98,Department,PUHJ
xyz.abc@safe.net,98,Project,RPOJ-SDFGHJW
xyz.abc@safe.net,98,Division,Silver RC
My Python code looks like this:
import requests
from pathlib import Path
from pprint import pprint
import pandas as pd
import time
import os
import argparse

parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument("-path_save", help="define where to save the file")
parser.add_argument("--verbose", help="display processing information")

start = time.time()

def GetData(URL, endPoint, path_save, verbose):
    response = requests.get(URL, auth=('api@api.net', 'uojk00'),
                            headers={
                                'Content-Type': 'application/json',
                                'x-api-key': 'ydVtsni1blwJHb65OJBrrtV',
                            })
    print(endPoint, response)
    df = pd.DataFrame(response.json()["results"])
    print(df)
    df.to_csv(os.path.join(path_save, f"{endPoint}.csv"), index=False)

if __name__ == '__main__':
    start = time.time()
    args = parser.parse_args()
    path_save = Path(args.path_save)
    verbose = args.verbose
    endPoint = ['users']
    for endPt in endPoint:
        URL = "https://api.com/v10/chor/" + endPt
        GetData(URL, endPt, path_save, verbose)
    print("Processed time:", time.time() - start)  # total time
Any help on how to generate that CSV would be appreciated.
If data is the API data from your question, you can use the following example to save it to a CSV in the required format:
df = pd.DataFrame(data["results"]).explode("userHierarchies")
df = pd.concat([df, df.pop("userHierarchies").apply(pd.Series)], axis=1)
df = df[["id", "unid", "hierarchyField", "value"]]
df.to_csv("data.csv", index=False)
Saves data.csv:
id,unid,hierarchyField,value
Random_Company_57,75,Company,ABC Company
Random_Company_57,75,Department,gfds
Random_Company_57,75,Project,JKL-SDFGHJW
Random_Company_57,75,Division,Silver RC
Random_Company_57,75,Site,SQ06
xyz.abc@safe.net,98,Company,ABC Company
xyz.abc@safe.net,98,Department,PUHJ
xyz.abc@safe.net,98,Project,RPOJ-SDFGHJW
xyz.abc@safe.net,98,Division,Silver RC
xyz.abc@safe.net,98,Site,SQ06
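To use this inside the question's GetData function, a sketch of the integration (assuming the response shape shown above) would replace the DataFrame lines with:

def GetData(URL, endPoint, path_save, verbose):
    response = requests.get(URL, auth=..., headers=...)  # unchanged from the original
    # explode turns each userHierarchies list entry into its own row,
    # then pop/apply expands the dicts into hierarchyField/value columns
    df = pd.DataFrame(response.json()["results"]).explode("userHierarchies")
    df = pd.concat([df, df.pop("userHierarchies").apply(pd.Series)], axis=1)
    df = df[["id", "unid", "hierarchyField", "value"]]
    df.to_csv(os.path.join(path_save, f"{endPoint}.csv"), index=False)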

Dictionary length is equal to 3 but when trying to access an index receiving KeyError

I am attempting to parse a json response that looks like this:
{
  "links": {
    "next": "http://www.neowsapp.com/rest/v1/feed?start_date=2015-09-08&end_date=2015-09-09&detailed=false&api_key=xxx",
    "prev": "http://www.neowsapp.com/rest/v1/feed?start_date=2015-09-06&end_date=2015-09-07&detailed=false&api_key=xxx",
    "self": "http://www.neowsapp.com/rest/v1/feed?start_date=2015-09-07&end_date=2015-09-08&detailed=false&api_key=xxx"
  },
  "element_count": 22,
  "near_earth_objects": {
    "2015-09-08": [
      {
        "links": {
          "self": "http://www.neowsapp.com/rest/v1/neo/3726710?api_key=xxx"
        },
        "id": "3726710",
        "neo_reference_id": "3726710",
        "name": "(2015 RC)",
        "nasa_jpl_url": "http://ssd.jpl.nasa.gov/sbdb.cgi?sstr=3726710",
        "absolute_magnitude_h": 24.3,
        "estimated_diameter": {
          "kilometers": {
            "estimated_diameter_min": 0.0366906138,
            "estimated_diameter_max": 0.0820427065
          },
          "meters": {
            "estimated_diameter_min": 36.6906137531,
            "estimated_diameter_max": 82.0427064882
          },
          "miles": {
            "estimated_diameter_min": 0.0227984834,
            "estimated_diameter_max": 0.0509789586
          },
          "feet": {
            "estimated_diameter_min": 120.3760332259,
            "estimated_diameter_max": 269.1689931548
          }
        },
        "is_potentially_hazardous_asteroid": false,
        "close_approach_data": [
          {
            "close_approach_date": "2015-09-08",
            "close_approach_date_full": "2015-Sep-08 09:45",
            "epoch_date_close_approach": 1441705500000,
            "relative_velocity": {
              "kilometers_per_second": "19.4850295284",
              "kilometers_per_hour": "70146.106302123",
              "miles_per_hour": "43586.0625520053"
            },
            "miss_distance": {
              "astronomical": "0.0269230459",
              "lunar": "10.4730648551",
              "kilometers": "4027630.320552233",
              "miles": "2502653.4316094954"
            },
            "orbiting_body": "Earth"
          }
        ],
        "is_sentry_object": false
      }
    ]
  }
}
I am trying to figure out how to parse this to get the "miss_distance" dictionary values, and I am unable to wrap my head around it.
Here is what I have been able to do so far:
After I get a Response object from requests.get():
response = requests.get(url)
I convert the response to a Python dictionary:
data = response.json()  # this returns a dictionary object
I try to parse the first level of the dictionary:
for i in data:
    if i == "near_earth_objects":
        dataset1 = data["near_earth_objects"]["2015-09-08"]
        # this returns the next object, which is of type list
Can someone please explain:
1. How to decipher this response in the first place?
2. How can I move forward in parsing the response object and get to the miss_distance dictionary?
Any pointers/help are appreciated. Thank you.
Your data will have multiple dictionaries for each date, near-earth object, and close approach:
near_earth_objects = data['near_earth_objects']
for date in near_earth_objects:
    objects = near_earth_objects[date]
    for object in objects:
        close_approach_data = object['close_approach_data']
        for close_approach in close_approach_data:
            print(close_approach['miss_distance'])
The code below builds a list of date/miss_distances records for every object on every date:
import json

raw_json = '''
{
  "near_earth_objects": {
    "2015-09-08": [
      {
        "close_approach_data": [
          {
            "miss_distance": {
              "astronomical": "0.0269230459",
              "lunar": "10.4730648551",
              "kilometers": "4027630.320552233",
              "miles": "2502653.4316094954"
            },
            "orbiting_body": "Earth"
          }
        ]
      }
    ]
  }
}
'''
if __name__ == "__main__":
    parsed = json.loads(raw_json)
    # assuming this json includes more than one near_earth_object spread across dates
    near_objects = []
    for date, near_objs in parsed['near_earth_objects'].items():
        for obj in near_objs:
            for appr in obj['close_approach_data']:
                o = {
                    'date': date,
                    'miss_distances': appr['miss_distance']
                }
                near_objects.append(o)
    print(near_objects)
output:
[
{'date': '2015-09-08',
'miss_distances': {
'astronomical': '0.0269230459',
'lunar': '10.4730648551',
'kilometers': '4027630.320552233',
'miles': '2502653.4316094954'
}
}
]
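If an actual tabular view is wanted (an optional extra, assuming pandas is available), the list of records flattens directly:

import pandas as pd

# each dict in near_objects becomes a row; json_normalize expands the
# nested miss_distances dict into separate columns
df = pd.json_normalize(near_objects)
print(df)  # columns: date, miss_distances.astronomical, miss_distances.lunar, ...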

JSON move child keys up one level

I have a Python script that parses a JSON file like below:
[
  {
    "_index": "bulletins",
    "_type": "bulletin",
    "_id": "OPENWRT-SA-000001",
    "_score": null,
    "_source": {
      "lastseen": "2016-09-26T15:45:23",
      "references": [
        "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3193"
      ],
      "affectedPackage": [
        {
          "OS": "OpenWrt",
          "OSVersion": "15.05",
          "packageVersion": "9.9.8-P3-1",
          "packageFilename": "UNKNOWN",
          "arch": "all",
          "packageName": "bind",
          "operator": "lt"
        }
      ],
      "edition": 1,
      "description": "value in here",
      "reporter": "OpenWrt Project",
      "published": "2016-01-24T13:33:41",
      "title": "bind: Security update (4 CVEs)",
      "type": "openwrt",
      "bulletinFamily": "unix",
      "cvelist": [
        "CVE-2015-8704"
      ],
      "modified": "2016-01-24T13:33:41",
      "id": "OPENWRT-SA-000001",
      "href": "https://lists.openwrt.org/pipermail/openwrt-security-announce/2016-January/000001.html",
      "cvss": {
        "score": 7.1,
        "vector": "AV:NETWORK/AC:MEDIUM/Au:NONE/C:NONE/I:NONE/A:COMPLETE/"
      }
    },
    "sort": [
      34872
    ]
  },
I have removed some of the values to keep the post shorter, but left some in to preserve the structure.
I want to take all sub-keys of the _source key, move them up to the same level as _source, and then delete the _source key.
My code to parse the JSON is:
import json
import logging
import logging.handlers
import os
import pymongo
from pymongo import MongoClient

def import_json(mongo_server, mongo_port, vuln_folder):
    try:
        logging.info('Connecting to MongoDB')
        client = MongoClient(mongo_server, mongo_port)
        db = client['vuln_sets']
        coll = db['vulnerabilities']
        logging.info('Connected to MongoDB')
        basepath = os.path.dirname(__file__)
        filepath = os.path.abspath(os.path.join(basepath, ".."))
        archive_filepath = filepath + vuln_folder
        filedir = os.chdir(archive_filepath)
        file_count = 0
        for item in os.listdir(filedir):
            if item.endswith('.json'):
                file_name = os.path.abspath(item)
                with open(item, 'r') as currentfile:
                    vuln_counter = 0
                    duplicate_count = 0
                    logging.info('Currently processing ' + item)
                    file_count += 1
                    json_data = currentfile.read()
                    vuln_content = json.loads(json_data)
                    for vuln in vuln_content:
                        try:
                            del vuln['_type']
                            coll.insert(vuln, continue_on_error=True)
                            vuln_counter += 1
                        except pymongo.errors.DuplicateKeyError:
                            duplicate_count += 1
                    logging.info('Added ' + str(vuln_counter) + ' vulnerabilities for ' + item)
                    logging.info('Found ' + str(duplicate_count) + ' duplicate records!')
                    os.remove(file_name)
        logging.info('Processed ' + str(file_count) + ' files')
    except Exception as e:
        logging.exception(e)
As you can see, this already deletes one key that is not needed, but that key has no data I need, whereas I do need the sub-keys from _source. I am not sure of the best way to achieve this, or whether it would be programmatically correct to just re-create the JSON with the new info, but I need to keep the keys and structure intact apart from moving the sub-keys up one level.
You can use the dictionary update() function to achieve what you're trying to do, but note that dictionaries historically did not guarantee an "order of the keys" (since Python 3.7, dicts do preserve insertion order) - see: Key Order in Python Dictionaries.
Here's an example of one way to do this, starting with a dictionary definition.
d = {
    "_index": "bulletins",
    "_type": "bulletin",
    "_id": "OPENWRT-SA-000001",
    "_score": None,
    "_source": {
        "lastseen": "2016-09-26T15:45:23",
        "references": [
            "http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3193",
        ],
        "affectedPackage": [
            {
                "OS": "OpenWrt",
                "OSVersion": "15.05",
                "packageVersion": "9.9.8-P3-1",
                "packageFilename": "UNKNOWN",
                "arch": "all",
                "packageName": "bind",
                "operator": "lt"
            }
        ],
        "edition": 1,
        "description": "value in here",
        "reporter": "OpenWrt Project",
        "published": "2016-01-24T13:33:41",
        "title": "bind: Security update (4 CVEs)",
        "type": "openwrt",
        "bulletinFamily": "unix",
        "cvelist": [
            "CVE-2015-8704",
        ],
        "modified": "2016-01-24T13:33:41",
        "id": "OPENWRT-SA-000001",
        "href": "https://lists.openwrt.org/pipermail/openwrt-security-announce/2016-January/000001.html",
        "cvss": {
            "score": 7.1,
            "vector": "AV:NETWORK/AC:MEDIUM/Au:NONE/C:NONE/I:NONE/A:COMPLETE/"
        }
    }
}
# create a new dictionary with everything except the key "_source"
new_d = {key: d[key] for key in d if key != '_source'}

# add the keys/values from "_source" to the new dictionary
new_d.update(d['_source'])  # this will overwrite any existing keys
The output of new_d:
{'_id': 'OPENWRT-SA-000001',
 '_index': 'bulletins',
 '_score': None,
 '_type': 'bulletin',
 'affectedPackage': [{'OS': 'OpenWrt',
                      'OSVersion': '15.05',
                      'arch': 'all',
                      'operator': 'lt',
                      'packageFilename': 'UNKNOWN',
                      'packageName': 'bind',
                      'packageVersion': '9.9.8-P3-1'}],
 'bulletinFamily': 'unix',
 'cvelist': ['CVE-2015-8704'],
 'cvss': {'score': 7.1,
          'vector': 'AV:NETWORK/AC:MEDIUM/Au:NONE/C:NONE/I:NONE/A:COMPLETE/'},
 'description': 'value in here',
 'edition': 1,
 'href': 'https://lists.openwrt.org/pipermail/openwrt-security-announce/2016-January/000001.html',
 'id': 'OPENWRT-SA-000001',
 'lastseen': '2016-09-26T15:45:23',
 'modified': '2016-01-24T13:33:41',
 'published': '2016-01-24T13:33:41',
 'references': ['http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2015-3193'],
 'reporter': 'OpenWrt Project',
 'title': 'bind: Security update (4 CVEs)',
 'type': 'openwrt'}
I managed to get it working by using the following code:
for vuln in vuln_content:
    try:
        del vuln['_type']
        new_vuln = {key: vuln[key] for key in vuln if key != '_source'}
        new_vuln.update(vuln['_source'])
        coll.insert(new_vuln, continue_on_error=True)
        vuln_counter += 1
    except pymongo.errors.DuplicateKeyError:
        duplicate_count += 1
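A more compact variant of the same idea (a sketch; for these documents it behaves the same as the dict comprehension plus update()) pops _source out and merges it with dict unpacking (Python 3.5+):

src = vuln.pop('_source', {})  # remove _source and keep its contents
new_vuln = {**vuln, **src}     # on key collisions, _source values win, as with update()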

Python/JSON - does order matter and why is this JSON post to a REST API failing?

I'm posting to the Office 365 REST API and am creating the payload as below:
def CreateEvent(auth, cal_id, subject, start_time, end_time, attendees, content):
    create_url = 'https://outlook.office365.com/api/v1.0/me/calendars/{0}/events'.format(cal_id)
    headers = {"Content-type": "application/json", "Accept": "application/json"}

    data = {"Subject": "", "Attendees": [], "End": {}, "Start": {}, "Body": {}}
    data["Subject"] = subject
    data["StartTimeZone"] = "GMT Standard Time"
    data["Start"] = start_time
    data["EndTimeZone"] = "GMT Standard Time"
    data["End"] = end_time
    data["Attendees"] = attendees
    data["Body"]["ContentType"] = "Text"
    data["Body"]["Content"] = content

    content_data = json.dumps(data)
    #return data
    response = requests.post(create_url, data, headers=headers, auth=auth)
    return response
This produces an unordered dump, which I believe shouldn't cause any issues.
However, when I post manually using y below I get a 201 and the event is created; when I post using the function, which produces the second dump below, I get a 400.
y="""
{
"Subject": "TESTTTT",
"Body": {
"ContentType": "HTML",
"Content": "I think it will meet our requirements!"
},
"Start": "2016-12-02T11:30:00Z",
"StartTimeZone": "GMT Standard Time",
"End": "2016-12-02T11:45:00Z",
"EndTimeZone": "GMT Standard Time",
"Attendees": [
{
"EmailAddress": {
"Name": "Alex ",
"Address": "alex#test.com"
},
"Type": "Required"
}
]
}
"""
What my function returns, which gives a 400:
{
  'Body': {
    'Content': 'test data',
    'ContentType': 'Text'
  },
  'End': '2016-12-02T06:00:00Z',
  'StartTimeZone': 'GMT Standard Time',
  'EndTimeZone': 'GMT Standard Time',
  'Start': '2016-12-02T02:00:00Z',
  'Attendees': [{
    'EmailAddress': {
      'Name': 'Alex ',
      'Address': 'alex@test.com'
    },
    'Type': 'Required'
  }],
  'Subject': 'Maintenance: test'
}
At a glance, I believe you just need to change
response = requests.post(create_url, data, headers=headers, auth=auth)
to
response = requests.post(create_url, content_data, headers=headers, auth=auth)
You were correct in calling the json.dumps() method to serialize the dictionary. Just pass that string to the server instead.
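As an aside (not from the original answer): when requests is given a plain dict as the data argument, it form-encodes it instead of sending JSON, which is why the server rejected the call despite the Content-Type header. Since requests 2.4.2 you can also let the library serialize the dict for you:

# equivalent fix: requests serializes the dict to JSON itself
response = requests.post(create_url, json=data, headers=headers, auth=auth)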
