Query MySQL from JSON that came from AngularJS - Python

Hi guys, I already have the JSON data, and this is my JSON data from AngularJS. Can someone help me with this? I'm stuck on it. Thank you.
{u'isChecked': {u'49871': False, u'49870': True, u'113634': False}}
Then in my Python I want to update MySQL when an id is found in the JSON data.
Right now here is my code for updating, and I want to connect it to my JSON data:
updatetable = """UPDATE table_x
SET value = '1'
"""
db.session.execute(updatetable)
db.session.commit()

Here is a solution:
#!/usr/bin/env python
import urllib2
import simplejson as json

def update_table(id):
    sqlUpdateStr = "UPDATE table_x SET value = '1' where id=" + id
    print "Executing update: " + sqlUpdateStr

def test_parse_json():
    print "Loading json ..."
    req = urllib2.Request("http://localhost/example.json")
    opener = urllib2.build_opener()
    f = opener.open(req)
    # json.load() will deserialize your JSON document and return a Python object.
    data = json.load(f)
    print data['isChecked']
    print ""
    for id in data['isChecked']:
        id_val = str2bool(data['isChecked'][id])
        if id_val == True:
            print "Found id for update: " + id
            update_table(id)
        else:
            print "Ignoring record with id=" + id

def str2bool(v):
    return v.lower() in ("yes", "true", "t", "1")

def main():
    test_parse_json()

if __name__ == '__main__':
    main()
and the content of example.json is:
{
    "isChecked": {
        "49870": "true",
        "49871": "false",
        "113634": "false"
    }
}
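If you are updating through Flask-SQLAlchemy's db.session as in the question, a parameterized variant of update_table avoids building SQL by string concatenation (which is open to SQL injection). A minimal sketch, assuming the question's db and table_x:

from sqlalchemy import text

def update_table(id):
    # bind the id instead of concatenating it into the SQL string
    db.session.execute(
        text("UPDATE table_x SET value = '1' WHERE id = :id"),
        {"id": id},
    )

# after looping over all checked ids:
db.session.commit()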

Related

Search haveibeenpwned for all emails on a domain

I am able to use haveibeenpwned to search for one compromised account. However, I could not find an option to use the API key to search for compromises of all the email accounts on a domain. (For example, if the domain is xyz.com, I want to search for the compromise of abc@xyz.com, peter.charlie@xyz.com and so on). I am aware of the notification email that I can sign up for, but that is a lengthy process and I prefer using the API.
So I wrote a script to search haveibeenpwned for all the email addresses of my domain, but it takes very long. I searched through a couple of GitHub projects, but I did not find any such implementation. Has anyone tried this before?
I have added the code below. I am using a multi-threading approach, but it still takes very long; is there any other optimization strategy I can use? Please help. Thank you.
import requests, json
import threading
from time import sleep
import datetime
import splunklib.client as client
import splunklib.results as results
from itertools import islice
import linecache
import sys

date = datetime.datetime.now()

def PrintException():
    exc_type, exc_obj, tb = sys.exc_info()
    f = tb.tb_frame
    lineno = tb.tb_lineno
    filename = f.f_code.co_filename
    linecache.checkcache(filename)
    line = linecache.getline(filename, lineno, f.f_globals)
    print 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)

class myThread(threading.Thread):
    def __init__(self, threadID, name, list_emails):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.list_emails = list_emails

    def run(self):
        i = 0
        print "Starting " + self.name
        for email in self.list_emails:
            print i
            i = i + 1
            result = check_pasteaccount(email)
            print email
            print result
        print "Exiting " + self.name

def check_pasteaccount(account):
    account = str(account)
    result = ""
    URL = "https://haveibeenpwned.com/api/v3/pasteaccount/%s?truncateResponse=false" % (account)
    # print(URL)
    headers = {'hibp-api-key': api_key}
    try:
        r = requests.get(url=URL, headers=headers)
        # sleep(2)
        status_code = r.status_code
        if status_code == 200:
            data = r.text
            result = []
            for entry in json.loads(data):
                if int((date - datetime.datetime.strptime(entry['Date'], '%Y-%m-%dT%H:%M:%SZ')).days) > 120:
                    pass
                else:
                    result.append(['Title: {0}'.format(entry['Title']),
                                   'Source: {0}'.format(entry['Source']),
                                   'Paste ID: {0}'.format(entry['Id'])])
            if len(result) == 0:
                result = "No paste reported for given account and time frame."
            else:
                paste_result = ""
                for entry in result:
                    for item in entry:
                        paste_result += str(item) + "\r\n"
                    paste_result += "\r\n"
                result = paste_result
        elif status_code == 404:
            result = "No paste for the account"
        else:
            if status_code == 429:
                sleep(5)
                # print "Limit exceeded, sleeping"
                result = check_pasteaccount(account)
            else:
                result = "Exception"
                print status_code
    except Exception as e:
        result = "Exception"
        PrintException()
    return result

def split_every(n, iterable):
    iterable = iter(iterable)
    for chunk in iter(lambda: list(islice(iterable, n)), []):
        yield chunk

def main():
    print datetime.datetime.now()
    # Fetching the list of email addresses from Splunk
    list_emails = connect_splunk()
    print datetime.datetime.now()
    i = 0
    list_split = split_every(1000, list_emails)
    threads = []
    for list in list_split:
        i = i + 1
        thread_name = "Thread" + str(i)
        thread = myThread(1, thread_name, list)
        thread.start()
        threads.append(thread)
    # Wait for all the threads to complete
    for t in threads:
        t.join()
    print "Completed Search"
Here's a shorter and maybe more efficient version of your script using the standard multiprocessing library instead of a hand-rolled thread system.
You'll need Python 3.6+ since we're using f-strings.
You'll need to install the tqdm module for fancy progress bars.
You can adjust the number of concurrent requests with the pool size parameter.
Output is written in machine-readable JSON Lines format into a timestamped file.
A single requests session is shared (per-worker), which means less time spent connecting to HIBP.
import datetime
import json
import multiprocessing
import random
import time

import requests
import tqdm

HIBP_PARAMS = {
    "truncateResponse": "false",
}
HIBP_HEADERS = {
    "hibp-api-key": "xxx",
}

sess = requests.Session()

def check_pasteaccount(account):
    while True:
        resp = sess.get(
            url=f"https://haveibeenpwned.com/api/v3/pasteaccount/{account}",
            params=HIBP_PARAMS,
            headers=HIBP_HEADERS,
        )
        if resp.status_code == 429:
            print("Quota exceeded, waiting for a while")
            time.sleep(random.uniform(3, 7))
            continue
        if resp.status_code >= 400:
            return {
                "account": account,
                "status": resp.status_code,
                "result": resp.text,
            }
        return {
            "account": account,
            "status": resp.status_code,
            "result": resp.json(),
        }

def connect_splunk():
    # TODO: return emails
    return []

def main():
    list_emails = [str(account) for account in connect_splunk()]
    datestamp = datetime.datetime.now().isoformat().replace(":", "-")
    output_filename = f"accounts-log-{datestamp}.jsonl"
    print(f"Accounts to look up: {len(list_emails)}")
    print(f"Output filename: {output_filename}")
    with multiprocessing.Pool(processes=16) as p:
        with open(output_filename, "a") as f:
            results_iterable = p.imap_unordered(
                check_pasteaccount, list_emails, chunksize=20
            )
            for result in tqdm.tqdm(
                results_iterable,
                total=len(list_emails),
                unit="acc",
                unit_scale=True,
            ):
                print(json.dumps(result, sort_keys=True), file=f)

if __name__ == "__main__":
    main()
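Since each line of the output file is a standalone JSON document (JSON Lines), post-processing is straightforward. A minimal sketch for reading the log back; the filename here is hypothetical, use the one your run printed:

import json

with open("accounts-log-2020-01-01T00-00-00.jsonl") as f:
    for line in f:
        record = json.loads(line)
        # status 200 means HIBP returned paste data for this account
        if record["status"] == 200:
            print(record["account"], "->", len(record["result"]), "pastes")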

Search Splunk API using python

What I am trying to do is perform a search on Splunk's API using Python. I am able to get a session key, but that's it. I'm new to both Python and Splunk, so I'm a bit out of my depth, and any help would be really appreciated.
The error:
Traceback (most recent call last):
  File "splunkAPI.py", line 31, in <module>
    sid = minidom.parseString(r.text).getElementsByTagName('sid')[0].firstChild.nodeValue
IndexError: list index out of range
python:
import time  # need for sleep
from xml.dom import minidom
import json, pprint
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

base_url = 'https://___________:8089'
username = '______'
password = '______'
search_query = "____________"

#-------------------------get session token------------------------
r = requests.get(base_url + "/servicesNS/admin/search/auth/login",
                 data={'username': username, 'password': password}, verify=False)
session_key = minidom.parseString(r.text).getElementsByTagName('sessionKey')[0].firstChild.nodeValue
print("Session Key:", session_key)

#-------------------- perform search -------------------------
r = requests.post(base_url + '/services/search/jobs/', data=search_query,
                  headers={'Authorization': ('Splunk %s' % session_key)},
                  verify=False)
sid = minidom.parseString(r.text).getElementsByTagName('sid')[0].firstChild.nodeValue

done = False
while not done:
    r = requests.get(base_url + '/services/search/jobs/' + sid,
                     headers={'Authorization': ('Splunk %s' % session_key)},
                     verify=False)
    response = minidom.parseString(r.text)
    for node in response.getElementsByTagName("s:key"):
        if node.hasAttribute("name") and node.getAttribute("name") == "dispatchState":
            dispatchState = node.firstChild.nodeValue
            print("Search Status: ", dispatchState)
            if dispatchState == "DONE":
                done = True
            else:
                time.sleep(1)

r = requests.get(base_url + '/services/search/jobs/' + sid + '/results/',
                 headers={'Authorization': ('Splunk %s' % session_key)},
                 data={'output_mode': 'json'},
                 verify=False)
pprint.pprint(json.loads(r.text))
Hmm... that code looks awfully familiar :P Unfortunately, error checking wasn't that important when I wrote it.
The issue you see occurs if the search_query is not defined properly. It must start with search=. Also note that you need to include an initial search command if doing a standard Splunk search.
For example, search=search index=* will work, search=index=* will not work.
If you need to include quotes in your search string, I suggest you use something like the following format.
search_query = """search=search index=* "a search expression" | stats count"""
I tried this, but it did not give the needed result; not sure what is missing:
import urllib
import httplib2  # import library
import json
import pprint
import time
import re
from xml.dom import minidom

searchquery = 'search index="movable_in" sourcetype="movable:in:assets" | stats avg(exposure_score)'
myhttp = httplib2.Http()
baseurl = 'https://xxxx.splunkxxx.com:8089'
usernamesp = 'xxxx'
passwordsp = 'xxxx'

def get_splunk_result(searchquery):
    # Step 1: Get a session key
    servercontent = myhttp.request(f'{baseurl}/services/auth/login', 'POST', headers={},
                                   body=urllib.parse.urlencode({'username': usernamesp, 'password': passwordsp}))[1]
    sessionkey = minidom.parseString(servercontent).getElementsByTagName('sessionKey')[0].childNodes[0].nodeValue
    # print("====>sessionkey: %s <====" % sessionkey)
    sid = ''
    # ------------------
    if not searchquery.startswith('search'):
        searchquery = f'search {searchquery}'
    # Step 2: Get a sid with the search query
    i = 0
    while True:
        time.sleep(1)
        try:
            searchjob = myhttp.request(f'{baseurl}/services/search/jobs', 'POST',
                                       headers={'Authorization': f'Splunk {sessionkey}'},
                                       body=urllib.parse.urlencode({'search': searchquery}))[1]
            sid = minidom.parseString(searchjob).getElementsByTagName('sid')[0].childNodes[0].nodeValue
            break
        except:
            i = i + 1
            # print(i)
            if i > 30:
                break
    # print("====>SID: %s <====" % sid)
    # Step 3: Get search status
    myhttp.add_credentials(usernamesp, passwordsp)
    servicessearchstatusstr = '/services/search/jobs/%s/' % sid
    isnotdone = True
    while isnotdone:
        searchstatus = myhttp.request(f'{baseurl}{servicessearchstatusstr}', 'GET')[1]
        isdonestatus = re.compile('isDone">(0|1)')
        strstatus = str(searchstatus)
        isdonestatus = isdonestatus.search(strstatus).groups()[0]
        if isdonestatus == '1':
            isnotdone = False
    # Step 4: Get the search result
    services_search_results_str = '/services/search/jobs/%s/results?output_mode=json_rows&count=0' % sid
    searchresults = myhttp.request(f'{baseurl}{services_search_results_str}', 'GET')[1]
    searchresults = json.loads(searchresults)
    # searchresults = splunk_result(searchresults)
    return searchresults

output = get_splunk_result(searchquery)
print(output)
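For comparison, the official splunk-sdk (the splunklib imported in the haveibeenpwned script earlier) wraps this whole login/submit/poll/fetch cycle. A minimal sketch, assuming the SDK is installed; the host and credentials are placeholders, and jobs.oneshot blocks until the search finishes:

import splunklib.client as client
import splunklib.results as results

service = client.connect(host="xxxx.splunkxxx.com", port=8089,
                         username="xxxx", password="xxxx")
# oneshot submits the search and returns the finished result stream
stream = service.jobs.oneshot('search index="movable_in" | stats avg(exposure_score)')
for row in results.ResultsReader(stream):
    print(row)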

returning values inside a function

I have Python code looping through a JSON POST and connecting to a network device. All of that works fine, but I can not return back to the JSON client (Postman). Python 3.4, Flask. I have tried many different solutions. All I'm trying to do is return the results from my netmiko send commands.
from flask import Flask, jsonify, request
import netmiko
from netmiko.ssh_autodetect import SSHDetect
from netmiko.ssh_exception import NetMikoTimeoutException
import time
import gevent

app = Flask(__name__)

@app.route('/myuri', methods=['GET', 'POST', 'DELETE'])
def post():
    # Authentication
    headers = request.headers
    auth = headers.get("header key")
    if auth == 'my key':
        def firewall(command):
            src_a = command[0]
            src_p = command[1]
            dst_a = command[2]
            dst_p = command[3]
            p_col = command[4]
            p_show = command[5]
            p_push = command[6]
            ip = "1.1.1.1"
            username = "bla"
            password = "bla"
            device = {"device_type": "autodetect", "host": ip,
                      "username": username, "password": password}
            while True:
                try:
                    guesser = SSHDetect(**device)
                    best_match = guesser.autodetect()
                    print(best_match)
                    if "None" in str(best_match):
                        continue
                    if "true" in str(p_show) and "juniper_junos" in str(best_match):
                        device["device_type"] = best_match
                        connection = netmiko.ConnectHandler(**device)
                        time.sleep(1)
                        connection.enable()
                        resp = connection.send_command('show configuration | display json | match ' + str(src_a))
                        resp1 = connection.send_command('show configuration | display json | match ' + str(src_p))
                        resp2 = connection.send_command('show configuration | display json | match ' + str(dst_a))
                        resp3 = connection.send_command('show configuration | display json | match ' + str(dst_p))
                        connection.disconnect()
                        time.sleep(1)
                        returns = resp, resp1, resp2, resp3
                        print(returns)  # this prints fine !!!!!
                        return return  # Can't return back !!!!!!
                except NetMikoTimeoutException:
                    return "Timeout Error"  ### Note can't return this!
        commands = []
        data = request.get_json(force=True)
        for x in data["firewall"]:
            if 'SourceAddress' in x:
                commands.append((x['SourceAddress'], x['SourcePort'],
                                 x['DestinationAddress'], x['DestinationPort'],
                                 x['Protocol'], x['show'], x['push']))
        threads = [gevent.spawn(firewall, command) for command in commands]
        gevent.joinall(threads)
        return "done"  ###### how do i return the returns in function firewall
    else:
        return jsonify({"message": "ERROR: Unauthorized"}), 401
The Python works fine: it finds the device via autodetect, logs in, and gets the info. I can print all of it; I just can't get those returns to return back.
return is a keyword; the variable with the data in your code is returns:
return returns # Will work !!!!!!
You got a typo in your return statement:
return return # Can't return back !!!!!!
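To also get the data back to the client: once firewall ends with return returns, each greenlet exposes that value as .value after gevent.joinall, so the route can collect the results and jsonify them. A sketch against your code:

threads = [gevent.spawn(firewall, command) for command in commands]
gevent.joinall(threads)
# .value holds whatever each greenlet's function returned
results = [t.value for t in threads]
return jsonify(results)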

My python webhook isn't giving me results

I've been working on trying to edit a webhook that was originally meant for a weather API so that it can be used with a postcode/zipcode API. The original file is here: https://github.com/dialogflow/fulfillment-webhook-weather-python/blob/master/app.py
I can't understand where mine is different. I thought I had solved it when I replaced urlencode with quote, but alas, it wasn't enough.
The problem is very unlikely to be with the source JSON request that collects the postcode in postcodeValue(). The API URL comes out correct when you enter it into a browser and is presented quite simply.
https://api.getaddress.io/find/SW11%201th?api-key=I98umgPiu02GEMmHdmfg3w12959
Is it in the correct format? Maybe I need to convert it to become even more JSON than it already is. This question is essentially an end-of-day brain dump that I'm hoping someone can save me from.
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse, urlencode, quote
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
from flask import Flask
from flask import request
from flask import make_response

# Flask app should start in global layout
app = Flask(__name__)

# this line is just naming conventions I reckon with a reference to expect to receive data as POST
@app.route('/webhook', methods=['POST'])
def webhook():
    req = request.get_json(silent=True, force=True)
    # who knows where this is getting printed
    print("Request:")
    print(json.dumps(req, indent=4))
    res = processRequest(req)
    res = json.dumps(res, indent=4)
    # print(res)
    r = make_response(res)
    r.headers['Content-Type'] = 'application/json'
    return r

def processRequest(req):
    if req.get("result").get("action") != "yahooWeatherForecast":
        return {}
    baseurl = "https://api.getaddress.io/find/"
    apikey = "?api-key=I98umgPiu02GEMmHdmfg3w12959"
    yql_query = postcodeValue(req)
    if yql_query is None:
        return {}
    # this line is the actual api request
    yql_url = baseurl + quote(yql_query) + apikey
    result = urlopen(yql_url).read()
    data = json.loads(result)
    res = makeWebhookResult(data)
    return res

# this function extracts an individual parameter and turns it into a string
def postcodeValue(req):
    result = req.get("result")
    parameters = result.get("parameters")
    postcode = parameters.get("postcode")
    if postcode is None:
        return None
    return postcode

# def housenoValue(req):
#     result = req.get("result")
#     parameters = result.get("parameters")
#     houseno = parameters.get("houseno")
#     if houseno is None:
#         return None
#     return houseno

def makeWebhookResult(data):
    longitude = data.get("longitude")
    if longitude is None:
        return {}
    # def makeWebhookResult(data):
    #     query = data.get('query')
    #     if query is None:
    #         return {}
    #     result = query.get('results')
    #     if result is None:
    #         return {}
    #     channel = result.get('channel')
    #     if channel is None:
    #         return {}
    #     item = channel.get('item')
    #     location = channel.get('location')
    #     units = channel.get('units')
    #     if (location is None) or (item is None) or (units is None):
    #         return {}
    #     condition = item.get('condition')
    #     if condition is None:
    #         return {}
    #     print(json.dumps(item, indent=4))
    speech = "Sausage face " + longitude
    print("Response:")
    print(speech)
    return {
        "speech": speech,
        "displayText": speech,
        # "data": data,
        # "contextOut": [],
        "source": "apiai-weather-webhook-sample"
    }

# More flask specific stuff
if __name__ == '__main__':
    port = int(os.getenv('PORT', 5000))
    print("Starting app on port %d" % port)
    app.run(debug=False, port=port, host='0.0.0.0')
Here is a bit cleaner version of your code:
from urllib.request import urlopen
import os
from flask import Flask

app = Flask(__name__)

@app.route('/webhook', methods=['GET'])
def webhook():
    res = processRequest()
    return res

def processRequest():
    try:
        result = urlopen("https://api.getaddress.io/find/SW11%201th?api-key=I98umgPiu02GEMmHdmfg3w12959").read()
        return result
    except:
        return "Error fetching data"

if __name__ == '__main__':
    port = int(os.getenv('PORT', 5000))
    print("Starting app on port %d" % port)
    app.run(debug=False, port=port, host='0.0.0.0')
Open your browser and go to http://localhost:5000/webhook and you should see a response.
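One more thing worth checking in your original makeWebhookResult: getaddress.io appears to return longitude as a number, so "Sausage face " + longitude would raise a TypeError. A small sketch of the fix, assuming that response shape:

def makeWebhookResult(data):
    longitude = data.get("longitude")
    if longitude is None:
        return {}
    # longitude is numeric in the API response, so convert before concatenating
    speech = "Sausage face " + str(longitude)
    return {"speech": speech, "displayText": speech,
            "source": "apiai-weather-webhook-sample"}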

How to write JSON from response to file? (PYTHON)

So I'm trying to write this JSON from the Kik smiley site, and I'm trying to do this so I won't have to write it manually. Anyway, I need to parse the JSON so only some of the existing JSON shows up in the file (basically cleaning it). What I need from the site is (name, id, type). How would I do this?
I have written this in Python, but it seems to fail, and I'm not 100% sure as to why. I am new to Python, so sorry if this is an obvious question! I did find something earlier, but it just confused me even more :) Thank you!
import requests, json, sys
from colorama import init
from termcolor import colored

# colorama
init()

class SmileyGrabber():
    def __init__(self):
        # requests vars
        self.smileysFound = 0
        self.smileysLost = 0
        self.url = "https://sticker-service.appspot.com/v2/collection/smiley"
        self.session = requests.Session()
        self.grabSmiley()

    def grabSmiley(self):
        while True:
            try:
                r = self.session.get(self.url)
                j = r.json()
                try:
                    if j["IsSuccess"] == True:
                        meta = j["smileys"]
                        sID = meta["id"]
                        sType = meta["type"]
                        sName = meta["name"]
                        FormatSmileyData(sID, sType, sName)
                        print "Smiley Found:", colored("({0})".format(sName), "cyan")
                        self.smileysFound += 1
                    else:
                        print(colored("Could not grab smiley"), "red")
                        self.smileysLost += 1
                except:
                    sys.exit()
            except KeyboardInterrupt:
                sys.exit()
            except:
                print r.text
                sys.exit()

class FormatSmileyData(object):
    def __init__(self, sID, sType, sName):
        smileyData = {}
        data = []
        data.append({"SMILEY_ID": sID, "SMILEY_TYPE": sType, "SMILEY_NAME": sName})
        dataFile = open("smileys.json", "a+")
        dataFile.write(json.dumps(smileyData) + "\n")
        dataFile.close()

if __name__ == "__main__":
    SmileyGrabber()
There are a number of problems with your code.
It will be more efficient to read from the network all at once rather than making a call to session.get for each smiley.
j does not have an "IsSuccess" element, so that will never be true.
j["smileys"] is a list, so to get the dictionaries (which represent each smiley) you will need to iterate through that list.
You are appending data into data but you are writing from smileyData, which never has any data entered into it.
Each time you call the FormatSmileyData constructor, you are resetting the data.
Take a look at a tool like Postman to prettify the JSON so you can see the structure. This can help figure out how to parse it.
Here's an updated version of your script that appears to work:
I removed the colorization and made it work with Python 3.
import requests, json, sys

class SmileyGrabber():
    def __init__(self):
        # requests vars
        self.smileysFound = 0
        self.smileysLost = 0
        self.url = "https://sticker-service.appspot.com/v2/collection/smiley"
        self.session = requests.Session()
        self.data = []
        self.grabSmiley()
        self.writeSmileyData()

    def grabSmiley(self):
        r = self.session.get(self.url)
        j = r.json()
        print("got json")
        print(str(len(j)))
        for element in j:
            for meta in element["smileys"]:
                print("---------------")
                print(str(meta))
                sID = meta["id"]
                sType = meta["type"]
                sName = meta["name"]
                self.addSmileyData(sID, sType, sName)
                print("Smiley Found:" + "({0})".format(sName))
                self.smileysFound += 1
                print("found " + str(self.smileysFound))
            else:
                print("Could not grab smiley")
                self.smileysLost += 1

    def addSmileyData(self, sID, sType, sName):
        self.data.append({"SMILEY_ID": sID, "SMILEY_TYPE": sType, "SMILEY_NAME": sName})

    def writeSmileyData(self):
        dataFile = open("smileys.json", "a+")
        dataFile.write(json.dumps(self.data) + "\n")
        dataFile.close()

if __name__ == "__main__":
    SmileyGrabber()
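Because smileys.json is opened with "a+", each run appends one more JSON array on its own line. A minimal sketch for reading the accumulated file back:

import json

with open("smileys.json") as f:
    for line in f:
        for smiley in json.loads(line):
            print(smiley["SMILEY_NAME"], smiley["SMILEY_ID"], smiley["SMILEY_TYPE"])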
