I am trying to connect to Splunk via the API using Python. I can connect and get a 200 status code, but when I read the response, it doesn't contain the content of the page. See below:
Here is my code:
import json
import requests
import re
baseurl = 'https://my_splunk_url:8888'
username = 'my_username'
password = 'my_password'
headers={"Content-Type": "application/json"}
s = requests.Session()
s.proxies = {"http": "my_proxy"}
r = s.get(baseurl, auth=(username, password), verify=False, headers=None, data=None)
print(r.status_code)
print(r.text)
I am new to Splunk and Python, so any ideas or suggestions as to why this is happening would help.
You need to authenticate first to get a token; then you'll be able to hit the rest of the REST endpoints. The auth endpoint is at /servicesNS/admin/search/auth/login, which will give you the session_key, which you then provide to subsequent requests.
Here is some code that uses requests to authenticate to a Splunk instance and then start a search. It then checks whether the search is complete; if not, it waits a second and checks again, sleeping in a loop until the search is done, and then prints out the results.
import time                 # needed for sleep
from xml.dom import minidom
import json, pprint
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

base_url = 'https://localhost:8089'
username = 'admin'
password = 'changeme'
search_query = "search=search index=*"

# Authenticate and pull the session key out of the XML response
r = requests.get(base_url + "/servicesNS/admin/search/auth/login",
                 data={'username': username, 'password': password}, verify=False)
session_key = minidom.parseString(r.text).getElementsByTagName('sessionKey')[0].firstChild.nodeValue
print("Session Key:", session_key)

# Create the search job
r = requests.post(base_url + '/services/search/jobs/', data=search_query,
                  headers={'Authorization': ('Splunk %s' % session_key)},
                  verify=False)
sid = minidom.parseString(r.text).getElementsByTagName('sid')[0].firstChild.nodeValue
print("Search ID", sid)

# Poll the job until its dispatchState is DONE
done = False
while not done:
    r = requests.get(base_url + '/services/search/jobs/' + sid,
                     headers={'Authorization': ('Splunk %s' % session_key)},
                     verify=False)
    response = minidom.parseString(r.text)
    for node in response.getElementsByTagName("s:key"):
        if node.hasAttribute("name") and node.getAttribute("name") == "dispatchState":
            dispatchState = node.firstChild.nodeValue
            print("Search Status: ", dispatchState)
            if dispatchState == "DONE":
                done = True
            else:
                time.sleep(1)

# Fetch the results as JSON
r = requests.get(base_url + '/services/search/jobs/' + sid + '/results/',
                 headers={'Authorization': ('Splunk %s' % session_key)},
                 data={'output_mode': 'json'},
                 verify=False)
pprint.pprint(json.loads(r.text))
Many of the requests calls that are used include the flag verify=False to avoid issues with the default self-signed SSL certs, but you can drop that if you have legit certificates.
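For example, instead of disabling verification you can point verify at your certificate chain (the path here is hypothetical; adjust it to wherever your CA bundle actually lives):

r = requests.get(base_url + '/services/search/jobs/' + sid,
                 headers={'Authorization': 'Splunk %s' % session_key},
                 verify='/path/to/splunk_ca_bundle.pem')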
Published a while ago at https://gist.github.com/sduff/aca550a8df636fdc07326225de380a91
Nice piece of coding. One of the wonderful aspects of Python is the ability to use other people's well-written packages. In this case, why not use Splunk's Python packages to do all of that work, with a lot less coding around it.
pip install splunk-sdk (the Splunk SDK for Python, which provides the splunklib package).
Then add the following to your import block
import splunklib.client as client
import splunklib.results as results
pypi.org has documentation on some of the usage, and Splunk has an excellent set of how-to documents. Remember: be lazy, use someone else's work to make your work look better.
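For instance, a minimal sketch using the SDK might look like this (the host, port, credentials, and search query are placeholders; substitute your own):

import splunklib.client as client
import splunklib.results as results

# Connect to the management port and authenticate in one step
service = client.connect(host='localhost', port=8089,
                         username='admin', password='changeme')

# Run a blocking search so the job is finished when create() returns
job = service.jobs.create('search index=* | head 5', exec_mode='blocking')

# Stream the results back and print each event
for item in results.ResultsReader(job.results()):
    if isinstance(item, dict):
        print(item)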
I am trying to fill in an HTML form and get the same result I get when I fill it in manually, but I am failing.
I am trying to fill in the site https://www.desco.org.bd/ebill/login.php with the value 32000001. So far my attempt is as below:
import requests
#LOGIN_URL = 'https://www.desco.org.bd/ebill/login.php'
#LOGIN_URL = 'https://www.desco.org.bd/ebill/authentication.php'
LOGIN_URL = 'https://www.desco.org.bd/ebill/billinformation.php'
payload = {
    'username': '32000001',
    'login': 'Login',
    'login': 'true'
}
with requests.Session() as s:
    p = s.post(LOGIN_URL, data=payload)  # , verify=False)
    # print the html returned or something more intelligent to see if it's a successful login page.
    print(p.text)
I have found that login.php redirects to authentication.php, which further redirects to billinformation.php, which delivers the actual data I need.
Thanks in advance.
N.B. I am not planning to use Selenium since it is too slow for my case, i.e. collecting a large amount of data from this site.
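To make the flow I am describing explicit, here is a rough sketch (the form field names are taken from my payload above, and I am not certain they are what the site actually expects):

import requests

LOGIN_URL = 'https://www.desco.org.bd/ebill/login.php'
BILL_URL = 'https://www.desco.org.bd/ebill/billinformation.php'

payload = {'username': '32000001', 'login': 'Login'}

with requests.Session() as s:
    # Post the form to login.php inside a Session so cookies persist,
    # and let requests follow the redirect chain automatically.
    s.post(LOGIN_URL, data=payload, allow_redirects=True)
    # Then fetch the page that holds the actual bill data.
    bill = s.get(BILL_URL)
    print(bill.text)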
I am working on a similar case; maybe you could try using websockets:
import asyncio
import websockets

def process_function(message):
    # process the message and return whatever should go back to the UI
    return message

async def server(ws, path):
    while True:
        message_received = await ws.recv()   # receive from ui
        print(f'Msg [{message_received}]')
        message_to_send = process_function(message_received)
        await ws.send(message_to_send)       # send feedback to ui

start_server = websockets.serve(server, '127.0.0.1', 5678)  # set the html to run on the same ip:port
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
Another approach:
import json, requests

def do_auth(url):
    headers = {"Content-Type": "application/json", "Accept": '*/*'}
    body = json.dumps({'username': 'user', 'password': 'pass'})
    r = requests.post(url=url, headers=headers, data=body, verify=False)
    print(r.status_code)
    d = json.loads(r.text)
    print(d['access_token'])
    print(d['refresh_token'])
    return d['access_token'], d['refresh_token']

access_token, refresh_token = do_auth(url_auth)  # authorization
# pass the token on subsequent requests; the exact header scheme depends on the target API
headers = {'Authorization': 'Bearer %s' % access_token}
requests.get(url_content, headers=headers, verify=False)  # get data
How do you use Bitbucket's 2.0 API to decline a pull request via Python?
According to their documentation, it should be something like:
import requests

kwargs = {
    'username': MY_BITBUCKET_ACCOUNT,
    'repo_slug': MY_BITBUCKET_REPO,
    'pull_request_id': pull_request_id
}
url = 'https://api.bitbucket.org/2.0/repositories/{username}/{repo_slug}/pullrequests/{pull_request_id}/decline'.format(**kwargs)
headers = {'Content-Type': 'application/json'}
response = requests.post(url, auth=(USERNAME, PASSWORD), headers=headers)
However, this fails with response.text simply saying "Bad Request".
This similar code works for me with their other API endpoints, so I'm not sure why the decline method is failing.
What am I doing wrong?
You have to authenticate with OAuth. I wrote a wrapper for making these requests. Here is a simple example that works. The only thing I couldn't figure out was how to add a reason for declining. I ended up making a request before I declined the PR that added a comment explaining why it was declined.
import os
from oauthlib.oauth2 import BackendApplicationClient
from requests_oauthlib import OAuth2Session

class Bitbucket(object):
    def __init__(self, client_id, client_secret, workplace, repo_slug):
        self.workplace = workplace  # username or company username
        self.repo_slug = repo_slug
        self.token_url = 'https://bitbucket.org/site/oauth2/access_token'
        self.api_url = 'https://api.bitbucket.org/2.0/'
        self.max_pages = 10

        self.client = BackendApplicationClient(client_id=client_id)
        self.oauth = OAuth2Session(client=self.client)
        self.oauth.fetch_token(
            token_url=self.token_url,
            client_id=client_id,
            client_secret=client_secret
        )

    def get_api_url(self, endpoint):
        return f'{self.api_url}repositories/{self.workplace}/{self.repo_slug}/{endpoint}'

bitbucket = Bitbucket(os.environ['BITBUCKET_KEY'], os.environ['BITBUCKET_SECRET'], workplace='foo', repo_slug='bar')
pr_id = 1234
resp = bitbucket.oauth.post(f"{bitbucket.get_api_url('pullrequests')}/{pr_id}/decline")
if resp.status_code == 200:
    print('Declined')
else:
    print('Something went wrong.')
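For the decline reason, a sketch of the comment request I made first might look like this (it assumes the standard Bitbucket 2.0 comments endpoint and body shape; the message text is just an example):

comment_url = f"{bitbucket.get_api_url('pullrequests')}/{pr_id}/comments"
comment_resp = bitbucket.oauth.post(
    comment_url,
    json={'content': {'raw': 'Declining: superseded by another branch.'}})
if comment_resp.status_code == 201:
    print('Comment added, now declining the PR.')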
I am trying to get the score for an application hash and an IP address using the VirusTotal API.
The code works fine for the IP address. See the code below:
###### Code starts
import json
import urllib.request
import urllib.parse
from urllib.request import urlopen
url = 'https://www.virustotal.com/vtapi/v2/ip-address/report'
parameters = {'ip': '90.156.201.27', 'apikey': 'apikey'}
response = urllib.request.urlopen('%s?%s' % (url, urllib.parse.urlencode(parameters))).read()
response_dict = json.loads(response)
#### Code ends
But the same does not work for the application hash. For example, the application hash "f67ce4cdea7425cfcb0f4f4a309b0adc9e9b28e0b63ce51cc346771efa34c1e3" has a score of 29/67 (see the image here). Has anyone worked with this API to get the score?
You can try the same with the requests module:
import requests

params = {'apikey': '<your api key>', 'resource': '<your hash>'}
headers = {"Accept-Encoding": "gzip, deflate",
           "User-Agent": "gzip, My Python requests library example client or username"}
response_dict = {}
try:
    response_dict = requests.get('https://www.virustotal.com/vtapi/v2/file/report',
                                 params=params, headers=headers).json()
except Exception as e:
    print(e)
And you can use this to get the data:
sample_info = {}
if response_dict.get("response_code") is not None and response_dict.get("response_code") > 0:
    # Hashes
    sample_info["md5"] = response_dict.get("md5")
    # AV matches
    sample_info["positives"] = response_dict.get("positives")
    sample_info["total"] = response_dict.get("total")
    print(sample_info["md5"] + " Positives: " + str(sample_info["positives"]) + " Total: " + str(sample_info["total"]))
else:
    print("Not Found in VT")
For reference, check virustotalapi, which lets you use multiple API keys simultaneously.
I tried to call the AppDynamics API using Python requests but am facing an issue.
I wrote some sample code using the Python client as follows...
from appd.request import AppDynamicsClient
c = AppDynamicsClient('URL', 'group', 'appd#123')
for app in c.get_applications():
    print app.id, app.name
It works fine.
But if I do a simple call like the following
import requests
usr =<uid>
pwd =<pwd>
url ='http://10.201.51.40:8090/controller/rest/applications?output=JSON'
response = requests.get(url,auth=(usr,pwd))
print 'response',response
I get the following response:
response <Response [401]>
Am I doing anything wrong here?
A couple of things:
I think the general URL format for AppDynamics applications is (notice the '#'):
url ='http://10.201.51.40:8090/controller/#/rest/applications?output=JSON'
Also, I think the requests.get method needs an additional parameter for the 'account'. For instance, my auth format looks like:
auth = (_username + '#' + _account, _password)
I am able to get the right response code back with this config. Let me know if this works for you.
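Putting the two points together, a minimal sketch might look like this (the account name is a placeholder, and the separator between username and account depends on your controller; I used '#' above, though AppDynamics docs commonly show '@'):

import requests

usr = '<uid>'
pwd = '<pwd>'
account = '<account>'
separator = '@'  # assumption: some setups use '#' as shown above, others '@'
url = 'http://10.201.51.40:8090/controller/rest/applications?output=JSON'

# Basic auth with an account-qualified username
response = requests.get(url, auth=(usr + separator + account, pwd))
print(response.status_code)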
You could also use native Python code for more control. For example:
import os
import sys
import urllib2
import base64
# if you have a proxy, set it here; otherwise comment out these three lines
proxy = urllib2.ProxyHandler({'https': 'proxy:port'})
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
username = "YOUR APPD REST API USER NAME"
password = "YOUR APPD REST API PASSWORD"
#Enter your request
request = urllib2.Request("https://yourappdendpoint/controller/rest/applications/141/events?time-range-type=BEFORE_NOW&duration-in-mins=5&event-types=ERROR,APPLICATION_ERROR,DIAGNOSTIC_SESSION&severities=ERROR")
base64string = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
response = urllib2.urlopen(request)
html = response.read()
This will get you the response, and you can parse the XML as needed.
If you prefer JSON, simply specify it in the request (e.g. by adding output=JSON to the query string).
I want to access the Twitter 1.1 search endpoint using application-only authentication. To do so, I'm trying to implement the steps given in Twitter's API documentation here - https://dev.twitter.com/docs/auth/application-only-auth (scroll to "Issuing application-only requests").
I am not able to obtain the "bearer token" in Step 2. When I run the following code, I receive "Response: 302 Found", which is a redirect to Location: https://api.twitter.com/oauth2/token
Ideally it should be "200 OK".
import urllib
import base64
import httplib
CONSUMER_KEY = 'my_key'
CONSUMER_SECRET = 'my_secret'
encoded_CONSUMER_KEY = urllib.quote(CONSUMER_KEY)
encoded_CONSUMER_SECRET = urllib.quote(CONSUMER_SECRET)
concat_consumer_url = encoded_CONSUMER_KEY + ":" + encoded_CONSUMER_SECRET
host = 'api.twitter.com'
url = '/oauth2/token'
params = urllib.urlencode({'grant_type' : 'client_credentials'})
req = httplib.HTTP(host)
req.putrequest("POST", url)
req.putheader("Host", host)
req.putheader("User-Agent", "My Twitter 1.1")
req.putheader("Authorization", "Basic %s" % base64.b64encode(concat_consumer_url))
req.putheader("Content-Type" ,"application/x-www-form-urlencoded;charset=UTF-8")
req.putheader("Content-Length", "29")
req.putheader("Accept-Encoding", "gzip")
req.endheaders()
req.send(params)
# get the response
statuscode, statusmessage, header = req.getreply()
print "Response: ", statuscode, statusmessage
print "Headers: ", header
I do not want to use any Twitter API wrappers to access this.
The problem was that the URL had to be called with an HTTPS connection. Please check the modified code which works.
import urllib
import base64
import httplib
CONSUMER_KEY = 'my_key'
CONSUMER_SECRET = 'my_secret'
encoded_CONSUMER_KEY = urllib.quote(CONSUMER_KEY)
encoded_CONSUMER_SECRET = urllib.quote(CONSUMER_SECRET)
concat_consumer_url = encoded_CONSUMER_KEY + ":" + encoded_CONSUMER_SECRET
host = 'api.twitter.com'
url = '/oauth2/token/'
params = urllib.urlencode({'grant_type' : 'client_credentials'})
req = httplib.HTTPSConnection(host)
req.putrequest("POST", url)
req.putheader("Host", host)
req.putheader("User-Agent", "My Twitter 1.1")
req.putheader("Authorization", "Basic %s" % base64.b64encode(concat_consumer_url))
req.putheader("Content-Type" ,"application/x-www-form-urlencoded;charset=UTF-8")
req.putheader("Content-Length", "29")
req.putheader("Accept-Encoding", "gzip")
req.endheaders()
req.send(params)
resp = req.getresponse()
print resp.status, resp.reason
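If you are on Python 3 (where httplib has become http.client), the same bearer-token request can be sketched with the requests library, which handles the HTTPS connection for you (the key and secret are placeholders, as above):

import base64
import requests

CONSUMER_KEY = 'my_key'
CONSUMER_SECRET = 'my_secret'

# Base64-encode "key:secret" for the Basic Authorization header
credentials = base64.b64encode(
    ('%s:%s' % (CONSUMER_KEY, CONSUMER_SECRET)).encode('utf-8')).decode('utf-8')

resp = requests.post(
    'https://api.twitter.com/oauth2/token',
    headers={'Authorization': 'Basic %s' % credentials,
             'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'},
    data={'grant_type': 'client_credentials'})
print(resp.status_code, resp.reason)
bearer_token = resp.json().get('access_token')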
Although this is a bit late, you might find this GitHub page of some help. I've started creating a library for Twitter application-only authentication methods.
http://jonhurlock.github.io/Twitter-Application-Only-Authentication-OAuth-Python/