How do you use Bitbucket's 2.0 API to decline a pull request via Python?
According to their documentation, it should be something like:
import requests

kwargs = {
    'username': MY_BITBUCKET_ACCOUNT,
    'repo_slug': MY_BITBUCKET_REPO,
    'pull_request_id': pull_request_id
}
url = 'https://api.bitbucket.org/2.0/repositories/{username}/{repo_slug}/pullrequests/{pull_request_id}/decline'.format(**kwargs)
headers = {'Content-Type': 'application/json'}
response = requests.post(url, auth=(USERNAME, PASSWORD), headers=headers)
However, this fails with response.text simply saying "Bad Request".
This similar code works for me with their other API endpoints, so I'm not sure why the decline method is failing.
What am I doing wrong?
You have to authenticate with OAuth. I wrote a wrapper for making these requests. Here is a simple example that works. The only thing I couldn't figure out was how to pass a reason for the decline, so I ended up making a request that added a comment explaining why, before declining the PR.
import os

from oauthlib.oauth2 import BackendApplicationClient
from requests_oauthlib import OAuth2Session


class Bitbucket(object):
    def __init__(self, client_id, client_secret, workplace, repo_slug):
        self.workplace = workplace  # username or company username
        self.repo_slug = repo_slug
        self.token_url = 'https://bitbucket.org/site/oauth2/access_token'
        self.api_url = 'https://api.bitbucket.org/2.0/'
        self.max_pages = 10

        self.client = BackendApplicationClient(client_id=client_id)
        self.oauth = OAuth2Session(client=self.client)
        self.oauth.fetch_token(
            token_url=self.token_url,
            client_id=client_id,
            client_secret=client_secret
        )

    def get_api_url(self, endpoint):
        return f'{self.api_url}repositories/{self.workplace}/{self.repo_slug}/{endpoint}'


bitbucket = Bitbucket(os.environ['BITBUCKET_KEY'], os.environ['BITBUCKET_SECRET'], workplace='foo', repo_slug='bar')
pr_id = 1234
resp = bitbucket.oauth.post(f"{bitbucket.get_api_url('pullrequests')}/{pr_id}/decline")
if resp.status_code == 200:
    print('Declined')
else:
    print('Something went wrong.')
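For the pre-decline comment mentioned above, here is a hedged sketch reusing the same wrapper; the payload shape follows Bitbucket's 2.0 comments endpoint, and the comment text is just a placeholder:
# Sketch: post a comment with the decline reason before declining the PR.
reason = {'content': {'raw': 'Declining: superseded by a newer PR.'}}
resp = bitbucket.oauth.post(
    f"{bitbucket.get_api_url('pullrequests')}/{pr_id}/comments",
    json=reason,
)
resp.raise_for_status()  # raises if the comment could not be created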
I am trying to fill an HTML form and get the same result as when I fill it in manually, but I am failing.
I am trying to fill the site https://www.desco.org.bd/ebill/login.php with the value 32000001. So far my attempt is as below:
import requests

#LOGIN_URL = 'https://www.desco.org.bd/ebill/login.php'
#LOGIN_URL = 'https://www.desco.org.bd/ebill/authentication.php'
LOGIN_URL = 'https://www.desco.org.bd/ebill/billinformation.php'

payload = {
    'username': '32000001',
    'login': 'Login',
    'login': 'true'  # note: duplicate key, so this value silently overrides 'Login'
}

with requests.Session() as s:
    p = s.post(LOGIN_URL, data=payload)  # , verify=False)
    # print the html returned or something more intelligent to see if it's a successful login page.
    print(p.text)
I have found that login.php redirects to authentication.php, which further redirects to billinformation.php, which delivers the data I actually need.
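A requests.Session keeps cookies across those hops, and each intermediate response is recorded in r.history; here is a minimal sketch to inspect the chain (payload trimmed to the username field, purely illustrative):
with requests.Session() as s:
    r = s.post('https://www.desco.org.bd/ebill/login.php',
               data={'username': '32000001', 'login': 'Login'})
    for hop in r.history:
        print(hop.status_code, hop.url)  # each redirect in the chain
    print(r.url)  # final page, e.g. billinformation.php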
Thanks in advance.
N.B. I am not planning to use Selenium, since it is too slow for my case, i.e. collecting a large amount of data from this site.
I am working on a similar case; maybe you could try using websockets:
import asyncio

import websockets


def process_function(message):
    # process the message and return the reply to send back
    return message


async def server(ws, path):
    while True:
        message_received = await ws.recv()  # receive from ui
        print(f'Msg [{message_received}]')
        message_to_send = process_function(message_received)
        await ws.send(message_to_send)  # send feedback to ui


start_server = websockets.serve(server, '127.0.0.1', 5678)  # point the html at the same ip:port
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
Another try:
import json, requests


def do_auth(url):
    headers = {"Content-Type": "application/json", "Accept": '*/*'}
    body = json.dumps({'username': 'user', 'password': 'pass'})
    r = requests.post(url=url, headers=headers, data=body, verify=False)
    print(r.status_code)
    d = json.loads(r.text)
    print(d['access_token'])
    print(d['refresh_token'])
    return d['access_token'], d['refresh_token']


access_token, refresh_token = do_auth(url_auth)  # authorization
headers = {'Authorization': f'Bearer {access_token}'}  # assuming a Bearer token scheme
requests.get(url_content, headers=headers, verify=False)  # get data
I am trying to connect to Splunk via its API using Python. I can connect and get a 200 status code, but when I read the content, it doesn't contain the content of the page.
Here is my code:
import json
import requests
import re
baseurl = 'https://my_splunk_url:8888'
username = 'my_username'
password = 'my_password'
headers={"Content-Type": "application/json"}
s = requests.Session()
s.proxies = {"http": "my_proxy"}
r = s.get(baseurl, auth=(username, password), verify=False, headers=None, data=None)
print(r.status_code)
print(r.text)
I am new to Splunk and Python, so any ideas or suggestions as to why this is happening would help.
You need to authenticate first to get a token; then you'll be able to hit the rest of the REST endpoints. The auth endpoint is at /servicesNS/admin/search/auth/login, which will give you the session_key that you then provide to subsequent requests.
Here is some code that uses requests to authenticate to a Splunk instance and then start a search. It checks whether the search is complete; if not, it waits a second and checks again, sleeping and re-checking until the search is done, and finally prints out the results.
import time  # needed for sleep
from xml.dom import minidom
import json, pprint

import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

base_url = 'https://localhost:8089'
username = 'admin'
password = 'changeme'
search_query = "search=search index=*"

r = requests.get(base_url + "/servicesNS/admin/search/auth/login",
                 data={'username': username, 'password': password}, verify=False)

session_key = minidom.parseString(r.text).getElementsByTagName('sessionKey')[0].firstChild.nodeValue
print("Session Key:", session_key)

r = requests.post(base_url + '/services/search/jobs/', data=search_query,
                  headers={'Authorization': ('Splunk %s' % session_key)},
                  verify=False)

sid = minidom.parseString(r.text).getElementsByTagName('sid')[0].firstChild.nodeValue
print("Search ID", sid)

done = False
while not done:
    r = requests.get(base_url + '/services/search/jobs/' + sid,
                     headers={'Authorization': ('Splunk %s' % session_key)},
                     verify=False)
    response = minidom.parseString(r.text)
    for node in response.getElementsByTagName("s:key"):
        if node.hasAttribute("name") and node.getAttribute("name") == "dispatchState":
            dispatchState = node.firstChild.nodeValue
            print("Search Status: ", dispatchState)
            if dispatchState == "DONE":
                done = True
            else:
                time.sleep(1)

r = requests.get(base_url + '/services/search/jobs/' + sid + '/results/',
                 headers={'Authorization': ('Splunk %s' % session_key)},
                 data={'output_mode': 'json'},
                 verify=False)

pprint.pprint(json.loads(r.text))
Many of the request calls used here include the flag verify=False to avoid issues with the default self-signed SSL certs, but you can drop it if you have legitimate certificates.
Published a while ago at https://gist.github.com/sduff/aca550a8df636fdc07326225de380a91
Nice piece of coding. One of the wonderful aspects of Python is the ability to use other people's well-written packages. In this case, why not use Splunk's own Python package to do all of that work, with a lot less code around it?
pip install splunk-sdk
Then add the following to your import block:
import splunklib.client as client
import splunklib.results as results
pypi.org has documentation on some of the usage, and Splunk has an excellent set of how-to documents. Remember: be lazy, use someone else's work to make your work look better.
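For instance, a minimal sketch of the same blocking search using the SDK; the host, port, credentials, and index=* query are the placeholders carried over from the code above, not verified here:
import splunklib.client as client
import splunklib.results as results

# Connect, run the search in blocking mode, then stream the results as JSON.
service = client.connect(host='localhost', port=8089,
                         username='admin', password='changeme')
job = service.jobs.create('search index=*', exec_mode='blocking')
for result in results.JSONResultsReader(job.results(output_mode='json')):
    print(result)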
I have successfully implemented OAuth1 with the regular requests module like this:
import requests
from requests_oauthlib import OAuth1
oauth = OAuth1(client_key=oauth_cred["consumer_key"],
               client_secret=oauth_cred["consumer_secret"],
               resource_owner_key=oauth_cred["access_token"],
               resource_owner_secret=oauth_cred["access_token_secret"])
session = requests.Session()
session.auth = oauth
When trying to transfer this to aiohttp, I have not been able to get it to work. Substituting aiohttp.ClientSession() for requests.Session() gives me {'errors': [{'code': 215, 'message': 'Bad Authentication data.'}]}.
I have looked at some solutions on the internet like https://github.com/klen/aioauth-client, but this seems to be a different approach. I just want it to function exactly like in my example above.
I tried
import aiohttp
from aioauth_client import TwitterClient
oauth = TwitterClient(consumer_key=oauth_cred["consumer_key"],
                      consumer_secret=oauth_cred["consumer_secret"],
                      oauth_token=oauth_cred["access_token"],
                      oauth_token_secret=oauth_cred["access_token_secret"])
session = aiohttp.ClientSession()
session.auth = oauth
but I got the same error.
How can I get this to work?
Using oauthlib:
import oauthlib.oauth1, aiohttp, asyncio

async def main():
    # Create the Client. This can be reused for multiple requests.
    client = oauthlib.oauth1.Client(
        client_key = oauth_cred['consumer_key'],
        client_secret = oauth_cred['consumer_secret'],
        resource_owner_key = oauth_cred['access_token'],
        resource_owner_secret = oauth_cred['access_token_secret']
    )

    # Define your request. In my code I'm POSTing so that's what I have here,
    # but if you're doing something different you'll need to change this a bit.
    uri = '...'
    http_method = 'POST'
    body = '...'
    headers = {
        'Content-Type': 'application/x-www-form-urlencoded'
    }

    # Sign the request data. This needs to be called for each request you make.
    uri, headers, body = client.sign(
        uri = uri,
        http_method = http_method,
        body = body,
        headers = headers
    )

    # Make your request with the signed data.
    async with aiohttp.ClientSession() as session:
        async with session.post(uri, data=body, headers=headers, raise_for_status=True) as r:
            ...

# asyncio.run has a bug on Windows in Python 3.8 https://bugs.python.org/issue39232
#asyncio.run(main())
asyncio.get_event_loop().run_until_complete(main())
The oauthlib.oauth1.Client constructor takes a bunch more parameters too if you need them (for basic use you don't). The official documentation isn't very thorough, but the doc comment on the method itself is pretty good.
The doc comment on the Client.sign method has more information about the parameters it takes.
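As a hedged aside, signing a GET works the same way, just without a body; the URL below is a placeholder:
# Sketch: sign a GET request; only the URI and headers come back modified.
uri, headers, _ = client.sign(
    uri='https://example.com/resource?x=1',  # placeholder URL
    http_method='GET'
)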
I am using Python's slumber library to make an HTTP call to a service. This is how it looks. I need to make a GET request to the URL https://sample-billing-api.test/2/billing-accounts?id=2169.
When I run this, I get the error NameError: name 'accounts' is not defined.
import slumber
from requests.auth import AuthBase


class ParseAuth(AuthBase):
    def __call__(self, r):
        r.headers['x-api-key'] = '<API KEY>'
        r.headers['Content-Type'] = 'application/json'
        return r


api = slumber.API('https://sample-billing-api.test/2/', append_slash=False, auth=ParseAuth())
response = api.billing-accounts.get(id=2169)
The line api.billing-accounts.get(id=2169) doesn't work.
One solution is to switch to the Python requests package and do something like the following. This approach works, but I need to use the slumber package, as I have been using it for all my API calls and have written decorators to handle slumber responses.
import requests

headers = {
    'x-api-key': '<api_key>',
    'Content-Type': 'application/json'
}

with requests.session() as s:
    res = s.get(
        'https://sample-billing-api.test/2/billing-accounts?id=2169',
        headers=headers
    )
    s.close()
Thanks in advance.
This solution finally worked. (A hyphen isn't valid in a Python attribute name, so api.billing-accounts parses as api.billing minus an undefined variable accounts, which causes the NameError; getattr sidesteps that.)
import slumber
from requests.auth import AuthBase


class ParseAuth(AuthBase):
    def __call__(self, r):
        r.headers['x-api-key'] = '<API KEY>'
        r.headers['Content-Type'] = 'application/json'
        return r


api = slumber.API('https://sample-billing-api.test/2/', append_slash=False,
                  auth=ParseAuth())
response = getattr(api, "billing-accounts").get(id=2169)
I know how to do it for a URL I request directly, e.g.:
>>> payload = {'key1': 'value1', 'key2': ['value2', 'value3']}
>>> r = requests.get('http://httpbin.org/get', params=payload)
>>> print(r.url)
But what about after you visit a URL that redirects you, such as one used with OAuth? For example:
authorized_url = facebook.get_authorize_url(**params)
requests.get(authorized_url)
The URL will then redirect to one such as https://localhost:5000/authorized?code=AQCvF. How do I get the code=AQCvF?
I could probably get the address from the current browser and then parse the URL, but is there a cleaner way?
Complete code is below:
index.j2
<p>Login with Facebook</p>
routes.py
app.add_route('/facebook-login', LoginHandler('index.j2'))
app.add_route('/authorized', AuthorizedHandler('index.j2'))
handlers.py
from rauth.service import OAuth2Service
import requests
import os
import webbrowser

# rauth OAuth 2.0 service wrapper
graph_url = 'https://graph.facebook.com/'
facebook = OAuth2Service(name='facebook',
                         authorize_url='https://www.facebook.com/dialog/oauth',
                         access_token_url=graph_url + 'oauth/access_token',
                         client_id=FB_CLIENT_ID,
                         client_secret=FB_CLIENT_SECRET,
                         base_url=graph_url)
class AuthorizedHandler(TemplateHandler):
    def on_get(self, req, res):
        code = self.requests.get['code']
        data = dict(code=code, redirect_uri=REDIRECT_URI)
        session = facebook.get_auth_session(data=data)

        # response
        me = session.get('me').json()
        print('me', me)
        UserController.create(me['username'], me['id'])


class LoginHandler(TemplateHandler):
    async def on_get(self, req, res):
        # visit URL and client authorizes
        params = {'response_type': 'code',
                  'redirect_uri': REDIRECT_URI}
        webbrowser.open(facebook.get_authorize_url(**params))
        response = requests.get(facebook.get_authorize_url(**params))
        print(response.url)
You can get the .url attribute from the Response object - this would be the final response URL:
response = requests.get(authorized_url)
print(response.url)
Then, you can urlparse the url to extract the GET parameters:
In [1]: from urllib.parse import parse_qs, urlparse
In [2]: url = "https://localhost:5000/authorized?code=AQCvF"
In [3]: parse_qs(urlparse(url).query)
Out[3]: {'code': ['AQCvF']}
Your code would work fine if you were using a synchronous Python framework, but it appears you are using an async framework, as implied by the async def on_get(self, req, res).
You will either have to write an async HTTP request function (for example with aiohttp), or your framework might have one built in, in which case you can replace requests.get(facebook.get_authorize_url(**params)) with res.redirect(facebook.get_authorize_url(**params)).
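If you do need the first option, here is a rough sketch of an async fetch that follows redirects and returns the final URL; the helper name is hypothetical:
import aiohttp

async def fetch_final_url(url):
    # Hypothetical helper: GET the URL, follow redirects (aiohttp's default),
    # and return the final URL, analogous to response.url in requests.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            return str(resp.url)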