Python: requests and response 401

I have a little problem with authentication. I am writing a script which gets a login and password from the user (input from the keyboard) and then fetches some data from a website (HTTP, not HTTPS), but every time I run the script the response is 401. I read some similar posts on Stack Overflow and tried these solutions:
Solution 1
from http.client import HTTPConnection
from base64 import b64encode

c = HTTPConnection("somewebsite")
userAndPass = b64encode(b"username:password").decode("ascii")
headers = {'Authorization': 'Basic %s' % userAndPass}
c.request('GET', '/', headers=headers)
res = c.getresponse()
data = res.read()
Solution 2
import requests

with requests.Session() as c:
    url = 'somewebsite'
    USERNAME = 'username'
    PASSWORD = 'password'
    c.get(url)
    login_data = dict(username=USERNAME, password=PASSWORD)
    c.post(url, data=login_data)
    page = c.get('somewebsite', headers={"Referer": "somewebsite"})
    print(page)
Solution 3
import urllib.parse
import urllib.request

www = 'somewebsite'
value = {'filter': 'somefilter'}
data = urllib.parse.urlencode(value)
data = data.encode('utf-8')
req = urllib.request.Request(www, data)
resp = urllib.request.urlopen(req)
respData = resp.read()
print(respData)
x = urllib.request.urlopen(www, "username", "password")
print(x.read())
I don't know how to solve this problem. Can somebody give me a link or a tip?

Have you tried the Basic Authentication example from requests?
>>> from requests.auth import HTTPBasicAuth
>>> requests.get('https://api.github.com/user', auth=HTTPBasicAuth('user', 'pass'))
<Response [200]>

Can I know what type of authentication the website uses?
This is the official Basic Auth example (http://docs.python-requests.org/en/master/user/advanced/#http-verbs):
import requests
from requests.auth import HTTPBasicAuth

auth = HTTPBasicAuth('fake@example.com', 'not_a_real_password')
r = requests.post(url=url, data=body, auth=auth)
print(r.status_code)
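If the site really does use HTTP Basic Auth, a minimal sketch for the original question might look like this (the URL is a placeholder, and getpass is only used so the password is not echoed while typing):

import requests
from getpass import getpass
from requests.auth import HTTPBasicAuth

username = input("Username: ")
password = getpass("Password: ")    # read the password without echoing it

# Placeholder URL; replace with the real site from the question
response = requests.get("http://somewebsite/", auth=HTTPBasicAuth(username, password))

print(response.status_code)
# On a 401, this header usually reveals which scheme the server expects (Basic, Digest, NTLM, ...)
print(response.headers.get("WWW-Authenticate"))

If the WWW-Authenticate header names something other than Basic, that would explain the 401 even with correct credentials.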

To use an API with authentication, we need a token_id or app_id that provides access for our request. Below is an example of how we can build the URL and get the response:
import requests

city = input()
api_call = "http://api.openweathermap.org/data/2.5/weather?"
app_id = "892d5406f4811786e2b80a823c78f466"
req_url = api_call + "q=" + city + "&appid=" + app_id
response = requests.get(req_url)
data = response.json()
if data["cod"] == 200:
    hum = data["main"]["humidity"]
    print("Humidity is %d" % hum)
elif data["cod"] != 200:
    print("Error occurred:", data["cod"], data["message"])

Related

Problem with response status code saying response is not defined

Basically, I am trying to pass a list of IDs from a spreadsheet, in payloads of 100, to delete organizations using the destroy_many endpoint.
import json
import xlrd
import requests

session = requests.Session()
session.headers = {'Content-Type': 'application/json'}
session.auth = 'my email', 'password'
url = 'https://domain.zendesk.com/api/v2/organizations/destroy_many.json'
payloads = []
organizations_dict = {}
book = xlrd.open_workbook('orgs_list_destroy.xls')
sheet = book.sheet_by_name('Sheet1')
for row in range(1, sheet.nrows):
    if sheet.row_values(row)[2]:
        organizations_dict = {'ids': int(sheet.row_values(row)[2])}
        if len(organizations_dict) == 100:
            payloads.append(json.dumps(organizations_dict))
            organizations_dict = {}
if organizations_dict:
    payloads.append(json.dumps(organizations_dict))
for payload in payloads:
    response = session.delete(url, data=payload)
    if response.status_code != 200:
        print('Import failed with status {}'.format(response.status_code))
        exit()
    print('Successfully imported a batch of organizations')
Try placing it outside the for loop, where you're defining your request headers:
url = 'https://{{YOURDOMAIN}}.zendesk.com/api/v2/organizations/destroy_many.json'
user = 'YOUR_EMAIL@DOMAIN.com' + '/token'
pwd = '{{YOUR_TOKEN}}'
headers = {'Content-Type': 'application/json'}
response = requests.delete(url, auth=(user, pwd), headers=headers)
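If you want to keep the batching loop from the question, the same token credentials can instead be set once on the Session (placeholders as above; the payload shown is only an illustrative stand-in for the batches built from the spreadsheet):

import requests
import json

session = requests.Session()
session.headers = {'Content-Type': 'application/json'}
# Zendesk API token auth: the user name is your email with '/token' appended
session.auth = ('YOUR_EMAIL@DOMAIN.com/token', 'YOUR_TOKEN')

url = 'https://YOURDOMAIN.zendesk.com/api/v2/organizations/destroy_many.json'
payloads = [json.dumps({'ids': '1,2,3'})]   # placeholder batch; build these from the spreadsheet

for payload in payloads:
    response = session.delete(url, data=payload)
    if response.status_code != 200:
        print('Batch failed with status {}'.format(response.status_code))
    else:
        print('Successfully deleted a batch of organizations')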

Python: how to extract data from an OData API that returns pages via @odata.nextLink

I need to pull data from an OData API. With the code below I do receive data, but only 250 rows.
The JSON contains a key called @odata.nextLink with a single value: the BASE_URL + endpoint + ?$skip=250.
How can I loop through the next pages?
import requests
import pandas as pd
import json

BASE_URL = "base_url"

def session_token():
    url = BASE_URL + '/api/oauth/token'
    headers = {"Accept": "application/json",
               "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"}
    body = {"username": "user",
            "password": "pwd",
            "grant_type": "password"}
    return "Bearer " + requests.post(url, headers=headers, data=body).json()["access_token"]

def make_request(endpoint, token=session_token()):
    headers = {"Authorization": token}
    response = requests.get(BASE_URL + endpoint, headers=headers)
    if response.status_code == 200:
        json_data = json.loads(response.text)
        return json_data

make_request("/odata/endpoint")
Following @Marek Piotrowski's advice I modified my code and came to a solution:
def main():
    url = "endpoint"
    while True:
        if not url:
            break
        response = make_request("endpoint")
        if response.status_code == 200:
            json_data = json.loads(response.text)
            url = json_data["@odata.nextLink"]  # Fetch next link
            yield json_data['value']

result = pd.concat((json_normalize(row) for row in main()))
print(result)  # Final dataframe, works like a charm :)
Something like this would retrieve all records, I believe (assuming there really is an @odata.nextLink key in json_data):
def retrieve_all_records(endpoint, token=session_token()):
    all_records = []
    headers = {"Authorization": token}
    url = BASE_URL + endpoint
    while True:
        if not url:
            break
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            json_data = json.loads(response.text)
            all_records = all_records + json_data['records']
            url = json_data['@odata.nextLink']
    return all_records
The code is untested, though. Let me know if it works. Alternatively, you could make a recursive call to make_request, I believe, but you'd then have to store the results somewhere outside the function itself.
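For completeness, a quick sketch of that recursive variant, reusing BASE_URL and session_token() from the question and assuming the payload exposes an @odata.nextLink key and a 'value' array (untested, like the loop above):

import requests

def fetch_all(endpoint, token=session_token(), records=None):
    # Recursive sketch: accumulate each page's 'value' array and follow
    # @odata.nextLink until it disappears (key names assumed from the question).
    if records is None:
        records = []
    response = requests.get(BASE_URL + endpoint, headers={"Authorization": token})
    if response.status_code != 200:
        return records
    json_data = response.json()
    records.extend(json_data.get('value', []))
    next_link = json_data.get('@odata.nextLink')
    if next_link:
        # nextLink is typically absolute, so strip the base to keep the endpoint-style argument
        return fetch_all(next_link.replace(BASE_URL, ''), token, records)
    return records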
I know this is late, but you could look at the article from Towards Data Science by Ephram Mwai.
He pretty much solved the problem with a good script.

I can't open a session in ALM 12.5x

I'm new to ALM. I just read some REST API guides and tried to follow them, but I've run into a problem: my last request returns 401 (User not authenticated). What am I doing wrong?
import requests
from requests.auth import HTTPBasicAuth
url = "https://almalmqc1250saastrial.saas.hpe.com"
login = "+++++++"
password = "+++++"
cookies = dict()
headers = {}
r = requests.get(url + "/qcbin/rest/is-authenticated")
print(r.status_code, r.headers.get('WWW-Authenticate'))
r = requests.get(url + "/qcbin/authentication-point/authentication",
auth=HTTPBasicAuth(login, password), headers=headers)
print(r.status_code, r.headers)
cookie = r.headers.get('Set-Cookie')
LWSSO_COOKIE_KEY = cookie[cookie.index("=") + 1: cookie.index(";")]
cookies['LWSSO_COOKIE_KEY'] = LWSSO_COOKIE_KEY
print(cookies)
r = requests.post(url + "/qcbin/rest/site-session", cookies=cookies)
print(r.status_code, r.headers)
The solution was found. The problem was an incorrect URL. For authentication you need this URL:
url_log = "https://login.software.microfocus.com/msg/actions/doLogin.action"
And you need these headers:
self.__headers = {
    "Content-Type": "application/x-www-form-urlencoded",
    'Host': 'login.software.microfocus.com'
}
The POST request to authenticate will then be:
r = self.__session.post(self.url_log, data=self.input_auth, headers=self.__headers)
Where data is:
self.input_auth = 'username=' + login + '&' + 'password=' + password
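Pulled out of the class, the same flow might look roughly like this (endpoint and headers taken from the answer above; the credentials are placeholders, and urlencode replaces the manual string concatenation so special characters in the password survive):

import requests
from urllib.parse import urlencode

url_log = "https://login.software.microfocus.com/msg/actions/doLogin.action"
headers = {
    "Content-Type": "application/x-www-form-urlencoded",
    "Host": "login.software.microfocus.com",
}

login = "your_login"          # placeholder credentials
password = "your_password"

session = requests.Session()
# urlencode escapes characters like '&' or '%' that would break 'username=...&password=...'
body = urlencode({"username": login, "password": password})
r = session.post(url_log, data=body, headers=headers)
print(r.status_code, r.headers.get("Set-Cookie"))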

rest api to trigger a concourse pipeline/job

I am able to use the code below to do a GET request on the Concourse API and fetch pipeline build details.
However, the POST request to trigger the pipeline build does not work, and no error is reported.
Here is the code:
url = "http://192.168.100.4:8080/api/v1/teams/main/"
r = requests.get(url + 'auth/token')
json_data = json.loads(r.text)
cookie = {'ATC-Authorization': 'Bearer '+ json_data["value"]}
r = requests.post(url + 'pipelines/pipe-name/jobs/job-name/builds'
, cookies=cookie)
print r.text
print r.content
r = requests.get(url + 'pipelines/pipe-name/jobs/job-name/builds/17', cookies=cookie)
print r.text
You may use a Session:
[...] The Session object allows you to persist certain parameters across requests. It also persists cookies across all requests made from the Session instance [...]
url = "http://192.168.100.4:8080/api/v1/teams/main/"
req_sessions = requests.Session() #load session instance
r = req_sessions.get(url + 'auth/token')
json_data = json.loads(r.text)
cookie = {'ATC-Authorization': 'Bearer '+ json_data["value"]}
r = req_sessions.post(url + 'pipelines/pipe-name/jobs/job-name/builds', cookies=cookie)
print r.text
print r.content
r = req_sessions.get(url + 'pipelines/pipe-name/jobs/job-name/builds/17')
print r.text
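An alternative sketch, assuming your Concourse version also accepts the token as a plain Authorization header rather than only as the ATC-Authorization cookie (URL and pipeline names are the placeholders from the question):

import requests
import json

url = "http://192.168.100.4:8080/api/v1/teams/main/"
session = requests.Session()

r = session.get(url + 'auth/token')
token = json.loads(r.text)["value"]
# Assumption: the ATC accepts a standard bearer token header on this version
session.headers['Authorization'] = 'Bearer ' + token

r = session.post(url + 'pipelines/pipe-name/jobs/job-name/builds')
print(r.status_code)
print(r.text)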

Equivalent Python code for the following Java http get requests

I am trying to convert the following Java code to Python. Not sure what I am doing wrong, but I end up with a 500 Internal Server Error in Python.
Is the "body" argument of httplib.HTTPConnection's request method equivalent to Java's HttpEntity?
Any other thoughts on what could be wrong?
The input information I collect is correct for sure.
Any help will be really appreciated. I have tried several things, but end up with the same internal server error.
Java Code:
HttpEntity reqEntitiy = new StringEntity("loginTicket="+ticket);
HttpRequestBase request = reMethod.getRequest(uri, reqEntitiy);
request.addHeader("ticket", ticket);
HttpResponse response = httpclient.execute(request);
HttpEntity responseEntity = response.getEntity();
StatusLine responseStatus = response.getStatusLine();
Python code:
url = serverURL + "resources/slmservices/templates/"+templateId+"/options"
#Create the request
ticket = ticket.replace("'",'"')
headers = {"ticket":ticket}
print "ticket",ticket
reqEntity = "loginTicket="+ticket
body = "loginTicket="+ticket
url2 = urlparse.urlparse(serverURL)
h1 = httplib.HTTPConnection(url2.hostname,8580)
print "h1",h1
url3 = urlparse.urlparse(url)
print "url path",url3.path
ubody = {"loginTicket":ticket}
data = urllib.urlencode(ubody)
conn = h1.request("GET",url3.path,data,headers)
#conn = h1.request("GET",url3.path)
response = h1.getresponse()
lines = response.read()
print "response.status",response.status
print "response.reason",response.reason
You don't need to go this low-level. Use urllib2 instead:
import urllib2
from urllib import urlencode
url = "{}resources/slmservices/templates/{}/options".format(
serverURL, templateId)
headers = {"ticket": ticket}
params = {"loginTicket": ticket}
url = '{}?{}'.format(url, urlencode(params))
request = urllib2.Request(url, headers=headers)
response = urllib2.urlopen(request)
print 'Status', response.getcode()
print 'Response data', response.read()
Note that the parameters are added to the URL to form URL query parameters.
You can make this simpler still by installing the requests library:
import requests

url = "{}resources/slmservices/templates/{}/options".format(
    serverURL, templateId)
headers = {"ticket": ticket}
params = {"loginTicket": ticket}
response = requests.get(url, params=params, headers=headers)
print 'Status', response.status_code
print 'Response data', response.content  # or response.text for Unicode
Here requests takes care of URL-encoding the query string parameters and adding them to the URL for you, just like the Java code does.
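If you want to verify what requests actually sent, the final URL (with the encoded query string) is exposed on the response; a quick check against a public echo endpoint, purely for illustration:

import requests

# httpbin simply echoes the request back, which makes it handy for inspecting the built URL
response = requests.get("https://httpbin.org/get", params={"loginTicket": "abc 123/xyz"})
print(response.url)            # the URL requests built, query string already encoded
print(response.request.url)    # same value, taken from the prepared request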
