Need help adding API PUT method to Python script - python

I am using the script below to collect inventory information from servers and send it to a product called Device42. The script currently works; however, one of the APIs that I'm trying to add uses PUT instead of POST. I'm not a programmer and just started using Python with this script. The script runs under IronPython. Can the PUT method be used in this script?
"""
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
##################################################
# a sample script to show how to use
# /api/ip/add-or-update
# /api/device/add-or-update
#
# requires ironPython (http://ironpython.codeplex.com/) and
# powershell (http://support.microsoft.com/kb/968929)
##################################################
import clr
clr.AddReference('System.Management.Automation')
from System.Management.Automation import (
PSMethod, RunspaceInvoke
)
RUNSPACE = RunspaceInvoke()
import urllib
import urllib2
import traceback
import base64
import math
import ssl
import functools
BASE_URL='https://device42_URL'
API_DEVICE_URL=BASE_URL+'/api/1.0/devices/'
API_IP_URL =BASE_URL+'/api/1.0/ips/'
API_PART_URL=BASE_URL+'/api/1.0/parts/'
API_MOUNTPOINT_URL=BASE_URL+'/api/1.0/device/mountpoints/'
API_CUSTOMFIELD_URL=BASE_URL+'/api/1.0/device/custom_field/'
USER ='usernme'
PASSWORD ='password'
old_init = ssl.SSLSocket.__init__
@functools.wraps(old_init)
def init_with_tls1(self, *args, **kwargs):
    kwargs['ssl_version'] = ssl.PROTOCOL_TLSv1
    old_init(self, *args, **kwargs)
ssl.SSLSocket.__init__ = init_with_tls1
def post(url, params):
    """
    http post with basic-auth
    params is dict like object
    """
    try:
        data = urllib.urlencode(params) # convert to ascii chars
        headers = {
            'Authorization' : 'Basic ' + base64.b64encode(USER + ':' + PASSWORD),
            'Content-Type'  : 'application/x-www-form-urlencoded'
        }
        req = urllib2.Request(url, data, headers)
        print '---REQUEST---', req.get_full_url()
        print req.headers
        print req.data
        response = urllib2.urlopen(req)
        print '---RESPONSE---'
        print response.getcode()
        print response.info()
        print response.read()
    except urllib2.HTTPError as err:
        print '---RESPONSE---'
        print err.getcode()
        print err.info()
        print err.read()
    except urllib2.URLError as err:
        print '---RESPONSE---'
        print err
def to_ascii(s):
    # ignore non-ascii chars
    return s.encode('ascii', 'ignore')
def wmi(query):
    return [dict([(prop.Name, prop.Value) for prop in psobj.Properties]) for psobj in RUNSPACE.Invoke(query)]
def closest_memory_assumption(v):
    return int(256 * math.ceil(v / 256.0))
def add_or_update_device():
    computer_system = wmi('Get-WmiObject Win32_ComputerSystem -Namespace "root\CIMV2"')[0] # take first
    bios = wmi('Get-WmiObject Win32_BIOS -Namespace "root\CIMV2"')[0]
    operating_system = wmi('Get-WmiObject Win32_OperatingSystem -Namespace "root\CIMV2"')[0]
    environment = wmi('Get-WmiObject Win32Reg_ESFFarmNode -Namespace "root\CIMV2"')[0]
    mem = closest_memory_assumption(int(computer_system.get('TotalPhysicalMemory')) / 1047552)
    dev_name = to_ascii(computer_system.get('Name')).upper()
    fqdn_name = to_ascii(computer_system.get('Name')).upper() + '.' + to_ascii(computer_system.get('Domain')).lower()
    device = {
        'memory'        : mem,
        'os'            : to_ascii(operating_system.get('Caption')),
        'osver'         : operating_system.get('OSArchitecture'),
        'osmanufacturer': to_ascii(operating_system.get('Manufacturer')),
        'osserial'      : operating_system.get('SerialNumber'),
        'osverno'       : operating_system.get('Version'),
        'service_level' : environment.get('Environment'),
        'notes'         : 'Test w/ Change to Device name collection'
    }
    devicedmn = ''
    for dmn in ['Domain1', 'Domain2', 'Domain3', 'Domain4', 'Domain5']:
        if dmn == to_ascii(computer_system.get('Domain')).strip():
            devicedmn = 'Domain'
            device.update({ 'name' : fqdn_name, })
            break
    if devicedmn != 'Domain':
        device.update({
            'name': dev_name,
        })
    manufacturer = ''
    for mftr in ['VMware, Inc.', 'Bochs', 'KVM', 'QEMU', 'Microsoft Corporation', 'Xen']:
        if mftr == to_ascii(computer_system.get('Manufacturer')).strip():
            manufacturer = 'virtual'
            device.update({ 'manufacturer' : 'vmware', })
            break
    if manufacturer != 'virtual':
        device.update({
            'manufacturer': to_ascii(computer_system.get('Manufacturer')).strip(),
            'hardware': to_ascii(computer_system.get('Model')).strip(),
            'serial_no': to_ascii(bios.get('SerialNumber')).strip(),
            'type': 'Physical',
        })
    cpucount = 0
    for cpu in wmi('Get-WmiObject Win32_Processor -Namespace "root\CIMV2"'):
        cpucount += 1
        cpuspeed = cpu.get('MaxClockSpeed')
        cpucores = cpu.get('NumberOfCores')
    if cpucount > 0:
        device.update({
            'cpucount': cpucount,
            'cpupower': cpuspeed,
            'cpucore': cpucores,
        })
    hddcount = 0
    hddsize = 0
    for hdd in wmi('Get-WmiObject Win32_LogicalDisk -Namespace "root\CIMV2" | where{$_.Size -gt 1}'):
        hddcount += 1
        hddsize += hdd.get('Size') / 1073741742
    if hddcount > 0:
        device.update({
            'hddcount': hddcount,
            'hddsize': hddsize,
        })
    post(API_DEVICE_URL, device)
    for hdd in wmi('Get-WmiObject Win32_LogicalDisk -Namespace "root\CIMV2" | where{$_.Size -gt 1}'):
        mountpoint = {
            'mountpoint'    : hdd.get('Name'),
            'label'         : hdd.get('Caption'),
            'fstype'        : hdd.get('FileSystem'),
            'capacity'      : hdd.get('Size') / 1024 / 1024,
            'free_capacity' : hdd.get('FreeSpace') / 1024 / 1024,
            'device'        : dev_name,
            'assignment'    : 'Device',
        }
        post(API_MOUNTPOINT_URL, mountpoint)
    network_adapter_configuration = wmi('Get-WmiObject Win32_NetworkAdapterConfiguration -Namespace "root\CIMV2" | where{$_.IPEnabled -eq "True"}')
    for ntwk in network_adapter_configuration:
        for ipaddr in ntwk.get('IPAddress'):
            ip = {
                'ipaddress'  : ipaddr,
                'macaddress' : ntwk.get('MACAddress'),
                'label'      : ntwk.get('Description'),
                'device'     : dev_name,
            }
            post(API_IP_URL, ip)
def main():
    try:
        add_or_update_device()
    except:
        traceback.print_exc()
if __name__ == "__main__":
    main()

OK, first things first: you need to understand the difference between PUT and POST. I would write it out, but another member of the community gave a very good description of the two here.
Now, yes, you can use the Requests library with that script. Below are some examples of using it. To install requests (assuming you have pip), run:
pip install requests
Now, let's go through some examples of using the Requests library; the documentation can be found here.
HTTP GET request: you call the get function from the requests library and give the URL as a parameter. The call returns a Response object; since GET generally returns a body, you can print it via the response's text attribute:
r = requests.get('http://urlhere.com/apistuffhere')
print(r.text)
HTTP POST: posting to a URL will, depending on how the API was set up, usually return something (which is handy for error handling), but you also have to pass in a payload. Here is an example of a POST request for a new user entry; the payload is sent as the form-encoded request body via data=. Again, you can print the response's text to check what the API returned:
payload = {'username': 'myloginname', 'password': 'passwordhere'}
r = requests.post('https://testlogin.com/newuserEntry', data=payload)
print(r.text)
Alternatively, you can print just r; a 200 response means the request was successful.
For PUT: keep in mind that PUT responses are not cacheable, but the syntax is the same as POST and you can still inspect the returned response object for errors. I have not printed the text of a PUT response myself, as I don't use PUT in any API I write.
requests.put('http://urlhere.com/putextension', data=payload)
Now, for implementing this in your code: you already have the base of the URL, so for a login POST you would just do:
payload = {'username': USERNAME, 'passwd': PASSWORD}
r = requests.post('https://loginurlhere.com/', data=payload)
# check the response by printing its text
print(r.text)
As for PUTting data to an extension of your API, let us assume you already have a payload variable ready with the info you need, for example for the device endpoint:
requests.put(API_DEVICE_URL, data=payload)
And that should PUT to the URL (see the sketch below for how this maps onto your script). If you have any questions, comment below and I can answer them if you would like.
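To tie it back to the script in the question, here is a rough sketch of what a PUT to one of your Device42 endpoints could look like with requests, reusing the USER, PASSWORD and URL constants already defined there. It is untested; the field names are examples only, and whether a given endpoint expects PUT or POST is something to confirm in the Device42 API docs:
import requests

payload = {'device': 'TESTHOST01', 'key': 'environment', 'value': 'production'}  # example fields only

r = requests.put(
    API_CUSTOMFIELD_URL,      # e.g. BASE_URL + '/api/1.0/device/custom_field/'
    data=payload,             # form-encoded body, like the existing post() helper
    auth=(USER, PASSWORD),    # same basic auth the script already uses
    verify=False,             # only if your Device42 host uses a self-signed certificate
)
print(r.status_code)
print(r.text)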

My standard answer would be to replace urllib2 with the Requests package. It makes doing HTTP work a lot easier.
But take a look at this SO answer for a 'hack' to get PUT working.
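For reference, that 'hack' boils down to overriding the request's get_method so urllib2 sends PUT instead of POST. Here is a sketch that mirrors the post() helper already in your script (untested against Device42, so treat it as a starting point):
def put(url, params):
    """http PUT with basic-auth -- same shape as the existing post() helper"""
    data = urllib.urlencode(params)
    headers = {
        'Authorization': 'Basic ' + base64.b64encode(USER + ':' + PASSWORD),
        'Content-Type': 'application/x-www-form-urlencoded'
    }
    req = urllib2.Request(url, data, headers)
    # urllib2 only knows GET and POST by default; overriding get_method makes it send PUT
    req.get_method = lambda: 'PUT'
    return urllib2.urlopen(req)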

Related

Using Python Requests to POST data to REST API

I have been working with the FHIR REST API for a while but haven't had any experience with Python. As my first Python project I am attempting to create a simple script that can read from and write to an open API. I am able to read, but I am stuck on creating a successful POST due to the following error: [TypeError("unhashable type: 'dict'")]. I don't fully understand how the Python dictionary works and attempted to use a tuple, but I get the same error.
import requests #REST Access to FHIR Server
print('Search patient by MRN to find existing appointment')
MRN = input("Enter patient's MRN -try CT12181 :")
url = 'http://hapi.fhir.org/baseR4/Patient?identifier='+MRN
print('Searching for Patient by MRN...#'+url)
response = requests.get(url)
json_response = response.json()
try:
    key='entry'
    EntryArray=json_response[key]
    FirstEntry=EntryArray[0]
    key='resource'
    resource=FirstEntry['resource']
    id=resource['id']
    PatientServerId= id
    patientName = resource['name'][0]['given'][0] + ' ' +resource['name'][0]['family']
    print('Patient Found')
    print('Patient Id:'+id)
    # Searching for appointments
    url='http://hapi.fhir.org/baseR4/Appointment?patient='+id #fhir server endpoint
    # Print appointment data
    print('Now Searching for Appointments...#'+url)
    appt_response = requests.get(url).json()
    key='entry'
    EntryArray=appt_response[key]
    print(f'Appointment(s) found for the patient {patientName}')
    for entry in EntryArray:
        appt=entry['resource']
        # print('-------------------------')
        # Date=appt['start']
        # Status=appt['status']
        # print(appt_response)
        # print('AppointmentStartDate/Time: ', appt['start'])
        print('Status: ', appt['status'])
        print('ID: ', appt['id'])
    print('Search for open general practice slot?')
    option = input('Enter yes or no: ')
    while not(option == 'yes'):
        print('Please search a different patient')
        option = input('Enter yes or no: ')
    url = 'http://hapi.fhir.org/baseR4/Slot?service-type=57' #fhir server endpoint
    print('Searching for General Practice Slot...#'+url)
    slot_response = requests.get(url).json()
    key='entry'
    EntryArray=slot_response[key]
    print('Slot(s) found for the service type General Practice')
    for entry in EntryArray:
        slot=entry['resource']
        # print('-------------------------')
        # slotDate=slot['start']
        # slotStatus=slot['status']
        print(f'SlotID: ' + slot['id'])
        # print(f'Status: ' + slot['status'])
    print('Book a slot?')
    option = input('Enter yes or no: ')
    while not(option == 'yes'):
        print('Please search a different patient')
        option = input('Enter yes or no: ')
    # Book slot
    slotID = input("Enter slot ID :")
    url = 'http://hapi.fhir.org/baseR4/Appointment' #fhir server endpoint
    print('Booking slot...#'+url)
    headers = {"Content-Type": "application/fhir+json;charset=utf-8"}
    data = {{"resourceType": "Appointment","status": "booked","slot": tuple({"reference":"Slot/104602"}),"participant": tuple({"actor": {"reference":"Patient/1229151","status": "accepted"}}),"reasonCode": tuple({"text": "I have a cramp"})}}
    # fhir server json header content
    # headers = {"Content-Type": "application/fhir+json;charset=utf-8"}
    response = requests.post(url=url,headers=headers,data=data)
    print(response)
    print(response.json())
except Exception as e:
    print('error', [e])
I was expecting the JSON data to successfully write to the API. I am able to use the same JSON data in Postman to make the call, but I am not as familiar with how this should work within Python.
It looks like the Appointment POST endpoint accepts a simple payload like:
{
"resourceType": "Appointment"
}
Which then returns a corresponding ID, according to the API docs.
This differs from what you seem to be attempting in your code, where you try to pass other details to this endpoint:
data = {{"resourceType": "Appointment","status": "booked","slot": tuple({"reference":"Slot/104602"}),"participant": tuple({"actor": {"reference":"Patient/1229151","status": "accepted"}}),"reasonCode": tuple({"text": "I have a cramp"})}}
However, to make a POST request to the endpoint as documented in the docs, perhaps try the json argument to requests.post. Something along the lines of:
>>> import requests
>>> headers = {"Content-Type": "application/fhir+json;charset=utf-8"}
>>> json_payload = {
... "resourceType": "Appointment"
... }
>>> url = 'http://hapi.fhir.org/baseR4/Appointment'
>>> r = requests.post(url, headers=headers, json=json_payload)
>>> r
<Response [201]>
>>> r.json()
{'resourceType': 'Appointment', 'id': '2261980', 'meta': {'versionId': '1', 'lastUpdated': '2022-03-25T23:40:42.621+00:00'}}
>>>
If you're already familiar with this API, then perhaps this might help. I suspect you then need to send another POST or PATCH request to another endpoint, using the ID returned in your first request to enter the relevant data.
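If you do want to send the extra details in one request, note where the unhashable type: 'dict' error comes from: the outer double braces in {{...}} build a set containing a dict (dicts can't go inside sets), and the tuple({...}) calls only keep the dict's keys. Plain nested dicts and lists passed through json= avoid both problems. A sketch reusing the values from the question (whether the server accepts this exact resource is a separate question for the FHIR docs):
import requests

url = 'http://hapi.fhir.org/baseR4/Appointment'
headers = {"Content-Type": "application/fhir+json;charset=utf-8"}

# Plain nested dicts/lists -- no outer {{ }} and no tuple() calls.
appointment = {
    "resourceType": "Appointment",
    "status": "booked",
    "slot": [{"reference": "Slot/104602"}],
    "participant": [{"actor": {"reference": "Patient/1229151", "status": "accepted"}}],
    "reasonCode": [{"text": "I have a cramp"}],
}

response = requests.post(url, headers=headers, json=appointment)
print(response.status_code)
print(response.json())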

Search on Splunk via Python SDK

I'm trying to run a simple search via the Python SDK (Python 3.8.5, splunk-sdk 1.6.14). The examples presented on dev.splunk.com are clear, but something goes wrong when I run a search with my own parameters.
The code is as simple as this:
search_kwargs_params = {
    "exec_mode": "blocking",
    "earliest_time": "2020-09-04T06:57:00.000-00:00",
    "latest_time": "2020-11-08T07:00:00.000-00:00",
}
search_query = 'search index=qwe1 trace=111-aaa-222 action=Event.OpenCase'
job = self.service.jobs.create(search_query, **search_kwargs_params)
for result in results.ResultsReader(job.results()):
    print(result)
But the search returns no results. When I run the same query manually in the Splunk web GUI it works fine.
I've also tried putting all the parameters in the 'search_kwargs_params' dictionary and widening the search time period; that returned some results, but they don't match what I get in the GUI.
Can someone advise?
This worked for me. You may also try this:
import requests
import time
import json
scheme = 'https'
host = '<your host>'
username = '<your username>'
password = '<your password>'
unique_id = '2021-03-22T18-43-00' #You may give any unique identifier here
search_query = 'search <your splunk query>'
post_data = {
    'id': unique_id,
    'search': search_query,
    'earliest_time': '1',
    'latest_time': 'now',
}
# 'earliest_time' : '1', 'latest_time' : 'now'
# This will run the search query for all time
splunk_search_base_url = scheme + '://' + host + '/servicesNS/{}/search/search/jobs'.format(username)
resp = requests.post(splunk_search_base_url, data=post_data, verify=False, auth=(username, password))
print(resp.text)

is_job_completed = ''
while is_job_completed != 'DONE':
    time.sleep(5)
    get_data = {'output_mode': 'json'}
    job_status_base_url = scheme + '://' + host + '/servicesNS/{}/search/search/jobs/{}'.format(username, unique_id)
    resp_job_status = requests.post(job_status_base_url, data=get_data, verify=False, auth=(username, password))
    resp_job_status_data = resp_job_status.json()
    is_job_completed = resp_job_status_data['entry'][0]['content']['dispatchState']
    print("Current job status is {}".format(is_job_completed))

splunk_summary_base_url = scheme + '://' + host + '/servicesNS/{}/search/search/jobs/{}/results?count=0'.format(username, unique_id)
splunk_summary_results = requests.get(splunk_summary_base_url, data=get_data, verify=False, auth=(username, password))
splunk_summary_data = splunk_summary_results.json()

# Print the results in python format (strings will be in single quotes)
for data in splunk_summary_data['results']:
    print(data)

print('status code...')
print(splunk_summary_results.status_code)
print('raise for status...')
print(splunk_summary_results.raise_for_status())

print('Results as JSON : ')
# Print the results in valid JSON format (strings will be in double quotes)
# To get complete json data:
print(json.dumps(splunk_summary_data))
# To get only the relevant json data:
print(json.dumps(splunk_summary_data['results']))
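If you want the exact time window from your original search instead of "all time", the job-creation payload also accepts earliest_time/latest_time values. A sketch reusing the ISO timestamps from the question (not verified against your instance):
post_data = {
    'id': unique_id,
    'search': search_query,
    'earliest_time': '2020-09-04T06:57:00.000-00:00',
    'latest_time': '2020-11-08T07:00:00.000-00:00',
}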
Cheers!
You may also like to have a look at this very handy tutorial. https://www.youtube.com/watch?v=mmTzzp2ldgU

Python Error not reading config.cfg?

Traceback (most recent call last):
File "/Users/jondevereux/Desktop/Data reporting/kpex_code/1PD/api_python_publisher_1PD.py", line 40, in <module>
username = parser.get('api_samples', 'username')
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ConfigParser.py", line 607, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'api_samples'
The config file is in the correct directory (the same one as the .py file) and has the appropriate section api_samples:
[api_samples]
authentication_url = https://crowdcontrol.lotame.com/auth/v1/tickets
api_url = https://api.lotame.com/2/
username = xxx
password = xxx
The script works on my co-worker's PC but not on mine. I had to use pip to install requests - I'm wondering if I'm missing something else?
Code is as follows:
# Set up the libs we need
import requests
import sys
import csv
import json
from ConfigParser import SafeConfigParser # used to get information from a config file
reload(sys)
sys.setdefaultencoding('utf-8')
'''
Now let's get what we need from our config file, including the username and password
We are assuming we have a config file called config.config in the same directory
where this python script is run, where the config file looks like:
[api_samples]
authentication_url = https://crowdcontrol.lotame.com/auth/v1/tickets
api_url = https://api.lotame.com/2/
username = USERNAME_FOR_API_CALLS
password = PASSWORD
'''
# Set up our Parser and get the values - usernames and password should never be in code!
parser = SafeConfigParser()
parser.read('config.cfg')
username = parser.get('api_samples', 'username')
password = parser.get('api_samples', 'password')
authentication_url = parser.get('api_samples', 'authentication_url')
base_api_url = parser.get('api_samples', 'api_url')
# OK, all set with our parameters, let's get ready to make our call to get a Ticket Granting Ticket
# Add the username and password to the payload (requests encodes for us, no need to urlencode)
payload = {'username': username,
'password': password}
# We want to set some headers since we are going to post some url encoded params.
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain", "User-Agent":"python" }
# Now, let's make our Ticket Granting Ticket request. We get the location from the response header
tg_ticket_location = requests.post(authentication_url, data=payload).headers['location']
# Let's take a look at what a Ticket Granting Ticket looks like:
# print ('Ticket Granting Ticket - %s \n') % (tg_ticket_location[tg_ticket_location.rfind('/') + 1:])
# Now we have our Ticket Granting Ticket, we can get a service ticket for the service we want to call
# The first service call will be to get information on behavior id 5990.
# service_call = base_api_url + 'behaviors/5990'
# Add the service call to the payload and get the ticket
#payload = {'service': service_call}
#service_ticket = requests.post( tg_ticket_location, data=payload ).text
# Let's take a look at the service ticket
#print ('Here is our Service Ticket - %s \n') % ( service_ticket )
'''
Now let's make our call to the service ... remember we need to be quick about it because
we only have 10 seconds to do it before the Service Ticket expires.
A couple of things to note:
JSON is the default response, and it is what we want, so we don't need to specify
like {'Accept':'application/json'}, but we will anyway because it is a good practice.
We don't need to pass any parameters to this call, so we just add the parameter
notation and then 'ticket=[The Service Ticket]'
'''
headers = {'Accept':'application/json'}
#behavior_info = requests.get( ('%s?ticket=%s') % (service_call, service_ticket), headers=headers)
# Let's print out our JSON to see what it looks like
# requests support JSON on it's own, so not other package needed for this
# print ('Behavior Information: \n %s \n') % (behavior_info.json() )
'''
Now let's get the names and IDs of some audiences
We can reuse our Ticket Granting Ticket for a 3 hour period ( we haven't passed that yet),
so let's use it to get a service ticket for the audiences service call.
Note that here we do have a parameter that is part of the call. That needs to be included
in the Service Ticket request.
We plan to make a call to the audience service to get the first 10 audiences in the system
ascending by audience id. We don't need to pass the sort order, because it defaults to ascending
'''
# Set up our call and get our new Service Ticket, we plan to sort by id
# Please insert audiences ID below:
audienceids = ['243733','243736','241134','242480','240678','242473','242483','241119','243732','242492','243784','242497','242485','243785','242486','242487','245166','245167','245168','245169','245170','245171','240860']
f = open("publisher_report_1PD.csv", 'w+')
title_str = ['1PD % Contribution','audienceId','publisherName','audienceName']
print >> f,(title_str)
for audience_id in audienceids:
    service_call = base_api_url + 'reports/audiences/' + audience_id + '/publisher?stat_interval=LAST_MONTH&page_count=100&page_num=1&sort_attr=audienceName&inc_network=false&sort_order=ASC'
    payload = {'service': service_call}
    # Let's get the new Service Ticket, we can print it again to see it is a new ticket
    service_ticket = requests.post( tg_ticket_location, data=payload ).text
    #print ('Here is our new Service Ticket - %s \n') % ( service_ticket )
    # Use the new ticket to query the service, remember we did have a parameter this time,
    # so we need to & 'ticket=[The Service Ticket]' to the parameter list
    audience_list = requests.get( ('%s&ticket=%s') % (service_call, service_ticket)).json()
    #print audience_list
    # create an array to hold the audiences, pull out the details we want, and print it out
    audiences = []
    for ln in audience_list['stats']:
        audiences.append({ 'audienceId': ln['audienceId'], 'audienceName': ln['audienceName'], 'publisherName': ln['publisherName'], '1PD % Contribution': ln['percentOfAudience']})
    for ii in range( 0, len(audiences) ):
        data = audiences[ii]
        data_str = json.dumps(data)
        result = data_str.replace("\"","")
        result1 = result.replace("{1PD % Contribution:","")
        result2 = result1.replace("publisherName: ","")
        result3 = result2.replace("audienceName: ","")
        result4 = result3.replace("audienceId: ","")
        result5 = result4.replace("}","")
        print >> f,(result5)
# Once we are done with the Ticket Granting Ticket we should clean it up
remove_tgt = requests.delete( tg_ticket_location )
print ( 'Status for closing TGT - %s') % (remove_tgt.status_code)
i = input('YAY! Gotcha!!')
I see only one reason for your problem: you run the script from a different folder, so the script looks for config.cfg in that folder (the current working directory) instead of its own.
You can get the full path to the folder containing the script:
import os
script_folder = os.path.dirname(os.path.realpath(__file__))
and build the full path to config.cfg:
parser.read( os.path.join(script_folder, 'config.cfg') )
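Putting the two pieces together (a sketch, keeping the section and option names from your config file):
import os
from ConfigParser import SafeConfigParser

# folder that contains this .py file, regardless of where it is run from
script_folder = os.path.dirname(os.path.realpath(__file__))

parser = SafeConfigParser()
parser.read(os.path.join(script_folder, 'config.cfg'))

username = parser.get('api_samples', 'username')
password = parser.get('api_samples', 'password')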

urllib3 define server_hostname ( for ssl check ) or ip where request [duplicate]

In my project I'm handling all HTTP requests with python requests library.
Now, I need to query the HTTP server using a specific DNS server - there are two environments, each using its own DNS, and changes are made independently.
So, when the code is running, it should use the DNS server specific to the environment, not the one configured for my internet connection.
Has anyone tried this with python-requests? I've only found a solution for urllib2:
https://stackoverflow.com/questions/4623090/python-set-custom-dns-server-for-urllib-requests
requests uses urllib3, which ultimately uses httplib.HTTPConnection as well, so the techniques from https://stackoverflow.com/questions/4623090/python-set-custom-dns-server-for-urllib-requests (now deleted, it merely linked to Tell urllib2 to use custom DNS) still apply, to a certain extent.
The urllib3.connection module subclasses httplib.HTTPConnection under the same name, having replaced the .connect() method with one that calls self._new_conn. In turn, this delegates to urllib3.util.connection.create_connection(). It is perhaps easiest to patch that function:
from urllib3.util import connection

_orig_create_connection = connection.create_connection

def patched_create_connection(address, *args, **kwargs):
    """Wrap urllib3's create_connection to resolve the name elsewhere"""
    # resolve hostname to an ip address; use your own
    # resolver here, as otherwise the system resolver will be used.
    host, port = address
    hostname = your_dns_resolver(host)
    return _orig_create_connection((hostname, port), *args, **kwargs)

connection.create_connection = patched_create_connection
and you'd provide your own code to resolve the host portion of the address into an ip address instead of relying on the connection.create_connection() call (which wraps socket.create_connection()) to resolve the hostname for you.
Like all monkeypatching, be careful that the code hasn't significantly changed in later releases; the patch here was created against urllib3 version 1.21.1. but should work for versions as far back as 1.9.
Note that this answer was re-written to work with newer urllib3 releases, which have added a much more convenient patching location. See the edit history for the old method, applicable to version < 1.9, as a patch to the vendored urllib3 version rather than a stand-alone installation.
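your_dns_resolver is whatever you want it to be; any function that maps a hostname to an IP string will do. As one possible sketch (an assumption, not part of the original answer), here is a resolver built on dnspython, pointed at an environment-specific nameserver:
import dns.resolver  # dnspython

def your_dns_resolver(host):
    """Resolve host against an explicit, environment-specific nameserver."""
    resolver = dns.resolver.Resolver(configure=False)
    resolver.nameservers = ['10.0.0.53']  # placeholder: your environment's DNS server
    for rdata in resolver.resolve(host, 'A'):  # dnspython 1.x uses resolver.query(host, 'A')
        return str(rdata)
    return host  # fall back to the original name if nothing came back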
You should look into the TransportAdapters, including the source code. The documentation on them isn't great, but they give low-level access to a lot of the functionality described in RFC 2818 and RFC 6125. In particular, those documents encourage (require?) client-side code to support application-specific DNS for the purpose of checking certificates' CommonName and SubjectAltName. The keyword argument you need in those calls is "assert_hostname". Here's how to set it with the requests library:
from requests import Session, HTTPError
from requests.adapters import HTTPAdapter, DEFAULT_POOLSIZE, DEFAULT_RETRIES, DEFAULT_POOLBLOCK
class DNSResolverHTTPSAdapter(HTTPAdapter):
    def __init__(self, common_name, host, pool_connections=DEFAULT_POOLSIZE, pool_maxsize=DEFAULT_POOLSIZE,
                 max_retries=DEFAULT_RETRIES, pool_block=DEFAULT_POOLBLOCK):
        self.__common_name = common_name
        self.__host = host
        super(DNSResolverHTTPSAdapter, self).__init__(pool_connections=pool_connections, pool_maxsize=pool_maxsize,
                                                      max_retries=max_retries, pool_block=pool_block)

    def get_connection(self, url, proxies=None):
        redirected_url = url.replace(self.__common_name, self.__host)
        return super(DNSResolverHTTPSAdapter, self).get_connection(redirected_url, proxies=proxies)

    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        pool_kwargs['assert_hostname'] = self.__common_name
        super(DNSResolverHTTPSAdapter, self).init_poolmanager(connections, maxsize, block=block, **pool_kwargs)
common_name = 'SuperSecretSarahServer'
host = '192.168.33.51'
port = 666
base_url = 'https://{}:{}/api/'.format(common_name, port)
my_session = Session()
my_session.mount(base_url.lower(), DNSResolverHTTPSAdapter(common_name, host))
user_name = 'sarah'
url = '{}users/{}'.format(base_url, user_name)
default_response_kwargs = {
'auth': (NAME, PASSWORD),
'headers': {'Content-Type': 'application/json'},
'verify': SSL_OPTIONS['ca_certs'],
'cert': (SSL_OPTIONS['certfile'], SSL_OPTIONS['keyfile'])
}
response = my_session.get(url, **default_response_kwargs)
I use common_name for the name expected to be on the certificate and how your code will reference the desired machine. I use host for a name recognized by the external world - FQDN, IP, DNS entry, ... Of course, the SSL_OPTIONS dictionary (in my example) must list appropriate certificate / key filenames on your machine. (Plus, NAME and PASSWORD should resolve to correct strings.)
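For completeness, SSL_OPTIONS in that snippet is just a plain dict of file paths, something along these lines (the paths and the NAME/PASSWORD values are placeholders you would replace with your own):
SSL_OPTIONS = {
    'ca_certs': '/etc/ssl/certs/my_ca_bundle.pem',  # CA bundle used to verify the server
    'certfile': '/etc/ssl/certs/client_cert.pem',   # client certificate
    'keyfile': '/etc/ssl/private/client_key.pem',   # client private key
}
NAME = 'api_user'
PASSWORD = 'api_password'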
A customized HTTPAdapter will do the trick.
Don't forget to set server_hostname to enable SNI.
import requests
class HostHeaderSSLAdapter(requests.adapters.HTTPAdapter):
    def resolve(self, hostname):
        # a dummy DNS resolver
        import random
        ips = [
            '104.16.89.20',   # CloudFlare
            '151.101.2.109',  # Fastly
        ]
        resolutions = {
            'cdn.jsdelivr.net': random.choice(ips),
        }
        return resolutions.get(hostname)

    def send(self, request, **kwargs):
        from urllib.parse import urlparse
        connection_pool_kwargs = self.poolmanager.connection_pool_kw
        result = urlparse(request.url)
        resolved_ip = self.resolve(result.hostname)
        if result.scheme == 'https' and resolved_ip:
            request.url = request.url.replace(
                'https://' + result.hostname,
                'https://' + resolved_ip,
            )
            connection_pool_kwargs['server_hostname'] = result.hostname  # SNI
            connection_pool_kwargs['assert_hostname'] = result.hostname
            # overwrite the host header
            request.headers['Host'] = result.hostname
        else:
            # these headers from a previous request may have been left
            connection_pool_kwargs.pop('server_hostname', None)
            connection_pool_kwargs.pop('assert_hostname', None)
        return super(HostHeaderSSLAdapter, self).send(request, **kwargs)
url = 'https://cdn.jsdelivr.net/npm/bootstrap/LICENSE'
session = requests.Session()
session.mount('https://', HostHeaderSSLAdapter())
r = session.get(url)
print(r.headers)
r = session.get(url)
print(r.headers)
I know this is an old thread but here is my python3 compatible solution using tldextract and dnspython. I've left some commented out code to illustrate how to debug and set additional session parameters.
#!/usr/bin/env python3
import sys
from pprint import pprint as pp
import requests
import dns.resolver # NOTE: dnspython package
import tldextract
class CustomAdapter(requests.adapters.HTTPAdapter):
    def __init__(self, nameservers):
        self.nameservers = nameservers
        super().__init__()

    def resolve(self, host, nameservers, record_type):
        dns_resolver = dns.resolver.Resolver()
        dns_resolver.nameservers = nameservers
        answers = dns_resolver.query(host, record_type)
        for rdata in answers:
            return str(rdata)

    def get_connection(self, url, proxies=None):
        ext = tldextract.extract(url)
        fqdn = ".".join([ext.subdomain, ext.domain, ext.suffix])
        print("FQDN: {}".format(fqdn))
        a_record = self.resolve(fqdn, nameservers, 'A')
        print("A record: {}".format(a_record))
        resolved_url = url.replace(fqdn, a_record)  # NOTE: Replace first occurrence only
        print("Resolved URL: {}".format(resolved_url))
        return super().get_connection(resolved_url, proxies=proxies)

if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("Usage: {} <url>".format(sys.argv[0]))
        sys.exit(0)
    url = sys.argv[1]
    nameservers = [
        '208.67.222.222',  # NOTE: OpenDNS
        '8.8.8.8'          # NOTE: Google
    ]
    session = requests.Session()
    session.mount(url, CustomAdapter(nameservers))
    parameters = {
        # "headers": {'Content-Type': 'application/json'},
        # "timeout" : 45,
        # "stream" : True,
        # "proxies" : {
        #     "http": "http://your_http_proxy:8080/",
        #     "https": "http://your_https_proxy:8081/"
        # },
        # "auth": (name, password),
        # ...
    }
    response = session.get(url, **parameters)
    pp(response.__dict__)
And here is the console output:
$ ./run.py http://www.test.com
FQDN: www.test.com
A record: 69.172.200.235
Resolved URL: http://69.172.200.235/
{'_content': b'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3'
b'.org/TR/html4/strict.dtd">\n<html>\n<head>\n<meta http-equiv="C'
b'ontent-Type" content="text/html; charset=iso-8859-1">\n<meta '
b'http-equiv="Content-Script-Type" content="text/javascript">\n'
b'<script type="text/javascript">\nfunction getCookie(c_name) {'
b' // Local function for getting a cookie value\n if (docume'
b'nt.cookie.length > 0) {\n c_start = document.cookie.in'
b'dexOf(c_name + "=");\n if (c_start!=-1) {\n c_st'
b'art=c_start + c_name.length + 1;\n c_end=document.cook'
b'ie.indexOf(";", c_start);\n\n if (c_end==-1) \n '
b' c_end = document.cookie.length;\n\n return unescape('
b'document.cookie.substring(c_start,c_end));\n }\n }\n '
b' return "";\n}\nfunction setCookie(c_name, value, expiredays'
b') { // Local function for setting a value of a cookie\n va'
b'r exdate = new Date();\n exdate.setDate(exdate.getDate()+e'
b'xpiredays);\n document.cookie = c_name + "=" + escape(valu'
b'e) + ((expiredays==null) ? "" : ";expires=" + exdate.toGMTString'
b'()) + ";path=/";\n}\nfunction getHostUri() {\n var loc = doc'
b"ument.location;\n return loc.toString();\n}\nsetCookie('YPF8"
b"827340282Jdskjhfiw_928937459182JAX666', '171.68.244.56', 10)"
b';\ntry { \n location.reload(true); \n} catch (err1) { \n '
b' try { \n location.reload(); \n } catch (err2) { '
b' \n \tlocation.href = getHostUri(); \n } \n}\n</scrip'
b't>\n</head>\n<body>\n<noscript>This site requires JavaScript an'
b'd Cookies to be enabled. Please change your browser settings or '
b'upgrade your browser.</noscript>\n</body>\n</html>\n',
'_content_consumed': True,
'_next': None,
'connection': <requests.adapters.HTTPAdapter object at 0x109130e48>,
'cookies': <RequestsCookieJar[]>,
'elapsed': datetime.timedelta(microseconds=992676),
'encoding': 'ISO-8859-1',
'headers': {'Server': 'nginx/1.14.2', 'Date': 'Wed, 01 May 2019 18:01:58 GMT', 'Content-Type': 'text/html', 'Transfer-Encoding': 'chunked', 'Connection': 'keep-alive', 'Keep-Alive': 'timeout=20', 'X-DIS-Request-ID': '2a5057a7c7b8a93dd700856c48fda74a', 'P3P': 'CP="NON DSP COR ADMa OUR IND UNI COM NAV INT"', 'Cache-Control': 'no-cache', 'Content-Encoding': 'gzip'},
'history': [<Response [302]>],
'raw': <urllib3.response.HTTPResponse object at 0x1095b90b8>,
'reason': 'OK',
'request': <PreparedRequest [GET]>,
'status_code': 200,
'url': 'https://www.test.com/'}
Hope this helps.

Yahoo BOSS V2 authorization troubles

I'm having an awfully hard time with Yahoo's authentication/authorization. I've enabled BOSS in my account, set up a payment method, and now I'm trying to run a search using some python code:
import urllib2
import oauth2 as oauth
import time
OAUTH_CONSUMER_KEY = "blahblahblah"
OAUTH_CONSUMER_SECRET = "blah"
def oauth_request(url, params, method="GET"):
    params['oauth_version'] = "1.0",
    params['oauth_nonce'] = oauth.generate_nonce(),
    params['oauth_timestamp'] = int(time.time())
    consumer = oauth.Consumer(key=OAUTH_CONSUMER_KEY,
                              secret=OAUTH_CONSUMER_SECRET)
    params['oauth_consumer_key'] = consumer.key
    req = oauth.Request(method=method, url=url, parameters=params)
    req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
    return req

if __name__ == "__main__":
    url = "http://yboss.yahooapis.com/ysearch/web"
    req = oauth_request(url, params={"q": "cats dogs"})
    req_url = req.to_url()
    print req_url
    result = urllib2.urlopen(req_url)
I keep getting a urllib2.HTTPError: HTTP Error 401: Unauthorized exception. I can't figure out whether there's something wrong with my key, or the method of signing, or if I'm somehow tampering with my data after signing, or what the deal is. Anyone have suggestions?
I made some small changes to make your example work. See code for comments.
import urllib2
import oauth2 as oauth
import time
OAUTH_CONSUMER_KEY = "blahblahblah"
OAUTH_CONSUMER_SECRET = "blah"
def oauth_request(url, params, method="GET"):
    # Removed trailing commas here - they make a difference.
    params['oauth_version'] = "1.0" #,
    params['oauth_nonce'] = oauth.generate_nonce() #,
    params['oauth_timestamp'] = int(time.time())
    consumer = oauth.Consumer(key=OAUTH_CONSUMER_KEY,
                              secret=OAUTH_CONSUMER_SECRET)
    params['oauth_consumer_key'] = consumer.key
    req = oauth.Request(method=method, url=url, parameters=params)
    req.sign_request(oauth.SignatureMethod_HMAC_SHA1(), consumer, None)
    return req

if __name__ == "__main__":
    url = "http://yboss.yahooapis.com/ysearch/web"
    req = oauth_request(url, params={"q": "cats dogs"})
    # This one is a bit nasty. Apparently the BOSS API does not like
    # "+" in its URLs so you have to replace "%20" manually.
    # Not sure if the API should be expected to accept either.
    # Not sure why to_url does not just return %20 instead...
    # Also, oauth2.Request seems to store parameters as unicode and forget
    # to encode to utf8 prior to percentage encoding them in its to_url
    # method. However, it's handled correctly for generating signatures.
    # to_url fails when query parameters contain non-ASCII characters. To
    # work around, manually utf8 encode the request parameters.
    req['q'] = req['q'].encode('utf8')
    req_url = req.to_url().replace('+', '%20')
    print req_url
    result = urllib2.urlopen(req_url)
Here is a Python code snippet that works for me against Yahoo! BOSS:
import httplib2
import oauth2
import time
OAUTH_CONSUMER_KEY = "Blah"
OAUTH_CONSUMER_SECRET = "Blah"
if __name__ == "__main__":
    url = "http://yboss.yahooapis.com/ysearch/web?q=cats%20dogs"
    consumer = oauth2.Consumer(key=OAUTH_CONSUMER_KEY, secret=OAUTH_CONSUMER_SECRET)
    params = {
        'oauth_version': '1.0',
        'oauth_nonce': oauth2.generate_nonce(),
        'oauth_timestamp': int(time.time()),
    }
    oauth_request = oauth2.Request(method='GET', url=url, parameters=params)
    oauth_request.sign_request(oauth2.SignatureMethod_HMAC_SHA1(), consumer, None)
    oauth_header = oauth_request.to_header(realm='yahooapis.com')

    # Get search results
    http = httplib2.Http()
    resp, content = http.request(url, 'GET', headers=oauth_header)
    print resp
    print content
I'm using an Authorization header to submit the OAuth signature.
So I decided to ditch Python and try Perl, and it Just Worked. Here's a minimal code sample:
use strict;
use Net::OAuth;
use LWP::UserAgent;
my $CC_KEY = "blahblahblah";
my $CC_SECRET = "blah";
my $url = 'http://yboss.yahooapis.com/ysearch/web';
print make_request($url, {q => "cat dog", format => "xml", count => 5});
sub make_request {
    my ($url, $args) = @_;
    my $request = Net::OAuth->request("request token")
        ->new(
            consumer_key => $CC_KEY,
            consumer_secret => $CC_SECRET,
            request_url => $url,
            request_method => 'GET',
            signature_method => 'HMAC-SHA1',
            timestamp => time,
            nonce => int(rand 10**6),
            callback => 'oob',
            extra_params => $args,
            protocol_version => Net::OAuth::PROTOCOL_VERSION_1_0A,
        );
    $request->sign;
    my $res = LWP::UserAgent->new(env_proxy=>1)->get($request->to_url);
    return $res->content if $res->is_success;
    die $res->status_line;
}
Here's another solution, this time back in python-land. This was put together by Tom De Smedt, author of the Pattern web-mining kit.
I'll communicate with the author of python-oauth2 to see if it can be fixed.
OAUTH_CONSUMER_KEY = "blahblahblah"
OAUTH_CONSUMER_SECRET = "blah"
import urllib
import hmac
import time
import random
import base64
try:
    from hashlib import sha1
    from hashlib import md5
except:
    import sha as sha1
    import md5; md5 = md5.new

def hmac_sha1(key, text):
    return hmac.new(key, text, sha1).digest()

def oauth_nonce(length=40):
    h = "".join([str(random.randint(0, 9)) for i in range(length)])
    h = md5(str(time.time()) + h).hexdigest()
    return h

def oauth_timestamp():
    return str(int(time.time()))

def oauth_encode(s):
    return urllib.quote(s, "~")

def oauth_signature(url, data={}, method="get", secret="", token=""):
    # Signature base string: http://tools.ietf.org/html/rfc5849#section-3.4.1
    base = oauth_encode(method.upper()) + "&"
    base += oauth_encode(url.rstrip("?")) + "&"
    base += oauth_encode("&".join(["%s=%s" % (k, v) for k, v in sorted(data.items())]))
    # HMAC-SHA1 signature algorithm: http://tools.ietf.org/html/rfc5849#section-3.4.2
    signature = hmac_sha1(oauth_encode(secret) + "&" + token, base)
    signature = base64.b64encode(signature)
    return signature
q = "cat"
url = "http://yboss.yahooapis.com/ysearch/" + "web" # web | images | news
data = {
"q": q,
"start": 0,
"count": 50, # 35 for images
"format": "xml",
"oauth_version": "1.0",
"oauth_nonce" : oauth_nonce(),
"oauth_timestamp" : oauth_timestamp(),
"oauth_consumer_key" : OAUTH_CONSUMER_KEY,
"oauth_signature_method" : "HMAC-SHA1",
}
data["oauth_signature"] = oauth_signature(url, data, secret=OAUTH_CONSUMER_SECRET)
complete_url = url + "?" + urllib.urlencode(data)
response = urllib.urlopen(complete_url)
print response.read()
Here is sample code to access the Yahoo! BOSS API v2 using python-oauth as the OAuth library.
OAUTH_CONSUMER_KEY = "<oauth consumer key>"
OAUTH_CONSUMER_SECRET = "<oauth consumer secret>"
URL = "http://yboss.yahooapis.com/ysearch/web"
import urllib
import oauth.oauth as oauth
data = {
    "q": "yahoo boss search",
    "start": 0,
    "count": 2,
    "format": "json"
}
consumer = oauth.OAuthConsumer(OAUTH_CONSUMER_KEY, OAUTH_CONSUMER_SECRET)
signature_method_plaintext = oauth.OAuthSignatureMethod_PLAINTEXT()
signature_method_hmac_sha1 = oauth.OAuthSignatureMethod_HMAC_SHA1()
oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer, token=None, http_method='GET', http_url=URL, parameters=data)
oauth_request.sign_request(signature_method_hmac_sha1, consumer, "")
complete_url = oauth_request.to_url()
response = urllib.urlopen(complete_url)
print "REQUEST URL => %s" % complete_url
print ""
print "RESPONSE =>"
print response.read()
I stepped into the urllib2.open code using the debugger, and found that the response has this header:
WWW-Authenticate: OAuth oauth_problem="version_rejected", realm="yahooapis.com"
So I guess I'm having some kind of version mismatch of OAuth.
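That oauth_problem="version_rejected" would line up with the trailing-comma issue pointed out in the accepted answer: a trailing comma turns the assigned value into a one-element tuple, so oauth_version is no longer the plain string "1.0" the server expects. A quick sketch of the difference:
params = {}

params['oauth_version'] = "1.0",   # trailing comma: value is the tuple ('1.0',)
print params['oauth_version']      # ('1.0',)  -> serialized oddly, rejected as version_rejected

params['oauth_version'] = "1.0"    # no comma: value is the string '1.0'
print params['oauth_version']      # 1.0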
