HP ALM 12 REST API help in Python

For HP ALM 12.20.3264.
Using python 2.7.9 - wish to complete an automation task. For which - need to achieve below tasks:
Connect HP ALM  Success . Please find below success message.
Log :(u'Open ALM session success', u'AUTH URL:', u'https://abcdefgh.com/qcbin/authentication-point/authenticate', u'HEADERS:', {'Cookie': None, 'Content-Type': 'application/xml', 'Accept': 'application/xml', 'KeepAlive': 'true'})
Get test case name information from Testing -> TestLab  Failed with below error:
(u'[ALMSession] Get ALM function with errors', 401, "Authentication failed. Browser based integrations - to login append '?login-form-required=y' to the url you tried to access.", u'PATH:', u'https://abcdefgh.com/qcbin/rest/domains/CORE_PRODUCTS/projects/NEO/Testing/TestLab', u'HEADERS:', {'Cookie': 'LWSSO_COOKIE_KEY=kFvs5DG2lK918ErK8Kf11u1bua_1bjLYpuPxw-1QCLBd3Pu4DoXZzCoVjuzMckASy-_87uA-5hGBnLd-atrhiMaRxkD2Ed79frDzx-qzWCCw-V0lSeWOXTWt57L-HdA9ZzWb3biMqaEnEdQvokPZteJKSgsXyMVqqRQgUrj3bB-ybLNuWngycagsTkLGnshoaNdqGaW6H_UVu7tOsNQxK2on3rMrbnqe2UrP6gPzyViBMPKFPRvuwhb_bsgPF8L3GdfWTbKg7u5Fz6cxq_eerwe2G8PrwFe2PzRC5D2VCHyxxAvk4trI4eUx4U5cVMPZ;Path=/;HTTPOnly', 'Content-Type': 'application/xml', 'Accept': 'application/xml', 'KeepAlive': 'true'})
Update test case state- pass/Fail ==> Yet to Implement
Could you please help understanding the restful api’s exposed by HP ALM 12.
Below is my sample Python script:
import requests
import xml.etree.ElementTree as ET
class ALMUrl:
    """Builds the HP ALM 12 REST endpoint URLs for one domain/project.

    get_auth()/get_logout() return the authentication-point endpoints;
    any other lookup falls through to __getattr__, which ALMSession calls
    explicitly to compose resource paths under the project root, e.g.
    url.__getattr__('defects', '7') -> <base>/rest/domains/D/projects/P/defects/7
    """

    def __init__(self, qcurl, domain, project):
        self.__base = u'https://' + qcurl + u'/qcbin'
        self.__auth = self.__base + u'/authentication-point/authenticate'
        self.__logout = self.__base + u'/authentication-point/logout'
        self.__work = self.__base + u'/rest/domains/' + domain + u'/projects/' + project

    def get_auth(self):
        """URL of the authentication endpoint."""
        return self.__auth

    def get_logout(self):
        """URL of the logout endpoint."""
        return self.__logout

    def __getattr__(self, *args):
        # BUG FIX: the original swallowed *every* missing attribute lookup,
        # so copy/pickle/hasattr probes (e.g. __deepcopy__) silently got a
        # URL string back.  Dunder lookups now raise AttributeError as the
        # attribute protocol expects; explicit resource-path calls keep
        # working unchanged.
        if args and isinstance(args[0], str) and args[0].startswith('__') and args[0].endswith('__'):
            raise AttributeError(args[0])
        result = self.__work
        for arg in args:
            result += '/' + arg
        return result
class ALMSession:
    """Cookie-based session wrapper over the HP ALM 12 REST API.

    Flow: Open() -> Get()/Update() -> Close().  Methods return 0 on success,
    the HTTP status code on server failure, or 1 when no session cookie is
    held yet (Open() was not called or failed) — same convention as before.
    """

    def __init__(self, login, password):
        # BUG FIX: the original wrapped this in try/except and the handler
        # referenced an undefined self.__h, so the error path itself raised
        # NameError.  Building two literals cannot fail; no handler needed.
        # "Cookie" is filled in by Open() with the LWSSO session token.
        self.__headers = {"Accept": "application/xml",
                          "Content-Type": "application/xml",
                          "KeepAlive": "true",
                          "Cookie": None}
        # Sent as HTTP basic auth on every request.
        self.__user_pass = (login, password)

    def Open(self, ALMUrl):
        """Authenticate against ALM and capture the session cookie."""
        r = requests.get(ALMUrl.get_auth(), auth=self.__user_pass)
        # BUG FIX: was 'r.status_code is 200' — an identity comparison that
        # only works by accident of CPython's small-int cache; use equality.
        if r.status_code == 200:
            print(u"Open ALM session success", u'AUTH URL:', ALMUrl.get_auth(), u'HEADERS:', self.__headers)
            # .get() avoids a KeyError if the server omits Set-Cookie.
            self.__headers["Cookie"] = r.headers.get('set-cookie')
            return 0
        else:
            print(u"Open ALM session", r.status_code, r.reason, u'AUTH URL:', ALMUrl.get_auth(), u'HEADERS:', self.__headers)
            return int(r.status_code)

    def Close(self, ALMUrl):
        """Log out of ALM; requires a previous successful Open()."""
        if self.__headers["Cookie"] is not None:
            r = requests.get(ALMUrl.get_logout(), headers=self.__headers, auth=self.__user_pass)
            if r.status_code == 200:  # BUG FIX: was 'is 200'
                print(u"Close ALM session success", u'LOGOUT URL:', ALMUrl.get_logout(), u'HEADERS:', self.__headers)
                return 0
            else:
                print(u"Close ALM session", r.status_code, r.reason, u'LOGOUT URL:', ALMUrl.get_logout(), u'HEADERS:', self.__headers)
                return int(r.status_code)
        else:
            print(u"Close ALM session", u"1", u"httplib2.Http was not initialized")
            return 1

    def Get(self, ALMUrl, *args):
        """GET the resource path built from *args; returns (code, result-or-None)."""
        if self.__headers["Cookie"] is not None:
            url = ALMUrl.__getattr__(*args)
            r = requests.get(url, headers=self.__headers, auth=self.__user_pass)
            if r.status_code == 200:
                print(u"[ALMSession] Get success", u"URL:", url, u"HEADERS:", self.__headers)
                res = []
                # NOTE(review): parse_xml is not defined in this snippet —
                # presumably implemented elsewhere in the full class; verify.
                self.parse_xml(r.content, res)
                return 0, res
            elif r.status_code == 500:
                try:
                    # BUG FIX: parse the raw bytes; the original branched on
                    # the Python-2-only 'unicode' type and broke under Py3.
                    exceptionxml = ET.fromstring(r.content)
                    print(u"[ALMSession] Get ALM function with errors", exceptionxml[0].text, exceptionxml[1].text, u"PATH:", url, u"HEADERS:", self.__headers)
                except ET.ParseError as e:
                    # BUG FIX: ET.ParseError.message accessed a class
                    # attribute (AttributeError); report the caught instance.
                    print(u"[ALMSession] Get ALM function with errors, returned message is not XML", u"PATH:", url, u"HEADERS:", self.__headers, str(e))
                return int(r.status_code), None
            else:
                print(u"[ALMSession] Get ALM function with errors", r.status_code, r.reason, u"PATH:", url, u"HEADERS:", self.__headers)
                return int(r.status_code), None
        else:
            print(u"[ALMSession] Get ALM function with errors", u"1", u"httplib2.Http not initialized")
            return 1, None

    def Update(self, ALMUrl, data, *args):
        """PUT 'data' (XML entity) to the resource path built from *args."""
        if self.__headers["Cookie"] is not None:
            url = ALMUrl.__getattr__(*args)
            r = requests.put(url,
                             headers=self.__headers,
                             data=data,
                             auth=self.__user_pass)
            if r.status_code == 200:
                print(u"[ALMSession] Update success", u"URL:", url)
                return 0
            elif r.status_code == 500:
                # BUG FIX: bytes parse replaces the Py2-only unicode check.
                exceptionxml = ET.fromstring(r.content)
                print(u"[ALMSession] Update ALM function with errors", exceptionxml[0].text, exceptionxml[1].text, u"PATH:", url, u"DATA:", data, u"HEADERS:", self.__headers)
                return int(r.status_code)
            else:
                print(u"[ALMSession] Update ALM function with errors", r.status_code, r.reason, u"PATH:", url, u"DATA:", data, u"HEADERS:", self.__headers)
                return int(r.status_code)
        else:
            print(u"[ALMSession] Update ALM function with errors", u"1", u"httplib2.Http not initialized")
            return 1
if __name__ == '__main__':
    # Demo driver: authenticate, fetch one resource, then log out.
    qcurl = "almint.eu.abc.com"
    qcuname = "abc"
    qcpwd = "acb"
    qcdomain = "CORE_PRODUCTS"
    qcproject = "NEO"
    objALMUrl = ALMUrl(qcurl,qcdomain,qcproject)
    objALMSession = ALMSession(qcuname,qcpwd)
    objALMSession.Open(objALMUrl)
    # NOTE(review): the 401 in the question's log came from exactly this path;
    # "Testing/TestLab" is a UI location, not a REST collection — entity
    # endpoints look like "test-sets" / "test-instances".  Verify the endpoint.
    objALMSession.Get(objALMUrl,"Testing/TestLab")
    objALMSession.Close(objALMUrl)

Below code covers most of your requirement. In short, this code takes the output of protractor test from Jenkins and create test set (If not exist) in HP ALM and update the test status and attaches the report.
To know the list of end points, enter the following in your favourite browser
<<ALM_SERVER>>/qcbin/rest/resource-list
To understand the limitations and the schema details, GoTo Help in HP ALM.
import re
import json
import requests
import datetime
import time
import sys
from requests.auth import HTTPBasicAuth
# Input file produced by the protractor combined JSON reporter.
protractor_result_file = './combined_result.json'
# ALM connection settings; almProject is overwritten from sys.argv at the bottom.
almUserName = ""
almPassword = ""
almDomain = ""
almProject = ""
almURL = "https://---/qcbin/"
authEndPoint = almURL + "authentication-point/authenticate"
qcSessionEndPoint = almURL + "rest/site-session"
qcLogoutEndPoint = almURL + "authentication-point/logout"
# NOTE(review): midPoint is composed while almDomain is still "" — this only
# yields a valid path if almDomain is assigned before this line runs; verify.
midPoint = "rest/domains/" + almDomain + "/projects/"
mydate = datetime.datetime.now()
testSetName = ""        # set from sys.argv[1] at the bottom of the file
assignmentGroup = ""
parser_temp_dic = {}    # test-id -> accumulated status/run info, filled by parse_result()
cookies = dict()        # session cookies captured by alm_login()
headers = {
    'cache-control': "no-cache"
}
'''
Function : alm_login
Description : Authenticate user
Parameters : global parameter
alm_username - ALM User
alm_password - ALM Password
'''
def alm_login():
    """Authenticate against ALM and populate the module-level `cookies` dict.

    Two-step login: POST to the authentication point (basic auth) to obtain
    LWSSO_COOKIE_KEY, then POST to rest/site-session to obtain QCSession.
    Uses module globals authEndPoint, qcSessionEndPoint, almUserName,
    almPassword, headers, cookies.
    """
    response = requests.post(authEndPoint, auth=HTTPBasicAuth(almUserName, almPassword), headers=headers)
    if response.status_code == 200:
        cookieName = response.headers.get('Set-Cookie')
        LWSSO_COOKIE_KEY = cookieName[cookieName.index("=") + 1: cookieName.index(";")]
        cookies['LWSSO_COOKIE_KEY'] = LWSSO_COOKIE_KEY
        response = requests.post(qcSessionEndPoint, headers=headers, cookies=cookies)
        # BUG FIX: 'a == 200 | a == 201' parses as the chained comparison
        # 'a == (200 | a) == 201' because bitwise | binds tighter than ==.
        if response.status_code in (200, 201):
            cookieName = response.headers.get('Set-Cookie').split(",")[1]
            QCSession = cookieName[cookieName.index("=") + 1: cookieName.index(";")]
            cookies['QCSession'] = QCSession
    return
'''
Function : alm_logout
Description : terminate user session
Parameters : No Parameters
'''
def alm_logout():
    """Terminate the ALM session and print the response's 'Expires' header."""
    resp = requests.post(qcLogoutEndPoint, headers=headers, cookies=cookies)
    print(resp.headers.get('Expires'))
'''
Function : parse_result
Description : Parse protractor result file
Parameters : No Parameters
'''
def parse_result():
    """Parse the protractor JSON results and push them into ALM.

    Reads module-level config (testSetPath, testPlanPath, almURL, midPoint,
    headers, cookies) and mutates the global parser_temp_dic.  Steps:
      1. locate/create the target test set,
      2. bulk-POST any missing test instances,
      3. bulk-POST one 'Not Completed' run per instance,
      4. bulk-PUT each run's final Passed/Failed status,
      5. attach the combined HTML report to the test set.
    (Indentation reconstructed — the pasted source was flattened; verify the
    nesting of the instance-creation branch against the original.)
    """
    try:
        f = open(protractor_result_file, 'r')
    except (FileNotFoundError) as err:
        # NOTE(review): FileNotFoundError is Python-3 only, while other parts
        # of this answer target Python 2; the file is also never closed —
        # consider a 'with' block.
        print("File Not found error: {0}".format(err))
        return
    obj = json.load(f)
    test_set_id = find_test_set(find_test_set_folder(testSetPath), "test-sets")
    test_instance_data = "<Entities>"
    test_instance_data_put = "<Entities>"  # built but never posted in this snippet
    test_step_data = "<Entities>"          # built but never posted in this snippet
    # Get all the test id's if test plan folder exists already
    test_plan_details = find_test_plan_folder(testPlanPath)
    payload = {"query": "{test-folder.hierarchical-path['" + test_plan_details["hierarchical-path"] + "*']}",
               "fields": "id,name,steps", "page-size": 5000}
    response = requests.get(almURL + midPoint + "/tests", params=payload, headers=headers, cookies=cookies)
    all_tests = json.loads(response.text)
    # Get all test instance if test set exists already
    str_api = "test-instances"
    payload = {"query": "{cycle-id['" + test_set_id + "']}", "fields": "test-id", "page-size": 5000}
    response = requests.get(almURL + midPoint + "/" + str_api, params=payload, headers=headers, cookies=cookies)
    all_test_instance = json.loads(response.text)
    test_order = 0
    # Walk every spec of every suite in the protractor output.
    for spec in obj:
        for testSuite in spec:
            if len(spec[testSuite]['specs']) > 0:
                for test in spec[testSuite]['specs']:
                    # Strip punctuation so names compare equal to the ALM side.
                    outputTestName = re.sub('[^A-Za-z0-9\s]+', '', test['fullName']).strip()
                    # Check if the test case already exits in test plan
                    test_details = test_exists(outputTestName, all_tests)
                    test_case_exists = True
                    if len(test_details) == 0:
                        test_case_exists = False
                    if test_case_exists is True:
                        parser_temp_dic[int(test_details['id'])] = {'status': []}
                    # Check if test instance exists in test set
                    test_instance_exists = True
                    if test_case_exists == True:
                        parser_temp_dic[int(test_details['id'])]['status'].append(test['status'].capitalize())
                        if len(test_exists(test_details['id'], all_test_instance)) == 0:
                            test_instance_exists = False
                        if test_instance_exists is False and test_case_exists is True:
                            test_order += 1
                            # Accumulate one <Entity> per missing instance; chr(34) is '"'.
                            test_instance_data = test_instance_data + "<Entity Type=" + chr(34) + "test-instance" + chr(
                                34) + "><Fields><Field Name=" + chr(
                                34) + "owner" + chr(34) + "><Value>" + almUserName + "</Value></Field><Field Name=" + chr(
                                34) + "subtype-id" + chr(
                                34) + "><Value>hp.qc.test-instance.MANUAL</Value></Field><Field Name=" + chr(
                                34) + "test-order" + chr(34) + "><Value>" + str(
                                test_order) + "</Value></Field><Field Name=" + chr(
                                34) + "cycle-id" + chr(
                                34) + "><Value>" + test_set_id + "</Value></Field><Field Name=" + chr(
                                34) + "test-id" + chr(34) + "><Value>" + str(
                                test_details['id']) + "</Value></Field></Fields></Entity>"
                            # Mirror the new instance locally so later lookups in
                            # all_test_instance see it (the id "675" is a placeholder).
                            template_in = "{\"Type\": \"test-instance\", \"Fields\": [{\"Name\": \"id\", \"values\": [{\"value\"" \
                                          ": \"675\"}]}, {\"Name\": \"test-id\", \"values\": [{\"value\": \"" + str(
                                test_details['id']) + "\"}]}]}"
                            all_test_instance['entities'].append(json.loads(template_in))
    # Bulk-create the missing test instances in one request.
    bulk_operation_post("test-instances", test_instance_data + "</Entities>", True, "POST")
    strAPI = "test-instances"
    payload = {"query": "{cycle-id['" + test_set_id + "']}", "fields": "id,test-id,test-config-id,cycle-id",
               "page-size": 5000}
    response = requests.get(almURL + midPoint + "/" + strAPI, params=payload, headers=headers, cookies=cookies)
    obj = json.loads(response.text)
    run_instance_post = "<Entities>"
    for entity in obj["entities"]:
        # Run name: 'automation_YYYY_MM_DD HH_MM_SS' ('-' and ':' replaced).
        run_name = re.sub('[-:]', '_',
                          'automation_' + datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
        temp_map = create_key_value(entity["Fields"])
        parser_temp_dic[int(temp_map['test-id'])]['testcycl-id'] = temp_map['id']
        parser_temp_dic[int(temp_map['test-id'])]['test-config-id'] = temp_map['test-config-id']
        parser_temp_dic[int(temp_map['test-id'])]['test-id'] = temp_map['test-id']
        parser_temp_dic[int(temp_map['test-id'])]['cycle-id'] = temp_map['cycle-id']
        # parser_temp_dic[int(temp_map['test-id'])]['status'].sort()
        # A test counts as Passed only if none of its spec entries Failed.
        status = "Passed"
        if 'Failed' in parser_temp_dic[int(temp_map['test-id'])]['status']:
            status = 'Failed'
        parser_temp_dic[int(temp_map['test-id'])]['final-status'] = status
        run_instance_post = run_instance_post + "<Entity Type=" + chr(34) + "run" + chr(
            34) + "><Fields><Field Name=" + chr(
            34) + "name" + chr(34) + "><Value>" + run_name + "</Value></Field><Field Name=" + chr(34) + "owner" + chr(
            34) + "><Value>" + almUserName + "</Value></Field><Field Name=" + chr(34) + "test-instance" + chr(
            34) + "><Value>1</Value></Field><Field Name=" + chr(34) + "testcycl-id" + chr(34) + "><Value>" + str(
            temp_map['id']) + "</Value></Field><Field Name=" + chr(34) + "cycle-id" + chr(34) + "><Value>" + str(
            temp_map['cycle-id']) + "</Value></Field><Field Name=" + chr(34) + "status" + chr(
            34) + "><Value>" + "Not Completed" + "</Value></Field><Field Name=" + chr(34) + "test-id" + chr(
            34) + "><Value>" + temp_map['test-id'] + "</Value></Field><Field Name=" + chr(34) + "subtype-id" + chr(
            34) + "><Value>hp.qc.run.MANUAL</Value></Field></Fields></Entity>"
    # Bulk-create the runs in 'Not Completed' state.
    bulk_operation_post("runs", run_instance_post + "</Entities>", True, "POST")
    # ("*************\tRUNS\t*********************")
    payload = {"query": "{cycle-id['" + test_set_id + "']}", "fields": "id,test-id", "page-size": 5000}
    response = requests.get(almURL + midPoint + "/runs", params=payload, headers=headers, cookies=cookies)
    obj = json.loads(response.text)
    run_ids = []
    run_instance_put = "<Entities>"
    for entity in obj["entities"]:
        if len(entity["Fields"]) != 1:
            temp_map = create_key_value(entity["Fields"])
            parser_temp_dic[int(temp_map['test-id'])]['run-id'] = temp_map['id']
            run_ids.append(temp_map['id'])
            status = parser_temp_dic[int(temp_map['test-id'])]['final-status']
            run_instance_put = run_instance_put + "<Entity Type=" + chr(34) + "run" + chr(
                34) + "><Fields><Field Name=" + chr(
                34) + "id" + chr(34) + "><Value>" + str(temp_map['id']) + "</Value></Field><Field Name=" + chr(
                34) + "testcycl-id" + chr(34) + "><Value>" + str(
                parser_temp_dic[int(temp_map['test-id'])]['testcycl-id']) + "</Value></Field><Field Name=" + chr(
                34) + "status" + chr(
                34) + "><Value>" + status + "</Value></Field></Fields></Entity>"
    # Bulk-update the runs with their final Passed/Failed status.
    bulk_operation_post("runs", run_instance_put + "</Entities>", True, "PUT")
    # Upload result file
    payload = open("./screenshots/combined_result.html", 'rb')
    headers['Content-Type'] = "application/octet-stream"
    headers['slug'] = "protractor-test-results.html"
    response = requests.post(almURL + midPoint + "/" + "test-sets/" + str(test_set_id) + "/attachments/",
                             cookies=cookies, headers=headers,
                             data=payload)
    return
'''
Function : find_test_set_folder
Description : This sends a couple of http request and authenticate the user
Parameters : 1 Parameter
test_set_path - ALM test set path
'''
def find_test_set_folder(test_set_path):
    """Resolve (creating on the way if needed) the test-set folder path and return its id."""
    folder_json = json.loads(find_folder_id(test_set_path.split("\\"), "test-set-folders", 0, "id"))
    if 'entities' in folder_json:
        fields = folder_json['entities'][0]['Fields']
    else:
        fields = folder_json['Fields']
    return create_key_value(fields)['id']
'''
Function : find_test_set
Description : This sends a couple of http request and authenticate the user
Parameters : 1 Parameter
test_set_path - ALM test set path
'''
def find_test_set(test_set_folder_id, strAPI):
    """Return the id of test set `testSetName` under the given folder, creating it if absent.

    Queries by name + parent-id; on a miss, POSTs a new hp.qc.test-set.default
    entity.  Returns the entity id, or "" if creation failed.
    """
    payload = {"query": "{name['" + testSetName + "'];parent-id[" + str(test_set_folder_id) + "]}", "fields": "id"}
    response = requests.get(almURL + midPoint + "/" + strAPI, params=payload, headers=headers, cookies=cookies)
    obj = json.loads(response.text)
    parentID = ""
    if obj["TotalResults"] >= 1:
        parentID = get_field_value(obj['entities'][0]['Fields'], "id")
        # print("test set id of " + testSetName + " is " + str(parentID))
    else:
        # print("Folder " + testSetName + " does not exists")
        # strAPI[:-1] turns the endpoint name ("test-sets") into the entity type ("test-set").
        data = "<Entity Type=" + chr(34) + strAPI[0:len(strAPI) - 1] + chr(34) + "><Fields><Field Name=" + chr(
            34) + "name" + chr(
            34) + "><Value>" + testSetName + "</Value></Field><Field Name=" + chr(34) + "parent-id" + chr(
            34) + "><Value>" + str(test_set_folder_id) + "</Value></Field><Field Name=" + chr(34) + "subtype-id" + chr(
            34) + "><Value>hp.qc.test-set.default</Value></Field> </Fields> </Entity>"
        response = requests.post(almURL + midPoint + "/" + strAPI, data=data, headers=headers, cookies=cookies)
        obj = json.loads(response.text)
        # BUG FIX: 'a == 200 | a == 201' parsed as the chained comparison
        # 'a == (200 | a) == 201'; use a membership test instead.
        if response.status_code in (200, 201):
            parentID = get_field_value(obj['Fields'], "id")
            # print("test set id of " + testSetName + " is " + str(parentID))
    return parentID
'''
Function : find_test_plan_folder
Description : This sends a couple of http request and authenticate the user
Parameters : 1 Parameter
test_set_path - ALM test set path
'''
def find_test_plan_folder(test_plan_path):
    """Resolve (creating on the way if needed) the test-plan folder path; return its Fields map."""
    folder_json = json.loads(find_folder_id(test_plan_path.split("\\"), "test-folders", 2, "id,hierarchical-path"))
    if 'entities' in folder_json:
        fields = folder_json['entities'][0]['Fields']
    else:
        fields = folder_json['Fields']
    return create_key_value(fields)
'''
Function : find_folder_id
Description : This sends a couple of http request and authenticate the user
Parameters : 1 Parameter
test_set_path - ALM test set path
'''
def find_folder_id(arrFolder, strAPI, parentID, fields):
    """Walk a folder path level by level, creating missing folders on the way.

    arrFolder is the backslash-split path; parentID carries the id of the
    folder found/created at each level.  Returns the raw JSON text of the
    last query/creation response.
    """
    response = ""
    for folderName in arrFolder:
        payload = {"query": "{name['" + folderName + "'];parent-id[" + str(parentID) + "]}", "fields": fields}
        response = requests.get(almURL + midPoint + "/" + strAPI, params=payload, headers=headers, cookies=cookies)
        obj = json.loads(response.text)
        if obj["TotalResults"] >= 1:
            parentID = get_field_value(obj['entities'][0]['Fields'], "id")
            # print("folder id of " + folderName + " is " + str(parentID))
        else:
            # print("Folder " + folderName + " does not exists")
            data = "<Entity Type=" + chr(34) + strAPI[0:len(strAPI) - 1] + chr(34) + "><Fields><Field Name=" + chr(
                34) + "name" + chr(
                34) + "><Value>" + folderName + "</Value></Field><Field Name=" + chr(34) + "parent-id" + chr(
                34) + "><Value>" + str(parentID) + "</Value></Field></Fields> </Entity>"
            response = requests.post(almURL + midPoint + "/" + strAPI, data=data, headers=headers, cookies=cookies)
            obj = json.loads(response.text)
            # BUG FIX: 'a == 200 | a == 201' parsed as 'a == (200 | a) == 201'.
            if response.status_code in (200, 201):
                parentID = get_field_value(obj['Fields'], "id")
                # print("folder id of " + folderName + " is " + str(parentID))
    return response.text
'''
Function : get_field_value
Description : Find the value of matching json key
Parameters : 2 Parameters
obj - JSON object
field_name - JSON KEY
'''
def get_field_value(obj, field_name):
    """Return the first value of the field named `field_name`, or None if absent."""
    for field in obj:
        if field['Name'] == field_name:
            return field['values'][0]['value']
    return None
'''
Function : findTestCase
Description : Check if given test case exists, if not create one
Parameters : 3 parameters
str_api - End point name
str_test_name - Name of the test case
parent_id - Test Plan folder id
'''
def test_exists(str_test_name, obj_json):
    """Look up an entity by normalized name in an ALM entity-collection JSON.

    Entity names have '_' replaced by spaces and punctuation stripped before
    comparison.  Returns the entity's Fields as a {Name: value} dict via
    create_key_value(), or '' when no entity matches (callers test len()).
    """
    str_exists = ''
    for test in obj_json['entities']:
        # BUG FIX: raw string for the regex — '\s' in a plain literal is an
        # invalid escape (DeprecationWarning/SyntaxWarning on Python 3).
        almtestname = re.sub(r'[^A-Za-z0-9\s_]+', '', test['Fields'][1]['values'][0]['value'].replace("_", " ")).strip()
        if almtestname == str_test_name:
            return create_key_value(test['Fields'])
    return str_exists
'''
Function : Post Test Case / Test Instance
Description : Generic function to post multiple entities. Make sure to build the data in correct format
Parameters : 3 parameters
str_api - End point name
data - Actual data to post
bulk_operation - True or False
'''
def bulk_operation_post(str_api, data, bulk_operation, request_type):
    """POST or PUT a (possibly bulk) XML entity payload to `str_api`.

    When bulk_operation is true the Content-Type is switched to the
    collection media type for the duration of the request and always
    restored afterwards.  Returns response.text on 200/201, otherwise the
    response object (or None if request_type was neither POST nor PUT).
    """
    # BUG FIX: the original initialized response to "" and then read
    # .status_code from it when request_type matched neither branch
    # (AttributeError); use None plus an explicit guard.
    response = None
    try:
        if bulk_operation:
            headers['Content-Type'] = "application/xml;type = collection"
        if request_type == 'POST':
            response = requests.post(almURL + midPoint + "/" + str_api, data=data, headers=headers, cookies=cookies)
        elif request_type == 'PUT':
            response = requests.put(almURL + midPoint + "/" + str_api, data=data, headers=headers, cookies=cookies)
    finally:
        # Always restore the default media type for subsequent requests.
        headers['Content-Type'] = "application/xml"
    # BUG FIX: 'a == 200 | a == 201' parsed as 'a == (200 | a) == 201'.
    if response is not None and response.status_code in (200, 201):
        return response.text
    return response
'''
Function : remove_special_char
Description : Function to remove non-acceptable characters
Parameters : 1 parameter
str_input - input string
'''
def remove_special_char(str_input):
    """Strip every character except letters, digits, whitespace, '_' and '-', then trim.

    BUG FIX: regex moved to a raw string — '\\s' in a plain literal is an
    invalid escape sequence (warning on Python 3).
    """
    return re.sub(r'[^A-Za-z0-9\s_-]+', '', str_input).strip()
'''
Function : create_key_value
Description : Function to generate key-value pair from json
Parameters : 1 parameter
obj_json - JSON Object
'''
def create_key_value(obj_json):
    """Flatten an ALM 'Fields' array into a {Name: first value} dict.

    Fields with no values, or whose first value entry lacks a 'value' key,
    are skipped.
    """
    flattened = {}
    for field in obj_json:
        values = field['values']
        if values and 'value' in values[0]:
            flattened[field['Name']] = values[0]['value']
    return flattened
'''
'''
'''
CORE FUNCTION
'''
def update_results_alm():
    """Log in, push the protractor results into ALM, and always log out."""
    try:
        alm_login()
        # Request bodies stay XML; responses are requested as JSON.
        headers['Content-Type'] = "application/xml"
        headers['Accept'] = "application/json"
        parse_result()
    finally:
        alm_logout()
# Entry point: expects exactly four CLI arguments —
#   1: test set name, 2: test plan path, 3: test set path, 4: ALM project.
if len(sys.argv) - 1 != 4:
    # NOTE(review): the message says "Build number" but the check above is
    # for the four arguments listed here.
    print('Build number is required.You have passed :', str(sys.argv), 'arguments.')
else:
    testSetName = sys.argv[1]
    testPlanPath = sys.argv[2]
    testSetPath = sys.argv[3]
    almProject = sys.argv[4]
    print(testSetName + "\n" + testPlanPath + "\n" + testSetPath + "\n" + almProject)
    # Complete the REST midpoint with the project chosen at runtime.
    midPoint += almProject
    update_results_alm()

You can find the code here
https://github.com/arunprabusamy/Python-Libraries/blob/main/alm_RestAPI/almLib.py
Only three parameters required - Test Set ID (Cycle ID), ALM Test ID, Execution Status. Uses JSON payload and simple.

Related

Using data from API in subsequent API calls

I should preface this with I am not a programmer and most of this code was not written by me. I unfortunately have a need and am trying to hack my way through this.
What I am trying to do is chain a few API calls together to ultimately get a list of IPs. What this script does is queries the API and pulls (and prints) a list of device IDs. The device IDs look like this:
akdjlfijoaidjfod
g9jkidfjlskdjf44
3jdhfj4hf9dfiiu4
The device IDs then need to be passed as a parameter in the next API call like this:
https://api.example.com/devices/entities/devices/v1?ids=akdjlfijoaidjfod&ids=g9jkidfjlskdjf44&ids=3jdhfj4hf9dfiiu4 and so on.
I don't know where to begin. Instead of printing the asset IDs, I assume they should be stored in a variable and then appended to the URL. I tried doing that with "ID_LIST" but that didn't seem to work. Can you point me in the right direction?
import requests
import json
# Define API REST paths
BASE_URL = "https://api.example.com/"
OAUTH_URL_PART = "oauth2/token"
DEVICE_SEARCH = "devices/queries/devices/v1"
DEVICE_DETAILS = "devices/entities/devices/v1"
# Empty auth token to hold value for subsequent request
auth_Token = ""
# Section 1 - Authenticate to Example OAUTH
# Build a dictionary to hold the headers
headers = {
    'Content-type': 'application/x-www-form-urlencoded',
    'accept': 'application/json'
}
# Build a dictionary to holds the authentication data to be posted to get a token
# (OAuth2 client-credentials grant: client_id + client_secret exchanged for a token).
auth_creds = {}
auth_creds['client_id'] = "<client_id>"
auth_creds['client_secret'] = "<client_secret>"
auth_creds['grant_type'] = "client_credentials"
# Call the API to get a Authentication token - NOTE the authentication creds
print("Requesting token from " + BASE_URL + OAUTH_URL_PART)
auth_response = requests.post(BASE_URL + OAUTH_URL_PART,data=auth_creds, headers=headers)
# Check if successful
# NOTE(review): success is taken to be exactly 201 here; some OAuth token
# endpoints answer 200 — confirm against the provider's documentation.
if auth_response.status_code != 201:
    # Output debug information
    print("\n Return Code: " + str(auth_response.status_code) + " " + auth_response.reason)
    print("Path: " + auth_response.request.path_url)
    print("Headers: ")
    print(auth_response.request.headers)
    print("Body: " + auth_response.request.body)
    print("\n")
    print("Trace_ID: " + auth_response.json()['meta']['trace_id'])
else:
    # Section 2 - Capture OAUTH token and store in headers for later use
    print("Token Created")
    # Capture the auth token for reuse in subsequent calls, by pulling it from the response
    # Note this token can be reused multiple times until it expires after 30 mins
    auth_Token = auth_response.json()['access_token']
    # Replace the form-encoded headers with a bearer-token header for the API calls.
    headers = {
        'authorization':'bearer ' + auth_Token,
        'accept': 'application/json'
    }
# Section 3 - Reuse authentication token to call other Example OAUTH2 APIs
# Build parameter dictionary
call_params = {}
call_params['offset'] ="" # Non-mandatory param
call_params['limit'] ="5000" # The number of results
call_params['sort'] ="" #
call_params['filter'] ="" # To exclude devices
# Call devices API
print("Searching Asset ID by getting from " + BASE_URL + DEVICE_SEARCH)
DEVICE_search_response = requests.get(BASE_URL + DEVICE_SEARCH,params=call_params,headers=headers)
#DEVICE_DETAILS_response = request.get(BASE_URL + DEVICE_DETAILS,headers=headers)
# Check for errors
if DEVICE_search_response.status_code != 200:
    # Output debug information
    print("\n Return Code: " + str(DEVICE_search_response.status_code) + " " + DEVICE_search_response.reason)
    print("Path: " + DEVICE_search_response.request.path_url)
    print("Headers: ")
    print(DEVICE_search_response.request.headers)
    print("Body: " + DEVICE_search_response.request.body)
    print("\n")
    print("Trace_ID: " + DEVICE_search_response.json()['meta']['trace_id'])
else:
    # Iterate the results and print
    result = DEVICE_search_response.json()
    print("DEVICE found on " + str(len(result['resources'])) + " the following device id:")
    for devices in result['resources']:
        print(devices)
    ###########Part that is not working###########
    # NOTE(review): this GET sends no 'ids' query parameters, so the details
    # endpoint has nothing to look up — the ids printed above must be
    # appended as ?ids=...&ids=... (the answer below does exactly this).
    DEVICE_DETAILS_response = requests.get(BASE_URL + DEVICE_DETAILS,headers=headers)
    #ID_LIST = str(len(result['resources']).replace(",", "&ids=")
    if DEVICE_DETAILS_response.status_code != 200:
        # Output debug information
        print("\n Return Code: " + str(DEVICE_DETAILS_response.status_code) + " " + DEVICE_DETAILS_response.reason)
        print("Path: " + DEVICE_DETAILS_response.request.path_url)
        print("Headers: ")
        print(DEVICE_DETAILS_response.request.headers)
        print("Body: " + DEVICE_DETAILS_response.request.body)
        print("\n")
        print("Trace_ID: " + DEVICE_DETAILS_response.json()['meta']['trace_id'])
    else:
        result = DEVICE_DETAILS_response.json()
        print("Device Details Found")
        for details in result['resources']:
            print(details)
Hi — to convert the strings in result['resources']:
['akdjlfijoaidjfod',
'g9jkidfjlskdjf44',
'3jdhfj4hf9dfiiu4']
to : https://api.example.com/devices/entities/devices/v1?ids=akdjlfijoaidjfod&ids=g9jkidfjlskdjf44&ids=3jdhfj4hf9dfiiu4
try this function:
def get_modified_url(mylist, myurl):
    """Append the device ids in `mylist` to `myurl` as ?ids=a&ids=b&... parameters.

    An empty list yields just `myurl + '?'`, matching the loop-based original.
    """
    if not mylist:
        return myurl + '?'
    return myurl + '?ids=' + '&ids='.join(mylist)
# Demo call: build and print the bulk-details URL from the ids returned by the
# search request (relies on `result`, BASE_URL and DEVICE_DETAILS from the
# question's script).
print(get_modified_url(result['resources'], BASE_URL + DEVICE_DETAILS ))
full code would be:
def get_modified_url(mylist, myurl):
    """Append the device ids in `mylist` to `myurl` as ?ids=a&ids=b&... parameters.

    An empty list yields just `myurl + '?'`, matching the loop-based original.
    """
    if not mylist:
        return myurl + '?'
    return myurl + '?ids=' + '&ids='.join(mylist)
# Collect the device ids from the search call so they can be passed to the
# details endpoint as ?ids=...&ids=... query parameters.
device_list = []
DEVICE_search_response = requests.get(BASE_URL + DEVICE_SEARCH,params=call_params,headers=headers)
# Check for errors
if DEVICE_search_response.status_code != 200:
    # Output debug information
    print("\n Return Code: " + str(DEVICE_search_response.status_code) + " " + DEVICE_search_response.reason)
    print("Path: " + DEVICE_search_response.request.path_url)
    print("Headers: ")
    print(DEVICE_search_response.request.headers)
    print("Body: " + DEVICE_search_response.request.body)
    print("\n")
    print("Trace_ID: " + DEVICE_search_response.json()['meta']['trace_id'])
else:
    # Iterate the results and print
    result = DEVICE_search_response.json()
    print("DEVICE found on " + str(len(result['resources'])) + " the following device id:")
    for devices in result['resources']:
        print(devices)
        device_list.append(devices)
    # Build the details URL from the collected ids and fetch them in one call.
    new_url = get_modified_url(device_list, BASE_URL + DEVICE_DETAILS )
    DEVICE_DETAILS_response = requests.get(new_url, headers=headers)
    if DEVICE_DETAILS_response.status_code != 200:
        # Output debug information
        print("\n Return Code: " + str(DEVICE_DETAILS_response.status_code) + " " + DEVICE_DETAILS_response.reason)
        print("Path: " + DEVICE_DETAILS_response.request.path_url)
        print("Headers: ")
        print(DEVICE_DETAILS_response.request.headers)
        print("Body: " + DEVICE_DETAILS_response.request.body)
        print("\n")
        print("Trace_ID: " + DEVICE_DETAILS_response.json()['meta']['trace_id'])
    else:
        result = DEVICE_DETAILS_response.json()
        print("Device Details Found")
        for details in result['resources']:
            print(details)

Error when connecting to azure-datalakes using continuation token

Im currently trying list files/directories inside of adls2 using a continuation token (currently our folder has over 5000 files). I am able to send my first request, however receive a 403 error (presumably meaning incorrect formatting) when trying to connect with the continuation token in the response, and unsure what formatting problems could be causing this error.
I have currently tried removing the = sign at the end of the key for the uri, seeing as that was the problem for someone else. I had also tried creating a header for the continuation inside of my request with no luck.
adls_request is the main function. This gets run twice, once for the initial request, second for the continuation. Currently I have the continuation set up inside the uri and signature.
def gen_signature(request_time, api_version, storage_account_name, file_system_name, storage_account_key, signature_params):
    """Build the SharedKey signature string for an ADLS Gen2 GET request.

    The string-to-sign is the GET verb, eleven empty standard-header slots
    (Content-Encoding/Language/Length/MD5/Type, Date, If-Modified-Since,
    If-Match, If-None-Match, If-Unmodified-Since, Range), the canonicalized
    x-ms headers, and the canonicalized resource.  It is HMAC-SHA256 signed
    with the base64-decoded account key and returned base64-encoded.
    """
    canonical_headers = 'x-ms-date:' + request_time + '\nx-ms-version:' + api_version
    canonical_resource = '/' + storage_account_name + '/' + file_system_name + signature_params
    string_to_sign = '\n'.join(['GET'] + [''] * 11 + [canonical_headers, canonical_resource])
    digest = hmac.new(base64.b64decode(storage_account_key),
                      msg=string_to_sign.encode('utf-8'),
                      digestmod=hashlib.sha256).digest()
    return base64.b64encode(digest).decode()
def create_headers(request_time, api_version, storage_account_name, signed_string):
    """Standard ADLS request headers plus the SharedKey Authorization header."""
    auth_value = 'SharedKey ' + storage_account_name + ':' + signed_string
    return {
        'x-ms-date': request_time,
        'x-ms-version': api_version,
        'Authorization': auth_value,
    }
def create_url(storage_account_name, file_system_name, url_params):
    """Compose the ADLS Gen2 filesystem endpoint URL."""
    host = storage_account_name + '.dfs.core.windows.net'
    return 'https://' + host + '/' + file_system_name + url_params
def set_optional_params(list_dir, file_dir, token_continuation):
    """Build the (signature_params, url_params) fragments for a filesystem list call.

    Returns ('', '') when list_dir is falsy.  FIX: the debug prints were
    Python-2 print statements, inconsistent with the print() calls used in
    the rest of this file; converted to function calls (output unchanged).
    """
    if token_continuation != '':
        token_continuation_sig = '\ncontinuation:' + token_continuation
        # NOTE(review): [:-1] drops the token's trailing character (the '='
        # padding the author tried removing); the accepted fix is to
        # URL-encode the token instead — verify before relying on this.
        token_continuation_url = '&continuation=' + token_continuation[:-1]
    else:
        token_continuation_sig = ''
        token_continuation_url = ''
    print(token_continuation_sig)
    print(token_continuation_url)
    if list_dir:
        print(type(token_continuation))
        signature_params = '\ndirectory:' + file_dir + '\nrecursive:true' + token_continuation_sig + '\nresource:filesystem'
        url_params = '?directory=' + file_dir + '&recursive=true' + token_continuation_url + '&resource=filesystem'
        return signature_params, url_params
    else:
        signature_params = ''
        url_params = ''
        return signature_params, url_params
def get_request_time():
    """Current UTC time formatted per RFC 1123 for the x-ms-date header."""
    now = datetime.datetime.utcnow()
    return now.strftime('%a, %d %b %Y %H:%M:%S GMT')
def adls_request(list_dir,
                 file_system_name,
                 file_dir = '',
                 storage_account_name = 'account_name',
                 storage_account_key = '123456789==',
                 api_version = '2018-11-09',
                 token_continuation = ''):
    """Issue a SharedKey-signed GET against an ADLS Gen2 filesystem and return the response."""
    signature_params, url_params = set_optional_params(list_dir, file_dir, token_continuation)
    request_time = get_request_time()
    signed = gen_signature(request_time, api_version, storage_account_name,
                           file_system_name, storage_account_key, signature_params)
    target_url = create_url(storage_account_name, file_system_name, url_params)
    return requests.get(target_url,
                        headers=create_headers(request_time, api_version, storage_account_name, signed))
I expect the response output to come up 200, containing the rest of the files inside the directory, but still am currently receiving 403 error.
Please try the code below, I use python 3.7 for the test:
import requests
import datetime
import hmac
import hashlib
import base64
import urllib.parse
def gen_signature(request_time, api_version, storage_account_name, file_system_name, storage_account_key, signature_params):
    """Compute the SharedKey signature for an ADLS Gen2 GET request.

    Builds the canonical string-to-sign (verb, eleven empty standard-header
    fields for a GET, canonicalized x-ms-* headers, canonicalized resource),
    then HMAC-SHA256-signs it with the base64-decoded account key and
    returns the base64-encoded signature string.
    """
    canonicalized_headers = 'x-ms-date:' + request_time + '\nx-ms-version:' + api_version
    canonicalized_resource = '/' + storage_account_name + '/' + file_system_name + signature_params
    # GET request: Content-Encoding/Language/Length/MD5/Type, Date, the four
    # If-* conditionals and Range are all empty (11 blank fields).
    string_to_sign = '\n'.join(['GET'] + [''] * 11
                               + [canonicalized_headers, canonicalized_resource])
    key = base64.b64decode(storage_account_key)
    digest = hmac.new(key, msg=string_to_sign.encode('utf-8'),
                      digestmod=hashlib.sha256).digest()
    return base64.b64encode(digest).decode()
def create_headers(request_time, api_version, storage_account_name, signed_string):
    """Assemble the request headers for a SharedKey-authorized ADLS Gen2 call."""
    authorization = 'SharedKey {}:{}'.format(storage_account_name, signed_string)
    return {
        'x-ms-date': request_time,
        'x-ms-version': api_version,
        'Authorization': authorization,
    }
def create_url(storage_account_name, file_system_name, url_params):
    """Return the full dfs endpoint URL: account host + filesystem + query string."""
    host = storage_account_name + '.dfs.core.windows.net'
    return 'https://' + host + '/' + file_system_name + url_params
def set_optional_params(list_dir, file_dir, token_continuation):
    """Return (signature_params, url_params) for an ADLS Gen2 list request.

    signature_params is appended to the CanonicalizedResource of the
    string-to-sign; url_params is the matching query string.  Both are empty
    when list_dir is falsy.  A continuation token is placed first in the
    signature (lexicographic order) and percent-encoded in the URL.
    """
    if not list_dir:
        return '', ''
    if token_continuation != '':
        sig_token = '\ncontinuation:' + token_continuation
        # The token typically ends with '=' and must be URL-encoded before
        # being appended to the query string.
        url_token = '&continuation=' + urllib.parse.quote_plus(token_continuation)
    else:
        sig_token = ''
        url_token = ''
    signature = sig_token + '\ndirectory:' + file_dir + '\nrecursive:true' + '\nresource:filesystem'
    query = '?directory=' + file_dir + '&recursive=true' + url_token + '&resource=filesystem'
    return signature, query
def get_request_time():
    """Current UTC timestamp in the RFC 1123 format expected by the x-ms-date header."""
    return format(datetime.datetime.utcnow(), '%a, %d %b %Y %H:%M:%S GMT')
def adls_request(list_dir,
                 file_system_name,
                 file_dir = '',
                 storage_account_name = 'account_name',
                 storage_account_key = '123456789==',
                 api_version = '2018-11-09',
                 token_continuation = ''):
    """Sign and send a GET to the ADLS Gen2 REST API; returns the Response.

    Passing a continuation token fetches the next page of a directory
    listing (the service caps each page at 5000 entries).
    """
    sig_params, query = set_optional_params(list_dir, file_dir, token_continuation)
    now = get_request_time()
    signed = gen_signature(now, api_version, storage_account_name,
                           file_system_name, storage_account_key, sig_params)
    request_headers = create_headers(now, api_version, storage_account_name, signed)
    target = create_url(storage_account_name, file_system_name, query)
    print(target)
    return requests.get(target, headers=request_headers)
if __name__ == '__main__':
    # Demo driver: lists a directory twice to exercise continuation-token
    # paging (ADLS Gen2 returns at most 5000 entries per response).
    list_dir = True
    file_system_name ="dd1"
    file_dir="testfile"
    storage_account_name = 'xxx'    # replace with the real account name
    storage_account_key = 'xxxx'    # replace with the real account key
    api_version = '2018-11-09'
    token_continuation = ''
    print("******First Time without continuation token******")
    #The 1st time to get files which can be up to 5000
    r = adls_request(list_dir,file_system_name,file_dir,storage_account_name,storage_account_key,api_version,token_continuation)
    print(r)
    print("\n\n******Sencond Time with continuation token******")
    #Then 2nd time to get files with continuation token
    #when files are more than 5000, you will get a continuation token
    if 'x-ms-continuation' in r.headers:
        token_continuation=r.headers["x-ms-continuation"]
        print("continuation token: "+token_continuation)
        r = adls_request(list_dir,file_system_name,file_dir,storage_account_name,storage_account_key,api_version,token_continuation)
        print(r)
Test result:
I have 6000 files in the directory, and note that if you get the continuation token(if files in directory are more than 5000, otherwise no token returned), you should encode the token, then add the encoded token to the url.
This is just a simple test, and please feel free to change the code to meet your need.

How to change records on BigQuery from different rows to one row?

I have done inserting values into BigQuery from JSON file but my JSON file have multiple objects.
Eg:
{"A":{"queryID": "newId", "newCol": "newCol"}}
{"B":{"date":"2013-05-31 20:56:41", "device":"pc"}}
{"C":{"keyword": ["new", "ict"]}}
The results on BigQuery is one row per object, with empty rows for other objects. How do I do to make it all in one row with different columns?
def loadTable(http, service):
    """Start a BigQuery multipart load job and poll until it completes.

    `http` is an authorized httplib2.Http instance; `service` is a built
    BigQuery v2 client.  Relies on module-level projectId/datasetId/tableId.
    The multipart body carries the job configuration (part 1) and the raw
    newline-delimited JSON payload (part 2).

    Fixes versus the original: the sample file is closed via a context
    manager, print statements are py3-compatible calls, and `autodetect`
    is sent as a JSON boolean (the string "True" is not a valid boolean).
    """
    url = ("https://www.googleapis.com/upload/bigquery/v2/projects/"
           + projectId + "/jobs")
    newresource = ('--xxx\n' +
                   'Content-Type: application/json; charset=UTF-8\n' + '\n' +
                   '{\n' +
                   ' "configuration": {\n' +
                   ' "load": {\n' +
                   ' "sourceFormat": "NEWLINE_DELIMITED_JSON",\n' +
                   ' "autodetect": true,\n' +
                   ' "destinationTable": {\n' +
                   ' "projectId": "' + projectId + '",\n' +
                   ' "datasetId": "' + datasetId + '",\n' +
                   ' "tableId": "' + tableId + '"\n' +
                   ' }\n' +
                   ' }\n' +
                   ' }\n' +
                   '}\n' +
                   '--xxx\n' +
                   'Content-Type: application/octet-stream\n' +
                   '\n')
    # Context manager: the original leaked the file handle.
    with open('samplejson.json', 'r') as f:
        newresource += f.read().replace('\n', '\r\n')
    newresource += '--xxx--\n'
    print(newresource)
    headers = {'Content-Type': 'multipart/related; boundary=xxx'}
    resp, content = http.request(url, method="POST", body=newresource, headers=headers)
    if resp.status != 200:
        print(resp)
        print(content)
        return
    jsonResponse = json.loads(content)
    jobReference = jsonResponse['jobReference']['jobId']
    # Poll the job every 10 seconds until BigQuery reports DONE.
    while True:
        getJob = service.jobs().get(projectId=projectId, jobId=jobReference).execute()
        currentStatus = getJob['status']['state']
        if currentStatus == 'DONE':
            print("Done Loading!")
            return
        print('Waiting to load...')
        print('Current status: ' + currentStatus)
        print(time.ctime())
        time.sleep(10)
def main(argv):
    """Authorize via a service-account key file and kick off the load job."""
    scopes = ['https://www.googleapis.com/auth/bigquery']
    credentials = ServiceAccountCredentials.from_json_keyfile_name("samplecredentials.json")
    credentials = credentials.create_scoped(scopes)
    authorized_http = credentials.authorize(httplib2.Http())
    service = build('bigquery', 'v2', http=authorized_http)
    loadTable(authorized_http, service)
I would recommend doing that final "assembling" into one row using below type of query (BigQuery Standard SQL)
#standardSQL
SELECT
ARRAY_AGG(A IGNORE NULLS) AS A,
ARRAY_AGG(B IGNORE NULLS) AS B,
ARRAY_AGG(C IGNORE NULLS) AS C
FROM `yourtable`
If you would have some extra field that would indicate which rows to combine/group together into one - for example some id - the query can look as below
#standardSQL
SELECT
id,
ARRAY_AGG(A IGNORE NULLS) AS A,
ARRAY_AGG(B IGNORE NULLS) AS B,
ARRAY_AGG(C IGNORE NULLS) AS C
FROM `yourtable`
GROUP BY id

Upload Files to S3 using POST from different servers with python

I want to achieve the following. I have 10 log servers and 10 web servers in different locations. Every location has a pair, i.e. 1 log server and 1 web server. In S3, for every pair of servers, there is a bucket for storing logs, like Location 1, Location 2, Location 3.
I want to upload logs from every location to its respective bucket. I can do that with the AWS CLI, but for that I would have to create an IAM user for every location, attach an S3 policy, and put access keys and secret keys in every location. I do not want this approach.
Instead, i was thinking that i would embed my access keys and secret access keys in every web server and then using AWS Signature version 4 , i would generate a signature for every file with respect to its bucket and upload to S3.
import sys, os, base64, datetime, hashlib, hmac
from cassandra.cluster import Cluster
from datetime import datetime, timedelta
import json
import requests
import logging
LOGGER = None
def sign(secret_key, msg):
    """HMAC-SHA256 of *msg* (str) keyed with *secret_key* (bytes); returns raw digest bytes."""
    mac = hmac.new(secret_key, msg.encode("utf-8"), hashlib.sha256)
    return mac.digest()
def getSignatureKey(secret_key, date_stamp, regionName, serviceName):
    """Derive the AWS SigV4 signing key by chaining HMACs over date, region,
    service and the fixed 'aws4_request' terminator."""
    key = ('AWS4' + secret_key).encode('utf-8')
    for part in (date_stamp, regionName, serviceName, 'aws4_request'):
        key = sign(key, part)
    return key
def S3UploadPolicy(date_stampfiso, customer_name, amz_date, credential):
    """Build the S3 POST policy document (a JSON string) for a signed upload."""
    conditions = [
        {'bucket': customer_name},
        {'acl': 'private'},
        {'success_action_status': '201'},
        ['starts_with', '', ''],
        {'x-amz-algorithm': 'AWS4-HMAC-SHA256'},
        {'x-amz-credential': credential},
        {'x-amz-date': amz_date},
    ]
    return json.dumps({'expiration': date_stampfiso, 'conditions': conditions})
def S3Upload(access_key, date_stamp, date_stampfiso, customer_name, amz_date, regionName, secret_key, serviceName, filename):
    """PUT an object to S3, signing the request with AWS Signature Version 4.

    Bug fixes versus the original:
      * the signed Host header was the literal placeholder
        '<bucketname>.s3.amazonaws.com' while the request went to the
        customer bucket - the signed host must match the requested host;
      * 'x-amz-content-sha256' was listed in signed_headers but missing from
        canonical_headers, which guarantees a SignatureDoesNotMatch error;
      * b64encode/sha256 were called on str (TypeError on Python 3);
      * py2-only print statements replaced with py3-compatible calls.
    """
    host = customer_name + '.s3.amazonaws.com'
    endpoint_url = 'http://' + customer_name + '.s3.amazonaws.com'
    content_type = 'text/plain'
    method = 'PUT'
    # NOTE(review): for a PUT, canonical_uri should be the URL-encoded object
    # key and canonical_querystring the actual (empty) query string; the
    # original used the bucket name / the filename here - confirm against
    # the SigV4 canonical-request specification.
    canonical_uri = '/' + customer_name
    canonical_querystring = filename
    credential_scope = date_stamp + '/' + regionName + '/' + serviceName + '/' + 'aws4_request'
    signed_headers = 'content-type;host;x-amz-content-sha256;x-amz-date'
    policy = S3UploadPolicy(date_stampfiso, customer_name, amz_date, credential_scope)
    # b64encode needs bytes on Python 3; the policy is hashed in its
    # base64-encoded form, as sent.
    policyBase64 = base64.b64encode(policy.encode('utf-8'))
    payload_hash = hashlib.sha256(policyBase64).hexdigest()
    # Every header named in signed_headers must appear here, in order.
    canonical_headers = ('content-type:' + content_type + '\n'
                         + 'host:' + host + '\n'
                         + 'x-amz-content-sha256:' + payload_hash + '\n'
                         + 'x-amz-date:' + amz_date + '\n')
    canonical_request = (method + '\n' + canonical_uri + '\n'
                         + canonical_querystring + '\n' + canonical_headers
                         + '\n' + signed_headers + '\n' + payload_hash)
    algorithm = 'AWS4-HMAC-SHA256'
    string_to_sign = (algorithm + '\n' + amz_date + '\n' + credential_scope + '\n'
                      + hashlib.sha256(canonical_request.encode('utf-8')).hexdigest())
    signing_key = getSignatureKey(secret_key, date_stamp, regionName, serviceName)
    signature = hmac.new(signing_key, string_to_sign.encode('utf-8'), hashlib.sha256).hexdigest()
    authorization_header = (algorithm + ' ' + 'Credential=' + access_key + '/'
                            + credential_scope + ', ' + 'SignedHeaders='
                            + signed_headers + ', ' + 'Signature=' + signature)
    print(authorization_header)
    headers = {
        'content-type': content_type,
        'x-amz-date': amz_date,
        'authorization': authorization_header,
        'x-amz-content-sha256': payload_hash
    }
    try:
        print('\nBEGIN REQUEST++++++++++++++++++++++++++++++++++++')
        print('Request URL = ' + endpoint_url)
        r = requests.put(endpoint_url, headers=headers)
        print('\nRESPONSE++++++++++++++++++++++++++++++++++++')
        print('Response code: %d\n' % r.status_code)
        print(r.text)
    except Exception as e:
        LOGGER.error(e)
def main():
    """Configure logging, compute the SigV4 timestamps and upload one file."""
    global LOGGER
    access_key = 'xxxxxxxxxxxx'
    secret_key = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
    customer_name = 'abc-test2'
    regionName = 'us-west-2'
    serviceName = 's3'
    filename = '/home/abc/abc.pem'
    log_format = ('%(asctime)s | %(levelname)s | %(module)s [%(process)d %(thread)d] | '
                  '[%(filename)s:%(lineno)s - %(funcName)s() ] | \n%(message)s')
    logging.basicConfig(level=logging.DEBUG, format=log_format)
    LOGGER = logging.getLogger(__name__)
    ## Calculate Date: amz_date/date_stamp sign the request; the policy
    ## expiration is 24 hours from now.
    now = datetime.utcnow()
    amz_date = now.strftime('%Y%m%dT%H%M%SZ')
    date_stamp = now.strftime('%Y%m%d')
    expiry = datetime.now() + timedelta(hours=24)
    amz_date_future = expiry.strftime('%Y%m%dT%H%M%SZ')
    date_stampfiso = expiry.isoformat()
    S3Upload(access_key, date_stamp, date_stampfiso, customer_name, amz_date,
             regionName, secret_key, serviceName, filename)


if __name__ == '__main__':
    main()

Arcgis Server Write properties of all services to a CSV file

I have 36 services running on ArcGIS Server and would like to export all properties for each service into a CSV file. I managed to write the code with the help of the ESRI Helpdesk http://resources.arcgis.com/en/help/main/10.2/index.html#//0154000005wt000000 adding the properties "maxImageHeight" and "maxImageWidth" to the request. However, when I run the code it starts to work and writes the properties of the first 22 services, but then it stops suddenly and returns
Traceback (most recent call last):
File "D:\SCRIPTS\Py-scripts\ArcPy\AGS - ArcPy\AGS_service_report_as_csv2.py", line 436, in
sys.exit(main(sys.argv[1:]))
File "D:\SCRIPTS\Py-scripts\ArcPy\AGS - ArcPy\AGS_service_report_as_csv2.py", line 201, in main
+ "NA" + "\n"
KeyError: 'maxImageHeight'
It's odd because it already delivered the "maxImageHeight" property for the first services.
Code:
# Reads the following properties from services and writes them to a comma-delimited file:
# ServiceName, Folder, Type, Status, Min Instances, Max Instances, Max Wainting Time,Max Startup Time,Max Idle Time,Max Usage Time, KML,
# WMS, WFS, WCS, Max Records, Cluster, Cache Directory, Jobs Directory, Output Directory
# For HTTP calls
import httplib, urllib, json
# For system tools
import sys
# For reading passwords without echoing
import getpass
def main(argv=None):
    """Dump the properties of every ArcGIS Server service to a CSV file.

    Prompts for admin credentials, the server name and the output path,
    walks every folder (except 'System') plus the root, and writes one CSV
    row per GeometryServer / SearchServer / MapServer service.

    Review fixes versus the original script:
      * 'maxImageHeight'/'maxImageWidth' and the other per-service keys are
        read with dict.get(..., "NA"), so a service that does not define a
        property no longer aborts the whole run with KeyError (the reported
        crash after 22 services);
      * two 'Extract the ... properties' comment lines had lost their
        leading '#', which made the file a syntax error;
      * the three near-identical per-type blocks are factored into helpers
        and HTTP connections are always closed.
    """
    # Ask for admin/publisher user name and password
    username = raw_input("Enter user name: ")
    password = getpass.getpass("Enter password: ")
    # Ask for server name & port
    serverName = raw_input("Enter server name: ")
    serverPort = 6080
    # Get the location and the name of the file to be created
    resultFile = raw_input("Output File (get the location and the name of the file to be created): ")
    # Get a token and the shared request parameters
    token = getToken(username, password, serverName, serverPort)
    params = urllib.urlencode({'token': token, 'f': 'json'})
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}

    data = _post_admin(serverName, serverPort, "/arcgis/admin/services/", params, headers)
    if data is None:
        print("Could not read folder information.")
        return
    if not assertJsonSuccess(data):
        print("Error when reading server information. " + str(data))
        return
    print("Processed server information successfully. Now processing folders...")

    folders = json.loads(data)["folders"]
    folders.remove("System")  # skip the built-in System folder
    folders.append("")        # "" stands for the root folder

    serviceResultFile = open(resultFile, 'w')
    try:
        serviceResultFile.write(
            "ServiceName,Folder,Type,MaxImageHeight,MaxImageWidth,Status,"
            "Min Instances,Max Instances,Max Wainting Time,Max Startup Time,"
            "Max Idle Time,Max Usage Time,FeatureService,kml,wms,wfs,wcs,"
            "Max Records,Cluster,Cache Directory,Jobs Directory,"
            "Output Directory" + "\n")
        # Loop on the found folders, discover the services and write the rows
        for folder in folders:
            if folder != "":
                folder += "/"
            data = _post_admin(serverName, serverPort,
                               "/arcgis/admin/services/" + folder, params, headers)
            if data is None:
                print("Could not read folder information.")
                return
            if not assertJsonSuccess(data):
                print("Error when reading folder information. " + str(data))
                continue
            print("Processed folder information successfully. Now processing services...")
            for item in json.loads(data)['services']:
                row = _service_row(serverName, serverPort, folder, item, params, headers)
                if row is not None:
                    serviceResultFile.write(row)
    finally:
        serviceResultFile.close()


def _post_admin(serverName, serverPort, url, params, headers):
    """POST to the admin endpoint; return the body, or None on a non-200 status."""
    httpConn = httplib.HTTPConnection(serverName, serverPort)
    try:
        httpConn.request("POST", url, params, headers)
        response = httpConn.getresponse()
        if response.status != 200:
            return None
        return response.read()
    finally:
        httpConn.close()


def _service_info(serverName, serverPort, folder, item, params, headers, suffix=""):
    """Fetch and decode /arcgis/admin/services/<folder><name>.<type><suffix>."""
    url = "/arcgis/admin/services/%s%s.%s%s" % (folder, item["serviceName"], item["type"], suffix)
    return json.loads(_post_admin(serverName, serverPort, url, params, headers))


def _service_row(serverName, serverPort, folder, item, params, headers):
    """Build one CSV line for a supported service type, or None to skip it."""
    if item["type"] not in ("GeometryServer", "SearchServer", "MapServer"):
        return None
    jsonOBJ = _service_info(serverName, serverPort, folder, item, params, headers)
    jsonOBJStatus = _service_info(serverName, serverPort, folder, item, params, headers, "/status")
    props = jsonOBJ["properties"]
    # Not every service type defines every property: read defensively so one
    # service cannot kill the whole report (the original KeyError).
    common = [jsonOBJ["serviceName"], folder, item["type"],
              props.get("maxImageHeight", "NA"), props.get("maxImageWidth", "NA"),
              jsonOBJStatus['realTimeState'],
              jsonOBJ.get("minInstancesPerNode", "NA"),
              jsonOBJ.get("maxInstancesPerNode", "NA")]
    times = [jsonOBJ.get("maxWaitTime", "NA"), jsonOBJ.get("maxStartupTime", "NA"),
             jsonOBJ.get("maxIdleTime", "NA"), jsonOBJ.get("maxUsageTime", "NA")]
    if item["type"] in ("GeometryServer", "SearchServer"):
        fields = common + times + ["NA", "NA", "NA", "NA", "NA", "NA",
                                   jsonOBJ.get("clusterName", "NA"), "NA", "NA", "NA"]
        return ",".join(str(v) for v in fields) + "\n"
    # MapServer: check for a map cache
    if props.get("isCached") == "true":
        cacheDir = str(props.get("cacheDir", "NA"))
    else:
        cacheDir = props.get("isCached", "NA")
    if len(jsonOBJ["extensions"]) == 0:
        fields = common + ["FeatServHolder", "Disabled", "Disabled",
                           props.get("maxRecordCount", "NA"),
                           jsonOBJ.get("clusterName", "NA"),
                           cacheDir, "NA", props.get("outputDir", "NA")]
        return ",".join(str(v) for v in fields) + "\n"
    # Extract the FeatureServer/KML/WMS/WFS/WCS extension enabled flags
    # ("NA" when the extension is absent); keep the first occurrence like
    # the original list comprehensions did.
    enabled = {}
    for ext in jsonOBJ["extensions"]:
        enabled.setdefault(ext["typeName"], str(ext["enabled"]))
    fields = common + times + [enabled.get('FeatureServer', "NA"),
                               enabled.get('KmlServer', "NA"),
                               enabled.get('WMSServer', "NA"),
                               enabled.get('WFSServer', "NA"),
                               enabled.get('WCSServer', "NA"),
                               props.get("maxRecordCount", "NA"),
                               jsonOBJ.get("clusterName", "NA"),
                               cacheDir, "NA", props.get("outputDir", "NA")]
    return ",".join(str(v) for v in fields) + "\n"
def getToken(username, password, serverName, serverPort):
    """Fetch an ArcGIS Server admin token.

    Returns the token string, or None when the HTTP call fails or the server
    returns a JSON error object.  Fixes versus the original: the connection
    is closed even if the request raises, and the print statement is a
    py3-compatible call.
    """
    # Token URL is typically http://server[:port]/arcgis/admin/generateToken
    tokenURL = "/arcgis/admin/generateToken"
    params = urllib.urlencode({'username': username, 'password': password, 'client': 'requestip', 'f': 'json'})
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    httpConn = httplib.HTTPConnection(serverName, serverPort)
    try:
        httpConn.request("POST", tokenURL, params, headers)
        response = httpConn.getresponse()
        if response.status != 200:
            print("Error while fetching tokens from admin URL. Please check the URL and try again.")
            return
        data = response.read()
    finally:
        httpConn.close()
    # Check that data returned is not an error object
    if not assertJsonSuccess(data):
        return
    # Extract the token from it
    return json.loads(data)['token']
# A function that checks that the input JSON object
# is not an error object.
def assertJsonSuccess(data):
    """Return False (after logging) if *data* decodes to a JSON error object, else True.

    Fix versus the original: the py2-only print statement is now a
    py3-compatible print() call.
    """
    obj = json.loads(data)
    if 'status' in obj and obj['status'] == "error":
        print("Error: JSON object returns an error. " + str(obj))
        return False
    return True
if __name__ == "__main__":
    # Fix: removed the stray trailing backquote after the call - it was a
    # copy/paste artifact that made the script a syntax error.
    sys.exit(main(sys.argv[1:]))
I've managed to get past the error by adding bufsize
#Create the summary file of services
bufsize = 0
serviceResultFile = open(resultFile,'w',bufsize)
Now it's complaining about how to handle the end of the file. Still working on this bit
sys.exit(main(sys.argv[1:]))
File "D:/Cognos_Testing/Esri/python/get_mapsrv_Stats.py", line 378, in main
+ str(jsonOBJ["properties"]["outputDir"]) + "\n"
KeyError: 'maxImageHeight'

Categories