How do I remove a node from Cloudera Manager using cm_api (Python)?

I am trying to remove a host from Cloudera Manager 5.3.2 using cm_api 9.0.0.
I tried
api.get_cloudera_manager().hosts_decommission([host])
api.get_cluster("cluster").remove_host(host)
but got an error for remove_host():
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.6/site-packages/cm_api/endpoints/clusters.py", line 218, in remove_host
return self._delete("hosts/" + hostId, ApiHostRef, api_version=3)
File "/usr/lib/python2.6/site-packages/cm_api/endpoints/types.py", line 352, in _delete
api_version)
File "/usr/lib/python2.6/site-packages/cm_api/endpoints/types.py", line 380, in _call
api_version)
File "/usr/lib/python2.6/site-packages/cm_api/endpoints/types.py", line 139, in call
ret = method(path, params=params)
File "/usr/lib/python2.6/site-packages/cm_api/resource.py", line 124, in delete
return self.invoke("DELETE", relpath, params)
File "/usr/lib/python2.6/site-packages/cm_api/resource.py", line 63, in invoke
headers=headers)
File "/usr/lib/python2.6/site-packages/cm_api/http_client.py", line 161, in execute
raise self._exc_class(ex)
cm_api.api_client.ApiException: ip-10-0-8-187.ec2.internal still has roles assigned to it. (error 400)
What is the right sequence to remove a host from a cluster, or is there a single cm_api command to do this?
Thanks

This Python script removes hosts from the cluster. The steps are:
1. Stop and decommission all roles on the host
2. Remove the roles from the host, identifying and deleting them one by one
3. Remove the host from the cluster
4. Remove the host from Cloudera Manager
The script removes hosts from a Cloudera-managed cluster running in AWS. It is intended to scale down worker nodes (NodeManager role) and gateway roles from the cluster once there is no resource demand.
You can adapt the script to your environment.
#!/bin/python
import os
import requests
import json
import boto3
import time
from requests.auth import HTTPBasicAuth

os.environ["AWS_ACCESS_KEY_ID"] = "ACCESS_KEY"
os.environ["AWS_SECRET_ACCESS_KEY"] = "SECRET_ACCESS_KEY"
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
region = 'us-east-1'

# Look up this instance's ID and hostname from the EC2 metadata service
metadata = requests.get(url='http://169.254.169.254/latest/meta-data/instance-id')
instance_id = metadata.text
host = requests.get(url='http://169.254.169.254/latest/meta-data/hostname')
host_id = host.text

username = 'admin'
password = 'admin'
cluster_name = 'cluster001'
scm_protocol = 'http'
scm_host = 'host.compute-1.amazonaws.com'
scm_port = '7180'
scm_api = 'v17'

client = boto3.client('autoscaling', region_name=region)
response = client.describe_auto_scaling_instances(InstanceIds=[instance_id])
state = response['AutoScalingInstances'][0]['LifecycleState']
print "vm is in " + state

if state == 'Terminating:Wait':
    print "host decommission started"
    ## decommission host
    service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/cm/commands/hostsDecommission'
    #service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/cm/hostsRecommission'
    #service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/cm/commands/hostsStartRoles'
    print service_url
    headers = {'content-type': 'application/json'}
    req_body = {"items": [host_id]}
    print req_body
    req = requests.post(url=service_url, auth=HTTPBasicAuth(username, password), data=json.dumps(req_body), headers=headers)
    print req.text
    time.sleep(120)

    ## delete the roles on the host, one by one
    api_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/hosts/' + host_id
    req = requests.get(api_url, auth=HTTPBasicAuth(username, password))
    a = json.loads(req.content)
    for i in a['roleRefs']:
        scm_uri = '/api/' + scm_api + '/clusters/' + cluster_name + '/services/' + i['serviceName'] + '/roles/' + i['roleName']
        scm_url = scm_protocol + '://' + scm_host + ':' + scm_port + scm_uri
        print scm_url
        req = requests.delete(scm_url, auth=HTTPBasicAuth(username, password))
        print req.text
        time.sleep(10)

    ## remove host from cluster
    service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/clusters/' + cluster_name + '/hosts/' + host_id
    print service_url
    req = requests.delete(service_url, auth=HTTPBasicAuth(username, password))
    time.sleep(10)

    ## remove host from cloudera manager
    os.system("/etc/init.d/cloudera-scm-agent stop")
    service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/hosts/' + host_id
    print service_url
    req = requests.delete(service_url, auth=HTTPBasicAuth(username, password))
    print req.text
    time.sleep(10)

    ## refresh cluster configuration
    service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/clusters/' + cluster_name + '/commands/refresh'
    print service_url
    req = requests.post(service_url, auth=HTTPBasicAuth(username, password))
    print req.text
    time.sleep(10)

    ## deploy client configuration
    service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/clusters/' + cluster_name + '/commands/deployClientConfig'
    print service_url
    req = requests.post(service_url, auth=HTTPBasicAuth(username, password))
    print req.text
    time.sleep(10)
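For reference, the same sequence can also be written against the cm_api client itself instead of raw REST calls. This is a minimal, untested sketch (the CM host, cluster name, host ID, and credentials are placeholders); the key point for the original question is that every role on the host must be deleted before remove_host() succeeds, which is exactly what the 400 error above complains about.

from cm_api.api_client import ApiResource

api = ApiResource('cm-host.example.com', username='admin', password='admin')  # placeholder CM host
cluster = api.get_cluster('cluster001')                                       # placeholder cluster name
host = api.get_host('some-host-id')                                           # the host to remove

cm = api.get_cloudera_manager()

# 1. Decommission everything running on the host and wait for the command
cm.hosts_decommission([host.hostname]).wait()

# 2. Delete every role still assigned to the host; this is what the
#    "still has roles assigned to it" error is about
for ref in host.roleRefs:
    cluster.get_service(ref.serviceName).delete_role(ref.roleName)

# 3. With no roles left, the host can be removed from the cluster...
cluster.remove_host(host.hostId)

# 4. ...and finally deleted from Cloudera Manager
api.delete_host(host.hostId)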

Related

Amazon Selling Partner API Signature

import datetime, hashlib, hmac
import requests

method = 'GET'
service = 'execute-api'
user_agent = 'My Selling Tool/2.0 (Language=Python3; Platform=Windows/10)'
region = 'us-east-1'
host = 'sellingpartnerapi-na.amazon.com'
endpoint = 'https://sellingpartnerapi-na.amazon.com'
request_parameters = '/fba/inbound/v0/shipments/shipmentId1/preorder/confirm?MarketplaceId=ATVPDKIKX0DER&NeedByDate=2020-10-10'
access_token = 'xxx'
access_key = 'xxx'
secret_key = 'xxx'

def sign(key, msg):
    return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()

def getSignatureKey(key, dateStamp, regionName, serviceName):
    kDate = sign(('AWS4' + key).encode('utf-8'), dateStamp)
    kRegion = sign(kDate, regionName)
    kService = sign(kRegion, serviceName)
    kSigning = sign(kService, 'aws4_request')
    return kSigning

t = datetime.datetime.utcnow()
amzdate = t.strftime('%Y%m%dT%H%M%SZ')
datestamp = t.strftime('%Y%m%d')
canonical_uri = '/'
canonical_querystring = request_parameters
canonical_headers = 'host:' + host + '\n' + 'user-agent:' + user_agent + '\n' + 'x-amz-access-token:' + access_token + '\n' + 'x-amz-date:' + amzdate
signed_headers = 'host;user-agent;x-amz-access-token'
payload_hash = hashlib.sha256(('').encode('utf-8')).hexdigest()
canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + '\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash
algorithm = 'AWS4-HMAC-SHA256'
credential_scope = datestamp + '/' + region + '/' + service + '/' + 'aws4_request'
string_to_sign = algorithm + '\n' + amzdate + '\n' + credential_scope + '\n' + hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()
signing_key = getSignatureKey(secret_key, datestamp, region, service)
signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'), hashlib.sha256).hexdigest()
authorization_header = algorithm + ' ' + 'Credential=' + access_key + '/' + credential_scope + ', ' + 'SignedHeaders=' + signed_headers + ', ' + 'Signature=' + signature
headers = {'Authorization': authorization_header,
           'host': host,
           'user-agent': user_agent,
           'x-amz-access-token': access_token,
           'x-amz-date': amzdate}
request_url = endpoint + canonical_querystring
r = requests.get(request_url, headers=headers)
I followed the SP-API Signature Version 4 steps but got an "InvalidSignature" response.
The error occurs at Step 4, "Create and sign your request".
I have had no idea for 2 months; I guess the problem is in the headers?
Does my code need to be corrected, or is the problem elsewhere?
You can use the aws-requests-auth package (pip install aws-requests-auth):
import requests
from aws_requests_auth.aws_auth import AWSRequestsAuth

# Wrapped in a function so the snippet's bare "return" statements are valid;
# "config" is whatever config list the caller loads elsewhere.
def aws_auth_sign(config):
    AWS_AUTH = AWSRequestsAuth(
        aws_access_key=config[0]["aws_access_key"],
        aws_secret_access_key=config[0]["aws_secret_access_key"],
        aws_host=config[0]["aws_host"],
        aws_region=config[0]["aws_region"],
        aws_service=config[0]["aws_service"],
    )
    AUTH_URL = config[0]["AUTH_URL"]
    REFRESH_TOKEN = config[0]["REFRESH_TOKEN"]
    CLIENT_ID = config[0]["CLIENT_ID"]
    CLIENT_SECRET = config[0]["CLIENT_SECRET"]
    AUTH_BODY = {
        "grant_type": "refresh_token",
        "refresh_token": REFRESH_TOKEN,
        "client_id": CLIENT_ID,
        "client_secret": CLIENT_SECRET,
    }
    headers = {
        "content-type": "application/x-www-form-urlencoded;charset=UTF-8",
        "host": "sellingpartnerapi-fe.amazon.com",
    }
    # Exchange the LWA refresh token for an access token
    auth_login = requests.post(AUTH_URL, headers=headers, data=AUTH_BODY)
    response = auth_login.json()
    response_code = auth_login.status_code
    if response_code == 200:
        print("====== AWS AUTH SIGN ====== OK", response_code)
        return response["access_token"], AWS_AUTH, config[0]
    else:
        print("AUTH SIGN Error", response)
        return False
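Once the token and auth object are in hand, the call itself is just a signed requests call. A minimal usage sketch (the endpoint path below is illustrative):

access_token, aws_auth, cfg = aws_auth_sign(config)

# aws-requests-auth signs the request with Signature Version 4; SP-API
# additionally wants the LWA access token in the x-amz-access-token header.
r = requests.get(
    "https://sellingpartnerapi-fe.amazon.com/sellers/v1/marketplaceParticipations",
    auth=aws_auth,
    headers={"x-amz-access-token": access_token},
)
print(r.status_code, r.json())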

Python - Add .gzip attachment to email

If I use my script, this error always comes up:
IOError: [Errno 2] No such file or directory: "'/folder/my/20200114-013815/backup.sql.gz'"
Why can't the file be found? It's on the path.
Or do I have to add a gzip encoding or something to attachment.add_header? I don't know what's wrong; it's the first time I've tried to add an attachment in Python.
Thanks
import os
import time
import pipes
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

DB_HOST = 'XXXXXXX'
DB_USER = 'XXXXXXX'
DB_USER_PASSWORD = 'XXXXXXX'
DB_NAME = 'XXXXXXX'
BACKUP_PATH = '/folder/my'
DATETIME = time.strftime('%Y%m%d-%H%M%S')
TODAYBACKUPPATH = BACKUP_PATH + '/' + DATETIME

try:
    os.stat(TODAYBACKUPPATH)
except OSError:
    os.mkdir(TODAYBACKUPPATH)

# If DB_NAME is a file, treat it as a list of databases, one per line
if os.path.exists(DB_NAME):
    file1 = open(DB_NAME)
    multi = 1
else:
    multi = 0

if multi:
    in_file = open(DB_NAME, "r")
    flength = len(in_file.readlines())
    in_file.close()
    p = 1
    dbfile = open(DB_NAME, "r")
    while p <= flength:
        db = dbfile.readline()
        db = db[:-1]
        dumpcmd = "mysqldump -h " + DB_HOST + " -u " + DB_USER + " -p" + DB_USER_PASSWORD + " " + db + " > " + pipes.quote(TODAYBACKUPPATH) + "/" + db + ".sql"
        os.system(dumpcmd)
        gzipcmd = "gzip " + pipes.quote(TODAYBACKUPPATH) + "/" + db + ".sql"
        os.system(gzipcmd)
        p = p + 1
    dbfile.close()
else:
    db = DB_NAME
    dumpcmd = "mysqldump -h " + DB_HOST + " -u " + DB_USER + " -p" + DB_USER_PASSWORD + " " + db + " > " + pipes.quote(TODAYBACKUPPATH) + "/" + db + ".sql"
    os.system(dumpcmd)
    gzipcmd = "gzip " + pipes.quote(TODAYBACKUPPATH) + "/" + db + ".sql"
    os.system(gzipcmd)

msg = MIMEMultipart()
message = "Test"
password = "XXXXXXXX"
msg['From'] = "XXXXXXXX"
msg['To'] = "XXXXXXXX"
msg['Subject'] = "Test"
filename = "'" + TODAYBACKUPPATH + "/backup.sql.gz'"
f = file(filename)
msg.attach(MIMEText(message, 'plain'))
attachment = MIMEText(f.read())
attachment.add_header('Content-Disposition', 'attachment', filename=filename)
msg.attach(attachment)
server = smtplib.SMTP('XXXXXXXX', 587)
server.starttls()
server.login(msg['From'], password)
server.sendmail(msg['From'], msg['To'], msg.as_string())
server.quit()
Are you using the absolute path here? If not, try using it.
Your problem lies in your string concatenation, here:
filename = "'" + TODAYBACKUPPATH + "/backup.sql.gz'"
You need to remove the single quotes that you're adding - why are you adding them?
Change it to this:
filename = TODAYBACKUPPATH + "/backup.sql.gz"
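Beyond the path fix, note that a .gz archive is binary data, so reading it into MIMEText can corrupt the attachment. A safer sketch using MIMEBase with base64 encoding (same msg and TODAYBACKUPPATH as above):

import os
from email.mime.base import MIMEBase
from email import encoders

filename = TODAYBACKUPPATH + "/backup.sql.gz"   # no extra quotes
attachment = MIMEBase('application', 'gzip')
with open(filename, 'rb') as f:                 # read the archive as bytes
    attachment.set_payload(f.read())
encoders.encode_base64(attachment)              # make the binary payload mail-safe
attachment.add_header('Content-Disposition', 'attachment',
                      filename=os.path.basename(filename))
msg.attach(attachment)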

Initiate multipart upload to S3 with Signature Version 4: Method Not Allowed, err.code 405

I have tried everything. It works well for GET and PUT requests, but with POST (to initiate a multipart upload) it throws errors such as 405 Method Not Allowed or a signature mismatch. Any help is appreciated; if anyone has any idea, please throw some hints.
If anyone can edit this Python code to get it working, I would really appreciate that. Thanks in advance.
import sys, os, base64, datetime, hashlib, hmac
import requests  # pip install requests

method = 'POST'
service = 's3'
host = 'freemedianews.s3.amazonaws.com'
region = 'ap-southeast-1'
endpoint = 'https://freemedianews.s3-ap-southeast-1.amazonaws.com'
request_parameters = ''

def sign(key, msg):
    return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()

def getSignatureKey(key, dateStamp, regionName, serviceName):
    kDate = sign(('AWS4' + key).encode('utf-8'), dateStamp)
    kRegion = sign(kDate, regionName)
    kService = sign(kRegion, serviceName)
    kSigning = sign(kService, 'aws4_request')
    return kSigning

access_key = '*******'
secret_key = '*******'
if access_key is None or secret_key is None:
    print 'No access key is available.'
    sys.exit()

t = datetime.datetime.utcnow()
amzdate = '20180621T151517Z'  # hard-coded for testing; normally t.strftime('%Y%m%dT%H%M%SZ')
datestamp = '20180621'        # date w/o time, used in credential scope
canonical_uri = '/a/message/1200/1200.png'
content_type = "multipart/form-data"
canonical_querystring = request_parameters
payload_hash = hashlib.sha256('').hexdigest()
canonical_headers = 'content-type:' + content_type + '\n' + 'host:' + host + '\n' + 'x-amz-content-sha256:' + payload_hash + '\n' + 'x-amz-date:' + amzdate + '\n'
signed_headers = 'content-type;host;x-amz-content-sha256;x-amz-date'
canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + '\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash
algorithm = 'AWS4-HMAC-SHA256'
credential_scope = datestamp + "/" + region + '/' + service + '/' + 'aws4_request'
credential_scope_final = access_key + "/" + datestamp + "/" + region + '/' + service + '/' + 'aws4_request'
string_to_sign = algorithm + '\n' + amzdate + '\n' + credential_scope + '\n' + hashlib.sha256(canonical_request).hexdigest()
signing_key = getSignatureKey(secret_key, datestamp, region, service)
print "signing_key -----" + signing_key
signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'), hashlib.sha256).hexdigest()
print "signature -----" + signature
authorization_header = algorithm + ' ' + 'Credential=' + credential_scope_final + ', ' + 'SignedHeaders=' + signed_headers + ', ' + 'Signature=' + signature
headers = {'Content-Type': content_type, 'Host': host, 'X-Amz-Content-Sha256': payload_hash, 'X-Amz-Date': amzdate, 'Authorization': authorization_header}
print authorization_header + "---this"
request_url = endpoint + canonical_uri
r = requests.post(request_url, data=request_parameters, headers=headers)
print r
print '\nRESPONSE++++++++++++++++++++++++++++++++++++'
print 'Response code: %d\n' % r.status_code
print r.text
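For what it's worth, one likely cause of the 405: S3's CreateMultipartUpload operation is POST /{key}?uploads, and the code above never adds the uploads sub-resource, so S3 sees a plain POST to an object URL, which it does not allow. A sketch of the change, with the rest of the signing flow kept as above:

# The 'uploads' sub-resource must appear both in the canonical query
# string (as 'uploads=') and on the request URL itself.
canonical_querystring = 'uploads='
# ... rebuild canonical_request, string_to_sign, and signature as above ...
request_url = endpoint + canonical_uri + '?uploads'
r = requests.post(request_url, headers=headers)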

Upload Files to S3 using POST from different servers with python

I want to achieve the following. I have 10 log servers and 10 web servers in different locations. Every location has a pair, i.e. 1 log server and 1 web server. In S3, for every pair of servers there is a bucket for storing logs, like Location 1, Location 2, Location 3.
I want to upload logs from every location to its respective bucket. I can do that with awscli, but for that I have to create an IAM user for every location, attach an S3 policy, and put access keys and secret keys in every location. I do not want this approach.
Instead, I was thinking that I would embed my access keys and secret access keys in every web server and then, using AWS Signature Version 4, generate a signature for every file with respect to its bucket and upload it to S3.
import sys, os, base64, datetime, hashlib, hmac
from datetime import datetime, timedelta
import json
import requests
import logging

LOGGER = None

def sign(secret_key, msg):
    return hmac.new(secret_key, msg.encode("utf-8"), hashlib.sha256).digest()

def getSignatureKey(secret_key, date_stamp, regionName, serviceName):
    kDate = sign(('AWS4' + secret_key).encode('utf-8'), date_stamp)
    kRegion = sign(kDate, regionName)
    kService = sign(kRegion, serviceName)
    kSigning = sign(kService, 'aws4_request')
    return kSigning

def S3UploadPolicy(date_stampfiso, customer_name, amz_date, credential):
    params = {}
    params['expiration'] = date_stampfiso
    params['conditions'] = [{'bucket': customer_name},
                            {'acl': 'private'},
                            {'success_action_status': '201'},
                            ['starts_with', '', ''],
                            {'x-amz-algorithm': 'AWS4-HMAC-SHA256'},
                            {'x-amz-credential': credential},
                            {'x-amz-date': amz_date}]
    params = json.dumps(params)
    return params

def S3Upload(access_key, date_stamp, date_stampfiso, customer_name, amz_date, regionName, secret_key, serviceName, filename):
    host = '<bucketname>.s3.amazonaws.com'
    endpoint_url = 'http://' + customer_name + '.s3.amazonaws.com'
    #content_type = 'multipart/form-data; charset=UTF-8'
    content_type = 'text/plain'
    #method = 'POST'
    method = 'PUT'
    canonical_uri = '/' + customer_name
    canonical_querystring = filename
    credential_scope = date_stamp + '/' + regionName + '/' + serviceName + '/' + 'aws4_request'
    signed_headers = 'content-type;host;x-amz-content-sha256;x-amz-date'
    policy = S3UploadPolicy(date_stampfiso, customer_name, amz_date, credential_scope)
    policyBase64 = base64.b64encode(policy)
    payload_hash = hashlib.sha256(policyBase64).hexdigest()
    # Every header listed in signed_headers must also appear in
    # canonical_headers, including x-amz-content-sha256
    canonical_headers = 'content-type:' + content_type + '\n' + 'host:' + host + '\n' + 'x-amz-content-sha256:' + payload_hash + '\n' + 'x-amz-date:' + amz_date + '\n'
    canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + '\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash
    xamzalgorithm = 'AWS4-HMAC-SHA256'
    algorithm = xamzalgorithm
    string_to_sign = algorithm + '\n' + amz_date + '\n' + credential_scope + '\n' + hashlib.sha256(canonical_request).hexdigest()
    signing_key = getSignatureKey(secret_key, date_stamp, regionName, serviceName)
    signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'), hashlib.sha256).hexdigest()
    authorization_header = algorithm + ' ' + 'Credential=' + access_key + '/' + credential_scope + ', ' + 'SignedHeaders=' + signed_headers + ', ' + 'Signature=' + signature
    print authorization_header
    #sys.exit(1)
    headers = {
        'content-type': content_type,
        'x-amz-date': amz_date,
        'authorization': authorization_header,
        'x-amz-content-sha256': payload_hash
    }
    try:
        print '\nBEGIN REQUEST++++++++++++++++++++++++++++++++++++'
        print 'Request URL = ' + endpoint_url
        r = requests.put(endpoint_url, headers=headers)
        print '\nRESPONSE++++++++++++++++++++++++++++++++++++'
        print 'Response code: %d\n' % r.status_code
        print r.text
    except Exception as e:
        LOGGER.error(e)

def main():
    global LOGGER
    msg = ''
    access_key = 'xxxxxxxxxxxx'
    secret_key = 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
    customer_name = 'abc-test2'
    regionName = 'us-west-2'
    serviceName = 's3'
    filename = '/home/abc/abc.pem'
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s | %(levelname)s | %(module)s [%(process)d %(thread)d] | [%(filename)s:%(lineno)s - %(funcName)s() ] | \n%(message)s')
    LOGGER = logging.getLogger(__name__)
    ## Calculate dates
    t = datetime.utcnow()
    amz_date = t.strftime('%Y%m%dT%H%M%SZ')
    date_stamp = t.strftime('%Y%m%d')
    date_stampf = datetime.now() + timedelta(hours=24)
    amz_date_future = date_stampf.strftime('%Y%m%dT%H%M%SZ')
    date_stampfiso = date_stampf.isoformat()
    S3Upload(access_key, date_stamp, date_stampfiso, customer_name, amz_date, regionName, secret_key, serviceName,
             filename)

if __name__ == '__main__':
    main()
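Since the goal is just "upload a file to a per-location bucket with embedded credentials", it may be easier to let boto3 build the Signature Version 4 POST policy instead of hand-rolling it. A minimal sketch, reusing the bucket name and file path from main():

import boto3
import requests

s3 = boto3.client('s3', region_name='us-west-2')  # picks up the embedded keys from env/config

# boto3 generates the policy document and SigV4 signature for a browser-style POST
post = s3.generate_presigned_post(Bucket='abc-test2', Key='abc.pem', ExpiresIn=86400)

with open('/home/abc/abc.pem', 'rb') as f:
    r = requests.post(post['url'], data=post['fields'], files={'file': f})
print r.status_code  # 204 on success by default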

ArcGIS Server: write properties of all services to a CSV file

I have 36 services running on ArcGIS Server and would like to export all properties of each service to a CSV file. I managed to write the code with the help of the ESRI help desk (http://resources.arcgis.com/en/help/main/10.2/index.html#//0154000005wt000000), adding the properties "maxImageHeight" and "maxImageWidth" to the request. However, when I run the code it starts to work and writes the properties of the first 22 services, but then it stops suddenly and returns:
Traceback (most recent call last):
File "D:\SCRIPTS\Py-scripts\ArcPy\AGS - ArcPy\AGS_service_report_as_csv2.py", line 436, in
sys.exit(main(sys.argv[1:]))
File "D:\SCRIPTS\Py-scripts\ArcPy\AGS - ArcPy\AGS_service_report_as_csv2.py", line 201, in main
+ "NA" + "\n"
KeyError: 'maxImageHeight'
It's odd because it already delivered the "maxImageHeight" property for the first services.
Code:
# Reads the following properties from services and writes them to a comma-delimited file:
# ServiceName, Folder, Type, Status, Min Instances, Max Instances, Max Waiting Time, Max Startup Time, Max Idle Time, Max Usage Time, KML,
# WMS, WFS, WCS, Max Records, Cluster, Cache Directory, Jobs Directory, Output Directory

# For HTTP calls
import httplib, urllib, json
# For system tools
import sys
# For reading passwords without echoing
import getpass

def main(argv=None):
    # Ask for admin/publisher user name and password
    username = raw_input("Enter user name: ")
    password = getpass.getpass("Enter password: ")
    # Ask for server name & port
    serverName = raw_input("Enter server name: ")
    serverPort = 6080
    # Get the location and the name of the file to be created
    resultFile = raw_input("Output File (the location and the name of the file to be created): ")
    # Get a token
    token = getToken(username, password, serverName, serverPort)
    # Get the root info
    serverURL = "/arcgis/admin/services/"
    #serverURL = "/arcgis/manager/services/"
    # This request only needs the token and the response formatting parameter
    params = urllib.urlencode({'token': token, 'f': 'json'})
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    # Connect to URL and post parameters
    httpConn = httplib.HTTPConnection(serverName, serverPort)
    httpConn.request("POST", serverURL, params, headers)
    # Read response
    response = httpConn.getresponse()
    if (response.status != 200):
        httpConn.close()
        print "Could not read folder information."
        return
    else:
        data = response.read()
        # Check that data returned is not an error object
        if not assertJsonSuccess(data):
            print "Error when reading server information. " + str(data)
            return
        else:
            print "Processed server information successfully. Now processing folders..."
    # Deserialize response into Python object
    dataObj = json.loads(data)
    httpConn.close()
    # Store the folders in a list to loop on
    folders = dataObj["folders"]
    # Remove the System and Utilities folders
    folders.remove("System")
    #folders.remove("Utilities")
    # Add an entry for the root folder
    folders.append("")
    # Create the summary file of services
    serviceResultFile = open(resultFile, 'w')
    serviceResultFile.write(
        "ServiceName,Folder,Type,MaxImageHeight,MaxImageWidth,Status,"
        "Min Instances,Max Instances,Max Waiting Time,Max Startup Time,"
        "Max Idle Time,Max Usage Time,FeatureService,kml,wms,wfs,wcs,"
        "Max Records,Cluster,Cache Directory,Jobs Directory,Output Directory" + "\n")
    # Loop on the found folders, discover the services, and write the service information
    for folder in folders:
        # Determine if the loop is working on the root folder or not
        if folder != "":
            folder += "/"
        # Build the URL for the current folder
        folderURL = "/arcgis/admin/services/" + folder
        params = urllib.urlencode({'token': token, 'f': 'json'})
        headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
        # Connect to URL and post parameters
        httpConn = httplib.HTTPConnection(serverName, serverPort)
        httpConn.request("POST", folderURL, params, headers)
        # Read response
        response = httpConn.getresponse()
        if (response.status != 200):
            httpConn.close()
            print "Could not read folder information."
            return
        else:
            data = response.read()
            # Check that data returned is not an error object
            if not assertJsonSuccess(data):
                print "Error when reading folder information. " + str(data)
            else:
                print "Processed folder information successfully. Now processing services..."
        # Deserialize response into Python object
        dataObj = json.loads(data)
        httpConn.close()
        # Loop through each service in the folder
        for item in dataObj['services']:
            if item["type"] == "GeometryServer":  # and folder == "":
                # Build the service URL
                if folder:
                    sUrl = "/arcgis/admin/services/%s%s.%s" % (folder, item["serviceName"], item["type"])
                    statusUrl = "/arcgis/admin/services/%s%s.%s/status" % (folder, item["serviceName"], item["type"])
                else:
                    sUrl = "/arcgis/admin/services/%s.%s" % (item["serviceName"], item["type"])
                    statusUrl = "/arcgis/admin/services/%s.%s/status" % (item["serviceName"], item["type"])
                httpConn.request("POST", sUrl, params, headers)
                # Get the response
                servResponse = httpConn.getresponse()
                readData = servResponse.read()
                jsonOBJ = json.loads(readData)
                # Submit the status request to the server
                httpConn.request("POST", statusUrl, params, headers)
                servStatusResponse = httpConn.getresponse()
                # Obtain the data from the response
                readData = servStatusResponse.read()
                jsonOBJStatus = json.loads(readData)
                # Build the line to write to the output file
                ln = str(jsonOBJ["serviceName"]) + ","\
                    + folder + ","\
                    + str(item["type"]) + ","\
                    + str(jsonOBJ["properties"]["maxImageHeight"]) + ","\
                    + str(jsonOBJ["properties"]["maxImageWidth"]) + ","\
                    + jsonOBJStatus['realTimeState'] + ","\
                    + str(jsonOBJ["minInstancesPerNode"]) + ","\
                    + str(jsonOBJ["maxInstancesPerNode"]) + ","\
                    + str(jsonOBJ["maxWaitTime"]) + ","\
                    + str(jsonOBJ["maxStartupTime"]) + ","\
                    + str(jsonOBJ["maxIdleTime"]) + ","\
                    + str(jsonOBJ["maxUsageTime"]) + ","\
                    + "NA" + ","\
                    + "NA" + ","\
                    + "NA" + ","\
                    + "NA" + ","\
                    + "NA" + ","\
                    + "NA" + ","\
                    + str(jsonOBJ["clusterName"]) + ","\
                    + "NA" + ","\
                    + "NA" + ","\
                    + "NA" + "\n"
                # Write the results to the file
                serviceResultFile.write(ln)
                httpConn.close()
            elif item["type"] == "SearchServer":  # and folder == "":
                if folder:
                    sUrl = "/arcgis/admin/services/%s%s.%s" % (folder, item["serviceName"], item["type"])
                    statusUrl = "/arcgis/admin/services/%s%s.%s/status" % (folder, item["serviceName"], item["type"])
                else:
                    sUrl = "/arcgis/admin/services/%s.%s" % (item["serviceName"], item["type"])
                    statusUrl = "/arcgis/admin/services/%s.%s/status" % (item["serviceName"], item["type"])
                httpConn.request("POST", sUrl, params, headers)
                # Get the response
                servResponse = httpConn.getresponse()
                readData = servResponse.read()
                jsonOBJ = json.loads(readData)
                # Submit the status request to the server
                httpConn.request("POST", statusUrl, params, headers)
                # Get the response
                servStatusResponse = httpConn.getresponse()
                readData = servStatusResponse.read()
                jsonOBJStatus = json.loads(readData)
                # Build the line to write to the output file
                ln = str(jsonOBJ["serviceName"]) + ","\
                    + folder + ","\
                    + str(item["type"]) + ","\
                    + str(jsonOBJ["properties"]["maxImageHeight"]) + ","\
                    + str(jsonOBJ["properties"]["maxImageWidth"]) + ","\
                    + jsonOBJStatus['realTimeState'] + ","\
                    + str(jsonOBJ["minInstancesPerNode"]) + ","\
                    + str(jsonOBJ["maxInstancesPerNode"]) + ","\
                    + str(jsonOBJ["maxWaitTime"]) + ","\
                    + str(jsonOBJ["maxStartupTime"]) + ","\
                    + str(jsonOBJ["maxIdleTime"]) + ","\
                    + str(jsonOBJ["maxUsageTime"]) + ","\
                    + "NA" + ","\
                    + "NA" + ","\
                    + "NA" + ","\
                    + "NA" + ","\
                    + "NA" + ","\
                    + "NA" + ","\
                    + str(jsonOBJ["clusterName"]) + ","\
                    + "NA" + ","\
                    + "NA" + ","\
                    + "NA" + "\n"
                # Write the results to the file
                serviceResultFile.write(ln)
                httpConn.close()
            #####MapServer########################################
            elif item["type"] == "MapServer":
                # Build the service URL
                if folder:
                    sUrl = "/arcgis/admin/services/%s%s.%s" % (folder, item["serviceName"], item["type"])
                else:
                    sUrl = "/arcgis/admin/services/%s.%s" % (item["serviceName"], item["type"])
                # Submit the request to the server
                httpConn.request("POST", sUrl, params, headers)
                # Get the response
                servResponse = httpConn.getresponse()
                readData = servResponse.read()
                jsonOBJ = json.loads(readData)
                # Build the service URL to test the running status
                if folder:
                    statusUrl = "/arcgis/admin/services/%s%s.%s/status" % (folder, item["serviceName"], item["type"])
                else:
                    statusUrl = "/arcgis/admin/services/%s.%s/status" % (item["serviceName"], item["type"])
                # Submit the request to the server
                httpConn.request("POST", statusUrl, params, headers)
                # Get the response
                servStatusResponse = httpConn.getresponse()
                readData = servStatusResponse.read()
                jsonOBJStatus = json.loads(readData)
                # Check for a map cache
                isCached = jsonOBJ["properties"]["isCached"]
                if isCached == "true":
                    cacheDir = str(jsonOBJ["properties"]["cacheDir"])
                else:
                    cacheDir = jsonOBJ["properties"]["isCached"]
                if len(jsonOBJ["extensions"]) == 0:
                    # Build the line to write to the output file
                    ln = str(jsonOBJ["serviceName"]) + ","\
                        + folder + ","\
                        + str(item["type"]) + ","\
                        + str(jsonOBJ["properties"]["maxImageHeight"]) + ","\
                        + str(jsonOBJ["properties"]["maxImageWidth"]) + ","\
                        + jsonOBJStatus['realTimeState'] + ","\
                        + str(jsonOBJ["minInstancesPerNode"]) + ","\
                        + str(jsonOBJ["maxInstancesPerNode"]) + ","\
                        + "FeatServHolder" + ","\
                        + "Disabled" + ","\
                        + "Disabled" + ","\
                        + str(jsonOBJ["properties"]["maxRecordCount"]) + ","\
                        + str(jsonOBJ["clusterName"]) + ","\
                        + cacheDir + ","\
                        + "NA" + ","\
                        + str(jsonOBJ["properties"]["outputDir"]) + "\n"
                else:
                    # Extract the KML properties from the response
                    kmlProps = [mapKML for mapKML in jsonOBJ["extensions"] if mapKML["typeName"] == 'KmlServer']
                    # Extract the WMS properties from the response
                    wmsProps = [mapWMS for mapWMS in jsonOBJ["extensions"] if mapWMS["typeName"] == 'WMSServer']
                    # Extract the WFS properties from the response
                    wfsProps = [mapWFS for mapWFS in jsonOBJ["extensions"] if mapWFS["typeName"] == 'WFSServer']
                    # Extract the WCS properties from the response
                    wcsProps = [mapWCS for mapWCS in jsonOBJ["extensions"] if mapWCS["typeName"] == 'WCSServer']
                    # Extract the FeatureService properties from the response
                    featServProps = [featServ for featServ in jsonOBJ["extensions"] if featServ["typeName"] == 'FeatureServer']
                    if len(featServProps) > 0:
                        featureStatus = str(featServProps[0]["enabled"])
                    else:
                        featureStatus = "NA"
                    if len(kmlProps) > 0:
                        kmlStatus = str(kmlProps[0]["enabled"])
                    else:
                        kmlStatus = "NA"
                    if len(wmsProps) > 0:
                        wmsStatus = str(wmsProps[0]["enabled"])
                    else:
                        wmsStatus = "NA"
                    #MZ#
                    if len(wfsProps) > 0:
                        wfsStatus = str(wfsProps[0]["enabled"])
                    else:
                        wfsStatus = "NA"
                    #MZ#
                    if len(wcsProps) > 0:
                        wcsStatus = str(wcsProps[0]["enabled"])
                    else:
                        wcsStatus = "NA"
                    ln = str(jsonOBJ["serviceName"]) + ","\
                        + folder + ","\
                        + str(item["type"]) + ","\
                        + str(jsonOBJ["properties"]["maxImageHeight"]) + ","\
                        + str(jsonOBJ["properties"]["maxImageWidth"]) + ","\
                        + jsonOBJStatus['realTimeState'] + ","\
                        + str(jsonOBJ["minInstancesPerNode"]) + ","\
                        + str(jsonOBJ["maxInstancesPerNode"]) + ","\
                        + str(jsonOBJ["maxWaitTime"]) + ","\
                        + str(jsonOBJ["maxStartupTime"]) + ","\
                        + str(jsonOBJ["maxIdleTime"]) + ","\
                        + str(jsonOBJ["maxUsageTime"]) + ","\
                        + featureStatus + ","\
                        + kmlStatus + ","\
                        + wmsStatus + ","\
                        + wfsStatus + ","\
                        + wcsStatus + ","\
                        + str(jsonOBJ["properties"]["maxRecordCount"]) + ","\
                        + str(jsonOBJ["clusterName"]) + ","\
                        + cacheDir + "," + "NA" + ","\
                        + str(jsonOBJ["properties"]["outputDir"]) + "\n"
                # Write the results to the file
                serviceResultFile.write(ln)
            else:
                # Close the connection to the current service
                httpConn.close()
    # Close the file
    serviceResultFile.close()
def getToken(username, password, serverName, serverPort):
    # Token URL is typically http://server[:port]/arcgis/admin/generateToken
    tokenURL = "/arcgis/admin/generateToken"
    params = urllib.urlencode({'username': username, 'password': password, 'client': 'requestip', 'f': 'json'})
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    # Connect to URL and post parameters
    httpConn = httplib.HTTPConnection(serverName, serverPort)
    httpConn.request("POST", tokenURL, params, headers)
    # Read response
    response = httpConn.getresponse()
    if (response.status != 200):
        httpConn.close()
        print "Error while fetching tokens from admin URL. Please check the URL and try again."
        return
    else:
        data = response.read()
        httpConn.close()
        # Check that data returned is not an error object
        if not assertJsonSuccess(data):
            return
        # Extract the token from it
        token = json.loads(data)
        return token['token']

# A function that checks that the input JSON object
# is not an error object.
def assertJsonSuccess(data):
    obj = json.loads(data)
    if 'status' in obj and obj['status'] == "error":
        print "Error: JSON object returns an error. " + str(obj)
        return False
    else:
        return True

if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
I've managed to get past the error by adding bufsize:
# Create the summary file of services
bufsize = 0
serviceResultFile = open(resultFile, 'w', bufsize)
Now it's complaining about how to handle the end of the file. I'm still working on this bit:
sys.exit(main(sys.argv[1:]))
File "D:/Cognos_Testing/Esri/python/get_mapsrv_Stats.py", line 378, in main
+ str(jsonOBJ["properties"]["outputDir"]) + "\n"
KeyError: 'maxImageHeight'
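The bufsize change only affects output buffering; the underlying problem is that not every service type exposes maxImageHeight/maxImageWidth in its properties, so the direct dictionary lookup raises KeyError as soon as the script hits such a service. A defensive lookup avoids the crash; get_prop below is a hypothetical helper, not part of the ESRI sample:

# Hypothetical helper: fall back to "NA" when a service lacks a property
def get_prop(jsonOBJ, name):
    return str(jsonOBJ.get("properties", {}).get(name, "NA"))

# Then, in each ln = ... block, replace the direct lookups:
#   + get_prop(jsonOBJ, "maxImageHeight") + ","\
#   + get_prop(jsonOBJ, "maxImageWidth") + ","\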
