Decode .ilearner from Azure - python

I would like to ask for solutions to a problem with decoding an .ilearner file downloaded via the API from Microsoft Azure ML. The error I am receiving looks like this:
File "<ipython-input-1-157aac1e9b7a>", line 162, in <module>
invokeBatchExecutionService()
File "<ipython-input-1-157aac1e9b7a>", line 157, in invokeBatchExecutionService
processResults(result)
File "<ipython-input-1-157aac1e9b7a>", line 51, in processResults
saveBlobToFile(url3, "The results for " + outputName)
File "<ipython-input-1-157aac1e9b7a>", line 28, in saveBlobToFile
f.write(response.read().decode("utf8", 'ignore'))
File "D:\XXXXXX\lib\encodings\cp1252.py", line 19, in encode
return codecs.charmap_encode(input,self.errors,encoding_table)[0
" UnicodeEncodeError: 'charmap' codec can't encode character '\u053e' in position 1463: character maps to <undefined>"
My full code looks like this:
import urllib.request
import json
import time
from azure.storage.blob import *

def printHttpError(httpError):
    print("The request failed with status code: " + str(httpError.code))
    # Print the headers - they include the request ID and the timestamp, which are useful for debugging the failure
    print(httpError.info())
    print(json.loads(httpError.read().decode("utf8", 'ignore')))
    return

def saveBlobToFile(blobUrl, resultsLabel):
    output_file = "C:/XXX/API/output.csv"  # Replace this with the location you would like to use for your output file, and valid file extension (usually .csv for scoring results, or .ilearner for trained models)
    print("Reading the result from " + blobUrl)
    try:
        response = urllib.request.urlopen(blobUrl)
    except urllib.error.HTTPError as error:
        printHttpError(error)
        return
    # print("Response: " + response)
    with open(output_file, "w+") as f:
        # print(response.read())
        f.write(response.read().decode("utf8", 'ignore'))
    print(resultsLabel + " have been written to the file " + output_file)
    return

def processResults(result):
    first = True
    results = result["Results"]
    # print(" Results : " + results)
    for outputName in results:
        result_blob_location = results[outputName]
        sas_token = result_blob_location["SasBlobToken"]
        base_url = result_blob_location["BaseLocation"]
        relative_url = result_blob_location["RelativeLocation"]
        print("The results for " + outputName + " are available at the following Azure Storage location:")
        print("BaseLocation: " + base_url)
        print("RelativeLocation: " + relative_url)
        print("SasBlobToken: " + sas_token)
        if (first):
            first = False
            url3 = base_url + relative_url + sas_token
            saveBlobToFile(url3, "The results for " + outputName)
            # first = True
    return

def uploadFileToBlob(input_file, input_blob_name, storage_container_name, storage_account_name, storage_account_key):
    blob_service = BlockBlobService(account_name=storage_account_name, account_key=storage_account_key)
    print("Uploading the input to blob storage...")
    blob_service.create_blob_from_path(storage_container_name, input_blob_name, input_file)

def invokeBatchExecutionService():
    storage_account_name = "XXX"  # Replace this with your Azure Storage Account name
    storage_account_key = "XXX"  # Replace this with your Azure Storage Key
    storage_container_name = "XXX"  # Replace this with your Azure Storage Container name
    connection_string = "DefaultEndpointsProtocol=https;AccountName=" + storage_account_name + ";AccountKey=" + storage_account_key
    api_key = "XXX"  # Replace this with the API key for the web service
    url = "https://uswestcentral.services.azureml.net/subscriptions/75b6fa4e098c4ad88df85a3533530bd4/services/4ed857d52f3140e1b422e6e986437b6e/jobs"
    uploadFileToBlob("C:/XXX/API/input.csv",  # Replace this with the location of your input file, and valid file extension (usually .csv)
                     "input2.csv",  # Replace this with the name you would like to use for your Azure blob; this needs to have the same extension as the input file
                     storage_container_name, storage_account_name, storage_account_key)
    payload = {
        "Inputs": {
            "input1": {
                "ConnectionString": connection_string,
                "RelativeLocation": "/" + storage_container_name + "/input.csv"
            },
        },
        "Outputs": {
            "output1": {
                "ConnectionString": connection_string,
                "RelativeLocation": "/" + storage_container_name + "/output.csv"  # Replace this with the location you would like to use for your output file, and valid file extension (usually .csv for scoring results, or .ilearner for trained models)
            },
            "output2": {
                "ConnectionString": connection_string,
                "RelativeLocation": "/" + storage_container_name + "/output.ilearner"  # Replace this with the location you would like to use for your output file, and valid file extension (usually .csv for scoring results, or .ilearner for trained models)
            },
        },
        "GlobalParameters": {
        }
    }
    body = str.encode(json.dumps(payload))
    headers = {"Content-Type": "application/json", "Authorization": ("Bearer " + api_key)}
    print("Submitting the job...")
    # submit the job
    req = urllib.request.Request(url + "?api-version=2.0", body, headers)
    try:
        response = urllib.request.urlopen(req)
    except urllib.error.HTTPError as error:
        printHttpError(error)
        return
    result = response.read()
    job_id = result.decode("utf8", 'ignore')[1:-1]
    print("Job ID: " + job_id)
    # start the job
    print("Starting the job...")
    body = str.encode(json.dumps({}))
    req = urllib.request.Request(url + "/" + job_id + "/start?api-version=2.0", body, headers)
    try:
        response = urllib.request.urlopen(req)
    except urllib.error.HTTPError as error:
        printHttpError(error)
        return
    url2 = url + "/" + job_id + "?api-version=2.0"
    while True:
        print("Checking the job status...")
        req = urllib.request.Request(url2, headers={"Authorization": ("Bearer " + api_key)})
        try:
            response = urllib.request.urlopen(req)
        except urllib.error.HTTPError as error:
            printHttpError(error)
            return
        result = json.loads(response.read().decode("utf8", 'ignore'))
        status = result["StatusCode"]
        if (status == 0 or status == "NotStarted"):
            print("Job " + job_id + " not yet started...")
        elif (status == 1 or status == "Running"):
            print("Job " + job_id + " running...")
        elif (status == 2 or status == "Failed"):
            print("Job " + job_id + " failed!")
            print("Error details: " + result["Details"])
            break
        elif (status == 3 or status == "Cancelled"):
            print("Job " + job_id + " cancelled!")
            break
        elif (status == 4 or status == "Finished"):
            print("Job " + job_id + " finished!")
            # print("Results: " + results)
            processResults(result)
            break
        time.sleep(1)  # wait one second
    return

invokeBatchExecutionService()
The code follows the guidelines described at https://learn.microsoft.com/en-us/azure/machine-learning/studio/retrain-models-programmatically. I run the script with Python 3.5.

You are downloading a file encoded as UTF-8, then writing the data to your local filesystem using the platform's default encoding (probably cp1252 or similar). Writing to the local file is failing because the data contains characters that cannot be encoded as cp1252.
>>> s = '\u053e'
>>> s
'Ծ'
>>> s.encode('cp1252')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/XXXXXX/data36/lib/python3.6/encodings/cp1252.py", line 12, in encode
return codecs.charmap_encode(input,errors,encoding_table)
UnicodeEncodeError: 'charmap' codec can't encode character '\u053e' in position 0: character maps to <undefined>
One solution is to encode your output file as UTF-8 (you will need to remember to open the file as UTF-8 if you open it elsewhere in your code):
with open(output_file, "w+", encoding='utf-8') as f:
    f.write(response.read().decode("utf8", 'ignore'))
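For example, reading the file back later must then pass the same encoding:
with open(output_file, encoding='utf-8') as f:
    data = f.read()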
Another approach might be to set the errors argument to open to handle encoding errors. For example:
with open(output_file, "w+", errors="replace") as f:
    f.write(response.read().decode("utf8", 'ignore'))
will replace un-encodeable characters with a ? character.
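Since .ilearner is a binary model format, a third option (a sketch, not from the original answer) is to skip text decoding entirely and stream the raw bytes to disk:
import shutil
import urllib.request

# Save the blob as raw bytes, with no decode/encode round trip.
# This is the safest choice for binary outputs such as .ilearner trained models.
with urllib.request.urlopen(blobUrl) as response, open(output_file, "wb") as f:
    shutil.copyfileobj(response, f)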

Related

Using data from API in subsequent API calls

I should preface this with: I am not a programmer, and most of this code was not written by me. I unfortunately have a need and am trying to hack my way through this.
What I am trying to do is chain a few API calls together to ultimately get a list of IPs. What this script does is query the API and pull (and print) a list of device IDs. The device IDs look like this:
akdjlfijoaidjfod
g9jkidfjlskdjf44
3jdhfj4hf9dfiiu4
The device IDs then need to be passed as a parameter in the next API call like this:
https://api.example.com/devices/entities/devices/v1?ids=akdjlfijoaidjfod&ids=g9jkidfjlskdjf44&ids=3jdhfj4hf9dfiiu4 and so on.
I don't know where to begin. Instead of printing the asset IDs, I assume they should be stored as a parameter (or variable) and then appended to the URL. I tried doing that with "ID_LIST" but that didn't seem to work. Can you point me in the right direction?
import requests
import json

# Define API REST paths
BASE_URL = "https://api.example.com/"
OAUTH_URL_PART = "oauth2/token"
DEVICE_SEARCH = "devices/queries/devices/v1"
DEVICE_DETAILS = "devices/entities/devices/v1"

# Empty auth token to hold value for subsequent request
auth_Token = ""

# Section 1 - Authenticate to Example OAUTH
# Build a dictionary to hold the headers
headers = {
    'Content-type': 'application/x-www-form-urlencoded',
    'accept': 'application/json'
}
# Build a dictionary to hold the authentication data to be posted to get a token
auth_creds = {}
auth_creds['client_id'] = "<client_id>"
auth_creds['client_secret'] = "<client_secret>"
auth_creds['grant_type'] = "client_credentials"

# Call the API to get an authentication token - NOTE the authentication creds
print("Requesting token from " + BASE_URL + OAUTH_URL_PART)
auth_response = requests.post(BASE_URL + OAUTH_URL_PART, data=auth_creds, headers=headers)

# Check if successful
if auth_response.status_code != 201:
    # Output debug information
    print("\n Return Code: " + str(auth_response.status_code) + " " + auth_response.reason)
    print("Path: " + auth_response.request.path_url)
    print("Headers: ")
    print(auth_response.request.headers)
    print("Body: " + auth_response.request.body)
    print("\n")
    print("Trace_ID: " + auth_response.json()['meta']['trace_id'])
else:
    # Section 2 - Capture OAUTH token and store in headers for later use
    print("Token Created")
    # Capture the auth token for reuse in subsequent calls, by pulling it from the response
    # Note this token can be reused multiple times until it expires after 30 mins
    auth_Token = auth_response.json()['access_token']
    headers = {
        'authorization': 'bearer ' + auth_Token,
        'accept': 'application/json'
    }

    # Section 3 - Reuse authentication token to call other Example OAUTH2 APIs
    # Build parameter dictionary
    call_params = {}
    call_params['offset'] = ""      # Non-mandatory param
    call_params['limit'] = "5000"   # The number of results
    call_params['sort'] = ""
    call_params['filter'] = ""      # To exclude devices

    # Call devices API
    print("Searching Asset ID by getting from " + BASE_URL + DEVICE_SEARCH)
    DEVICE_search_response = requests.get(BASE_URL + DEVICE_SEARCH, params=call_params, headers=headers)
    #DEVICE_DETAILS_response = request.get(BASE_URL + DEVICE_DETAILS, headers=headers)

    # Check for errors
    if DEVICE_search_response.status_code != 200:
        # Output debug information
        print("\n Return Code: " + str(DEVICE_search_response.status_code) + " " + DEVICE_search_response.reason)
        print("Path: " + DEVICE_search_response.request.path_url)
        print("Headers: ")
        print(DEVICE_search_response.request.headers)
        print("Body: " + DEVICE_search_response.request.body)
        print("\n")
        print("Trace_ID: " + DEVICE_search_response.json()['meta']['trace_id'])
    else:
        # Iterate the results and print
        result = DEVICE_search_response.json()
        print("DEVICE found on " + str(len(result['resources'])) + " the following device id:")
        for devices in result['resources']:
            print(devices)

        ########### Part that is not working ###########
        DEVICE_DETAILS_response = requests.get(BASE_URL + DEVICE_DETAILS, headers=headers)
        #ID_LIST = str(len(result['resources']).replace(",", "&ids=")
        if DEVICE_DETAILS_response.status_code != 200:
            # Output debug information
            print("\n Return Code: " + str(DEVICE_DETAILS_response.status_code) + " " + DEVICE_DETAILS_response.reason)
            print("Path: " + DEVICE_DETAILS_response.request.path_url)
            print("Headers: ")
            print(DEVICE_DETAILS_response.request.headers)
            print("Body: " + DEVICE_DETAILS_response.request.body)
            print("\n")
            print("Trace_ID: " + DEVICE_DETAILS_response.json()['meta']['trace_id'])
        else:
            result = DEVICE_DETAILS_response.json()
            print("Device Details Found")
            for details in result['resources']:
                print(details)
Hi, to convert the strings in result['resources']:
['akdjlfijoaidjfod',
 'g9jkidfjlskdjf44',
 '3jdhfj4hf9dfiiu4']
to: https://api.example.com/devices/entities/devices/v1?ids=akdjlfijoaidjfod&ids=g9jkidfjlskdjf44&ids=3jdhfj4hf9dfiiu4
try this function:
def get_modified_url(mylist, myurl):
    url = myurl + '?'
    for idx, b in enumerate(mylist):  # enumerate list to get index and element in the list
        if idx > 0:
            url += '&ids=' + b  # append &ids= to url if not first device id
        else:
            url += 'ids=' + b  # append ids= to url if first device id
    return url

print(get_modified_url(result['resources'], BASE_URL + DEVICE_DETAILS))
The full code would be:
def get_modified_url(mylist, myurl):
    url = myurl + '?'
    for idx, b in enumerate(mylist):  # enumerate list to get index and element in the list
        if idx > 0:
            url += '&ids=' + b  # append &ids= to url if not first device id
        else:
            url += 'ids=' + b  # append ids= to url if first device id
    return url

device_list = []
DEVICE_search_response = requests.get(BASE_URL + DEVICE_SEARCH, params=call_params, headers=headers)

# Check for errors
if DEVICE_search_response.status_code != 200:
    # Output debug information
    print("\n Return Code: " + str(DEVICE_search_response.status_code) + " " + DEVICE_search_response.reason)
    print("Path: " + DEVICE_search_response.request.path_url)
    print("Headers: ")
    print(DEVICE_search_response.request.headers)
    print("Body: " + DEVICE_search_response.request.body)
    print("\n")
    print("Trace_ID: " + DEVICE_search_response.json()['meta']['trace_id'])
else:
    # Iterate the results and print
    result = DEVICE_search_response.json()
    print("DEVICE found on " + str(len(result['resources'])) + " the following device id:")
    for devices in result['resources']:
        print(devices)
        device_list.append(devices)

    new_url = get_modified_url(device_list, BASE_URL + DEVICE_DETAILS)
    DEVICE_DETAILS_response = requests.get(new_url, headers=headers)
    if DEVICE_DETAILS_response.status_code != 200:
        # Output debug information
        print("\n Return Code: " + str(DEVICE_DETAILS_response.status_code) + " " + DEVICE_DETAILS_response.reason)
        print("Path: " + DEVICE_DETAILS_response.request.path_url)
        print("Headers: ")
        print(DEVICE_DETAILS_response.request.headers)
        print("Body: " + DEVICE_DETAILS_response.request.body)
        print("\n")
        print("Trace_ID: " + DEVICE_DETAILS_response.json()['meta']['trace_id'])
    else:
        result = DEVICE_DETAILS_response.json()
        print("Device Details Found")
        for details in result['resources']:
            print(details)
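As a side note, requests can build the repeated ids= query string itself if you pass a list as the parameter value, so the manual URL concatenation is optional (a sketch, assuming the same BASE_URL, DEVICE_DETAILS and headers as above):
# requests expands a list value into repeated keys: ?ids=a&ids=b&ids=c
DEVICE_DETAILS_response = requests.get(BASE_URL + DEVICE_DETAILS,
                                       params={'ids': device_list},
                                       headers=headers)
print(DEVICE_DETAILS_response.url)  # shows the expanded URL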

Azure Python SDK & Machine Learning Studio Web Service Batch Execution Snippet: TypeError

First issue resolved; please scroll down to EDIT 2.
I'm trying to access a web service deployed via Azure Machine Learning Studio, using the Batch Execution sample code for Python at the bottom of the page below:
https://studio.azureml.net/apihelp/workspaces/306bc1f050ba4cdba0dbc6cc561c6ab0/webservices/e4e3d2d32ec347ae9a829b200f7d31cd/endpoints/61670382104542bc9533a920830b263c/jobs
I have already fixed an issue according to this question (replaced BlobService by BlockBlobService and so on):
https://studio.azureml.net/apihelp/workspaces/306bc1f050ba4cdba0dbc6cc561c6ab0/webservices/e4e3d2d32ec347ae9a829b200f7d31cd/endpoints/61670382104542bc9533a920830b263c/jobs
And I have also entered the API key, container name, URL, account_key and account_name according to the instructions.
However, it seems that today the code snippet is even more outdated than it was back then, because I receive a different error now:
File "C:/Users/Alex/Desktop/scripts/BatchExecution.py", line 80, in uploadFileToBlob
blob_service = asb.BlockBlobService(account_name=storage_account_name, account_key=storage_account_key)
File "C:\Users\Alex\Anaconda3\lib\site-packages\azure\storage\blob\blockblobservice.py", line 145, in __init__
File "C:\Users\Alex\Anaconda3\lib\site-packages\azure\storage\blob\baseblobservice.py", line 205, in __init__
TypeError: get_service_parameters() got an unexpected keyword argument 'token_credential'
I also noticed that when installing the Azure SDK for Python via pip, I get the following warnings at the end of the process (the installation is successful, however):
azure-storage-queue 1.3.0 has requirement azure-storage-common<1.4.0,>=1.3.0, but you'll have azure-storage-common 1.1.0 which is incompatible.
azure-storage-file 1.3.0 has requirement azure-storage-common<1.4.0,>=1.3.0, but you'll have azure-storage-common 1.1.0 which is incompatible.
azure-storage-blob 1.3.0 has requirement azure-storage-common<1.4.0,>=1.3.0, but you'll have azure-storage-common 1.1.0 which is incompatible.
I can't find anything about all this in the latest documentation for the Python SDK (it does not even contain the word 'token_credential'):
https://media.readthedocs.org/pdf/azure-storage/latest/azure-storage.pdf
Does anyone have a clue what's going wrong during the installation or why the type-error with the 'token_credential' pops up during execution?
Or does anyone know how I can install the necessary version of azure-storage-common or azure-storage-blob?
EDIT: Here's my code (not reproducible, however, because I changed the keys before posting):
# How this works:
#
# 1. Assume the input is present in a local file (if the web service accepts input)
# 2. Upload the file to an Azure blob - you'd need an Azure storage account
# 3. Call BES to process the data in the blob.
# 4. The results get written to another Azure blob.
# 5. Download the output blob to a local file
#
# Note: You may need to download/install the Azure SDK for Python.
# See: http://azure.microsoft.com/en-us/documentation/articles/python-how-to-install/
import urllib
# If you are using Python 3+, import urllib instead of urllib2
import json
import time
import azure.storage.blob as asb  # replaces BlobService by BlockBlobService

def printHttpError(httpError):
    print("The request failed with status code: " + str(httpError.code))
    # Print the headers - they include the request ID and the timestamp, which are useful for debugging the failure
    print(httpError.info())
    print(json.loads(httpError.read()))
    return

def saveBlobToFile(blobUrl, resultsLabel):
    output_file = "myresults.csv"  # Replace this with the location you would like to use for your output file
    print("Reading the result from " + blobUrl)
    try:
        # If you are using Python 3+, replace urllib2 with urllib.request in the following code
        response = urllib.request.urlopen(blobUrl)
    except urllib.request.HTTPError:
        printHttpError(urllib.HTTPError)
        return
    with open(output_file, "w+") as f:
        f.write(response.read())
    print(resultsLabel + " have been written to the file " + output_file)
    return

def processResults(result):
    first = True
    results = result["Results"]
    for outputName in results:
        result_blob_location = results[outputName]
        sas_token = result_blob_location["SasBlobToken"]
        base_url = result_blob_location["BaseLocation"]
        relative_url = result_blob_location["RelativeLocation"]
        print("The results for " + outputName + " are available at the following Azure Storage location:")
        print("BaseLocation: " + base_url)
        print("RelativeLocation: " + relative_url)
        print("SasBlobToken: " + sas_token)
        if (first):
            first = False
            url3 = base_url + relative_url + sas_token
            saveBlobToFile(url3, "The results for " + outputName)
    return

def uploadFileToBlob(input_file, input_blob_name, storage_container_name, storage_account_name, storage_account_key):
    blob_service = asb.BlockBlobService(account_name=storage_account_name, account_key=storage_account_key)
    print("Uploading the input to blob storage...")
    data_to_upload = open(input_file, "r").read()
    blob_service.put_blob(storage_container_name, input_blob_name, data_to_upload, x_ms_blob_type="BlockBlob")

def invokeBatchExecutionService():
    storage_account_name = "storage1"  # Replace this with your Azure Storage Account name
    storage_account_key = "kOveEtQMoP5zbUGfFR47"  # Replace this with your Azure Storage Key
    storage_container_name = "input"  # Replace this with your Azure Storage Container name
    connection_string = "DefaultEndpointsProtocol=https;AccountName=" + storage_account_name + ";AccountKey=" + storage_account_key  #"DefaultEndpointsProtocol=https;AccountName=mayatostorage1;AccountKey=aOYA2P5VQPR3ZQCl+aWhcGhDRJhsR225teGGBKtfXWwb2fNEo0CrhlwGWdfbYiBTTXPHYoKZyMaKuEAU8A/Fzw==;EndpointSuffix=core.windows.net"
    api_key = "5wUaln7n99rt9k+enRLG2OrhSsr9VLeoCfh0q3mfYo27hfTCh32f10PsRjJtuA=="  # Replace this with the API key for the web service
    url = "https://ussouthcentral.services.azureml.net/workspaces/306bc1f050/services/61670382104542bc9533a920830b263c/jobs"  #"https://ussouthcentral.services.azureml.net/workspaces/306bc1f050ba4cdba0dbc6cc561c6ab0/services/61670382104542bc9533a920830b263c/jobs/job_id/start?api-version=2.0"
    uploadFileToBlob(r"C:\Users\Alex\Desktop\16_da.csv",  # Replace this with the location of your input file
                     "input1datablob.csv",  # Replace this with the name you would like to use for your Azure blob; this needs to have the same extension as the input file
                     storage_container_name, storage_account_name, storage_account_key)
    payload = {
        "Inputs": {
            "input1": {"ConnectionString": connection_string, "RelativeLocation": "/" + storage_container_name + "/input1datablob.csv"},
        },
        "Outputs": {
            "output1": {"ConnectionString": connection_string, "RelativeLocation": "/" + storage_container_name + "/output1results.csv"},
        },
        "GlobalParameters": {
        }
    }
    body = str.encode(json.dumps(payload))
    headers = {"Content-Type": "application/json", "Authorization": ("Bearer " + api_key)}
    print("Submitting the job...")
    # If you are using Python 3+, replace urllib2 with urllib.request in the following code
    # submit the job
    req = urllib.request.Request(url + "?api-version=2.0", body, headers)
    try:
        response = urllib.request.urlopen(req)
    except urllib.request.HTTPError:
        printHttpError(urllib.HTTPError)
        return
    result = response.read()
    job_id = result[1:-1]  # remove the enclosing double-quotes
    print("Job ID: " + job_id)
    # If you are using Python 3+, replace urllib2 with urllib.request in the following code
    # start the job
    print("Starting the job...")
    req = urllib.request.Request(url + "/" + job_id + "/start?api-version=2.0", "", headers)
    try:
        response = urllib.request.urlopen(req)
    except urllib.request.HTTPError:
        printHttpError(urllib.HTTPError)
        return
    url2 = url + "/" + job_id + "?api-version=2.0"
    while True:
        print("Checking the job status...")
        # If you are using Python 3+, replace urllib2 with urllib.request in the following code
        req = urllib.request.Request(url2, headers={"Authorization": ("Bearer " + api_key)})
        try:
            response = urllib.request.urlopen(req)
        except urllib.request.HTTPError:
            printHttpError(urllib.HTTPError)
            return
        result = json.loads(response.read())
        status = result["StatusCode"]
        if (status == 0 or status == "NotStarted"):
            print("Job " + job_id + " not yet started...")
        elif (status == 1 or status == "Running"):
            print("Job " + job_id + " running...")
        elif (status == 2 or status == "Failed"):
            print("Job " + job_id + " failed!")
            print("Error details: " + result["Details"])
            break
        elif (status == 3 or status == "Cancelled"):
            print("Job " + job_id + " cancelled!")
            break
        elif (status == 4 or status == "Finished"):
            print("Job " + job_id + " finished!")
            processResults(result)
            break
        time.sleep(1)  # wait one second
    return

invokeBatchExecutionService()
EDIT 2: The above issue has been resolved thanks to Jon, and the CSV now gets uploaded to blob storage.
However, there is now an HTTPError when the job gets submitted in line 130:
    raise HTTPError(req.full_url, code, msg, hdrs, fp)
HTTPError: Bad Request
I think the code they give may be pretty old at this point.
The latest version of azure-storage-blob is 1.3. So perhaps a pip install --upgrade azure-storage-blob, or simply uninstalling and reinstalling, would help.
Once you've got the latest version, try using the create_blob_from_text method to load the file to your storage container.
from azure.storage.blob import BlockBlobService

blobService = BlockBlobService(account_name="accountName", account_key="accountKey")
blobService.create_blob_from_text("containerName", "fileName", csv_file)
Hope that works to help lead you down the right path, but if not we can work through it. :)
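If your input is already a file on disk, create_blob_from_path in the same 1.x SDK is an alternative that streams the file for you (a sketch with placeholder values):
from azure.storage.blob import BlockBlobService

# Placeholder credentials and paths - replace with your own.
blobService = BlockBlobService(account_name="accountName", account_key="accountKey")
blobService.create_blob_from_path("containerName", "blobName.csv", r"C:\path\to\input.csv")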

Getting ValueError: unknown url type: ' '

I have the code below that iterates through some tracks. For each track I want to use the Musixmatch API to get and print the lyrics of the track, based on the artist name and track name.
Code that iterates through some tracks and prints the lyrics:
for i, v in tracks.items():
    artist = tracks[i]['artist'].replace(" ", "+")
    title = tracks[i]['title'].replace(" ", "+")
    print(tracks)
    print(song_lyric(title, artist))
The print(tracks) returns in this format:
{12: {'trackID': 12, 'title': 'Achtung Baby', 'number': '1', 'artist': 'U2', 'album': 'Achtung Baby', 'albumID': 2, 'duration': '291'}}
When the code is executed, the lyrics for the first tracks are printed, but then an error appears:
Traceback (most recent call last):
  File "C:/Users/Ozzy/PycharmProjects/getData/getData.py", line 239, in <module>
    print(song_lyric(title, artist))
  File "C:/Users/Ozzy/PycharmProjects/getData/getData.py", line 72, in song_lyric
    lyrics_tracking(tracking_url)
  File "C:/Users/Ozzy/PycharmProjects/getData/getData.py", line 79, in lyrics_tracking
    request = urllib.request.Request(querystring)
  File "C:\Users\Ozzy\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 329, in __init__
    self.full_url = url
  File "C:\Users\Ozzy\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 355, in full_url
    self._parse()
  File "C:\Users\Ozzy\AppData\Local\Programs\Python\Python36\lib\urllib\request.py", line 384, in _parse
    raise ValueError("unknown url type: %r" % self.full_url)
ValueError: unknown url type: ''
Do you know why this error is appearing?
The methods to get the lyrics from Musixmatch are publicly available:
def song_lyric(song_name, artist_name):
    while True:
        querystring = apiurl_musixmatch + "matcher.lyrics.get?q_track=" + urllib.parse.quote(
            song_name) + "&q_artist=" + urllib.parse.quote(
            artist_name) + "&apikey=" + apikey_musixmatch + "&format=json&f_has_lyrics=1"
        # matcher.lyrics.get?q_track=sexy%20and%20i%20know%20it&q_artist=lmfao
        request = urllib.request.Request(querystring)
        # request.add_header("Authorization", "Bearer " + client_access_token)
        request.add_header("User-Agent",
                           "curl/7.9.8 (i686-pc-linux-gnu) libcurl 7.9.8 (OpenSSL 0.9.6b) (ipv6 enabled)")  # Must include user agent of some sort, otherwise 403 returned
        while True:
            try:
                response = urllib.request.urlopen(request,
                                                  timeout=4)  # timeout set to 4 seconds; automatically retries if times out
                raw = response.read()
            except socket.timeout:
                print("Timeout raised and caught")
                continue
            break
        json_obj = json.loads(raw.decode('utf-8'))
        body = json_obj["message"]["body"]["lyrics"]["lyrics_body"]
        copyright = json_obj["message"]["body"]["lyrics"]["lyrics_copyright"]
        tracking_url = json_obj["message"]["body"]["lyrics"]["html_tracking_url"]
        #print(tracking_url)
        lyrics_tracking(tracking_url)
        return (body + "\n\n" + copyright)

def lyrics_tracking(tracking_url):
    while True:
        querystring = tracking_url
        request = urllib.request.Request(querystring)
        # request.add_header("Authorization", "Bearer " + client_access_token)
        request.add_header("User-Agent",
                           "curl/7.9.8 (i686-pc-linux-gnu) libcurl 7.9.8 (OpenSSL 0.9.6b) (ipv6 enabled)")  # Must include user agent of some sort, otherwise 403 returned
        try:
            response = urllib.request.urlopen(request,
                                              timeout=4)  # timeout set to 4 seconds; automatically retries if times out
            raw = response.read()
        except socket.timeout:
            print("Timeout raised and caught")
            continue
        break
    print(raw)
Full working example that reproduces the error:
import requests
import json
import urllib.request, urllib.error, urllib.parse
import socket

apikey_musixmatch = '0b4a363bbd71974c2634837d5b5d1d9a'  # generated for the example
apiurl_musixmatch = 'http://api.musixmatch.com/ws/1.1/'
api_key = "b088cbedecd40b35dd89e90f55227ac2"  # generated for the example

def song_lyric(song_name, artist_name):
    while True:
        querystring = apiurl_musixmatch + "matcher.lyrics.get?q_track=" + urllib.parse.quote(
            song_name) + "&q_artist=" + urllib.parse.quote(
            artist_name) + "&apikey=" + apikey_musixmatch + "&format=json&f_has_lyrics=1"
        # matcher.lyrics.get?q_track=sexy%20and%20i%20know%20it&q_artist=lmfao
        request = urllib.request.Request(querystring)
        # request.add_header("Authorization", "Bearer " + client_access_token)
        request.add_header("User-Agent",
                           "curl/7.9.8 (i686-pc-linux-gnu) libcurl 7.9.8 (OpenSSL 0.9.6b) (ipv6 enabled)")  # Must include user agent of some sort, otherwise 403 returned
        while True:
            try:
                response = urllib.request.urlopen(request,
                                                  timeout=4)  # timeout set to 4 seconds; automatically retries if times out
                raw = response.read()
            except socket.timeout:
                print("Timeout raised and caught")
                continue
            break
        json_obj = json.loads(raw.decode('utf-8'))
        body = json_obj["message"]["body"]["lyrics"]["lyrics_body"]
        copyright = json_obj["message"]["body"]["lyrics"]["lyrics_copyright"]
        tracking_url = json_obj["message"]["body"]["lyrics"]["html_tracking_url"]
        print("Tracking_url====================" + tracking_url + "==================================")
        lyrics_tracking(tracking_url)
        return (body + "\n\n" + copyright)

def lyrics_tracking(tracking_url):
    while True:
        querystring = tracking_url
        request = urllib.request.Request(querystring)
        # request.add_header("Authorization", "Bearer " + client_access_token)
        request.add_header("User-Agent",
                           "curl/7.9.8 (i686-pc-linux-gnu) libcurl 7.9.8 (OpenSSL 0.9.6b) (ipv6 enabled)")  # Must include user agent of some sort, otherwise 403 returned
        try:
            response = urllib.request.urlopen(request,
                                              timeout=4)  # timeout set to 4 seconds; automatically retries if times out
            raw = response.read()
        except socket.timeout:
            print("Timeout raised and caught")
            continue
        break
    print(raw)

ID = 0
# get top artists from country
artists = {}
for i in range(2, 3):
    artists_response = requests.get(
        'http://ws.audioscrobbler.com/2.0/?method=geo.gettopartists&country=spain&format=json&page=' + str(i) + '&api_key=' + api_key)
    artists_data = artists_response.json()
    for artist in artists_data["topartists"]["artist"]:
        name = artist["name"]
        url = artist["url"]
        if ID > 1: continue
        artists[ID] = {}
        artists[ID]['ID'] = ID
        artists[ID]['name'] = name
        ID += 1

for i, v in artists.items():
    chosen = artists[i]['name'].replace(" ", "+")
    artist_response = requests.get(
        'http://ws.audioscrobbler.com/2.0/?method=artist.getinfo&format=json&artist=' + chosen + '&api_key=' + api_key)
    artist_data = artist_response.json()

# get top albums of the artists
albums = {}
for i, v in artists.items():
    chosen = artists[i]['name'].replace(" ", "+")
    topalbums_response = requests.get(
        'http://ws.audioscrobbler.com/2.0/?method=artist.gettopalbums&format=json&artist=' + chosen + '&api_key=' + api_key + '&limit=5')
    albums_data = topalbums_response.json()
    for album in albums_data['topalbums']['album']:
        name = album["name"]
        url = album["url"]
        albums[ID] = {}
        albums[ID]['ID'] = ID
        albums[ID]['artist'] = artists[i]['name']
        albums[ID]['artistID'] = artists[i]['ID']
        albums[ID]['name'] = name
        ID += 1

# Get tracks of the album
tracks = {}
for i, v in albums.items():
    artist = albums[i]['artist'].replace(" ", "+")
    name = albums[i]['name'].replace(" ", "+")
    album_response_data = requests.get(
        'http://ws.audioscrobbler.com/2.0/?method=album.getinfo&format=json&api_key=' + api_key + '&artist=' + artist + '&album=' + name)
    album_response = album_response_data.json()
    for album in album_response['album']['tracks']['track']:
        title = album['name']
        tracks[ID] = {}
        tracks[ID]['trackID'] = ID
        tracks[ID]['title'] = title
        tracks[ID]['artist'] = albums[i]['artist']
        tracks[ID]['album'] = albums[i]['name']
        tracks[ID]['albumID'] = albums[i]['ID']
        ID += 1

for i, v in tracks.items():
    artist = tracks[i]['artist'].replace(" ", "+")
    title = tracks[i]['title'].replace(" ", "+")
    # print the lyric of each track
    print(song_lyric(title, artist))
It seems like the URL is not correct. It happens here:
tracking_url = json_obj["message"]["body"]["lyrics"]["html_tracking_url"]
If you are able to run that API locally and see what is returned in tracking_url, you can find out what is still wrong with it.
UPDATE:
I reproduced it: urllib.request cannot process an empty-string URL (""). You need to check that tracking_url != "", and only when it is not an empty string or None should you request the tracking URL.
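A minimal guard along those lines (a sketch against the same JSON structure used above) would be:
lyrics = json_obj["message"]["body"]["lyrics"]
tracking_url = lyrics.get("html_tracking_url", "")
if tracking_url:  # skips both "" and None
    lyrics_tracking(tracking_url)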

Python TypeError on executing weather service code

I am using a weather API to design a Slack bot service using Python.
My source code is:
import requests
import re
import json
from bs4 import BeautifulSoup

def weather(cityname):
    cityid = extractid(cityname)
    url = "http://api.openweathermap.org/data/2.5/forecast?id=" + str(cityid) + "&APPID=c72f730d08a4ea1d121c8e25da7e4411"
    while True:
        r = requests.get(url, timeout=5)
        while r.status_code is not requests.codes.ok:
            r = requests.get(url, timeout=5)
        soup = BeautifulSoup(r.text)
        data = ("City: " + soup.city["name"] + ", Country: " + soup.country.text + "\nTemperature: " + soup.temperature["value"] +
                " Celsius\nWind: " + soup.speed["name"] + ", Direction: " + soup.direction["name"] + "\n\n" + soup.weather["value"])
        # print data
        return data

def extractid(cname):
    with open('/home/sourav/Git-Github/fabulous/fabulous/services/city.list.json') as data_file:
        data = json.load(data_file)
    for item in data:
        if item["name"] == cname:
            return item["id"]

def on_message(msg, server):
    text = msg.get("text", "")
    match = re.findall(r"~weather (.*)", text)
    if not match:
        return
    searchterm = match[0]
    return weather(searchterm.encode("utf8"))

on_bot_message = on_message
But executing the code gives the following error:
  File "/usr/local/lib/python2.7/dist-packages/fabulous-0.0.1-py2.7.egg/fabulous/services/weather.py", line 19, in weather
    " Celsius\nWind: " + soup.speed["name"] + ", Direction: " + soup.direction["name"] + "\n\n" + soup.weather["value"])
TypeError: 'NoneType' object has no attribute '__getitem__'
I can't figure out what the error is. Please help!
__getitem__ is called when you ask for a dictionary key, i.e. a['abc'] translates to a.__getitem__('abc'),
so in this case one of the soup attributes is None (speed, direction or weather).
Ensure that your r.text contains the data you want; simply print it:
print(r.text)
To list the structure of the parsed data:
for child in soup.findChildren():
    print child
Always assume your input data might be wrong: instead of doing soup.city, do soup.find('city'), since the tag might be missing:
city = soup.find('city')
if city is not None:
    city_name = city['name']
else:
    city_name = 'Error'  # or empty, or something else
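Applied to the line that raises, a small defensive helper might look like this (a sketch; the tag and attribute names are taken from the question's code):
def safe_attr(soup, tag_name, attr):
    # Return the attribute value if the tag exists and carries it, else a placeholder.
    tag = soup.find(tag_name)
    if tag is not None and tag.has_attr(attr):
        return tag[attr]
    return 'N/A'

temperature = safe_attr(soup, 'temperature', 'value')
wind = safe_attr(soup, 'speed', 'name')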

ArcGIS Server: Write properties of all services to a CSV file

I have 36 services running on ArcGIS Server and would like to export all properties of each service to a CSV file. I managed to write the code with the help of the ESRI help desk (http://resources.arcgis.com/en/help/main/10.2/index.html#//0154000005wt000000), adding the properties "maxImageHeight" and "maxImageWidth" to the request. However, when I run the code it starts to work, writes the properties of the first 22 services, but then it stops suddenly and returns:
Traceback (most recent call last):
  File "D:\SCRIPTS\Py-scripts\ArcPy\AGS - ArcPy\AGS_service_report_as_csv2.py", line 436, in <module>
    sys.exit(main(sys.argv[1:]))
  File "D:\SCRIPTS\Py-scripts\ArcPy\AGS - ArcPy\AGS_service_report_as_csv2.py", line 201, in main
    + "NA" + "\n"
KeyError: 'maxImageHeight'
It's odd because it already delivered the "maxImageHeight" property for the first services.
Code:
# Reads the following properties from services and writes them to a comma-delimited file:
# ServiceName, Folder, Type, Status, Min Instances, Max Instances, Max Waiting Time, Max Startup Time, Max Idle Time, Max Usage Time, KML,
# WMS, WFS, WCS, Max Records, Cluster, Cache Directory, Jobs Directory, Output Directory

# For HTTP calls
import httplib, urllib, json
# For system tools
import sys
# For reading passwords without echoing
import getpass
def main(argv=None):
    # Ask for admin/publisher user name and password
    username = raw_input("Enter user name: ")
    password = getpass.getpass("Enter password: ")
    # Ask for server name & port
    serverName = raw_input("Enter server name: ")
    serverPort = 6080
    # Get the location and the name of the file to be created
    resultFile = raw_input("Output File (the location and the name of the file to be created): ")
    # Get a token
    token = getToken(username, password, serverName, serverPort)
    # Get the root info
    serverURL = "/arcgis/admin/services/"
    #serverURL = "/arcgis/manager/services/"
    # This request only needs the token and the response formatting parameter
    params = urllib.urlencode({'token': token, 'f': 'json'})
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    # Connect to URL and post parameters
    httpConn = httplib.HTTPConnection(serverName, serverPort)
    httpConn.request("POST", serverURL, params, headers)
    # Read response
    response = httpConn.getresponse()
    if (response.status != 200):
        httpConn.close()
        print "Could not read folder information."
        return
    else:
        data = response.read()
        # Check that data returned is not an error object
        if not assertJsonSuccess(data):
            print "Error when reading server information. " + str(data)
            return
        else:
            print "Processed server information successfully. Now processing folders..."
    # Deserialize response into Python object
    dataObj = json.loads(data)
    httpConn.close()
    # Store the folders in a list to loop on
    folders = dataObj["folders"]
    # Remove the System and Utilities folders
    folders.remove("System")
    #folders.remove("Utilities")
    # Add an entry for the root folder
    folders.append("")
    # Create the summary file of services
    serviceResultFile = open(resultFile, 'w')
    #serviceResultFile.write("ServiceName,Folder,Type,Status,Min Instances,Max Instances,FeatureService,kml,wms,Max Records,Cluster,Cache Directory,Jobs Directory,Output Directory" + "\n")
    serviceResultFile.write("\
ServiceName,\
Folder,\
Type,\
MaxImageHeight,\
MaxImageWidth,\
Status,\
Min Instances,\
Max Instances,\
Max Waiting Time,\
Max Startup Time,\
Max Idle Time,\
Max Usage Time,\
FeatureService,\
kml,\
wms,\
wfs,\
wcs,\
Max Records,\
Cluster,\
Cache Directory,\
Jobs Directory,\
Output Directory" + "\n")
    # Loop on the found folders, discover the services and write the service information
    for folder in folders:
        # Determine if the loop is working on the root folder or not
        if folder != "":
            folder += "/"
        # Build the URL for the current folder
        folderURL = "/arcgis/admin/services/" + folder
        params = urllib.urlencode({'token': token, 'f': 'json'})
        headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
        # Connect to URL and post parameters
        httpConn = httplib.HTTPConnection(serverName, serverPort)
        httpConn.request("POST", folderURL, params, headers)
        # Read response
        response = httpConn.getresponse()
        if (response.status != 200):
            httpConn.close()
            print "Could not read folder information."
            return
        else:
            data = response.read()
            # Check that data returned is not an error object
            if not assertJsonSuccess(data):
                print "Error when reading folder information. " + str(data)
            else:
                print "Processed folder information successfully. Now processing services..."
                # Deserialize response into Python object
                dataObj = json.loads(data)
                httpConn.close()
                # Loop through each service in the folder
                for item in dataObj['services']:
                    if item["type"] == "GeometryServer":  # and folder == "":
                        # Build the Service URL
                        if folder:
                            sUrl = "/arcgis/admin/services/%s%s.%s" % (folder, item["serviceName"], item["type"])
                            statusUrl = "/arcgis/admin/services/%s%s.%s/status" % (folder, item["serviceName"], item["type"])
                        else:
                            sUrl = "/arcgis/admin/services/%s.%s" % (item["serviceName"], item["type"])
                            statusUrl = "/arcgis/admin/services/%s.%s/status" % (item["serviceName"], item["type"])
                        httpConn.request("POST", sUrl, params, headers)
                        # Get the response
                        servResponse = httpConn.getresponse()
                        readData = servResponse.read()
                        jsonOBJ = json.loads(readData)
                        # Build the Service URL to test the running status
                        # Submit the request to the server
                        httpConn.request("POST", statusUrl, params, headers)
                        servStatusResponse = httpConn.getresponse()
                        # Obtain the data from the response
                        readData = servStatusResponse.read()
                        jsonOBJStatus = json.loads(readData)
                        # Build the line to write to the output file
                        ln = str(jsonOBJ["serviceName"]) + ","\
                             + folder + ","\
                             + str(item["type"]) + ","\
                             + str(jsonOBJ["properties"]["maxImageHeight"]) + ","\
                             + str(jsonOBJ["properties"]["maxImageWidth"]) + ","\
                             + jsonOBJStatus['realTimeState'] + ","\
                             + str(jsonOBJ["minInstancesPerNode"]) + ","\
                             + str(jsonOBJ["maxInstancesPerNode"]) + ","\
                             + str(jsonOBJ["maxWaitTime"]) + ","\
                             + str(jsonOBJ["maxStartupTime"]) + ","\
                             + str(jsonOBJ["maxIdleTime"]) + ","\
                             + str(jsonOBJ["maxUsageTime"]) + ","\
                             + "NA" + ","\
                             + "NA" + ","\
                             + "NA" + ","\
                             + "NA" + ","\
                             + "NA" + ","\
                             + "NA" + ","\
                             + str(jsonOBJ["clusterName"]) + ","\
                             + "NA" + ","\
                             + "NA" + ","\
                             + "NA" + "\n"
                        # Write the results to the file
                        serviceResultFile.write(ln)
                        httpConn.close()
                    elif item["type"] == "SearchServer":  # and folder == "":
                        if folder:
                            sUrl = "/arcgis/admin/services/%s%s.%s" % (folder, item["serviceName"], item["type"])
                            statusUrl = "/arcgis/admin/services/%s%s.%s/status" % (folder, item["serviceName"], item["type"])
                        else:
                            sUrl = "/arcgis/admin/services/%s.%s" % (item["serviceName"], item["type"])
                            statusUrl = "/arcgis/admin/services/%s.%s/status" % (item["serviceName"], item["type"])
                        httpConn.request("POST", sUrl, params, headers)
                        # Get the response
                        servResponse = httpConn.getresponse()
                        readData = servResponse.read()
                        jsonOBJ = json.loads(readData)
                        # Submit the request to the server
                        httpConn.request("POST", statusUrl, params, headers)
                        # Get the response
                        servStatusResponse = httpConn.getresponse()
                        readData = servStatusResponse.read()
                        jsonOBJStatus = json.loads(readData)
                        # Build the line to write to the output file
                        ln = str(jsonOBJ["serviceName"]) + ","\
                             + folder + ","\
                             + str(item["type"]) + ","\
                             + str(jsonOBJ["properties"]["maxImageHeight"]) + ","\
                             + str(jsonOBJ["properties"]["maxImageWidth"]) + ","\
                             + jsonOBJStatus['realTimeState'] + ","\
                             + str(jsonOBJ["minInstancesPerNode"]) + ","\
                             + str(jsonOBJ["maxInstancesPerNode"]) + ","\
                             + str(jsonOBJ["maxWaitTime"]) + ","\
                             + str(jsonOBJ["maxStartupTime"]) + ","\
                             + str(jsonOBJ["maxIdleTime"]) + ","\
                             + str(jsonOBJ["maxUsageTime"]) + ","\
                             + "NA" + ","\
                             + "NA" + ","\
                             + "NA" + ","\
                             + "NA" + ","\
                             + "NA" + ","\
                             + "NA" + ","\
                             + str(jsonOBJ["clusterName"]) + ","\
                             + "NA" + ","\
                             + "NA" + ","\
                             + "NA" + "\n"
                        # Write the results to the file
                        serviceResultFile.write(ln)
                        httpConn.close()
                    ##### MapServer ########################################
                    elif item["type"] == "MapServer":
                        # Build the Service URL
                        if folder:
                            sUrl = "/arcgis/admin/services/%s%s.%s" % (folder, item["serviceName"], item["type"])
                        else:
                            sUrl = "/arcgis/admin/services/%s.%s" % (item["serviceName"], item["type"])
                        # Submit the request to the server
                        httpConn.request("POST", sUrl, params, headers)
                        # Get the response
                        servResponse = httpConn.getresponse()
                        readData = servResponse.read()
                        jsonOBJ = json.loads(readData)
                        # Build the Service URL to test the running status
                        if folder:
                            statusUrl = "/arcgis/admin/services/%s%s.%s/status" % (folder, item["serviceName"], item["type"])
                        else:
                            statusUrl = "/arcgis/admin/services/%s.%s/status" % (item["serviceName"], item["type"])
                        # Submit the request to the server
                        httpConn.request("POST", statusUrl, params, headers)
                        # Get the response
                        servStatusResponse = httpConn.getresponse()
                        readData = servStatusResponse.read()
                        jsonOBJStatus = json.loads(readData)
                        # Check for Map Cache
                        isCached = jsonOBJ["properties"]["isCached"]
                        if isCached == "true":
                            cacheDir = str(jsonOBJ["properties"]["cacheDir"])
                        else:
                            cacheDir = jsonOBJ["properties"]["isCached"]
                        if len(jsonOBJ["extensions"]) == 0:
                            # Build the line to write to the output file
                            ln = str(jsonOBJ["serviceName"]) + ","\
                                 + folder + ","\
                                 + str(item["type"]) + ","\
                                 + str(jsonOBJ["properties"]["maxImageHeight"]) + ","\
                                 + str(jsonOBJ["properties"]["maxImageWidth"]) + ","\
                                 + jsonOBJStatus['realTimeState'] + ","\
                                 + str(jsonOBJ["minInstancesPerNode"]) + ","\
                                 + str(jsonOBJ["maxInstancesPerNode"]) + ","\
                                 + "FeatServHolder" + ","\
                                 + "Disabled" + ","\
                                 + "Disabled" + ","\
                                 + str(jsonOBJ["properties"]["maxRecordCount"]) + ","\
                                 + str(jsonOBJ["clusterName"]) + ","\
                                 + cacheDir + ","\
                                 + "NA" + ","\
                                 + str(jsonOBJ["properties"]["outputDir"]) + "\n"
                        else:
                            # Extract the KML properties from the response
                            kmlProps = [mapKML for mapKML in jsonOBJ["extensions"] if mapKML["typeName"] == 'KmlServer']  #.items()[0][1] == 'KmlServer'
                            # Extract the WMS properties from the response
                            wmsProps = [mapWMS for mapWMS in jsonOBJ["extensions"] if mapWMS["typeName"] == 'WMSServer']  #.items()[0][1] == 'WMSServer'
                            # Extract the WFS properties from the response
                            wfsProps = [mapWFS for mapWFS in jsonOBJ["extensions"] if mapWFS["typeName"] == 'WFSServer']  #.items()[0][1] == 'WFSServer'
                            # Extract the WCS properties from the response
                            wcsProps = [mapWCS for mapWCS in jsonOBJ["extensions"] if mapWCS["typeName"] == 'WCSServer']  #.items()[0][1] == 'WCSServer'
                            # Extract the FeatureService properties from the response
                            featServProps = [featServ for featServ in jsonOBJ["extensions"] if featServ["typeName"] == 'FeatureServer']  #.items()[0][1] == 'FeatureServer'
                            if len(featServProps) > 0:
                                featureStatus = str(featServProps[0]["enabled"])
                            else:
                                featureStatus = "NA"
                            if len(kmlProps) > 0:
                                kmlStatus = str(kmlProps[0]["enabled"])
                            else:
                                kmlStatus = "NA"
                            if len(wmsProps) > 0:
                                wmsStatus = str(wmsProps[0]["enabled"])
                            else:
                                wmsStatus = "NA"
                            #MZ#
                            if len(wfsProps) > 0:
                                wfsStatus = str(wfsProps[0]["enabled"])
                            else:
                                wfsStatus = "NA"
                            #MZ#
                            if len(wcsProps) > 0:
                                wcsStatus = str(wcsProps[0]["enabled"])
                            else:
                                wcsStatus = "NA"
                            ln = str(jsonOBJ["serviceName"]) + ","\
                                 + folder + ","\
                                 + str(item["type"]) + ","\
                                 + str(jsonOBJ["properties"]["maxImageHeight"]) + ","\
                                 + str(jsonOBJ["properties"]["maxImageWidth"]) + ","\
                                 + jsonOBJStatus['realTimeState'] + ","\
                                 + str(jsonOBJ["minInstancesPerNode"]) + ","\
                                 + str(jsonOBJ["maxInstancesPerNode"]) + ","\
                                 + str(jsonOBJ["maxWaitTime"]) + ","\
                                 + str(jsonOBJ["maxStartupTime"]) + ","\
                                 + str(jsonOBJ["maxIdleTime"]) + ","\
                                 + str(jsonOBJ["maxUsageTime"]) + ","\
                                 + featureStatus + ","\
                                 + kmlStatus + ","\
                                 + wmsStatus + ","\
                                 + wfsStatus + ","\
                                 + wcsStatus + ","\
                                 + str(jsonOBJ["properties"]["maxRecordCount"]) + ","\
                                 + str(jsonOBJ["clusterName"]) + ","\
                                 + cacheDir + "," + "NA" + ","\
                                 + str(jsonOBJ["properties"]["outputDir"]) + "\n"
                        # Write the results to the file
                        serviceResultFile.write(ln)
                    else:
                        # Close the connection to the current service
                        httpConn.close()
    # Close the file
    serviceResultFile.close()
def getToken(username, password, serverName, serverPort):
    # Token URL is typically http://server[:port]/arcgis/admin/generateToken
    tokenURL = "/arcgis/admin/generateToken"
    params = urllib.urlencode({'username': username, 'password': password, 'client': 'requestip', 'f': 'json'})
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    # Connect to URL and post parameters
    httpConn = httplib.HTTPConnection(serverName, serverPort)
    httpConn.request("POST", tokenURL, params, headers)
    # Read response
    response = httpConn.getresponse()
    if (response.status != 200):
        httpConn.close()
        print "Error while fetching tokens from admin URL. Please check the URL and try again."
        return
    else:
        data = response.read()
        httpConn.close()
        # Check that data returned is not an error object
        if not assertJsonSuccess(data):
            return
        # Extract the token from it
        token = json.loads(data)
        return token['token']

# A function that checks that the input JSON object
# is not an error object.
def assertJsonSuccess(data):
    obj = json.loads(data)
    if 'status' in obj and obj['status'] == "error":
        print "Error: JSON object returns an error. " + str(obj)
        return False
    else:
        return True

if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
I've managed to get past the error by adding bufsize:
# Create the summary file of services
bufsize = 0  # 0 = unbuffered (Python 2 open()), so each line is flushed as soon as it is written
serviceResultFile = open(resultFile, 'w', bufsize)
Now it's complaining about how to handle the end of the file. Still working on this bit:
    sys.exit(main(sys.argv[1:]))
  File "D:/Cognos_Testing/Esri/python/get_mapsrv_Stats.py", line 378, in main
    + str(jsonOBJ["properties"]["outputDir"]) + "\n"
KeyError: 'maxImageHeight'
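The KeyError itself means the "properties" dict of some services simply has no maxImageHeight entry. A defensive lookup with dict.get (a sketch, not part of the original answer) would write a placeholder instead of raising:
# Fall back to "NA" for services that do not expose image-size properties.
props = jsonOBJ.get("properties", {})
maxImageHeight = str(props.get("maxImageHeight", "NA"))
maxImageWidth = str(props.get("maxImageWidth", "NA"))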
