Python: String indices must be integers - python

I'm working on some Python code to automate github merge requests.
I found the following code below. When I run this, I get TypeError: string indices must be integers.
I've found several threads on here refrencing this error, but I'm not quit sure how to implement the fixes in the code.
#!/usr/bin/env python
import json
import requests
import datetime
OAUTH_KEY = "xxxxxxxxxxxx"
repos = ['my_app'] # Add all repo's you want to automerged here
ignore_branches = ['master', 'release', 'staging', 'development'] # Add 'master' here if you don't want to automerge into master
# Print merge/no-merge message to logfile
def print_message(merging):
if merging == True:
message = "Merging: "
else:
message = "Not merging: "
print message + str(pr_id) + " - " + user + " wants to merge " + head_ref + " into " + base_ref
# Merge the actual pull request
def merge_pr():
r = requests.put("https://api.github.com/repos/:owner/%s/pulls/%d/merge"%(repo,pr_id,),
data=json.dumps({"commit_message": "Auto_Merge"}),
auth=('token', OAUTH_KEY))
if "merged" in r.json() and r.json()["merged"]==True:
print "Merged: " + r.json()['sha']
else:
print "Failed: " + r.json()['message']
# Main
print datetime.datetime.now()
for repo in repos:
r = requests.get('https://api.github.com/repos/:owner/%s/pulls' % repo, auth=('token', OAUTH_KEY))
data = r.json()
for i in data:
head_ref=i["head"]["ref"]
base_ref=i["base"]["ref"]
user=i["user"]["login"]
pr_id = i["number"]
if base_ref in ignore_branches:
print_message(False)
else:
print_message(True)
merge_pr()

which line of code is showing a problem?
if it is this line:
'else:
message = "Not merging: "
print message + str(pr_id) + " - " + user + " wants to merge " + head_ref + " into " + base_ref'
then try putting this code right below
if merging == True:
message = "Merging: "
:
elif message == False:
message = "Not merging: "
print message + pr_id + " - " + user + " wants to merge " + head_ref + " into " + base_ref ''

Related

How to write Python and shell console outputs to a file in an order?

I am running an impdp statement form inside a Python File and I want to write the print statements in Python script in the logfile, which is being created in 'abccmd' using '>>' append symbol.
Two issues to be resolved:
The print commands are overwriting the logs generated using impdp commands
I want the logs to be in order in which they come in script (All the print commands are coming at top of the log file. Even the commands that are after the impdp statement in the function being called)
Also, I am using dynamic naming system for logfiles that are created.
def import_command_run(DP_WORKLOAD, dp_workload_cmd, imp_loop, vardate):
abccmd = 'impdp admin/DP_PDB_ADMIN_PASSWORD#DP_PDB_FULL_NAME SCHEMAS=ABC >>' + logdir + '/DP_IMP_' + DP_PDB_FULL_NAME[i] + '_' + DP_WORKLOAD + '_' + str(vardate) + '.log 2>&1'
defcmd = 'impdp admin/DP_PDB_ADMIN_PASSWORD#DP_PDB_FULL_NAME SCHEMAS=DEF >>' + logdir + '/DP_IMP_' + DP_PDB_FULL_NAME[i] + '_' + DP_WORKLOAD + '_' + str(vardate) + '.log 2>&1'
# any of the above commands
run_imp_cmd(eval(abccmd))
def run_imp_cmd(cmd):
output = subprocess.Popen([cmd], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
stdout,stderr = output.communicate()
return stdout
All the Print commands used in imp_workload() and the main() methods should come in exact order in the log file.
def imp_workload(DP_WORKLOAD, DP_DURATION_SECONDS, vardate):
imp_loop = 1
while DP_DURATION_SECONDS > 0:
print("------------------------------------------------------------------------------------------------------------------------------------------")
print(" PDB " + DP_PDB_FULL_NAME[i] + " for Workload " + DP_WORKLOAD + ": Import number " + str(imp_loop) + " Starts")
print("------------------------------------------------------------------------------------------------------------------------------------------")
duration = 0
print("\nImport is running for Time loop : " + str(imp_loop))
startImport = datetime.now()
start = time.time()
print("Start Time for the Import is : " + startImport.strftime("%Y-%m-%d %H:%M:%S"))
# Calling the above function here
import_command_run(DP_WORKLOAD, dp_workload_cmd, imp_loop, vardate)
time.sleep(60)
stop = time.time()
endImport = datetime.now()
print("Stop Time for the Import is : " + endImport.strftime("%Y-%m-%d %H:%M:%S"))
duration = stop - start
print("\nTotal Time elapsed for Data Pump Import Time loop " + str(imp_loop) + ": " + str(int(duration/3600)) + " hours " + str(int(((duration/60)%60))) + " minutes " + str(int((duration % 60))) + " seconds\n")
DP_DURATION_SECONDS = DP_DURATION_SECONDS - duration
if DP_DURATION_SECONDS>0:
print("\nData Pump Import will again run for: " + str(int(DP_DURATION_SECONDS)) + " seconds\n")
else:
print("\nDATA Pump Import has ended for the workload: " + DP_WORKLOAD + "\n")
imp_loop = imp_loop + 1
I am trying to use sys.stdout as you can see. But it is overwriting the logfile created by the impdp statement.
if __name__ == "__main__":
vardate = datetime.now().strftime("%d-%b-%Y-%I_%M_%S_%p")
# Running loop through each workload mentioned in the config file
for i in range((len(DP_PDB_FULL_NAME))):
print("\n==========================================================================================================================================")
print("\n Data Pump Workload has Started \n")
print("==========================================================================================================================================")
# Conversion of time form minutes to seconds
DP_DURATION_SECONDS = int(DP_DURATION) * 60
now = datetime.now()
print("Current Date and Time: " + now.strftime("%Y-%m-%d %H:%M:%S"))
print("\nData Pump Import will run for " + DP_DURATION + " minutes\n")
for DP_WORKLOAD in DP_WORKLOAD_NAME:
sys.stdout = open(logdir + '/DP_IMP_' + DP_PDB_FULL_NAME[i] + '_' + DP_WORKLOAD + '_' + str(vardate) + '.log', 'w')
p1 = multiprocessing.Process(target=imp_workload, args=(DP_WORKLOAD, DP_DURATION_SECONDS, vardate, ))
p1.start()
sys.stdout.close()
Please suggest a way to log them in a proper way.
I wonder this may be the result you want, anyway, you can try this one:
python -m abc.py > d:\output.txt
It makes output.txt in d: and it will record all the print results in the order they are called.

Using data from API in subsequent API calls

I should preface this with I am not a programmer and most of this code was not written by me. I unfortunately have a need and am trying to hack my way through this.
What I am trying to do is chain a few API calls together to ultimately get a list of IPs. What this script does is queries the API and pulls (and prints) a list of device IDs. The device IDs look like this:
akdjlfijoaidjfod
g9jkidfjlskdjf44
3jdhfj4hf9dfiiu4
The device IDs then need to be passed as a parameter in the next API call like this:
https://api.example.com/devices/entities/devices/v1?ids=akdjlfijoaidjfod&ids=g9jkidfjlskdjf44&ids=3jdhfj4hf9dfiiu4 and so on.
I dont know where to begin. Instead of printing the asset ids, I assume they should be stored as a parameter (or variable) and then appended to the URL. I tried doing that with "ID_LIST" but that didnt seem to work. Can you guys point me in the right direction?
import requests
import json
# Define API REST paths
BASE_URL = "https://api.example.com/"
OAUTH_URL_PART = "oauth2/token"
DEVICE_SEARCH = "devices/queries/devices/v1"
DEVICE_DETAILS = "devices/entities/devices/v1"
# Empty auth token to hold value for subsequent request
auth_Token = ""
# Section 1 - Authenticate to Example OAUTH
# Build a dictionary to hold the headers
headers = {
'Content-type': 'application/x-www-form-urlencoded',
'accept': 'application/json'
}
# Build a dictionary to holds the authentication data to be posted to get a token
auth_creds = {}
auth_creds['client_id'] = "<client_id>"
auth_creds['client_secret'] = "<client_secret>"
auth_creds['grant_type'] = "client_credentials"
# Call the API to get a Authentication token - NOTE the authentication creds
print("Requesting token from " + BASE_URL + OAUTH_URL_PART)
auth_response = requests.post(BASE_URL + OAUTH_URL_PART,data=auth_creds, headers=headers)
# Check if successful
if auth_response.status_code != 201:
# Output debug information
print("\n Return Code: " + str(auth_response.status_code) + " " + auth_response.reason)
print("Path: " + auth_response.request.path_url)
print("Headers: ")
print(auth_response.request.headers)
print("Body: " + auth_response.request.body)
print("\n")
print("Trace_ID: " + auth_response.json()['meta']['trace_id'])
else:
# Section 2 - Capture OAUTH token and store in headers for later use
print("Token Created")
# Capture the auth token for reuse in subsequent calls, by pulling it from the response
# Note this token can be reused multiple times until it expires after 30 mins
auth_Token = auth_response.json()['access_token']
headers = {
'authorization':'bearer ' + auth_Token,
'accept': 'application/json'
}
# Section 3 - Reuse authentication token to call other Example OAUTH2 APIs
# Build parameter dictionary
call_params = {}
call_params['offset'] ="" # Non-mandatory param
call_params['limit'] ="5000" # The number of results
call_params['sort'] ="" #
call_params['filter'] ="" # To exclude devices
# Call devices API
print("Searching Asset ID by getting from " + BASE_URL + DEVICE_SEARCH)
DEVICE_search_response = requests.get(BASE_URL + DEVICE_SEARCH,params=call_params,headers=headers)
#DEVICE_DETAILS_response = request.get(BASE_URL + DEVICE_DETAILS,headers=headers)
# Check for errors
if DEVICE_search_response.status_code != 200:
# Output debug information
print("\n Return Code: " + str(DEVICE_search_response.status_code) + " " + DEVICE_search_response.reason)
print("Path: " + DEVICE_search_response.request.path_url)
print("Headers: ")
print(DEVICE_search_response.request.headers)
print("Body: " + DEVICE_search_response.request.body)
print("\n")
print("Trace_ID: " + DEVICE_search_response.json()['meta']['trace_id'])
else:
# Iterate the results and print
result = DEVICE_search_response.json()
print("DEVICE found on " + str(len(result['resources'])) + " the following device id:")
for devices in result['resources']:
print(devices)
###########Part that is not working###########
DEVICE_DETAILS_response = requests.get(BASE_URL + DEVICE_DETAILS,headers=headers)
#ID_LIST = str(len(result['resources']).replace(",", "&ids=")
if DEVICE_DETAILS_response.status_code != 200:
# Output debug information
print("\n Return Code: " + str(DEVICE_DETAILS_response.status_code) + " " + DEVICE_DETAILS_response.reason)
print("Path: " + DEVICE_DETAILS_response.request.path_url)
print("Headers: ")
print(DEVICE_DETAILS_response.request.headers)
print("Body: " + DEVICE_DETAILS_response.request.body)
print("\n")
print("Trace_ID: " + DEVICE_DETAILS_response.json()['meta']['trace_id'])
else:
result = DEVICE_DETAILS_response.json()
print("Device Details Found")
for details in result['resources']:
print(details)
Hi to convert the strings in result['resources']:
['akdjlfijoaidjfod',
'g9jkidfjlskdjf44',
'3jdhfj4hf9dfiiu4']
to : https://api.example.com/devices/entities/devices/v1?ids=akdjlfijoaidjfod&ids=g9jkidfjlskdjf44&ids=3jdhfj4hf9dfiiu4
try this funciton:
def get_modified_url(mylist, myurl):
url = myurl + '?'
for idx, b in enumerate(mylist): # enumerate list to get index and element in the list
if idx > 0:
url += '&ids=' + b # append &ids= to url if not first device id
else:
url += 'ids=' + b # append ids= to url if first device id
return url
print(get_modified_url(result['resources'], BASE_URL + DEVICE_DETAILS ))
full code would be:
def get_modified_url(mylist, myurl):
url = myurl + '?'
for idx, b in enumerate(mylist): # enumerate list to get index and element in the list
if idx > 0:
url += '&ids=' + b # append &ids= to url if not first device id
else:
url += 'ids=' + b # append ids= to url if first device id
return url
device_list = []
DEVICE_search_response = requests.get(BASE_URL + DEVICE_SEARCH,params=call_params,headers=headers)
# Check for errors
if DEVICE_search_response.status_code != 200:
# Output debug information
print("\n Return Code: " + str(DEVICE_search_response.status_code) + " " + DEVICE_search_response.reason)
print("Path: " + DEVICE_search_response.request.path_url)
print("Headers: ")
print(DEVICE_search_response.request.headers)
print("Body: " + DEVICE_search_response.request.body)
print("\n")
print("Trace_ID: " + DEVICE_search_response.json()['meta']['trace_id'])
else:
# Iterate the results and print
result = DEVICE_search_response.json()
print("DEVICE found on " + str(len(result['resources'])) + " the following device id:")
for devices in result['resources']:
print(devices)
device_list.append(devices)
new_url = get_modified_url(device_list, BASE_URL + DEVICE_DETAILS )
DEVICE_DETAILS_response = requests.get(new_url, headers=headers)
if DEVICE_DETAILS_response.status_code != 200:
# Output debug information
print("\n Return Code: " + str(DEVICE_DETAILS_response.status_code) + " " + DEVICE_DETAILS_response.reason)
print("Path: " + DEVICE_DETAILS_response.request.path_url)
print("Headers: ")
print(DEVICE_DETAILS_response.request.headers)
print("Body: " + DEVICE_DETAILS_response.request.body)
print("\n")
print("Trace_ID: " + DEVICE_DETAILS_response.json()['meta']['trace_id'])
else:
result = DEVICE_DETAILS_response.json()
print("Device Details Found")
for details in result['resources']:
print(details)

using a variable from a json.load in another function

I am using an API for receiving TTN data from a device. i have created a function "def on_message(mqttc, obj, msg):", using a json.loads(msg.payload.decode('utf-8')) to receive mqtt data.
I want to pick the variable "node_data" and use in def devdata(). but it seems that i do not get anything but None.
import paho.mqtt.client as mqtt
import json
import pybase64
import binascii
APPEUI = "0018B24441524632"
APPID = "adeunis_fieldtester"
PSW = "ttn-account-v2.vuQczD1bmPoghhaKjlIHR-iHovHIbYMpfWSKosPAGaU"
# Call back functions
# gives connection message
def on_connect(mqttc, mosq, obj, rc):
print("Connected with result code:" + str(rc))
# subscribe for all devices of user
mqttc.subscribe('+/devices/+/up')
# gives message from device
def on_message(mqttc, obj, msg):
try:
x = json.loads(msg.payload.decode('utf-8'))
# metadata
app = x["app_id"]
device = x["dev_id"]
deveui = x["hardware_serial"]
port = x["port"]
confirmed = x["confirmed"]
counter = x["counter"]
payload_fields = x["payload_raw"]
datetime = x["metadata"]["time"]
gateways = x["metadata"]["gateways"]
frequency = x["metadata"]["frequency"]
modulation = x["metadata"]["modulation"]
data_rate = x["metadata"]["data_rate"]
air_time = x["metadata"]["airtime"]
coding_rate = x["metadata"]["coding_rate"]
for gw in gateways:
gateway_id = gw["gtw_id"]
timestamp = gw["timestamp"]
time = gw["time"]
channel = gw["channel"]
gwrssi = gw["rssi"]
gwsnr = gw["snr"]
# decoding the payload_field
payload_r = (pybase64.b64decode(payload_fields + "="))
# decoding the Payload_r to Byte-Ascii string
payload_h = binascii.hexlify(payload_r)
# Byte to tekst
node_data = (payload_h.decode("utf-8)")) #this is the variable i would like to use in devdata()
# Printing data, + str(payload_fields) + ", "
print(str(app) + ", " + str(device) + ", " + str(deveui) + ", " + str(port) + ", " + str(counter) + ", "
+ str(node_data) + ", " + str(modulation) + ", " + str(datetime) + ", " + str(frequency) + ", "
+ str(confirmed) + ", " + str(data_rate) + ", " + str(air_time) + ", " + str(coding_rate) + ", "
+ str(gateway_id) + ", " + str(timestamp) + ", " + str(time) + "," + str(channel) + ", "
+ str(gwrssi) + ", " + str(gwsnr))
return node_data #return data for use in devdata()
except Exception as e:
print(e)
pass
def devdata():
node_data = on_message() # trying to use this as the Variable
temperatur = int(node_data[2:4], 16)
print("temperatur =", temperatur)
`

TypeError: isfile() missing 1 required positional argument : 'remotepath'

with pysftp.Connection(ipaddr, username="uname", password="pass", cnopts=cnopts) as sftp:
sftp.put(uploc + ufile, "/home/pi/PIFTP/dloads/" + ufile)
checkfile = ("/home/pi/PIFTP/dloads/" + ufile)
chfile = pysftp.Connection.isfile(checkfile)
if chfile == True:
print (Style.BRIGHT + "[" + Fore.GREEN + "OK" + Fore.WHITE + "] ")
else:
print (Style.BRIGHT + Fore.RED + ipaddr + " is unacsessible")
As you can see I'm trying to check a file that is just uploaded. In this case "/home/pi/PIFTP/dloads/" + ufile is file's remote download path. What am I missing? Thanks.
Also file arrives before error.
You'll need to use the Connection instance sftp you have, not the class.
chfile = sftp.isfile(checkfile)

broken paths in Arc 10.2 unable to save

I am attempting to "replaceDataSource" with our new sde path. I am using v2.7 in Arc10.2. We have multiple direct connect path names and one service connect that has changed servers from using Oracle to Sql Server. My code works all the way through the print statements but then I get the error msg "unable to saveACopy, check my privileges". Please let me know if I'm on the right track with putting all of the various connect names into a list and then iterating through them the way I have written. Also, I have attempted every indent on mxd.saveACopy and del mxd but nothing after two weeks has seemed to work so I thought I'd finally ask for some geo geek wisdom!
Code:
import arcpy, os, glob
arcpy.env.workspace = "C:\Users\kmetivier\Documents\BrokenPaths\Folder5"
for root, subFolders, files in >os.walk(r"C:\Users\kmetivier\Documents\BrokenPaths\Folder5"):
for filename in files:
fullpath = os.path.join(root, filename)
basename, extension = os.path.splitext(fullpath)
if extension.lower() == ".mxd":
print "------------------------------"
print filename
#open the map document
mxd = arcpy.mapping.MapDocument(fullpath)
#get all the layers
for lyr in arcpy.mapping.ListLayers(mxd):
#get the source from the layer
if lyr.supports("datasource"):
pathList = ["Database Connections\PWDB.arvada.org.sde","Database >Connections\GIS - PWDB.sde","Database Connections\PROD - GIS.sde","Database Connections\DC >- PROD - GIS.sde","Database Connections\GIS to SDE1.sde"]
print "%s -> %s" % (lyr, pathList[0])
basename, extension = os.path.splitext (pathList[0])
if extension.lower() == ".sde":
#NEW SOURCE
datapath = r"Database Connections\GEODATA - GIS.sde"
#overwrite the old path
lyr.replaceDataSource(datapath, "SDE_WORKSPACE", "")
print "replaced " + pathList[0] + " with " + datapath
print "---------finished " + filename
mxd.saveACopy(filename +"_2")
del mxd
for lyr in arcpy.mapping.ListLayers(mxd):
if lyr.supports("SERVICEPROPERTIES"):
pathList1= r"(PWDB.arvada.org, 5151, {sde1}, {Database_authentication}, >{GDS}, {""}, {save_username_password}, {version}, {save_version_info}"
servProp = lyr.serviceProperties
print "Layer name:" + lyr.name
print "-----------------------------------------------------"
if lyr.serviceProperties["ServiceType"] != "SDE":
print "Service Type: " + servProp.get('ServiceType', 'N/A')
print " URL: " + servProp.get('URL', 'N/A')
print " Connection: " + servProp.get('Connection', 'N/A')
print " Server: " + servProp.get('Server', 'N/A')
print " Cache: " + str(servProp.get('Cache', 'N/A'))
print " User Name: " + servProp.get('UserName', 'N/A')
print " Password: " + servProp.get('Password', 'N/A')
print ""
if extension.lower() == ".sde":
#This is the NEW SOURCE that you want to point to
datapath1 = r"Database Connections\GEODATA - GIS.sde"
#replace the old path wih the new
lyr.replaceDataSource(pathList1, "SDE_WORKSPACE", "")
print "replaced " + pathList1 + " with " + datapath1
print "finished " + filename
mxd.saveACopy(filename +"_2")
else:
print "Service Type: " + servProp.get('ServiceType', 'N/A')
print " Database: " + servProp.get('Database', 'N/A')
print " Server: " + servProp.get('Server', 'N/A')
print " Service: " + servProp.get('Instance', 'N/A')
print " Version: " + servProp.get('Version', 'N/A')
print " User name: " + servProp.get('UserName', 'N/A')
print " Authentication: " + servProp.get('AuthenticationMode', >'N/A')
print ""
del mxd>>

Categories