I'm trying to make a Bitbucket API request from Python to return a list of my repos. It's responding, but the output includes "\n" characters. I assume my encoding is wrong, but I don't know how to fix it.
How do I decode the response into formatted JSON?
import urllib2

theurl = 'https://api.bitbucket.org/1.0/user/repositories/'
username = 'xxxxx'
password = 'xxxxx'
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, theurl, username, password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
urllib2.install_opener(opener)
pagehandle = urllib2.urlopen(theurl)
output = pagehandle.read()
responseH = output.decode('utf-8')
Why don't you try using python-bitbucket? Example below
from api import API
import datetime
api = API("username", "**password**")
repos = api.get_repositories()
for repo in repos:
    print "Name: %s" % repo.name
    print "Owner: %s" % repo.owner
    print "Website: %s" % repo.website
    print "Description: %s" % repo.description
    print "Created on: %s" % datetime.datetime.strftime(repo.created_on, "%c")
    print "Language: %s" % repo.language
    print "SCM: %s" % repo.scm
    for issue in repo.get_issues():
        # Yes, this works too!
        print "Issue title: %s" % issue.title
        print "Issue priority: %s" % issue.priority
        print "Issue content:\n%s\n\n" % issue.content
    for change in repo.get_changesets(limit=5):
        print "Revision/Node: %d:%s" % (change.revision, change.node)
        # Since change.timestamp is a datetime object, we can use formatting on it.
        print "Timestamp: %s" % datetime.datetime.strftime(change.timestamp, "%c")
        print "Commit message:\n%s" % change.message
        print "Affected files: %s" % len(change.files)
        for f in change.files:
            print f.filename
        print "\n"
I'm calling a REST API with basic authentication in an AWS Lambda function. This is my code
import json, os, base64
from urllib import request

def lambda_handler(event, context):
    retStatusCode = 0
    retBody = ""
    try:
        url = os.environ['URL']
        username = os.environ['USERNAME']
        password = os.environ['PASSWORD']
        requestURL = url + "/" + event['queueID'] + "/" + event['cli']
        #print ("QUEUEID IS: " + event['queueID'])
        #print ("CLI IS: " + event['cli'])
        #print ("URL IS: " + requestURL)
        req = request.Request(requestURL, method="POST")
        myStr = '%s:%s' % (username, password)
        myBytes = myStr.encode("utf-8")
        base64string = base64.b64encode(myBytes)
        req.add_header("Authorization", "Basic %s" % base64string)
        resp = request.urlopen(req)
        responseJSON = json.load(resp)
        retStatusCode = responseJSON["Result"]
        retBody = responseJSON["Message"]
    except Exception as e:
        retStatusCode = 500
        retBody = "An exception occurred: " + str(e)
    return {
        'statusCode': retStatusCode,
        'body': retBody
    }
However, I'm getting a "HTTP Error 401: Unauthorized" returned. If I call the API method in Postman with the same credentials, it returns data successfully, so I figure it must be something to do with the format of the header I'm adding, but just can't see what's wrong.
The problem is in this line:
req.add_header("Authorization", "Basic %s" % base64string)
According to the documentation, base64.b64encode is designed to "return the encoded bytes".
If you execute this code in the REPL, you'll see that the resulting header looks wrong: it mixes a string with a bytes object:
>>> "Basic %s" % base64string
"Basic b'aGVsbG86d29ybGQ='"
You can read more about Python's b' syntax here.
So you need to decode the bytes back to a UTF-8 string:
req.add_header("Authorization", "Basic %s" % base64string.decode('utf-8'))
The result now looks like a valid Authorization header:
>>> "Basic %s" % base64string.decode('utf-8')
'Basic aGVsbG86d29ybGQ='
I am using the pyzabbix module to query some data through the Zabbix API. The script reads through a text file that I specify with the -f switch and is supposed to report, for each line, either the host's data or that the host does not exist. However, when I run it, it only returns a result for the last line of the text file.
Example data file would be:
server1
server2
And it would only return:
host server2 exist, hostid : 4517, group: [u'Servergroup1', u'Servergroup2']
My code is:
import optparse
import sys
import traceback
from getpass import getpass
from core import ZabbixAPI
def get_options():
    usage = "usage: %prog [options]"
    OptionParser = optparse.OptionParser
    parser = OptionParser(usage)
    parser.add_option("-s","--server",action="store",type="string",\
        dest="server",help="(REQUIRED)Zabbix Server URL.")
    parser.add_option("-u", "--username", action="store", type="string",\
        dest="username",help="(REQUIRED)Username (Will prompt if not given).")
    parser.add_option("-p", "--password", action="store", type="string",\
        dest="password",help="(REQUIRED)Password (Will prompt if not given).")
    parser.add_option("-H","--hostname",action="store",type="string",\
        dest="hostname",help="(REQUIRED)hostname for hosts.")
    parser.add_option("-f","--file",dest="filename",\
        metavar="FILE",help="Load values from input file. Specify - for standard input Each line of file contains whitespace delimited: <hostname>")
    options,args = parser.parse_args()
    if not options.server:
        options.server = raw_input('server http:')
    if not options.username:
        options.username = raw_input('Username:')
    if not options.password:
        options.password = getpass()
    return options, args

def errmsg(msg):
    sys.stderr.write(msg + "\n")
    sys.exit(-1)

if __name__ == "__main__":
    options, args = get_options()
    zapi = ZabbixAPI(options.server,options.username, options.password)
    hostname = options.hostname
    file = options.filename
    if file:
        with open(file,"r") as f:
            host_list = f.readlines()
        for hostname in host_list:
            hostname = hostname.rstrip()
        try:
            hostinfo = zapi.host.get({"filter":{"host":hostname},"output":"hostid", "selectGroups": "extend", "selectParentTemplates": ["templateid","name"]})[0]
            hostid = hostinfo["hostid"]
            host_group_list = []
            host_template_list = []
            for l in hostinfo["groups"]:
                host_group_list.append(l["name"])
            for t in hostinfo["parentTemplates"]:
                host_template_list.append(t["name"])
            #print "host %s exist, hostid : %s, group: %s, template: %s " % (hostname, hostid, host_group_list, host_template_list)
            print "host %s exist, hostid : %s, group: %s" % (hostname, hostid, host_group_list)
        except:
            print "host not exist: %s" %hostname
    else:
        try:
            hostinfo = zapi.host.get({"filter":{"host":hostname},"output":"hostid", "selectGroups": "extend", "selectParentTemplates": ["templateid","name"]})[0]
            hostid = hostinfo["hostid"]
            host_group_list = []
            host_template_list = []
            for l in hostinfo["groups"]:
                host_group_list.append(l["name"])
            for t in hostinfo["parentTemplates"]:
                host_template_list.append(t["name"])
            print "host %s exist, hostid : %s, group: %s, template: %s " % (hostname, hostid, host_group_list, host_template_list)
        except:
            print "host not exist: %s" %hostname
Your try block is at the wrong indentation level.
Instead of
for hostname in host_list:
    hostname = hostname.rstrip()
try:
    ...
except:
    print "host not exist: %s" % hostname
It should be
for hostname in host_list:
    hostname = hostname.rstrip()
    try:
        ...
    except:
        print "host not exist: %s" % hostname
Your try block, which I think you want executed for each hostname, is not inside your for loop. So it is only executed after the for loop completes, at which point you have the hostname from the last line of the file.
Simplifying your code should make the problem easier to see:
for hostname in host_list:
    hostname = hostname.rstrip()

try:
    do_stuff(hostname)
except:
    print "host not exist: %s" % hostname
You're only doing the stuff in the try block once, with the last hostname that you found in the list.
To fix this, iterate over your list of hostnames and perform the lookup inside the loop, once for each hostname.
You can prevent this sort of issue by simplifying your code, in this case by extracting the procedure for processing a hostname into a well-named function (i.e., not do_stuff :) ). This makes the overall structure of the loop more readable and the execution flow more obvious.
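For example, a minimal sketch of that refactoring against the script above, reusing its zapi and host_list (process_host is an illustrative name, not part of pyzabbix):
def process_host(zapi, hostname):
    # Look up the host; raises IndexError if Zabbix returns no match.
    hostinfo = zapi.host.get({"filter": {"host": hostname},
                              "output": "hostid",
                              "selectGroups": "extend"})[0]
    groups = [g["name"] for g in hostinfo["groups"]]
    print "host %s exist, hostid : %s, group: %s" % (hostname, hostinfo["hostid"], groups)

for hostname in host_list:
    hostname = hostname.rstrip()
    try:
        process_host(zapi, hostname)   # the lookup now runs once per line of the file
    except:
        print "host not exist: %s" % hostname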
I am using the following function to upload an mp4 file to FTP:
def run(self):
    while True:
        status = "Failed"
        #logger.debug("Queue size: %s" %(str(self.queue.qsize())))
        (env, pfile) = self.queue.get()
        logger.debug("Thread %s Received %s and %s" %(str(self.threadID), pfile, env))
        metaName = "%s/%s.evs.xml" %(self.config[env]["upload_folder"], pfile)
        mediaName = "%s/%s.mp4" %(self.config[env]["upload_folder"], pfile)
        mediaName2 = "%s.mp4" %(pfile,)
        logger.info("Thread %s - Uploading metadata to %s FTP: %s" %(str(self.threadID), env, metaName))
        try:
            ur = uploadFTPMP(metaName, env)
            status = "Success"
        except:
            logger.debug("Thread %s - Uploading of metadata %s to %s failed" %(str(self.threadID), metaName, env))
            uploadComplete(pfile, status, env)
        if status == "Success":
            logger.info("Thread %s - Sleeping to allow cms to pick up xml" %(str(self.threadID)))
            time.sleep(90)
            logger.info( "Thread %s - Uploading mediafile %s to %s" %(str(self.threadID), mediaName, env))
            ur = uploadFTP(mediaName, env)
            status = "Success"
            filename = os.path.basename(mediaName)
            http_client.HTTPConnection.debuglevel = 1
            logging.basicConfig(level=logging.INFO)
            logging.basicConfig()
            logging.getLogger().setLevel(logging.INFO)
            requests_log = logging.getLogger("requests.packages.urllib3")
            requests_log.setLevel(logging.INFO)
            requests_log.propagate = True
            url = 'http://www.webdev.com/web/home/testPOST'
            headers = {'content-type': 'application/json'}
            payload = {'mediaName': mediaName2 ,'status': 'sucsses'}
            r = requests.post(url, data=json.dumps(payload), headers=headers)
            r.text
            r.status_code
            r.connection.close()
        uploadComplete(pfile, status, env)
        logger.info("Thread %s - Upload completed with status %s" %(str(self.threadID), status))
        self.queue.task_done()
I'm trying to rename the mp4 file before uploading by appending a timestamp, so the file name becomes video-%H%M%S.mp4, and then upload it under that name.
Then, when I perform the POST request, which as you can see currently sends two parameters, I need to add a third parameter containing the new timestamped name. Any tips on how to achieve this?
Here is my FTP function as well:
def uploadFTPMP(filepath, env):
    global config
    ftpsrv = config[env]["active"]
    ftpusr = config[env]["ftpuser"]
    ftppwd = config[env]["ftppass"]
    filename = os.path.basename(filepath)
    try:
        ftp = ftplib.FTP(ftpsrv)
        ftp.login(ftpusr, ftppwd)
    except:
        logger.error("Ftp connection error has occurred")
        raise
    else:
        f = open(filepath, "r")
        cmd = "STOR %s" %(filename)
        out = ftp.storbinary(cmd, f)
        f.close()
        ftp.quit()
        return out
You should add the datetime to the filename.
import datetime
medianame = 'somefile_{timestamp}.mp4'.format(
    timestamp=datetime.datetime.now().isoformat()
)
The datetime can also be formatted as you require e.g. datetime.datetime.now().strftime('%Y-%m-%d_%H%M'). The strftime() documentation has a list of accepted formatting directives that can be used.
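Applied to the run() method above, a minimal sketch of that idea: build the timestamped name once, rename the file on disk before calling uploadFTP, and include the new name in the POST body. The newName/newPath variables and the 'newMediaName' payload key are illustrative choices, not anything your receiving endpoint requires, and the snippet assumes datetime, os, json and requests are imported at module level.
# inside run(), after mediaName and mediaName2 have been built
timestamp = datetime.datetime.now().strftime("%H%M%S")
newName = "video-%s.mp4" % timestamp                               # e.g. video-142530.mp4
newPath = os.path.join(self.config[env]["upload_folder"], newName)
os.rename(mediaName, newPath)                                      # rename on disk before uploading
ur = uploadFTP(newPath, env)
payload = {'mediaName': mediaName2,
           'status': 'success',
           'newMediaName': newName}                                # third field carrying the timestamped name
r = requests.post(url, data=json.dumps(payload), headers=headers)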
I'm trying to make a generic python script to retrieve an arbitrary number of lists through the mailchimp api. However, the "pagination" feature isn't working. There are sixteen lists in my account and whatever value I try for offset=n&count=n, I get only those 16 records. Here is part of my code that fetches the lists:
#FETCH ALL REPORTS
baseurl = "https://" + dc + ".api.mailchimp.com/3.0/"
request = urllib2.Request(baseurl + "reports/")
base64string = base64.encodestring('%s:%s' % (username, key)).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
try:
    output = urllib2.urlopen(request).read()
    data = json.loads(output)
except:
    print "Error occurred. Make sure you entered the correct api key"
    exit()
createfile("allcampaigns.json", output)

psize, i = 10, 0
while(True):
    list_url = baseurl + "lists" + '?offset=' + str(psize * i) + '&count=' + str(psize)
    #list_url = baseurl + "lists+ '?offset=' + str(psize * i) + '&count=' + str(psize)
    print list_url
    request = urllib2.Request(list_url)
    base64string = base64.encodestring('%s:%s' % (username, key)).replace('\n', '')
    request.add_header("Authorization", "Basic %s" % base64string)
    output = urllib2.urlopen(request).read()
    createfile("alllists.json", output)
    #print output
    data.update(json.loads(output))
    cnt = len(data)
    print cnt, " lists retrieved."
    if cnt < psize: break  # cnt could also be zero if no records are returned
    i += 1
The issue turned out to be in my own code; there was nothing wrong with the MailChimp API, of course. On the returned JSON object, I was counting len(data) instead of len(data["lists"]). Fixed it and it started working!
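For reference, a minimal sketch of the corrected counting, under the assumption that each page of the response carries its records in a top-level "lists" array (as the /3.0/lists endpoint does): accumulate those records and stop when a page comes back short.
all_lists = []
psize, i = 10, 0
while True:
    list_url = baseurl + "lists" + '?offset=' + str(psize * i) + '&count=' + str(psize)
    request = urllib2.Request(list_url)
    request.add_header("Authorization", "Basic %s" % base64string)
    page = json.loads(urllib2.urlopen(request).read())
    all_lists.extend(page["lists"])        # accumulate the actual list records
    print len(all_lists), "lists retrieved."
    if len(page["lists"]) < psize:         # a short page means there are no more lists
        break
    i += 1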
While reading through the code samples for the YouTube API in Python, I encountered this line of code:
print 'Video category: %s' % entry.media.category[[]0].text
(https://developers.google.com/youtube/1.0/developers_guide_python, in Video entry contents section)
What does [[]0] mean? Or is it syntactically incorrect?
It is definitely a typo.
The correct line from their API example would be print 'Video category: %s' % entry.media.category[0].text:
def PrintEntryDetails(entry):
    print 'Video title: %s' % entry.media.title.text
    print 'Video published on: %s ' % entry.published.text
    print 'Video description: %s' % entry.media.description.text
    print 'Video category: %s' % entry.media.category[0].text
    print 'Video tags: %s' % entry.media.keywords.text
    print 'Video watch page: %s' % entry.media.player.url
    print 'Video flash player URL: %s' % entry.GetSwfUrl()
    print 'Video duration: %s' % entry.media.duration.seconds
Also, have a look at this issue: https://code.google.com/p/gdata-issues/issues/detail?id=3710
The answer from support there: "Thanks for the report! Looks like there are a few instances of this in our older docs. We'll look into it."