Evernote Python API - Hitting rate limits

I've written a short piece of code that appends the tag names of my notes to the title, then removes all associated tags. When I run this against production, I hit the rate limit very quickly. Can someone help me optimise this code? Or should I request a special rate limit from Evernote?
Also, I get an error when a note has no tags. Is there an efficient way to get the number of tags on a note so I can avoid the error?
from evernote.api.client import EvernoteClient
from evernote.edam.notestore import NoteStore

dev_token = "dev_token"
client = EvernoteClient(token=dev_token, sandbox=False)

userStore = client.get_user_store()
user = userStore.getUser()
print
print user.username
print

noteStore = client.get_note_store()
notebooks = noteStore.listNotebooks()
for n in notebooks:
    print "Notebook = " + n.name + " GUID = " + n.guid

    filter = NoteStore.NoteFilter()
    filter.ascending = False
    filter.notebookGuid = n.guid

    spec = NoteStore.NotesMetadataResultSpec()
    spec.includeTitle = True
    spec.includeNotebookGuid = True
    spec.includeTagGuids = True

    notesMetadataList = noteStore.findNotesMetadata(filter, 0, 25, spec)
    for noteMetadata in notesMetadataList.notes:
        print "%s :: %s" % (noteMetadata.title, noteMetadata.guid)
        newNoteTitle = noteMetadata.title + " -- "
        for tagGuid in noteMetadata.tagGuids:
            tag = noteStore.getTag(tagGuid)
            tagName = tag.name
            print tagName
            newNoteTitle = newNoteTitle + " " + tagName
        print "newNoteTitle = " + newNoteTitle
        noteMetadata.title = newNoteTitle
        noteMetadata.tagGuids = []
        noteMetadata = noteStore.updateNote(noteMetadata)
        print noteMetadata.title
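Independent of rate-limit handling, two details in the loop above are worth noting (a sketch against the same SDK calls, untested here): tagGuids is typically None rather than an empty list when a note has no tags, and the per-tag getTag() calls can be replaced with a single listTags() call whose result is reused for every note, cutting the number of API requests dramatically.

# One API call for the whole account, instead of one getTag() per tag per note.
tag_names = dict((t.guid, t.name) for t in noteStore.listTags())

for noteMetadata in notesMetadataList.notes:
    guids = noteMetadata.tagGuids or []   # guard: tagGuids is None when a note has no tags
    print "note has %d tags" % len(guids)
    newNoteTitle = noteMetadata.title + " -- " + " ".join(tag_names[g] for g in guids)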

Here's how I deal with rate limiting: wrapping the EvernoteClient in a rate-limiting proxy (based on http://code.activestate.com/recipes/496741-object-proxying/).
from time import sleep

from evernote.api.client import EvernoteClient
from evernote.edam.error.ttypes import (EDAMSystemException, EDAMErrorCode)


def evernote_wait_try_again(f):
    """
    Wait out the mandated back-off period, then try again.
    http://dev.evernote.com/doc/articles/rate_limits.php
    """
    def f2(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except EDAMSystemException as e:
            if e.errorCode == EDAMErrorCode.RATE_LIMIT_REACHED:
                print("rate limit: {0} s. wait".format(e.rateLimitDuration))
                sleep(e.rateLimitDuration)
                print("wait over")
                return f(*args, **kwargs)
            raise  # don't swallow system exceptions that aren't rate-limit errors
    return f2


class RateLimitingEvernoteProxy(object):
    __slots__ = ["_obj"]

    def __init__(self, obj):
        object.__setattr__(self, "_obj", obj)

    def __getattribute__(self, name):
        return evernote_wait_try_again(
            getattr(object.__getattribute__(self, "_obj"), name))


_client = EvernoteClient(token=auth_token, sandbox=sandbox)
client = RateLimitingEvernoteProxy(_client)
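One caveat worth checking with this proxy: only attribute lookups on the proxy itself get wrapped, so objects returned by wrapped calls (for example the NoteStore returned by get_note_store()) come back unwrapped. If the calls that actually hit the rate limit are NoteStore calls, you may want to wrap that object too (same sketch-level suggestion):

# Wrap the note store as well, so findNotesMetadata/getTag/updateNote
# also get the retry-after-back-off behaviour.
noteStore = RateLimitingEvernoteProxy(_client.get_note_store())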

Related

Python: accessing the login directly without using "with ... as"

Here is the complete code. I am trying to use ForexConnect().get_history(...) instead of fx.get_history(...), because I do not want the line "with ForexConnect() as fx:". The last section of code below produces exceptions.
Why don't I want "with ForexConnect() as fx:"? Because once the "with" block ends, the session is logged out. My idea is to stay in the session after logging in once, so I do not want to use "with ForexConnect() as fx:".
import argparse

import pandas as pd
from forexconnect import ForexConnect, fxcorepy

import common_samples

parser = False


def parse_args():
    parser = argparse.ArgumentParser(description='Process command parameters.')
    common_samples.add_main_arguments(parser)
    common_samples.add_instrument_timeframe_arguments(parser)
    common_samples.add_date_arguments(parser)
    common_samples.add_max_bars_arguments(parser)
    args = parser.parse_args()
    return args


def main():
    if parser == False:
        # args = parse_args()
        str_user_id = 'Dxxxx'
        str_password = 'xxxxx'
        str_url = "http://www.fxcorporate.com/Hosts.jsp"
        str_connection = 'Demo'
        str_session_id = 'None'
        str_pin = 'None'
        str_instrument = 'USOil'
        str_timeframe = 'W1'
        quotes_count = 5
        date_from = None
        date_to = None
    else:
        args = parse_args()
        str_user_id = args.l
        str_password = args.p
        str_url = args.u
        str_connection = args.c
        str_session_id = args.session
        str_pin = args.pin
        str_instrument = args.i
        str_timeframe = args.timeframe
        quotes_count = args.quotescount
        date_from = args.datefrom
        date_to = args.dateto

    with ForexConnect() as fx:
        try:
            fx.login(str_user_id, str_password, str_url,
                     str_connection, str_session_id, str_pin,
                     common_samples.session_status_changed)
            print("")
            print("Requesting a price history...")
            print(str_instrument, str_timeframe, date_from, date_to, quotes_count)
            history = fx.get_history(str_instrument, str_timeframe, date_from, date_to, quotes_count)
            current_unit, _ = ForexConnect.parse_timeframe(str_timeframe)
            date_format = '%d.%m.%Y %H:%M:%S'
            print("print history ", history)
            df = pd.DataFrame(history, columns=['Date', 'BidOpen', 'BidHigh', 'BidLow', 'BidClose',
                                                'AskOpen', 'AskHigh', 'AskLow', 'AskClose', 'Volume'])
            print(df)
            if current_unit == fxcorepy.O2GTimeFrameUnit.TICK:
                print("Date, Bid, Ask")
                print(history.dtype.names)
                for row in history:
                    print("{0:s}, {1:,.5f}, {2:,.5f}".format(
                        pd.to_datetime(str(row['Date'])).strftime(date_format), row['Bid'], row['Ask']))
            else:
                print("Date, BidOpen, BidHigh, BidLow, BidClose, Volume")
                for row in history:
                    print("{0:s}, {1:,.5f}, {2:,.5f}, {3:,.5f}, {4:,.5f}, {5:d}".format(
                        pd.to_datetime(str(row['Date'])).strftime(date_format), row['BidOpen'], row['BidHigh'],
                        row['BidLow'], row['BidClose'], row['Volume']))
        except Exception as e:
            common_samples.print_exception(e)
        try:
            fx.logout()
        except Exception as e:
            common_samples.print_exception(e)


if __name__ == "__main__":
    main()
    print("")
    input("Done! Press enter key to exit\n")
Here I want to log in once and stay in the logged-in session forever. With the code below this works fine, but the problem is that once the "with" block is over, the session is disconnected.
with ForexConnect() as fx:
    try:
        fx.login(str_user_id, str_password, str_url,
                 str_connection, str_session_id, str_pin,
                 common_samples.session_status_changed)
        history = fx.get_history(str_instrument, str_timeframe, date_from, date_to, quotes_count)
        current_unit, _ = ForexConnect.parse_timeframe(str_timeframe)
To stay in the session I tried the code below, without using "with ... as". The login is successful, but I could not get the data from history = ForexConnect().get_history(...).
Failing code:
ForexConnect().login(str_user_id, str_password, str_url,
                     str_connection, str_session_id, str_pin,
                     common_samples.session_status_changed)
history = ForexConnect().get_history(str_instrument, str_timeframe, date_from, date_to, quotes_count)  # this line fails
How can I make that last line work without any exception, without using the "with ... as" keywords? What is the alternative to "with ... as" here, and why does history = ForexConnect().get_history(...) give an error?
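The likely culprit is plain Python semantics rather than ForexConnect itself: every ForexConnect() expression creates a brand-new, not-logged-in instance, so the get_history() call above runs on a different object from the one that logged in. A minimal sketch of the usual fix, keeping a single instance alive and logging out explicitly (assuming the context manager does nothing you need on exit beyond logout):

fx = ForexConnect()  # create ONE instance and keep the reference
fx.login(str_user_id, str_password, str_url,
         str_connection, str_session_id, str_pin,
         common_samples.session_status_changed)

# The session stays alive for as long as `fx` is kept around.
history = fx.get_history(str_instrument, str_timeframe, date_from, date_to, quotes_count)

# ... later, when the session is really finished:
fx.logout()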

Flask Python: Unable to get parameters using request.args

I want to receive some parameters from an external web app. I created an API with Flask that queries data from MySQL. I am able to query the data if I give a fixed input, but not when using request.args. I tried both request.args['name'] and request.args.get('name'), but both return the output of the exception path.
Below is my current code. I have commented out the fixed inputs I used.
from flask import Flask, jsonify, abort, make_response, request, render_template
import MySQLdb
import MySQLdb.cursors

app = Flask(__name__)  # missing in the original snippet


@app.route('/KLSE/search', methods=['GET'])
def KLSEsearch():
    db = MySQLdb.connect(host='vinus.mysql.pythonanywhere-services.com', user='vinus',
                         passwd='Vindex2016', db='vinus$default',
                         cursorclass=MySQLdb.cursors.DictCursor)
    curs = db.cursor()
    # name = 'P'
    # macd = 'H'
    # volumeMin = '_'
    # volumeMax = '_'
    # stoch = 'H1'
    # bollinger = 'H'
    # rsi = 'H1'
    # atr = 'LV'
    # trade = 'HOLD'
    # limit = 3
    # offSet = 1
    name = request.args.get('name')
    volumeMin = request.args['volumeMin']
    volumeMax = request.args['volumeMax']
    macd = request.args['macd']
    stoch = request.args['stoch']
    bollinger = request.args['bollinger']
    rsi = request.args['rsi']
    atr = request.args['atr']
    trade = request.args['trade']
    limit = request.args['limit']
    offSet = request.args['offSet']

    query0 = "SELECT * FROM KLSE WHERE Stock LIKE %s AND "  # note: %s must not be quoted; the driver adds quoting
    # query1 = "(Vindex BETWEEN (IF(%s='_',-5000,%s)) AND (IF(%s='_',5000,%s))) AND "
    query2 = "(Volume_changes_pc BETWEEN (IF (%s='_',-5000,%s)) AND (IF(%s='_',5000,%s))) AND "
    query3 = "MACD LIKE %s AND "
    query4 = "STOCH LIKE %s AND "
    query5 = "BOLLINGER LIKE %s AND "
    query6 = "RSI LIKE %s AND "
    query7 = "ATR LIKE %s AND "
    query8 = "TRADE LIKE %s LIMIT %s OFFSET %s"
    query = query0 + query2 + query3 + query4 + query5 + query6 + query7 + query8
    input = (name + "%", volumeMin, volumeMin, volumeMax, volumeMax,
             macd, stoch, bollinger, rsi, atr, trade, limit, offSet)
    try:
        curs.execute(query, input)
        g = curs.fetchall()
    except Exception:
        return 'Error: unable to fetch items'
    # return "hihi"
    return jsonify({'Stock': g})
The output with fixed values is below; I think it shows the query to MySQL is correct.
http://vinus.pythonanywhere.com/KLSE/search1
For user-supplied values, which use the args:
http://vinus.pythonanywhere.com/KLSE/search?atr=%&bollinger=%&macd=%&name=M&rsi=%&stoch=%&volumeMax=&volumeMin=&trade=HOLD&limit=5&offSet=1
What is the right way to get the parameters? volumeMin, volumeMax, limit and offSet are floats and integers.
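Since request.args values are always strings, the numeric parameters need converting before they reach the SQL driver (a string bound to LIMIT/OFFSET makes MySQL reject the query). Werkzeug's MultiDict.get accepts a type callable for exactly this; a sketch with arbitrary defaults:

limit = request.args.get('limit', default=10, type=int)    # default used if missing or not castable
offSet = request.args.get('offSet', default=0, type=int)
volumeMin = request.args.get('volumeMin', type=float)      # None if absent or empty
volumeMax = request.args.get('volumeMax', type=float)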
You have to serialize your data first.
def serialize(g):
    return {
        "id": g.id,
        "volumeMin": g.name,
        "volumeMax": g.address,
        "macd": g.city,
        "stoch": g.state,
        "zipCode": g.zipCode,
        "bollinger": g.bollinger,
    }


@app.route("/KLSE/search/.json")
def stock_json():
    query = your.db.query()
    return jsonify(Stock=[serialize(i) for i in query])

Scrapy upload file

I am making a form request to a website using Scrapy. The form requires uploading a PDF file; how can I do that in Scrapy? I am trying this:
FormRequest(url, callback=self.parseSearchResponse, method="POST",
            formdata={'filename': 'abc.xyz', 'file': 'path to file/abc.xyz'})
At this very moment Scrapy has no built-in support for uploading files.
File uploading via forms in HTTP was specified in RFC 1867. According to the spec, an HTTP request with Content-Type: multipart/form-data is required (in your code it would be application/x-www-form-urlencoded).
To achieve file uploading with Scrapy, you would need to:
Get familiar with the basic concepts of HTTP file uploading.
Start with scrapy.Request (instead of FormRequest).
Give it a proper Content-Type header value.
Build the request body yourself (see the sketch after this answer).
See also: How does HTTP file upload work?
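A minimal sketch of those steps, building the multipart body by hand (the helper name, boundary generation and field layout are illustrative, not part of Scrapy's API):

import uuid
from scrapy import Request

def multipart_request(url, fields, files, callback):
    """fields: {name: text value}; files: {name: (filename, raw bytes, mimetype)}."""
    boundary = uuid.uuid4().hex
    lines = []
    for name, value in fields.items():
        lines += ['--' + boundary,
                  'Content-Disposition: form-data; name="%s"' % name,
                  '', value]
    for name, (filename, data, mimetype) in files.items():
        lines += ['--' + boundary,
                  'Content-Disposition: form-data; name="%s"; filename="%s"' % (name, filename),
                  'Content-Type: %s' % mimetype,
                  '', data]
    lines.append('--' + boundary + '--')
    # encode text parts; file contents are already bytes
    body = b'\r\n'.join(x if isinstance(x, bytes) else x.encode('utf-8') for x in lines)
    return Request(url, method='POST', body=body, callback=callback,
                   headers={'Content-Type': 'multipart/form-data; boundary=' + boundary})

Usage would look roughly like: yield multipart_request(url, {'filename': 'abc.pdf'}, {'file': ('abc.pdf', open('path to file/abc.pdf', 'rb').read(), 'application/pdf')}, self.parseSearchResponse).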
I just spent an entire day trying to figure out how to implement this.
Finally, I came upon a Scrapy pull request from 2016 that was never merged, with an implementation of a multipart form request:
from scrapy import FormRequest
from six.moves.urllib.parse import urljoin, urlencode
import lxml.html
from parsel.selector import create_root_node
import six
import string
import random
from scrapy.http.request import Request
from scrapy.utils.python import to_bytes, is_listlike
from scrapy.utils.response import get_base_url


class MultipartFormRequest(FormRequest):
    def __init__(self, *args, **kwargs):
        formdata = kwargs.pop('formdata', None)
        kwargs.setdefault('method', 'POST')
        super(MultipartFormRequest, self).__init__(*args, **kwargs)

        content_type = self.headers.setdefault(b'Content-Type', [b'multipart/form-data'])[0]
        method = kwargs.get('method').upper()
        if formdata and method == 'POST' and content_type == b'multipart/form-data':
            items = formdata.items() if isinstance(formdata, dict) else formdata
            self._boundary = ''
            # encode the data using multipart spec
            self._boundary = to_bytes(''.join(
                random.choice(string.digits + string.ascii_letters) for i in range(20)), self.encoding)
            self.headers[b'Content-Type'] = b'multipart/form-data; boundary=' + self._boundary
            request_data = _multpart_encode(items, self._boundary, self.encoding)
            self._set_body(request_data)


class MultipartFile(object):
    def __init__(self, name, content, mimetype='application/octet-stream'):
        self.name = name
        self.content = content
        self.mimetype = mimetype


def _get_form_url(form, url):
    if url is None:
        return urljoin(form.base_url, form.action)
    return urljoin(form.base_url, url)


def _urlencode(seq, enc):
    values = [(to_bytes(k, enc), to_bytes(v, enc))
              for k, vs in seq
              for v in (vs if is_listlike(vs) else [vs])]
    return urlencode(values, doseq=1)


def _multpart_encode(items, boundary, enc):
    body = []
    for name, value in items:
        body.append(b'--' + boundary)
        if isinstance(value, MultipartFile):
            file_name = value.name
            content = value.content
            content_type = value.mimetype
            body.append(
                b'Content-Disposition: form-data; name="' + to_bytes(name, enc) +
                b'"; filename="' + to_bytes(file_name, enc) + b'"')
            body.append(b'Content-Type: ' + to_bytes(content_type, enc))
            body.append(b'')
            body.append(to_bytes(content, enc))
        else:
            body.append(b'Content-Disposition: form-data; name="' + to_bytes(name, enc) + b'"')
            body.append(b'')
            body.append(to_bytes(value, enc))
    body.append(b'--' + boundary + b'--')
    return b'\r\n'.join(body)


def _get_form(response, formname, formid, formnumber, formxpath):
    """Find the form element"""
    root = create_root_node(response.text, lxml.html.HTMLParser,
                            base_url=get_base_url(response))
    forms = root.xpath('//form')
    if not forms:
        raise ValueError("No <form> element found in %s" % response)

    if formname is not None:
        f = root.xpath('//form[@name="%s"]' % formname)
        if f:
            return f[0]

    if formid is not None:
        f = root.xpath('//form[@id="%s"]' % formid)
        if f:
            return f[0]

    # Get form element from xpath, if not found, go up
    if formxpath is not None:
        nodes = root.xpath(formxpath)
        if nodes:
            el = nodes[0]
            while True:
                if el.tag == 'form':
                    return el
                el = el.getparent()
                if el is None:
                    break
        encoded = formxpath if six.PY3 else formxpath.encode('unicode_escape')
        raise ValueError('No <form> element found with %s' % encoded)

    # If we get here, it means that either formname was None
    # or invalid
    if formnumber is not None:
        try:
            form = forms[formnumber]
        except IndexError:
            raise IndexError("Form number %d not found in %s" %
                             (formnumber, response))
        else:
            return form


def _get_inputs(form, formdata, dont_click, clickdata, response):
    try:
        formdata = dict(formdata or ())
    except (ValueError, TypeError):
        raise ValueError('formdata should be a dict or iterable of tuples')

    inputs = form.xpath('descendant::textarea'
                        '|descendant::select'
                        '|descendant::input[not(@type) or @type['
                        ' not(re:test(., "^(?:submit|image|reset)$", "i"))'
                        ' and (../@checked or'
                        '  not(re:test(., "^(?:checkbox|radio)$", "i")))]]',
                        namespaces={
                            "re": "http://exslt.org/regular-expressions"})
    values = [(k, u'' if v is None else v)
              for k, v in (_value(e) for e in inputs)
              if k and k not in formdata]

    if not dont_click:
        clickable = _get_clickable(clickdata, form)
        if clickable and clickable[0] not in formdata and not clickable[0] is None:
            values.append(clickable)

    values.extend(formdata.items())
    return values


def _value(ele):
    n = ele.name
    v = ele.value
    if ele.tag == 'select':
        return _select_value(ele, n, v)
    return n, v


def _select_value(ele, n, v):
    multiple = ele.multiple
    if v is None and not multiple:
        # Match browser behaviour on simple select tag without options selected
        # And for select tags without options
        o = ele.value_options
        return (n, o[0]) if o else (None, None)
    elif v is not None and multiple:
        # This is a workaround for a bug in lxml, fixed in 2.3.1
        # fix https://github.com/lxml/lxml/commit/57f49eed82068a20da3db8f1b18ae00c1bab8b12#L1L1139
        selected_options = ele.xpath('.//option[@selected]')
        v = [(o.get('value') or o.text or u'').strip() for o in selected_options]
    return n, v


def _get_clickable(clickdata, form):
    """
    Returns the clickable element specified in clickdata,
    if the latter is given. If not, it returns the first
    clickable element found.
    """
    clickables = [
        el for el in form.xpath(
            'descendant::*[(self::input or self::button)'
            ' and re:test(@type, "^submit$", "i")]'
            '|descendant::button[not(@type)]',
            namespaces={"re": "http://exslt.org/regular-expressions"})
    ]
    if not clickables:
        return

    # If we don't have clickdata, we just use the first clickable element
    if clickdata is None:
        el = clickables[0]
        return (el.get('name'), el.get('value') or '')

    # If clickdata is given, we compare it to the clickable elements to find a
    # match. We first look to see if the number is specified in clickdata,
    # because that uniquely identifies the element
    nr = clickdata.get('nr', None)
    if nr is not None:
        try:
            el = list(form.inputs)[nr]
        except IndexError:
            pass
        else:
            return (el.get('name'), el.get('value') or '')

    # We didn't find it, so now we build an XPath expression out of the other
    # arguments, because they can be used as such
    xpath = u'.//*' + \
            u''.join(u'[@%s="%s"]' % c for c in six.iteritems(clickdata))
    el = form.xpath(xpath)
    if len(el) == 1:
        return (el[0].get('name'), el[0].get('value') or '')
    elif len(el) > 1:
        raise ValueError("Multiple elements found (%r) matching the criteria "
                         "in clickdata: %r" % (el, clickdata))
    else:
        raise ValueError('No clickable element matching clickdata: %r' % (clickdata,))
This is the code I used to call the request (in my case I needed to upload an image):
with open(img_path, 'rb') as file:
    img = file.read()

file_name = os.path.basename(img_path)
multipart_file = MultipartFile(file_name, img, "image/png")

form_data = {
    "param": "value",  # this is an example of a text parameter
    "PicUpload": multipart_file
}
yield MultipartFormRequest(url=upload_url, formdata=form_data,
                           callback=self.my_callback)
It's a shame that so much time has passed and Scrapy still doesn't have a built-in way to do this, especially since someone wrote a very simple implementation years ago.

Configure auto reload template and enable bytecode cache for jinja2 in appengine

How do I configure jinja2 in App Engine to:
Auto-reload when a template is updated.
Enable a bytecode cache that can be shared among instances. I would prefer jinja2 to produce bytecode when compiling a template and store it in the datastore, so the next instance loads the bytecode instead of compiling the template again.
I have added the bytecode cache like this, using the App Engine memcache Client():
loader = dynloaders.DynLoader()  # init function loader
bcc = MemcachedBytecodeCache(memcache.Client(), prefix='jinja2/bytecode/', timeout=None)
return Environment(auto_reload=True, cache_size=100, loader=FunctionLoader(loader.load_dyn_all),
                   bytecode_cache=bcc)
My function loader:
def html(self, cid):
    def _html_txt_up_to_date():  # closure to check if template is up to date
        return CMSUpdates.check_no_update(cid, template.modified)

    template = ndb.Key('Templates', cid, parent=self.parent_key).get()
    if not template:
        logging.error('DynLoader (HTML/TXT): %s' % cid)
        return None  # raises TemplateNotFound exception
    return template.content, None, _html_txt_up_to_date
The template model uses modified = ndb.DateTimeProperty(auto_now=True).
The closure function:
class CMSUpdates(ndb.Model):
    updates = ndb.JsonProperty()

    @classmethod
    def check_no_update(cls, cid, cid_modified):
        cms_updates = cls.get_or_insert('cms_updates', updates=dict()).updates
        if cid in cms_updates:  # cid modified has dt microseconds
            if cid_modified >= datetime.strptime(cms_updates[cid], '%Y-%m-%d %H:%M:%S'):
                if (datetime.now() - timedelta(days=1)) > cid_modified:
                    del cms_updates[cid]
                    cls(id='cms_updates', updates=cms_updates).put_async()
                return True
            return False  # reload the template
        return True
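For reference, the loader contract the html() method above is implementing is jinja2's documented FunctionLoader behaviour: the load function returns a (source, filename, uptodate) triple, and jinja2 calls uptodate() on each template access, recompiling only when it returns False. A stripped-down sketch with hypothetical helpers:

import jinja2

def load_func(name):
    source = fetch_source(name)          # hypothetical: read the template from your store
    def uptodate():
        return not source_changed(name)  # hypothetical: cheap staleness check
    return source, name, uptodate

env = jinja2.Environment(loader=jinja2.FunctionLoader(load_func))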
It took me a few weeks to find a solution, but I finally figured it out and would like to share my code. There are 4 Python source files:
TemplateEngine.py, ContentRenderer.py, TestContent.py and Update_Template.py
File: TemplateEngine.py
Note:
I use now = datetime.utcnow() + timedelta(hours=8) because my timezone is GMT+8.
You must use ndb.BlobProperty to store the bytecode; ndb.TextProperty will not work!
import logging
from datetime import datetime, timedelta

import jinja2
from google.appengine.ext import ndb


class SiteTemplates(ndb.Model):
    name = ndb.StringProperty(indexed=True, required=True)
    data = ndb.TextProperty()
    uptodate = ndb.BooleanProperty(required=True)


class SiteTemplateBytecodes(ndb.Model):
    key = ndb.StringProperty(indexed=True, required=True)
    data = ndb.BlobProperty(required=True)
    mod_datetime = ndb.DateTimeProperty(required=True)


class LocalCache(jinja2.BytecodeCache):
    def load_bytecode(self, bucket):
        q = SiteTemplateBytecodes.query(SiteTemplateBytecodes.key == bucket.key)
        if q.count() > 0:
            r = q.get()
            bucket.bytecode_from_string(r.data)

    def dump_bytecode(self, bucket):
        now = datetime.utcnow() + timedelta(hours=8)
        q = SiteTemplateBytecodes.query(SiteTemplateBytecodes.key == bucket.key)
        if q.count() > 0:
            r = q.get()
            r.data = bucket.bytecode_to_string()
            r.mod_datetime = now
        else:
            r = SiteTemplateBytecodes(key=bucket.key, data=bucket.bytecode_to_string(), mod_datetime=now)
        r.put()


def Update_Template_Source(tn, source):
    try:
        q = SiteTemplates.query(SiteTemplates.name == tn)
        if q.count() == 0:
            u = SiteTemplates(name=tn, data=source, uptodate=False)  # was "mkiniTemplates", a leftover name
        else:
            u = q.get()
            u.name = tn
            u.data = source
            u.uptodate = False
        u.put()
        return True
    except Exception, e:
        logging.exception(e)
        return False


def Get_Template_Source(tn):
    uptodate = False

    def Template_Uptodate():
        return uptodate

    try:
        q = SiteTemplates.query(SiteTemplates.name == tn)
        if q.count() > 0:
            r = q.get()
            uptodate = r.uptodate
            if r.uptodate == False:
                r.uptodate = True
                r.put()
            return r.data, tn, Template_Uptodate
        else:
            return None
    except Exception, e:
        logging.exception(e)
        return None
File: ContentRenderer.py
Note: It is very important to set cache_size=0, otherwise the bytecode cache is effectively disabled. Most likely this is because Jinja2's in-memory template cache is consulted first: with cache_size > 0, a compiled template stays in instance memory and the bytecode cache is never asked again.
import jinja2

from TemplateEngine import Get_Template_Source, LocalCache


def Render(tn, tags):
    global te
    return te.get_template(tn).render(tags)  # Environment has no Render(); use get_template().render()

bcc = LocalCache()
te = jinja2.Environment(loader=jinja2.FunctionLoader(Get_Template_Source), cache_size=0,
                        extensions=['jinja2.ext.autoescape'], bytecode_cache=bcc)
File: Update_Template.py
Note: Use Update_Template_Source() to update template source to datastore.
from TemplateEngine import Update_Template_Source

template_source = '<html><body>hello world to {{title}}!</body></html>'

if Update_Template_Source('my-template.html', template_source):
    print 'template is updated'
else:
    print 'error when updating template source'
File: TestContent.py
Note: Do some test
from ContentRenderer import Render

print Render('my-template.html', {'title': 'human'})
# prints: 'hello world to human!'
You will see that even with more than 20 instances in your application, latency does not increase, even when you update your template; the template source propagates to all instances within 5 to 10 seconds.

Using a class and its methods from another class in Python

I have a couple of classes in jira.py; here are two of them as samples:
class JiraCommand:
    name = "<default>"
    aliases = []
    summary = "<--- no summary --->"
    usage = ""
    mandatory = ""
    commands = None

    def __init__(self, commands):
        self.commands = commands

    def dispatch(self, logger, jira_env, args):
        """Return the exit code of the whole process"""
        if len(args) > 0 and args[0] in ("--help", "-h"):
            logger.info("")
            alias_text = ''
            first_alias = True
            for a in self.aliases:
                if first_alias:
                    if len(self.aliases) == 1:
                        alias_text = " (alias: " + a
                    else:
                        alias_text = " (aliases: " + a
                    first_alias = False
                else:
                    alias_text += ", " + a
            if not first_alias:
                alias_text += ")"
            logger.info("%s: %s%s" % (self.name, self.summary, alias_text))
            if self.usage == "":
                opts = ""
            else:
                opts = " [options]"
            logger.info("")
            logger.info("Usage: %s %s %s%s" % \
                        (sys.argv[0], self.name, self.mandatory, opts))
            logger.info(self.usage)
            return 0
        results = self.run(logger, jira_env, args)
        if results:
            return self.render(logger, jira_env, args, results)
        else:
            return 1

    def run(self, logger, jira_env, args):
        """Return a non-zero object for success"""
        return 0

    def render(self, logger, jira_env, args, results):
        """Return 0 for success"""
        return 0
and a second class in the same file, jira.py:
class JiraCat(JiraCommand):
    name = "cat"
    summary = "Show all the fields in an issue"
    usage = """
    <issue key>  Issue identifier, e.g. CA-1234
    """

    def run(self, logger, jira_env, args):
        global soap, auth
        if len(args) != 1:
            logger.error(self.usage)
            return 0
        issueKey = args[0]
        try:
            jira_env['fieldnames'] = soap.service.getFieldsForEdit(auth, issueKey)
        except Exception, e:
            # In case we don't have edit permission
            jira_env['fieldnames'] = {}
        try:
            return soap.service.getIssue(auth, issueKey)
        except Exception, e:
            logger.error(decode(e))

    def render(self, logger, jira_env, args, results):
        # For available field names, see the variables in
        # src/java/com/atlassian/jira/rpc/soap/beans/RemoteIssue.java
        fields = jira_env['fieldnames']
        for f in ['key', 'summary', 'reporter', 'assignee', 'description',
                  'environment', 'project', 'votes']:
            logger.info(getName(f, fields) + ': ' + encode(results[f]))
        logger.info('Type: ' + getName(results['type'], jira_env['types']))
        logger.info('Status: ' + getName(results['status'], jira_env['statuses']))
        logger.info('Priority: ' + getName(results['priority'], jira_env['priorities']))
        logger.info('Resolution: ' + getName(results['resolution'], jira_env['resolutions']))
        for f in ['created', 'updated', 'duedate']:
            logger.info(getName(f, fields) + ': ' + dateStr(results[f]))
        for f in results['components']:
            logger.info(getName('components', fields) + ': ' + encode(f['name']))
        for f in results['affectsVersions']:
            logger.info(getName('versions', fields) + ': ' + encode(f['name']))
        for f in results['fixVersions']:
            logger.info('Fix Version/s: ' + encode(f['name']))
        # TODO bug in JIRA api - attachmentNames are not returned
        # logger.info(str(results['attachmentNames']))
        # TODO restrict some of the fields that are shown here
        for f in results['customFieldValues']:
            fieldName = str(f['customfieldId'])
            for v in f['values']:
                logger.info(getName(fieldName, fields) + ': ' + encode(v))
        return 0
Now, JiraCat takes a JiraCommand as an argument. How can I use JiraCat to get live results?
Here is what I tried:
>>> from jira import JiraCommand
>>> dir(JiraCommand)
['__doc__', '__init__', '__module__', 'aliases', 'commands', 'dispatch', 'mandatory', 'name', 'render', 'run', 'summary', 'usage']
>>> jcmd = JiraCommand("http://jira.server.com:8080")
>>> from jira import JiraCat
>>> dir(JiraCat)
['__doc__', '__init__', '__module__', 'aliases', 'commands', 'dispatch', 'mandatory', 'name', 'render', 'run', 'summary', 'usage']
>>> jc = JiraCat(jcmd)
>>> print jc
<jira.JiraCat instance at 0x2356d88>
>>> jc.run("-s", "cat", "QA-65")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "jira.py", line 163, in run
logger.error(self.usage)
AttributeError: 'str' object has no attribute 'error'
DonCallisto has got it right.
JiraCat's run method takes three arguments (logger, jira_env, args); the first one is supposed to be a logger object, but you're passing a string ("-s").
So the error reporting that a string (logger="-s") has no "error" attribute means just that.
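In other words, a direct call needs a real logger object and the argument list, roughly like this (a sketch; it will still fail further in unless the module's soap/auth globals are initialised by the login flow, which the edit below replicates):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('jira')

jc = JiraCat(jcmd)
jc.run(logger, {}, ["QA-65"])  # args is a list containing the issue key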
Your command line (subprocess.Popen(['python', 'jira', '-s', 'jira.server.com:8080', 'catall', 'JIRA-65'])) is not the same as calling the run() method with the same arguments. Have a look at the bottom of jira.py and see what it does with sys.argv...
Edit (1):
Having read the code, the following Python should replicate your command-line call. It's a bit involved, misses out all the exception handling and logic in jira.py itself (which could get flaky), and I can't test it here.
import os

import jira

com = jira.Commands()
logger = jira.setupLogging()
jira_env = {'home': os.environ['HOME']}
command_name = "cat"
my_args = ["JIRA-65"]
server = "http://jira.server.com:8080" + "/rpc/soap/jirasoapservice-v2?wsdl"


class Options:
    pass

options = Options()
# You might want to set options.user and options.password here...

jira.soap = jira.Client(server)
jira.start_login(options, jira_env, command_name, com, logger)
com.run(command_name, logger, jira_env, my_args)
