Use Class and Class methods from another class in Python

I have a couple of classes in jira.py; here are 2 as a sample:
class JiraCommand:
    name = "<default>"
    aliases = []
    summary = "<--- no summary --->"
    usage = ""
    mandatory = ""
    commands = None

    def __init__(self, commands):
        self.commands = commands

    def dispatch(self, logger, jira_env, args):
        """Return the exit code of the whole process"""
        if len(args) > 0 and args[0] in ("--help", "-h"):
            logger.info("")
            alias_text = ''
            first_alias = True
            for a in self.aliases:
                if first_alias:
                    if len(self.aliases) == 1:
                        alias_text = " (alias: " + a
                    else:
                        alias_text = " (aliases: " + a
                    first_alias = False
                else:
                    alias_text += ", " + a
            if not first_alias:
                alias_text += ")"
            logger.info("%s: %s%s" % (self.name, self.summary, alias_text))
            if self.usage == "":
                opts = ""
            else:
                opts = " [options]"
            logger.info("")
            logger.info("Usage: %s %s %s%s" % \
                (sys.argv[0], self.name, self.mandatory, opts))
            logger.info(self.usage)
            return 0
        results = self.run(logger, jira_env, args)
        if results:
            return self.render(logger, jira_env, args, results)
        else:
            return 1

    def run(self, logger, jira_env, args):
        """Return a non-zero object for success"""
        return 0

    def render(self, logger, jira_env, args, results):
        """Return 0 for success"""
        return 0
and a second class in the same file, jira.py:
class JiraCat(JiraCommand):
    name = "cat"
    summary = "Show all the fields in an issue"
    usage = """
    <issue key> Issue identifier, e.g. CA-1234
    """

    def run(self, logger, jira_env, args):
        global soap, auth
        if len(args) != 1:
            logger.error(self.usage)
            return 0
        issueKey = args[0]
        try:
            jira_env['fieldnames'] = soap.service.getFieldsForEdit(auth, issueKey)
        except Exception, e:
            # In case we don't have edit permission
            jira_env['fieldnames'] = {}
        try:
            return soap.service.getIssue(auth, issueKey)
        except Exception, e:
            logger.error(decode(e))

    def render(self, logger, jira_env, args, results):
        # For available field names, see the variables in
        # src/java/com/atlassian/jira/rpc/soap/beans/RemoteIssue.java
        fields = jira_env['fieldnames']
        for f in ['key', 'summary', 'reporter', 'assignee', 'description',
                  'environment', 'project',
                  'votes'
                  ]:
            logger.info(getName(f, fields) + ': ' + encode(results[f]))
        logger.info('Type: ' + getName(results['type'], jira_env['types']))
        logger.info('Status: ' + getName(results['status'], jira_env['statuses']))
        logger.info('Priority: ' + getName(results['priority'], jira_env['priorities']))
        logger.info('Resolution: ' + getName(results['resolution'], jira_env['resolutions']))
        for f in ['created', 'updated',
                  'duedate'
                  ]:
            logger.info(getName(f, fields) + ': ' + dateStr(results[f]))
        for f in results['components']:
            logger.info(getName('components', fields) + ': ' + encode(f['name']))
        for f in results['affectsVersions']:
            logger.info(getName('versions', fields) + ': ' + encode(f['name']))
        for f in results['fixVersions']:
            logger.info('Fix Version/s: ' + encode(f['name']))
        # TODO bug in JIRA api - attachmentNames are not returned
        #logger.info(str(results['attachmentNames']))
        # TODO restrict some of the fields that are shown here
        for f in results['customFieldValues']:
            fieldName = str(f['customfieldId'])
            for v in f['values']:
                logger.info(getName(fieldName, fields) + ': ' + encode(v))
        return 0
Now, JiraCat uses JiraCommand as its base class.
How can I use JiraCat to get live results?
Here is what I tried:
>>> from jira import JiraCommand
>>> dir(JiraCommand)
['__doc__', '__init__', '__module__', 'aliases', 'commands', 'dispatch', 'mandatory', 'name', 'render', 'run', 'summary', 'usage']
>>> jcmd = JiraCommand("http://jira.server.com:8080")
>>> from jira import JiraCat
>>> dir(JiraCat)
['__doc__', '__init__', '__module__', 'aliases', 'commands', 'dispatch', 'mandatory', 'name', 'render', 'run', 'summary', 'usage']
>>> jc = JiraCat(jcmd)
>>> print jc
<jira.JiraCat instance at 0x2356d88>
>>> jc.run("-s", "cat", "QA-65")
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "jira.py", line 163, in run
    logger.error(self.usage)
AttributeError: 'str' object has no attribute 'error'

DonCallisto has got it right.
JiraCat's run method takes three arguments (logger, jira_env, args); the first one is supposed to be a logger object, but you're passing a string ("-s").
So the error reporting that a string (logger="-s") has no "error" attribute means exactly that.
The command line in your comment (subprocess.Popen(['python', 'jira', '-s', 'jira.server.com:8080', 'catall', 'JIRA-65'])) is not the same as calling the run() method with those arguments. Have a look at the bottom of jira.py and see what it does with sys.argv...
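If you just want to poke at run() interactively, the first argument must be a real logger object, and jira.py's soap/auth globals must already have been set up by its login code (the Edit below does that end to end). A minimal sketch, nothing more:
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("jira")

jira_env = {}   # jira.py's login code normally fills this with types/statuses/etc.
# args is a list containing just the issue key
results = jc.run(logger, jira_env, ["QA-65"])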
Edit (1):
Having read the code, the following Python should replicate your command-line call. It's a bit complicated, it misses out all the exception handling and logic in jira.py itself (which could get flaky), and I can't test it here.
import jira
import os

com = jira.Commands()
logger = jira.setupLogging()
jira_env = {'home': os.environ['HOME']}
command_name = "cat"
my_args = ["JIRA-65"]
server = "http://jira.server.com:8080" + "/rpc/soap/jirasoapservice-v2?wsdl"

class Options:
    pass

options = Options()
# You might want to set options.user and options.password here...
jira.soap = jira.Client(server)
jira.start_login(options, jira_env, command_name, com, logger)
com.run(command_name, logger, jira_env, my_args)

Related

Evernote Python API - Hitting rate limits

I've written a short piece of code that appends the tag names of my notes to the title, then removes all the associated tags. When I try to run this in production, I hit the rate limit very quickly. Can someone help me optimise this piece of code? Or should I request a special rate limit from Evernote?
Also, I get an error when a note has no tags. Is there a way to efficiently get the number of tags on a note so I don't get the error?
from evernote.api.client import EvernoteClient
from evernote.edam.notestore import NoteStore

dev_token = "dev_token"
client = EvernoteClient(token=dev_token, sandbox=False)

userStore = client.get_user_store()
user = userStore.getUser()
print
print user.username
print

noteStore = client.get_note_store()
notebooks = noteStore.listNotebooks()
for n in notebooks:
    print "Notebook = " + n.name + " GUID = " + n.guid
    filter = NoteStore.NoteFilter()
    filter.ascending = False
    filter.notebookGuid = n.guid
    spec = NoteStore.NotesMetadataResultSpec()
    spec.includeTitle = True
    spec.includeNotebookGuid = True
    spec.includeTagGuids = True
    notesMetadataList = noteStore.findNotesMetadata(filter, 0, 25, spec)
    for noteMetadata in notesMetadataList.notes:
        print "%s :: %s" % (noteMetadata.title, noteMetadata.guid)
        newNoteTitle = noteMetadata.title + " -- "
        for tagGuid in noteMetadata.tagGuids:
            tag = noteStore.getTag(tagGuid)
            tagName = tag.name
            print tagName
            newNoteTitle = newNoteTitle + " " + tagName
        print "newNoteTitle = " + newNoteTitle
        noteMetadata.title = newNoteTitle
        noteMetadata.tagGuids = []
        noteMetadata = noteStore.updateNote(noteMetadata)
        print noteMetadata.title
Here's how I deal with rate limiting: wrap the EvernoteClient in a rate-limiting proxy (based on http://code.activestate.com/recipes/496741-object-proxying/):
from time import sleep
from evernote.api.client import EvernoteClient
from evernote.edam.error.ttypes import (EDAMSystemException, EDAMErrorCode)

def evernote_wait_try_again(f):
    """
    Wait until mandated wait and try again
    http://dev.evernote.com/doc/articles/rate_limits.php
    """
    def f2(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except EDAMSystemException as e:
            if e.errorCode == EDAMErrorCode.RATE_LIMIT_REACHED:
                print("rate limit: {0} s. wait".format(e.rateLimitDuration))
                sleep(e.rateLimitDuration)
                print("wait over")
                return f(*args, **kwargs)
    return f2

class RateLimitingEvernoteProxy(object):
    __slots__ = ["_obj"]

    def __init__(self, obj):
        object.__setattr__(self, "_obj", obj)

    def __getattribute__(self, name):
        return evernote_wait_try_again(
            getattr(object.__getattribute__(self, "_obj"), name))

_client = EvernoteClient(token=auth_token, sandbox=sandbox)
client = RateLimitingEvernoteProxy(_client)
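On the second part of the question (notes without tags): in the metadata returned by findNotesMetadata, tagGuids can be None when a note has no tags, which is what makes the inner loop blow up. A small sketch of a guard for the loop body, reusing the names from the question's code:
tag_guids = noteMetadata.tagGuids or []   # None when the note carries no tags
if not tag_guids:
    continue                              # nothing to append to the title, skip the update
for tagGuid in tag_guids:
    newNoteTitle = newNoteTitle + " " + noteStore.getTag(tagGuid).name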

Scrapy upload file

I am making a form request to a website using Scrapy. The form requires uploading a PDF file. How can we do this in Scrapy? I am trying it like this:
FormRequest(url,callback=self.parseSearchResponse,method="POST",formdata={'filename':'abc.xyz','file':'path to file/abc.xyz'})
At this very moment Scrapy has no built-in support for uploading files.
File uploading via forms in HTTP was specified in RFC1867. According to the spec, an HTTP request with Content-Type: multipart/form-data is required (in your code it would be application/x-www-form-urlencoded).
To achieve file uploading with Scrapy, you would need to:
Get familiar with the basic concepts of HTTP file uploading.
Start with scrapy.Request (instead of FormRequest).
Give it a proper Content-Type header value.
Build the request body yourself.
See also: How does HTTP file upload work?
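A minimal sketch of that manual approach, meant to live inside a spider callback (hedged: multipart_body is a helper written here only for illustration, the file path is made up, and url / parseSearchResponse are the names from your snippet):
import uuid
from scrapy import Request

def multipart_body(boundary, fields, files):
    # fields: {name: text value}; files: {name: (filename, bytes, mimetype)}
    parts = []
    for name, value in fields.items():
        parts.append(b'--' + boundary)
        parts.append(('Content-Disposition: form-data; name="%s"' % name).encode())
        parts.append(b'')
        parts.append(value.encode())
    for name, (filename, data, mimetype) in files.items():
        parts.append(b'--' + boundary)
        parts.append(('Content-Disposition: form-data; name="%s"; filename="%s"'
                      % (name, filename)).encode())
        parts.append(('Content-Type: %s' % mimetype).encode())
        parts.append(b'')
        parts.append(data)
    parts.append(b'--' + boundary + b'--')
    return b'\r\n'.join(parts)

boundary = uuid.uuid4().hex.encode()
with open('path/to/abc.pdf', 'rb') as f:   # hypothetical path
    pdf_bytes = f.read()
body = multipart_body(boundary,
                      {'filename': 'abc.pdf'},
                      {'file': ('abc.pdf', pdf_bytes, 'application/pdf')})
request = Request(url, method='POST', body=body,
                  callback=self.parseSearchResponse,
                  headers={'Content-Type': b'multipart/form-data; boundary=' + boundary})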
I just spent an entire day trying to figure out how to implement this.
Finally, I came upon a Scrapy pull request from 2016 that was never merged, with an implementation of a multipart form request:
from scrapy import FormRequest
from six.moves.urllib.parse import urljoin, urlencode
import lxml.html
from parsel.selector import create_root_node
import six
import string
import random
from scrapy.http.request import Request
from scrapy.utils.python import to_bytes, is_listlike
from scrapy.utils.response import get_base_url


class MultipartFormRequest(FormRequest):
    def __init__(self, *args, **kwargs):
        formdata = kwargs.pop('formdata', None)

        kwargs.setdefault('method', 'POST')
        super(MultipartFormRequest, self).__init__(*args, **kwargs)

        content_type = self.headers.setdefault(b'Content-Type', [b'multipart/form-data'])[0]
        method = kwargs.get('method').upper()
        if formdata and method == 'POST' and content_type == b'multipart/form-data':
            items = formdata.items() if isinstance(formdata, dict) else formdata
            self._boundary = ''

            # encode the data using multipart spec
            self._boundary = to_bytes(''.join(
                random.choice(string.digits + string.ascii_letters) for i in range(20)), self.encoding)
            self.headers[b'Content-Type'] = b'multipart/form-data; boundary=' + self._boundary
            request_data = _multpart_encode(items, self._boundary, self.encoding)
            self._set_body(request_data)


class MultipartFile(object):
    def __init__(self, name, content, mimetype='application/octet-stream'):
        self.name = name
        self.content = content
        self.mimetype = mimetype


def _get_form_url(form, url):
    if url is None:
        return urljoin(form.base_url, form.action)
    return urljoin(form.base_url, url)


def _urlencode(seq, enc):
    values = [(to_bytes(k, enc), to_bytes(v, enc))
              for k, vs in seq
              for v in (vs if is_listlike(vs) else [vs])]
    return urlencode(values, doseq=1)


def _multpart_encode(items, boundary, enc):
    body = []

    for name, value in items:
        body.append(b'--' + boundary)
        if isinstance(value, MultipartFile):
            file_name = value.name
            content = value.content
            content_type = value.mimetype

            body.append(
                b'Content-Disposition: form-data; name="' + to_bytes(name, enc) + b'"; filename="' + to_bytes(file_name, enc) + b'"')
            body.append(b'Content-Type: ' + to_bytes(content_type, enc))
            body.append(b'')
            body.append(to_bytes(content, enc))
        else:
            body.append(b'Content-Disposition: form-data; name="' + to_bytes(name, enc) + b'"')
            body.append(b'')
            body.append(to_bytes(value, enc))

    body.append(b'--' + boundary + b'--')
    return b'\r\n'.join(body)


def _get_form(response, formname, formid, formnumber, formxpath):
    """Find the form element"""
    root = create_root_node(response.text, lxml.html.HTMLParser,
                            base_url=get_base_url(response))
    forms = root.xpath('//form')
    if not forms:
        raise ValueError("No <form> element found in %s" % response)

    if formname is not None:
        f = root.xpath('//form[@name="%s"]' % formname)
        if f:
            return f[0]

    if formid is not None:
        f = root.xpath('//form[@id="%s"]' % formid)
        if f:
            return f[0]

    # Get form element from xpath, if not found, go up
    if formxpath is not None:
        nodes = root.xpath(formxpath)
        if nodes:
            el = nodes[0]
            while True:
                if el.tag == 'form':
                    return el
                el = el.getparent()
                if el is None:
                    break
        encoded = formxpath if six.PY3 else formxpath.encode('unicode_escape')
        raise ValueError('No <form> element found with %s' % encoded)

    # If we get here, it means that either formname was None
    # or invalid
    if formnumber is not None:
        try:
            form = forms[formnumber]
        except IndexError:
            raise IndexError("Form number %d not found in %s" %
                             (formnumber, response))
        else:
            return form


def _get_inputs(form, formdata, dont_click, clickdata, response):
    try:
        formdata = dict(formdata or ())
    except (ValueError, TypeError):
        raise ValueError('formdata should be a dict or iterable of tuples')

    inputs = form.xpath('descendant::textarea'
                        '|descendant::select'
                        '|descendant::input[not(@type) or @type['
                        ' not(re:test(., "^(?:submit|image|reset)$", "i"))'
                        ' and (../@checked or'
                        ' not(re:test(., "^(?:checkbox|radio)$", "i")))]]',
                        namespaces={
                            "re": "http://exslt.org/regular-expressions"})
    values = [(k, u'' if v is None else v)
              for k, v in (_value(e) for e in inputs)
              if k and k not in formdata]

    if not dont_click:
        clickable = _get_clickable(clickdata, form)
        if clickable and clickable[0] not in formdata and not clickable[0] is None:
            values.append(clickable)

    values.extend(formdata.items())
    return values


def _value(ele):
    n = ele.name
    v = ele.value
    if ele.tag == 'select':
        return _select_value(ele, n, v)
    return n, v


def _select_value(ele, n, v):
    multiple = ele.multiple
    if v is None and not multiple:
        # Match browser behaviour on simple select tag without options selected
        # And for select tags without options
        o = ele.value_options
        return (n, o[0]) if o else (None, None)
    elif v is not None and multiple:
        # This is a workaround to a bug in lxml fixed in 2.3.1
        # fix https://github.com/lxml/lxml/commit/57f49eed82068a20da3db8f1b18ae00c1bab8b12#L1L1139
        selected_options = ele.xpath('.//option[@selected]')
        v = [(o.get('value') or o.text or u'').strip() for o in selected_options]
    return n, v


def _get_clickable(clickdata, form):
    """
    Returns the clickable element specified in clickdata,
    if the latter is given. If not, it returns the first
    clickable element found
    """
    clickables = [
        el for el in form.xpath(
            'descendant::*[(self::input or self::button)'
            ' and re:test(@type, "^submit$", "i")]'
            '|descendant::button[not(@type)]',
            namespaces={"re": "http://exslt.org/regular-expressions"})
    ]
    if not clickables:
        return

    # If we don't have clickdata, we just use the first clickable element
    if clickdata is None:
        el = clickables[0]
        return (el.get('name'), el.get('value') or '')

    # If clickdata is given, we compare it to the clickable elements to find a
    # match. We first look to see if the number is specified in clickdata,
    # because that uniquely identifies the element
    nr = clickdata.get('nr', None)
    if nr is not None:
        try:
            el = list(form.inputs)[nr]
        except IndexError:
            pass
        else:
            return (el.get('name'), el.get('value') or '')

    # We didn't find it, so now we build an XPath expression out of the other
    # arguments, because they can be used as such
    xpath = u'.//*' + \
            u''.join(u'[@%s="%s"]' % c for c in six.iteritems(clickdata))
    el = form.xpath(xpath)
    if len(el) == 1:
        return (el[0].get('name'), el[0].get('value') or '')
    elif len(el) > 1:
        raise ValueError("Multiple elements found (%r) matching the criteria "
                         "in clickdata: %r" % (el, clickdata))
    else:
        raise ValueError('No clickable element matching clickdata: %r' % (clickdata,))
This is the code I used to call the request (in my case I needed to upload an image):
with open(img_path, 'rb') as file:
    img = file.read()

file_name = os.path.basename(img_path)
multipart_file = MultipartFile(file_name, img, "image/png")

form_data = {
    "param": "value",  # this is an example of a text parameter
    "PicUpload": multipart_file
}
yield MultipartFormRequest(url=upload_url, formdata=form_data,
                           callback=self.my_callback)
It's a shame that so much time has passed and Scrapy still doesn't have a built-in way to do this, especially since someone wrote a very simple implementation years ago.

pattern match with in if statement

I would like to list all active stacks in AWS CloudFormation that match a regular expression. Stack names look like 'FeatureEnv-commit123asdfqw212da-3241'. What is the best way to achieve this? Whenever I run the script it throws an error. The complete script is at http://www.technobabelfish.com/2013/08/boto-and-cloudformation.html; I've updated it to fit my requirement.
#!/usr/bin/env python
import sys
import boto
import boto.cloudformation
import argparse
import re

class MyBaseException(Exception):
    msg = "MyBaseException"
    def __init__(self, value):
        self.value = value
    def __str__(self):
        return "%s: %s" % (self.msg, self.value)

class MissingParamException(MyBaseException):
    msg = "Missing param"

class InvalidCommandException(MyBaseException):
    msg = "Invalid command"

class InvalidStackException(MyBaseException):
    msg = "Invalid stack"

def _create_cf_connection(args):
    # Connect to CloudFormation.
    # Returns a cloudformation connection.
    # Throws exception if connect fails.
    if not args.access_key:
        raise MissingParamException("access_key")
    if not args.secret_key:
        raise MissingParamException("secret_key")
    if not args.region:
        raise MissingParamException("region")
    conn = boto.cloudformation.connect_to_region(args.region,
                                                 aws_access_key_id=args.access_key,
                                                 aws_secret_access_key=args.secret_key)
    return conn

def get_stacks(args):
    conn = _create_cf_connection(args)
    return conn.list_stacks()

def get_stack(args, stack):
    conn = _create_cf_connection(args)
    stacks = conn.describe_stacks(stack)
    if not stacks:
        raise InvalidStackException(stack)
    return stacks[0]

def print_stack(stack):
    print "---"
    print "Name: %s" % stack.stack_name
    print "ID: %s" % stack.stack_id
    print "Status: %s" % stack.stack_status
    print "Creation Time: %s" % stack.creation_time
    print "Outputs: %s" % stack.outputs
    print "Parameters: %s" % stack.parameters
    print "Tags: %s" % stack.tags
    print "Capabilities: %s" % stack.capabilities

def list_stacks(args):
    stacks = get_stacks(args)
    for stackSumm in stacks:
        pattern = re.compile("^FeatureEnv-commit([a-z][0-9]+)*-([0-9]*)")
        match = pattern.match(stackSumm.stack_name)
        print match.string
        if stackSumm.stack_status in "CREATE_COMPLETE" and match and stackSumm.stack_name in match.string:
            print_stack(get_stack(args, stackSumm.stack_id))

def list_regions(args):
    regions = boto.cloudformation.regions()
    for r in regions:
        print r.name

command_list = {'list-regions': list_regions,
                'list-stacks': list_stacks,
                }

def parseArgs():
    parser = argparse.ArgumentParser()
    parser.add_argument("--region")
    parser.add_argument("--command")
    parser.add_argument("--access-key")
    parser.add_argument("--secret-key")
    args = parser.parse_args()
    if not args.command:
        raise MissingParamException("command")
    if args.command not in command_list:
        raise InvalidCommandException(args.command)
    command_list[args.command](args)

if __name__ == '__main__':
    try:
        parseArgs()
    except Exception, e:
        print e
Error:
'NoneType' object has no attribute 'string'
The error is in the statement stackSumm.stack_name in pattern. pattern in this case is a _sre.SRE_Pattern object, not a string. The string that contains the result of the match is:
match = pattern.match(stackSumm.stack_name)
print match.string
And string is iterable, so you can safely check whether a stack name is contained in the match of your re expression:
if stackSumm.stack_status in "CREATE_COMPLETE" and match and stackSumm.stack_name in match.string:
    print_stack(get_stack(args, stackSumm.stack_id))
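As an aside, the 'NoneType' object has no attribute 'string' error in the posted list_stacks() comes from print match.string when pattern.match() returns None, i.e. the stack name did not match the regex (the posted ([a-z][0-9]+)* group does not match a mixed run like commit123asdfqw212da). A sketch of the loop with that guarded and a looser pattern; the regex is an assumption, so adjust it to your real naming scheme, and note that == is used for the status because in tests substring containment:
def list_stacks(args):
    stacks = get_stacks(args)
    # looser pattern: letters and digits after "commit", digits at the end
    pattern = re.compile(r"^FeatureEnv-commit([a-z0-9]+)-([0-9]+)$")
    for stackSumm in stacks:
        match = pattern.match(stackSumm.stack_name)
        if match is None:          # name does not fit the naming scheme; skip it
            continue
        if stackSumm.stack_status == "CREATE_COMPLETE":
            print_stack(get_stack(args, stackSumm.stack_id))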

Configure auto reload template and enable bytecode cache for jinja2 in appengine

How do I configure jinja2 on App Engine to:
Auto-reload when a template is updated.
Enable a bytecode cache, so it can be shared among instances. I would prefer jinja2 to produce bytecode when compiling a template and store it in the datastore, so the next instance will load the bytecode instead of repeatedly compiling the template.
I have added the bcc like this, using the App Engine memcache Client():
loader = dynloaders.DynLoader()  # init Function loader
bcc = MemcachedBytecodeCache(memcache.Client(), prefix='jinja2/bytecode/', timeout=None)
return Environment(auto_reload=True, cache_size=100, loader=FunctionLoader(loader.load_dyn_all),
                   bytecode_cache=bcc)
My function loader:
def html(self, cid):
    def _html_txt_up_to_date():  # closure to check if template is up to date
        return CMSUpdates.check_no_update(cid, template.modified)
    template = ndb.Key('Templates', cid, parent=self.parent_key).get()
    if not template:
        logging.error('DynLoader (HTML/TXT): %s' % cid)
        return None  # raises TemplateNotFound exception
    return template.content, None, _html_txt_up_to_date
The template model uses template.modified : ndb.DateTimeProperty(auto_now=True)
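For reference, a minimal sketch of what such a Templates model could look like (the kind name and the content/modified properties are taken from the snippets above; everything else is an assumption):
from google.appengine.ext import ndb

class Templates(ndb.Model):
    # assumed shape of the entity fetched by ndb.Key('Templates', cid, ...) above
    content = ndb.TextProperty()
    modified = ndb.DateTimeProperty(auto_now=True)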
The closure function:
class CMSUpdates(ndb.Model):
    updates = ndb.JsonProperty()

    @classmethod
    def check_no_update(cls, cid, cid_modified):
        cms_updates = cls.get_or_insert('cms_updates', updates=dict()).updates
        if cid in cms_updates:  # cid modified has dt microseconds
            if cid_modified >= datetime.strptime(cms_updates[cid], '%Y-%m-%d %H:%M:%S'):
                if (datetime.now() - timedelta(days=1)) > cid_modified:
                    del cms_updates[cid]
                    cls(id='cms_updates', updates=cms_updates).put_async()
                return True
            return False  # reload the template
        return True
I had been looking for a solution for a few weeks and finally figured it out; I would like to share my code with everyone. There are 4 Python source files in my code:
TemplateEngine.py, ContentRenderer.py, TestContent.py & Update_Template.py
File: TemplateEngine.py
Note:
I use now = datetime.utcnow() + timedelta(hours=8) because my timezone is GMT+8.
You must use ndb.BlobProperty to store the bytecode; ndb.TextProperty will not work!
import logging
import jinja2
from google.appengine.ext import ndb
from datetime import datetime, timedelta

class SiteTemplates(ndb.Model):
    name = ndb.StringProperty(indexed=True, required=True)
    data = ndb.TextProperty()
    uptodate = ndb.BooleanProperty(required=True)

class SiteTemplateBytecodes(ndb.Model):
    key = ndb.StringProperty(indexed=True, required=True)
    data = ndb.BlobProperty(required=True)
    mod_datetime = ndb.DateTimeProperty(required=True)

class LocalCache(jinja2.BytecodeCache):
    def load_bytecode(self, bucket):
        q = SiteTemplateBytecodes.query(SiteTemplateBytecodes.key == bucket.key)
        if q.count() > 0:
            r = q.get()
            bucket.bytecode_from_string(r.data)

    def dump_bytecode(self, bucket):
        now = datetime.utcnow() + timedelta(hours=8)
        q = SiteTemplateBytecodes.query(SiteTemplateBytecodes.key == bucket.key)
        if q.count() > 0:
            r = q.get()
            r.data = bucket.bytecode_to_string()
            r.mod_datetime = now
        else:
            r = SiteTemplateBytecodes(key=bucket.key, data=bucket.bytecode_to_string(), mod_datetime=now)
        r.put()

def Update_Template_Source(tn, source):
    try:
        q = SiteTemplates.query(SiteTemplates.name == tn)
        if q.count() == 0:
            u = SiteTemplates(name=tn, data=source, uptodate=False)
        else:
            u = q.get()
            u.name = tn
            u.data = source
            u.uptodate = False
        u.put()
        return True
    except Exception, e:
        logging.exception(e)
        return False

def Get_Template_Source(tn):
    uptodate = False
    def Template_Uptodate():
        return uptodate
    try:
        q = SiteTemplates.query(SiteTemplates.name == tn)
        if q.count() > 0:
            r = q.get()
            uptodate = r.uptodate
            if r.uptodate == False:
                r.uptodate = True
                r.put()
            return r.data, tn, Template_Uptodate
        else:
            return None
    except Exception, e:
        logging.exception(e)
        return None
File: ContentRenderer.py
Note: It is very important to set cache_size=0, otherwise the bytecode cache function will be disabled. I have no idea why.
from TemplateEngine import Get_Template_Source, LocalCache
import jinja2

def Render(tn, tags):
    global te
    # Environment has no Render() method; fetch the template and render it
    return te.get_template(tn).render(tags)

bcc = LocalCache()
te = jinja2.Environment(loader=jinja2.FunctionLoader(Get_Template_Source), cache_size=0,
                        extensions=['jinja2.ext.autoescape'], bytecode_cache=bcc)
File: Update_Template.py
Note: Use Update_Template_Source() to update the template source in the datastore.
from TemplateEngine import Update_Template_Source

template_source = '<html><body>hello world to {{title}}!</body></html>'
if Update_Template_Source('my-template.html', template_source):
    print 'template is updated'
else:
    print 'error when updating template source'
File: TestContent.py
Note: a quick test
from ContentRenderer import Render

print Render('my-template.html', {'title': 'human'})
'hello world to human!'
You will find that even if you have more than 20 instances in your application, the latency will not increase, even when you update your template. And the template source will update in 5 to 10 seconds.

adding an attribute in suds

I have to make a SOAP request with suds and Python:
<soap:Body>
    <registerOrder>
        <order merchantOrderNumber="" description="" amount="" currency="" language="" xmlns="">
            <returnUrl>http://mysafety.com</returnUrl>
        </order>
    </registerOrder>
</soap:Body>
How to add an attribute in registerOrder?
A more dynamic version of the MessagePlugin would be:
from suds.sax.attribute import Attribute
from suds.plugin import MessagePlugin

class _AttributePlugin(MessagePlugin):
    """
    Suds plug-in extending the method call with arbitrary attributes.
    """
    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def marshalled(self, context):
        method = context.envelope.getChild('Body')[0]
        for key, item in self.kwargs.iteritems():
            method.attributes.append(Attribute(key, item))
Usage:
client = Client(url)
# method 1
client.options.plugins = [_AttributePlugin(foo='bar')]
response = client.service.method1()
client.options.plugins = []
# method 2
response = client.service.method2()
In the suds documentation, search for MessagePlugin. The marshalled hook is what you're looking for. You need to add it to your client as a plugin:
self.client = Client(url, plugins=[MyPlugin()])
In the marshalled method, search through the context.envelope children. Python's vars() function is very useful here. I think it should look something like this for you:
from suds.sax.attribute import Attribute
from suds.plugin import MessagePlugin

class MyPlugin(MessagePlugin):
    def marshalled(self, context):
        foo = context.envelope.getChild('Body').getChild('registerOrder')[0]
        foo.attributes.append(Attribute("foo", "bar"))
I was sitting on this for the last week, so it might save some time for you :)
You can use the __inject Client option to inject particular raw XML:
raw_xml = """<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope>
    <SOAP-ENV:Body>
    ...
    </SOAP-ENV:Body>
</SOAP-ENV:Envelope>"""
print client.service.example(__inject={'msg': raw_xml})
Also, I prefer using suds-jurko https://pypi.python.org/pypi/suds-jurko/0.6 which is a fork of suds that is actively maintained.
I have reused https://fedorahosted.org/suds/ticket/21 and adapted the code to use the idea. Change suds as below and use
Client.<method>(param1=value1, ..., attributes={'attrName1': 'attrVal1'})
to call 'method' with the 'attrName1' attribute as wanted.
--- a/website/suds/bindings/binding.py
+++ b/website/suds/bindings/binding.py
@@ -24,6 +24,7 @@ from suds.sax import Namespace
 from suds.sax.parser import Parser
 from suds.sax.document import Document
 from suds.sax.element import Element
+from suds.sax.attribute import Attribute
 from suds.sudsobject import Factory, Object
 from suds.mx import Content
 from suds.mx.literal import Literal as MxLiteral
@@ -101,7 +102,7 @@ class Binding:
         """
         raise Exception, 'not implemented'
 
-    def get_message(self, method, args, kwargs):
+    def get_message(self, method, args, kwargs, attributes=None):
         """
         Get the soap message for the specified method, args and soapheaders.
         This is the entry point for creating the outbound soap message.
@@ -115,11 +116,23 @@ class Binding:
         @rtype: L{Document}
         """
 
+        if attributes:
+            pass
+            # moved to suds/bindings/document.py
+
+            #print method
+            #for name, val in attributes.items():
+            #    method.attributes.append(Attribute(name, val))
+
+
         content = self.headercontent(method)
         header = self.header(content)
-        content = self.bodycontent(method, args, kwargs)
+        content = self.bodycontent(method, args, kwargs, attributes=attributes)
         body = self.body(content)
         env = self.envelope(header, body)
+        #if attributes:
+        #    print content
+        #    1/0
         if self.options().prefixes:
             body.normalizePrefixes()
             env.promotePrefixes()
@@ -535,4 +548,4 @@ class PartElement(SchemaElement):
             return self
         else:
             return self.__resolved
-
\ No newline at end of file
+
diff --git a/website/suds/bindings/document.py b/website/suds/bindings/document.py
index edd9422..0c84753 100644
--- a/website/suds/bindings/document.py
+++ b/website/suds/bindings/document.py
@@ -38,7 +38,7 @@ class Document(Binding):
     (multiple message parts), must present a I{document} view for that method.
     """
 
-    def bodycontent(self, method, args, kwargs):
+    def bodycontent(self, method, args, kwargs, attributes=None):
         #
         # The I{wrapped} vs I{bare} style is detected in 2 ways.
         # If there is 2+ parts in the message then it is I{bare}.
@@ -54,6 +54,12 @@ class Document(Binding):
         else:
             root = []
         n = 0
+
+        if attributes:
+            #print root.__class__
+            for name, val in attributes.items():
+                root.set(name, val)
+
         for pd in self.param_defs(method):
             if n < len(args):
                 value = args[n]
diff --git a/website/suds/client.py b/website/suds/client.py
index 8b4f258..f80e36a 100644
--- a/website/suds/client.py
+++ b/website/suds/client.py
@@ -592,7 +592,10 @@ class SoapClient:
         timer.start()
         result = None
         binding = self.method.binding.input
-        soapenv = binding.get_message(self.method, args, kwargs)
+        attributes = kwargs.get('attributes', None)
+        if attributes:
+            del kwargs['attributes']
+        soapenv = binding.get_message(self.method, args, kwargs, attributes)
         timer.stop()
         metrics.log.debug(
             "message for '%s' created: %s",
@@ -841,4 +844,4 @@ class RequestContext:
         @type error: A suds I{TransportError}.
         """
         return self.client.failed(self.binding, error)
-
\ No newline at end of file
+
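A usage sketch of the patched client (the WSDL URL, parameter name, and attribute name/value are placeholders; with this patch the attributes dict ends up as XML attributes on the method's wrapper element, i.e. registerOrder here):
from suds.client import Client

client = Client('http://example.com/service?wsdl')   # placeholder WSDL URL
# With the patched suds, ordinary parameters pass through unchanged and the
# special 'attributes' dict becomes XML attributes on <registerOrder>:
response = client.service.registerOrder(
    param1='value1',                                  # ordinary parameters, per your WSDL
    attributes={'merchantOrderNumber': '12345'})      # illustrative attribute name/value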
