I am writing a program that, depending on certain values from an Excel table, makes an API call. There are 2 conditions from the table that will be checked:
Language
Provider
Depending on those two values a different set of constants is needed for the API call:
def run_workflow(provider, language, workflow):
    if provider == 'xxxx' and language == 0:
        wf_ready = provider_ready
        wf_unverified = provider_unverified
        wf_active = provider_active
        wf_another = provider_another
        wf_closed = provider_closed
        wf_wrongid = provider_wrongid
    elif provider == 'yyyy' and language == 0:
        wf_ready = provider_ready
        wf_unverified = provider_unverified
        wf_active = provider_active
        wf_another = provider_another
        wf_closed = provider_closed
        wf_wrongid = provider_wrongid
    elif ...

    if workflow == 'ready':
        response = requests.post(API + wf_ready, headers=header, data=json.dumps(conversation))
    elif workflow == 'unverified':
        response = requests.post(API + wf_unverified, headers=header, data=json.dumps(conversation))
    elif ...
There are 2 providers and 7 different languages, and I am trying to figure out the most efficient (and Pythonic) way to handle this scenario. I came up with creating a class for each language:
class Workflow_Language():
    def english(self):
        self.provider_unverified = 1112
        self.provider_ready = 1113
        self.provider_active = 1114
        self.provider_vip = 1115

    def russian(self):
        self.provider_unverified = 1116
        self.provider_ready = 1117
        self.provider_active = 1118
        self.provider_vip = 1119

    def ...
    ...
Is there maybe a better way to handle this?
One way is to map constants to appropriate handlers:
class LanguageData:
    def __init__(self, unverified, ready, active, vip):
        self.unverified = unverified
        self.ready = ready
        self.active = active
        self.vip = vip

def english():
    return LanguageData(1, 2, 3, 4)

def russian():
    return LanguageData(5, 6, 7, 8)

LANGUAGE_MAP = {'en': english, 'ru': russian}
I've made up the 'en' and 'ru' values for clarity; it seems it is 0 in your case. Also note that english and russian are standalone functions. Finally, the LanguageData class is not mandatory: you could simply return a dictionary from those functions, but working with attributes instead of string keys seems easier to maintain.
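If you want the attribute style with less boilerplate, the same idea can be written with a dataclass; a minimal sketch using the same made-up values:

from dataclasses import dataclass

@dataclass
class LanguageData:
    unverified: int
    ready: int
    active: int
    vip: int

def english():
    return LanguageData(1, 2, 3, 4)

def russian():
    return LanguageData(5, 6, 7, 8)

LANGUAGE_MAP = {'en': english, 'ru': russian}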
And then in the code:
def run_workflow(provider, language, workflow):
    lang_data = LANGUAGE_MAP[language]()
    if workflow == 'ready':
        url = API + lang_data.ready
    elif workflow == 'unverified':
        url = API + lang_data.unverified
    response = requests.post(url, headers=header, data=json.dumps(conversation))
Of course workflow can be wrapped in a similar way if there are more than 2 possible values.
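For instance, a minimal sketch of that idea, reusing the LANGUAGE_MAP above and assuming the workflow strings match the LanguageData attribute names:

def run_workflow(provider, language, workflow):
    lang_data = LANGUAGE_MAP[language]()
    # look the workflow up by attribute name instead of chaining elifs;
    # raises AttributeError for unknown workflow names
    url = API + getattr(lang_data, workflow)
    response = requests.post(url, headers=header, data=json.dumps(conversation))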
Analogously for provider. Unless the action depends on both provider and language at the same time, in which case you need a double map:

LANG_PROV_MAP = {
    ('en', 'xxxx'): first,
    ('ru', 'yyyy'): second,
}

def run_workflow(provider, language, workflow):
    data = LANG_PROV_MAP[(language, provider)]()
    ...
The original code can be simplified with a tricky decorator:
LANGUAGE_MAP = {}

def language_handler(lang):
    def wrapper(fn):
        LANGUAGE_MAP[lang] = fn
        return fn
    return wrapper

@language_handler('en')
def handler():
    return LanguageData(1, 2, 3, 4)

@language_handler('ru')
def handler():
    return LanguageData(5, 6, 7, 8)
Also note that if the data is "constant" (i.e. doesn't depend on the context) then you can completely omit the callables to make everything even simpler:
LANGUAGE_MAP = {
    'en': LanguageData(1, 2, 3, 4),
    'ru': LanguageData(5, 6, 7, 8),
}

def run_workflow(provider, language, workflow):
    data = LANGUAGE_MAP[language]
    ...
The combination of the language and provider can compose a method name, and the call can then be invoked dynamically.
Example:
import sys

def provider1_lang2():
    pass

def provider2_lang4():
    pass

# get the provider / lang and call the method dynamically
provider = 'provider2'
lang = 'lang4'
method_name = '{}_{}'.format(provider, lang)
method = getattr(sys.modules[__name__], method_name)
method()
Most Python profilers are made for standalone Python programs or scripts. In my case I'm working with a Python plugin for a third-party app (Blender 3D), so the profiling needs to happen in real time while the user is interacting with the plugin.
I'm currently trying an injection strategy, which consists of procedurally searching through all plugin modules and injecting a profiler wrapper into every single function.
See below; this is what my current profiler looks like.
I'm wondering if there are other profilers out there that can be used for run-time scenarios such as plugins.
class ModuleProfiler:

    # profiler is running?
    allow = False      # must be True in order to start the profiler
    activated = False  # read-only indication that the profiler has been activated

    # please define your plugin main module here
    plugin_main_module = "MyBlenderPlugin"

    # function call registry
    registry = {}

    # ignore parameters, typically ui functions/modules
    ignore_fcts = [
        "draw",
        "foo",
    ]
    ignore_module = [
        "interface_drawing",
    ]

    event_prints = True  # print every event?

    @classmethod
    def print_registry(cls):
        """print all registered benchmarks"""

        # generate totals
        for k, v in cls.registry.copy().items():
            cls.registry[k]["averagetime"] = v["runtime"] / v["calls"]

        print("")
        print("PROFILER: PRINTING OUTCOME")
        sorted_registry = dict(sorted(cls.registry.items(), key=lambda item: item[1]["runtime"], reverse=False))
        for k, v in sorted_registry.items():
            print("\n", k, ":")
            for a, val in v.items():
                print(" " * 6, a, ":", val)

        return None

    @classmethod
    def update_registry(cls, fct, exec_time=0):
        """update the internal benchmark with new data"""

        key = f"{fct.__module__}.{fct.__name__}"
        r = cls.registry.get(key)
        if (r is None):
            cls.registry[key] = {}
            cls.registry[key]["calls"] = 0
            cls.registry[key]["runtime"] = 0
            r = cls.registry[key]

        r["calls"] += 1
        r["runtime"] += exec_time

        return None

    @classmethod
    def profile_wrap(cls, fct):
        """wrap any function with our benchmark & call-counter"""

        # ignore some functions?
        if (fct.__name__ in cls.ignore_fcts):
            return fct

        import functools
        import time

        @functools.wraps(fct)
        def inner(*args, **kwargs):
            t = time.time()
            r = fct(*args, **kwargs)
            cls.update_registry(fct, exec_time=time.time() - t)

            if cls.event_prints:
                print(f"PROFILER : {fct.__module__}.{fct.__name__} : {time.time()-t}")

            return r

        return inner

    @classmethod
    def start(cls):
        """inject the wrapper into every function of every sub-module of our plugin;
        used for benchmark or debugging purposes only"""

        if (not cls.allow):
            return None
        cls.activated = True

        import types
        import sys

        def is_function(obj):
            """check if the given object is a function"""
            return isinstance(obj, types.FunctionType)

        print("")

        # for all modules in sys.modules
        for mod_k, mod in sys.modules.copy().items():

            # separate the module component names
            mod_list = mod_k.split('.')

            # filter out what isn't ours
            if (mod_list[0] != cls.plugin_main_module):
                continue

            # ignore some modules?
            if any([m in cls.ignore_module for m in mod_list]):
                continue

            print("PROFILER_SEARCH : ", mod_k)

            # for each object found in the module
            for ele_k, ele in mod.__dict__.items():

                # if it does not have a name, skip it
                if (not hasattr(ele, "__name__")):
                    continue

                # we have a global function
                elif is_function(ele):
                    print(f"    INJECT LOCAL_FUNCTION: {mod_k}.{ele_k}")
                    mod.__dict__[ele_k] = cls.profile_wrap(ele)

                # then we have a homebrewed class? search for class.fcts
                # class.fcts implementation is not flawless, need to investigate issue(s)
                elif repr(ele).startswith(f"<class '{cls.plugin_main_module}."):
                    for class_k, class_e in ele.__dict__.items():
                        if is_function(class_e):
                            print(f"    INJECT CLASS_FUNCTION: {mod_k}.{ele_k}.{class_k}")
                            # class.__dict__ is a mappingproxy, need to assign this way
                            setattr(mod.__dict__[ele_k], class_k, cls.profile_wrap(class_e))
                    continue

        print("")

        return None
ModuleProfiler.allow = True
ModuleProfiler.plugin_main_module = "MyModule"
ModuleProfiler.start()
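As a point of comparison (not part of the code above), the standard library's cProfile can also be switched on and off at runtime from inside a plugin; note that it does deterministic tracing rather than sampling. A minimal sketch, assuming you have suitable places to call these hooks (e.g. the add-on's register/unregister functions or a pair of operators):

import cProfile
import pstats

_profiler = cProfile.Profile()

def start_profiling():
    # begin collecting stats for everything called from now on
    _profiler.enable()

def stop_profiling():
    # stop collecting and print the results sorted by cumulative time
    _profiler.disable()
    pstats.Stats(_profiler).sort_stats("cumulative").print_stats(20)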
I want to achieve the below:
def do_something(request):
    company_name = request.get("company_name", DEFAULT_COMPANY)
    data = request.get("data")
    response = transform_data_according_to(data, company_name)
    return response
I did the following for it:
from abc import ABC, abstractmethod

class Transform(ABC):
    def __init__(self, data):
        self.data = data

    @abstractmethod
    def transform(self):
        pass

class CompanyA(Transform):
    def transform(self):
        # do_transformation
        return transformed_data

def do_something(request):
    company_name = request.get("company_name", DEFAULT_COMPANY)
    data = request.get("data")
    if company_name == "CompanyA":
        response = CompanyA(data).transform()
    return response
Instead, I would like to do something like this, using correct object-oriented principles:
def do_something(request):
    company_name = request.get("company_name", DEFAULT_COMPANY)
    data = request.get("data")
    response = Transform(data, company_name)
    return response
I want to know where I might be thinking wrong in terms of the desired approach versus the implemented approach. Is the implemented approach correct? The if/else checks can grow quite big in that case.
Thanks to teraflop
The simple, idiomatic way to do this in Python would be to look up the Transform subclass in a dictionary:
transform_classes = {
    "CompanyA": CompanyA,
    # ...
}

def do_something(request):
    company_name = request.get("company_name", DEFAULT_COMPANY)
    data = request.get("data")
    transformer = transform_classes[company_name](data)
    return transformer.transform()
If you prefer to be more rigorously object-oriented, you could wrap the dictionary in an object (e.g. TransformLookupByName) instead of accessing it directly.
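A minimal sketch of what such a wrapper could look like (the class and method names here are purely illustrative):

class TransformLookupByName:
    def __init__(self):
        self._classes = {}

    def register(self, name, cls):
        self._classes[name] = cls

    def create(self, name, data):
        # raises KeyError for unknown company names
        return self._classes[name](data)

lookup = TransformLookupByName()
lookup.register("CompanyA", CompanyA)
transformer = lookup.create("CompanyA", data)  # data comes from the request, as above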
There are also various kinds of metaprogramming magic you can use to build the dictionary automatically without having to name each subclass explicitly. For example, this will collect all of the Transform subclasses in the current source file:
transform_classes = {
    k: v for k, v in globals().items()
    if isinstance(v, type) and issubclass(v, Transform) and v != Transform
}
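Another variant in the same spirit (a sketch, not from the original answer) lets each subclass register itself as it is defined via __init_subclass__, so the mapping is not limited to one source file:

from abc import ABC, abstractmethod

class Transform(ABC):
    registry = {}

    def __init__(self, data):
        self.data = data

    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # every subclass registers itself under its class name, e.g. "CompanyA"
        Transform.registry[cls.__name__] = cls

    @abstractmethod
    def transform(self):
        ...

class CompanyA(Transform):
    def transform(self):
        return self.data  # placeholder transformation

# Transform.registry == {"CompanyA": CompanyA}
transformer = Transform.registry["CompanyA"]({"some": "data"})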
I'm trying to create a simple DSL with Python SLY, but I can't get the result I expected because the parser can't read it properly. Here is the code:
Lexer
from sly import Lexer

class ConfigLexer(Lexer):
    tokens = { ANIMALS, BLOOD, SKIN, BREATHE, ANIMAL_NAME, VALUE, ASSIGN }
    ignore = " \t\r"
    ignore_newline = r'\n+'

    ANIMALS = "ANIMALS"
    BLOOD = "BLOOD"
    SKIN = "SKIN"
    BREATHE = "BREATHE"
    ANIMAL_NAME = r'\{[a-zA-Z_][a-zA-Z0-9_]*\}'
    VALUE = r'[a-zA-Z_][a-zA-Z0-9_,.: ]*'
    ASSIGN = r'\='
Parser
from sly import Parser

class ConfigParser(Parser):
    tokens = ConfigLexer.tokens

    def __init__(self):
        self.config = dict()
        self.dict_attribute = dict()
        self.animal_name = ""

    @_("ANIMALS animaldetails")
    def animals(self, p):
        pass

    @_("ANIMAL_NAME animalnamedetails")
    def animaldetails(self, p):
        self.animal_name = p.ANIMAL_NAME.replace("{", "").replace("}", "")
        if self.animal_name not in self.config:
            self.config[self.animal_name] = self.dict_attribute

    @_("BLOOD ASSIGN VALUE")
    def animalnamedetails(self, p):
        if p.BLOOD not in self.dict_attribute:
            self.dict_attribute[p.BLOOD] = p.VALUE

    @_("SKIN ASSIGN VALUE")
    def animalnamedetails(self, p):
        if p.SKIN not in self.dict_attribute:
            self.dict_attribute[p.SKIN] = p.VALUE

    @_("BREATHE ASSIGN VALUE")
    def animalnamedetails(self, p):
        if p.BREATHE not in self.dict_attribute:
            self.dict_attribute[p.BREATHE] = p.VALUE

    def get_config(self):
        return self.config
But when I run it:
import json
import ConfigLexer
import ConfigParser

if __name__ == '__main__':
    lexer = ConfigLexer()
    parser = ConfigParser()
    long_string = """ANIMALS
        {MAMMALS}
            BLOOD = WARM
            SKIN = FUR
            BREATHE = LUNGS
        {FISH}
            BLOOD = COLD
            SKIN = SCALY
            BREATHE = GILLS"""
    result = parser.parse(lexer.tokenize(long_string))
    cfg = parser.get_config()
    data_json = json.dumps(cfg, indent=3)
    print(data_json)
I expected the result to be like this:

data_json = {
    'MAMMALS': {'BLOOD': 'WARM', 'SKIN': 'FUR', 'BREATHE': 'LUNGS'},
    'FISH': {'BLOOD': 'COLD', 'SKIN': 'SCALY', 'BREATHE': 'GILLS'}
}
but I only get something like this.
data_json = {
    'MAMMALS': {
        'BLOOD': 'WARM'
    }
}
Result of executing:

sly: Syntax error at line 1, token=SKIN
{
   "MAMMALS": {
      "BLOOD": "WARM"
   }
}
I guess I have to edit the Parser, but I can't think how, and would appreciate any pointers you can give me.
You have non-terminals named animals, animaldetails, and animalnamedetails, in plural, which would normally lead one to expect that the grammar for each of them would allow a sequence of things. But they don't. Each of these categories parses a single thing. You've implemented the singular, and although it's named in plural, there's no repetition.
That this was not your intent is evident from your example, which does have multiple sections and multiple attributes in each section. But since the grammar only describes one attribute and value, the second one is a syntax error.
Traditionally, grammars will implement sequences with pairs of non-terminals; a singular non-terminal which describes a single thing, and a plural non-terminal which describes how lists are formed (simple concatenation, or separated by punctuation). So you might have:
file:     sections
sections: empty
        | sections section
section:  category settings
settings: empty
        | settings setting
setting:  attribute '=' value
You probably should also look for a description of how to manage semantic values. Storing intermediate results in class members, as you do, works only when the grammar doesn't allow nesting, which is relatively unusual. It's a technique which will almost always get you into trouble. The semantic actions of each production should manage these values:
A singular production should create and return a representation of the object.
A plural → empty production should create and return a representation of an empty collection.
Similarly, a production of the form things → things thing should append the new thing to the aggregate of things, and then return the augmented aggregate. A sketch of this pattern follows.
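Here is a minimal sketch of how those plural rules and their semantic values might look in SLY. It is illustrative only: it reuses the token names from the ConfigLexer in your question, and is not the only way to structure the grammar.

from sly import Parser

class AnimalsParser(Parser):
    # reuse the token set from the question's lexer
    tokens = ConfigLexer.tokens

    @_('ANIMALS sections')
    def file(self, p):
        # the aggregate built by the plural rule is the final result
        return p.sections

    @_('')
    def empty(self, p):
        pass

    # plural rule: either empty, or a list followed by one more item
    @_('empty')
    def sections(self, p):
        return {}

    @_('sections section')
    def sections(self, p):
        name, settings = p.section
        p.sections[name] = settings
        return p.sections

    # singular rule: one named section with its settings
    @_('ANIMAL_NAME settings')
    def section(self, p):
        return p.ANIMAL_NAME.strip('{}'), p.settings

    @_('empty')
    def settings(self, p):
        return {}

    @_('settings setting')
    def settings(self, p):
        key, value = p.setting
        p.settings[key] = value
        return p.settings

    @_('BLOOD ASSIGN VALUE', 'SKIN ASSIGN VALUE', 'BREATHE ASSIGN VALUE')
    def setting(self, p):
        return p[0], p.VALUE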
Cheers...
from json import dumps
from sly import Lexer, Parser

class MyLexer(Lexer):
    tokens = {ANIMALS, ANIMAL_NAME, BLOOD, SKIN, BREATHE, ASSIGN, ASSIGN_VALUE}
    ignore = ' \t'

    ANIMALS = r'ANIMALS'
    BLOOD = r'BLOOD'
    SKIN = r'SKIN'
    BREATHE = r'BREATHE'
    ASSIGN = r'='
    ASSIGN_VALUE = r'[a-zA-Z_][a-zA-Z0-9_]*'

    @_(r'\{[a-zA-Z_][a-zA-Z0-9_]*\}')
    def ANIMAL_NAME(self, t):
        t.value = str(t.value).lstrip('{').rstrip('}')
        return t

    @_(r'\n+')
    def NEWLINE(self, t):
        self.lineno += t.value.count('\n')

class MyParser(Parser):
    tokens = MyLexer.tokens

    def __init__(self):
        self.__config = {}

    def __del__(self):
        print(dumps(self.__config, indent=4))

    @_('ANIMALS animal animal')
    def animals(self, p):
        pass

    @_('ANIMAL_NAME assignment assignment assignment')
    def animal(self, p):
        if p.ANIMAL_NAME not in self.__config:
            self.__config[p.ANIMAL_NAME] = {}
        animal_name, *assignments = p._slice
        for assignment in assignments:
            assignment_key, assignment_value = assignment.value
            self.__config[p.ANIMAL_NAME][assignment_key] = assignment_value

    @_('key ASSIGN ASSIGN_VALUE')
    def assignment(self, p):
        return p.key, p.ASSIGN_VALUE

    @_('BLOOD', 'SKIN', 'BREATHE')
    def key(self, p):
        return p[0]

if __name__ == '__main__':
    lexer = MyLexer()
    parser = MyParser()
    text = '''ANIMALS
    {MAMMALS}
        BLOOD = WARM
        SKIN = FUR
        BREATHE = LUNGS
    {FISH}
        BLOOD = COLD
        SKIN = SCALY
        BREATHE = GILLS
    '''
    parser.parse(lexer.tokenize(text))
Output:
{
    "MAMMALS": {
        "BLOOD": "WARM",
        "SKIN": "FUR",
        "BREATHE": "LUNGS"
    },
    "FISH": {
        "BLOOD": "COLD",
        "SKIN": "SCALY",
        "BREATHE": "GILLS"
    }
}
Is it possible to somehow have 2 functions with the same name, but only one of them gets defined?
Something like:
version = 'revA'

def RevA():
    if (version == 'revA'):
        return lambda x: x
    else:
        return lambda x: None

def RevB():
    if (version == 'revB'):
        return lambda x: x
    else:
        return lambda x: None

@RevA
def main():
    print("RevA")

@RevB
def main():
    print("RevB")

main()
How about classes and inheritance:
class Base:
    def main(self):
        print("base")

class RevA(Base):
    def main(self):
        print("RevA")

class RevB(Base):
    def main(self):
        print("RevB")

if version == 'revA':
    obj = RevA()
elif version == 'revB':
    obj = RevB()
else:
    obj = Base()

obj.main()
Also typical are factory functions like:
def get_obj(version, *args, **kwargs):
    omap = {'revA': RevA, 'revB': RevB}
    return omap[version](*args, **kwargs)
This allows you to call for example:
obj = get_obj('revA', 23, fish='burbot')
Which will be equivalent to:
if version == 'revA':
    obj = RevA(23, fish='burbot')
You can, but doing literally that would be very uncommon:
if version == 'revA':
    def main():
        print("RevA")
elif version == 'revB':
    def main():
        print("RevB")

main()
More usually, you'd define both functions then choose which one to use by assigning it to a variable:
def main_A():
    print("RevA")

def main_B():
    print("RevB")

# select the right version using a dispatch table
main = {
    'revA': main_A,
    'revB': main_B,
}[version]

main()
Variants of this latter approach are quite common; both web applications and graphical applications often work this way, with a table mapping URLs or user actions to functions to be called. Often the table is maintained by the framework and your code adds entries to it in multiple places, sometimes in bulk (e.g. Django), sometimes one by one (e.g. Flask).
Having both functions defined (not just the selected one) means that you can also call each version directly; that's useful if the main program uses a dispatch table but various subsidiary code (such as the tests) needs to call a particular one of the functions.
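A self-contained sketch of that one-by-one registration style (hypothetical names, not Flask's actual API):

HANDLERS = {}

def register(version_key):
    # adds the decorated function to the dispatch table and returns it unchanged
    def decorator(fn):
        HANDLERS[version_key] = fn
        return fn
    return decorator

@register('revA')
def main_A():
    print("RevA")

@register('revB')
def main_B():
    print("RevB")

# 'version' is assumed to hold 'revA' or 'revB', as in the question
HANDLERS[version]()
# main_A() and main_B() remain directly callable, e.g. from tests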
I want to build a Python client on top of a REST API that uses authentication with an api_token, hence all API calls require the api_token. It is pretty ugly to have to add a field
token='...'
to each function call, e.g.:
a = f1(5, token='token')
b = f2(6, 12, token='token')
c = f3(2, 'a', token='token')
where internally f1, f2 and f3 delegate to the REST API. What I would like to have instead is something like:
auth = authenticate('token')
a = f1(5)
b = f2(6, 12,)
c = f3(2, 'a')
What I can do is to create a class and make all functions member functions. Hence, we would have:
auth = calculator('token')
a = auth.f1(5)
b = auth.f2(6, 12,)
c = auth.f3(2, 'a')
but that would also be somewhat ugly. I am trying to get this to work with decorators, but to no avail so far.
class authenticate:
    def __init__(self, token):
        self.token = token

    def __call__(self, func):
        def functor(*args, **kwargs):
            return func(*args, **kwargs, key=self.token)
        return functor

@authenticate
def f1(a, key):
    data = a
    result = requests.get(1, data, key)
    return result
However, this seems to be going nowhere. I am also wondering whether this can work at all, as decorators are executed at import time and the token is only added at runtime.
Any suggestions on how to make this work, or does anyone know another standard pattern for this?
So after some hacking around we came up with the following:
class authenticate:
    # start with an empty key
    key = None

    @classmethod
    def set_key(cls, token):
        """add the token"""
        cls.key = token

    def __init__(self, func=None):
        if func is not None:
            self.func = func
        else:
            print('no function')

    def __call__(self, *arg):
        """
        add authentication to function func
        """
        ret = self.func(*arg, key=self.key)
        return ret

@authenticate
def f1(a, key):
    data = a
    result = requests.get(1, data, key)
    return result
Then you can run code like:
authentication_key = 'token'
print('Initiate class')
authenticate.set_key(token=authentication_key)

print('Run f1(5)')
a1 = f1(5)      # no token needed!
a2 = f2(6, 12)  # again no token needed as it is in the decorator
print(a1)
This works more or less as I hoped, and I find it cleaner than making everything a method on a class. If anyone has a better suggestion or improvements let me know.
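For reference, a more conventional variant of the same idea (a sketch, not from the original post) keeps the token in a module-level variable and uses a plain function decorator with functools.wraps; because the token is read at call time rather than at import time, it can still be set after the decorated functions are defined:

import functools

_API_TOKEN = None  # hypothetical module-level storage, set once at runtime

def set_token(token):
    global _API_TOKEN
    _API_TOKEN = token

def authenticated(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # the token is looked up when the function is called, not when it is decorated
        return func(*args, key=_API_TOKEN, **kwargs)
    return wrapper

@authenticated
def f1(a, key):
    # key arrives from the decorator; a real version would call the REST API here
    return a, key

set_token('token')
print(f1(5))  # -> (5, 'token'); no token passed at the call site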