Reset specific slot and ask question again in Rasa NLU - python

I am building a chatbot for table reservation in a hotel.
I have written a custom action to validate and modify the extracted user input. I want to reset my slot and ask the question again if the flag value is False.
Here's my actions.py file:
from typing import Any, Text, Dict, List, Union
from rasa_sdk import Action, Tracker
from rasa_sdk.events import SlotSet, UserUtteranceReverted, EventType, AllSlotsReset
from rasa_sdk.executor import CollectingDispatcher
from word2number import w2n
from .functions import util


class ValidateRestaurantForm(Action):
    def name(self) -> Text:
        return "user_details_form"

    def run(self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict) -> List[EventType]:
        required_slots = ["number_table", "table_type", "reserve_time"]
        for slot_name in required_slots:
            if tracker.slots.get(slot_name) is None:
                # The slot is not filled yet. Request the user to fill this slot next.
                return [SlotSet("requested_slot", slot_name)]
        # All slots are filled.
        return [SlotSet("requested_slot", None)]
class ActionSubmit(Action):
    def name(self) -> Text:
        return "action_submit"

    def run(
        self,
        dispatcher,
        tracker: Tracker,
        domain: "DomainDict",
    ) -> List[Dict[Text, Any]]:
        number_table = tracker.get_slot("number_table")
        number_table = w2n.word_to_num(number_table)
        table_type = tracker.get_slot("table_type")
        reserve_time = tracker.get_slot("reserve_time")
        flag, reserve_time_modified = util(reserve_time)
        if flag == False:
            dispatcher.utter_message(response="utter_deny_reserve_time")
            dispatcher.utter_message(response="utter_ask_reserve_time")
            # <------- Resetting the slot and asking for the reservation time again
            return [SlotSet("reserve_time", None)]
        else:
            dispatcher.utter_message(response="utter_submit",
                                     number_table=number_table,
                                     table_type=tracker.get_slot("table_type"),
                                     reserve_time=reserve_time_modified)
            return [AllSlotsReset()]
I am not able to find any answers on Rasa forums. Please suggest some ideas to solve this problem. I am a beginner in Rasa.
Thanks in advance.

This looks like something that can be done by writing stories that implement branching logic.
You can make flag a slot and have its value set by ActionSubmit. Then, you can write a story where, if the flag slot is false after ActionSubmit runs, you activate the form.
Update 2021-05-08
This part of ActionSubmit:
flag, reserve_time_modified = util(reserve_time)
could be a separate action named e.g. is_valid_reserve_time that ends by setting the flag and reserve_time_modified slots.
Then, you could write a story that branches depending on the value of the flag slot.
Alternatively, you could use a form to collect all the required booking information and validate the value users provide for reserve_time as it gets filled. The docs have an example of validating an input that a user provides to a form and asking the user to fill it again if the input is invalid.
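If you go the form route, the validation can live in a FormValidationAction. Here is a rough sketch, assuming Rasa SDK 2.x, the util helper from the question, and a form called user_details_form declared in the domain with reserve_time as a required slot; returning None for the slot makes the form ask for it again:
from typing import Any, Dict, Text

from rasa_sdk import FormValidationAction, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.types import DomainDict

from .functions import util


class ValidateUserDetailsForm(FormValidationAction):
    def name(self) -> Text:
        return "validate_user_details_form"

    def validate_reserve_time(
        self,
        slot_value: Any,
        dispatcher: CollectingDispatcher,
        tracker: Tracker,
        domain: DomainDict,
    ) -> Dict[Text, Any]:
        flag, reserve_time_modified = util(slot_value)
        if not flag:
            dispatcher.utter_message(response="utter_deny_reserve_time")
            # Setting the slot back to None makes the form ask for reserve_time again.
            return {"reserve_time": None}
        return {"reserve_time": reserve_time_modified}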


Pythonic Classes and the Zen of Python

The Zen of Python says:
“There should be one—and preferably only one—obvious way to do it.”
Let’s say I want to create a class that builds a financial transaction. The class should allow the user to build a transaction and then call a sign() method to sign the transaction in preparation for it to be broadcast via an API call.
The class will have the following parameters:
sender
recipient
amount
signer (private key for signing)
metadata
signed_data
All of these are strings, except for the amount which is an int, and all are required except for the last two: metadata which is an optional parameter, and signed_data which is created when the method sign() is called.
We would like all of the parameters to undergo some kind of validation before the signing happens so we can reject badly formatted transactions by raising an appropriate error for the user.
This seems straightforward using a classic Python class and constructor:
class Transaction:
    def __init__(self, sender, recipient, amount, signer, metadata=None):
        self.sender = sender
        self.recipient = recipient
        self.amount = amount
        self.signer = signer
        if metadata:
            self.metadata = metadata

    def is_valid(self):
        # check that all required parameters are valid and exist and return True,
        # otherwise return False
        ...

    def sign(self):
        if self.is_valid():
            # sign transaction
            self.signed_data = "pretend signature"
        else:
            # raise InvalidTransactionError
            ...
Or with properties:
class Transaction:
    def __init__(self, sender, recipient, amount, signer, metadata=None):
        self._sender = sender
        self._recipient = recipient
        self._amount = amount
        self._signer = signer
        self._signed_data = None
        if metadata:
            self._metadata = metadata

    @property
    def sender(self):
        return self._sender

    @sender.setter
    def sender(self, sender):
        # validate value, raise InvalidParamError if invalid
        self._sender = sender

    @property
    def recipient(self):
        return self._recipient

    @recipient.setter
    def recipient(self, recipient):
        # validate value, raise InvalidParamError if invalid
        self._recipient = recipient

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, amount):
        # validate value, raise InvalidParamError if invalid
        self._amount = amount

    @property
    def signer(self):
        return self._signer

    @signer.setter
    def signer(self, signer):
        # validate value, raise InvalidParamError if invalid
        self._signer = signer

    @property
    def metadata(self):
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        # validate value, raise InvalidParamError if invalid
        self._metadata = metadata

    @property
    def signed_data(self):
        return self._signed_data

    @signed_data.setter
    def signed_data(self, signed_data):
        # validate value, raise InvalidParamError if invalid
        self._signed_data = signed_data

    def is_valid(self):
        return (self.sender and self.recipient and self.amount and self.signer)

    def sign(self):
        if self.is_valid():
            # sign transaction
            self.signed_data = "pretend signature"
        else:
            # raise InvalidTransactionError
            print("Invalid Transaction!")
We can now validate each value when it's set, so by the time we go to sign we know we have valid parameters, and the is_valid() method only has to check that all required parameters have been set. This feels a little more Pythonic to me than doing all the validation in a single is_valid() method, but I am unsure if all the extra boilerplate code is really worth it.
With dataclasses:
from dataclasses import dataclass


@dataclass
class Transaction:
    sender: str
    recipient: str
    amount: int
    signer: str
    metadata: str = None
    signed_data: str = None

    def is_valid(self):
        # check that all parameters are valid and exist and return True,
        # otherwise return False
        ...

    def sign(self):
        if self.is_valid():
            # sign transaction
            self.signed_data = "pretend signature"
        else:
            # raise InvalidTransactionError
            print("Invalid Transaction!")
Comparing this to Approach 1, this is pretty nice. It's concise, clean, and readable, and already has __init__(), __repr__() and __eq__() methods built in. On the other hand, compared to Approach 2 we're back to validating all the inputs via a massive is_valid() method.
We could try to use properties with dataclasses but that's actually harder than it sounds. According to this blog post it can be done something like this:
@dataclass
class Transaction:
    sender: str
    _sender: str = field(init=False, repr=False)
    recipient: str
    _recipient: str = field(init=False, repr=False)
    . . .
    # properties for all parameters

    def is_valid(self):
        # if all parameters exist, return True,
        # otherwise return False
        ...

    def sign(self):
        if self.is_valid():
            # sign transaction
            self.signed_data = "pretend signature"
        else:
            # raise InvalidTransactionError
            print("Invalid Transaction!")
Is there one and only one obvious way to do this? Are dataclasses recommended for this kind of application?
As a general rule, and not limited to Python, it is a good idea to write code which "fails fast": that is, if something goes wrong at runtime, you want it to be detected and signalled (e.g. by throwing an exception) as early as possible.
Especially in the context of debugging, if the bug is that an invalid value is being set, you want the exception to be thrown at the time the value is set, so that the stack trace includes the method setting the invalid value. If the exception is thrown at the time the value is used, then you can't signal which part of the code caused the invalid value.
Of your three examples, only the second one allows you to follow this principle. It may require more boilerplate code, but writing boilerplate code is easy and doesn't take much time, compared to debugging without a meaningful stack trace.
By the way, if you have setters which do validation, then you should call these setters from your constructor too, otherwise it's possible to create an object with an invalid initial state.
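To illustrate that last point, here is a minimal sketch of a constructor that assigns through the public names so each value passes through its validating setter. Only the amount property is shown; the other fields would get the same treatment, and InvalidParamError is assumed to be defined elsewhere:
class Transaction:
    def __init__(self, sender, recipient, amount, signer, metadata=None):
        # With validating setters defined for every field (only amount is
        # shown below), assigning via the public names means a bad value
        # fails right here in the constructor, not later at sign() time.
        self.sender = sender
        self.recipient = recipient
        self.amount = amount
        self.signer = signer
        self.metadata = metadata
        self._signed_data = None

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, amount):
        if not isinstance(amount, int) or amount <= 0:
            raise InvalidParamError("amount must be a positive integer")
        self._amount = amount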
Given your constraints, I think your dataclass approach can be improved to produce an expressive and idiomatic solution with very strong runtime assertions about the resulting Transaction instances, mostly by leveraging the __post_init__ mechanism:
from dataclasses import dataclass, asdict, field
from typing import Optional


@dataclass(frozen=True)
class Transaction:
    sender: str
    recipient: str
    amount: int
    signer: str
    metadata: Optional[str] = None
    signed_data: str = field(init=False)

    def is_valid(self) -> bool:
        ...  # implement your validity assertion logic

    def __post_init__(self):
        if self.is_valid():
            object.__setattr__(self, "signed_data", "pretend signature")
        else:
            raise ValueError(f"Invalid transaction with parameter list "
                             f"{asdict(self)}.")
This reduces the amount of code you have to maintain and understand to a degree where every written line relates to a meaningful part of your requirements, which is the essence of pythonic code.
Put into words, instances of this Transaction class may specify metadata but don't need to and may not supply their own signed_data, something which was possible in your variant #3. Attributes can't be mutated any more after initialization (enforced by frozen=True), so that an instance that is valid cannot be altered into an invalid state. And most importantly, since the validation is now part of the constructor, it is impossible for an invalid instance to exist. Whenever you are able to refer to a Transaction in runtime, you can be 100% sure that it passed the validity check and would do so again.
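A quick hypothetical usage sketch, assuming is_valid() checks that the required string fields are non-empty and that amount is a positive int:
import dataclasses

tx = Transaction(sender="alice", recipient="bob", amount=10, signer="key")
print(tx.signed_data)  # "pretend signature", set in __post_init__

try:
    tx.amount = 0  # frozen=True: ordinary attribute assignment raises
except dataclasses.FrozenInstanceError:
    print("instances cannot be mutated after construction")

try:
    Transaction(sender="", recipient="bob", amount=10, signer="key")
except ValueError as exc:
    print(exc)  # an invalid transaction never comes into existence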
Since you based your question on python-zen conformity (referring to Beautiful is better than ugly and Simple is better than complex in particular), I'd say this solution is preferable to the property based one.

How to change a variable value in a python parent class from sub class method just for class instance

OK, I am not entirely sure if my title is accurate, as I do not fully understand class inheritance and instances at the moment, but I understand it is something I need to grasp moving forward.
Background: I am attempting to create a custom importer for my bank to be used with the popular Beancount/fava double-entry ledger accounting system. I originally reported this to fava as a bug, but then realized it's not a bug; it's more my lack of general understanding of Python classes, so I thought it would be better to post here.
So... I have created the following import script, which as I understand it is a subclass of beancount csv.Importer (https://github.com/beancount/beancount/blob/master/beancount/ingest/importers/csv.py), which is in turn a subclass of beancount Importer (https://github.com/beancount/beancount/blob/master/beancount/ingest/importer.py).
In my importer I override two methods of csv.Importer, name() and file_account(). My goal is to derive the source account associated with the input file based on the file name and a dictionary look-up. I do not wish to override the extract() method in my subclass; however, the csv.Importer extract() method references self.account, which represents the source account to use for extracted transactions. Currently, if I feed my script a file named 'SIMPLII_9999_2018-01-01.csv', the account is properly derived as 'Assets:Simplii:Chequing-9999'. However, if I stop short of actually importing the transactions in fava and instead attempt to extract the transactions again from the same file, the derived account becomes 'Assets:Simplii:Chequing-9999:Chequing-9999'.
What I am trying to do is derive the source account from the input file and pass this information as the self.account variable in the parent class (csv.Importer) for my class instance (I think). What is it that I am doing wrong in my class that is causing the derived source account to be carried over to the next instance?
#!/usr/bin/env python3
from beancount.ingest import extract
from beancount.ingest.importers import csv
from beancount.ingest import cache
from beancount.ingest import regression
import re
from os import path
from smart_importer.predict_postings import PredictPostings


class SimpliiImporter(csv.Importer):
    '''
    Importer for the Simplii bank.
    Note: This undecorated class can be regression-tested with
    beancount.ingest.regression.compare_sample_files
    '''
    config = {csv.Col.DATE: 'Date',
              csv.Col.PAYEE: 'Transaction Details',
              csv.Col.AMOUNT_DEBIT: 'Funds Out',
              csv.Col.AMOUNT_CREDIT: 'Funds In'}
    account_map = {'9999': 'Chequing-9999'}

    def __init__(self, *, account, account_map=account_map):
        self.account_map = account_map
        self.account = 'Assets:Simplii'
        super().__init__(
            self.config,
            self.account,
            'CAD',
            ['Filename: .*SIMPLII_\d{4}_.*\.csv',
             'Contents:\n.*Date, Transaction Details, Funds Out, Funds In'],
            institution='Simplii'
        )

    def name(self):
        cls = self.__class__
        return '{}.{}'.format(cls.__module__, cls.__name__)

    def file_account(self, file):
        __account = None
        if file:
            m = re.match(r'.+SIMPLII_(\d{4})_.*', file.name)[1]
            if m:
                sub_account = self.account_map.get(m)
                if sub_account:
                    __account = self.account + ':' + sub_account
        return __account

    def extract(self, file):
        self.account = self.file_account(file)
        return super().extract(file)


@PredictPostings(training_data='/beancount/personal.beancount')
class SmartSimpliiImporter(SimpliiImporter):
    '''
    A smart version of the Simplii importer.
    '''
    pass
So I have managed to get this working, however I don't think it's the proper way to do it...
I changed the extract function like this:
def extract(self, file):
    self.account = self.file_account(file)
    postings = super().extract(file)
    self.account = 'Assets:Simplii'
    return postings
Basically, I set self.account to the value I need, call the parent class extract function saving the results to a variable, reset the self.account variable, and return the results. It seems more of a workaround than the proper way, but at least it's here in case it helps someone else out...
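If you keep that workaround, one small refinement (a sketch, not tested against beancount) is to restore the original value in a finally block, so an exception inside the parent extract() can't leave the importer stuck with the per-file account:
def extract(self, file):
    original_account = self.account
    self.account = self.file_account(file)
    try:
        return super().extract(file)
    finally:
        # Always restore the shared default, even if extract() raises.
        self.account = original_account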

Customize Maya's addCheckCallback pop up message

When the user saves a file I want a check to happen prior to saving. If the check fails then it doesn't save. I got this working with MSceneMessage and kBeforeSaveCheck, but I don't know how to customize the pop-up message when it fails. Is this possible?
import maya.OpenMaya as om
import maya.cmds as cmds

def func(retCode, clientData):
    objExist = cmds.objExists('pSphere1')
    om.MScriptUtil.setBool(retCode, (not objExist))  # Cancel save if there's a pSphere1 in the scene

cb_id = om.MSceneMessage.addCheckCallback(om.MSceneMessage.kBeforeSaveCheck, func)
Right now it displays
File operation cancelled by user supplied callback.
I'm a bit slow to the question, but I needed something similar today so I figured I'd respond. I cannot decide if I would recommend this in the general case, but strictly speaking, it is possible to change a considerable number of static strings in the Maya interface using the displayString command. The easy part is that you already know the string you are looking for:
import maya.cmds as cmds

message = u"File operation cancelled by user supplied callback."
keys = cmds.displayString("_", q=True, keys=True)
for k in keys:
    value = cmds.displayString(k, q=True, value=True)
    if value == message:
        print("Found matching displayString: {}".format(k))
Running this on Maya 2015 finds over 30000 registered display strings and returns a single matching key: s_TfileIOStrings.rFileOpCancelledByUser. Seems promising to me.
Here's your initial code modified to change the display string:
import maya.OpenMaya as om
import maya.cmds as cmds

def func(retCode, clientData):
    """Cancel save if there is a pSphere1 in the scene"""
    objExist = cmds.objExists('pSphere1')
    string_key = "s_TfileIOStrings.rFileOpCancelledByUser"
    string_default = "File operation cancelled by user supplied callback."
    string_error = "There is a pSphere1 node in your scene"
    message = string_error if objExist else string_default
    cmds.displayString(string_key, replace=True, value=message)
    om.MScriptUtil.setBool(retCode, (not objExist))

cb_id = om.MSceneMessage.addCheckCallback(om.MSceneMessage.kBeforeSaveCheck, func)

How to format an integer widget on the controlpanel using plone.app.registry

I have an add-on configuration page/form generated by plone.app.registry.browser.controlpanel
Using these Docs:
http://plone.org/documentation/kb/how-to-create-a-plone-control-panel-with-plone.app.registry
https://pypi.python.org/pypi/plone.app.registry#control-panel-widget-settings
On this form, I have an integer field:
from zope import schema
from zope.interface import Interface
from plone.app.registry.browser import controlpanel


class MyAddonSettings(Interface):
    partnerId = schema.Int(title=u"Partner Id",
                           description=u"enter your Partner ID",
                           required=True,
                           default=54321)


class SettingsEditForm(controlpanel.RegistryEditForm):
    schema = MyAddonSettings
    label = u"My settings"
    description = u""""""

    def updateFields(self):
        super(SettingsEditForm, self).updateFields()

    def updateWidgets(self):
        super(SettingsEditForm, self).updateWidgets()


class SettingsControlPanel(controlpanel.ControlPanelFormWrapper):
    form = SettingsEditForm
When the form renders, the integer field is auto-filled with '54,321'. I don't want the comma.
How do I specify "Don't do that!"?
So, I think I went pretty deep down the rabbit hole, but here is what I came up with.
1) The default widget for zope.schema.Int is the TextWidget.
2) z3c.form.converter.IntegerDataConverter adapts itself to zope.schema.interfaces.IInt and ITextWidget.
3) The IntegerDataConverter calls upon the locale to format the integer for you, giving you a nice, pretty representation of an int, with commas.
My choice was to create a new widget, 'IntWidget', and a new converter, 'NoFormatIntegerDataConverter', adapt them to each other, and then manually set the field in question to use the new widget.
I'm sure there is a less 'rabbit hole' way to do this, but I found myself at the bottom, so I completed the journey. I'll let a zope guru follow up with the 'right' way to do it.
=========================
Create the new widget based on TextWidget, so we don't tie our new converter to everyone's TextWidget and break someone else's stuff:
import zope.interface
import zope.component
import zope.schema.interfaces
import z3c.form.interfaces
from z3c.form.widget import FieldWidget
from z3c.form.browser.text import TextWidget
from z3c.form import converter


class IIntWidget(z3c.form.interfaces.ITextWidget):
    """Int Widget"""


class IntWidget(TextWidget):
    zope.interface.implementsOnly(IIntWidget)
    klass = u'int-widget'
    value = u''


@zope.component.adapter(zope.schema.interfaces.IField,
                        z3c.form.interfaces.IFormLayer)
@zope.interface.implementer(z3c.form.interfaces.IFieldWidget)
def IntFieldWidget(field, request):
    """IFieldWidget factory for IntWidget."""
    return FieldWidget(field, IntWidget(request))


zope.component.provideAdapter(IntFieldWidget)
Create the 'dumb' converter, and adapt it to our new widget 'IntWidget'
class NoFormatIntegerDataConverter(converter.IntegerDataConverter):
    """Data converter that ignores the formatter and
    simply returns the unicode representation of the integer value.

    The base class for this calls upon the locale for a formatter.
    This completely avoids calling the locale.
    """
    zope.component.adapts(zope.schema.interfaces.IInt, IIntWidget)

    def toWidgetValue(self, value):
        if value is self.field.missing_value:
            return u''
        # go look at z3c.form.converter.IntegerDataConverter
        # to see what it used to return here.
        return unicode(value)


zope.component.provideAdapter(NoFormatIntegerDataConverter)
Finally, update the field widget factory to use our new widget
class SettingsEditForm(controlpanel.RegistryEditForm):
    ...
    def updateFields(self):
        super(SettingsEditForm, self).updateFields()
        self.fields['partnerId'].widgetFactory = IntFieldWidget  # <----- Here
    ...

App Engine (Python) Datastore Precall API Hooks

Background
So let's say I'm making app for GAE, and I want to use API Hooks.
BIG EDIT: In the original version of this question, I described my use case, but some folks correctly pointed out that it was not really suited for API Hooks. Granted! Consider me helped. But now my issue is academic: I still don't know how to use hooks in practice, and I'd like to. I've rewritten my question to make it much more generic.
Code
So I make a model like this:
from google.appengine.ext import db

class Model(db.Model):
    user = db.UserProperty(required=True)

    def pre_put(self):
        # Sets a value, raises an exception, whatever. Use your imagination
        pass
And then I create a db_hooks.py:
from google.appengine.api import apiproxy_stub_map

def patch_appengine():
    def hook(service, call, request, response):
        assert service == 'datastore_v3'
        if call == 'Put':
            for entity in request.entity_list():
                entity.pre_put()

    apiproxy_stub_map.apiproxy.GetPreCallHooks().Append('preput',
                                                        hook,
                                                        'datastore_v3')
Being TDD-addled, I'm making all this using GAEUnit, so in gaeunit.py, just above the main method, I add:
import db_hooks
db_hooks.patch_appengine()
And then I write a test that instantiates and puts a Model.
Question
While patch_appengine() is definitely being called, the hook never is. What am I missing? How do I make the pre_put function actually get called?
Hooks are a little low level for the task at hand. What you probably want is a custom property class. DerivedProperty, from aetycoon, is just the ticket.
Bear in mind, however, that the 'nickname' field of the user object is probably not what you want - per the docs, it's simply the user part of the email field if they're using a gmail account, otherwise it's their full email address. You probably want to let users set their own nicknames, instead.
The issue here is that within the context of the hook() function an entity is not an instance of db.Model as you are expecting.
In this context entity is the protocol buffer class confusingly referred to as entity (entity_pb). Think of it like a JSON representation of your real entity: all the data is there, and you could build a new instance from it, but there is no reference to your memory-resident instance that is waiting for its callback.
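For example, if you only want to inspect the data inside the hook, one option is to rebuild a detached model instance from the protocol buffer. This is a hedged sketch using the legacy db module's model_from_protobuf; note that the result is a brand-new instance, not the object you called put() on, so mutating it changes nothing about the actual write:
import logging
from google.appengine.ext import db

def hook(service, call, request, response):
    assert service == 'datastore_v3'
    if call == 'Put':
        for entity_proto in request.entity_list():
            # Rebuild a detached model instance from the protocol buffer.
            model_copy = db.model_from_protobuf(entity_proto)
            # Useful for inspection or validation (e.g. raising an error),
            # but mutations to model_copy are NOT written to the datastore.
            logging.info("about to put a %s entity", model_copy.kind())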
Monkey patching all of the various put/delete methods is the best way to set up Model-level callbacks as far as I know†
Since there doesn't seem to be that many resources on how to do this safely with the newer async calls, here's a BaseModel that implements before_put, after_put, before_delete & after_delete hooks:
class HookedModel(db.Model):
    def before_put(self):
        logging.error("before put")

    def after_put(self):
        logging.error("after put")

    def before_delete(self):
        logging.error("before delete")

    def after_delete(self):
        logging.error("after delete")

    def put(self):
        return self.put_async().get_result()

    def delete(self):
        return self.delete_async().get_result()

    def put_async(self):
        return db.put_async(self)

    def delete_async(self):
        return db.delete_async(self)
Inherit your model classes from HookedModel and override the before_xxx/after_xxx methods as required.
Place the following code somewhere that will get loaded globally in your application (like main.py if you use a pretty standard looking layout). This is the part that calls our hooks:
def normalize_entities(entities):
    if not isinstance(entities, (list, tuple)):
        entities = (entities,)
    return [e for e in entities if hasattr(e, 'before_put')]


# monkeypatch put_async to call entity.before_put
db_put_async = db.put_async

def db_put_async_hooked(entities, **kwargs):
    ents = normalize_entities(entities)
    for entity in ents:
        entity.before_put()
    a = db_put_async(entities, **kwargs)
    get_result = a.get_result
    def get_result_with_callback():
        for entity in ents:
            entity.after_put()
        return get_result()
    a.get_result = get_result_with_callback
    return a

db.put_async = db_put_async_hooked


# monkeypatch delete_async to call entity.before_delete
db_delete_async = db.delete_async

def db_delete_async_hooked(entities, **kwargs):
    ents = normalize_entities(entities)
    for entity in ents:
        entity.before_delete()
    a = db_delete_async(entities, **kwargs)
    get_result = a.get_result
    def get_result_with_callback():
        for entity in ents:
            entity.after_delete()
        return get_result()
    a.get_result = get_result_with_callback
    return a

db.delete_async = db_delete_async_hooked
You can save or destroy your instances via model.put() or any of the db.put(), db.put_async(), etc. methods and get the desired effect.
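For example, a minimal usage sketch (the model name and its field are hypothetical):
import logging
from google.appengine.api import users
from google.appengine.ext import db

class Reservation(HookedModel):
    user = db.UserProperty(required=True)

    def before_put(self):
        # e.g. validate or normalise fields before the write happens
        logging.info("about to store a reservation for %s", self.user)

    def after_put(self):
        logging.info("reservation stored")

reservation = Reservation(user=users.get_current_user())
reservation.put()  # logs "about to store...", writes the entity, then logs "reservation stored"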
†would love to know if there is an even better solution!?
I don't think that Hooks are really going to solve this problem. The Hooks will only run in the context of your AppEngine application, but the user can change their nickname outside of your application using Google Account settings. If they do that, it won't trigger any logic implement in your hooks.
I think that the real solution to your problem is for your application to manage its own nickname that is independent of the one exposed by the Users entity.
