Unable to import module - Lambda handler Error - python

When I try to run my Python code in Lambda, passing the handler as function.module, I get the error below. Any suggestions on how I could resolve this?
The file test_client_visitor below is triggered to call client_visitor and send an email to the clients accordingly. When I run the Python file test_client_visitor locally, the email is triggered successfully, but in Lambda I face this issue.
file_name: test_client_visitor
import unittest

import jsonpickle

function = __import__('client_visitor')
handler = function.scan_clients


class TestFunction(unittest.TestCase):
    def test_function(self):
        file = open('event.json', 'rb')
        try:
            ba = bytearray(file.read())
            event = jsonpickle.decode(ba)
            print('## EVENT')
            print(jsonpickle.encode(event))
            context = {'requestid': '1234'}
            result = handler(event, context)
            print(result)
            self.assertTrue(result, 'Emails could not be sent!')
        finally:
            file.close()


if __name__ == '__main__':
    unittest.main()
file_name: client_visitor.py
import datetime

import boto3

from aws_ses import send_bulk_templated_email
# boto3.set_stream_logger('botocore', level='DEBUG')
from mongodb import get_mongo_db


def process_clients(clients, developers, clients_to_be_notified, days):
    if not clients:
        pass
    check_date = datetime.datetime.now() + datetime.timedelta(days)
    for client in clients:
        client_id_ = client['client_id']
        if 'developer_id' in client:
            developers[client_id_] = client['developer_id']
        else:
            if 'secrets' in client:
                secrets = client['secrets']
                for secret in secrets:
                    if 'not_on_or_after' in secret and secret['not_on_or_after'] < check_date.timestamp():
                        clients_to_be_notified.append({'client_id': client_id_,
                                                       'expiration_date': datetime.datetime.fromtimestamp(
                                                           secret['not_on_or_after']).strftime('%m/%d/%Y')})
                        print("adding client to notify List", client_id_, ":", client['sort'])
def notify_clients(clients_to_be_notified, developers):
    developer_id_list = []
    for client_secret in clients_to_be_notified:
        developer_id_list.append(developers[client_secret['client_id']])
    if developer_id_list:
        db = get_mongo_db()
        if db:
            users = list(db.users.find({'guid': {'$in': developer_id_list}}, {'email', 'guid'}))
            need_to_send_email = False
            for user in users:
                for client_secret in clients_to_be_notified:
                    if developers[client_secret['client_id']] == user['guid']:
                        client_secret['email'] = user['email']
                        need_to_send_email = True
                        break
            if need_to_send_email:
                return send_bulk_templated_email(clients_to_be_notified)
            else:
                return False
    return True
def scan_clients(event, context):
    local = False
    if 'local' in event:
        local = event['local'] == 'True'
    if local:
        dynamodb = boto3.resource('dynamodb', endpoint_url="http://localhost:8000")
    else:
        dynamodb = boto3.resource('dynamodb')
    days = 30
    if 'days' in event:
        days = int(event['days'])
    print(f"Scanning Clients with {days} or less to secret expiration")
    table = dynamodb.Table('****')
    scan_kwargs = {
        'ProjectionExpression': 'client_id, sort, developer_id, secrets, approved'
    }
    test = False
    if 'test' in event:
        test = event['test'] == 'True'
    done = False
    start_key = None
    developers = {}
    clients_to_be_notified = []
    if test:
        developers['idm-portal1'] = '***'
        clients_to_be_notified = [{'client_id': 'idm-portal1', 'expiration_date': '04/17/2021'}]
    while not done:
        if start_key:
            scan_kwargs['ExclusiveStartKey'] = start_key
        response = table.scan(**scan_kwargs)
        process_clients(response.get('Items', []), developers, clients_to_be_notified, days)
        start_key = response.get('LastEvaluatedKey', None)
        done = start_key is None
    print("total developers ", len(developers), " total clients_to_be_notified ", len(clients_to_be_notified))
    return notify_clients(clients_to_be_notified, developers)


if __name__ == '__main__':
    scan_clients(event={'days': 30, 'local': False, 'test': True}, context=None)
Response:
{
    "errorMessage": "Unable to import module 'test_client_visitor': No module named 'test_client_visitor'",
    "errorType": "Runtime.ImportModuleError",
    "stackTrace": []
}

Your file must be named test_client_visitor.py. The way Lambda runs your code is by importing the main file and calling the handler function; see the AWS docs on setting up a handler for Python.
The reason you didn't run into this issue locally is, I assume, that you are calling python directly on the command line (python test_client_visitor). When you import a module in Python, the file has to end in the .py extension.
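For reference, a minimal sketch of how the file name maps to the handler setting (a generic example, not the exact configuration from the question):

# file: test_client_visitor.py, at the root of the deployment package
def handler(event, context):
    return {'ok': True}

With this file, the Lambda handler setting would be test_client_visitor.handler, i.e. module name, dot, function name.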

I was able to fix this issue by packaging the contents of the zip correctly, avoiding the creation of an extra folder with the command below.
Command:
cd folder; zip -r ../filename.zip *
Thank you everyone for your inputs.
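To illustrate the difference in the resulting zip layout (an assumed layout; Lambda imports the handler module from the root of the package):

zip -r filename.zip folder           -> filename.zip contains folder/test_client_visitor.py (import fails)
cd folder; zip -r ../filename.zip *  -> filename.zip contains test_client_visitor.py at the root (import works)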

Related

Python script to post tweet runs fine in VS Code but Cronjob fails

The script runs perfectly fine in VS Code, but the cronjob fails. However, after I run the code in VS Code and then run the cronjob again, cron works fine. I'm really not sure what I am missing.
It seems like
media = self.api.media_upload(filename=media_upload[i])
didn't run properly.
my_tweet.py
import keyring
import tweepy as tw


class Tweet:
    def __init__(self):
        # self.consumer_key = keyring.get_password("PY_INFO", "tw_consumer_key")
        # self.consumer_secret = keyring.get_password("PY_INFO", "tw_consumer_secret")
        # self.access_token = keyring.get_password("PY_INFO", "tw_access_token")
        # self.access_token_secret = keyring.get_password("PY_INFO", "tw_access_token_secret")
        self.consumer_key = '...'
        self.consumer_secret = '...'
        self.access_token = '...'
        self.access_token_secret = '...'
        self.auth = tw.OAuth1UserHandler(
            self.consumer_key,
            self.consumer_secret,
            self.access_token,
            self.access_token_secret
        )
        self.api = tw.API(self.auth)

    def post(self, status, in_reply_to_status_id=None, media_upload=None):
        try:
            print(media_upload)
            if media_upload is not None:
                media_ids = []
                print("upload media...")
                if isinstance(media_upload, list):
                    for i in range(len(media_upload)):
                        print(media_upload[i])
                        media = self.api.media_upload(filename=media_upload[i])
                        print(f'added media {media_upload[i]}.....')
                        media_ids.append(media.media_id_string)
                        print(f'appended media_id_string.....')
                else:
                    media = self.api.media_upload(filename=media_upload)
                    media_ids.append(media.media_id_string)
                print("before update status: ")
                my_tweet = self.api.update_status(status=status,
                                                  media_ids=media_ids,
                                                  in_reply_to_status_id=in_reply_to_status_id)
            else:
                my_tweet = self.api.update_status(status=status,
                                                  in_reply_to_status_id=in_reply_to_status_id)
            return my_tweet.id_str
        except:
            print(f"Tweet: {status} - Failed")


tw = my_tweet.Tweet()
medias = []
for i in MEDIAS_TO_ADD:
    print(i)
    p = tr.get_priv_data_path(i, date=date)
    assert len(p) > 0
    medias.append(p)
tw.post(f"{date} - Market Internals", media_upload=medias)

Python: _pickle.PicklingError: Can't pickle <class 'wtforms.form.Meta'>: attribute lookup Meta on wtforms.form failed

Background:
I built an app in which, as soon as a user browses to a file and clicks the "upload" button, a behind-the-scenes calculation takes place, which can take long minutes, and at the end the output is uploaded to GCP. I want the calculation (after clicking) not to block the app, and in the meantime the user should be moved to another page. At the end of the process they will receive a success/failure email.
To achieve this goal I am trying to use multiprocessing.
I'm having a lot of trouble turning it into a multi-processed app - I keep getting the following error message when I try to start a process:
_pickle.PicklingError: Can't pickle <class 'wtforms.form.Meta'>: attribute lookup Meta on wtforms.form failed
app.py
The main function is upload_file:
@app.route('/')
def home():
    return redirect(url_for('upload_file', page_num=1))


@app.route('/<int:page_num>', methods=['GET', 'POST'])
def upload_file(page_num=1):
    form = CreateXslxForm()
    traffic_data = get_page_of_traffic_data(page_num)
    queue = Queue()
    p = Process(target=main_func, args=(queue, form))
    p.start()
    proc_ret = queue.get()
    if proc_ret:
        upload_success(*proc_ret)
    else:
        return render_template(constants.GeneralConstants.LANDING_PAGE, title='URL Comparison', form=form,
                               traffic_data=traffic_data)


def get_page_of_traffic_data(page_num):
    traffic_data = urls_comparison_users_traffic.query.paginate(per_page=5, page=int(page_num), error_out=True)
    return traffic_data


def upload_success(link, traffic_row, blob_name):
    add_data_to_db(traffic_row)
    return render_template('upload_success.html', title='Upload File', file_path=link, filename=blob_name)


def add_data_to_db(traffic_row):
    # MANUAL PRE PING
    try:
        db.session.execute("SELECT 1;")
        db.session.commit()
    except:
        db.session.rollback()
    finally:
        db.session.close()
    # SESSION COMMIT, ROLLBACK, CLOSE
    try:
        db.session.add(traffic_row)
        db.session.commit()
        send_response_mail_to_user(traffic_row)
    except Exception as e:
        db.session.rollback()
        raise e
    finally:
        db.session.close()


def send_response_mail_to_user(traffic_data):
    sendgrid_obj = sendgrid_handler.SendgridHandler(sendgrid_key=SENDGRID_KEY)
    sendgrid_obj.mails_to_send = URLComparisonEmailBuilder.build_mails(traffic_data)
    sendgrid_obj.send()


if __name__ == "__main__":
    app.run(port=5000, debug=True, threaded=True)
main.py
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'gcp-cre.json'
storage_client = storage.Client()


def get_file_from_form(form):
    # form = forms.CreateXslxForm()
    timestr = time.strftime(constants.DatesFormats.DATETIME)
    custom_name = f"{constants.GeneralConstants.DEST_DEFAULT_NAME}-{timestr}"
    # traffic_data = get_page_of_traffic_data(page_num)
    if form.validate_on_submit():
        load_filename = secure_filename(form.input_file.data.filename)
        valid_file_name = get_valid_filename(request.form["filename"])
        if valid_file_name:
            custom_name = f"{valid_file_name}"
        url_comp_obj = URLCompare()
        result = url_comp_obj.compare_all_urls(form.input_file.data)
        row_data = {
            "upload_file": form.input_file.data.filename,
            "output_file": custom_name,
            "gcp_link": None,
            "user": 'nki-tov'
        }
        return custom_name, result, constants.GeneralConstants.BUCKET_NAME, row_data


def get_valid_filename(s):
    s = s.strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w]', '', s)


def upload_to_bucket(blob_name, file, bucket_name, row_data):
    '''
    Upload file to a bucket
    : blob_name (str) - object name
    : file_path (str)
    : bucket_name (str)
    '''
    bucket = storage_client.get_bucket(bucket_name)
    blob = bucket.blob(blob_name)
    blob.upload_from_file(file)
    link = f'https://console.cloud.google.com/storage/browser/_details/{bucket_name}/{blob_name};tab=live_object?authuser=0'
    row_data["gcp_link"] = link
    traffic_row = urls_comparison_users_traffic(**row_data)
    return link, traffic_row, blob_name


def main_func(queue: Queue, form):
    res = get_file_from_form(form)
    if res:
        queue.put(upload_to_bucket(*res))
    else:
        queue.put(res)  # None
What am I doing wrong?
Is it something to do with the architecture?
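For context on the error itself: with the spawn start method (the default on Windows and macOS), multiprocessing.Process pickles its arguments to hand them to the child process, and a WTForms form object, with its Meta class and open file stream, cannot be pickled. A minimal sketch of one workaround, assuming the child only needs plain values (the payload keys and helper name are illustrative, not from the question):

from multiprocessing import Process, Queue
from werkzeug.utils import secure_filename

def start_background_upload(form):
    # extract plain, picklable values before crossing the process boundary
    payload = {
        'filename': secure_filename(form.input_file.data.filename),
        'contents': form.input_file.data.read(),  # raw bytes pickle fine
    }
    queue = Queue()
    p = Process(target=main_func, args=(queue, payload))  # no form object is passed
    p.start()
    return queue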

AWS chalice local works but not chalice deploy

I am pretty new to coding and AWS Chalice. I tried writing code that gets messages from TradingView and executes orders depending on the signals.
I tested the code locally and everything worked fine, but when I test the REST API I get the following error:
{"message":"Missing Authentication Token"}
I set up my credentials via "aws configure" as explained here: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html
I also created a config.txt file in my aws folder and checked my settings via "aws configure get", and they were fine.
The index function at the beginning worked too, so the problem should be within my code?
I changed some values and cut out some functions and the strategy part, but the code looks somewhat like this:
from chalice import Chalice
from datetime import datetime
from binance.client import Client
from binance.enums import *
import ccxt

exchange = ccxt.binance({
    'apiKey': 'KEY',
    'secret': 'SECRET',
    'enableRateLimit': True,
    'options': {
        'defaultType': 'future',
    },
})


def buy_order(quantity, symbol, order_type=ORDER_TYPE_MARKET, side=SIDE_BUY, recvWindow=5000):
    try:
        print("sending order")
        order = client.futures_create_order(symbol=symbol, type=order_type, side=side, quantity=quantity, recvWindow=recvWindow)
        print(order)
    except Exception as e:
        print("an exception occured - {}".format(e))
        return False
    return True


app = Chalice(app_name='tradingview-webhook-alert')
indicator1 = "x"
indicator2 = "y"
TRADE_SYMBOL = "Test123"
in_position = False


def diff_time(time1, time2):
    fmt = '%Y-%m-%dT%H:%M:%SZ'
    tstamp1 = datetime.strptime(time1, fmt)
    tstamp2 = datetime.strptime(time2, fmt)
    if tstamp1 > tstamp2:
        td = tstamp1 - tstamp2
    else:
        td = tstamp2 - tstamp1
    td_mins = int(round(td.total_seconds() / 60))
    return td_mins


@app.route('/test123', methods=['POST'])
def test123():
    global indicator1, indicator2
    request = app.current_request
    message = request.json_body
    indicator = message["indicator"]
    price = message["price"]
    value = message["value"]
    if indicator == "indicator1":
        indicator1 = value
    if indicator == "indicator2":
        indicator2 = value
    if in_position == False:
        if (indicator1 > 123) & (indicator2 < 321):
            balance = exchange.fetch_free_balance()
            usd = float(balance['USDT'])
            TRADE_QUANTITY = (usd / price) * 0.1
            order_succeeded = buy_order(TRADE_QUANTITY, TRADE_SYMBOL)
            if order_succeeded:
                in_position = True
    return {"test": "123"}
I tested it locally with Insomnia and tried the REST API link there and in my browser, both with the same error message. Is my testing method wrong, or is it the code? But even then, why isn't the REST API link working when I include the index function from the beginning again? If I try the index function from the beginning, I get {"message": "Internal server error"}.
This is probably a very, very basic question, but I couldn't find an answer online.
Any help would be appreciated!
I am not sure if this helps you, because I don't really understand your question, but:
You are using a POST request, which will not be executed by just opening a URL.
Try something like @app.route('/test123', methods=['POST', 'GET']) so that if you just open the URL, it will execute a GET request.
Some more information:
https://www.w3schools.com/tags/ref_httpmethods.asp
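A minimal sketch of what that looks like in Chalice (route and app name reused from the question; the GET branch body is illustrative):

from chalice import Chalice

app = Chalice(app_name='tradingview-webhook-alert')

@app.route('/test123', methods=['POST', 'GET'])
def test123():
    request = app.current_request
    if request.method == 'GET':
        # opening the link in a browser sends a GET and lands here
        return {'status': 'alive'}
    # TradingView webhooks arrive as a POST with a JSON body
    return {'received': request.json_body}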

iot edge direct method handler in python

I have created a module for a BACnet scan and it will respond with a list of devices and their addresses as a result. But I'm having trouble implementing a direct method handler in Python. When I first tried implementing it myself, I got an error which could mean I didn't successfully register the direct method callback. I have some references, but they were for C#, and the Azure docs are not helping me figure out the right method to register the callback. For IoTHubModuleClient there are both on_method_request_received and receive_method_request. I appreciate any help!
def iothub_client_scan_run():
    try:
        iot_client = iothub_client_init()
        bacnet_scan_listener_thread = threading.Thread(target=device_method_listener, args=(iot_client,))
        bacnet_scan_listener_thread.daemon = True
        bacnet_scan_listener_thread.start()
        while True:
            time.sleep(1000)
    except KeyboardInterrupt:
        # assumed closing clause: the try block in the post was cut off here
        print("IoTHubModuleClient stopped")


def device_method_listener(iot_client):
    while True:
        # Receive the direct method request
        method_request = iot_client.receive_method_request()
        print(
            "\nMethod callback called with:\nmethodName = {method_name}\npayload = {payload}".format(
                method_name=method_request.name,
                payload=method_request.payload
            )
        )
        if method_request.name == "runBacnetScan":
            response = bacnet_scan_device(method_request)
        else:
            response_payload = {"Response": "Direct method {} not defined".format(method_request.name)}
            response_status = 404
        # Send a method response indicating the method request was resolved
        print('Sending method response')
        iot_client.send_method_response(response)
        print('Message sent!')
Edit:
Here is my route config
I was able to resolve my issue, or at least find the root cause, and it was my network configuration under createOptions. There seems to be an issue when I use NetworkMode: host and connect via IoTHubModuleClient.create_from_edge_environment or a connection string. I'm still trying to tweak the connection configuration, but at least I know it's not the code.
import asyncio
import time
from azure.iot.device import MethodResponse


async def method_request_handler(module_client):
    while True:
        method_request = await module_client.receive_method_request()
        print(
            "\nMethod callback called with:\nmethodName = {method_name}\npayload = {payload}".format(
                method_name=method_request.name,
                payload=method_request.payload
            )
        )
        if method_request.name == "method1":
            payload = {"result": True, "data": "some data"}  # set response payload
            status = 200  # set return status code
            print("executed method1")
        elif method_request.name == "method2":
            payload = {"result": True, "data": 1234}  # set response payload
            status = 200  # set return status code
            print("executed method2")
        else:
            payload = {"result": False, "data": "unknown method"}  # set response payload
            status = 400  # set return status code
            print("executed unknown method: " + method_request.name)
        # Send the response
        method_response = MethodResponse.create_from_method_request(method_request, status, payload)
        await module_client.send_method_response(method_response)
        print('Message sent!')


def stdin_listener():
    while True:
        try:
            selection = input("Press Q to quit\n")
            if selection == "Q" or selection == "q":
                print("Quitting...")
                break
        except:
            time.sleep(10)


# Schedule task for C2D Listener
listeners = asyncio.gather(input1_listener(module_client), twin_patch_listener(module_client), method_request_handler(module_client))
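On the original question of on_method_request_received versus receive_method_request: receive_method_request is the polling style used above, while on_method_request_received is a handler property you assign a callback to. A minimal sketch of the callback style, assuming a recent azure-iot-device version (the method name and payloads are illustrative):

from azure.iot.device import IoTHubModuleClient, MethodResponse

module_client = IoTHubModuleClient.create_from_edge_environment()

def method_request_handler(method_request):
    # invoked by the SDK for every direct method call
    if method_request.name == "runBacnetScan":
        payload, status = {"result": True}, 200
    else:
        payload, status = {"result": False, "data": "unknown method"}, 400
    module_client.send_method_response(
        MethodResponse.create_from_method_request(method_request, status, payload))

# assigning the callback is what registers the direct method handler
module_client.on_method_request_received = method_request_handler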

How to detect changes in firebase child with python?

I have some trouble with this application. What I need is that if I detect a change in the (Firebase) database, particularly in the 'sala' and 'ventilacion' nodes, the function does what it has to do; if there isn't any change in the database, it does nothing. I am using Python and the pyrebase library. Here is the code. Thank you very much for your help.
import pyrebase
import serial
import time

config = {
    # firebase configurations
}
firebase = pyrebase.initialize_app(config)
db = firebase.database()


def ReconfiguracionFabrica():
    ser.write('AT')
    time.sleep(0.2)
    ser.write('AT+RENEW')
    time.sleep(0.3)


def ConfiguracionMaster():
    time.sleep(0.5)
    ser.write('AT+IMME1')
    time.sleep(0.350)
    ser.write('AT+ROLE1')
    time.sleep(0.2)


ser = serial.Serial(port="/dev/ttyAMA0", baudrate=9600, timeout=1)
ReconfiguracionFabrica()
time.sleep(0.1)
ConfiguracionMaster()
time.sleep(0.1)
print "********** INICIO *************"
ser.flushInput()
contador = 0
prender = ''
ventilacion1 = ''
checkeo = ''

while True:
    # if db.child("sala").:  # This is where the conditional would go that lets me detect changes only in the 'sala' node.
    salidaLed1 = db.child("sala").get()
    ser.write('AT')
    time.sleep(0.1)
    ser.write('AT+CON508CB16A7014')
    time.sleep(0.1)
    if salidaLed1.val() == True:
        prender = ";"
    if salidaLed1.val() == False:
        prender = ","
    ser.write('luz: %s \n' % (prender))
    print('luz: %s \n' % (prender))
    time.sleep(1)
    ser.read(checkeo)
    if checkeo == 'j':
        ReconfiguracionFabrica()
        time.sleep(0.1)
        ConfiguracionMaster()
Question: How to detect changes in firebase child
Note: All examples use public access.
Set up the example data and verify it's readable.
This has to be done once!

temperature_c = 30
data = {'date': time.strftime('%Y-%m-%d'),
        'time': time.strftime('%H:%M:%S'),
        'temperature': temperature_c}
db.child('public').child('Device_1').set(data)

response = db.child('public').child('Device_1').get()
print(response.val())
Create a first script doing updates:

for t in [25, 26, 27, 28, 29, 30, 31, 32, 33, 35]:
    temperature_c = t
    data = {'date': time.strftime('%Y-%m-%d'),
            'time': time.strftime('%H:%M:%S'),
            'temperature': temperature_c}
    db.child('public').child('Device_1').update(data)
    time.sleep(60)
Create a second script with a stream handler:

def stream_handler(message):
    print('event={m[event]}; path={m[path]}; data={m[data]}'
          .format(m=message))

my_stream = db.child('public').child('Device_1').stream(stream_handler)

# Run the stream handler forever
while True:
    data = input("[{}] Type exit to disconnect: ".format('?'))
    if data.strip().lower() == 'exit':
        print('Stop Stream Handler')
        if my_stream:
            my_stream.close()
        break
Run Stream Handler Script:
Response Output from def stream_handler after startup (Initial Data):
event="put"; path=/; data={'Device_1': {'temperature': 30, 'time': '13:34:24', 'date': '2017-07-20'}}
Run Updater Script:
Watch Output from Stream Handler Script
Response Output from def stream_handler after First Update Data:
event=patch; path=/Device_1; data={'temperature': 25, 'time': '13:49:12'}
Tested with Python: 3.4.2
From the Pyrebase docs on streaming:
You can listen to live changes to your data with the stream() method.
def stream_handler(message):
    print(message["event"])  # put
    print(message["path"])   # /-K7yGTTEp7O549EzTYtI
    print(message["data"])   # {'title': 'Pyrebase', "body": "etc..."}

my_stream = db.child("posts").stream(stream_handler)
You should at least handle put and patch events. Refer to "Streaming from the REST API" for details.
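A minimal sketch of a handler that branches on both event types and on the changed node (a sketch only; the node names follow the question):

def stream_handler(message):
    # the first event is a 'put' with the node's current contents;
    # later changes arrive as 'put' or 'patch'
    if message["event"] not in ("put", "patch"):
        return
    path = message["path"]  # e.g. '/' initially, then '/sala' or '/ventilacion'
    if path.startswith("/sala"):
        pass  # react to a change in 'sala'
    elif path.startswith("/ventilacion"):
        pass  # react to a change in 'ventilacion'

my_stream = db.stream(stream_handler)  # streaming the parent watches both nodes; db.child(<node>).stream(...) watches one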
I know this post is 2 years old, but I hope this helps. Try using the firebase_admin module.
Use this command: pip install firebase-admin
I too had a requirement where I needed to check for changes made to the Firebase database. I referred here.
Following is a sample code based on your question which you can refer to and try out. Note that listen() immediately delivers one event with the node's current contents, so the ignore_first_call decorator below skips that initial call and reacts only to subsequent changes.
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db

cred = credentials.Certificate("path/to/serviceAccountKey.json")
firebase_admin.initialize_app(cred, {
    'databaseURL': 'https://example.firebaseio.com',
    'databaseAuthVariableOverride': None
})


def ignore_first_call(fn):
    called = False

    def wrapper(*args, **kwargs):
        nonlocal called
        if called:
            return fn(*args, **kwargs)
        else:
            called = True
            return None

    return wrapper


@ignore_first_call
def listener(event):
    print(event.event_type)  # can be 'put' or 'patch'
    print(event.path)  # relative to the reference, it seems
    print(event.data)  # new data at /reference/event.path. None if deleted
    node = str(event.path).split('/')[-2]  # you can slice the path according to your requirement
    property = str(event.path).split('/')[-1]
    value = event.data
    if node == 'sala':
        pass  # do something
    elif node == 'ventilacion':
        pass  # do something
    else:
        pass  # do something else


db.reference('/').listen(listener)
I was working on the same thing, so going by current updates to pyrebase and learning from the answers posted above, I got this running perfectly. (Please make sure your Python is upgraded from Python 2 to Python 3 to run pyrebase and firebase-admin.)
import firebase_admin
import pyrebase
from firebase_admin import credentials

config = {
    "apiKey": "",
    "authDomain": "",
    "databaseURL": "",
    "projectId": "",
    "storageBucket": "",
    "serviceAccount": "path to the service account json file you downloaded",
    "messagingSenderId": "",
    "appId": "",
    "measurementId": ""
}
firebase = pyrebase.initialize_app(config)
storage = firebase.storage()

cred = credentials.Certificate("path to downloaded json file")
firebase_admin.initialize_app(cred, {
    "databaseURL": "same as config",
    "databaseAuthVariableOverride": None
})
db = firebase.database()


def ignore_first_call(fn):
    called = False

    def wrapper(*args, **kwargs):
        nonlocal called
        if called:
            return fn(*args, **kwargs)
        else:
            called = True
            return None

    return wrapper


def stream_handler(message):
    ab = str(1)
    all_videos = storage.child("videos/").list_files()  # node where the files are
    path_on_local = "local path to save the downloads"
    print(message["event"])  # put
    print(message["path"])  # /-K7yGTTEp7O549EzTYtI
    print(message["data"])  # {'title': 'Pyrebase', "body": "etc..."}
    node = str(message["path"]).split('/')[-2]
    property = str(message["path"]).split('/')[-1]
    value = message["data"]
    if message["event"] == "put":
        for videos in all_videos:
            try:
                print(videos.name)
                z = storage.child(videos.name).get_url(None)
                storage.child(videos.name).download(path_on_local + "/" + ab + ".mp4")
                x = int(ab)
                ab = str(x + 1)
            except:
                print('Download Failed')
    else:
        print("error")


my_stream = db.child("videos").stream(stream_handler)
