I have a program, which starts a tweepy streamListener, which adds new tweets to a database and starts another process which monitors said database for tweets older than X and does something with them.
The code below is structurally similar, just without the database interaction code, as it is not the thing causing my problem.
The problem, from what I understand, is that you can't share Python modules that hold some kind of state between different processes.
How do I work around this? I need both processes to have access to tweepy for twitter interaction.
Any help or ideas are much appreciated!
import multiprocessing
import time
import tweepy
class App():
    """Wires together the Twitter API client, the background worker
    process, and the streaming listener."""

    def __init__(self):
        # NOTE(review): fill in real credentials before running.
        keys = {
            "consumer_key": "",
            "consumer_secret": "",
            "access_token": "",
            "access_token_secret": ""
        }
        auth = tweepy.OAuthHandler(keys['consumer_key'], keys['consumer_secret'])
        auth.set_access_token(keys['access_token'], keys['access_token_secret'])
        self.api = tweepy.API(auth)
        self.worker = Worker(self.api)

    def run(self):
        """Start the worker process, then begin streaming tweets."""
        self.worker.start()
        myStreamListener = MyStreamListener()
        stream = tweepy.Stream(auth=self.api.auth, listener=myStreamListener)
        # BUG FIX: `async` is a reserved keyword since Python 3.7, so
        # `async=True` is a SyntaxError; tweepy renamed the parameter
        # to `is_async`.
        stream.filter(track=['python'], is_async=True)
# Background process that polls Twitter through the shared API handle.
# NOTE(review): storing a tweepy.API instance on the process object is the
# root cause of the traceback below -- on Windows (spawn start method)
# `start()` pickles the whole Worker, and the API object drags in module
# references that cannot be pickled ("TypeError: can't pickle module
# objects").  Recreating the API inside `run()` from plain credential
# strings would avoid this -- TODO confirm with the tweepy docs.
class Worker(multiprocessing.Process):
def __init__(self, api):
multiprocessing.Process.__init__(self)
# Kept as-is for interface compatibility; see the pickling note above.
self.api = api
def run(self):
# Filler for debugging purposes
print(self.api.me(), flush=True)
time.sleep(1)
class MyStreamListener(tweepy.StreamListener):
    """Stream listener that echoes each incoming tweet's text to stdout."""

    def on_status(self, status):
        # Placeholder handler for debugging: dump the tweet body.
        tweet_text = status.text
        print(tweet_text, flush=True)
if __name__ == "__main__":
    # Script entry point: build the application and start streaming.
    application = App()
    application.run()
Running this produces the following error
Traceback (most recent call last):
File "[PATH_REMOVED]", line 46, in <module>
APP.run()
File "[PATH_REMOVED]", line 22, in run
self.worker.start()
File "E:\Programs\Python 3.x\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "E:\Programs\Python 3.x\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "E:\Programs\Python 3.x\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "E:\Programs\Python 3.x\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "E:\Programs\Python 3.x\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle module objects
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "E:\Programs\Python 3.x\lib\multiprocessing\spawn.py", line 99, in spawn_main
new_handle = reduction.steal_handle(parent_pid, pipe_handle)
File "E:\Programs\Python 3.x\lib\multiprocessing\reduction.py", line 87, in steal_handle
_winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
PermissionError: [WinError 5] Access is denied
Related
I use celery 4.3.0 and rabbitmq 3.6.6. There is also a Haproxy for balancing rabbitmq, but right now it only uses one rabbitmq node.
The client/server timeout in HAProxy is set to 24 hours.
Sometimes I get an error and the task execution status is not returned, while the tasks continue to run:
Received 0x00 while expecting 0xce
Traceback (most recent call last):
File "/var/www/serv/app/mod_axmsg/dispatch.py", line 376, in execute
result_value = func_meta.func(*task.args_list, **task.kwargs_dict)
File "/var/www/serv/app/mod_rmc/libs/health.py", line 91, in update_all_health
health.update_all_health()
File "/var/www/serv/app/mod_core/libs/health.py", line 515, in update_all_health
result.join()
File "/usr/local/serv/lib/python3.5/site-packages/celery/result.py", line 765, in join
interval=interval, no_ack=no_ack, on_interval=on_interval,
File "/usr/local/serv/lib/python3.5/site-packages/celery/result.py", line 226, in get
on_message=on_message,
File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/asynchronous.py", line 188, in wait_for_pending
for _ in self._wait_for_pending(result, **kwargs):
File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/asynchronous.py", line 255, in _wait_for_pending
on_interval=on_interval):
File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/asynchronous.py", line 56, in drain_events_until
yield self.wait_for(p, wait, timeout=1)
File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/asynchronous.py", line 65, in wait_for
wait(timeout=timeout)
File "/usr/local/serv/lib/python3.5/site-packages/celery/backends/rpc.py", line 63, in drain_events
return self._connection.drain_events(timeout=timeout)
File "/usr/local/serv/lib/python3.5/site-packages/kombu/connection.py", line 323, in drain_events
return self.transport.drain_events(self.connection, **kwargs)
File "/usr/local/serv/lib/python3.5/site-packages/kombu/transport/pyamqp.py", line 103, in drain_events
return connection.drain_events(**kwargs)
File "/usr/local/serv/lib/python3.5/site-packages/amqp/connection.py", line 505, in drain_events
while not self.blocking_read(timeout):
File "/usr/local/serv/lib/python3.5/site-packages/amqp/connection.py", line 510, in blocking_read
frame = self.transport.read_frame()
File "/usr/local/serv/lib/python3.5/site-packages/amqp/transport.py", line 280, in read_frame
'Received {0:#04x} while expecting 0xce'.format(ch))
amqp.exceptions.UnexpectedFrame: Received 0x00 while expecting 0xce
My code: celery.py
import logging
from app import db
from celery import Celery
LOG = logging.getLogger(__name__)
def make_celery(flask_app):
    """Build a Celery instance wired to *flask_app*.

    The broker URL is taken from the Flask config key ``AMQP_URL`` and
    results are returned through the ``rpc://`` backend.  Every task is
    executed inside the Flask application context.
    """
    broker_url = flask_app.config['AMQP_URL']
    celery_app = Celery('mod_celery', broker=broker_url, backend="rpc://")
    celery_app.conf.update(flask_app.config)

    class ContextTask(celery_app.Task):
        # Run each task inside the app context so Flask extensions
        # (e.g. the SQLAlchemy session) are usable from task code.
        def __call__(self, *args, **kwargs):
            with flask_app.app_context():
                return self.run(*args, **kwargs)

    celery_app.Task = ContextTask
    return celery_app
def in_tx(task_function):
    """Decorator: run *task_function* inside a database transaction.

    Commits the session when the task succeeds and rolls back (logging
    the failure) when it raises.  Returns the task's result on success,
    ``None`` on failure.
    """
    import functools  # local import so this decorator is self-contained

    # BUG FIX: without functools.wraps the wrapper hides the task's
    # __name__/__module__, which breaks Celery's task-name resolution.
    @functools.wraps(task_function)
    def tx_wrapper(*args, **kwargs):
        try:
            # BUG FIX: pass keyword arguments through and return the
            # task's value; the original silently dropped both.
            result = task_function(*args, **kwargs)
            db.session.commit()
            return result
        except Exception as ex:
            msg = 'Could not execute task {}. Reason: {}.' \
                .format(task_function.__name__, str(ex))
            LOG.error(msg)
            db.session.rollback()

    return tx_wrapper
And file tasks.py
from app import celery
from .celery import in_tx
# BUG FIX: the decorators were written with '#' (turning them into
# comments), so the function was never registered as a Celery task nor
# wrapped in a transaction; they must use '@'.
@celery.task(name='app.mod_celery.update_client_health_task')
@in_tx
def update_client_health_task(client_id):
    """Celery task: recompute one client's health inside a DB transaction."""
    # Imported lazily to avoid a circular import at module load time.
    from app.mod_core.libs.health import update_client_health
    update_client_health(client_id)
What could be the problem? Please don't offer me redis.
I'm not a developer and I didn't write the code, but I really need to figure it out. I'm new to Python.
Thanks.
I'm trying to control a robot with ROS and flask. The problem is that when i kill ROS with ctrl-c (SIGINT) flask is slowing this process down because it is not closing right away. I have implemented a signal_handler to handle the ctrl-c and close flask.
The problem is that when I run this and press
Ctrl-C, it closes everything right away, but I get the following error:
RuntimeError: Working outside of request context.
How can i fix this error?
#!/usr/bin/env python
"""Flask web UI for driving a ROS robot (Raspberry Pi mouse).

Serves a control page and a /SetSpeed endpoint that converts x/y joystick
deltas into motor-frequency messages published on /motor_raw.
"""
import rospy
from raspimouse_ros.msg import MotorFreqs
from time import sleep
from flask import Flask, request
from os.path import join, dirname
from signal import signal, SIGINT

cwd = dirname(__file__)
# Fail fast if the UI page is missing; close the probe handle (the
# original leaked it).
open(join(cwd, "file.html")).close()

app = Flask(__name__)

# Last joystick deltas received from the browser.
deltaX = 0
deltaY = 0

pub = rospy.Publisher('/motor_raw', MotorFreqs, queue_size=10)
rospy.init_node('control')
msg = MotorFreqs()

def signal_handler(signal_received, frame):
    """Stop the motors and exit when Ctrl-C (SIGINT) is received."""
    msg.left = 0
    msg.right = 0
    pub.publish(msg)
    print("Quitting .......")
    # BUG FIX: the original read request.environ['werkzeug.server.shutdown']
    # here, but `request` only exists while an HTTP request is being handled,
    # so the handler raised "RuntimeError: Working outside of request
    # context".  Raising SystemExit unwinds serve_forever() and exits.
    raise SystemExit(0)

signal(SIGINT, signal_handler)

# BUG FIX: the route decorators were written with '#' (comments); they
# must be '@' for Flask to register the handlers.
@app.route("/")
def main():
    """Serve the control page."""
    with open(join(cwd, "file.html"), 'r') as f:
        program = f.read()
    return program

@app.route("/SetSpeed")
def SetSpeed():
    """Update the stored joystick deltas and publish new motor speeds."""
    global deltaX
    global deltaY
    deltaX = int(float(request.args.get('x')) * 4)
    deltaY = int(float(request.args.get('y')) * 10)
    publisher()
    return ""

def publisher():
    """Turn the current deltas into left/right motor frequencies and publish."""
    msg.left = int(-deltaY + deltaX)
    msg.right = int(-deltaY - deltaX)
    rospy.loginfo(msg)
    pub.publish(msg)

app.run(host="0.0.0.0")
[control-1] killing on exit
Quitting .......
Traceback (most recent call last):
File "/home/pi/workspace/src/manual_control/scripts/control.py", line 54, in <module>
app.run(host="0.0.0.0")
File "/usr/lib/python2.7/dist-packages/flask/app.py", line 841, in run
run_simple(host, port, self, **options)
File "/usr/lib/python2.7/dist-packages/werkzeug/serving.py", line 708, in run_simple
inner()
File "/usr/lib/python2.7/dist-packages/werkzeug/serving.py", line 673, in inner
srv.serve_forever()
File "/usr/lib/python2.7/dist-packages/werkzeug/serving.py", line 511, in serve_forever
HTTPServer.serve_forever(self)
File "/usr/lib/python2.7/SocketServer.py", line 231, in serve_forever
poll_interval)
File "/usr/lib/python2.7/SocketServer.py", line 150, in _eintr_retry
return func(*args)
File "/home/pi/workspace/src/manual_control/scripts/control.py", line 28, in signal_handler
func = request.environ.get('werkzeug.server.shutdown')
File "/usr/lib/python2.7/dist-packages/werkzeug/local.py", line 343, in __getattr__
return getattr(self._get_current_object(), name)
File "/usr/lib/python2.7/dist-packages/werkzeug/local.py", line 302, in _get_current_object
return self.__local()
File "/usr/lib/python2.7/dist-packages/flask/globals.py", line 37, in _lookup_req_object
raise RuntimeError(_request_ctx_err_msg)
RuntimeError: Working outside of request context.
This typically means that you attempted to use functionality that needed
an active HTTP request. Consult the documentation on testing for
information about how to avoid this problem.
shutting down processing monitor...
... shutting down processing monitor complete
done
The request context is created when a request arrives and is destroyed after it finishes, so it is not available in the signal_handler function; you can, however, access the app_context.
What I want to do is set up an XMPP receiver using the Slixmpp library which listens for messages throughout the script's execution. I think the threading library might provide a solution, so I tried that.
This script is for an online PvP game and I need to receive information from the opponent continuously.
This is the code I have tried:
"""Background XMPP receiver built on slixmpp, run in a worker thread."""
import time
import threading as td
import slixmpp as slix
import logging
import asyncio

my_jid = "test#xmpp.jp"
my_pass = "test"

class receiver(slix.ClientXMPP):
    """Minimal XMPP client that auto-replies to incoming chat messages."""

    def __init__(self, jid, password):
        slix.ClientXMPP.__init__(self, jid, password)
        self.add_event_handler("session_start", self.start)
        self.add_event_handler("message", self.message)

    def start(self, event):
        # Announce presence and fetch the roster once the session is up.
        self.send_presence()
        self.get_roster()

    def message(self, msg):
        # Echo chat messages back to the sender.
        if msg['type'] in ('chat', 'normal'):
            msg.reply("Thanks for sending\n%(body)s" % msg).send()
            print(msg['body'])

def function():
    """Thread target: connect the XMPP client and process events forever."""
    # BUG FIX: asyncio only creates an event loop automatically on the main
    # thread; slixmpp needs one installed in *this* thread, otherwise it
    # raises "RuntimeError: There is no current event loop in thread ...".
    asyncio.set_event_loop(asyncio.new_event_loop())
    recv = receiver(my_jid, my_pass)
    recv.connect()
    recv.process()

# BUG FIX: the module was imported as `td`, so `threading.Thread` raised
# NameError.  daemon=True lets the interpreter exit even though
# recv.process() never returns.
newthread = td.Thread(target=function, daemon=True)

logging.basicConfig(level="DEBUG", format='%(levelname)-8s %(message)s')
newthread.start()
input("Press Enter to continue")
This returns the following error:
Exception in thread Thread-1:
Press Enter to continueTraceback (most recent call last):
File "C:\Program Files (x86)\Python37-32\lib\threading.py", line 917, in _bootstrap_inner
self.run()
File "C:\Program Files (x86)\Python37-32\lib\threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "C:/Users/dps/Documents/GitHub/Python-Problems/UltimateTicTacToe-scripts/xmppp.py", line 46, in good
recv = receiver(my_jid, my_pass)
File "C:/Users/dps/Documents/GitHub/Python-Problems/UltimateTicTacToe-scripts/xmppp.py", line 32, in __init__
slix.ClientXMPP.__init__(self, jid, password)
File "C:\Users\dps\venv\lib\site-packages\slixmpp\clientxmpp.py", line 70, in __init__
BaseXMPP.__init__(self, jid, 'jabber:client', **kwargs)
File "C:\Users\dps\venv\lib\site-packages\slixmpp\basexmpp.py", line 48, in __init__
XMLStream.__init__(self, **kwargs)
File "C:\Users\dps\venv\lib\site-packages\slixmpp\xmlstream\xmlstream.py", line 219, in __init__
self.disconnected = asyncio.Future()
File "C:\Program Files (x86)\Python37-32\lib\asyncio\events.py", line 644, in get_event_loop
% threading.current_thread().name)
RuntimeError: There is no current event loop in thread 'Thread-1'.
In my case executing:
newthread.daemon = True
Before calling:
newthread.start()
Works as expected creating a non-blocking thread.
This is the code. I am loading the configuration from an app.json file: as you can see, file_load opens the file from its location, and via dictConfig I load the logging handlers, formatters, etc. I am trying to write to the same file, named log.json, from multiple instances, but I am getting a Windows PermissionError. If anybody knows anything, please help me.
# Import the required modules.
try:
import logging
from logging import config, Formatter, Handler
import json
import os
import traceback
from threading import Thread
import time
import sys
import multiprocessing
from multiprocessing import Process, Semaphore, SimpleQueue, Lock, Event, Manager, current_process, BoundedSemaphore
# If a module cannot be imported, report it.
# NOTE(review): swallowing the ImportError and continuing only defers the
# failure -- any missing module will surface later as a NameError.
except Exception as e:
print("Exception occured installing modules", e)
# Logger: thin wrapper around the logging module, configured from app.json.
# NOTE(review): `Process(target=logg.debug, ...)` pickles this whole object;
# the configured logging handlers hold _thread.RLock instances, which is
# what produces "TypeError: can't pickle _thread.RLock objects" on Windows
# (spawn start method).  The usual fix is to pass only the (picklable)
# multiprocessing.Queue to child processes and do all actual logging in a
# single process that drains the queue -- TODO confirm against the
# logging-cookbook multiprocessing recipe.
class Logger:
# Build the cross-process queue and load the dictConfig on construction.
def __init__(self):
# self.semaphore = Semaphore(10)
# Queue intended to ferry messages between processes.
self.queue = multiprocessing.Queue()
# NOTE(review): hard-coded absolute path, and the file handle is never
# closed -- prefer a `with open(...)` block and a configurable path.
file_load = open('C:/Users/desaijb/Desktop/gitpractice/gitpractice/test_log/app.json')
config_file = json.load(file_load)
final_config_file = config_file['logging']
config.dictConfig(final_config_file)
self.logger = logging.getLogger()
def debug(self, message):
# NOTE(review): this getLogger() result is discarded, and the message is
# only queued -- nothing ever writes it to the log from this method.
logging.getLogger('debug_module')
self.queue.put(message)
# self.semaphore.acquire()
# self.logger.debug(message)
# self.semaphore.release()
return True
def info(self, message):
# NOTE(review): getLogger() result discarded here (and in the methods
# below); the root logger does the writing.
logging.getLogger('info_module')
self.logger.info(message)
return True
def warning(self, message):
logging.getLogger('warning_module')
self.logger.warning(message)
def error(self,message):
logging.getLogger('error_module')
self.logger.error(message)
def critical(self, message):
logging.getLogger('critical_module')
self.logger.critical(message)
def receive(self):
# NOTE(review): a Queue object is always truthy, so this loops forever;
# each iteration blocks on get() and the received message is unused.
while self.queue:
received_message = self.queue.get()
if __name__ == "__main__":
logg = Logger()
# NOTE(review): on Windows (spawn), start() pickles the bound method's
# instance -- including logging handlers that hold RLocks -- causing
# "TypeError: can't pickle _thread.RLock objects".  Use a module-level
# function target and pass only picklable arguments (e.g. the queue).
proc = Process(target=logg.debug, args=('message',))
proc.start()
following is the error I am getting after running the code.
Traceback (most recent call last):
File "C:/Users/desaijb/Desktop/gitpractice/gitpractice/test_log/logger.py", line 63, in <module>
proc.start()
File "C:\Program Files\Python36\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Program Files\Python36\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Program Files\Python36\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Program Files\Python36\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "C:\Program Files\Python36\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.RLock objects
I'm trying to run two servers using web.py and initiating calls from one to another. Both servers start normally but when I try to call a url the below stack trace is thrown.
import web
# URL routing table: maps request paths to handler class names.
urls = (
'/ping', 'Ping',  # NOTE(review): no Ping class is defined in this file.
'/acqlock/+(.*)', 'Acquire',
)
class MSA(web.application):
    """web.py application whose run() binds to a configurable localhost port."""

    def run(self, port=8081, *middleware):
        # Build the WSGI callable, then serve it with web.py's simple server.
        wsgi_app = self.wsgifunc(*middleware)
        return web.httpserver.runsimple(wsgi_app, ('127.0.0.1', port))
app = MSA(urls, globals())
# NOTE(review): when executed as a script, app.run() blocks here, so the
# Acquire handler below is only defined after the server stops; handler
# classes should be defined before this point.
if __name__ == "__main__":
app.run(port=8081)
class Acquire:
def GET(self, resource_name):
print resource_name
response = app.request('http://127.0.0.1:8080/acqlock/' + resource_name, method='GET')
return response
But I keep getting this error after calling the /acqlock.
Traceback (most recent call last):
File "C:\Python27\lib\site-packages\web\wsgiserver\__init__.py", line 1245, in communicate
req.respond()
File "C:\Python27\lib\site-packages\web\wsgiserver\__init__.py", line 775, in respond
self.server.gateway(self).respond()
File "C:\Python27\lib\site-packages\web\wsgiserver\__init__.py", line 2018, in respond
response = self.req.server.wsgi_app(self.env, self.start_response)
File "C:\Python27\lib\site-packages\web\httpserver.py", line 306, in __call__
return self.app(environ, xstart_response)
File "C:\Python27\lib\site-packages\web\httpserver.py", line 274, in __call__
return self.app(environ, start_response)
File "C:\Python27\lib\site-packages\web\application.py", line 279, in wsgi
result = self.handle_with_processors()
File "C:\Python27\lib\site-packages\web\application.py", line 249, in handle_with_processors
return process(self.processors)
File "C:\Python27\lib\site-packages\web\application.py", line 246, in process
raise self.internalerror()
File "C:\Python27\lib\site-packages\web\application.py", line 515, in internalerror
parent = self.get_parent_app()
File "C:\Python27\lib\site-packages\web\application.py", line 500, in get_parent_app
if self in web.ctx.app_stack:
AttributeError: 'ThreadedDict' object has no attribute 'app_stack'
Use requests library for this.
import requests
# Issue a real HTTP GET against the other server; web.py's internal
# app.request() dispatcher cannot target a different host/port.
# NOTE(review): assumes `resource_name` is in scope at this point.
response = requests.request(method='GET', url ='http://127.0.0.1:8080/acqlock/' + resource_name)
Note: You have used port 8080 in url even though you have hosted the web.py in 8081