I have a multithreaded client-server application and it's a bit hard to debug when 10 clients are connected. I have a few questions regarding tracing.
How can I trace only a certain file or class in the software?
Is there a way to get a log with a timestamp and the function name, without any other information, separated by thread? (A sketch of the kind of line I mean follows below.)
Is there a graphical trace generator?
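For example, the kind of log line I mean (a minimal sketch using only the stdlib logging module; asctime, threadName and funcName are built-in LogRecord attributes):
import logging
import threading

logging.basicConfig(
    format='%(asctime)s %(threadName)s %(funcName)s',
    level=logging.DEBUG,
)

def handle_client():
    # Message left empty: the output is just timestamp, thread and function
    logging.debug('')

threading.Thread(target=handle_client, name='client-1').start()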
import logging
import time
import pymongo
import hashlib
import random
DEBUG_MODE = True
class logger(object):
    def __new__(cls, *args, **kwargs):
        if DEBUG_MODE:
            return object.__new__(cls)
        else:
            # Tracing disabled: hand back the wrapped function untouched
            return args[0]
    def __init__(self, foo):
        self.foo = foo
        logging.basicConfig(filename='exceptions.log', format='%(levelname)s %(asctime)s: %(message)s')
        self.log = logging.getLogger(__name__)
    def __call__(self, *args, **kwargs):
        def _log():
            try:
                t = time.time()
                func_hash = self._make_hash(t)
                col = self._make_db_connection()
                log_record = {'func_name': self.foo.__name__, 'start_time': t, 'func_hash': func_hash}
                col.insert_one(log_record)
                res = self.foo(*args, **kwargs)
                log_record = {'func_name': self.foo.__name__, 'exc_time': round(time.time() - t, 4), 'end_time': time.time(), 'func_hash': func_hash}
                col.insert_one(log_record)
                return res
            except Exception as e:
                self.log.error(e)
        return _log()
    def _make_db_connection(self):
        connection = pymongo.MongoClient()
        db = connection.logger
        collection = db.log
        return collection
    def _make_hash(self, t):
        m = hashlib.md5()
        m.update((str(t) + str(random.randrange(1, 10))).encode('utf-8'))
        return m.hexdigest()
It uses MongoDB as storage, but you can write any backend. Just wrap the functions you need and follow the log.
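For illustration, wrapping a made-up function (the name and argument are hypothetical):
@logger
def fetch_user(user_id):
    return {'id': user_id}

fetch_user(42)  # start/end records land in the `log` collection of the `logger` database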
Here is my way, but I feel it is not very simple. Is there a better way?
import asyncio
import time
def timer_all(f):
    if asyncio.iscoroutinefunction(f):
        async def wrapper(*args, **kwargs):
            now = time.time()
            result = await f(*args, **kwargs)
            print('used {}'.format(time.time() - now))
            return result
    else:
        def wrapper(*args, **kwargs):
            now = time.time()
            result = f(*args, **kwargs)
            print('used {}'.format(time.time() - now))
            return result
    return wrapper
There are a lot of decorators (retry, add logging, etc.) that all have to be written this way, which is a bit ugly, right?
While there is no real problem with repeating the same code in specialized decorators, here is how I would approach a refactoring.
I will use a class-based decorator that accepts a pre-call function and a post-call function, both of which will be called with the instance of the decorator.
The result of the pre-call function is saved to an attribute of the decorator. This is necessary for the special timing case, where a delta needs to be computed. There may be other use cases that require the return value of the pre-call function.
I also save the result of the decorated function to the result attribute of the decorator instance. This allows the post-call function to read this value for logging.
Here is an example implementation:
import asyncio
class WrapAll(object):
    def __init__(self, pre=lambda _: None, post=lambda _: None):
        self.pre = lambda: pre(self)
        self.pre_val = None
        self.result = None
        self.post = lambda: post(self)
    def __call__(self, fn):
        if asyncio.iscoroutinefunction(fn):
            async def wrap(*args, **kwargs):
                self.pre_val = self.pre()
                self.result = await fn(*args, **kwargs)
                self.post()
                return self.result
        else:
            def wrap(*args, **kwargs):
                self.pre_val = self.pre()
                self.result = fn(*args, **kwargs)
                self.post()
                return self.result
        return wrap
Timer Example
import asyncio
import time
timer = dict(
    pre=lambda self: time.time(),
    post=lambda self: print('used {}'.format(time.time() - self.pre_val))
)

@WrapAll(**timer)
def add(x, y):
    return x + y

@WrapAll(**timer)
async def async_add(x, y):
    future = asyncio.Future()
    future.set_result(x + y)
    await future
    return future.result()
Running sync adder:
>>> add(3, 4)
used 4.76837158203125e-06
7
Running async adder:
>>> loop = asyncio.get_event_loop()
>>> task = asyncio.ensure_future(async_add(3, 4))
>>> try:
...     loop.run_until_complete(task)
... except RuntimeError:
...     pass
used 2.193450927734375e-05
Logging Example
import asyncio
import logging
FORMAT = '%(message)s'
logging.basicConfig(format=FORMAT)
logger = dict(
    post=lambda self: logging.warning('subtracting {}'.format(self.result))
)

@WrapAll(**logger)
def sub(x, y):
    return x - y

@WrapAll(**logger)
async def async_sub(x, y):
    future = asyncio.Future()
    future.set_result(x - y)
    await future
    return future.result()
Running sync subtractor:
>>> sub(5, 6)
subtracting -1
Running async subtractor:
>>> loop = asyncio.get_event_loop()
>>> task = asyncio.ensure_future(async_sub(5, 6))
>>> try:
...     loop.run_until_complete(task)
... except RuntimeError:
...     pass
subtracting -1
I'm seeing a strange phenomenon in Python with callback functions and handlers.
I use ZMQ to handle my communication and use a stream for the socket. I have the base class:
import multiprocessing
import zmq
from concurrent.futures import ThreadPoolExecutor
from zmq.eventloop import ioloop, zmqstream
from zmq.utils import jsonapi as json
# Types of messages
TYPE_A = 'type_a'
TYPE_B = 'type_b'
class ZmqProcess(multiprocessing.Process):
    def __init__(self):
        super(ZmqProcess, self).__init__()
        self.context = None
        self.loop = None
        self.handle_stream = None
    def setup(self):
        self.context = zmq.Context()
        self.loop = ioloop.IOLoop.instance()
    def send(self, msg_type, msg, host, port):
        sock = zmq.Context().socket(zmq.PAIR)
        sock.connect('tcp://%s:%s' % (host, port))
        sock.send_json([msg_type, msg])
    def stream(self, sock_type, addr):
        sock = self.context.socket(sock_type)
        if isinstance(addr, str):
            addr = addr.split(':')
        host, port = addr if len(addr) == 2 else (addr[0], None)
        if port:
            sock.bind('tcp://%s:%s' % (host, port))
        else:
            port = sock.bind_to_random_port('tcp://%s' % host)
        stream = zmqstream.ZMQStream(sock, self.loop)
        return stream, int(port)

class MessageHandler(object):
    def __init__(self, json_load=-1):
        self._json_load = json_load
        self.pool = ThreadPoolExecutor(max_workers=10)
    def __call__(self, msg):
        i = self._json_load
        msg_type, data = json.loads(msg[i])
        msg[i] = data
        if msg_type.startswith('_'):
            raise AttributeError('%s starts with an "_"' % msg_type)
        getattr(self, msg_type)(*msg)
And I have a class that inherits from it:
import zmq
import zmq_base
class ZmqServerMeta(zmq_base.ZmqProcess):
    def __init__(self, bind_addr, handlers):
        super(ZmqServerMeta, self).__init__()
        self.bind_addr = bind_addr
        self.handlers = handlers
    def setup(self):
        super(ZmqServerMeta, self).setup()
        self.handle_stream, _ = self.stream(zmq.PAIR, self.bind_addr)
        self.handle_stream.on_recv(StreamHandler(self.handle_stream, self.stop,
                                                 self.handlers))
    def run(self):
        self.setup()
        self.loop.start()
    def stop(self):
        self.loop.stop()

class StreamHandler(zmq_base.MessageHandler):
    def __init__(self, handle_stream, stop, handlers):
        super(StreamHandler, self).__init__()
        self._handle_stream = handle_stream
        self._stop = stop
        self._handlers = handlers
    def type_a(self, data):
        if zmq_base.TYPE_A in self._handlers:
            if self._handlers[zmq_base.TYPE_A]:
                for handle in self._handlers[zmq_base.TYPE_A]:
                    self.pool.submit(handle, data)
            else:
                pass
        else:
            pass
    def type_b(self, data):
        if zmq_base.TYPE_B in self._handlers:
            if self._handlers[zmq_base.TYPE_B]:
                for handle in self._handlers[zmq_base.TYPE_B]:
                    self.pool.submit(handle, data)
            else:
                pass
        else:
            pass
    def endit(self):
        self._stop()
Additionally, I have a class that I want to use as storage. And here is where the trouble starts:
import threading
import zmq_server_meta as server
import zmq_base as base
class Storage:
    def __init__(self):
        self.list = []
        self.list_lock = threading.RLock()
        self.zmq_server = None
        self.host = '127.0.0.1'
        self.port = 5432
        self.bind_addr = (self.host, self.port)
    def setup(self):
        handlers = {base.TYPE_A: [self.remove]}
        self.zmq_server = server.ZmqServerMeta(handlers=handlers, bind_addr=self.bind_addr)
        self.zmq_server.start()
    def add(self, data):
        with self.list_lock:
            try:
                self.list.append(data)
            except:
                print("Didn't work")
    def remove(self, msg):
        with self.list_lock:
            try:
                self.list.remove(msg)
            except:
                print("Didn't work")
The idea is that this class stores some global information that it receives.
It is all started in a file to test:
import sys
import time
import storage
import zmq_base as base
import zmq_server_meta as server
def printMsg(msg):
    print(msg)
store = storage.Storage()
store.setup()
handlers = {base.TYPE_B: [printMsg]}
client = server.ZmqServerMeta(handlers=handlers, bind_addr=('127.0.0.1', 5431))
client.start()
message = "Test"
store.add(message)
client.send(base.TYPE_A, message, '127.0.0.1', 5432)
I simplified it to reduce clutter. Instead of just adding the element, it is usually sent and then a response comes back. The response, which the client sends, should be processed by the correct callback, remove(), and it should remove the element from the list. The problem is that the remove() function sees an empty list, although there should be an element in the list. If I check from the testing file, I can see the element after it was added, and if I call remove() from there, I see a non-empty list and can remove it. My question is: why does the callback see an empty list, and how can I make sure it sees the correct elements in the list?
Kind regards
Patrick
I believe the problem lies in the fact that the ZmqProcess class inherits from multiprocessing.Process. Multiprocessing does not allow objects to be shared among different processes, except via a shared memory map using Value or Array (as can be seen in the documentation: https://docs.python.org/3/library/multiprocessing.html#sharing-state-between-processes ).
If you want to use your custom object, you can use a server process / proxy object, which can be found on the same page of the documentation.
So you can, for instance, define a manager in the __init__ method of the Storage class with self.manager = Manager(), and afterwards set self.list = self.manager.list(). This should do the trick.
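A minimal sketch of that change, assuming the Storage class from the question (only __init__ changes; the rest stays the same):
import threading
from multiprocessing import Manager

class Storage:
    def __init__(self):
        self.manager = Manager()          # server process that owns the shared state
        self.list = self.manager.list()   # proxy list: mutations cross process boundaries
        self.list_lock = threading.RLock()
        self.zmq_server = None
        self.host = '127.0.0.1'
        self.port = 5432
        self.bind_addr = (self.host, self.port)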
I need to pass a kwarg to the parent class of my equivalent of FingerFactoryFromService using super.
I know I am actually passing the kwarg to IFingerFactory, because that is also where I pass the service that ends up in FingerFactoryFromService's __init__. I can understand that it is getting tripped up somewhere in the component system, but I cannot think of any other way.
The error I keep getting is
exceptions.TypeError: 'test' is an invalid keyword argument for this function
Versions of code in my virtualenv are:
pip (1.4.1)
setuptools (1.1.6)
Twisted (13.1.0)
wsgiref (0.1.2)
zope.interface (4.0.5)
This is a cutdown example from the finger tutorial demonstrating the issue:
from twisted.protocols import basic
from twisted.application import internet, service
from twisted.internet import protocol, reactor, defer
from twisted.python import components
from zope.interface import Interface, implements  # @UnresolvedImport

class IFingerService(Interface):
    def getUser(user):  # @NoSelf
        """
        Return a deferred returning a string.
        """
    def getUsers():  # @NoSelf
        """
        Return a deferred returning a list of strings.
        """

class IFingerFactory(Interface):
    def getUser(user):  # @NoSelf
        """
        Return a deferred returning a string.
        """
    def buildProtocol(addr):  # @NoSelf
        """
        Return a protocol returning a string.
        """

def catchError(err):
    return "Internal error in server"
class FingerProtocol(basic.LineReceiver):
    def lineReceived(self, user):
        d = self.factory.getUser(user)
        d.addErrback(catchError)
        def writeValue(value):
            self.transport.write(value + '\r\n')
            self.transport.loseConnection()
        d.addCallback(writeValue)

class FingerService(service.Service):
    implements(IFingerService)
    def __init__(self, filename):
        self.filename = filename
        self.users = {}
    def _read(self):
        self.users.clear()
        for line in open(self.filename):
            user, status = line.split(':', 1)
            user = user.strip()
            status = status.strip()
            self.users[user] = status
        self.call = reactor.callLater(30, self._read)  # @UndefinedVariable
    def getUser(self, user):
        print(user)
        return defer.succeed(self.users.get(user, "No such user"))
    def getUsers(self):
        return defer.succeed(self.users.keys())
    def startService(self):
        self._read()
        service.Service.startService(self)
    def stopService(self):
        service.Service.stopService(self)
        self.call.cancel()
class FingerFactoryFromService(protocol.ServerFactory):
    implements(IFingerFactory)
    protocol = FingerProtocol
    #def __init__(self, srv):
    def __init__(self, srv, test=None):
        self.service = srv
        ## I need to call super here because my equivalent of ServerFactory requires
        ## a kwarg, but this cutdown example doesn't, so I just assign it to a property
        # super(FingerFactoryFromService, self).__init__(test=test)
        self.test_thing = test or 'Default Something'
    def getUser(self, user):
        return self.service.getUser(user)

components.registerAdapter(FingerFactoryFromService,
                           IFingerService,
                           IFingerFactory)

application = service.Application('finger')
serviceCollection = service.IServiceCollection(application)
finger_service = FingerService('/etc/passwd')
finger_service.setServiceParent(serviceCollection)
#line_finger_factory = IFingerFactory(finger_service)
line_finger_factory = IFingerFactory(finger_service, test='Something')
line_finger_server = internet.TCPServer(1079, line_finger_factory)
line_finger_server.setServiceParent(serviceCollection)
This has nothing to do with the component system. What you want to do is override the Factory's buildProtocol method, as documented here:
https://twistedmatrix.com/documents/current/core/howto/servers.html#auto9
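A minimal sketch of that approach for the factory above (the attached attribute is made up; this is one way to carry per-connection state without a custom __init__ signature):
class FingerFactoryFromService(protocol.ServerFactory):
    implements(IFingerFactory)
    protocol = FingerProtocol

    def __init__(self, srv):
        self.service = srv

    def buildProtocol(self, addr):
        # Factory.buildProtocol instantiates self.protocol and sets
        # proto.factory; override it to attach extra state per connection.
        proto = protocol.ServerFactory.buildProtocol(self, addr)
        proto.test_thing = 'Something'
        return proto

    def getUser(self, user):
        return self.service.getUser(user)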
I'm using the Tornado framework and all of my functions have been written synchronously. How would I make these async?
class AuthLoginHandler(BaseHandler):
    @tornado.web.asynchronous
    def get(self):
        self.render("login.html")
    def post(self):
        username = self.get_argument("UserName", strip=True)
        password = self.get_argument("Password", strip=True)
        user = auth_actions.login(username, password)
        if not user:
            self.redirect("/auth/login")
            return
        #user = user_actions.get_my_data(self.db, user['_id'])
        self.set_secure_cookie("userdata", tornado.escape.json_encode(dumps(user.to_mongo())))
        self.redirect("/")
def login(username, password, callback=None):
    m = hashlib.md5()
    m.update(password)
    hashed = m.hexdigest()
    login = User.objects(UserName=username.lower(), Password=hashed).exclude("Password", "Wall", "FriendsRequested", "Friends", "FriendsRequesting")
    if login.first() is None:
        login = User.objects(Email=username.lower(), Password=hashed).exclude("Password", "Wall", "FriendsRequested", "Friends", "FriendsRequesting")
        if login.first() is None:
            return None
    logger.info(username + " has logged in")
    if callback is not None:
        return callback(login.first())
    return login.first()
As andy boot mentions, you are going to need a separate thread. The code below uses the generator-coroutine approach from tornado.gen.
Also note the run_async decorator that wraps your function up in its own thread.
import tornado.ioloop
import tornado.web
import tornado.gen
import time
import hashlib
from threading import Thread
from functools import wraps

def run_async(func):
    @wraps(func)
    def async_func(*args, **kwargs):
        func_hl = Thread(target=func, args=args, kwargs=kwargs)
        func_hl.start()
        return func_hl
    return async_func

@run_async
def login(username, password, callback=None):
    m = hashlib.md5()
    m.update(password)
    hashed = m.hexdigest()
    login = User.objects(UserName=username.lower(), Password=hashed).exclude("Password", "Wall", "FriendsRequested", "Friends", "FriendsRequesting")
    if login.first() is None:
        login = User.objects(Email=username.lower(), Password=hashed).exclude("Password", "Wall", "FriendsRequested", "Friends", "FriendsRequesting")
        if login.first() is None:
            # Invoke the callback even on failure, otherwise gen.Task never resolves
            if callback is not None:
                return callback(None)
            return None
    logger.info(username + " has logged in")
    if callback is not None:
        return callback(login.first())
    return login.first()
class AuthLoginHandler(BaseHandler):
    @tornado.web.asynchronous
    def get(self):
        self.render("login.html")
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def post(self):
        username = self.get_argument("UserName", strip=True)
        password = self.get_argument("Password", strip=True)
        user = yield tornado.gen.Task(auth_actions.login, username, password)
        if not user:
            self.redirect("/auth/login")
            return
        self.set_secure_cookie("userdata", tornado.escape.json_encode(dumps(user.to_mongo())))
        self.redirect("/")
If you are going to be hashing passwords, you are best off doing it in a separate thread. This is because a hash takes a long time to execute and will block the main thread.
Here is an example of hashing on a separate thread:
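A minimal sketch (an illustration, not the original linked example) that reuses the run_async decorator above; the function name and PBKDF2 parameters are assumptions:
import hashlib
import os

@run_async
def hash_password(password, callback=None):
    # pbkdf2_hmac is deliberately slow; running it on the IOLoop thread
    # would stall every other request while it grinds.
    salt = os.urandom(16)
    digest = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt, 100000)
    if callback is not None:
        callback((salt, digest))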
Note that Tornado's maintainer Ben Darnell recommends using a separate thread to do password hashes: see this Google group.
Python's futures package provides ThreadPoolExecutor and ProcessPoolExecutor for running tasks in parallel.
However, for debugging it is sometimes useful to temporarily replace the true parallelism with a dummy one that carries out the tasks serially in the main thread, without spawning any threads or processes.
Is there an implementation of a DummyExecutor anywhere?
Something like this should do it:
from concurrent.futures import Future, Executor
from threading import Lock
class DummyExecutor(Executor):
    def __init__(self):
        self._shutdown = False
        self._shutdownLock = Lock()
    def submit(self, fn, *args, **kwargs):
        with self._shutdownLock:
            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')
            f = Future()
            try:
                result = fn(*args, **kwargs)
            except BaseException as e:
                f.set_exception(e)
            else:
                f.set_result(result)
            return f
    def shutdown(self, wait=True):
        with self._shutdownLock:
            self._shutdown = True

if __name__ == '__main__':
    def fnc(err):
        if err:
            raise Exception("test")
        else:
            return "ok"
    ex = DummyExecutor()
    print(ex.submit(fnc, True))
    print(ex.submit(fnc, False))
    ex.shutdown()
    ex.submit(fnc, True)  # raises exception
Locking is probably not needed in this case, but it can't hurt to have it.
Use this to mock your ThreadPoolExecutor:
class MockThreadPoolExecutor:
    def __init__(self, **kwargs):
        pass
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, exc_traceback):
        pass
    def submit(self, fn, *args, **kwargs):
        # Execute functions in series without creating threads, for easier
        # unit testing. Note this returns the bare result rather than a
        # Future, so it only suits callers that never call .result().
        result = fn(*args, **kwargs)
        return result
    def shutdown(self, wait=True):
        pass

if __name__ == "__main__":
    def sum(a, b):
        return a + b
    with MockThreadPoolExecutor(max_workers=3) as executor:
        future_result = list()
        for i in range(5):
            future_result.append(executor.submit(sum, i + 1, i + 2))
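A sketch of one way to swap the mock in during a unit test; 'myapp.workers' is a hypothetical module path standing in for wherever the code under test imported ThreadPoolExecutor:
from unittest import mock

def test_submits_run_inline():
    # Patch the name where the code under test looks it up,
    # not in concurrent.futures itself.
    with mock.patch('myapp.workers.ThreadPoolExecutor', MockThreadPoolExecutor):
        pass  # exercise the code under test here; submit() now runs inline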