I'd like access to the same value in my protocol and in my factory so I made a test with this code:
# Question code: tries to share factory state across a multiprocessing
# boundary.  NOTE(review): reactor.run() executes in a child process, so
# the child works on a *copy* of `factory`; mutations made there are never
# visible to the parent (hence the empty list printed at the end).
import time
from multiprocessing import Process
from twisted.internet import reactor, protocol

class MyServer(protocol.Protocol):
    def connectionMade(self):
        # Record the new connection on the factory's list and show it.
        self.factory.clients.append("client")
        print self.factory.clients

class MyServerFactory(protocol.Factory):
    def __init__(self):
        self.protocol = MyServer
        self.clients = []  # one entry appended per connected client

    def printClient(self):
        print self.clients

if __name__ == '__main__':
    factory = MyServerFactory()
    reactor.listenTCP(4433, factory)
    # The reactor runs in a separate OS process; the child's appends to
    # `factory.clients` happen in the child's copy, not in this one.
    processTwisted = Process(target=reactor.run)
    processTwisted.start()
    time.sleep(10)
    factory.printClient()
During the sleep I connect a client to the server.
This is the console's log :
['client']
[]
And I expected:
['client']
['client']
How can I do it?
Twisted and multiprocessing don't work together like this. Also, lists and multiprocessing don't work together like this.
If you remove the use of multiprocessing you'll get the behavior you want.
Related
one script(datamanger.py)
# datamanager.py: a module-level queue created when the module is imported.
# NOTE(review): each process that imports this module starts its own
# Manager (and therefore its own queue); the queue is not shared between
# separately launched scripts.
from multiprocessing import Manager
q = Manager().Queue()
The other two scripts are like this
# Producer script: pushes one item per second onto the queue imported
# from datamanager.
from datamanager import q
import time

while True:
    time.sleep(1)
    q.put(1)
# Consumer script: busy-polls the shared queue and prints each item.
from datamanager import q

while True:
    # NOTE(review): empty()-then-get() is racy with several consumers;
    # a blocking q.get() would be simpler, but this mirrors the question.
    if not q.empty():
        data = q.get()
        print(data)
Is it possible to implement this using only a multiprocessing queue, instead of a message queue such as Kafka?
An alternative to Ahmed's answer, which uses a simpler singleton, is below.
server.py:
from multiprocessing.managers import BaseManager
from multiprocessing import Queue

address = ('127.0.0.1', 50000)  # you can change this
authkey = b"abc"  # you should change this


class SharedQueue:
    """Owns one process-level Queue; calling the instance returns it."""

    def __init__(self):
        # Seed the queue so the first client has something to read.
        queue = Queue()
        queue.put("Something really important!")
        self._queue = queue

    def __call__(self):
        return self._queue


if __name__ == "__main__":
    # Expose a single SharedQueue under the name "get_queue", then serve
    # requests forever on the configured address.
    BaseManager.register("get_queue", SharedQueue())
    server = BaseManager(address=address, authkey=authkey).get_server()
    server.serve_forever()
client.py
# client.py: connect to the queue server defined in server.py and read
# one item from the shared queue.
from server import address, authkey
from multiprocessing.managers import BaseManager

if __name__ == "__main__":
    # Register the name only (no callable) so calls are proxied to the
    # server rather than served by a local object.
    BaseManager.register("get_queue")
    manager = BaseManager(authkey=authkey, address=address)
    manager.connect()
    queue = manager.get_queue()
    print(queue.get())  # blocks until the server's queue yields an item
In order to keep the queue alive and not tied to either process, you need to spawn a server that manages it. This server holds a singleton queue, and everyone that contacts it receives a proxy to that queue. The server code looks as follows:
# queue_server.py
from multiprocessing.managers import SyncManager
from multiprocessing.managers import BaseProxy
import multiprocessing
address = ('127.0.0.1', 50000) # you can change this
authkey = b"abc" # you should change this
class SingletonQueue:
    """Process-wide singleton that lazily fetches one manager-backed Queue."""

    instance = None  # the single shared instance, created on first use

    def __new__(cls, *args, **kwargs):
        # Create the instance exactly once; every later call hands back
        # the same object.
        if SingletonQueue.instance is None:
            SingletonQueue.instance = object.__new__(SingletonQueue)
        return SingletonQueue.instance

    def get_queue(self):
        """Connect to the manager server on first call and cache its Queue."""
        if not hasattr(self, "queue"):
            remote = SyncManager(address=address, authkey=authkey)
            remote.connect()
            self.queue = remote.Queue()
        return self.queue
class CustomQueueProxy(BaseProxy):
    """Proxy that forwards get_queue() calls to the remote SingletonQueue."""

    # Methods the manager allows to be invoked remotely through this proxy.
    _exposed_ = ['get_queue']

    def get_queue(self):
        # Delegate to the referent's get_queue over the manager connection.
        return self._callmethod('get_queue')
def connect_manager():
    """Connect to the queue server; return a manager exposing SingletonQueue."""
    # The process authkey must match the server's so pickled proxies
    # can authenticate.
    multiprocessing.current_process().authkey = authkey
    client = SyncManager(address=address, authkey=authkey)
    client.register("SingletonQueue", SingletonQueue, CustomQueueProxy)
    client.connect()
    return client
def start_server():
    """Run the manager server that owns the singleton queue (blocks forever)."""
    manager = SyncManager(address=address, authkey=authkey)
    manager.register("SingletonQueue", SingletonQueue, CustomQueueProxy)
    server = manager.get_server()
    print(f"running on ip = {server.address[0]}, and port {server.address[1]}")
    # Set this process's authkey before serving so connections authenticate.
    multiprocessing.current_process().authkey = authkey
    server.serve_forever()

if __name__ == "__main__":
    start_server()
You need to run the server first. Once it is running you can connect to it with a client; the client code will look like this:
# Client usage: connect to the running queue_server and exercise the
# shared queue through its proxy.
import multiprocessing
import queue_server # the server python file

manager = queue_server.connect_manager()
# NOTE(review): the annotation is informational only; the object returned
# is a manager proxy for the queue, not a local multiprocessing.Queue.
queue: multiprocessing.Queue = manager.SingletonQueue().get_queue()
queue.put(1)
print(queue.get())
note that this sets the authentication key of your python process to a certain value, so you cannot use it for doing multiple connections with different authentication keys, you have to have a fixed authentication key.
Edit: I'd probably go with Charchit Agarwal's answer if anyone is reading this in the future, or a mix of both answers, depending on whether you want to allow connections across network/docker boundaries, which my answer allows.
I am trying to setup a program with twisted to emulate a serial devices that sends/receives AT commands. Different devices open a different amount of serial ports and use these ports for different things.
I would like my application to be able to open as many SerialPorts as needed and to know what serial Device is writing to dataReceived. I did not want to run each port on a different reactor or thread.
Is there anyway to do this ?
# Sketch from the question: one protocol instance trying to drive two
# SerialPorts ("?" marks the unknown second protocol argument).
class VirtualDeviceBase(LineReceiver):
    def __init__(self, reactor, serial_address):
        [...]

    def open_port(self):
        # Each SerialPort wants a protocol; what to pass for the second
        # port is the crux of the question.
        self.serial_device = SerialPort(self, serial_address, reactor)
        self.serial_device2 = SerialPort(? , serial_address, reactor)

    def dataReceived(self,data):
        [...]
I have tried this:
# Second attempt from the question: a nested protocol class instantiated
# for the second port.
class VirtualDeviceBase(LineReceiver):
    class Protocol(LineReceiver):
        def __init__(self,reactor,address):
            [...]

    def open_port(self):
        # NOTE(review): the nested Protocol's __init__ takes (reactor,
        # address) but is called here with no arguments.
        new_protocol = self.Protocol()
        self.serial_device = SerialPort(self, serial_address, reactor)
        self.serial_device2 = SerialPort(new_protocol , serial_address, reactor)
and it does not throw any errors, but then neither of them calls dataReceived any more.
Just instantiate two SerialPorts. Give them whatever protocols you like. They can share a reactor. A single reactor can handle many different event sources.
from twisted.internet.protocol import Protocol
# Fixed module path: SerialPort lives in twisted.internet.serialport;
# twisted.internet.serial does not exist.
from twisted.internet.serialport import SerialPort
from twisted.internet.task import react
from twisted.internet.defer import Deferred


class Echo(Protocol):
    """Prints every chunk of bytes received on its serial port."""

    def dataReceived(self, data):
        print("Received: {}".format(data))


def main(reactor):
    # Two ports, two protocol instances, one shared reactor.
    SerialPort(Echo(), "com0", reactor)
    SerialPort(Echo(), "com1", reactor)
    # react() stops the reactor as soon as main's Deferred fires; return
    # one that never fires so the ports keep running.
    return Deferred()


react(main, [])
I need to receive connections by sockets, read input data, do hard and long calculations and then send an answer. Queries at the same time may be a lot (i.e. 100)
I understood that because of the GIL I can't use normal threads, so I tried C++ with boost::threads and boost::python, running a Python subinterpreter in each thread. But even so it did not utilise all cores 100% at the same time.
So I decided to use multiprocessing, but create a static count pool of workers to serve these requests with a queue. This way, we don't waste time to fork a process, and we will not have 100 or more processess at the same time, only static count.
I am new to Python, mostly I utilised C++
So now I have this code, but it is not working. The connection opens and immediately closes, I don't know why:
#!/usr/bin/env python
import os
import sys
import SocketServer
import Queue
import time
import socket
import multiprocessing
from multiprocessing.reduction import reduce_handle
from multiprocessing.reduction import rebuild_handle
class MultiprocessWorker(multiprocessing.Process):
    """Worker process: pulls reduced socket handles off a queue and serves them."""

    def __init__(self, sq):
        self.SLEEP_INTERVAL = 1
        # base class initialization
        multiprocessing.Process.__init__(self)
        # job management stuff
        self.socket_queue = sq  # queue of reduced (picklable) socket handles
        self.kill_received = False  # cooperative shutdown flag

    def run(self):
        while not self.kill_received:
            try:
                # Rebuild a usable socket from the handle the parent reduced.
                h = self.socket_queue.get_nowait()
                fd=rebuild_handle(h)
                client_socket=socket.fromfd(fd,socket.AF_INET,socket.SOCK_STREAM)
                #client_socket.send("hellofromtheworkerprocess\r\n")
                received = client_socket.recv(1024)
                print "Recieved on client: ",received
                client_socket.close()
            except Queue.Empty:
                # Nothing queued right now; fall through to the sleep below.
                pass
            #Dummy timer
            time.sleep(self.SLEEP_INTERVAL)
class MyTCPHandler(SocketServer.BaseRequestHandler):
    """
    The RequestHandler class for our server.

    It is instantiated once per connection to the server, and must
    override the handle() method to implement communication to the
    client.
    """

    def handle(self):
        # self.request is the TCP socket connected to the client.  Reduce
        # its OS handle to a picklable form and hand it to a worker via
        # the queue; the worker rebuilds the socket and talks to the client.
        h = reduce_handle(self.request.fileno())
        socket_queue.put(h)
        # Close this process's copy of the socket.  Without this, when
        # handle() returns TCPServer tears the connection down and the
        # worker's rebuilt socket sees it "open and immediately close".
        self.request.close()
if __name__ == "__main__":
    #Mainprocess
    address = ('localhost', 8082)
    server = SocketServer.TCPServer(address, MyTCPHandler)

    # Shared queue plus a fixed pool of 5 workers, started before
    # serve_forever so the handler can feed them sockets.
    socket_queue = multiprocessing.Queue()
    for i in range(5):
        worker = MultiprocessWorker(socket_queue)
        worker.start()

    try:
        server.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C: exit the main process (workers are left to the OS).
        sys.exit(0)
Is there a reason why you do not use
def reduce_socket(s):
...
def rebuild_socket(ds):
...
?
It seems like you could do this:
import copyreg
# Register a reducer so socket objects can be pickled (e.g. sent through
# a Queue).  Pass the socket class itself: `type(socket.socket)` evaluates
# to `type`, which would register the reducer for every class instead of
# just for sockets.
copyreg.pickle(socket.socket, reduce_socket, rebuild_socket)
and then pass the socket to the queue.
These are suggestions. Do they help?
try this:
def handle(self):
    # Ship the reduced socket handle to the worker pool...
    h = reduce_handle(self.request.fileno())
    socket_queue.put(h)
    # ...then close this process's copy so TCPServer's teardown does not
    # kill the connection the worker is about to serve.
    self.request.close()
note the self.request.close() addition.
I'm using Twisted to make a simple server that accepts multiple connections, and I want to count the number of clients that have connected. I do this counting in the factory (as is logical) using clientConnectionMade(), but it doesn't update the value of the counter, and I really don't know where my mistake is. I'd appreciate a little help.
My Server code: (also in http://bpaste.net/show/26789/)
import socket
import datetime
from twisted.internet import reactor, protocol
from twisted.internet.protocol import Factory, Protocol
class Echo(protocol.Protocol):
    """Per-connection protocol: answers a couple of fixed text commands."""

    def connectionMade(self):
        print "New client connected"

    def dataReceived(self, data):
        print "Msg from the client received"
        if data == "datetime":
            now = datetime.datetime.now()
            self.transport.write("Date and time:")
            self.transport.write(str(now))
        elif data == "clientes":
            # Reads the counter kept on the factory (shared by all clients).
            self.transport.write("Numbers of clients served: %d " % (self.factory.numClients))
        else:
            self.transport.write("msg received without actions")
class EchoFactory(Factory):
    """Shared state for all Echo connections, including the client counter."""

    protocol = Echo

    def __init__(self):
        self.numClients = 0  # total clients connected so far

    def buildProtocol(self, addr):
        # Twisted's Factory has no clientConnectionMade() hook, so that
        # method was never being invoked.  buildProtocol IS called once
        # per incoming connection, so bump the counter from here.
        self.clientConnectionMade()
        return Factory.buildProtocol(self, addr)

    def clientConnectionMade(self):
        self.numClients = self.numClients+1
def main():
    # Wire the factory to port 9000 and hand control to the reactor.
    factory = EchoFactory()
    factory.protocol = Echo  # redundant: already set as a class attribute
    reactor.listenTCP(9000,factory)
    reactor.run()

# this only runs if the module was *not* imported
if __name__ == '__main__':
    main()
It doesn't show any error; it just doesn't update the counter 'numClients', and I don't know why.
Thanks
clientConnectionMade (where you increment self.numClients) is not a valid method on the Factory class, so it will never be called by the framework.
Calling self.factory.numClients += 1 from inside of your Echo.connectionMade() method would work:
class Echo(protocol.Protocol):
    def connectionMade(self):
        print "New client connected"
        # Twisted sets self.factory before calling connectionMade, so the
        # shared counter can be incremented here.
        self.factory.numClients += 1
You could also override your Factory's buildProtocol() method to do something similar.
I am trying to write an application with twistd library written for Python. The application file ends like the following:
# twistd application wiring: build the Application object twistd runs.
factory = protocol.ServerFactory()
factory.protocol = EchoServer  # EchoServer is defined earlier in the file
application = service.Application("Echo")
# Attach a TCP server on port 8001 so twistd starts/stops it with the app.
internet.TCPServer(8001, factory).setServiceParent(application)
I want to run something before my application terminates (e.g. close a file). Does anyone know how to do that? Because this is event-driven, I don't know where the clean-up function should be called.
You need to add code to the startService and stopService methods of the Service.
One way would be something like this:
from twisted.application import service
from twisted.internet import protocol
class MyService(service.Service):
    """Service whose startService/stopService bracket the TCP listener."""

    def __init__(self,port=8001):
        self.port = port  # TCP port to listen on

    def startService(self):
        # Build the factory when the service starts, then defer the actual
        # listen until the reactor is running.
        self.factory = protocol.ServerFactory()
        self.factory.protocol = EchoServer
        from twisted.internet import reactor
        reactor.callWhenRunning(self.startListening)

    def startListening(self):
        from twisted.internet import reactor
        self.listener = reactor.listenTCP(self.port,self.factory)
        print "Started listening"

    def stopService(self):
        # Called on shutdown -- put clean-up (closing files, etc.) here.
        self.listener.stopListening()
        # Any other tidying
# Create the Application and attach the service so twistd manages its
# start-up and shutdown hooks.
application = service.Application("Echo")
MyService().setServiceParent(application)