I have the following in my run.py script.
import argparse
import random
import threading
import time

from scapy.all import sniff
from scapy.all import Ether, IP, IPv6, TCP

NUM_PACKETS = 500

parser = argparse.ArgumentParser(description='run_test.py')
parser.add_argument('--random-dport',
                    help='Use a random TCP dest port for each packet',
                    action="store_true", default=False)
args = parser.parse_args()
class PacketQueue:
    def __init__(self):
        self.pkts = []
        self.lock = threading.Lock()
        self.ifaces = set()

    def add_iface(self, iface):
        self.ifaces.add(iface)

    def get(self):
        self.lock.acquire()
        if not self.pkts:
            self.lock.release()
            return None, None
        pkt = self.pkts.pop(0)
        self.lock.release()
        return pkt

    def add(self, iface, pkt):
        if iface not in self.ifaces:
            return
        self.lock.acquire()
        self.pkts.append((iface, pkt))
        self.lock.release()
queue = PacketQueue()
def pkt_handler(pkt, iface):
    if IPv6 in pkt:
        return
    queue.add(iface, pkt)

class SnifferThread(threading.Thread):
    def __init__(self, iface, handler=pkt_handler):
        threading.Thread.__init__(self)
        self.iface = iface
        self.handler = handler

    def run(self):
        sniff(
            iface=self.iface,
            prn=lambda x: self.handler(x, self.iface)
        )
class PacketDelay:
    def __init__(self, bsize, bdelay, imin, imax, num_pkts=100):
        self.bsize = bsize
        self.bdelay = bdelay
        self.imin = imin
        self.imax = imax
        self.num_pkts = num_pkts
        self.current = 1

    def __iter__(self):
        return self

    def next(self):
        if self.num_pkts <= 0:
            raise StopIteration
        self.num_pkts -= 1
        if self.current == self.bsize:
            self.current = 1
            return random.randint(self.imin, self.imax)
        else:
            self.current += 1
            return self.bdelay
pkt = Ether()/IP(dst='10.0.0.1', ttl=64)/TCP()
port_map = {
    1: "veth3",
    2: "veth5",
    3: "veth7"
}
iface_map = {}
for p, i in port_map.items():
    iface_map[i] = p
queue.add_iface("veth3")
queue.add_iface("veth5")
for p, iface in port_map.items():
    t = SnifferThread(iface)
    t.daemon = True
    t.start()
import socket
send_socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW,
                            socket.htons(0x03))
send_socket.bind((port_map[3], 0))
delays = PacketDelay(10, 5, 25, 100, NUM_PACKETS)
ports = []
print "Sending", NUM_PACKETS, "packets ..."
for d in delays:
    # sendp is too slow...
    # sendp(pkt, iface=port_map[3], verbose=0)
    if args.random_dport:
        pkt["TCP"].dport = random.randint(1025, 65535)
    send_socket.send(str(pkt))
    time.sleep(d / 1000.)
time.sleep(1)
iface, pkt = queue.get()
while pkt:
    ports.append(iface_map[iface])
    iface, pkt = queue.get()
print ports
print "DISTRIBUTION..."
for p in port_map:
    c = ports.count(p)
    print "port {}: {:>3} [ {:>5}% ]".format(p, c, 100. * c / NUM_PACKETS)
I tried running the script using the command below:
./run_test.py '--random-dport' 2
It throws an "unrecognized arguments" error with the following message:
ubuntu@workshop:~/p4lang/tutorials/workshop_/mp$ sudo ./run.py 2
WARNING: No route found for IPv6 destination :: (no default route?)
usage: run.py [-h] [--random-dport]
run_test.py: error: unrecognized arguments: 2
ubuntu@workshop:~/p4lang/tutorials/workshop/mp$
What could be the problem here? I assume the rest of the run.py code is fine and that the problem is in the lines above; I can add the rest of the code if needed. I have been stuck on this for a long time, so any input would help!
If you want to specify a specific port (e.g. 2)
I'm not sure why you're using action="store_true".
Just do this (instead of your add_argument; the rest of the code is the same):
parser.add_argument('--random-dport', type=int,
                    help='TCP dest port to use for each packet',
                    default=1)  # default to port 1
Then you can access the port by doing:
./run_test.py '--random-dport' 2
args.random_dport
>> 2
If you want to choose a random port
If you really want a true/false flag for a random port, as your code seems to intend, you can keep your original code, but then passing the 2 is pointless: the mere presence of the --random-dport flag stores True in the variable:
./run_test.py
args.random_dport
>> False
Not present -> False.
./run_test.py '--random-dport'
args.random_dport
>> True
Present -> True.
./run_test.py '--random-dport' 2
>> error
An extra value was passed -> error.
The 2 is meaningless here: you are specifying that you want a RANDOM dport, which means you can't pick a specific one. (The previous section lets you pick a specific port.)
Either way, check out the argparse tutorial; it is very helpful for figuring out this kind of thing.
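To make the difference concrete, here is a minimal, self-contained sketch of both styles (the values are made up; parse_args() is fed explicit argument lists so the behaviour is visible without touching sys.argv):

import argparse

# Style 1: the option takes an integer value
p1 = argparse.ArgumentParser()
p1.add_argument('--random-dport', type=int, default=1)
print p1.parse_args(['--random-dport', '2']).random_dport
# -> 2 (note that argparse turns the dash into an underscore in the attribute name)

# Style 2: a boolean flag that accepts no value
p2 = argparse.ArgumentParser()
p2.add_argument('--random-dport', action="store_true", default=False)
print p2.parse_args(['--random-dport']).random_dport
# -> True
# p2.parse_args(['--random-dport', '2']) would exit with
# "error: unrecognized arguments: 2", exactly the error in the question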
I have the following Simulink model:
[image: Simulink Model with UDP]
With this model and a joystick I want to send inputs to a UDP port, which I will then read from another application, in my case Blender.
In Blender I have the following code, which works perfectly for a single input provided by Simulink.
import bpy
import math
import socket
import struct

port = 12009
address = "127.0.0.1"
base = 0

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((address, port))

cube = bpy.data.objects['Cube']
cube.location = (base, 0, 0)

class ModalTimerOperator(bpy.types.Operator):
    """Operator which runs itself from a timer"""
    bl_idname = "wm.modal_timer_operator"
    bl_label = "Modal Timer Operator"

    _timer = None

    def modal(self, context, event):
        if event.type in {'RIGHTMOUSE', 'ESC'}:
            self.cancel(context)
            s.close()
            cube.location = (base, 0, 0)
            return {'CANCELLED'}
        if event.type == 'TIMER':
            data, addr = s.recvfrom(1024)
            data = struct.unpack('!d', data)
            x = data[0]
            print(data)
            cube.location = (x, 0, 0)
            print("X:", x)
        return {'PASS_THROUGH'}

    def execute(self, context):
        wm = context.window_manager
        self._timer = wm.event_timer_add(0.001, window=context.window)
        wm.modal_handler_add(self)
        return {'RUNNING_MODAL'}

    def cancel(self, context):
        wm = context.window_manager
        wm.event_timer_remove(self._timer)

def register():
    bpy.utils.register_class(ModalTimerOperator)

def unregister():
    bpy.utils.unregister_class(ModalTimerOperator)

if __name__ == "__main__":
    register()
    # test call
    bpy.ops.wm.modal_timer_operator()
When I try to run this, I get the following error:
struct.error: unpack requires a buffer of 8 bytes
How can I unpack an array instead of just a double using struct.unpack?
What changes should I make in the code to achieve this?
I found the answer myself.
There is no need to use matrix concatenation; simply connect the muxed signal of the signals you want to send to the UDP block. Then, in the code, change the struct.unpack line to the following (if you have 3 signals):
data = struct.unpack('!ddd', data)
x = data[0]*100
y = data[1]*100
z = data[2]*100
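For anyone wanting to sanity-check the format string without Simulink attached, here is a small round-trip sketch: struct.pack stands in for the UDP Send block (network byte order, three 8-byte doubles per datagram), and the field count can also be derived from the datagram length instead of being hard-coded:

import struct

payload = struct.pack('!ddd', 1.5, -0.25, 3.0)  # what Simulink would put on the wire
print(len(payload))                              # 24 bytes: three doubles
x, y, z = struct.unpack('!ddd', payload)
print(x, y, z)                                   # 1.5 -0.25 3.0

# If the number of signals may vary, derive the count from the length:
n = len(payload) // 8
print(struct.unpack('!%dd' % n, payload))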
I have a class MyLogger that sends messages to a log server using PUBHandler.
An exception gets raised when MyLogger is instantiated in the LogWorker.__init__() method (version 2 below), but everything is fine if MyLogger is instantiated in the LogWorker.log_worker() method (version 1).
Any suggestions would be appreciated.
import logging
from multiprocessing import Process
import os
import random
import sys
import time

import zmq
from zmq.log.handlers import PUBHandler

class MyLogger(object):
    ''''''
    def __init__(self, port, handler=None):
        self.port = port
        self.handler = handler or self._construct_sock_handler()
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.INFO)
        if not self.logger.handlers:
            self.logger.addHandler(self.handler)

    def _construct_sock_handler(self):
        context = zmq.Context()
        log_sock = context.socket(zmq.PUB)
        log_sock.connect("tcp://127.0.0.1:%i" % self.port)
        time.sleep(0.1)
        handler = PUBHandler(log_sock)
        return handler

    def get_logger(self):
        return self.logger

def sub_logger(port, level=logging.DEBUG):
    ctx = zmq.Context()
    sub = ctx.socket(zmq.SUB)
    sub.bind('tcp://127.0.0.1:%i' % port)
    sub.setsockopt(zmq.SUBSCRIBE, "")
    logging.basicConfig(level=level)
    while True:
        level, message = sub.recv_multipart()
        if message.endswith('\n'):
            # trim trailing newline, which will get appended again
            message = message[:-1]
        log = getattr(logging, level.lower())
        log(message)

class LogWorker(object):
    def __init__(self):
-       pass                                        # version 1
+       self.logger = MyLogger(port).get_logger()   # version 2

    def log_worker(self, port):
-       self.logger = MyLogger(port).get_logger()   # version 1
        print "starting logger at %i with level=%s" % (os.getpid(), logging.DEBUG)
        while True:
            level = logging.INFO
            self.logger.log(level, "Hello from %i!" % os.getpid())
            time.sleep(1)

if __name__ == '__main__':
    if len(sys.argv) > 1:
        n = int(sys.argv[1])
    else:
        n = 2
    port = 5555
    workers = [Process(target=LogWorker().log_worker, args=(port,)) for _ in range(n)]
    [w.start() for w in workers]
    try:
        sub_logger(port)
    except KeyboardInterrupt:
        pass
    finally:
        [w.terminate() for w in workers]
Answer from pyzmq owner minrk:
You cannot pass zmq contexts or sockets across the fork boundary that happens when you instantiate a subprocess with multiprocessing. You have to make sure that you create your Context after you are in the subprocess.
Solution:
def work():
    worker = LogWorker()
    worker.log_worker(port)

workers = [Process(target=work) for _ in range(n)]
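A minimal sketch of that rule, reusing port 5555 from the script above: the zmq.Context (and the socket made from it) is created inside the function that the child process runs, i.e. after the fork, never in the parent:

import zmq
from multiprocessing import Process

def work(port):
    ctx = zmq.Context()                        # created after the fork: safe
    sock = ctx.socket(zmq.PUB)
    sock.connect("tcp://127.0.0.1:%i" % port)
    sock.send("hello from a pid-local context")
    sock.close()
    ctx.term()

p = Process(target=work, args=(5555,))
p.start()
p.join()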
I am trying to accept about 1000 connections to my server, but it cannot accept more than 512. What can I do to increase the number of open connections? I am running Windows 8.1.
Note: I am very new to this stuff, so thanks for the help.
Here is my code:
import asyncore
import socket
import uuid
import time
import threading

class statistics(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)

    def run(self):
        while True:
            entry = raw_input("")
            zaman = int(time.time())
            cmd = receivedCmd
            print "calculating.."
            time.sleep(1)
            if entry == 'istatistik':
                print str(receivedCmd - cmd) + " command/second"
                print "total received commands: " + str(receivedCmd)
                entry = ""

class tcpClient:
    def __init__(self):
        self.clientid = uuid.uuid1(int(time.time()))
        self.buffer = ""
        self.buffer_size = 0
        self.conn_time = time.time()
        self.overflow = 0
        #print str(self.clientid) + " assigned"

    def recv_msg(self, msg):
        global receivedCmd
        self.buffer = msg
        self.buffer_size = len(self.buffer)
        receivedCmd = receivedCmd + 1
        if self.buffer_size >= 1024:
            self.overflow = 1

    def __del__(self):
        print str(self.clientid) + " has left."

class TCPHandler(asyncore.dispatcher_with_send):
    global clist

    def handle_read(self):
        data = self.recv(1024)
        if data:
            if clist[self].overflow:
                self.send("overflow")
                self.handle_close()
            else:
                self.send(data)
                clist[self].recv_msg(data)

    def handle_close(self):
        del clist[self]
        self.close()

    def handle_error(self):
        del clist[self]
        self.close()

class TCPServer(asyncore.dispatcher):
    global clist

    def __init__(self, host, port):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind((host, port))
        self.listen(5)

    def handle_accept(self):
        self.clist = clist
        pair = self.accept()
        if pair is None:
            pass
        else:
            sock, addr = pair
            #print 'Connection : %s' % repr(addr)
            clist[TCPHandler(sock)] = tcpClient()

if __name__ == '__main__':
    clist = {}
    receivedCmd = 0
    server = TCPServer('', 5000)
    server2 = TCPServer('', 5001)
    StaticsThread = statistics()
    StaticsThread.start()
    asyncore.loop()
Note: I still cannot accept more than 512 connections with the Twisted framework; I don't know what to do. There have to be thousands of connected clients. Please help.
The asyncore module relies on the select OS function, which only supports a limited number of file descriptors; that limit is what caps you at 512 connections.
As an alternative, use a multi-threaded server (which I don't recommend) or, better, the Twisted framework, which is event-driven (highly recommended!).
Hope this helps!
Since Twisted's default reactor under Windows is also select-based, you should consider using the IOCP reactor instead.
from twisted.internet import iocpreactor
iocpreactor.install()
from twisted.internet import reactor
But also take into account that Twisted prefers Linux systems (where the default reactor is epoll-based) rather than Windows. Maybe switching to Linux is a better choice.
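For reference, here is a minimal echo-server sketch under the IOCP reactor (port 5000 is borrowed from the asyncore code above; this is a bare sketch, not a drop-in replacement for the question's server):

from twisted.internet import iocpreactor
iocpreactor.install()
from twisted.internet import reactor, protocol

class Echo(protocol.Protocol):
    def dataReceived(self, data):
        # echo every received chunk straight back to the client
        self.transport.write(data)

factory = protocol.ServerFactory()
factory.protocol = Echo
reactor.listenTCP(5000, factory)
reactor.run()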
I'm working on a server written in Python. When the client sends a cmd, the server calls a function with an unknown running time, so to avoid blocking I used threading. But when looking at the child threads, it seems that they're not terminating, which causes a lot of memory usage.
EDIT: Here is the tree of the directory: http://pastebin.com/WZDxLquC
Following answers I found on Stack Overflow, I implemented a custom Thread class:
sThreads.py:
import threading

class Thread(threading.Thread):
    def __init__(self, aFun, args=()):
        super(Thread, self).__init__(None, aFun, None, args)
        self.stopped = threading.Event()

    def stop(self):
        self.stopped.set()

    def isStopped(self):
        return self.stopped.isSet()
Then here is the server's loop:
somewhere in mainServer.py:
def serve_forever(self, aCustomClass, aSize=1024):
    while True:
        self.conn, self.addr = self.sock.accept()
        msg = self.recvMSG(4096)
        if msg:
            self.handShake(msg)
            print 'Accepted !'
        while True:
            msg = self.recvMSG(aSize)
            if msg:
                t = sThreads.Thread(self.handle, (aCustomClass,))
                t.start()
                self.currentThreads.append(t)
            if self.workers > 0:
                tt = sThreads.Thread(self.respond)
                tt.start()
            if self.workers == 0 and len(self.currentThreads) > 0:
                for th in self.currentThreads:
                    th.stop()
Using a custom Thread class does not solve the issue: the finished threads are still not being stopped!
EDIT: added the handle() and respond() methods:
def handle(self, aClass):
    self.workers += 1
    self.queue.put(aClass._onRecieve(self.decodeStream()))

def respond(self):
    while self.workers > 0:
        msgToSend, wantToSend = self.queue.get()
        self.workers -= 1
        if wantToSend:
            print 'I want to send :', msgToSend
            continue  # Send is not yet implemented!
It seems that self.queue.get() was causing the whole issue...
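One possible fix, sketched against the respond() above (and assuming self.queue is a standard Queue.Queue): give queue.get() a timeout so the thread can re-check its loop condition instead of blocking forever on an empty queue:

import Queue  # the Python 2 module name; it is 'queue' in Python 3

def respond(self):
    while self.workers > 0:
        try:
            msgToSend, wantToSend = self.queue.get(timeout=1.0)
        except Queue.Empty:
            continue  # nothing arrived in time; loop and re-check workers
        self.workers -= 1
        if wantToSend:
            print 'I want to send :', msgToSend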
I'm trying to start a data queue server under a managing process (so that it can later be turned into a service), and while the data queue server function works fine in the main process, it does not work in a process created using multiprocessing.Process.
The dataQueueServer and dataQueueClient code is based on the code from the multiprocessing module documentation here.
When run on its own, dataQueueServer works well. However, when run using a multiprocessing.Process's start() in mpqueue, it doesn't work (when tested with the client). I am using the dataQueueClient without changes to test both cases.
The code does reach the serve_forever in both cases, so I think the server is working, but something is blocking it from communicating back to the client in the mpqueue case.
I have placed the loop that runs the serve_forever() part in a thread, so that it can be stopped.
Here is the code:
mpqueue # this is the "manager" process trying to spawn the server in a child process
import time
import multiprocessing
import threading

import dataQueueServer

class Printer():
    def __init__(self):
        self.lock = threading.Lock()

    def tsprint(self, text):
        with self.lock:
            print text

class QueueServer(multiprocessing.Process):
    def __init__(self, name = '', printer = None):
        multiprocessing.Process.__init__(self)
        self.name = name
        self.printer = printer
        self.ml = dataQueueServer.MainLoop(name = 'ml', printer = self.printer)

    def run(self):
        self.printer.tsprint(self.ml)
        self.ml.start()

    def stop(self):
        self.ml.stop()

if __name__ == '__main__':
    printer = Printer()
    qs = QueueServer(name = 'QueueServer', printer = printer)
    printer.tsprint(qs)
    printer.tsprint('starting')
    qs.start()
    printer.tsprint('started.')
    printer.tsprint('Press Ctrl-C to quit')
    try:
        while True:
            time.sleep(60)
    except KeyboardInterrupt:
        printer.tsprint('\nTrying to exit cleanly...')
        qs.stop()
        printer.tsprint('stopped')
dataQueueServer
import time
import threading
from multiprocessing.managers import BaseManager
from multiprocessing import Queue

HOST = ''
PORT = 50010
AUTHKEY = 'authkey'

## Define some helper functions for use by the main process loop

class Printer():
    def __init__(self):
        self.lock = threading.Lock()

    def tsprint(self, text):
        with self.lock:
            print text

class QueueManager(BaseManager):
    pass

class MainLoop(threading.Thread):
    """A thread based loop manager, allowing termination signals to be sent
    to the thread"""

    def __init__(self, name = '', printer = None):
        threading.Thread.__init__(self)
        self._stopEvent = threading.Event()
        self.daemon = True
        self.name = name
        if printer is None:
            self.printer = Printer()
        else:
            self.printer = printer
        ## create the queue
        self.queue = Queue()
        ## Add a function to the handler to return the queue to clients
        self.QM = QueueManager
        self.QM.register('get_queue', callable=lambda:self.queue)
        self.queue_manager = self.QM(address=(HOST, PORT), authkey=AUTHKEY)
        self.queue_server = self.queue_manager.get_server()

    def __del__(self):
        self.printer.tsprint('closing...')

    def run(self):
        self.printer.tsprint('{}: started serving'.format(self.name))
        self.queue_server.serve_forever()

    def stop(self):
        self.printer.tsprint('{}: stopping'.format(self.name))
        self._stopEvent.set()

    def stopped(self):
        return self._stopEvent.isSet()

def start():
    printer = Printer()
    ml = MainLoop(name = 'ml', printer = printer)
    ml.start()
    return ml

def stop(ml):
    ml.stop()

if __name__ == '__main__':
    ml = start()
    raw_input("\nhit return to stop")
    stop(ml)
And a client:
dataQueueClient
import datetime
from multiprocessing.managers import BaseManager

n = 0
N = 10**n

HOST = ''
PORT = 50010
AUTHKEY = 'authkey'

def now():
    return datetime.datetime.now()

def gen(n, func, *args, **kwargs):
    k = 0
    while k < n:
        yield func(*args, **kwargs)
        k += 1

class QueueManager(BaseManager):
    pass

QueueManager.register('get_queue')
m = QueueManager(address=(HOST, PORT), authkey=AUTHKEY)
m.connect()
queue = m.get_queue()

def load(msg, q):
    return q.put(msg)

def get(q):
    return q.get()

lgen = gen(N, load, msg = 'hello', q = queue)

t0 = now()
while True:
    try:
        lgen.next()
    except StopIteration:
        break
t1 = now()
print 'loaded %d items in ' % N, t1 - t0

t0 = now()
while queue.qsize() > 0:
    queue.get()
t1 = now()
print 'got %d items in ' % N, t1 - t0
So it seems like the solution is simple enough: Don't use serve_forever(), and use manager.start() instead.
According to Eli Bendersky, BaseManager (and its extended version SyncManager) already spawns the server in a new process (and a look at the multiprocessing.managers code confirms this). The problem I have been experiencing stems from the form used in the example, in which the server is started under the main process.
I still don't understand why the current example doesn't work when run under a child process, but that's no longer an issue.
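To make the contrast explicit, here is a minimal sketch using the same queue registration as the OP code (port 50010): the docs-example form runs the server in the current process and blocks, while start() spawns it in a child process and returns immediately:

from multiprocessing import Queue
from multiprocessing.managers import BaseManager

q = Queue()

class QM(BaseManager):
    pass

QM.register('get_queue', callable=lambda: q)

# Form 1 (the docs example): serves in *this* process and never returns
# QM(address=('', 50010), authkey='authkey').get_server().serve_forever()

# Form 2: spawns the server in a child process and returns immediately
m = QM(address=('', 50010), authkey='authkey')
m.start()
# ... hand out queues via m.get_queue() ...
m.shutdown()  # stop the spawned server when done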
Here's the working (and much simplified from the OP's) code to manage multiple queue servers:
Server:
from multiprocessing import Queue
from multiprocessing.managers import SyncManager

HOST = ''
PORT0 = 5011
PORT1 = 5012
PORT2 = 5013
AUTHKEY = 'authkey'

name0 = 'qm0'
name1 = 'qm1'
name2 = 'qm2'
description = 'Queue Server'

def CreateQueueServer(HOST, PORT, AUTHKEY, name = None, description = None):
    q = Queue()

    class QueueManager(SyncManager):
        pass

    # each registered typeid needs a callable, hence the lambdas
    QueueManager.register('get_queue', callable = lambda: q)
    QueueManager.register('get_name', callable = lambda: name)
    QueueManager.register('get_description', callable = lambda: description)
    manager = QueueManager(address = (HOST, PORT), authkey = AUTHKEY)
    manager.start() # This actually starts the server
    return manager

# Start three queue servers
qm0 = CreateQueueServer(HOST, PORT0, AUTHKEY, name0, description)
qm1 = CreateQueueServer(HOST, PORT1, AUTHKEY, name1, description)
qm2 = CreateQueueServer(HOST, PORT2, AUTHKEY, name2, description)

raw_input("return to end")
Client:
from multiprocessing.managers import SyncManager

HOST = ''
PORT0 = 5011
PORT1 = 5012
PORT2 = 5013
AUTHKEY = 'authkey'

def QueueServerClient(HOST, PORT, AUTHKEY):
    class QueueManager(SyncManager):
        pass

    QueueManager.register('get_queue')
    QueueManager.register('get_name')
    QueueManager.register('get_description')
    manager = QueueManager(address = (HOST, PORT), authkey = AUTHKEY)
    manager.connect() # This actually connects the client
    return manager

# create three connected managers
qc0 = QueueServerClient(HOST, PORT0, AUTHKEY)
qc1 = QueueServerClient(HOST, PORT1, AUTHKEY)
qc2 = QueueServerClient(HOST, PORT2, AUTHKEY)

# Get the queue objects from the clients
q0 = qc0.get_queue()
q1 = qc1.get_queue()
q2 = qc2.get_queue()

# put stuff in the queues
q0.put('some stuff')
q1.put('other stuff')
q2.put({1: 123, 2: 'abc'})

# check their sizes
print 'q0 size', q0.qsize()
print 'q1 size', q1.qsize()
print 'q2 size', q2.qsize()

# pull some stuff and print it
print q0.get()
print q1.get()
print q2.get()
Adding an additional server to share a dictionary with information about the running queue servers, so that consumers can easily tell what's available where, is easy enough with this model. One thing to note, though, is that the shared dictionary requires slightly different syntax than a normal dictionary: dictionary[0] = something will not work. You need to use the dictionary.update([(key, value), (otherkey, othervalue)]) and dictionary.get(key) syntax, which propagates to all the other clients connected to this dictionary.
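Here is a hedged sketch of such a directory server (port 5014 and the registry layout are made up for illustration). A plain dict registered this way comes back to clients wrapped in an AutoProxy that exposes its public methods, such as update(), get(), and keys(), but not __setitem__, which is why d[key] = value fails while d.update(...) works:

from multiprocessing.managers import SyncManager

registry = {}  # maps queue-server names to their ports

class DirectoryManager(SyncManager):
    pass

DirectoryManager.register('get_registry', callable = lambda: registry)
dm = DirectoryManager(address = ('', 5014), authkey = 'authkey')
dm.start()

d = dm.get_registry()
d.update([('qm0', 5011), ('qm1', 5012), ('qm2', 5013)])
print d.get('qm0')  # -> 5011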