Python pyzmq: program gets stuck

This is a simple PUB/SUB program using pyzmq and multiprocessing.
The server is the PUB side. Each time it sends, it publishes a slice of the ahah list to the SUB client.
The SUB client first .recv_string()s one message in blocking mode, then switches to non-blocking (zmq.NOBLOCK) .recv_string() processing inside a .Poller() loop.
import logging
import zmq
from multiprocessing import Process

def server_init(port_pub):
    ahah = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    index = 0
    num = 2
    context = zmq.Context()
    socket_pub = context.socket(zmq.PUB)
    socket_pub.bind("tcp://127.0.0.1:%s" % port_pub)
    # socket_rep = context.socket(zmq.REP)
    # socket_rep.bind("tcp://*:%s" % port_rep)
    socket_pub.send_string(' '.join(str(v) for v in ahah[index : index + num - 1]))
    index = index + num
    poller_pub = zmq.Poller()
    poller_pub.register(socket_pub, zmq.POLLOUT)
    should_continue = True
    while should_continue:
        socks = dict(poller_pub.poll())
        if socket_pub in socks and socks[socket_pub] == zmq.POLLOUT and index <= 9:
            socket_pub.send_string(' '.join(str(v) for v in ahah[index : index + num - 1]), zmq.NOBLOCK)
            index = index + num
        else:
            should_continue = False
            poller_pub.unregister(socket_pub)

def client(port_sub):
    context = zmq.Context()
    socket_sub = context.socket(zmq.SUB)
    socket_sub.connect("tcp://127.0.0.1:%s" % port_sub)
    tmp = socket_sub.recv_string()
    process_message(tmp)
    poller_sub = zmq.Poller()
    poller_sub.register(socket_sub, zmq.POLLIN)
    should_continue = True
    while should_continue:
        socks = dict(poller_sub.poll())
        if socket_sub in socks and socks[socket_sub] == zmq.POLLIN:
            tmp = socket_sub.recv_string(zmq.NOBLOCK)
            process_message(tmp)
        else:
            should_continue = False
            poller_pub.unregister(socket_sub)

def process_message(msg):
    print("Processing ... %s" % msg)

if __name__ == '__main__':
    logging.info('starting')
    Process(target=server_init, args=(5566,)).start()
    Process(target=client, args=(5566,)).start()
When I launch the program, it just gets stuck and outputs nothing:
$ python test.py
Until after a Ctrl-C is pressed:
$ python test2.py
^CProcess Process-2:
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/Users/jack/.pyenv/versions/3.5.1/lib/python3.5/multiprocessing/popen_fork.py", line 29, in poll
pid, sts = os.waitpid(self.pid, flag)
KeyboardInterrupt
Traceback (most recent call last):
File "/Users/jack/.pyenv/versions/3.5.1/lib/python3.5/multiprocessing/process.py", line 254, in _bootstrap
self.run()
File "/Users/jack/.pyenv/versions/3.5.1/lib/python3.5/multiprocessing/process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "test2.py", line 38, in client
tmp = socket_sub.recv_string()
File "/Users/jack/.pyenv/versions/3.5.1/lib/python3.5/site-packages/zmq/sugar/socket.py", line 402, in recv_string
b = self.recv(flags=flags)
File "zmq/backend/cython/socket.pyx", line 674, in zmq.backend.cython.socket.Socket.recv (zmq/backend/cython/socket.c:6971)
File "zmq/backend/cython/socket.pyx", line 708, in zmq.backend.cython.socket.Socket.recv (zmq/backend/cython/socket.c:6763)
File "zmq/backend/cython/socket.pyx", line 145, in zmq.backend.cython.socket._recv_copy (zmq/backend/cython/socket.c:1931)
File "zmq/backend/cython/checkrc.pxd", line 12, in zmq.backend.cython.checkrc._check_rc (zmq/backend/cython/socket.c:7222)
KeyboardInterrupt
I think the client should at least .recv() one message.
Why doesn't it?

Why?
Besides other reasons, your client side has simply forgotten to subscribe to anything before calling the first .recv_string(). It therefore hangs forever in a blocking receive: nothing can pass the SUB-side topic filter, so no message will ever reach the .recv_string() processing.
Just add socket_sub.setsockopt( zmq.SUBSCRIBE, b"" ) right after the .connect(), because the ZeroMQ default is to subscribe to nothing ( nobody can guess what ought to pass the topic filter in your particular context, so subscribing to nothing is the only sensible default ).
Next, also be careful about .bind() / .connect() timing sensitivity: a PUB socket silently drops anything published before the subscriber has finished connecting and subscribing (the well-known "slow joiner" symptom).
For more details, do not hesitate to download and read the fabulous Pieter HINTJENS' book "Code Connected, Volume 1".
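A minimal sketch of the SUB-side setup with the missing subscription added (function and variable names are kept from the question; only the zmq.SUBSCRIBE line is new):

import zmq

def process_message(msg):
    print("Processing ... %s" % msg)

def client(port_sub):
    context = zmq.Context()
    socket_sub = context.socket(zmq.SUB)
    socket_sub.connect("tcp://127.0.0.1:%s" % port_sub)
    # subscribe to everything -- without this the default topic filter
    # silently discards every message and recv_string() blocks forever
    socket_sub.setsockopt_string(zmq.SUBSCRIBE, "")
    tmp = socket_sub.recv_string()   # now a message can actually arrive
    process_message(tmp)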

Related

Python pySerial - Problem using subclasses

I'm working on a project in Python that uses two or more serial ports to manage some devices from my RPi. When the ports are opened in the same file and I send commands to different serial.Serial instances, everything works fine. This is an example:
import serial
device1_port = "/dev/ttyUSB0"
device2_port = "/dev/ttyUSB1"
# Command sent to device 1. No problem
d1 = serial.Serial(device1_port, timeout = 0.5)
d1.write(b'GET MUTE\n')
output1 = d1.readline()
print("Device 1 output: " + str(output1))
# Command sent to device 2. No problem
d2 = serial.Serial(device2_port, timeout = 1)
d2.write(b'00vP\r')
output2 = d2.readline()
print("Device 2 output: " + str(output2))
Output:
Device 1 output: b'DATA MUTE OFF\r\n'
Device 2 output: b'00vP0\r'
The problem comes when I try to separate one device from another using subclasses of serial.Serial. The reason is I want to deal with them like objects with their own methods (each device needs a lot of different commands, status queries...).
class device1(serial.Serial):
    def __init__(self, port, timeout):
        super().__init__(port, timeout)
        serial.Serial(port, timeout)

    def command1(self):
        self.write(b'SET MUTE OFF\n')
        self.write(b'GET MUTE\n')
        output = self.readline()
        print("Device 1 output: " + str(output))

class device2(serial.Serial):
    def __init__(self, port, timeout):
        super().__init__(port, timeout)
        serial.Serial(port, timeout)

    def command2(self):
        self.write(b'00vP\r')
        output = self.readline()
        print("Device 2 output: " + str(output))

device1_port = "/dev/ttyUSB0"
device2_port = "/dev/ttyUSB1"

d1 = device1(device1_port, timeout=0.5)
d2 = device2(device2_port, timeout=1)

d1.command1()
d2.command2()
When I run this code the output is:
Device 1 output: b'DATA MUTE OFF\r\n'
_
and it keeps waiting forever for the second device. I'm forced to Ctrl + C and I get this:
^CTraceback (most recent call last):
File "./ct3.py", line 35, in <module>
d2.command2()
File "./ct3.py", line 23, in command2
output = self.readline()
File "/usr/lib/python3/dist-packages/serial/serialposix.py", line 483, in read
ready, _, _ = select.select([self.fd, self.pipe_abort_read_r], [], [], timeout.time_left())
KeyboardInterrupt
It seems like there is some kind of conflict between the two subclasses, but obviously I have no idea what I'm doing wrong.
Can someone help me, please?
You shouldn't be calling serial.Serial(port, timeout) from your __init__,
as super().__init__(...) is already doing this. See these answers. You don't even need an __init__ if you are not going to change what the base class does.
Also, there is a difference in your two versions with respect to the use of positional and keyword arguments. serial.Serial()'s first 2 positional arguments are port, baudrate, so you need to explicitly use the keyword argument timeout=:
def __init__(self, port, timeout):
    super().__init__(port, timeout=timeout)
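Putting both fixes together, a minimal sketch of the corrected subclass might look like this (device2 would follow the same pattern; the command strings are taken from the question):

import serial

class device1(serial.Serial):
    def __init__(self, port, timeout):
        # let the base class open the port once; do not create a second
        # serial.Serial(...) object for the same device
        super().__init__(port, timeout=timeout)

    def command1(self):
        self.write(b'GET MUTE\n')
        output = self.readline()
        print("Device 1 output: " + str(output))

d1 = device1("/dev/ttyUSB0", timeout=0.5)
d1.command1()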

Job Pending Exception During Snap7-Python Data Read / Write to PLC

While reading and writing data to a Siemens S7-1200 PLC with python-snap7, I get an exception as follows:
Exception in thread Thread-2:
Traceback (most recent call last):
File "C:\Users\MDoganli\AppData\Local\Programs\Python\Python37-32\Lib\threading.py", line 917, in _bootstrap_inner
self.run()
File "C:\Users\MDoganli\AppData\Local\Programs\Python\Python37-32\Lib\threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "C:\Companies\Personal\deneme\deneme_iterasyonlar\plcman.py", line 59, in read_data
torque=plc.read_area(areas['DB'],110,80,24)
File "C:\Users\MDoganli\AppData\Local\Programs\Python\Python37-32\lib\site-packages\snap7\client.py", line 256, in read_area
check_error(result, context="client")
File "C:\Users\MDoganli\AppData\Local\Programs\Python\Python37-32\lib\site-packages\snap7\common.py", line 65, in check_error
raise Snap7Exception(error)
snap7.snap7exceptions.Snap7Exception: b'CLI : Job pending'
I don't experience this problem with a single db_read/db_write channel, but it occurs when an additional read or write channel is active.
I have tried area_read & area_write as well as db_read & db_write, but I receive similar errors.
Main Code:
plc=plcman.PLC_Controller('192.168.30.100',0,1)
plc.connect()
time.sleep(1)
plc.start_thread2()
time.sleep(1)
plc.start_thread()
PLC Data-Read Write Code
class PLC_Controller:
    plc = c.Client()

    def __init__(self, address, rack, slot):
        self.address = address
        self.rack = rack
        self.slot = slot

    def connect(self):
        count = 0
        if plc.get_connected() == False:
            print("Try " + str(count) + " - Connecting to PLC: " +
                  self.address + ", Rack: " + str(self.rack) + ", Slot: " + str(self.slot))
            try:
                plc.connect(self.address, self.rack, self.slot)  # ('IP-address', rack, slot)
            except Exception as e:
                print(e)
        if plc.get_connected() == True:
            return plc.get_connected() == True

    def get_word(self, _bytearray, byte_index):
        data = _bytearray[byte_index:byte_index + 2]
        data = data[::-1]
        dword = struct.unpack('H', struct.pack('2B', *data))[0]
        return dword

    def read_data(self):
        torque = plc.read_area(areas['DB'], 110, 80, 24)
        data1 = self.get_word(torque, 0)
        time.sleep(0.8)
        self.read_data()

    def start_thread(self):
        thread = threading.Thread(target=self.read_data, args=())
        thread.daemon = True
        thread.start()

    def set_word(self, _bytearray, byte_index, word):
        word = int(word)
        _bytes = struct.pack('H', word)
        _bytes = _bytes[::-1]
        for i, b in enumerate(_bytes):
            time.sleep(1)
            _bytearray[byte_index + i] = b
        res = plc.write_area(areas['DB'], 110, 24, _bytearray)

    def start_thread2(self):
        thread = threading.Thread(target=self.stoprun, args=())
        thread.daemon = True
        thread.start()

    def stoprun(self):
        Lamp = 4
        torque = plc.read_area(areas['DB'], 110, 80, 24)
        val1 = self.set_word(torque, 0, 8)
        self.stoprun()
Thanks in advance.
Read & write should use different instances of the PLC connection. The modified connection will be:
plc=plcman.PLC_Controller('192.168.30.100',0,1) # for reading use plc.read_area()
plc.connect()
plc2=plcman.PLC_Controller('192.168.30.100',0,1)
plc2.connect() #for writing use plc2.write_area()
Up to 3 instances are allowed. With separate read & write connections, the "Job pending" error will not be raised.
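Note that for the two-instance approach to actually open two connections, each PLC_Controller needs its own snap7 client rather than the shared class-level plc attribute from the question. A minimal sketch of that change (method names kept from the question, error handling trimmed):

import snap7.client as c

class PLC_Controller:
    def __init__(self, address, rack, slot):
        self.address = address
        self.rack = rack
        self.slot = slot
        # one snap7 client per controller instance, so the reading and
        # writing controllers really hold separate connections
        self.plc = c.Client()

    def connect(self):
        if not self.plc.get_connected():
            try:
                self.plc.connect(self.address, self.rack, self.slot)
            except Exception as e:
                print(e)
        return self.plc.get_connected()

With this, the reading thread uses one PLC_Controller and the writing thread another, matching the two-connection setup shown above.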

Scapy sniff() function not working for no apparent reason

I'm trying to use the sniff() function that scapy provides but it raises the following error:
Traceback (most recent call last):
File "TestCode.py", line 54, in <module>
packets = getMessege()
File "TestCode.py", line 45, in getMessege
return sniff(count=getLen(), lfilter=filterFrom)
File "C:\Heights\PortableApps\PortablePython2.7.6.1\App\lib\site-packages\scapy\sendrecv.py", line 575, in sniff
sel = select([s],[],[],remain)
select.error: (10038, 'An operation was attempted on something that is not a socket')
Here is the code (FromGlobal is a tuple that contains the IP and Port of the sender):
def getLen():
    while True:
        length, LenFrom = sock.recvfrom(1024)
        try:
            IntLen = int(length)
        except:
            pass
        else:
            if LenFrom == FromGlobal:
                return IntLen

def filterFrom(pck):
    try:
        return pck[IP].src == FromGlobal[0] and pck[UDP].sport == FromGlobal[1]
    except:
        return False

def getMessege():  # TODO This needs to return only the messege and port
    return sniff(count=getLen(), lfilter=filterFrom)

packets = getMessege()
print packets.show()
The weird part is that if I try to do it like so:
def func1():
    return int('1')

def lfilter(pack):
    return TCP in pack and pack[IP].src != '8.8.8.8'

def func():
    return sniff(count=func1(), lfilter=lfilter)

var = func()
print var.show()
it works perfectly well. If someone could point out the difference between the two it would help a lot.
I'm using WinPcap 4.1.3 and Scapy 2.x.
Well, I resolved it myself. Apparently, if you do:
from scapy.all import *
from scapy.layers.inet import *
the sniff function won't work, so only do:
from scapy.all import *
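For reference, a minimal sketch of a capture that sticks to the single import (FromGlobal here is a hypothetical (IP, port) pair standing in for the sender from the question):

from scapy.all import *   # IP, UDP and sniff are all re-exported here

FromGlobal = ("192.168.1.10", 5000)   # hypothetical sender address and port

def filterFrom(pck):
    # keep only UDP packets coming from the expected sender
    return IP in pck and UDP in pck and \
           pck[IP].src == FromGlobal[0] and pck[UDP].sport == FromGlobal[1]

packets = sniff(count=5, lfilter=filterFrom)
packets.show()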

Python Threading: more than one argument given

I am trying to start threads and I keep getting an error message saying that I am passing more than one argument. It seems like the Thread object does not take the variable port as a single argument, but rather treats each character of the string as a separate argument. Why does this happen? It is my first time multithreading in Python.
Error message:
Exception in thread /dev/ttyUSB0:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 810, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 763, in run
self.__target(*self.__args, **self.__kwargs)
TypeError: report() takes exactly 1 argument (12 given)
Code:
def report(port):
print("\n")
print(st() +"Connecting to" + port[0])
device = serial.Serial(port=port[0], baudrate=9600, timeout=0.2)
print (st() + "Connection sucessfull...")
print (st() + "Initializing router on "+ port[0])
if initialize_router(device) == 0:
return 0
print (st() + "Initialization sucessfull")
print (st() + "Starting to inject IP basic config")
if inject_config(device) == 0:
print(errror("injecing the confing",port[0]))
return 0
print(st()+ "Finished injecting default IP setting on router connected to " + port[0])
return 1
if __name__ == '__main__':
ports = list_ports.comports()
list_port = list(ports)
port_counter = -1
for port in list_port:
if "USB" in port[0]:
port_counter = port_counter + 1
port = "/dev/ttyUSB" + str(port_counter)
thread = Thread(target=report, args=(port), name=port)
thread.start()
print port
print ("\n")
continue
thread = Thread(target=report, args=(port), name=port)
I'm guessing you wanted to pass a single element tuple to args here. But those parentheses around port have no effect by themselves. Try:
thread = Thread(target=report, args=(port,), name=port)
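A quick illustration of why the trailing comma matters: Thread calls target(*args), so a bare string unpacks into one argument per character, while a one-element tuple passes the whole string through (the port value below is just an example):

from threading import Thread

def report(port):
    print("got a single argument: %r" % port)

port = "/dev/ttyUSB0"

# args=(port) is just the string itself, so it unpacks into 12 separate
# arguments -- one per character -- and report() raises a TypeError:
# Thread(target=report, args=(port)).start()

# args=(port,) is a one-element tuple, so report() receives the whole string:
Thread(target=report, args=(port,)).start()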

Sending a Raspberry Pi Datastream to xively

I have my RPi connected to a digital scale via RS232, using a MAX3232 serial port / TTL module connected to the GPIO. I successfully collected the weight data streaming from the scale using this code (data comes in blocks of 20 and the weight is at 11 - 14). This prints it out locally in the terminal:
import serial
import time

ser = serial.Serial("/dev/ttyAMA0")
read = ser.read(20)

def get_num(read):
    return float(''.join(ele for ele in read if ele.isdigit() or ele == '.'))

while True:
    print(get_num(read))
    time.sleep(2)
This works great, but my Python code to send the datastream to Xively isn't so good.
def get_num(read):
    return float(''.join(ele for ele in read if ele.isdigit() or ele == '.'))

# function to return a datastream object. This either creates a new datastream,
# or returns an existing one
def get_datastream(feed):
    try:
        datastream = feed.datastreams.get("(get_num(read))")
        return datastream
    except:
        datastream = feed.datastreams.create("(get_num(read))", tags="weight")
        return datastream

def run():
    feed = api.feeds.get(FEED_ID)
    datastream = get_datastream(feed)
    datastream.max_value = None
    datastream.min_value = None
    while True:
        weight = get_num(read)
        datastream.current_value = weight
        datastream.at = datetime.datetime.utcnow()
        try:
            datastream.update()
        except requests.HTTPError as e:
            print "HTTPError({0}): {1}".format(e.errno, e.strerror)
        time.sleep(5)

run()
It checks out OK in IDLE - but when run it generates an error as follows:
Traceback (most recent call last):
File "xivelycodescales.py", line 51, in <module>
run()
File "xivelycodescales.py", line 36, in run
datastream = get_datastream(feed)
File "xivelycodescales.py", line 32, in get_datastream
datastream = feed.datastreams.create("(get_num(read))")
File "/usr/local/lib/python2.7/dist-packages/xively/managers.py", line 416, in create
response.raise_for_status()
File "/usr/local/lib/python2.7/dist-packages/requests/models.py", line 773, in raise_for_status
raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 422 Client Error:
I can see what the error type is, and I had good help at the RPi forums, but I can't figure out where I've gone wrong.
Many thanks
