Why isn't trollius making this loop run asynchronously?

I am trying to write some simple loops to control objects in Pygazebo, but alas it only ever calls the method once and then the loop appears to block.
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 2 12:52:50 2015

@author: skylion
"""
import trollius  # NOTE: Trollius requires protobuf from Google
from trollius import From

import pygazebo
import pygazebo.msg.joint_cmd_pb2
import time


def apply_joint_force(world_name, robot_name, joint_name, force, duration=-1):
    @trollius.coroutine
    def joint_force_loop():
        manager = yield From(pygazebo.connect())
        print("connected")
        publisher = yield From(
            manager.advertise('/gazebo/' + world_name + '/' + robot_name + '/joint_cmd',
                              'gazebo.msgs.JointCmd'))

        message = pygazebo.msg.joint_cmd_pb2.JointCmd()
        message.name = robot_name + '::' + joint_name  # format should be: name_of_robot + '::name_of_joint'
        message.force = force

        #t_end = time.time() + duration  # The time that you want the controller to stop
        while True:  # time.time() < t_end or duration == -1:
            try:
                yield From(publisher.publish(message))
                yield From(trollius.sleep(1.0))
            except:
                pass
                #Nothing
        print("Connection closed")

    wait_net_service('localhost', 11345)
    loop = trollius.new_event_loop()
    loop.run_until_complete(joint_force_loop())
    raise
def wait_net_service(server, port, timeout=None):
    """ Wait for network service to appear
        @param timeout: in seconds, if None or 0 wait forever
        @return: True or False, if timeout is None may return only True or
                 throw unhandled network exception
    """
    import socket
    import errno

    s = socket.socket()
    if timeout:
        from time import time as now
        # time module is needed to calc timeout shared between two exceptions
        end = now() + timeout

    while True:
        try:
            if timeout:
                next_timeout = end - now()
                if next_timeout < 0:
                    return False
                else:
                    s.settimeout(next_timeout)

            s.connect((server, port))
            time.sleep(1)

        except socket.timeout, err:
            # this exception occurs only if timeout is set
            if timeout:
                return False

        except socket.error, err:
            # catch timeout exception from underlying network library
            # this one is different from socket.timeout
            if type(err.args) != tuple or (err[0] != errno.ETIMEDOUT and err[0] != errno.ECONNREFUSED):
                raise err
        else:
            s.close()
            return True
I thought @coroutines were supposed to be wrapped asynchronously? Do I just misunderstand the use of this code? Or am I doing something else wrong? This is my first time with concurrency in Python, by the way.
Also this is how I am calling that function:
counter = 0
for joint_def in self.all_joint_props:
    print("each joint_def")
    apply_joint_force(world_name, robot_name, "hingejoint" + str(counter), joint_def[2])
    #print("Applying joint force")
Any idea why it keeps blocking the thread? Should I be using a different approach to this? Any help would be appreciated.

So, the answer is quite simple really. You have to queue up the multiple trollius.Task objects you want to run in a list before starting the loop, and combine that with trollius.wait() to achieve this. That way no single coroutine blocks the others.
Here is my code so far:
tasks = []
for joint_name in joint_names:
    tasks.append(trollius.Task(joint_force_loop(world_name, robot_name, joint_name, force, duration)))

loop = trollius.get_event_loop()
loop.run_until_complete(trollius.wait(tasks))
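This assumes joint_force_loop has been refactored out of apply_joint_force into a standalone coroutine that takes its parameters directly. A minimal sketch of what that might look like, based on the code in the question:

import trollius
from trollius import From
import pygazebo
import pygazebo.msg.joint_cmd_pb2

@trollius.coroutine
def joint_force_loop(world_name, robot_name, joint_name, force, duration=-1):
    # Connect and advertise the joint_cmd topic for this robot.
    manager = yield From(pygazebo.connect())
    publisher = yield From(manager.advertise(
        '/gazebo/' + world_name + '/' + robot_name + '/joint_cmd',
        'gazebo.msgs.JointCmd'))

    message = pygazebo.msg.joint_cmd_pb2.JointCmd()
    message.name = robot_name + '::' + joint_name
    message.force = force

    # Keep publishing once a second; duration handling is omitted here,
    # as in the question's commented-out code.
    while True:
        yield From(publisher.publish(message))
        yield From(trollius.sleep(1.0))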

Related

Winrt API Python getting Bluetooth signal strength

I am trying to measure Bluetooth signal strength using the WinRT API in Python via winsdk. My workflow is to measure the Bluetooth signal strength of a device that is already connected to my Windows machine. I followed the guideline from the Pywinrt documentation found here:
Here is my code snippet:
import asyncio
import winsdk.windows.devices.enumeration as e
import winsdk.windows.devices.bluetooth as bl


async def scan():
    sig_strength = "System.Devices.Aep.SignalStrength"
    additionalProperties = [sig_strength]
    watcher = e.DeviceInformation.create_watcher(bl.BluetoothDevice.get_device_selector(), additionalProperties)
    received_queue = asyncio.Queue()

    def added_w(device_watcher, device_info_update):
        if(device_info_update.name == "my_device"):
            print("found!")
            for value, key in enumerate(device_info_update.properties):
                if key == "System.Devices.Aep.SignalStrength":
                    print("signal strength: {}".format(value))

    def updated_w(device_watcher, device_info_update):
        print("update for {} with kind {}".format(device_info_update.id, device_info_update.kind))

    def removed_w(device_watcher, device_info_update):
        pass

    def stopped_w(device_watcher, device_info_update):
        pass

    received_token = watcher.add_added(
        lambda s, e: event_loop.call_soon_threadsafe(added_w, s, e)
    )
    updated_token = watcher.add_updated(
        lambda s, e: event_loop.call_soon_threadsafe(updated_w, s, e)
    )
    removed_token = watcher.add_removed(
        lambda s, e: event_loop.call_soon_threadsafe(removed_w, s, e)
    )

    event_loop = asyncio.get_running_loop()
    stopped_future = event_loop.create_future()

    def handle_stopped(sender, event_args):
        stopped_future.set_result(event_args)

    try:
        print("scanning...")
        watcher.start()

        # this is the consumer for the received event queue
        async def print_received():
            while True:
                event_args = await received_queue.get()
                print(
                    "received:",
                    event_args.bluetooth_address.to_bytes(6, "big").hex(":"),
                    event_args.raw_signal_strength_in_d_bm, "dBm",
                )

        printer_task = asyncio.create_task(print_received())

        # since the print task is an infinite loop, we have to cancel it when we don't need it anymore
        stopped_future.add_done_callback(printer_task.cancel)

        # scan for 30 seconds or until an unexpected stopped event (due to error)
        done, pending = await asyncio.wait(
            [stopped_future, printer_task], timeout=30, return_when=asyncio.FIRST_COMPLETED
        )

        if stopped_future in done:
            print("unexpected stopped event", stopped_future.result().error)
        else:
            print("stopping...")
            watcher.stop()
            await stopped_future
    finally:
        # event handlers are removed in a finally block to ensure we don't leak
        watcher.remove_received(received_token)
        watcher.remove_stopped(handle_stopped)


asyncio.run(scan())
However, I only get a fixed RSSI value of 8 in my print in the added_w function.
Any help on potential solution would be greatly appreciated!
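Note that enumerate() pairs each element with its index, so value in added_w is just the position of the key in the properties map rather than the RSSI, which would explain the constant 8. A hedged sketch of reading the requested AEP property instead (this assumes the projected IMapView exposes WinRT's Lookup/HasKey as snake_case lookup()/has_key(), as pywinrt projections normally do):

def added_w(device_watcher, device_info):
    # device_info.properties holds the AQS properties requested via additionalProperties
    if device_info.name == "my_device":
        props = device_info.properties
        if props.has_key(sig_strength):        # IMapView.HasKey
            rssi = props.lookup(sig_strength)  # IMapView.Lookup
            print("signal strength: {}".format(rssi))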

How to add configuration setting for sasl.mechanism PLAIN (API) and GSSAPI (Kerberos) authentication in python script

Need some help to set the configuration for sasl.mechanism PLAIN (API) and GSSAPI (Kerberos) authentication.
We are using Confluent Kafka here. There are two scripts: one is a Python script and the second one is a bash script which calls the Python one. You can find the Python script below.
Thanks for the help in advance!
import json
import os
import string
import random
import socket
import uuid
import re
from datetime import datetime
import time
import hashlib
import math
import sys
from functools import cache
from confluent_kafka import Producer, KafkaError, KafkaException

topic_name = os.environ['TOPIC_NAME']
partition_count = int(os.environ['PARTITION_COUNT'])
message_key_template = json.loads(os.environ['KEY_TEMPLATE'])
message_value_template = json.loads(os.environ['VALUE_TEMPLATE'])
message_header_template = json.loads(os.environ['HEADER_TEMPLATE'])
bootstrap_servers = os.environ['BOOTSTRAP_SERVERS']
perf_counter_batch_size = int(os.environ.get('PERF_COUNTER_BATCH_SIZE', 100))
messages_per_aggregate = int(os.environ.get('MESSAGES_PER_AGGREGATE', 1))
max_message_count = int(os.environ.get('MAX_MESSAGE_COUNT', sys.maxsize))


def error_cb(err):
    """ The error callback is used for generic client errors. These
        errors are generally to be considered informational as the client will
        automatically try to recover from all errors, and no extra action
        is typically required by the application.
        For this example however, we terminate the application if the client
        is unable to connect to any broker (_ALL_BROKERS_DOWN) and on
        authentication errors (_AUTHENTICATION). """
    print("Client error: {}".format(err))
    if err.code() == KafkaError._ALL_BROKERS_DOWN or \
            err.code() == KafkaError._AUTHENTICATION:
        # Any exception raised from this callback will be re-raised from the
        # triggering flush() or poll() call.
        raise KafkaException(err)


def acked(err, msg):
    if err is not None:
        print("Failed to send message: %s: %s" % (str(msg), str(err)))


producer_configs = {
    'bootstrap.servers': bootstrap_servers,
    'client.id': socket.gethostname(),
    'error_cb': error_cb
}
# TODO: Need to support sasl.mechanism PLAIN (API) and GSSAPI (Kerberos) authentication.
# TODO: Need to support truststores for connecting to private DCs.

producer = Producer(producer_configs)


# generates a random value if it is not cached in the template_values dictionary
def get_templated_value(term, template_values):
    if not term in template_values:
        template_values[term] = str(uuid.uuid4())
    return template_values[term]


def fill_template_value(value, template_values):
    str_value = str(value)
    template_regex = '{{(.+?)}}'
    templated_terms = re.findall(template_regex, str_value)
    for term in templated_terms:
        str_value = str_value.replace(f"{{{{{term}}}}}", get_templated_value(term, template_values))
    return str_value


def fill_template(template, templated_terms):
    # TODO: Need to address metadata field, as it's treated as a string instead of a nested object.
    return {field: fill_template_value(value, templated_terms) for field, value in template.items()}


@cache
def get_partition(lock_id):
    bits = 128
    bucket_size = 2**bits / partition_count
    partition = (int(hashlib.md5(lock_id.encode('utf-8')).hexdigest(), 16) / bucket_size)
    return math.floor(partition)


sequence_number = int(time.time() * 1000)
sequence_number = 0
message_count = 0
producing = True
start_time = time.perf_counter()
aggregate_message_counter = 0
# cache for templated term values so that they match across the different templates
templated_values = {}

try:
    while producing:
        sequence_number += 1
        aggregate_message_counter += 1
        message_count += 1
        if aggregate_message_counter % messages_per_aggregate == 0:
            # reset templated values
            templated_values = {}
        else:
            for term in list(templated_values):
                if term not in ['aggregateId', 'tenantId']:
                    del(templated_values[term])

        # Fill in templated field values
        message_key = fill_template(message_key_template, templated_values)
        message_value = fill_template(message_value_template, templated_values)
        message_header = fill_template(message_header_template, templated_values)

        ts = datetime.utcnow().isoformat()[:-3]+'Z'

        message_header['timestamp'] = ts
        message_header['sequence_number'] = str(sequence_number)

        message_value['timestamp'] = ts
        message_value['sequenceNumber'] = sequence_number

        lock_id = message_header['lock_id']
        partition = get_partition(lock_id)  # partition by lock_id, since key could be random, but a given aggregate_id should ALWAYS resolve to the same partition, regardless of key.

        # Send message
        producer.produce(topic_name, partition=partition, key=json.dumps(message_key), value=json.dumps(message_value), headers=message_header, callback=acked)

        if sequence_number % perf_counter_batch_size == 0:
            producer.flush()

            end_time = time.perf_counter()
            total_duration = end_time - start_time

            messages_per_second = (perf_counter_batch_size / total_duration)
            print(f'{messages_per_second} messages/second')

            # reset start time
            start_time = time.perf_counter()

        if message_count >= max_message_count:
            break
except Exception as e:
    print(f'ERROR: %s' % e)
    sys.exit(1)
finally:
    producer.flush()
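For the SASL part of the TODOs specifically, librdkafka-based clients such as confluent-kafka-python accept the SASL and TLS settings as plain entries in the same configuration dictionary that is passed to Producer(). A hedged sketch using librdkafka's documented property names; the usernames, principals, keytab path and CA location below are placeholders, not values from this setup:

# Sketch only: property names come from librdkafka's configuration reference;
# all values here are placeholders and would be merged into producer_configs.
sasl_plain_configs = {
    'bootstrap.servers': bootstrap_servers,
    'security.protocol': 'SASL_SSL',
    'sasl.mechanism': 'PLAIN',
    'sasl.username': 'my-api-key',
    'sasl.password': 'my-api-secret',
    'ssl.ca.location': '/path/to/ca.pem',  # truststore equivalent (PEM CA bundle)
}

sasl_gssapi_configs = {
    'bootstrap.servers': bootstrap_servers,
    'security.protocol': 'SASL_SSL',
    'sasl.mechanism': 'GSSAPI',
    'sasl.kerberos.service.name': 'kafka',
    'sasl.kerberos.principal': 'client@EXAMPLE.COM',
    'sasl.kerberos.keytab': '/path/to/client.keytab',
    'ssl.ca.location': '/path/to/ca.pem',
}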

Multiprocessing function not writing to file or printing

I'm working on a Raspberry Pi (3 B+) making a data collection device and I'm
trying to spawn a process to record the data coming in and write it to a file. I have a function for the writing that works fine when I call it directly.
When I call it using the multiprocess approach however, nothing seems to happen. I can see in task monitors in Linux that the process does in fact get spawned but no file gets written, and when I try to pass a flag to it to shut down it doesn't work, meaning I end up terminating the process and nothing seems to have happened.
I've been over this every which way and can't see what I'm doing wrong; does anyone else? In case it's relevant, these are functions inside a parent class, and one of the functions is meant to spawn another as a thread.
Code I'm using:
from datetime import datetime, timedelta
import csv
from drivers.IMU_SEN0 import IMU_SEN0
import multiprocessing, os


class IMU_data_logger:
    _output_filename = ''
    _csv_headers = []
    _accelerometer_headers = ['Accelerometer X', 'Accelerometer Y', 'Accelerometer Z']
    _gyroscope_headers = ['Gyroscope X', 'Gyroscope Y', 'Gyroscope Z']
    _magnetometer_headers = ['Bearing']
    _log_accelerometer = False
    _log_gyroscope = False
    _log_magnetometer = False
    IMU = None
    _writer = []
    _run_underway = False
    _process = []
    _stop_value = 0

    def __init__(self, output_filename='/home/pi/blah.csv', log_accelerometer=True, log_gyroscope=True, log_magnetometer=True):
        """data logging device
        NOTE! Multiple instances of this class should not use the same IMU devices simultaneously!"""
        self._output_filename = output_filename
        self._log_accelerometer = log_accelerometer
        self._log_gyroscope = log_gyroscope
        self._log_magnetometer = log_magnetometer

    def __del__(self):
        # TODO Update this
        if self._run_underway:  # If there's still a run underway, end it first
            self.end_recording()

    def _set_up(self):
        self.IMU = IMU_SEN0(self._log_accelerometer, self._log_gyroscope, self._log_magnetometer)
        self._set_up_headers()

    def _set_up_headers(self):
        """Set up the headers of the CSV file based on the header substrings at top and the input flags on what will be measured"""
        self._csv_headers = []
        if self._log_accelerometer is not None:
            self._csv_headers += self._accelerometer_headers
        if self._log_gyroscope is not None:
            self._csv_headers += self._gyroscope_headers
        if self._log_magnetometer is not None:
            self._csv_headers += self._magnetometer_headers

    def _record_data(self, frequency, stop_value):
        self._set_up()  # Run setup in thread
        """Record data function, which takes a recording frequency, in hertz, as an input"""
        previous_read_time = datetime.now() - timedelta(1, 0, 0)
        self._run_underway = True  # Note that a run is now going
        Period = 1 / frequency  # Period, in seconds, of a recording based on the input frequency
        print("Writing output data to", self._output_filename)
        with open(self._output_filename, 'w', newline='') as outcsv:
            self._writer = csv.writer(outcsv)
            self._writer.writerow(self._csv_headers)  # Write headers to file
            while stop_value.value == 0:  # While a run continues
                if datetime.now() - previous_read_time >= timedelta(0, 1, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    print("run underway value", self._run_underway)
                if datetime.now() - previous_read_time >= timedelta(0, Period, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    previous_read_time = datetime.now()  # Update previous readtime
                    next_row = []
                    if self._log_accelerometer:
                        # Get values in m/s^2
                        axes = self.IMU.read_accelerometer_values()
                        next_row += [axes['x'], axes['y'], axes['z']]
                    if self._log_gyroscope:
                        # Read gyro values
                        gyro = self.IMU.read_gyroscope_values()
                        next_row += [gyro['x'], gyro['y'], gyro['z']]
                    if self._log_magnetometer:
                        # Read magnetometer value
                        b = self.IMU.read_magnetometer_bearing()
                        next_row += b
                    self._writer.writerow(next_row)
        # Close the csv when done
        outcsv.close()

    def start_recording(self, frequency_in_hz):
        # Create recording process
        self._stop_value = multiprocessing.Value('i', 0)
        self._process = multiprocessing.Process(target=self._record_data, args=(frequency_in_hz, self._stop_value))
        # Start recording process
        self._process.start()
        print(datetime.now().strftime("%H:%M:%S.%f"), "Data logging process spawned")
        print("Logging Accelerometer:", self._log_accelerometer)
        print("Logging Gyroscope:", self._log_gyroscope)
        print("Logging Magnetometer:", self._log_magnetometer)
        print("ID of data logging process: {}".format(self._process.pid))

    def end_recording(self, terminate_wait=2):
        """Function to end the recording multithread that's been spawned.
        Args: terminate_wait: This is the time, in seconds, to wait after attempting to shut down the process before terminating it."""
        # Get process id
        id = self._process.pid
        # Set stop event for process
        self._stop_value.value = 1
        self._process.join(terminate_wait)  # Wait two seconds for the process to terminate
        if self._process.is_alive():  # If it's still alive after waiting
            self._process.terminate()
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "needed to be terminated.")
        else:
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "successfully ended itself.")
====================================================================
ANSWER: For anyone following up here, it turns out the problem was my use of the VS Code debugger which apparently doesn't work with multiprocessing and was somehow preventing the success of the spawned process. Many thanks to Tomasz Swider below for helping me work through issues and, eventually, find my idiocy. The help was very deeply appreciated!!
I can see a few things wrong in your code:
First, stop_value == 0 will not work, as the multiprocessing.Value('i', 0) object itself is never equal to 0; change that line to
while stop_value.value == 0
Second, you never update previous_read_time, so it will write the readings as fast as it can and you will run out of disk space quickly.
Third, try using time.sleep(); what you are doing is called busy looping, and it is bad because it wastes CPU cycles needlessly (see the short sketch after this list).
Fourth, terminating with self._stop_value = 1 probably will not work; there must be another way to set that value, perhaps self._stop_value.value = 1.
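To illustrate the busy-looping point, here is a small sketch (read_row is a hypothetical callable standing in for one sensor read plus one CSV write): sleep away the remainder of each period instead of spinning.

import time

def paced_loop(frequency_hz, stop_value, read_row):
    period = 1.0 / frequency_hz
    while stop_value.value == 0:
        started = time.monotonic()
        read_row()  # take one reading / write one CSV row
        elapsed = time.monotonic() - started
        if elapsed < period:
            time.sleep(period - elapsed)  # yield the CPU until the next sample is due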
Well, here is a piece of example code based on the code that you have provided that is working just fine:
import csv
import multiprocessing
import time
from datetime import datetime, timedelta
from random import randint


class IMU(object):
    @staticmethod
    def read_accelerometer_values():
        return dict(x=randint(0, 100), y=randint(0, 100), z=randint(0, 10))


class Foo(object):
    def __init__(self, output_filename):
        self._output_filename = output_filename
        self._csv_headers = ['xxxx', 'y', 'z']
        self._log_accelerometer = True
        self.IMU = IMU()

    def _record_data(self, frequency, stop_value):
        # self._set_up()  # Run setup functions for the data collection device and store it in the self.IMU variable
        """Record data function, which takes a recording frequency, in hertz, as an input"""
        previous_read_time = datetime.now() - timedelta(1, 0, 0)
        self._run_underway = True  # Note that a run is now going
        Period = 1 / frequency  # Period, in seconds, of a recording based on the input frequency
        print("Writing output data to", self._output_filename)
        with open(self._output_filename, 'w', newline='') as outcsv:
            self._writer = csv.writer(outcsv)
            self._writer.writerow(self._csv_headers)  # Write headers to file
            while stop_value.value == 0:  # While a run continues
                if datetime.now() - previous_read_time >= timedelta(0, 1, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    print("run underway value", self._run_underway)
                if datetime.now() - previous_read_time >= timedelta(0, Period, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    next_row = []
                    if self._log_accelerometer:
                        # Get values in m/s^2
                        axes = self.IMU.read_accelerometer_values()
                        next_row += [axes['x'], axes['y'], axes['z']]
                    previous_read_time = datetime.now()
                    self._writer.writerow(next_row)
        # Close the csv when done
        outcsv.close()

    def start_recording(self, frequency_in_hz):
        # Create recording process
        self._stop_value = multiprocessing.Value('i', 0)
        self._process = multiprocessing.Process(target=self._record_data, args=(frequency_in_hz, self._stop_value))
        # Start recording process
        self._process.start()
        print(datetime.now().strftime("%H:%M:%S.%f"), "Data logging process spawned")
        print("ID of data logging process: {}".format(self._process.pid))

    def end_recording(self, terminate_wait=2):
        """Function to end the recording multithread that's been spawned.
        Args: terminate_wait: This is the time, in seconds, to wait after attempting to shut down the process before terminating it."""
        # Get process id
        id = self._process.pid
        # Set stop event for process
        self._stop_value.value = 1
        self._process.join(terminate_wait)  # Wait two seconds for the process to terminate
        if self._process.is_alive():  # If it's still alive after waiting
            self._process.terminate()
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "needed to be terminated.")
        else:
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "successfully ended itself.")


if __name__ == '__main__':
    foo = Foo('/tmp/foometer.csv')
    foo.start_recording(20)
    time.sleep(5)
    print('Ending recording')
    foo.end_recording()

is there any operation like socket.recv_into in python 3 asyncio?

The socket module has a socket.recv_into method, so it can use a user-provided buffer (like a bytearray) for zero-copy receives. But it seems BaseEventLoop has no method like that. Is there a way to use something like socket.recv_into in asyncio?
The low-level socket operations defined for BaseEventLoop require a socket.socket object to be passed in, e.g. BaseEventLoop.sock_recv(sock, nbytes). So, given that you have a socket.socket, you could call sock.recv_into(). Whether it is a good idea to do that is another question.
You may implement your own asyncio transport which utilizes the .recv_into() function, but yes, for now asyncio has no way to use .recv_into() out of the box.
Personally, I doubt there is a very big speedup: when you develop in C, zero-copy is extremely important, but for high-level languages like Python the benefits are much smaller.
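Before Python 3.7 (see the update below), one way to act on the first suggestion is to wire recv_into() up yourself with the loop's reader callbacks. A rough sketch, assuming a non-blocking socket and ignoring retries on spurious wake-ups:

import asyncio

async def recv_into_async(loop, sock, buf):
    """Wait until sock is readable, then read into buf (sketch, pre-3.7 asyncio)."""
    fut = loop.create_future()

    def on_readable():
        loop.remove_reader(sock.fileno())
        try:
            nbytes = sock.recv_into(buf)  # plain socket call once the fd is readable
        except Exception as exc:
            fut.set_exception(exc)
        else:
            fut.set_result(nbytes)

    loop.add_reader(sock.fileno(), on_readable)
    return await fut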
Update: Starting with Python 3.7.0, which is in alpha release as I write this, the standard library's asyncio module documents AbstractEventLoop.sock_recv_into().
Edit: expanding answer as requested...
A call to asyncio's sock_recv_into() typically looks like:
byte_count = await loop.sock_recv_into(sock, buff)
The buff is a mutable object that implements Python's buffer protocol, examples of which include a bytearray and a memoryview on a bytearray. The code below demonstrates receiving into a bytearray using a memoryview.
Working demo code for asyncio sockets necessarily includes a bunch of scaffolding to set up both sides of the connections and run the event loop. The point here is the use of asyncio's sock_recv_into() in the sock_read_exactly() co-routine below.
#!/usr/bin/env python3
"""Demo the asyncio module's sock_recv_into() facility."""

import sys
assert sys.version_info[:2] >= (3, 7), (
    'asyncio sock_recv_into() new in Python 3.7')

import socket
import asyncio


def local_echo_server(port=0):
    """Trivial threaded echo server with sleep delay."""
    import threading
    import time
    import random

    ssock = socket.socket()
    ssock.bind(('127.0.0.1', port))
    _, port = ssock.getsockname()
    ssock.listen(5)

    def echo(csock):
        while True:
            data = csock.recv(8192)
            if not data:
                break
            time.sleep(random.random())
            csock.sendall(data)
        csock.shutdown(1)

    def serve():
        while True:
            csock, client_addr = ssock.accept()
            tclient = threading.Thread(target=echo, args=(csock,), daemon=True)
            tclient.start()

    tserve = threading.Thread(target=serve, daemon=True)
    tserve.start()
    return port


N_COROS = 100
nrunning = 0


async def sock_read_exactly(sock, size, loop=None):
    "Read and return size bytes from sock in event-loop loop."
    if loop is None: loop = asyncio.get_event_loop()
    bytebuff = bytearray(size)
    sofar = 0
    while sofar < size:
        memview = memoryview(bytebuff)[sofar:]
        nread = await loop.sock_recv_into(sock, memview)
        print('socket', sock.getsockname(), 'read %d bytes' % nread)
        if not nread:
            raise RuntimeError('Unexpected socket shutdown.')
        sofar += nread
    return bytebuff


async def echo_client(port):
    "Send random data to echo server and test that we get back the same."
    from os import urandom
    global nrunning
    loop = asyncio.get_event_loop()
    sock = socket.socket()
    sock.setblocking(False)
    await loop.sock_connect(sock, ('127.0.0.1', port))
    for size in [1, 64, 1024, 55555]:
        sending = urandom(size)
        await loop.sock_sendall(sock, sending)
        received = await sock_read_exactly(sock, size)
        assert received == sending
    nrunning -= 1
    if not nrunning:
        loop.stop()


if __name__ == '__main__':
    port = local_echo_server()
    print('port is', port)
    loop = asyncio.get_event_loop()
    for _ in range(N_COROS):
        loop.create_task(echo_client(port))
        nrunning += 1
    print('Start loop.')
    loop.run_forever()

Watchdog compatibility: A workaround for "CancelIoEx"

Using the Python watchdog file-system events watching library, I noticed that when it is used under Windows Server 2003 it enters "polling mode", thus stopping the use of asynchronous OS notifications and, therefore, heavily reducing system performance under a large number of file changes.
I traced the problem to the watchdog/observers/winapi.py file, where the CancelIoEx system call is used in order to unblock the pending ReadDirectoryChangesW call when the user wants to stop monitoring the watched directory or file:
(winapi.py)
CancelIoEx = ctypes.windll.kernel32.CancelIoEx
CancelIoEx.restype = ctypes.wintypes.BOOL
CancelIoEx.errcheck = _errcheck_bool
CancelIoEx.argtypes = (
    ctypes.wintypes.HANDLE,     # hObject
    ctypes.POINTER(OVERLAPPED)  # lpOverlapped
)

...
...
...

def close_directory_handle(handle):
    try:
        CancelIoEx(handle, None)  # force ReadDirectoryChangesW to return
    except WindowsError:
        return
The problem with the CancelIoEx call is that it is not available until Windows Server 2008:
http://msdn.microsoft.com/en-us/library/windows/desktop/aa363792(v=vs.85).aspx
One possible alternative is to change close_directory_handle in order to make it create a mock file within the monitored directory, thus unlocking the thread waiting for ReadDirectoryChangesW to return.
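A hedged sketch of that mock-file idea (the watched_paths mapping and the temporary file naming are made up for illustration and are not part of watchdog's API):

import os
import tempfile

# Hypothetical: map each directory handle to the path it watches so we can
# drop a throwaway file there when we want the blocked call to return.
watched_paths = {}

def close_directory_handle(handle):
    try:
        path = watched_paths.pop(handle)
        # Creating (and immediately removing) a file inside the watched tree
        # generates a change event, so the pending ReadDirectoryChangesW returns.
        fd, tmp = tempfile.mkstemp(dir=path, prefix='.unblock-')
        os.close(fd)
        os.remove(tmp)
    except (KeyError, OSError):
        return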
However, I noticed that CancelIo system call is in fact available in Windows Server 2003:
Cancels all pending input and output (I/O) operations that are issued
by the calling thread for the specified file. The function does not
cancel I/O operations that other threads issue for a file handle. To
cancel I/O operations from another thread, use the CancelIoEx
function.
But calling CancelIo won't affect the waiting thread.
Do you have any idea how to solve this problem?
Maybe threading.enumerate() could be used to issue a signal to be handled by each thread, with CancelIo called from those handlers?
The natural approach is to implement a completion routine and call ReadDirectoryChangesW using its overlapped mode. The following example shows the way to do that:
RDCW_CALLBACK_F = ctypes.WINFUNCTYPE(None, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD, ctypes.POINTER(OVERLAPPED))
First, create a WINFUNCTYPE factory which will be used to generate C-like functions (callable from the Windows API) from Python methods. In this case there is no return value and three parameters, corresponding to the
VOID CALLBACK FileIOCompletionRoutine(
    _In_    DWORD        dwErrorCode,
    _In_    DWORD        dwNumberOfBytesTransfered,
    _Inout_ LPOVERLAPPED lpOverlapped
);
FileIOCompletionRoutine header.
The callback reference as well as the overlapped structure need to be added to ReadDirectoryChangesW arguments list:
ReadDirectoryChangesW = ctypes.windll.kernel32.ReadDirectoryChangesW
ReadDirectoryChangesW.restype = ctypes.wintypes.BOOL
ReadDirectoryChangesW.errcheck = _errcheck_bool
ReadDirectoryChangesW.argtypes = (
ctypes.wintypes.HANDLE, # hDirectory
LPVOID, # lpBuffer
ctypes.wintypes.DWORD, # nBufferLength
ctypes.wintypes.BOOL, # bWatchSubtree
ctypes.wintypes.DWORD, # dwNotifyFilter
ctypes.POINTER(ctypes.wintypes.DWORD), # lpBytesReturned
ctypes.POINTER(OVERLAPPED), # lpOverlapped
RDCW_CALLBACK_F # FileIOCompletionRoutine # lpCompletionRoutine
)
From here, we are ready to perform the overlapped system call.
This is a simple callback, just useful to test that everything works fine:
def dir_change_callback(dwErrorCode, dwNumberOfBytesTransfered, p):
    print("dir_change_callback! PID:" + str(os.getpid()))
    print("CALLBACK THREAD: " + str(threading.currentThread()))
Prepare and perform the call:
event_buffer = ctypes.create_string_buffer(BUFFER_SIZE)
nbytes = ctypes.wintypes.DWORD()
overlapped_read_dir = OVERLAPPED()
call2pass = RDCW_CALLBACK_F(dir_change_callback)
hand = get_directory_handle(os.path.abspath("/test/"))

def docall():
    ReadDirectoryChangesW(hand, ctypes.byref(event_buffer),
                          len(event_buffer), False,
                          WATCHDOG_FILE_NOTIFY_FLAGS,
                          ctypes.byref(nbytes),
                          ctypes.byref(overlapped_read_dir), call2pass)
    print("Waiting!")

docall()
If you load and execute all this code in a DreamPie interactive shell, you can check that the system call is made and that the callback executes, printing the thread and PID numbers after the first change made under the c:\test directory. Besides, you will notice those are the same as the main thread and process: despite the event being raised by a separate thread, the callback runs in the same process and thread as our main program, thus giving undesired behaviour:
lck = threading.Lock()

def dir_change_callback(dwErrorCode, dwNumberOfBytesTransfered, p):
    print("dir_change_callback! PID:" + str(os.getpid()))
    print("CALLBACK THREAD: " + str(threading.currentThread()))
    ...
    ...
    ...

lck.acquire()
print("Waiting!")
docall()
lck.acquire()
This program will lock the main thread and the callback will never execute.
I tried many synchronization tools, even Windows API semaphores, always getting the same behaviour, so finally I decided to implement the asynchronous call using the synchronous configuration of ReadDirectoryChangesW within a separate process, managed and synchronized using the multiprocessing Python library:
Calls to get_directory_handle won't return the handle number given by the Windows API but one managed by the winapi library; for that, I implemented a handle generator:
class FakeHandleFactory():
    _hl = threading.Lock()
    _next = 0

    @staticmethod
    def next():
        FakeHandleFactory._hl.acquire()
        ret = FakeHandleFactory._next
        FakeHandleFactory._next += 1
        FakeHandleFactory._hl.release()
        return ret
Each generated handle has to be globally associated with a file system path:
handle2file = {}
Each call to read_directory_changes will now generate a ReadDirectoryRequest object (derived from multiprocessing.Process):
class ReadDirectoryRequest(multiprocessing.Process):

    def _perform_and_wait4request(self, path, recursive, event_buffer, nbytes):
        hdl = CreateFileW(path, FILE_LIST_DIRECTORY, WATCHDOG_FILE_SHARE_FLAGS,
                          None, OPEN_EXISTING, WATCHDOG_FILE_FLAGS, None)
        #print("path: " + path)
        aux_buffer = ctypes.create_string_buffer(BUFFER_SIZE)
        aux_n = ctypes.wintypes.DWORD()
        #print("_perform_and_wait4request! PID:" + str(os.getpid()))
        #print("CALLBACK THREAD: " + str(threading.currentThread()) + "\n----------")
        try:
            ReadDirectoryChangesW(hdl, ctypes.byref(aux_buffer),
                                  len(event_buffer), recursive,
                                  WATCHDOG_FILE_NOTIFY_FLAGS,
                                  ctypes.byref(aux_n), None, None)
        except WindowsError as e:
            print("!" + str(e))
            if e.winerror == ERROR_OPERATION_ABORTED:
                nbytes = 0
                event_buffer = []
            else:
                nbytes = 0
                event_buffer = []
        # Python 2/3 compat
        nbytes.value = aux_n.value
        for i in xrange(self.int_class(aux_n.value)):
            event_buffer[i] = aux_buffer[i]
        CloseHandle(hdl)
        try:
            self.lck.release()
        except:
            pass

    def __init__(self, handle, recursive):
        buffer = ctypes.create_string_buffer(BUFFER_SIZE)
        self.event_buffer = multiprocessing.Array(ctypes.c_char, buffer)
        self.nbytes = multiprocessing.Value(ctypes.wintypes.DWORD, 0)
        targetPath = handle2file.get(handle, None)
        super(ReadDirectoryRequest, self).__init__(target=self._perform_and_wait4request, args=(targetPath, recursive, self.event_buffer, self.nbytes))
        self.daemon = True
        self.lck = multiprocessing.Lock()
        self.result = None
        try:
            self.int_class = long
        except NameError:
            self.int_class = int
        if targetPath is None:
            self.result = ([], -1)

    def CancelIo(self):
        try:
            self.result = ([], 0)
            self.lck.release()
        except:
            pass

    def read_changes(self):
        #print("read_changes! PID:" + str(os.getpid()))
        #print("CALLBACK THREAD: " + str(threading.currentThread()) + "\n----------")
        if self.result is not None:
            raise Exception("ReadDirectoryRequest object can be used only once!")
        self.lck.acquire()
        self.start()
        self.lck.acquire()
        self.result = (self.event_buffer, self.int_class(self.nbytes.value))
        return self.result
This class subclasses Process, providing a process which performs the system call and waits until either:
A change event has been raised.
The main thread cancels the request by calling the ReadDirectoryRequest object's CancelIo method.
Note that the roles of:
get_directory_handle
close_directory_handle
read_directory_changes
are now to manage requests. For that, thread locks and auxiliary data structures are needed:
rqIndexLck = threading.Lock() # Protects the access to `rqIndex`
rqIndex = {} # Maps handles to request objects sets.
get_directory_handle
def get_directory_handle(path):
    rqIndexLck.acquire()
    ret = FakeHandleFactory.next()
    handle2file[ret] = path
    rqIndexLck.release()
    return ret
close_directory_handle
def close_directory_handle(handle):
    rqIndexLck.acquire()
    rqset4handle = rqIndex.get(handle, None)
    if rqset4handle is not None:
        for rq in rqset4handle:
            rq.CancelIo()
        del rqIndex[handle]
    if handle in handle2file:
        del handle2file[handle]
    rqIndexLck.release()
And last but not least: read_directory_changes
def read_directory_changes(handle, recursive):
    rqIndexLck.acquire()
    rq = ReadDirectoryRequest(handle, recursive)
    set4handle = None
    if handle in rqIndex:
        set4handle = rqIndex[handle]
    else:
        set4handle = set()
        rqIndex[handle] = set4handle
    set4handle.add(rq)
    rqIndexLck.release()
    ret = rq.read_changes()
    rqIndexLck.acquire()
    if rq in set4handle:
        set4handle.remove(rq)
    rqIndexLck.release()
    return ret
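For context, a hedged sketch of how these replacement functions would be driven from the observer's side (illustrative only; the path is a placeholder):

# Sketch: driving the replacement API end to end.
handle = get_directory_handle(r"C:\test")

# Blocks (in a child process) until a change happens or CancelIo is requested.
event_buffer, nbytes = read_directory_changes(handle, recursive=True)
print("change notification of", nbytes, "bytes")

# Cancels any pending request and forgets the handle.
close_directory_handle(handle)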
