Recently I have been working with Pipe and a Raspberry Pi. I am trying to send a signal to my function to kill it, however "pipe.recv" is blocking the function. The signal is sent, but the while loop doesn't get executed.
from multiprocessing import Process, Pipe
import time
import os
import signal

def start(pipe):
    pipe1 = pipe[1].recv()
    while True:
        print('hello world')
        os.kill(pipe1, signal.SIGTERM)

if __name__ == "__main__":
    conn1 = Pipe()
    a = Process(target=start, args=(conn1,))
    a.start()
    time.sleep(5)
    print("TIMES UP")
    conn1[1].send(a.pid)
You are sending and attempting to retrieve the item from the same end of the pipe. Try this instead, where pipe[0] and pipe[1] are named parent and child for readability:
from multiprocessing import Process, Pipe
import time
import os
import signal

def start(child):
    pipe1 = child.recv()
    while True:
        print('hello world')
        os.kill(pipe1, signal.SIGTERM)

if __name__ == "__main__":
    parent, child = Pipe()
    a = Process(target=start, args=(child,))
    a.start()
    time.sleep(5)
    print("TIMES UP")
    parent.send(a.pid)
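If you'd rather not send a SIGTERM at all, a variant is to let the loop check the pipe without blocking, using Connection.poll() with a timeout. A minimal sketch of that approach, with a 'stop' sentinel in place of the pid:

from multiprocessing import Process, Pipe
import time

def start(child):
    while True:
        print('hello world')
        # poll() returns True as soon as data is waiting, so recv() won't block
        if child.poll(timeout=1):
            child.recv()  # consume the sentinel and leave the loop
            break

if __name__ == "__main__":
    parent, child = Pipe()
    a = Process(target=start, args=(child,))
    a.start()
    time.sleep(5)
    print("TIMES UP")
    parent.send('stop')
    a.join()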
I have two independent processes on the same machine in need of IPC. As of now, I have this working solution:
server.py
#!/usr/bin/python3
from multiprocessing.managers import BaseManager
from multiprocessing import Process, Queue

def do_whatever():
    print('function do whatever, triggered by xyz')
    # do something

def start_queue_server(q):
    class QueueManager(BaseManager): pass
    QueueManager.register('get_queue', callable=lambda: q)
    m = QueueManager(address=('', 55555), authkey=b'tuktuktuk')
    s = m.get_server()
    s.serve_forever()

def main():
    queue = Queue()
    proc = Process(target=start_queue_server, args=(queue,))
    proc.start()
    while True:
        command = queue.get()
        print('command from queue:', command)
        if command == 'xyz':
            do_whatever()
        # many more if, elif, else statements

if __name__ == "__main__":
    main()
client.py
#!/usr/bin/python3
from multiprocessing.managers import BaseManager

def communicator(command):
    class QueueManager(BaseManager): pass
    QueueManager.register('get_queue')
    m = QueueManager(address=('', 55555), authkey=b'tuktuktuk')
    m.connect()
    queue = m.get_queue()
    queue.put(command)

def main():
    command = 'xyz'
    communicator(command)

if __name__ == "__main__":
    main()
Is there a more elegant way to call 'do_whatever' than parsing the commands passed on by the queue and then calling the target function?
Can I somehow pass on a reference to 'do_whatever' and call it directly from the client?
How is an answer from the server, e.g. True or False, communicated to the client? I tried passing a shared variable instead of a queue object but failed. Do I need to open another connection using a second socket to pass the answer?
I read the Python documentation but couldn't find more options for unrelated processes. Input would be welcome!
Cheers singultus
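A note on questions 2 and 3: BaseManager can also expose a server-side object directly, so the client calls its methods through a proxy and gets return values such as True or False back without a second socket. A minimal sketch under the same address and authkey, with an illustrative Service class:

# server side
from multiprocessing.managers import BaseManager

class Service:
    def do_whatever(self):
        print('function do whatever, triggered by xyz')
        return True  # method return values are sent back to the caller

service = Service()

class ServiceManager(BaseManager): pass
ServiceManager.register('get_service', callable=lambda: service)
m = ServiceManager(address=('', 55555), authkey=b'tuktuktuk')
m.get_server().serve_forever()

# client side
from multiprocessing.managers import BaseManager

class ServiceManager(BaseManager): pass
ServiceManager.register('get_service')
m = ServiceManager(address=('', 55555), authkey=b'tuktuktuk')
m.connect()
service = m.get_service()      # proxy to the object living in the server
print(service.do_whatever())   # runs on the server; True is printed here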
Finally, I settled on an additional Listener:
server.py
#!/usr/bin/python3
from multiprocessing.managers import BaseManager
from multiprocessing import Process, Queue
from multiprocessing.connection import Client

def do_whatever():
    print('function do whatever, triggered by xyz')
    # do something

def start_queue_server(q):
    class QueueManager(BaseManager): pass
    QueueManager.register('get_queue', callable=lambda: q)
    m = QueueManager(address=('', 55555), authkey=b'tuktuktuk')
    s = m.get_server()
    s.serve_forever()

def talkback(msg, port):
    conn = Client(address=('', port), authkey=b'tuktuktuk')
    conn.send(msg)
    conn.close()

def main():
    queue = Queue()
    proc = Process(target=start_queue_server, args=(queue,))
    proc.start()
    while True:
        command = queue.get()
        print('command from queue:', command)
        if command[0] == 'xyz':
            do_whatever()
            talkback('aaa', command[1])
        # many more if, elif, else statements

if __name__ == "__main__":
    main()
client.py
#!/usr/bin/python3
from multiprocessing.managers import BaseManager
from multiprocessing.connection import Listener

def communicator(command, talkback=False):
    if talkback:
        # listen on a port chosen by the OS; the authkey must match the one
        # used by the server's talkback()
        listener = Listener(address=('', 0), authkey=b'tuktuktuk')
        return_port = listener.address[1]
        command = command + (return_port,)
    class QueueManager(BaseManager): pass
    QueueManager.register('get_queue')
    m = QueueManager(address=('', 55555), authkey=b'tuktuktuk')
    m.connect()
    queue = m.get_queue()
    queue.put(command)
    if talkback:
        conn = listener.accept()
        server_return = conn.recv()
        conn.close()
        listener.close()
        return server_return

def main():
    command = ('xyz',)  # a one-element tuple, so the return port can be appended
    server_return = communicator(command, True)
    print('server returned:', server_return)

if __name__ == "__main__":
    main()
The client opens an available port and starts listening on it. It then sends the command to the server together with the aforementioned port number. The server executes the command, then uses the port number to report back to the client. After receiving the answer, the client closes the port.
I have written an Nmap TCP port scanner in Python, and everything works just fine except that I'm no longer able to see what I'm typing in the terminal.
First things first.
The code:
import argparse, nmap, sys
from threading import *

def initParser():
    parser = argparse.ArgumentParser()
    parser.add_argument("tgtHost", help="Specify target host")
    parser.add_argument("tgtPort", help="Specify target port")
    args = parser.parse_args()
    return (args.tgtHost, args.tgtPort.split(","))

def nmapScan(tgtHost, tgtPorts):
    nm = nmap.PortScanner()
    lock = Semaphore(value=1)
    for tgtPort in tgtPorts:
        t = Thread(target=nmapScanThread, args=(tgtHost, tgtPort, lock, nm))
        t.start()

def nmapScanThread(tgtHost, tgtPort, lock, nm):
    nm.scan(tgtHost, tgtPort)
    state = nm[tgtHost]['tcp'][int(tgtPort)]['state']
    lock.acquire()
    print("Port {} is {}".format(tgtPort, state))
    lock.release()

if __name__ == '__main__':
    (tgtHost, tgtPorts) = initParser()
    nmapScan(tgtHost, tgtPorts)
    sys.exit(0)
So, after I have run the script I don't see what I'm typing in the console anymore, but I can still execute my invisible commands. As you can see, I want to start a thread for each port, simply because I am learning about threading right now.
My assumption is that not all threads are terminated properly, because everything works just fine after I add "t.join()" to the code.
Unfortunately, I couldn't find anything about this issue.
Just like this:
import argparse, nmap, sys
from threading import *

def initParser():
    parser = argparse.ArgumentParser()
    parser.add_argument("tgtHost", help="Specify target host")
    parser.add_argument("tgtPort", help="Specify target port")
    args = parser.parse_args()
    return (args.tgtHost, args.tgtPort.split(","))

def nmapScan(tgtHost, tgtPorts):
    nm = nmap.PortScanner()
    lock = Semaphore(value=1)
    for tgtPort in tgtPorts:
        t = Thread(target=nmapScanThread, args=(tgtHost, tgtPort, lock, nm))
        t.start()
        t.join()

def nmapScanThread(tgtHost, tgtPort, lock, nm):
    nm.scan(tgtHost, tgtPort)
    state = nm[tgtHost]['tcp'][int(tgtPort)]['state']
    lock.acquire()
    print("Port {} is {}".format(tgtPort, state))
    lock.release()

if __name__ == '__main__':
    (tgtHost, tgtPorts) = initParser()
    nmapScan(tgtHost, tgtPorts)
    sys.exit(0)
Is this the proper way to handle the problem, or did I mess things up a bit?
Additionally:
I can't see join() being useful in this example, because joining each thread right after starting it makes no major difference from the same script without threading. (A concurrent alternative is sketched below.)
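A common pattern that keeps the scans concurrent and still waits for all of them before the script returns to the shell is to start every thread first and join them all afterwards. A sketch of nmapScan only; the rest of the script is unchanged:

# drop-in replacement for nmapScan in the script above
def nmapScan(tgtHost, tgtPorts):
    nm = nmap.PortScanner()
    lock = Semaphore(value=1)
    threads = []
    # start all scans first, so they actually run in parallel
    for tgtPort in tgtPorts:
        t = Thread(target=nmapScanThread, args=(tgtHost, tgtPort, lock, nm))
        t.start()
        threads.append(t)
    # then wait for every scan before returning control to the shell
    for t in threads:
        t.join()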
Here is an example:
from multiprocessing import Process
import time

def func():
    print('sub process is running')
    time.sleep(5)
    print('sub process finished')

if __name__ == '__main__':
    p = Process(target=func)
    p.start()
    print('done')
What I expect is that the main process will terminate right after it starts the subprocess. But after printing out 'done', the terminal is still waiting... Is there any way to make the main process exit right after printing 'done', instead of waiting for the subprocess? I'm confused here because I'm not calling p.join().
Python will not exit while a non-daemon process is still alive.
By setting the daemon attribute before the start() call, you can make the process daemonic:
p = Process(target=func)
p.daemon = True  # <-----
p.start()
print('done')
NOTE: There will be no 'sub process finished' message printed, because the main process terminates the sub-process at exit. This may not be what you want.
You should do a double fork instead: the Process target forks again and returns immediately, so p.join() does not block, while the orphaned grandchild keeps running after the main process exits:
import os
import time
from multiprocessing import Process

def func():
    if os.fork() != 0:  # <-- the intermediate process returns at once
        return          # <--
    print('sub process is running')
    time.sleep(5)
    print('sub process finished')

if __name__ == '__main__':
    p = Process(target=func)
    p.start()
    p.join()
    print('done')
Following the excellent answer from @falsetru, I wrote out a quick generalization in the form of a decorator.
import os
from multiprocessing import Process

def detachify(func):
    """Decorate a function so that its calls are async in a detached process.

    NOTE: relies on os.fork(), so this is Unix-only.

    Usage
    -----

    .. code::

        import time

        @detachify
        def f(message):
            time.sleep(5)
            print(message)

        f('Async and detached!!!')
    """
    # create a process fork and run the function
    def forkify(*args, **kwargs):
        if os.fork() != 0:
            return
        func(*args, **kwargs)

    # wrapper to run the forkified function
    def wrapper(*args, **kwargs):
        proc = Process(target=lambda: forkify(*args, **kwargs))
        proc.start()
        proc.join()
        return

    return wrapper
Usage (copied from docstring):
import time

@detachify
def f(message):
    time.sleep(5)
    print(message)

f('Async and detached!!!')
Or if you like,
def f(message):
    time.sleep(5)
    print(message)

detachify(f)('Async and detached!!!')
I have a Python script that spawns a new process using the multiprocessing.Process class. This process is supposed to run forever to monitor things. On Unix I can use os.kill() to send a signal to that specific process, and signal.signal(...) within that process to implement my specific interrupt handler. On Windows things don't work.
I read how to do it using Popen. Can I specify the CREATE_NEW_PROCESS_GROUP flag for the Process class as well? And how?
Here is my example code:
import multiprocessing as mp
import time
import signal
import os
import platform

def my_h(signal, frame):
    print("received signal", signal)
    raise InterruptedError

def a_task():
    signal.signal(signal.SIGINT, my_h)
    print("this is 'a_task'", os.getpid())
    try:
        while True:
            print(time.time())
            time.sleep(1)
    except Exception as e:
        print(type(e), e)
    print("'a_task' is at end")

if __name__ == '__main__':
    p = mp.Process(target=a_task)
    p.start()
    time.sleep(1)
    if platform.system() == 'Windows':
        print("send CTRL_C_EVENT")
        os.kill(p.pid, signal.CTRL_C_EVENT)
    elif platform.system() == 'Linux':
        print("send SIGINT")
        os.kill(p.pid, signal.SIGINT)
    time.sleep(3)
    try:
        os.kill(p.pid, signal.SIGTERM)
    except:
        pass
I found a workaround, sort of implementing signaling using the multiprocessing.Event class.
The key was the interrupt_main() method (found in thread for Python 2 and _thread for Python 3), which raises KeyboardInterrupt in the main thread of the process I want to interrupt.
import multiprocessing as mp
import time
import signal
import os
import threading
import _thread

def interrupt_handler(interrupt_event):
    print("before wait")
    interrupt_event.wait()
    print("after wait")
    _thread.interrupt_main()

def a_task(interrupt_event, *args):
    task = threading.Thread(target=interrupt_handler, args=(interrupt_event,))
    task.start()
    print("this is 'a_task'", os.getpid())
    try:
        while True:
            print(time.time())
            time.sleep(1)
    except KeyboardInterrupt:
        print("got KeyboardInterrupt")
    print("'a_task' is at end")

if __name__ == '__main__':
    interrupt_event = mp.Event()
    p = mp.Process(target=a_task, args=(interrupt_event, tuple()))
    p.start()
    time.sleep(2)
    print("set interrupt_event")
    interrupt_event.set()
    time.sleep(3)
    try:
        os.kill(p.pid, signal.SIGTERM)
    except:
        pass
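As for the original CREATE_NEW_PROCESS_GROUP question: multiprocessing.Process does not expose creation flags; that option belongs to subprocess.Popen. A Windows-only sketch of that route, assuming a hypothetical worker.py that installs a handler for signal.SIGBREAK:

import os
import signal
import subprocess
import sys
import time

# start the worker in its own process group, so console control events
# can be targeted at it without hitting this process
proc = subprocess.Popen(
    [sys.executable, 'worker.py'],  # hypothetical worker script
    creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
)
time.sleep(1)
# CTRL_BREAK_EVENT is delivered to the whole process group and shows up
# in the worker as signal.SIGBREAK
os.kill(proc.pid, signal.CTRL_BREAK_EVENT)
proc.wait()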
Problem: I expect the child to time out and be done, but instead it times out and begins to run again.
Can anyone tell me why this program runs forever? I expect it to run once and exit...
Here is a working program. The master threads a function to spawn a child. It works great, except that it ends up looping.
Here is the master:
# master.py
import multiprocessing, subprocess, sys, time

def f():
    p = subprocess.Popen(["C:\\Python32\\python.exe", "child.py"])
    # wait until child ends and check exit code
    while p.poll() == None:
        time.sleep(2)
    if p.poll() != 0:
        print("something went wrong with child.py")

# multithread a function process to launch and monitor a child
p1 = multiprocessing.Process(target = f())
p1.start()
and the child:
# child.py
import socket, sys

def main(args):
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.settimeout(10)
        sock.bind(('', 54324))
        data, addr = sock.recvfrom(1024)  # buffer size is 1024 bytes
        print(data)
        sock.close()
        return 0
    except KeyboardInterrupt as e:
        try:
            sock.close()
            return 0
        except:
            return 0

if __name__ == "__main__":
    sys.exit(main(sys.argv))
The problem is that your master.py doesn't have an if __name__ == '__main__' guard. On Windows, multiprocessing has to be able to reimport the main module in the child process, and if you don't use this if guard, you will re-execute the multiprocessing.Process in the child (resulting in an accidental forkbomb).
To fix, simply put all of the commands in master.py in the if guard:
if __name__ == '__main__':
    # multithread a function process to launch and monitor a child
    p1 = multiprocessing.Process(target=f)  # note: pass f itself, not f()
    p1.start()