Getting different outputs on Gitbash and VScode Terminal - python

Below is the smallest reproducible example I could come up with. In the main function, an object of the ServerUDP class is first created, and a thread is started to call its run method, which in turn creates another thread to call RecvData. The problem is that the main thread does not print the port value until the program is stopped with Ctrl+C. I cannot understand why this is happening.
import socket, simpleaudio as sa
import threading, queue
from threading import Thread
import time
class ServerUDP:
    """UDP room server: binds a loopback socket on an OS-chosen port and
    idles on worker threads until the shared run_event is cleared."""

    def __init__(self):
        # Bind to port 0 so the OS picks any free port; retry on failure.
        while True:
            try:
                self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                self.s.bind(('127.0.0.1', 0))
                self.clients = set()
                self.recvPackets = queue.Queue()
                break
            # Was a bare `except:`; only socket/OS errors are expected here,
            # and a bare except would also swallow KeyboardInterrupt.
            except OSError:
                print("Couldn't bind to that port")

    def get_ports(self):
        """Return the (host, port) tuple the server socket is bound to."""
        return self.s.getsockname()

    def RecvData(self, name, delay, run_event):
        """Listener loop: sleeps in `delay`-second steps until run_event is cleared."""
        while run_event.is_set():
            time.sleep(delay)

    def run(self, name, delay, run_event):
        """Start the listener thread, idle until run_event is cleared, then close."""
        # Bug fix: the original passed the module-global `d1` and a hard-coded
        # "bob" instead of this method's own `delay`/`name` parameters, which
        # coupled the class to the calling script (NameError if imported alone).
        threading.Thread(target=self.RecvData,
                         args=(name, delay, run_event)).start()
        while run_event.is_set():
            time.sleep(delay)
        self.s.close()

    def close(self):
        """Release the server socket."""
        self.s.close()
if __name__ == "__main__":
    roomserver = ServerUDP()
    run_event = threading.Event()
    run_event.set()
    d1 = 1
    t = Thread(target=roomserver.run, args=("bob", d1, run_event))
    t.start()
    port = roomserver.get_ports()[1]
    # flush=True forces the line out immediately.  Under Git Bash/mintty,
    # Python often cannot detect a tty, so stdout is block-buffered and the
    # port only appeared when the buffer was flushed at exit (on Ctrl+C) —
    # which is exactly the symptom described in the question.
    print("port is", port, flush=True)
    try:
        while True:
            time.sleep(.1)
    except KeyboardInterrupt:
        print("attempting to close threads. Max wait =", d1, flush=True)
        run_event.clear()
        t.join()
        print("threads successfully closed", flush=True)
UPD: I'm on windows platform and was using VScode editor for coding and Git Bash terminal to run this program. I just ran this on VScode terminal and magically it was giving the port number. Is this a known issue in Git Bash terminal?
Adding the VS Code and Git Bash tags in case this is a known issue with either of them.

Related

Stop child thread from main thread in python

I'm starting a webserver in a new thread. After all tests have run, I want to kill the child thread with the running server inside. The only solution I have found is to interrupt the entire process, with all its threads, by calling "os.system('kill %d' % os.getpid())" (see the code below). I'm not sure it's the smartest solution, and I'm not sure all threads will actually be killed. Could I send some kind of "keyboard interrupt" signal to stop the thread before exiting the main thread?
import http
import os
import sys
import unittest
import time
import requests
import threading
from addresses import handle_get_addresses, load_addresses
from webserver import HTTPHandler
def run_in_thread(fn):
    """Decorator: run *fn* on a freshly started thread and return that
    Thread object to the caller (so it can be joined later)."""
    def wrapper(*args, **kwargs):
        worker = threading.Thread(target=fn, args=args, kwargs=kwargs)
        worker.start()
        return worker
    return wrapper
@run_in_thread  # was mangled to "#run_in_thread" by the site's formatting; the
# decorator is required so this call returns a Thread, as the caller expects
def start_web_server():
    """Start an HTTP server on localhost:8808 in a background thread.

    Runs serve_forever() until interrupted.  NOTE(review): the snippet's
    top-level `import http` does not load the `http.server` submodule —
    `import http.server` is needed for this to work reliably; confirm
    against the full file.
    """
    web_host = 'localhost'
    print("starting server...")
    web_port = 8808
    httpd = http.server.HTTPServer((web_host, web_port), HTTPHandler)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
class TestAddressesApi(unittest.TestCase):
WEB_SERVER_THREAD: threading.Thread = None
#classmethod
def setUpClass(cls):
cls.WEB_SERVER_THREAD = start_web_server()
pass
#classmethod
def tearDownClass(cls):
print("shutting down the webserver...")
# here someting like cls.WEB_SERVER_THREAD.terminate()
# instead of line below
os.system('kill %d' % os.getpid())
def test_get_all_addresses(self):
pass
def test_1(self):
pass
if __name__ == "__main__":
    # Discover and run the TestAddressesApi tests when executed directly.
    unittest.main()
Maybe threading.Event is what you wanted.
Just found a solution. Daemon Threads stop executing when main thread stops working

How to "listen" to a multiprocessing queue in Python

I will start with the code, I hope it is simple enough:
import Queue
import multiprocessing
class RobotProxy(multiprocessing.Process):
def __init__(self, commands_q):
multiprocessing.Process.__init__(self)
self.commands_q = commands_q
def run(self):
self.listen()
print "robot started"
def listen(self):
print "listening"
while True:
print "size", self.commands_q.qsize()
command = self.commands_q.get()
print command
if command is "start_experiment":
self.start_experiment()
elif command is "end_experiment":
self.terminate_experiment()
break
else: raise Exception("Communication command not recognized")
print "listen finished"
def start_experiment(self):
#self.vision = ds.DropletSegmentation( )
print "start experiment"
def terminate_experiment(self):
print "terminate experiment"
if __name__ == "__main__":
command_q = Queue.Queue()
robot_proxy = RobotProxy( command_q )
robot_proxy.start()
#robot_proxy.listen()
print "after start"
print command_q.qsize()
command_q.put("start_experiment")
command_q.put("end_experiment")
print command_q.qsize()
raise SystemExit
So basically I launch a process, and I want this process to listen to commands put on the Queue.
When I execute this code, I get the following:
after start
0
2
listening
size 0
It seems that I am not sharing the queue properly, or that I am making some other error. The program gets stuck forever in that self.commands_q.get() call, even though the queue should in theory hold 2 elements.
You need to use multiprocessing.Queue instead of Queue.Queue in order to have the Queue object be shared across processes.
See here: Multiprocessing Queues

How to close the thread and exit to the command line?

I have a very simple example, it prints out the names, but the problem is, when I press ctrl+C, the program doesn't return to the normal command line interface:
^CStopping
After I only see my cursor blinking, but I can't do anything, so I have to close the window and open it up again.
I'm running Ubuntu 12.10.
that's my code:
import threading
import random
import time
import Queue
import urllib2
import sys
queue = Queue.Queue()
keep_running = True
class MyThread(threading.Thread):
    """Producer thread: pushes a random name onto the queue every 0.25 s
    until the module-level keep_running flag goes false."""

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.names = ['Sophia', 'Irina', 'Tanya', 'Cait', 'Jess']

    def run(self):
        # NOTE(review): keep_running is a module-level flag; see the answer
        # about needing `global` in the writer for the flip to be visible.
        while keep_running:
            time.sleep(0.25)
            # random.choice is the idiomatic form of names[randint(0, len-1)].
            line = random.choice(self.names)
            # Consistency fix: the original put() went through the
            # module-level `queue` object while task_done() used self.queue;
            # use the instance's own queue for both.
            self.queue.put(line)
            # NOTE(review): task_done() belongs in the *consumer* after
            # get(); calling it here right after put() makes queue.join()
            # return before any item is actually handled.
            self.queue.task_done()
class Starter():
    # Consumer: spawns one MyThread producer, then polls the shared queue.
    # Python 2 code (`print` is a statement below).
    def __init__(self):
        # NOTE(review): self.next() loops forever, so this constructor never
        # returns; the queue.join() in main() is unreachable until a
        # KeyboardInterrupt propagates out of next().
        self.queue = queue
        t = MyThread(self.queue)
        t.start()
        self.next()
    def next(self):
        # Poll loop: print an item roughly every 0.2 s, or a waiting notice.
        while True:
            time.sleep(0.2)
            if not self.queue.empty():
                line = self.queue.get()
                print line, self.queue.qsize()
            else:
                print 'waiting for queue'
def main():
try:
Starter()
queue.join()
except KeyboardInterrupt, e:
print 'Stopping'
keep_running = False
sys.exit(1)
main()
Your main problem is that you didn't declare keep_running as global, so main is just creating a local variable with the same name.
If you fix that, it will usually exit on some platforms.
If you want it to always exit on all platforms, you need to do two more things:
join the thread that you created.
protect the shared global variable with a Lock or other sync mechanism.
However, a shared global keep_running flag isn't really needed here anyway. You've already got a queue. Just define a special "shutdown" message you can post on the queue, or use closing the queue as a signal to shutdown.
While we're at it, unless you're trying to simulate a slow network or something, there is no need for that time.sleep in your code. Just call self.queue.get(timeout=0.2). That way, instead of always taking 0.2 seconds to get each entry, it will take up to 0.2 seconds, but as little as 0 if there's already something there.
Your main thread is stuck in Starter.next. The interrupt is raised there and propagates up to the first line of the try statement, where it is caught, jumping to the except clause before join can be called. Try putting the join call in a finally block (together with the sys.exit), or simply moving it into the exception handler.

Multiprocessing python-server creates too many temp-directories

I'm trying to implement a server in python3.3 that has a separate thread preloaded to do all the processing for the incoming connections.
from multiprocessing import Process, Pipe, Queue
from multiprocessing.reduction import reduce_socket
import time
import socketserver,socket
def process(q):
    """Worker loop: rebuild each socket handed over the queue and echo
    everything received on it back to the client, prefixed with "Got it: "."""
    while True:
        rebuild, rebuild_args = q.get()
        sock = rebuild(*rebuild_args)
        # MSG_PEEK detects EOF without consuming any payload bytes.
        while sock.recv(1, socket.MSG_PEEK):
            data = sock.recv(100)
            if not data:
                break
            sock.send(b"Got it: ")
            sock.send(data)
        sock.close()
class MyHandler(socketserver.BaseRequestHandler):
    # Hands the client socket (in picklable, reduced form) to the worker
    # process via the server's queue, then waits for the peer to close.
    def handle(self):
        print("Opening connection")
        print("Processing")
        # reduce_socket() yields a (rebuild_fn, args) pair the worker can
        # use to reconstruct the socket in its own process.
        self.server.q.put(reduce_socket(self.request))
        # Poll until the client closes; MSG_PEEK sees EOF without consuming
        # data.  NOTE(review): this handler and the worker both recv() on
        # the same underlying socket — confirm the peek cannot race with
        # the worker's reads.
        while self.request.recv(1, socket.MSG_PEEK):
            time.sleep(1)
        print("Closing connection")
class MyServer(socketserver.ForkingTCPServer):
    # NOTE(review): these are *class* attributes, created once at import
    # time and shared by every instance; `q` is also inherited by the
    # forked handler children.  `p` and `parent_conn`/`child_conn` appear
    # unused as class attributes — presumably leftovers; verify.
    p = Process
    q = Queue()
    parent_conn,child_conn = Pipe()
    def __init__(self,server_address,handler):
        socketserver.ForkingTCPServer.__init__(self,server_address, handler)
        # One long-lived worker process consuming reduced sockets from q
        # (shadows the class-level `p` with an instance attribute).
        self.p = Process(target=process,args=(self.q,))
        self.p.start()
    def __del__(self):
        # NOTE(review): __del__ is not guaranteed to run at interpreter
        # exit, and join() here can hang since process() loops forever.
        self.p.join()
if __name__ == "__main__":
    # Guarding the entry point keeps the server (and its worker process)
    # from starting as a side effect of an import, and is required for
    # multiprocessing's spawn start method on Windows/macOS.
    server_address = ('', 9999)
    myserver = MyServer(server_address, MyHandler)
    myserver.serve_forever()
I can test that it works using the following script:
from multiprocessing.reduction import reduce_socket
import time
import socket
# Test client: connect over TCP, then reduce and rebuild the socket in the
# same process to mimic what the server's handler does.
# NOTE(review): this snippet mixes Python 2 and 3 syntax — print() calls
# above but `print conn.recv(255)` statements below; on Python 3 the
# send("poks") calls would also need bytes literals.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('localhost', 9999))
time.sleep(1)
print("reduce_socket(s)")
fn,args = reduce_socket(s)
time.sleep(1)
print("rebuild_socket(s)")
conn = fn(*args)
time.sleep(1)
print("using_socket(s)")
conn.send("poks")
print conn.recv(255)
conn.send("poks")
print conn.recv(255)
# Empty send signals the server's echo loop to stop (its recv sees EOF-like
# empty data).
conn.send("")
print conn.recv(255)
conn.close()
Unfortunately there seems to be something that is wrong since after running the test for n times, my tmp-folder is filled with subfolders:
$ ls /tmp/pymp*|wc -l
32000
These temporary files are created by socket_reduce(). Interestingly the rebuild/reduce_socket() in the client also creates the temporary files, but they are removed once the function exits. The maximum amount of folders in my current tmp-filesystem is 32000 which causes a problem. I could remove the /tmp/pymp*-files by hand or somewhere in the server, but I guess there should also be the correct way to do this. Can anyone help me with this?
Okay, Kind of fixed it. From the ../lib/python3.3/multiprocessing/util.py:
$ grep "def get_temp_dir" -B5 /usr/local/lib/python3.3/multiprocessing/util.py
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
It seems that the temporary directory should be available until the process quits. Since my process() and main() both run forever, the temporary file won't be removed. To fix it I can create another process that will hand the reduced_socket to the process():
def process(q):
    # Worker loop, same as the original, but it now acknowledges each
    # finished connection by putting "ok" back on the queue so the
    # short-lived handler subprocess can exit — which is what allows its
    # multiprocessing temp directory to be cleaned up.
    while 1:
        fn,args = q.get()
        conn = fn(*args)
        # MSG_PEEK detects EOF without consuming payload bytes.
        while conn.recv(1, socket.MSG_PEEK):
            buf = conn.recv(100)
            if not buf: break
            conn.send(b"Got it: ")
            conn.send(buf)
        conn.close()
        q.put("ok")
class MyHandler(socketserver.BaseRequestHandler):
    def socket_to_process(self,q):
        # Runs in a short-lived child process: reduce_socket() creates its
        # temp files here, so they are removed when this process exits.
        q.put(reduce_socket(self.request))
        # Block until process() posts its "ok" acknowledgement.
        q.get()
    def handle(self):
        # Delegate the socket hand-off to a disposable subprocess and wait
        # for it to finish.
        p = Process(target=self.socket_to_process,args=(self.server.q,))
        p.start()
        p.join()
This way the temporary file is created in a subprocess that will exit once the process() has done its thing with the input. I don't think this is an elegant way of doing it but it works. If someone knows better, please let stackoverflow know.

unable to create a thread in python

I have following code which compares user input
import thread,sys
# NOTE(review): `username` and `password` come from surrounding GUI code not
# shown here; presumably this statement runs before `def run()` below, in
# which case start_new_thread would fail with a NameError — confirm the
# ordering in the full file.
if(username.get_text() == 'xyz' and password.get_text()== '123' ):
    thread.start_new_thread(run,())
def run():
    # Python 2 code (print statements).  Connects to localhost:5010 in a
    # tight loop, sending the elapsed seconds since the thread started.
    print "running client"
    start = datetime.now().second
    while True:
        try:
            host ='localhost'
            port = 5010
            # NOTE(review): the local name `time` shadows any imported
            # `time` module inside this function.
            time = abs(datetime.now().second-start)
            time = str(time)
            print time
            # A new socket every iteration, never closed — this leaks one
            # socket per loop pass.
            client = socket.socket()
            client.connect((host,port))
            client.send(time)
        except socket.error:
            # Silently retries forever; connection failures are invisible.
            pass
If I just call the function run() directly it works, but when I try to create a thread to run this function, for some reason the thread is not created and the run() function is not executed. I am unable to find any error.
Thanks in advance...
you really should use the threading module instead of thread.
What else are you doing? If you create a thread like this, the interpreter will exit regardless of whether the thread is still running.
for example:
import thread
import time
def run():
    # Sleeps longer than the main thread stays alive, demonstrating the
    # failure mode described in the answer.
    time.sleep(2)
    print('ok')
# With the low-level `thread` module, the interpreter exits as soon as the
# main thread finishes, tearing down the still-running thread — hence the
# "sys.excepthook is missing" error shown below.
thread.start_new_thread(run, ())
--> this produces:
Unhandled exception in thread started by
sys.excepthook is missing
lost sys.stderr
where as:
import threading
import time
# The threading-module version: non-daemon threads are joined at interpreter
# exit, so 'ok' gets printed unless the thread is made a daemon.
def run():
    time.sleep(2)
    print('ok')
t=threading.Thread(target=run)
t.daemon = True # set thread to daemon ('ok' won't be printed in this case)
t.start()
works as expected. if you don't want to keep the interpreter waiting for the thread, just set daemon=True* on the generated Thread.
*edit: added that in example
thread is a low level library, you should use threading.
from threading import Thread
# Create and start a worker thread with the higher-level threading API
# (`run` is the target function defined in the question's code).
t = Thread(target=run, args=())
t.start()

Categories