I'm trying to execute a program in a Python subprocess:
# NOTE(review): the indentation of this snippet was lost when it was posted;
# the nested definitions below would normally be indented under run().
class MiThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
try:
# Python 2 queue module; the except branch hints at a py3 fallback.
from Queue import Queue, Empty
except ImportError:
#from queue import Queue, Empty # python 3.x
print "error"
ON_POSIX = 'posix' in sys.builtin_module_names
# Helper: forwards each line of the child's stdout into the queue,
# then closes the stream when the child's output ends.
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
# shell=True, so the whole java command is passed as one shell string.
p= Popen(["java -Xmx256m -jar bin/HelloWorld.jar"],cwd=r'/home/karen/sphinx4-1.0beta5-src/sphinx4-1.0beta5/',stdout=PIPE, shell=True, bufsize= 4024)
q = Queue()
# BUG (per the NameError traceback below): 'Thread' is used unqualified,
# but only the 'threading' module is in scope here.
t = Thread(target=enqueue_output, args=(p.stdout, q))
print "estoy en el hilo"
t.daemon = True # thread dies with the program
t.start()
# NOTE(review): 'l' is never defined anywhere in this snippet.
print l
But when I execute the thread, it fails with the following error:
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 551, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/site-packages/GNS3/Workspace.py", line 65, in run
t = Thread(target=enqueue_output, args=(p.stdout, q))
NameError: global name 'Thread' is not defined
QObject::connect: Cannot queue arguments of type 'QTextCursor'
(Make sure 'QTextCursor' is registered using qRegisterMetaType().)
I don't have any idea! What is happening?
Try changing:
t = Thread(target=enqueue_output, args=(p.stdout, q))
to:
t = threading.Thread(target=enqueue_output, args=(p.stdout, q))
In your current namespace, Thread exists only as threading.Thread (a member of the threading module), so when you write Thread alone, Python can't find a match and throws that error.
Related
This question already has answers here:
Using python multiprocessing Pool in the terminal and in code modules for Django or Flask
(3 answers)
Closed 5 years ago.
I want to put the generated result of the func in pool.apply_async() method into a queue, everything seems very well but the error confused me a lot.
My purpose is to try to make multiple asynchronous producers (maybe not the correct term here) and multiple consumers.
Here is my toy example:
from multiprocessing import Pool
import multiprocessing
from threading import Thread
from six.moves import xrange
# NOTE(review): indentation of this snippet was lost when it was posted.
# The Pool is created at import time, before fetch_page is defined below;
# this is consistent with the AttributeError in the traceback further down
# ("'module' object has no attribute 'fetch_page'") -- presumably the
# worker processes import the module before the function exists. TODO confirm.
pool = Pool(processes=2, maxtasksperchild=1000)
# resp_queue = multiprocessing.Queue(1000)
manager = multiprocessing.Manager()
resp_queue = manager.Queue()
rang = 10000
# Producer: intended to run in a pool worker and push the url onto the queue.
def fetch_page(url):
resp_queue.put(url)
# Consumer: takes a single url off the shared queue and prints it.
def parse_response():
url = resp_queue.get()
print(url)
r_threads = []
# Starts two consumer threads (each consumes only one item -- see above).
def start_processing():
for i in range(2):
r_threads.append(Thread(target=parse_response))
print("start %s thread.." % i)
r_threads[-1].start()
# NOTE(review): six/xrange suggests Python 2, where map() returns a list;
# on Python 3 the urls[i] indexing below would fail on a map iterator.
urls = map(lambda x: "this is url %s" % x, xrange(rang))
for i in xrange(rang):
pool.apply_async(fetch_page, (urls[i],))
start_processing()
pool.close()
pool.join()
The error reads that:
> Process PoolWorker-1: Process PoolWorker-2: Traceback (most recent
> call last): Traceback (most recent call last): File
> "/usr/lib/python2.7/multiprocessing/process.py", line 258, in
> _bootstrap File "/usr/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
> self.run()
> self.run() File "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run File
> "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run
> self._target(*self._args, **self._kwargs)
> self._target(*self._args, **self._kwargs) File "/usr/lib/python2.7/multiprocessing/pool.py", line 102, in worker
> File "/usr/lib/python2.7/multiprocessing/pool.py", line 102, in worker
> task = get()
> task = get() File "/usr/lib/python2.7/multiprocessing/queues.py", line 376, in get
> File "/usr/lib/python2.7/multiprocessing/queues.py", line 376, in get
> return recv()
> return recv() AttributeError: 'module' object has no attribute 'fetch_page' AttributeError: 'module' object has no attribute
> 'fetch_page' start 0 thread.. start 1 thread..
I have read this answer but found it very strange, and this answer doesn't work on my Ubuntu machine.
Any suggestions are highly appreciated. Thanks very much.
Have a look at the code below. Changes I made to your version:
I'm using map instead of apply as it gets an iterable and splits work between workers nicely.
I've added a while loop to your parse_resp function (now get_url) so each thread will get values from queue to exhaustion.
Pool instantiation & calling is placed after the __name__ == '__main__' guard, which is needed for Python multiprocessing on Windows (as far as I know — I might be wrong, since I'm on Ubuntu).
from multiprocessing import Pool
import multiprocessing
from threading import Thread

# Shared, process-safe queue: pool workers produce into it, local threads consume.
manager = multiprocessing.Manager()
url_queue = manager.Queue()
rang = 10000


def put_url(url):
    """Producer: runs in a pool worker process and pushes one url onto the queue."""
    url_queue.put(url)


def get_url(thread_id):
    """Consumer thread body: drain the shared queue until it reports empty.

    NOTE(review): empty() is racy -- a consumer that checks before the pool
    workers have produced anything exits immediately. Acceptable for a demo,
    but a sentinel value or get() with a timeout would be more robust.
    """
    while not url_queue.empty():
        print('Thread {0} got url {1}'.format(str(thread_id), url_queue.get()))


r_threads = []


def start_threading():
    """Start two consumer threads and wait for both of them to finish."""
    for idx in range(2):
        worker = Thread(target=get_url, args=(idx,))
        r_threads.append(worker)
        print("start %s thread.." % idx)
        worker.start()
    for worker in r_threads:
        worker.join()


urls = ["url %s" % x for x in range(rang)]

# Guarded so that child processes importing this module (Windows spawn start
# method) do not re-create the pool and re-run the work.
if __name__ == '__main__':
    pool = Pool(processes=2, maxtasksperchild=1000)
    pool.map_async(put_url, urls)
    start_threading()
    pool.close()
    pool.join()
Prints:
start 0 thread..
start 1 thread..
Thread 0 got url 0
Thread 0 got url 1
Thread 1 got url 2
Thread 0 got url 3
Thread 0 got url 4
Thread 1 got url 5
Thread 0 got url 6
I am trying to get multiprocessing and multi-threading to work together nicely. I have the following code which I have derived from the multiprocessing documentation found at https://docs.python.org/2/library/multiprocessing.html
from multiprocessing import Process, Manager
from threading import Thread
# NOTE(review): indentation of this snippet was lost when it was posted.
def foo():
print ("hello world")
def simple_process(threads_manager):
"""Simple process that starts Threads
"""
threads = []
for i in range(10):
t = Thread(target=foo, args=())
t.start()
t.join()
threads.append(t)
# BUG (per the traceback below): assigning into a Manager dict pickles the
# value, and Thread objects contain thread.lock objects, which cannot be
# pickled -- hence "TypeError: can't pickle thread.lock objects".
threads_manager['threads'] = threads
manager = Manager()
threads_manager = manager.dict()
p = Process(target=simple_process, args=(threads_manager, ), kwargs={})
p.start()
p.join()
# The child crashed before storing anything, so this is None (see output).
threads = threads_manager.get('threads')
print (threads)
but when I run the code I get the following error.
hello world
Process Process-2:
Traceback (most recent call last):
File "/usr/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "multiprocessing_multithreading.py", line 17, in simple_process
threads_manager['threads'] = threads
File "<string>", line 2, in __setitem__
File "/usr/lib/python2.7/multiprocessing/managers.py", line 758, in _callmethod
conn.send((self._id, methodname, args, kwds))
TypeError: can't pickle thread.lock objects
None
I'm trying to get the threads spawned by the simple_process function as a threads list.
Can someone please help?
I have a Python program that produces an error:
File "myTest.py", line 34, in run
self.output = self.p.stdout
AttributeError: RunCmd instance has no attribute 'p'
The Python code:
# NOTE(review): indentation of this snippet was lost when it was posted.
# Runs a command in a subprocess on a watchdog thread, killing it on timeout.
class RunCmd():
def __init__(self, cmd):
self.cmd = cmd
def run(self, timeout):
# BUG (per the answer below): self.cmd[0] and self.cmd[1] are passed as
# two separate positional arguments instead of passing self.cmd whole.
# If Popen raises here, self.p is never set, which produces the
# "'RunCmd' object has no attribute 'p'" error seen in the traceback.
def target():
self.p = sp.Popen(self.cmd[0], self.cmd[1], stdin=sp.PIPE,
stdout=sp.PIPE, stderr=sp.STDOUT)
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print "process timed out"
self.p.stdin.write("process timed out")
self.p.terminate()
thread.join()
self.output = self.p.stdout #self.p.stdout.read()?
self.status = self.p.returncode
def getOutput(self):
return self.output
def getStatus(self):
return self.status
Here's the entire back trace.
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 552, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 505, in run
self.__target(*self.__args, **self.__kwargs)
File "myTest.py", line 18, in target
self.p = sp.Popen(self.cmd, stdin=PIPE,
NameError: global name 'PIPE' is not defined
Traceback (most recent call last):
File "myTest.py", line 98, in <module>
c = mydd.ddmin(deltas) # Invoke DDMIN
File "/home/DD.py", line 713, in ddmin
return self.ddgen(c, 1, 0)
File "/home/DD.py", line 605, in ddgen
outcome = self._dd(c, n)
File "/home/DD.py", line 615, in _dd
assert self.test([]) == self.PASS
File "/home/DD.py", line 311, in test
outcome = self._test(c)
File "DD.py", line 59, in _test
test.run(3)
File "DD.py", line 30, in run
self.status = self.p.returncode
AttributeError: 'RunCmd' object has no attribute 'p'
What does this error mean and what is it trying to tell me?
You didn't give all the error messages. The code in the thread fails because your call to Popen is wrong, it should be:
# Corrected target(): the whole self.cmd is passed as Popen's single first
# argument (this is a closure over self and the subprocess alias sp).
def target():
self.p = sp.Popen(self.cmd, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.STDOUT)
As the thread fails, it doesn't set the "p" variable, that's why you're getting the error message you're talking about.
How to reproduce this error in Python very simply:
class RunCmd():
    """Minimal reproduction of the AttributeError discussed above.

    __init__ reads self.p, but no attribute named p has ever been assigned
    on the instance, so the lookup itself raises
    AttributeError: 'RunCmd' object has no attribute 'p'.
    """

    def __init__(self):
        # Raises before anything is printed: self.p was never created.
        print(self.p)
r = RunCmd()  # instantiation runs __init__, which raises the AttributeError shown below
Prints:
AttributeError: 'RunCmd' object has no attribute 'p'
What's going on:
You have to learn to read and reason about the code you are dealing with. Verbalize the code like this:
I define a class called RunCmd. It has a constructor called __init__ that takes no parameters. The constructor prints out the instance attribute p.
I instantiate a new object (instance) of RunCmd class. The constructor is run, and it tries to access the value of p. No such attribute p exists, so the error message is printed.
The error message means exactly what it says. You need to create something before you can use it. If you don't, this AttributeError will be thrown.
Solutions:
Throw an error earlier on when your variable is not created.
Put the code in a try/except block to stop the program gracefully when the attribute is not created.
Test if the variable exists before using it.
Recently in a project, I had a multiprocessing Process that crashed. A child process was supposed to do a calculation, then send it to the parent process in a Pipe. If the child crashed, the parent would freeze while reading from the pipe. Is there a 'correct' way to send data that would avoid blocking the parent forever if a child dies?
This is an example that reproduces the problem I'm having:
import multiprocessing as mp
def f(pipe):
a = 1/0
pipe.send('hola')
parent, child = mp.Pipe()
proc = mp.Process(target=f, args=(child,))
proc.start()
print "Grabbing result"
print "Result: {0}".format(parent.recv())
proc.join()
The parent process could use the connection's poll(...) method to determine if any result was forthcoming with a reasonable time limit:
import multiprocessing as mp
# Maximum number of seconds to wait for the child's result.
timelimit = 3
def f(pipe):
# Deliberate crash (same as in the question), so no data is ever sent.
a = 1/0
pipe.send('hola')
parent, child = mp.Pipe()
proc = mp.Process(target=f, args=(child,))
proc.start()
print "Grabbing result"
# poll(timeout) returns True only if data arrives within the time limit,
# so recv() is attempted only when it cannot block forever.
if parent.poll(timelimit):
print "Result: {0}".format(parent.recv())
else:
print "No data available after {0} seconds...".format(timelimit)
proc.join()
When I run this code I get the following results:
Grabbing result
Process Process-1:
Traceback (most recent call last):
File "/usr/lib/python2.7/multiprocessing/process.py", line 258, in _bootstrap
self.run()
File "/usr/lib/python2.7/multiprocessing/process.py", line 114, in run
self._target(*self._args, **self._kwargs)
File "s.py", line 6, in f
a = 1/0
ZeroDivisionError: integer division or modulo by zero
No data available after 3 seconds...
For some reason I can't access the Queue.Empty exception - what am I doing wrong here?
from multiprocessing import Process, Queue
# ...
try:
action = action_queue.get(False)
print "Action: " + action
except Queue.Empty:
pass
The stack trace:
Traceback (most recent call last):
File "C:\Program Files\Python27\lib\multiprocessing\process.py", line 258,
in _bootstrap
self.run()
File "C:\Program Files\Python27\lib\multiprocessing\process.py", line 114,
in run
self._target(*self._args, **self._kwargs)
File "D:\Development\populate.py", line 39, in permutate
except Queue.Empty: AttributeError: 'function' object has no attribute 'Empty'
The Queue.Empty exception is in the Queue module, not in the multiprocessing.queues.Queue class. The multiprocessing module actually uses the Queue (module) Empty exception class:
from multiprocessing import Queue
# Python 2: the Empty exception is defined in the Queue *module*.
from Queue import Empty
q = Queue()
try:
# get(False) is a non-blocking get; it raises Empty when nothing is queued.
q.get( False )
except Empty:
print "Queue was empty"
If you want to be very explicit and verbose, you can do this:
import multiprocessing
import Queue
q = multiprocessing.Queue()
try:
q.get( False )
# Fully qualified: 'Queue' is the module here, so Queue.Empty resolves.
except Queue.Empty:
print "Queue was empty"
Favoring the former approach is probably a better idea because there is only one Queue object to worry about and you don't have to wonder if you are working with the class or the module as in my second example.