Is it possible to spawn multiple processes from a single thread? Or is it a proper design to implement?
My code sample is -
def run_all_tasks(self):
for platform in self._platforms:
task_thread = threading.Thread(
target=self._run_task_list, args=(
self._get_tasks(),platform,))
taskset_threads.append(task_thread)
for taskset_thread in taskset_threads:
taskset_thread.start()
for taskset_thread in taskset_threads:
taskset_thread.join()
def _run_task_list(self, tasklist, platform):
try:
test_case_name = task.__class__.__name__
try:
test_case_name = task._get_test_case_name()
except:
test_case_name = task.__class__.__name__
pass
max_runtime = task.get_max_runtime()
manager = Manager()
self._shared_mem = manager.dict()
for task in tasklist:
task_proc = Process(
target=self.proc_setup,
args=(task, self, self._shared_mem))
task_proc.start()
task_proc.join(max_runtime)
This works however, sometimes it gives following error -
Traceback (most recent call last):
File "C:\wor\lib\TaskSet.py", line 430, in _run_task_list
if "warning" in self._shared_mem:
File "<string>", line 2, in __contains__
File "C:\Python27\lib\multiprocessing\managers.py", line 755, in _callmethod
self._connect()
File "C:\Python27\lib\multiprocessing\managers.py", line 742, in _connect
conn = self._Client(self._token.address, authkey=self._authkey)
File "C:\Python27\lib\multiprocessing\connection.py", line 167, in Client
c = PipeClient(address)
File "C:\Python27\lib\multiprocessing\connection.py", line 387, in PipeClient
win32.WaitNamedPipe(address, 1000)
WindowsError: [Error 2] The system cannot find the file specified
This can also be seen on linux platform.
Related
I'm trying to add multiprocessing to my tkinter app and I've been having issues with the error: TypeError: cannot pickle '_tkinter.tkapp' object. I had a look at the solution proposed in the question here and tried to implement my own version of it and this appears to have solved this particular error but now I instead I have constant OSError: [Errno 22] Invalid argument:
What I aspire to have the code do is that some calculation is being performed in the background and results of this calculation are being put into the Queue (here just integers but will be Numpy arrays in the actual code). The GUI application then displays some statistics and results to the user.
from multiprocessing import Process, Queue
from queue import Empty
import tkinter as tk
from tkinter import Tk
class FooUI(Process):
def __init__(self, q: Queue):
super().__init__(target=self, args=(q,))
self.queue = q
self.duh = []
self.root = Tk()
self._create_interface()
self.root.after(100, self._check_queue)
self.root.mainloop()
def _check_queue(self):
try:
out = self.queue.get_nowait()
if out:
self.duh.append(out)
print(self.duh)
return
except Empty:
pass
self.root.after(100, self._check_queue)
def _create_interface(self):
self.root.geometry("100x100")
b = tk.Button(self.root, text='Start', command=self.calc)
b.grid(row=0, column=0)
def calc(self):
p = Process(target=do_calc)
p.start()
def do_calc(q: Queue):
for i in range(20):
q.put(i**2)
If __name__ == '__main__':
q = Queue()
f = FooUI(q)
f.start()
And here is the traceback:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 125, in _main
prepare(preparation_data)
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 236, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 287, in _fixup_main_from_path
main_content = runpy.run_path(main_path,
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\runpy.py", line 264, in run_path
code, fname = _get_code_from_file(run_name, path_name)
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\runpy.py", line 234, in _get_code_from_file
with io.open_code(decoded_path) as f:
OSError: [Errno 22] Invalid argument: 'C:\\python\\block_model_variable_imputer\\<input>'
Traceback (most recent call last):
File "<input>", line 3, in <module>
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\multiprocessing\context.py", line 224, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\multiprocessing\context.py", line 327, in _Popen
return Popen(process_obj)
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle '_tkinter.tkapp' object
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 116, in spawn_main
exitcode = _main(fd, parent_sentinel)
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 125, in _main
prepare(preparation_data)
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 236, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\multiprocessing\spawn.py", line 287, in _fixup_main_from_path
main_content = runpy.run_path(main_path,
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\runpy.py", line 264, in run_path
code, fname = _get_code_from_file(run_name, path_name)
File "C:\Users\cherp2\AppData\Local\Programs\Python\Python38\lib\runpy.py", line 234, in _get_code_from_file
with io.open_code(decoded_path) as f:
OSError: [Errno 22] Invalid argument: 'C:\\python\\block_model_variable_imputer\\<input>'
I've been trying for a while to get it to work. Any help will be greatly appreciated!
You do the subclass of Process() in a wrong way. You need to override the run() method instead of passing target option.
from multiprocessing import Process, Queue
from queue import Empty
import tkinter as tk
class FooUI(Process):
def __init__(self, q: Queue):
super().__init__() # don't pass target and args options
self.queue = q
self.duh = []
# override run() method and create the Tk() inside the function
def run(self):
self.root = tk.Tk()
self._create_interface()
self.root.after(100, self._check_queue)
self.root.mainloop()
def _check_queue(self):
try:
out = self.queue.get_nowait()
if out:
self.duh.append(out)
print(self.duh)
#return
except Empty:
pass
self.root.after(100, self._check_queue)
def _create_interface(self):
self.root.geometry("100x100")
b = tk.Button(self.root, text='Start', command=self.calc)
b.grid(row=0, column=0)
def calc(self):
if self.queue.empty():
self.duh.clear()
p = Process(target=do_calc, args=[self.queue]) # pass self.queue to do_calc()
p.start()
def do_calc(q: Queue):
for i in range(20):
q.put(i**2)
if __name__ == '__main__':
q = Queue()
f = FooUI(q)
f.start()
I wanted to use dbus in the raspberry pi with this simple script
bus = dbus.SystemBus()
obj = bus.get_object('org.bluez', '/')
print "object"
print obj
manager = dbus.Interface(obj,'org.bluez.Manager')
obj = bus.get_object('org.bluez',manager.DefaultAdapter())
print "Manager"
print manager
print "object"
print obj
But when i am try to run that code i am getting
Traceback (most recent call last):
File "/home/pi/Desktop/hangul-recog/tools/DisableICT.py", line 350, in <module>
ge = Paint()
File "/home/pi/Desktop/hangul-recog/tools/DisableICT.py", line 125, in __init__
self.test()
File "/home/pi/Desktop/hangul-recog/tools/DisableICT.py", line 280, in test
obj = bus.get_object('org.bluez',manager.DefaultAdapter())
File "/usr/lib/python2.7/dist-packages/dbus/proxies.py", line 70, in __call__
return self._proxy_method(*args, **keywords)
File "/usr/lib/python2.7/dist-packages/dbus/proxies.py", line 145, in __call__
**keywords)
File "/usr/lib/python2.7/dist-packages/dbus/connection.py", line 651, in call_blocking
message, timeout)
DBusException: org.freedesktop.DBus.Error.UnknownMethod: Method "DefaultAdapter" with signature "" on interface "org.bluez.Manager" doesn't exist
I am using bluez 5.43 version. I have checked for the solution but no luck.
What should i do?
Thanks in advance.
I had the same issue with org.bluez.Manager
There is also org.freedesktop.DBus.ObjectManager. This should get you those objects (from https://github.com/Douglas6/blueplayer/blob/master/blueplayer.py):
import dbus
SERVICE_NAME = "org.bluez"
OBJECT_IFACE = "org.freedesktop.DBus.ObjectManager"
ADAPTER_IFACE = SERVICE_NAME + ".Adapter1"
DEVICE_IFACE = SERVICE_NAME + ".Device1"
PROPERTIES_IFACE = "org.freedesktop.DBus.Properties"
bus = dbus.SystemBus()
manager = dbus.Interface(bus.get_object("org.bluez", "/"), "org.freedesktop.DBus.ObjectManager")
objects = manager.GetManagedObjects()
for path, ifaces in objects.iteritems():
adapter = ifaces.get(ADAPTER_IFACE)
if adapter is None:
continue
obj = bus.get_object(SERVICE_NAME, path)
adapter = dbus.Interface(obj, ADAPTER_IFACE)
Can anyone suggest a Python client for AWS Redis Cluster enabled?
I'm using redis-py-cluster, but it fails:
Sample code:
from rediscluster import StrictRedisCluster
startup_nodes = [{"host": "xxxx.clustercfg.apn2.cache.amazonaws.com", "port": "6379"}]
r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, skip_full_coverage_check=False)
r.set('foo', 'bar')
value = r.get('foo')
======
Exception:
Traceback (most recent call last):
File "testRedisCluster.py", line 11, in
r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, skip_full_coverage_check=False)
File "/Library/Python/2.7/site-packages/rediscluster/client.py", line 181, in init
**kwargs
File "/Library/Python/2.7/site-packages/rediscluster/connection.py", line 141, in init
self.nodes.initialize()
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 228, in initialize
need_full_slots_coverage = self.cluster_require_full_coverage(nodes_cache)
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 270, in cluster_require_full_coverage
return any(node_require_full_coverage(node) for node in nodes.values())
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 270, in
return any(node_require_full_coverage(node) for node in nodes.values())
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 267, in node_require_full_coverage
return "yes" in r_node.config_get("cluster-require-full-coverage").values()
File "/Library/Python/2.7/site-packages/redis/client.py", line 715, in config_get
return self.execute_command('CONFIG GET', pattern)
File "/Library/Python/2.7/site-packages/redis/client.py", line 668, in execute_command
return self.parse_response(connection, command_name, **options)
File "/Library/Python/2.7/site-packages/redis/client.py", line 680, in parse_response
response = connection.read_response()
File "/Library/Python/2.7/site-packages/redis/connection.py", line 629, in read_response
raise response
redis.exceptions.ResponseError: unknown command 'CONFIG'
I'm using redis-py-cluster 1.3.4.
Any idea?
Change the parameter skip_full_coverage_check=False to skip_full_coverage_check=True
I am trying to use my other cores in my python program. And the following is the basic structure/logic of my code:
import multiprocessing as mp
import pandas as pd
import gc
def multiprocess_RUN(param):
result = Analysis_Obj.run(param)
return result
class Analysis_Obj():
def __init__(self, filename):
self.DF = pd.read_csv(filename)
return
def run_Analysis(self, param):
# Multi-core option
pool = mp.Pool(processes=1)
run_result = pool.map(multiprocess_RUN, [self, param])
# Normal option
run_result = self.run(param)
return run_result
def run(self, param):
# Let's say I have written a function to count the frequency of 'param' in the target file
result = count(self.DF, param)
return result
if __name__ == "__main__":
files = ['file1.csv', 'file2.csv']
params = [1,2,3,4]
results = []
for i in range(0,len(files)):
analysis = Analysis_Obj(files[i])
for j in range(0,len(params)):
result = analysis.run_Analysis(params[j])
results.append(result)
del result
del analysis
gc.collect()
If I comment out the 'Multi-core option' and run the 'Normal option' everything runs fine. But even if I run the 'Multi-core option' with processes=1 I get a Memory Error when my for loop starts on the 2nd file. I have deliberately set it up so that I create and delete an Analysis object in each for loop, so that the file that has been processed will be cleared from memory. Clearly this hasn't worked. Advice of how to get around this would be very much appreciated.
Cheers
EDIT:
Here is the error message I have in the terminal:
Exception in thread Thread-7:
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 754, in run
self.__target(*self.__args, **self.__kwargs)
File "/usr/lib/python2.7/multiprocessing/pool.py", line 326, in _handle_workers
pool._maintain_pool()
File "/usr/lib/python2.7/multiprocessing/pool.py", line 230, in _maintain_pool
self._repopulate_pool()
File "/usr/lib/python2.7/multiprocessing/pool.py", line 223, in _repopulate_pool
w.start()
File "/usr/lib/python2.7/multiprocessing/process.py", line 130, in start
self._popen = Popen(self)
File "/usr/lib/python2.7/multiprocessing/forking.py", line 121, in __init__
self.pid = os.fork()
OSError: [Errno 12] Cannot allocate memory
I'm new in Python 3. I use aiohttp module for Python 3.5.
When I run my project, I have a following error
TypeError: an integer is required (got type str)
The stack-trace is:
Traceback (most recent call last):
File "/home/santi/tesis/tanner/server.py", line 82, in <module>
srv = loop.run_until_complete(f)
File "/usr/lib/python3.5/asyncio/base_events.py", line 373, in run_until_complete
return future.result()
File "/usr/lib/python3.5/asyncio/futures.py", line 274, in result
raise self._exception
File "/usr/lib/python3.5/asyncio/tasks.py", line 240, in _step
result = coro.send(None)
File "/usr/lib/python3.5/asyncio/base_events.py", line 949, in create_server
sock.bind(sa)
The code is:
if __name__ == '__main__':
loop = asyncio.get_event_loop()
f = loop.create_server(
lambda: HttpRequestHandler(debug=False, keep_alive=75),'0.0.0.0','8090')
srv = loop.run_until_complete(f)
print('serving on', srv.sockets[0].getsockname())
try:
loop.run_forever()
except KeyboardInterrupt:
pass
What is the error in my code?
What am I doing wrong?
The port number should be an Integer:
f = loop.create_server(
lambda: HttpRequestHandler(debug=False, keep_alive=75), '0.0.0.0', 8090)