Python, multiprocessing and memory: how does it work?

I'm working "for fun" on a Dash app that compares the execution time of different algorithms.
Here I want to compare list-sorting algorithms, and in this particular case I'll be talking about the recursive merge sort.
The app works as follows: generate a set of random lists with chosen lengths, choose an algorithm, run the tests. Using concurrent.futures.ProcessPoolExecutor, all lists are sorted at the same time, each in its own process. I've tried with up to 150 lists and it works fine.
Then I wanted the merge sort to use 2 processes when the list is longer than 1024 elements, which also works fine... until I have more than 4 such lists. And if I run the test with, say, 100 lists shorter than 1024 elements plus a single list long enough to be sorted with 2 processes, it fails and returns a memory error.
Also, I know about the multiprocessing.cpu_count() function, but it doesn't seem to be of any use here: it says I have 12 CPUs, yet the app will run 150 processes at the same time and then crash over 5 × 2 sub-processes.
So, could anyone explain this to me? I have 32 GB of RAM and we're talking about 5 lists of 2000 ints, so...
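For reference, here is a minimal sketch of what controls the pool size (the square function and the sizes are illustrative, not from the app). By default ProcessPoolExecutor already caps itself at os.cpu_count() workers, and under Windows' spawn start method each worker is a full interpreter that re-imports the main module:
import concurrent.futures
import multiprocessing

def square(x):
    return x * x

if __name__ == "__main__":
    # Explicitly cap the number of worker processes; the default is os.cpu_count().
    n_workers = multiprocessing.cpu_count()
    with concurrent.futures.ProcessPoolExecutor(max_workers=n_workers) as executor:
        print(list(executor.map(square, range(8))))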
Edit: Adding the code I used
Each generated list is stored as a Data object:
class Data:
    def __init__(self, data):
        self.datas = data
        self.sorted_datas = None
        self.insert_sort_time = None
        self.merge_sort_time = None
        self.mt_merge_sort_time = None
        self.heapify_sort_time = None

    def __lt__(self, other):
        return len(self.datas) < len(other.datas)

    def __repr__(self):
        return str(self.datas)

    def _sort_by_merging(self):
        a = time.time()
        self.sorted_datas = [item for item in self.datas]
        self.sorted_datas = mergeSort(self.sorted_datas)
        b = time.time() - a
        return b

    def _sort_by_multiprocMerging(self):
        a = time.time()
        self.sorted_datas = [item for item in self.datas]
        self.sorted_datas = multiprocMerging(self.sorted_datas)
        b = time.time() - a
        return b
    ...
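As an aside, the same timing pattern measured with time.perf_counter() instead of time.time(); a minimal sketch (timed_sort and sort_fn are hypothetical names, not part of the app):
import time

def timed_sort(data, sort_fn):
    """Return (sorted_copy, elapsed_seconds) for an arbitrary sort function."""
    work = list(data)            # sort a copy, keep the original intact
    start = time.perf_counter()  # monotonic clock, preferred over time.time() for intervals
    result = sort_fn(work)
    return result, time.perf_counter() - start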
And all the Data objects are stored in a DataSet object:
class DataSet:
    def __init__(self):
        self.raw_datas = []
        self._datas = []

    def add(self, new_data):
        heapq.heappush(self.raw_datas, new_data)

    def sort(self):
        self._datas = [heapq.heappop(self.raw_datas) for _ in range(len(self.raw_datas))]
        self.raw_datas = self._datas

    def run_tests(self, *algos):
        if 'merge' in algos:
            self.merge_sort_time = 0
            self.merge_datas = []
            with concurrent.futures.ProcessPoolExecutor() as executor:
                results = [executor.submit(datas._sort_by_merging) for datas in self.raw_datas]
                i = 0
                for result in concurrent.futures.as_completed(results):
                    self.merge_datas.append((len(self.raw_datas[i].datas), result.result()))
                    self.raw_datas[i].merge_sort_time = self.merge_datas[i][1] * 1000
                    self.merge_sort_time += self.raw_datas[i].merge_sort_time
                    i += 1
        if 'mp_merge' in algos:
            self.mt_merge_sort_time = 0
            self.mt_merge_datas = []
            with concurrent.futures.ProcessPoolExecutor() as executor:
                results = [executor.submit(datas._sort_by_multiprocMerging) for datas in self.raw_datas]
                i = 0
                for result in concurrent.futures.as_completed(results):
                    self.mt_merge_datas.append((len(self.raw_datas[i].datas), result.result()))
                    self.raw_datas[i].mt_merge_sort_time = self.mt_merge_datas[i][1] * 1000
                    self.mt_merge_sort_time += self.raw_datas[i].mt_merge_sort_time
                    i += 1
        ...
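One aside about run_tests, as a hedged sketch rather than a fix: concurrent.futures.as_completed yields futures in completion order, not submission order, so pairing each result with self.raw_datas[i] via a running index can attach a timing to the wrong list. A dict keyed by future keeps the pairing safe (this assumes the Data class above):
import concurrent.futures

def run_merge_tests(datasets):
    """Attach each timing to the Data object that produced it."""
    with concurrent.futures.ProcessPoolExecutor() as executor:
        future_to_data = {executor.submit(d._sort_by_merging): d for d in datasets}
        for future in concurrent.futures.as_completed(future_to_data):
            data = future_to_data[future]        # the Data this result belongs to
            data.merge_sort_time = future.result() * 1000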
And here are the sorting algorithms:
def mergeSort(my_list):
    if len(my_list) > 1:
        mid = len(my_list) // 2
        left = my_list[:mid]
        right = my_list[mid:]
        mergeSort(left)
        mergeSort(right)
        i = 0
        j = 0
        k = 0
        while i < len(left) and j < len(right):
            if left[i] < right[j]:
                my_list[k] = left[i]
                i += 1
            else:
                my_list[k] = right[j]
                j += 1
            k += 1
        while i < len(left):
            my_list[k] = left[i]
            i += 1
            k += 1
        while j < len(right):
            my_list[k] = right[j]
            j += 1
            k += 1
    return my_list

def multiprocMerging(my_list):
    if len(my_list) > 1024:
        mid = len(my_list) // 2
        left = my_list[:mid]
        right = my_list[mid:]
        with concurrent.futures.ProcessPoolExecutor() as executor:
            results = executor.map(mergeSort, [left, right])
            sides = [result for result in results]
        i = 0
        j = 0
        k = 0
        while i < len(left) and j < len(right):
            if sides[0][i] < sides[1][j]:
                my_list[k] = sides[0][i]
                i += 1
            else:
                my_list[k] = sides[1][j]
                j += 1
            k += 1
        while i < len(sides[0]):
            my_list[k] = sides[0][i]
            i += 1
            k += 1
        while j < len(sides[1]):
            my_list[k] = sides[1][j]
            j += 1
            k += 1
    else:
        mergeSort(my_list)
    return my_list
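One structural note, as a sketch (the work function is illustrative): under Windows' spawn start method every child process re-imports the main module, so anything at module top level, including pool creation, runs again in each child unless it is guarded:
import concurrent.futures

def work(chunk):
    return sorted(chunk)

if __name__ == "__main__":
    # Without this guard each spawned child would re-execute the module top level.
    with concurrent.futures.ProcessPoolExecutor() as executor:
        print(list(executor.map(work, [[3, 1, 2], [9, 7, 8]])))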
Edit 2: here is the console log when the app crashes. The output of the many spawned processes is interleaved, so the distinct tracebacks are reproduced separately below:
Process SpawnProcess-186:5:
Traceback (most recent call last):
  File "D:\Python\lib\multiprocessing\process.py", line 315, in _bootstrap
    self.run()
  File "D:\Python\lib\multiprocessing\process.py", line 108, in run
    self._target(*self._args, **self._kwargs)
  File "D:\Python\lib\concurrent\futures\process.py", line 233, in _process_worker
    call_item = call_queue.get(block=True)
  File "D:\Python\lib\multiprocessing\queues.py", line 116, in get
    return _ForkingPickler.loads(res)
  File "D:\Projets\Python\Algorithmic\AlgoWebSite\DashApps\algos\Dunod\list_sorting.py", line 2, in <module>
    import numpy as np
  File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\numpy\__init__.py", line 140, in <module>
    from . import core
  File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\numpy\core\__init__.py", line 72, in <module>
    from . import numeric
  File "<frozen importlib._bootstrap>", line 991, in _find_and_load
  File "<frozen importlib._bootstrap>", line 975, in _find_and_load_unlocked
  File "<frozen importlib._bootstrap>", line 671, in _load_unlocked
  File "<frozen importlib._bootstrap_external>", line 779, in exec_module
  File "<frozen importlib._bootstrap_external>", line 911, in get_code
  File "<frozen importlib._bootstrap_external>", line 580, in _compile_bytecode
MemoryError
Other spawned processes die while being bootstrapped, before the worker code even runs:
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "D:\Python\lib\multiprocessing\spawn.py", line 116, in spawn_main
    exitcode = _main(fd, parent_sentinel)
  File "D:\Python\lib\multiprocessing\spawn.py", line 126, in _main
    self = reduction.pickle.load(from_parent)
  File "D:\Python\lib\concurrent\futures\process.py", line 54, in <module>
    import multiprocessing.connection
  File "D:\Python\lib\multiprocessing\connection.py", line 21, in <module>
    import _multiprocessing
ImportError: DLL load failed while importing _multiprocessing: The paging file is too small for this operation to complete.
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "D:\Python\lib\multiprocessing\spawn.py", line 116, in spawn_main
    exitcode = _main(fd, parent_sentinel)
  File "D:\Python\lib\multiprocessing\spawn.py", line 125, in _main
    prepare(preparation_data)
  File "D:\Python\lib\multiprocessing\spawn.py", line 236, in prepare
    _fixup_main_from_path(data['init_main_from_path'])
  File "D:\Python\lib\multiprocessing\spawn.py", line 287, in _fixup_main_from_path
    main_content = runpy.run_path(main_path,
  File "D:\Python\lib\runpy.py", line 264, in run_path
    code, fname = _get_code_from_file(run_name, path_name)
  File "D:\Python\lib\runpy.py", line 239, in _get_code_from_file
    code = compile(f.read(), fname, 'exec')
MemoryError
And some die while compiling a regular expression during an import:
  File "D:\Python\lib\sre_compile.py", line 291, in _optimize_charset
    charmap[k] = 1
IndexError: bytearray index out of range
Internal Server Error: /django_plotly_dash/app/lists/_dash-update-component
concurrent.futures.process._RemoteTraceback:
"""
Traceback (most recent call last):
File "D:\Python\lib\concurrent\futures\process.py", line 239, in _process_worker
r = call_item.fn(*call_item.args, **call_item.kwargs)
File "D:\Projets\Python\Algorithmic\AlgoWebSite\DashApps\toolbox\list_datas.py", line 47, in _sort_by_threadmerging
self.sorted_datas = multiThreadMerging(self.sorted_datas)
File "D:\Projets\Python\Algorithmic\AlgoWebSite\DashApps\algos\Dunod\list_sorting.py", line 103, in multiThreadMerging
sides = [result for result in results]
File "D:\Projets\Python\Algorithmic\AlgoWebSite\DashApps\algos\Dunod\list_sorting.py", line 103, in <listcomp>
sides = [result for result in results]
File "D:\Python\lib\concurrent\futures\process.py", line 484, in _chain_from_iterable_of_lists
for element in iterable:
File "D:\Python\lib\concurrent\futures\_base.py", line 611, in result_iterator
yield fs.pop().result()
File "D:\Python\lib\concurrent\futures\_base.py", line 432, in result
return self.__get_result()
File "D:\Python\lib\concurrent\futures\_base.py", line 388, in __get_result
raise self._exception
concurrent.futures.process.BrokenProcessPool: A process in the process pool was terminated abruptly while the future was running or pending.
"""
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django\core\handlers\exception.py", line 47, in inner
response = get_response(request)
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django\core\handlers\base.py", line 179, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django\views\decorators\csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django_plotly_dash\views.py", line 74, in update
return _update(request, ident, stateless, **kwargs)
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django_plotly_dash\views.py", line 93, in _update
resp = view_func()
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django_plotly_dash\dash_wrapper.py", line 560, in dispatch
return self.dispatch_with_args(body, argMap=dict())
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\django_plotly_dash\dash_wrapper.py", line 647, in dispatch_with_args
res = self.callback_map[target_id]['callback'](*args, **argMap)
File "D:\Projets\Python\Algorithmic\venv\lib\site-packages\dash\dash.py", line 985, in add_context
output_value = func(*args, **kwargs) # %% callback invoked %%
File "D:\Projets\Python\Algorithmic\AlgoWebSite\DashApps\Apps\lists.py", line 382, in mergeTest
data_set.run_tests('mt_merge')
File "D:\Projets\Python\Algorithmic\AlgoWebSite\DashApps\toolbox\list_datas.py", line 104, in run_tests
self.mt_merge_datas.append((len(self.raw_datas[i].datas), result.result()))
File "D:\Python\lib\concurrent\futures\_base.py", line 432, in result
return self.__get_result()
File "D:\Python\lib\concurrent\futures\_base.py", line 388, in __get_result
raise self._exception
concurrent.futures.process.BrokenProcessPool: A process in the process pool was terminated abruptly while the future was running or pending.
HTTP POST /django_plotly_dash/app/lists/_dash-update-component 500 [1.74, 127.0.0.1:63195]
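For what it's worth, a rough way to account for the process count (a hedged back-of-envelope, not code from the app): the outer pool holds up to os.cpu_count() workers by default, and every worker that hits a list longer than 1024 elements opens its own 2-worker pool, where each child is a fresh interpreter that re-imports the main module and numpy. On Windows each of those interpreters reserves commit charge against the paging file, which is consistent with the "paging file is too small" errors above.
import os

def estimate_processes(n_lists, n_big, outer_workers=None):
    """Rough upper bound on simultaneously alive interpreter processes.

    n_lists: tasks submitted to the outer pool
    n_big:   tasks whose list exceeds 1024 elements (each opens a 2-worker pool)
    """
    outer = min(outer_workers or os.cpu_count() or 1, n_lists)
    nested = 2 * min(n_big, outer)   # each big task spawns two extra children
    return 1 + outer + nested        # main process + workers + nested workers

print(estimate_processes(n_lists=101, n_big=1))  # 1 + 12 + 2 = 15 on a 12-CPU box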

Related

How to solve pywhatkit KeyError in python?

import pywhatkit as pwt

number = input("Enter the number: ")
msg = input("Msg: ")
pwt.sendwhatmsg_instantly(number, msg)
And then I got this error:
Traceback (most recent call last):
  File "/data/user/0/ru.iiec.pydroid3/files/accomp_files/iiec_run/iiec_run.py", line 31, in <module>
    start(fakepyfile, mainpyfile)
  File "/data/user/0/ru.iiec.pydroid3/files/accomp_files/iiec_run/iiec_run.py", line 30, in start
    exec(open(mainpyfile).read(), __main__.__dict__)
  File "<string>", line 1, in <module>
  File "/data/user/0/ru.iiec.pydroid3/files/arm-linux-androideabi/lib/python3.9/site-packages/pywhatkit/__init__.py", line 16, in <module>
    from pywhatkit.whats import (
  File "/data/user/0/ru.iiec.pydroid3/files/arm-linux-androideabi/lib/python3.9/site-packages/pywhatkit/whats.py", line 7, in <module>
    import pyautogui as pg
  File "/data/user/0/ru.iiec.pydroid3/files/arm-linux-androideabi/lib/python3.9/site-packages/pyautogui/__init__.py", line 249, in <module>
    import mouseinfo
  File "/data/user/0/ru.iiec.pydroid3/files/arm-linux-androideabi/lib/python3.9/site-packages/mouseinfo/__init__.py", line 223, in <module>
    _display = Display(os.environ['DISPLAY'])
  File "/data/user/0/ru.iiec.pydroid3/files/arm-linux-androideabi/lib/python3.9/os.py", line 679, in __getitem__
    raise KeyError(key) from None
KeyError: 'DISPLAY'
[Program finished]
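The failing line reads os.environ['DISPLAY'], and os.environ behaves like a dict, so a missing key raises KeyError; on a headless device (Pydroid on a phone) no X display is set. A small illustration of the non-raising lookup (the printed message is illustrative):
import os

display = os.environ.get("DISPLAY")  # returns None instead of raising KeyError
if display is None:
    print("No X display available; GUI automation libraries such as pyautogui cannot run here.")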

python: pyABC with simple ODE - TypeError: '<=' not supported between instances of 'str' and 'int'

I have a simple ODE with some unknown parameters (r, C, mu and gamma) that predicts the decay and regrowth of bacteria on a surface after it has been cleaned. I have experimental data for the number of bacteria at various time points (0h, 1h, 2h, 4h, 8h and 24h) after cleaning. I'm trying to use the pyABC package to estimate the distributions of the parameters r, C, mu and gamma by an Approximate Bayesian Computation Sequential Monte Carlo (ABC-SMC) approach.
The ODE is defined as (with N the contamination level):
dN/dt = r * N * (1 - N/C) - mu * exp(-gamma * t) * N
The experimental data (the initial contamination and the later measurements) appear in the code below as initial_contamination and measurement_data.
# Preamble
from pyabc import (ABCSMC,
                   RV, Distribution,
                   MedianEpsilon,
                   LocalTransition)
from pyabc.visualization import plot_kde_2d, plot_data_callback
import matplotlib.pyplot as plt
import os
import tempfile
import numpy as np
from scipy.integrate import odeint
import math

db_path = ("sqlite:///" +
           os.path.join(tempfile.gettempdir(), "test.db"))

# Experimental data
initial_contamination = 59                     # For the ODE
measurement_data = np.array([19, 5, 5, 2, 9])  # To compare the simulation against
s = [26, 2.3, 4.67, 4.33, 4.27]                # Standard deviations used in Distance
precision = 5000
measurement_times = np.array([0, 1, 2, 4, 8, 24])  # Hours after cleaning

# Define the ODE:
def ode_model(contamination, t, r, C, mu, gamma):
    Contamination = contamination
    return (Contamination * r * (1 - Contamination / C)
            - mu * math.exp(-gamma * t) * Contamination)

def deterministic_run(parameters):  # precision, initial_contamination, r, C, mu, gamma
    precision = 5000
    tmax = 24
    time_space = np.linspace(0, tmax, precision + 1)
    sim = odeint(ode_model, initial_contamination, time_space,
                 args=(parameters["r"], parameters["C"],
                       parameters["mu"], parameters["gamma"]))
    num_at_1 = sim[int(precision * 1 / 50.0)]
    num_at_2 = sim[int(precision * 2 / 50.0)]
    num_at_4 = sim[int(precision * 4 / 50.0)]
    num_at_8 = sim[int(precision * 8 / 50.0)]
    num_at_24 = sim[int(precision * 24 / 50.0)]
    return [num_at_1, num_at_2, num_at_4, num_at_8, num_at_24]

# Define the prior distribution
parameter_prior = Distribution(r=RV("uniform", 0, 4),
                               C=RV("uniform", 6, 15),
                               mu=RV("uniform", 0, 4),
                               gamma=RV("uniform", 0, 4))
parameter_prior.get_parameter_names()

# Define the Euclidean distance:
def Distance(x, y, s):
    # computes the Euclidean distance between two lists of the same length
    if len(x) == len(y):
        return math.sqrt(sum([(((x[i] - y[i]) / s[i]) ** 2) for i in range(len(x))]))
    else:
        return 'lists not the same length'

# Set up the ABC-SMC configuration
abc = ABCSMC(models=deterministic_run,
             parameter_priors=parameter_prior,
             distance_function=Distance,
             population_size=50,
             transitions=LocalTransition(k_fraction=.3),
             eps=MedianEpsilon(500, median_multiplier=0.7))
abc.new(db_path, {"Contamination": measurement_data})

# Run the ABC-SMC
h = abc.run(minimum_epsilon=0.1, max_nr_populations=5)
The error I get is:
TypeError: '<=' not supported between instances of 'str' and 'int'
This refers to line 100 of acceptor.py:
def initialize(
        self,
        t: int,
        get_weighted_distances: Callable[[], pd.DataFrame],
        distance_function: Distance,
        x_0: dict):
    """
    Initialize. This method is called by the ABCSMC framework initially,
    and can be used to calibrate the acceptor to initial statistics.
    The default is to do nothing.

    Parameters
    ----------
    t: int
        The timepoint to initialize the acceptor for.
    get_weighted_distances: Callable[[], pd.DataFrame]
        Returns on demand the distances for initializing the acceptor.
    distance_function: Distance
        Distance object. The acceptor should not modify it, but might
        extract some meta information.
    x_0: dict
        The observed summary statistics.
    """
    pass
but I cannot figure out what is wrong with it. Any thoughts would be much appreciated.
Edit: Full traceback:
runfile('/Users/YYY/Downloads/Beth/Code/pyABC_Cleaning.py', wdir='/Users/YYY/Downloads/Beth/Code')
INFO:Sampler:Parallelizing the sampling on 8 cores.
INFO:History:Start <ABCSMC(id=1, start_time=2020-06-22 09:19:29.808488, end_time=None)>
INFO:ABC:t: 0, eps: 500.
Process Process-1:
Traceback (most recent call last):
  File "/Users/YYY/opt/anaconda3/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
    self.run()
  File "/Users/YYY/opt/anaconda3/lib/python3.7/multiprocessing/process.py", line 99, in run
    self._target(*self._args, **self._kwargs)
  File "/Users/YYY/opt/anaconda3/lib/python3.7/site-packages/pyabc/sampler/multicore_evaluation_parallel.py", line 37, in work
    new_sim = simulate_one()
  File "/Users/YYY/opt/anaconda3/lib/python3.7/site-packages/pyabc/smc.py", line 598, in simulate_one
    weight_function)
  File "/Users/YYY/opt/anaconda3/lib/python3.7/site-packages/pyabc/smc.py", line 683, in _evaluate_proposal
    x_0)
  File "/Users/YYY/opt/anaconda3/lib/python3.7/site-packages/pyabc/model.py", line 213, in accept
    par=pars)
  File "/Users/YYY/opt/anaconda3/lib/python3.7/site-packages/pyabc/acceptor/acceptor.py", line 306, in __call__
    distance_function, eps, x, x_0, t, par)
  File "/Users/YYY/opt/anaconda3/lib/python3.7/site-packages/pyabc/acceptor/acceptor.py", line 242, in accept_use_current_time
    accept = d <= eps(t)
TypeError: '<=' not supported between instances of 'str' and 'int'
(Process-2 through Process-8 die with the same traceback, their output interleaved.)
Traceback (most recent call last):
File "/Users/YYY/opt/anaconda3/lib/python3.7/site-packages/pyabc/sampler/multicorebase.py", line 100, in get_if_worker_healthy
item = queue.get(True, 5)
File "/Users/YYY/opt/anaconda3/lib/python3.7/multiprocessing/queues.py", line 105, in get
raise Empty
Empty
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/YYY/Downloads/Beth/Code/pyABC_Cleaning.py", line 115, in <module>
h = abc.run(minimum_epsilon=0.1, max_nr_populations=5)
File "/Users/YYY/opt/anaconda3/lib/python3.7/site-packages/pyabc/smc.py", line 890, in run
pop_size, simulate_one, max_eval)
File "/Users/YYY/opt/anaconda3/lib/python3.7/site-packages/pyabc/sampler/base.py", line 151, in sample_until_n_accepted
sample = f(self, n, simulate_one, max_eval, all_accepted)
File "/Users/YYY/opt/anaconda3/lib/python3.7/site-packages/pyabc/sampler/multicore_evaluation_parallel.py", line 121, in sample_until_n_accepted
val = get_if_worker_healthy(processes, queue)
File "/Users/YYY/opt/anaconda3/lib/python3.7/site-packages/pyabc/sampler/multicorebase.py", line 104, in get_if_worker_healthy
raise ProcessError("At least one worker is dead.")
ProcessError: At least one worker is dead.
The error comes from this line and is caused by your distance function returning a string. You define the distance as:
def Distance(x, y, s):
    # computes the Euclidean distance between two lists of the same length
    if len(x) == len(y):
        return math.sqrt(sum([(((x[i] - y[i]) / s[i]) ** 2) for i in range(len(x))]))
    else:
        return 'lists not the same length'
so I'm guessing you end up in the else case, return the string, and kill the workers with the error you see.
You'll have to check your x and y to find out what's causing them to have different lengths.
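For what it's worth, a hedged sketch of a stricter version of that distance function: raising on a length mismatch makes the failure show up at the call site instead of surfacing later as a confusing TypeError inside the acceptor:
import math

def distance(x, y, s):
    """Weighted Euclidean distance; fails loudly on mismatched lengths."""
    if len(x) != len(y):
        raise ValueError(f"length mismatch: len(x)={len(x)}, len(y)={len(y)}")
    return math.sqrt(sum(((xi - yi) / si) ** 2 for xi, yi, si in zip(x, y, s)))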

ImportError: cannot import name 'ApiException' running python manage.py

I am not able to import ApiException: from rest_framework.exceptions import ApiException fails.
This is the exception class from exceptions.py:
class APIException(Exception):
    status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    default_detail = _('A server error occurred.')
    default_code = 'error'

    def __init__(self, detail=None, code=None):
        if detail is None:
            detail = self.default_detail
        if code is None:
            code = self.default_code
        self.detail = _get_error_details(detail, code)

    def __str__(self):
        return self.detail

    def get_codes(self):
        return _get_codes(self.detail)

    def get_full_details(self):
        return _get_full_details(self.detail)
The error while running python manage.py makemigrations networscanners:
Traceback (most recent call last):
  File "manage.py", line 25, in <module>
    execute_from_command_line(sys.argv)
  File "D:\Python36-32\lib\site-packages\django\core\management\__init__.py", line 381, in execute_from_command_line
    utility.execute()
  File "D:\Python36-32\lib\site-packages\django\core\management\__init__.py", line 375, in execute
    self.fetch_command(subcommand).run_from_argv(self.argv)
  File "D:\Python36-32\lib\site-packages\django\core\management\base.py", line 323, in run_from_argv
    self.execute(*args, **cmd_options)
  File "D:\Python36-32\lib\site-packages\django\core\management\base.py", line 361, in execute
    self.check()
  File "D:\Python36-32\lib\site-packages\django\core\management\base.py", line 390, in check
    include_deployment_checks=include_deployment_checks,
  File "D:\Python36-32\lib\site-packages\django\core\management\base.py", line 377, in _run_checks
    return checks.run_checks(**kwargs)
  File "D:\Python36-32\lib\site-packages\django\core\checks\registry.py", line 72, in run_checks
    new_errors = check(app_configs=app_configs)
  File "D:\Python36-32\lib\site-packages\django\core\checks\urls.py", line 13, in check_url_config
    return check_resolver(resolver)
  File "D:\Python36-32\lib\site-packages\django\core\checks\urls.py", line 23, in check_resolver
    return check_method()
  File "D:\Python36-32\lib\site-packages\django\urls\resolvers.py", line 400, in check
    for pattern in self.url_patterns:
  File "D:\Python36-32\lib\site-packages\django\utils\functional.py", line 80, in __get__
    res = instance.__dict__[self.name] = self.func(instance)
  File "D:\Python36-32\lib\site-packages\django\urls\resolvers.py", line 585, in url_patterns
    patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
  File "D:\Python36-32\lib\site-packages\django\utils\functional.py", line 80, in __get__
    res = instance.__dict__[self.name] = self.func(instance)
  File "D:\Python36-32\lib\site-packages\django\urls\resolvers.py", line 578, in urlconf_module
    return import_module(self.urlconf_name)
  File "D:\Python36-32\lib\importlib\__init__.py", line 127, in import_module
    return _bootstrap._gcd_import(name[level:], package, level)
  File "<frozen importlib._bootstrap>", line 994, in _gcd_import
  File "<frozen importlib._bootstrap>", line 971, in _find_and_load
  File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
  File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
  File "<frozen importlib._bootstrap_external>", line 678, in exec_module
  File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
  File "D:\archerysec framework\archerysec-master\archerysecurity\urls.py", line 36, in <module>
    from rest_framework_jwt.views import obtain_jwt_token, verify_jwt_token
  File "D:\Python36-32\lib\site-packages\rest_framework_jwt\views.py", line 1, in <module>
    from rest_framework.views import APIView
  File "D:\Python36-32\lib\site-packages\rest_framework\views\__init__.py", line 1, in <module>
    from .base import BaseApiView
  File "D:\Python36-32\lib\site-packages\rest_framework\views\base.py", line 5, in <module>
    from rest_framework.exceptions import ApiException
ImportError: cannot import name 'ApiException'
It's APIException (rest_framework.exceptions.APIException), not ApiException.
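For illustration, the import as it actually exists in Django REST framework, plus a minimal custom exception built on it (the ServiceUnavailable subclass is the usual documentation-style example, not code from this project):
from rest_framework import status
from rest_framework.exceptions import APIException  # note the capitalisation

class ServiceUnavailable(APIException):
    status_code = status.HTTP_503_SERVICE_UNAVAILABLE
    default_detail = 'Service temporarily unavailable, try again later.'
    default_code = 'service_unavailable'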

subprocess error that causes an unhandled error

I am running the news-please project from https://github.com/fhamborg/news-please/blob/master/newsplease/__main__.py via subprocess:
cmd = ["python", r"/media/dhruvagupta/F428F6FA28F6BB26/backup dhruva/python/New folder/training/news-please-master/news-please-master/newsplease/__main__.py", "-c /newsplease/config/"]
j = psutil.Popen(cmd)
I am getting this error:
Unhandled Error
Traceback (most recent call last):
File "/media/dhruvagupta/F428F6FA28F6BB26/backup dhruva/python/New folder/training/news-please-master/news-please-master/newsplease/single_crawler.py", line 152, in __init__
self.process.start()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/crawler.py", line 293, in start
reactor.run(installSignalHandlers=False) # blocking call
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/twisted/internet/base.py", line 1272, in run
self.mainLoop()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/twisted/internet/base.py", line 1281, in mainLoop
self.runUntilCurrent()
--- <exception caught here> ---
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/twisted/internet/base.py", line 902, in runUntilCurrent
call.func(*call.args, **call.kw)
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/utils/reactor.py", line 41, in __call__
return self._func(*self._a, **self._kw)
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/core/engine.py", line 122, in _next_request
if not self._next_request_from_scheduler(spider):
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/core/engine.py", line 149, in _next_request_from_scheduler
request = slot.scheduler.next_request()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/core/scheduler.py", line 71, in next_request
request = self._dqpop()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/core/scheduler.py", line 106, in _dqpop
d = self.dqs.pop()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/queuelib/pqueue.py", line 43, in pop
m = q.pop()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/squeues.py", line 19, in pop
s = super(SerializableQueue, self).pop()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/queuelib/queue.py", line 161, in pop
size, = struct.unpack(self.SIZE_FORMAT, self.f.read())
struct.error: unpack requires a string argument of length 4
and another error
Unhandled Error
Traceback (most recent call last):
File "/media/dhruvagupta/F428F6FA28F6BB26/backup dhruva/python/New folder/training/news-please-master/news-please-master/newsplease/single_crawler.py", line 152, in __init__
self.process.start()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/crawler.py", line 293, in start
reactor.run(installSignalHandlers=False) # blocking call
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/twisted/internet/base.py", line 1272, in run
self.mainLoop()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/twisted/internet/base.py", line 1281, in mainLoop
self.runUntilCurrent()
--- <exception caught here> ---
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/twisted/internet/base.py", line 902, in runUntilCurrent
call.func(*call.args, **call.kw)
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/utils/reactor.py", line 41, in __call__
return self._func(*self._a, **self._kw)
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/core/engine.py", line 122, in _next_request
if not self._next_request_from_scheduler(spider):
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/core/engine.py", line 149, in _next_request_from_scheduler
request = slot.scheduler.next_request()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/core/scheduler.py", line 71, in next_request
request = self._dqpop()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/core/scheduler.py", line 106, in _dqpop
d = self.dqs.pop()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/queuelib/pqueue.py", line 43, in pop
m = q.pop()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/scrapy/squeues.py", line 19, in pop
s = super(SerializableQueue, self).pop()
File "/home/dhruvagupta/.local/lib/python2.7/site-packages/queuelib/queue.py", line 162, in pop
self.f.seek(-size-self.SIZE_SIZE, os.SEEK_END)
exceptions.IOError: [Errno 22] Invalid argument
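One thing worth pointing out about the launch command itself (a hedged observation, separate from the queue errors above): "-c /newsplease/config/" is passed as a single argv element, so the program sees one argument containing a space rather than the -c flag followed by its value. Each argument normally gets its own list element:
import psutil

script = ("/media/dhruvagupta/F428F6FA28F6BB26/backup dhruva/python/New folder/"
          "training/news-please-master/news-please-master/newsplease/__main__.py")
# The flag and its value are separate list elements.
cmd = ["python", script, "-c", "/newsplease/config/"]
j = psutil.Popen(cmd)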

Replicate a dataset with dask to all workers

I am using dask with the distributed scheduler. I am trying to replicate a dataset, read from a CSV on S3, to all worker nodes. Example:
from distributed import Executor
import dask.dataframe as dd

e = Executor('127.0.0.1:8786', set_as_default=True)
df = dd.read_csv('s3://bucket/file.csv', blocksize=None)
df = e.persist(df)
e.replicate(df)
distributed.utils - ERROR - unhashable type: 'list'
Traceback (most recent call last):
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/distributed/utils.py", line 102, in f
result[0] = yield gen.maybe_future(func(*args, **kwargs))
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1015, in run
value = future.result()
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "<string>", line 3, in raise_exc_info
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1021, in run
yielded = self.gen.throw(*exc_info)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/distributed/executor.py", line 1347, in _replicate
branching_factor=branching_factor)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1015, in run
value = future.result()
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "<string>", line 3, in raise_exc_info
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1021, in run
yielded = self.gen.throw(*exc_info)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/distributed/core.py", line 444, in send_recv_from_rpc
result = yield send_recv(stream=stream, op=key, **kwargs)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1015, in run
value = future.result()
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "<string>", line 3, in raise_exc_info
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1024, in run
yielded = self.gen.send(value)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/distributed/core.py", line 345, in send_recv
six.reraise(*clean_exception(**response))
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/six.py", line 685, in reraise
raise value.with_traceback(tb)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/distributed/core.py", line 211, in handle_stream
result = yield gen.maybe_future(handler(stream, **msg))
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1015, in run
value = future.result()
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "<string>", line 3, in raise_exc_info
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 285, in wrapper
yielded = next(result)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/distributed/scheduler.py", line 1324, in replicate
keys = set(keys)
TypeError: unhashable type: 'list'
Is this the correct way to replicate a dataframe? It appears that the object returned by e.persist(df) does not work with e.replicate for some reason.
This was a bug and has been resolved in https://github.com/dask/distributed/pull/473
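Before that fix landed, a commonly suggested workaround was to replicate the concrete futures backing the collection instead of the collection itself; a sketch (futures_of is part of distributed, though its exact import location in that old release is an assumption):
from distributed import futures_of  # may live in distributed.client/executor in old releases

futures = futures_of(df)  # the futures backing the persisted dataframe's partitions
e.replicate(futures)      # replicate each partition to all workers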
