IPython parallel load-balanced view failing randomly

Here's my code:
import time

from IPython.parallel import Client
from sklearn.datasets import load_digits

def mytask(data, labels, id):
    # ...
    pass

engines = Client()
bview = engines.load_balanced_view()
bview.block = False

digits = load_digits()
X, y = digits.data, digits.target

job = bview.apply(mytask, X, y, 1)
while not job.ready():  # line 242
    time.sleep(2)
print job.result
Occasionally with the same input my code fails with this:
Traceback (most recent call last):
File "task.py", line 242, in <module>
while not job.ready():
File "/usr/lib/python2.7/dist-packages/IPython/parallel/client/asyncresult.py", line 111, in ready
self.wait(0)
File "/usr/lib/python2.7/dist-packages/IPython/parallel/client/asyncresult.py", line 121, in wait
self._ready = self._client.wait(self.msg_ids, timeout)
File "/usr/lib/python2.7/dist-packages/IPython/parallel/client/client.py", line 844, in wait
self.spin()
File "/usr/lib/python2.7/dist-packages/IPython/parallel/client/client.py", line 799, in spin
self._flush_results(self._task_socket)
File "/usr/lib/python2.7/dist-packages/IPython/parallel/client/client.py", line 692, in _flush_results
handler(msg)
File "/usr/lib/python2.7/dist-packages/IPython/parallel/client/client.py", line 657, in _handle_apply_reply
self.results[msg_id] = util.unserialize_object(msg['buffers'])[0]
File "/usr/lib/python2.7/dist-packages/IPython/parallel/util.py", line 262, in unserialize_object
return uncanSequence(map(unserialize, sobj)), bufs
File "/usr/lib/python2.7/dist-packages/IPython/utils/newserialized.py", line 177, in unserialize
return UnSerializeIt(serialized).getObject()
File "/usr/lib/python2.7/dist-packages/IPython/utils/newserialized.py", line 161, in getObject
result = numpy.frombuffer(buf, dtype = self.serialized.metadata['dtype'])
ValueError: offset must be non-negative and smaller than buffer lenth (0)
This seems to be unconnected to my code. I'm not sure what's going wrong.
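For reference, a minimal defensive-polling sketch I'm considering, assuming the ValueError is transient and a later poll can still fetch the reply (job and time come from the snippet above); if the reply buffer really is corrupted this would spin forever:
done = False
while not done:
    try:
        done = job.ready()   # may raise ValueError inside unserialize_object
    except ValueError:
        pass                 # assume the failure is transient; poll again next iteration
    time.sleep(2)
print job.result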

Related

RuntimeError: Unknown qengine

The code looks correct to me, but it fails at runtime. Why does this happen, and how can I solve it?

import torch
import sounddevice as sd
import time

language = "ru"
model_id = "ru_v3"
sample_rate = 48000
speaker = "baya"
put_accent = True
put_yo = True
device = torch.device('cpu')
text = "Здраствуй Олег, рад знакомству"  # Russian: "Hello Oleg, nice to meet you"

model, _ = torch.hub.load(repo_or_dir='snakers4/silero-models',
                          model='silero_tts',
                          language=language,
                          speaker=model_id)
model.to(device)

audio = model.apply_tts(text=text,
                        speaker=speaker,
                        sample_rate=sample_rate,
                        put_accent=put_accent,
                        put_yo=put_yo)
print(text)
sd.play(audio, sample_rate)
time.sleep(len(audio) / sample_rate)
sd.stop()
Error message:
Using cache found in C:\Users\User/.cache\torch\hub\snakers4_silero-models_master
Traceback (most recent call last):
File "D:/Voice_Assistent2/main.py", line 14, in <module>
model, _ = torch.hub.load(repo_or_dir='snakers4/silero-models',
File "D:\Voice_Assistent2\venv\lib\site-packages\torch\hub.py", line 399, in load
model = _load_local(repo_or_dir, model, *args, **kwargs)
File "D:\Voice_Assistent2\venv\lib\site-packages\torch\hub.py", line 428, in _load_local
model = entry(*args, **kwargs)
File "C:\Users\User/.cache\torch\hub\snakers4_silero-models_master\src\silero\silero.py", line 88, in silero_tts
model = imp.load_pickle("tts_models", "model")
File "D:\Voice_Assistent2\venv\lib\site-packages\torch\package\package_importer.py", line 249, in load_pickle
result = unpickler.load()
File "C:\Users\User\AppData\Local\Programs\Python\Python38\lib\pickle.py", line 1210, in load
dispatch[key[0]](self)
File "C:\Users\User\AppData\Local\Programs\Python\Python38\lib\pickle.py", line 1251, in load_binpersid
self.append(self.persistent_load(pid))
File "D:\Voice_Assistent2\venv\lib\site-packages\torch\package\package_importer.py", line 227, in persistent_load
loaded_reduces[reduce_id] = func(self, *args)
File "D:\Voice_Assistent2\venv\lib\site-packages\torch\jit\_script.py", line 344, in unpackage_script_module
cpp_module = torch._C._import_ir_module_from_package(
RuntimeError: Unknown qengine
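A minimal diagnostic sketch, assuming the RuntimeError means the packaged (quantized) TorchScript model requests a quantization engine that the local PyTorch build does not enable; the backend can be inspected and pinned before calling torch.hub.load:
import torch

# Hypothetical check: list the quantization engines this torch build supports and
# pin one explicitly before loading the packaged model. This assumes "Unknown qengine"
# is raised because the model was saved with a qengine (fbgemm/qnnpack) that isn't
# available in the current build.
print(torch.backends.quantized.supported_engines)

if 'qnnpack' in torch.backends.quantized.supported_engines:
    torch.backends.quantized.engine = 'qnnpack'  # or 'fbgemm' on most x86 builds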

Error executing FMU model with pyFMI: "pyfmi.fmi.FMUException: Failed to get the Boolean values"

I am using the code below to simulate a model.
import numpy as np
from pyfmi import load_fmu

def run_demo(with_plots=True):
    # start_time and stop_time are defined elsewhere in the script
    traj = np.array([[start_time, 2.25]])
    input_object = ('input_1[1]', traj)
    model = load_fmu('pyfmimodel.fmu', log_level=7)
    opts = model.simulate_options()
    opts['ncp'] = 266
    # Simulate
    res = model.simulate(options=opts, input=input_object, final_time=stop_time)
This is the error I am getting; I need help resolving it.
Traceback (most recent call last):
File "D:\Projects\Python\DOCKER\model_2.py", line 55, in <module>
run_demo()
File "D:\Projects\Python\DOCKER\model_2.py", line 38, in run_demo
res = model.simulate(options=opts, input=input_object,final_time=stop_time )
File "src\pyfmi\fmi.pyx", line 7519, in pyfmi.fmi.FMUModelCS2.simulate
File "src\pyfmi\fmi.pyx", line 378, in pyfmi.fmi.ModelBase._exec_simulate_algorithm
File "src\pyfmi\fmi.pyx", line 372, in pyfmi.fmi.ModelBase._exec_simulate_algorithm
File "C:\Users\tcto5k\Miniconda3\lib\site-packages\pyfmi\fmi_algorithm_drivers.py", line 984, in __init__
self.result_handler.simulation_start()
File "C:\Users\tcto5k\Miniconda3\lib\site-packages\pyfmi\common\io.py", line 2553, in simulation_start
[parameter_data, sorted_vars_real_vref, sorted_vars_int_vref, sorted_vars_bool_vref] = fmi_util.prepare_data_info(data_info, sorted_vars,
File "src\pyfmi\fmi_util.pyx", line 257, in pyfmi.fmi_util.prepare_data_info
File "src\pyfmi\fmi_util.pyx", line 337, in pyfmi.fmi_util.prepare_data_info
File "src\pyfmi\fmi.pyx", line 4377, in pyfmi.fmi.FMUModelBase2.get_boolean
pyfmi.fmi.FMUException: Failed to get the Boolean values.
This is the FMU model variable definition, which accepts a 1D array as input:
<ScalarVariable name="input_1[1]" valueReference="0" description="u" causality="input" variability="continuous">
<Real start="2.0"/>
</ScalarVariable>
<!-- 2 -->
<ScalarVariable name="dense_3[1]" valueReference="614" description="y (1st order)" causality="output" variability="continuous" initial="calculated">
<Real/>
</ScalarVariable>
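A hedged workaround sketch, assuming the exception is raised because the result handler tries to store every variable in the model description, including Boolean variables the FMU cannot actually serve; pyFMI's simulate options accept a filter that restricts which variables are stored (the variable names below are taken from the model description above):
# Hypothetical variant of the run_demo() call above: only store the variables of
# interest, so the result handler never queries the Boolean variables it failed on.
opts = model.simulate_options()
opts['ncp'] = 266
opts['filter'] = ['input_1[1]', 'dense_3[1]']
res = model.simulate(options=opts, input=input_object, final_time=stop_time)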

xlwings recently stopped getting live data from Excel via Range

I had been running a script that reads data from Excel for over a year, using the xlwings Range command like so:
data = Range('A1:D10').value
Suddenly, it stopped working. I had changed nothing in the code or the system, other than perhaps installing another network card.
This is the error when trying to use the Range assignment now.
Traceback (most recent call last):
File "G:\python32\fetcher.py", line 61, in <module>
listFull = getComData()
File "G:\python32\fetcher.py", line 38, in getComData
listFull=Range('A4:H184').value
File "G:\python32\lib\site-packages\xlwings\main.py", line 1490, in __init__
impl = apps.active.range(cell1).impl
File "G:\python32\lib\site-packages\xlwings\main.py", line 439, in range
return Range(impl=self.impl.range(cell1, cell2))
File "G:\python32\lib\site-packages\xlwings\_xlwindows.py", line 457, in range
xl1 = self.xl.Range(arg1)
File "G:\python32\lib\site-packages\xlwings\_xlwindows.py", line 341, in xl
self._xl = get_xl_app_from_hwnd(self._hwnd)
File "G:\python32\lib\site-packages\xlwings\_xlwindows.py", line 251, in get_xl_app_from_hwnd
disp = COMRetryObjectWrapper(Dispatch(p))
File "G:\python32\lib\site-packages\win32com\client\__init__.py", line 96, in Dispatch
return __WrapDispatch(dispatch, userName, resultCLSID, typeinfo, clsctx=clsctx)
File "G:\python32\lib\site-packages\win32com\client\__init__.py", line 37, in __WrapDispatch
klass = gencache.GetClassForCLSID(resultCLSID)
File "G:\python32\lib\site-packages\win32com\client\gencache.py", line 180, in GetClassForCLSID
mod = GetModuleForCLSID(clsid)
File "G:\python32\lib\site-packages\win32com\client\gencache.py", line 223, in GetModuleForCLSID
mod = GetModuleForTypelib(typelibCLSID, lcid, major, minor)
File "G:\python32\lib\site-packages\win32com\client\gencache.py", line 259, in GetModuleForTypelib
mod = _GetModule(modName)
File "G:\python32\lib\site-packages\win32com\client\gencache.py", line 622, in _GetModule
mod = __import__(mod_name)
ValueError: source code string cannot contain null bytes
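A hedged recovery sketch, assuming the final ValueError means the win32com generated COM cache (gen_py) has become corrupted, e.g. a half-written module containing null bytes; deleting the cache lets win32com regenerate it on the next run:
import shutil
import win32com

# Hypothetical cleanup: remove the generated gen_py cache so win32com rebuilds it.
# win32com.__gen_path__ points at the per-user cache directory that gencache imports from.
print("Removing COM cache at:", win32com.__gen_path__)
shutil.rmtree(win32com.__gen_path__, ignore_errors=True)
# Re-run the xlwings script afterwards; Dispatch() should regenerate the modules.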

PicklingError when getting the result from ray

I'm working on slowly converting my very serial text analysis engine to use Modin and Ray. It feels like I'm nearly there; however, I seem to have hit a stumbling block. My code looks like this:
import ray
from sklearn.feature_extraction.text import TfidfVectorizer

# ngrams, r_strings, files and the @ray.remote match_name() task are defined elsewhere

vectorizer = TfidfVectorizer(
    analyzer=ngrams, encoding="ascii", stop_words="english", strip_accents="ascii"
)
tf_idf_matrix = vectorizer.fit_transform(r_strings["name"])

r_vectorizer = ray.put(vectorizer)
r_tf_idf_matrix = ray.put(tf_idf_matrix)

n = 2
match_results = []
for fn in files["c.file"]:
    match_results.append(
        match_name.remote(fn, r_vectorizer, r_tf_idf_matrix, r_strings, n)
    )

match_returns = ray.get(match_results)
I'm following the guidance from the "anti-patterns" section of the Ray documentation on what to avoid, and this code is very close to the recommended "better" pattern described there.
Traceback (most recent call last):
File "alt.py", line 213, in <module>
match_returns = ray.get(match_results)
File "/home/myuser/.local/lib/python3.7/site-packages/ray/_private/client_mode_hook.py", line 62, in wrapper
return func(*args, **kwargs)
File "/home/myuser/.local/lib/python3.7/site-packages/ray/worker.py", line 1501, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(PicklingError): ray::match_name() (pid=23393, ip=192.168.1.173)
File "python/ray/_raylet.pyx", line 564, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 565, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 1652, in ray._raylet.CoreWorker.store_task_outputs
File "/home/myuser/.local/lib/python3.7/site-packages/ray/serialization.py", line 327, in serialize
return self._serialize_to_msgpack(value)
File "/home/myuser/.local/lib/python3.7/site-packages/ray/serialization.py", line 307, in _serialize_to_msgpack
self._serialize_to_pickle5(metadata, python_objects)
File "/home/myuser/.local/lib/python3.7/site-packages/ray/serialization.py", line 267, in _serialize_to_pickle5
raise e
File "/home/myuser/.local/lib/python3.7/site-packages/ray/serialization.py", line 264, in _serialize_to_pickle5
value, protocol=5, buffer_callback=writer.buffer_callback)
File "/home/myuser/.local/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle_fast.py", line 73, in dumps
cp.dump(obj)
File "/home/myuser/.local/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle_fast.py", line 580, in dump
return Pickler.dump(self, obj)
_pickle.PicklingError: args[0] from __newobj__ args has the wrong class
Definitely an unexpected result. I'm not sure where to go next with this and would appreciate help from folks who have more experience with Ray and Modin.
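A hedged sketch of the direction worth probing, assuming the PicklingError is raised while Ray serializes the value returned by match_name() (the traceback fails in store_task_outputs) and that the return value mixes Modin objects with their pandas counterparts; converting to plain pandas or built-in objects before returning sidesteps that (do_matching is an illustrative placeholder for the existing matching logic):
import modin.pandas as mpd
import ray

@ray.remote
def match_name(fn, vectorizer, tf_idf_matrix, strings, n):
    # ... existing matching logic producing `matches` ...
    matches = do_matching(fn, vectorizer, tf_idf_matrix, strings, n)  # hypothetical helper

    # Return plain pandas / built-in objects rather than Modin frames, so the
    # worker-side pickling in store_task_outputs never has to reduce a Modin class.
    if isinstance(matches, mpd.DataFrame):
        matches = matches._to_pandas()  # Modin's (private) pandas conversion helper
    return matches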

TypeError: can't pickle cv2.xfeatures2d_SIFT objects (occurred while using joblib.Parallel)

I am trying to parallelize my code and used joblib.Parallel for the purpose. I got the above error during the feature calculation stage. Here is my code.
from joblib import Parallel, delayed
from utils import FeatureGetter
# some code

class Model:
    # initialize

    def getDescriptors(self, image):
        descriptors = self.feature_getter.get_features(image)
        return descriptors

    def train(self):
        # some code
        self.feature_getter = FeatureGetter()
        descriptors_list = Parallel(n_jobs=-1)(
            delayed(self.getDescriptors)(image) for image in self.X_train
        )
Complete error log:
Traceback (most recent call last):
File "test_parallel.py", line 375, in <module>
bow.trainModel()
File "test_parallel.py", line 144, in trainModel
self.desc_list = Parallel(n_jobs=-1)(delayed(self.getDescriptors)(image) for image in self.X_train)
File "/home/vamsi.muthireddy/miniconda3/lib/python3.6/site-packages/joblib/parallel.py", line 779, in __call__
while self.dispatch_one_batch(iterator):
File "/home/vamsi.muthireddy/miniconda3/lib/python3.6/site-packages/joblib/parallel.py", line 620, in dispatch_one_batch
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
File "/home/vamsi.muthireddy/miniconda3/lib/python3.6/site-packages/joblib/parallel.py", line 127, in __init__
self.items = list(iterator_slice)
File "test_parallel.py", line 144, in <genexpr>
self.desc_list = Parallel(n_jobs=-1)(delayed(self.getDescriptors)(image) for image in self.X_train)
File "/home/vamsi.muthireddy/miniconda3/lib/python3.6/site-packages/joblib/parallel.py", line 183, in delayed
pickle.dumps(function)
TypeError: can't pickle cv2.xfeatures2d_SIFT objects
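A hedged restructuring sketch, assuming the pickling fails because delayed(self.getDescriptors) drags self, and the cv2 SIFT object held by its FeatureGetter, into the payload shipped to the worker processes; a module-level helper that builds its own FeatureGetter avoids pickling the cv2 object (compute_descriptors is an illustrative name, not from the original code):
from joblib import Parallel, delayed
from utils import FeatureGetter


def compute_descriptors(image):
    # Hypothetical module-level helper: each call constructs its own FeatureGetter,
    # so the unpicklable cv2.xfeatures2d_SIFT object never crosses process boundaries.
    # Re-creating it per image is wasteful, but this is the minimal version.
    feature_getter = FeatureGetter()
    return feature_getter.get_features(image)


class Model:
    def train(self):
        self.desc_list = Parallel(n_jobs=-1)(
            delayed(compute_descriptors)(image) for image in self.X_train
        )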