Trying to load a model from a past run in MLflow, in JupyterLab, never finishes. After waiting for hours, interrupting the run produces the traceback below.
---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
Input In [21], in <cell line: 2>()
1 logged_model = 'runs:/7f6932baef144fa69847ba11ef66f8e6/model/'
----> 2 loaded_model = mlflow.tensorflow.load_model(logged_model)
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/tensorflow/__init__.py:397, in load_model(model_uri, dst_path)
360 def load_model(model_uri, dst_path=None):
361 """
362 Load an MLflow model that contains the TensorFlow flavor from the specified path.
363
(...)
395 for _, output_signature in signature_definition.outputs.items()]
396 """
--> 397 local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
398 flavor_conf = _get_flavor_configuration(local_model_path, FLAVOR_NAME)
399 _add_code_from_conf_to_system_path(local_model_path, flavor_conf)
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/tracking/artifact_utils.py:95, in _download_artifact_from_uri(artifact_uri, output_path)
92 parsed_uri = parsed_uri._replace(path=posixpath.dirname(parsed_uri.path))
93 root_uri = prefix + urllib.parse.urlunparse(parsed_uri)
---> 95 return get_artifact_repository(artifact_uri=root_uri).download_artifacts(
96 artifact_path=artifact_path, dst_path=output_path
97 )
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/store/artifact/runs_artifact_repo.py:125, in RunsArtifactRepository.download_artifacts(self, artifact_path, dst_path)
110 def download_artifacts(self, artifact_path, dst_path=None):
111 """
112 Download an artifact file or directory to a local directory if applicable, and return a
113 local path for it.
(...)
123 :return: Absolute path of the local filesystem location containing the desired artifacts.
124 """
--> 125 return self.repo.download_artifacts(artifact_path, dst_path)
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/store/artifact/artifact_repo.py:242, in ArtifactRepository.download_artifacts(self, artifact_path, dst_path)
240 # Check if the artifacts points to a directory
241 if self._is_directory(artifact_path):
--> 242 dst_local_path, inflight_downloads = async_download_artifact_dir(
243 src_artifact_dir_path=artifact_path, dst_local_dir_path=dst_path
244 )
245 else:
246 inflight_downloads = async_download_artifact(
247 src_artifact_path=artifact_path, dst_local_dir_path=dst_path
248 )
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/store/artifact/artifact_repo.py:208, in ArtifactRepository.download_artifacts.<locals>.async_download_artifact_dir(src_artifact_dir_path, dst_local_dir_path)
206 for file_info in dir_content:
207 if file_info.is_dir:
--> 208 inflight_downloads += async_download_artifact_dir(
209 src_artifact_dir_path=file_info.path,
210 dst_local_dir_path=dst_local_dir_path,
211 )[2]
212 else:
213 inflight_downloads += async_download_artifact(
214 src_artifact_path=file_info.path,
215 dst_local_dir_path=dst_local_dir_path,
216 )
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/store/artifact/artifact_repo.py:199, in ArtifactRepository.download_artifacts.<locals>.async_download_artifact_dir(src_artifact_dir_path, dst_local_dir_path)
195 local_dir = os.path.join(dst_local_dir_path, src_artifact_dir_path)
196 inflight_downloads = []
197 dir_content = [ # prevent infinite loop, sometimes the dir is recursively included
198 file_info
--> 199 for file_info in self.list_artifacts(src_artifact_dir_path)
200 if file_info.path != "." and file_info.path != src_artifact_dir_path
201 ]
202 if not dir_content: # empty dir
203 if not os.path.exists(local_dir):
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/store/artifact/sftp_artifact_repo.py:94, in SFTPArtifactRepository.list_artifacts(self, path)
92 artifact_dir = self.path
93 list_dir = posixpath.join(artifact_dir, path) if path else artifact_dir
---> 94 if not self.sftp.isdir(list_dir):
95 return []
96 artifact_files = self.sftp.listdir(list_dir)
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/pysftp/__init__.py:652, in Connection.isdir(self, remotepath)
650 self._sftp_connect()
651 try:
--> 652 result = S_ISDIR(self._sftp.stat(remotepath).st_mode)
653 except IOError: # no such file
654 result = False
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/sftp_client.py:493, in SFTPClient.stat(self, path)
491 path = self._adjust_cwd(path)
492 self._log(DEBUG, "stat({!r})".format(path))
--> 493 t, msg = self._request(CMD_STAT, path)
494 if t != CMD_ATTRS:
495 raise SFTPError("Expected attributes")
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/sftp_client.py:822, in SFTPClient._request(self, t, *arg)
820 def _request(self, t, *arg):
821 num = self._async_request(type(None), t, *arg)
--> 822 return self._read_response(num)
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/sftp_client.py:852, in SFTPClient._read_response(self, waitfor)
850 while True:
851 try:
--> 852 t, data = self._read_packet()
853 except EOFError as e:
854 raise SSHException("Server connection dropped: {}".format(e))
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/sftp.py:201, in BaseSFTP._read_packet(self)
200 def _read_packet(self):
--> 201 x = self._read_all(4)
202 # most sftp servers won't accept packets larger than about 32k, so
203 # anything with the high byte set (> 16MB) is just garbage.
204 if byte_ord(x[0]):
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/sftp.py:185, in BaseSFTP._read_all(self, n)
183 break
184 else:
--> 185 x = self.sock.recv(n)
187 if len(x) == 0:
188 raise EOFError()
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/channel.py:699, in Channel.recv(self, nbytes)
686 """
687 Receive data from the channel. The return value is a string
688 representing the data received. The maximum amount of data to be
(...)
696 if no data is ready before the timeout set by `settimeout`.
697 """
698 try:
--> 699 out = self.in_buffer.read(nbytes, self.timeout)
700 except PipeTimeout:
701 raise socket.timeout()
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/buffered_pipe.py:160, in BufferedPipe.read(self, nbytes, timeout)
158 while (len(self._buffer) == 0) and not self._closed:
159 then = time.time()
--> 160 self._cv.wait(timeout)
161 if timeout is not None:
162 timeout -= time.time() - then
File ~/.conda/envs/tensorflow/lib/python3.8/threading.py:302, in Condition.wait(self, timeout)
300 try: # restore state no matter what (e.g., KeyboardInterrupt)
301 if timeout is None:
--> 302 waiter.acquire()
303 gotit = True
304 else:
KeyboardInterrupt:
The MLflow tracking server works properly for all other operations: I am able to log params, metrics, and artifacts. But I am not able to load a model or retrieve any of the artifacts.
Update:
Looks like a bug, as reported in https://github.com/mlflow/mlflow/issues/5656.
Please upgrade mlflow; there is an issue with version 1.26.0:
pip install mlflow==1.27.0
(This assumes you are also on the affected version.)
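After upgrading, a quick sanity check before retrying the load; this reuses the run URI from the question and simply verifies the installed version:
import mlflow
print(mlflow.__version__)  # should report 1.27.0 after the upgrade
logged_model = 'runs:/7f6932baef144fa69847ba11ef66f8e6/model/'
loaded_model = mlflow.tensorflow.load_model(logged_model)  # should now finish instead of hanging, if the 1.26.0 bug was the cause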
I am using Python 3 in Jupyter under the Anaconda distribution, with the sgt package 2.0.3, on a 64-bit system with 8 GB of RAM. The SGT function works fine when I do not use multiprocessing, but it throws an error when I do. Could you please help me figure out whether there is any system dependency for the multiprocessing functionality?
from sgt import SGT
import numpy as np
import pandas as pd
import pandarallel
corpus = pd.DataFrame([[1, ["B", "B", "A", "C", "A", "C", "A", "A", "B", "A"]],
                       [2, ["C", "Z", "Z", "Z", "D"]]],
                      columns=['id', 'sequence'])
sgt = SGT(kappa=1,
          flatten=True,
          lengthsensitive=False,
          mode='default')
sgt.fit_transform(corpus)
However, when I run with mode='multiprocessing', it throws the following error:
sgt = SGT(kappa=1,
          flatten=True,
          lengthsensitive=False,
          mode='multiprocessing')
sgt.fit_transform(corpus)
Output:
INFO: Pandarallel will run on 7 workers.
INFO: Pandarallel will use standard multiprocessing data transfer (pipe) to transfer data between the main process and workers.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-...> in <module>
3 lengthsensitive=False,
4 mode='multiprocessing')
----> 5 sgt.fit_transform(corpus)
~\AppData\Local\Continuum\anaconda3\lib\site-packages\sgt\sgt.py in fit_transform(self, corpus)
214 list(self.fit(x['sequence'])),
215 axis=1,
--> 216 result_type='expand')
217 sgt.columns = ['id'] + self.feature_names
218 return sgt
~\AppData\Local\Continuum\anaconda3\lib\site-packages\pandarallel\pandarallel.py in closure(data, func, *args, **kwargs)
440 try:
441 pool = Pool(
--> 442 nb_workers, worker_init, (prepare_worker(use_memory_fs)(worker),),
443 )
444
~\AppData\Local\Continuum\anaconda3\lib\multiprocessing\context.py in Pool(self, processes, initializer, initargs, maxtasksperchild)
117 from .pool import Pool
118 return Pool(processes, initializer, initargs, maxtasksperchild,
--> 119 context=self.get_context())
120
121 def RawValue(self, typecode_or_type, *args):
~\AppData\Local\Continuum\anaconda3\lib\multiprocessing\pool.py in __init__(self, processes, initializer, initargs, maxtasksperchild, context)
174 self._processes = processes
175 self._pool = []
--> 176 self._repopulate_pool()
177
178 self._worker_handler = threading.Thread(
~\AppData\Local\Continuum\anaconda3\lib\multiprocessing\pool.py in _repopulate_pool(self)
239 w.name = w.name.replace('Process', 'PoolWorker')
240 w.daemon = True
--> 241 w.start()
242 util.debug('added worker')
243
~\AppData\Local\Continuum\anaconda3\lib\multiprocessing\process.py in start(self)
110 'daemonic processes are not allowed to have children'
111 _cleanup()
--> 112 self._popen = self._Popen(self)
113 self._sentinel = self._popen.sentinel
114 # Avoid a refcycle if the target function holds an indirect
~\AppData\Local\Continuum\anaconda3\lib\multiprocessing\context.py in _Popen(process_obj)
320 def _Popen(process_obj):
321 from .popen_spawn_win32 import Popen
--> 322 return Popen(process_obj)
323
324 class SpawnContext(BaseContext):
~\AppData\Local\Continuum\anaconda3\lib\multiprocessing\popen_spawn_win32.py in __init__(self, process_obj)
87 try:
88 reduction.dump(prep_data, to_child)
---> 89 reduction.dump(process_obj, to_child)
90 finally:
91 set_spawning_popen(None)
~\AppData\Local\Continuum\anaconda3\lib\multiprocessing\reduction.py in dump(obj, file, protocol)
58 def dump(obj, file, protocol=None):
59 '''Replacement for pickle.dump() using ForkingPickler.'''
---> 60 ForkingPickler(file, protocol).dump(obj)
61
62 #
AttributeError: Can't pickle local object 'prepare_worker.<locals>.closure.<locals>.wrapper'
This is possibly due to the pandas version. Check whether your pandas version is 1.x; if not, upgrade it:
$ pip install pandas --upgrade
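You can confirm what you are running from the notebook itself; a one-line check:
import pandas as pd
print(pd.__version__)  # should start with '1.' after the upgrade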
I have a notebook on Google Colab that fails with the following error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
93 exception = e
---> 94 raise e
95 finally: cb_handler.on_train_end(exception)
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
83 xb, yb = cb_handler.on_batch_begin(xb, yb)
---> 84 loss = loss_batch(model, xb, yb, loss_func, opt, cb_handler)
85 if cb_handler.on_batch_end(loss): break
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
24 if opt is not None:
---> 25 loss = cb_handler.on_backward_begin(loss)
26 loss.backward()
/usr/local/lib/python3.6/dist-packages/fastai/callback.py in on_backward_begin(self, loss)
223 for cb in self.callbacks:
--> 224 a = cb.on_backward_begin(**self.state_dict)
225 if a is not None: self.state_dict['last_loss'] = a
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in on_backward_begin(self, smooth_loss, **kwargs)
266 if self.pbar is not None and hasattr(self.pbar,'child'):
--> 267 self.pbar.child.comment = f'{smooth_loss:.4f}'
268
/usr/local/lib/python3.6/dist-packages/torch/tensor.py in __format__(self, format_spec)
377 if self.dim() == 0:
--> 378 return self.item().__format__(format_spec)
379 return object.__format__(self, format_spec)
RuntimeError: CUDA error: device-side assert triggered
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
<ipython-input-33-dd390b1c8108> in <module>()
----> 1 lr_find(learn)
2 learn.recorder.plot()
/usr/local/lib/python3.6/dist-packages/fastai/train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, **kwargs)
26 cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
27 a = int(np.ceil(num_it/len(learn.data.train_dl)))
---> 28 learn.fit(a, start_lr, callbacks=[cb], **kwargs)
29
30 def to_fp16(learn:Learner, loss_scale:float=512., flat_master:bool=False)->Learner:
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
160 callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
161 fit(epochs, self.model, self.loss_func, opt=self.opt, data=self.data, metrics=self.metrics,
--> 162 callbacks=self.callbacks+callbacks)
163
164 def create_opt(self, lr:Floats, wd:Floats=0.)->None:
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
93 exception = e
94 raise e
---> 95 finally: cb_handler.on_train_end(exception)
96
97 loss_func_name2activ = {'cross_entropy_loss': partial(F.softmax, dim=1), 'nll_loss': torch.exp, 'poisson_nll_loss': torch.exp,
/usr/local/lib/python3.6/dist-packages/fastai/callback.py in on_train_end(self, exception)
254 def on_train_end(self, exception:Union[bool,Exception])->None:
255 "Handle end of training, `exception` is an `Exception` or False if no exceptions during training."
--> 256 self('train_end', exception=exception)
257
258 class AverageMetric(Callback):
/usr/local/lib/python3.6/dist-packages/fastai/callback.py in __call__(self, cb_name, call_mets, **kwargs)
185 "Call through to all of the `CallbakHandler` functions."
186 if call_mets: [getattr(met, f'on_{cb_name}')(**self.state_dict, **kwargs) for met in self.metrics]
--> 187 return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
188
189 def on_train_begin(self, epochs:int, pbar:PBar, metrics:MetricFuncList)->None:
/usr/local/lib/python3.6/dist-packages/fastai/callback.py in <listcomp>(.0)
185 "Call through to all of the `CallbakHandler` functions."
186 if call_mets: [getattr(met, f'on_{cb_name}')(**self.state_dict, **kwargs) for met in self.metrics]
--> 187 return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
188
189 def on_train_begin(self, epochs:int, pbar:PBar, metrics:MetricFuncList)->None:
/usr/local/lib/python3.6/dist-packages/fastai/callbacks/lr_finder.py in on_train_end(self, **kwargs)
45 # restore the valid_dl we turned of on `__init__`
46 self.data.valid_dl = self.valid_dl
---> 47 self.learn.load('tmp')
48 if hasattr(self.learn.model, 'reset'): self.learn.model.reset()
49 print('LR Finder complete, type {learner_name}.recorder.plot() to see the graph.')
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in load(self, name, device)
202 "Load model `name` from `self.model_dir` using `device`, defaulting to `self.data.device`."
203 if device is None: device = self.data.device
--> 204 self.model.load_state_dict(torch.load(self.path/self.model_dir/f'{name}.pth', map_location=device))
205 return self
206
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in load(f, map_location, pickle_module)
356 f = open(f, 'rb')
357 try:
--> 358 return _load(f, map_location, pickle_module)
359 finally:
360 if new_fd:
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in _load(f, map_location, pickle_module)
527 unpickler = pickle_module.Unpickler(f)
528 unpickler.persistent_load = persistent_load
--> 529 result = unpickler.load()
530
531 deserialized_storage_keys = pickle_module.load(f)
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in persistent_load(saved_id)
493 if root_key not in deserialized_objects:
494 deserialized_objects[root_key] = restore_location(
--> 495 data_type(size), location)
496 storage = deserialized_objects[root_key]
497 if view_metadata is not None:
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in restore_location(storage, location)
376 elif isinstance(map_location, torch.device):
377 def restore_location(storage, location):
--> 378 return default_restore_location(storage, str(map_location))
379 else:
380 def restore_location(storage, location):
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in default_restore_location(storage, location)
102 def default_restore_location(storage, location):
103 for _, _, fn in _package_registry:
--> 104 result = fn(storage, location)
105 if result is not None:
106 return result
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in _cuda_deserialize(obj, location)
84 'to an existing device.'.format(
85 device, torch.cuda.device_count()))
---> 86 return obj.cuda(device)
87
88
/usr/local/lib/python3.6/dist-packages/torch/_utils.py in _cuda(self, device, non_blocking, **kwargs)
74 else:
75 new_type = getattr(torch.cuda, self.__class__.__name__)
---> 76 return new_type(self.size()).copy_(self, non_blocking)
77
78
RuntimeError: cuda runtime error (59) : device-side assert triggered at /pytorch/aten/src/THC/generic/THCTensorCopy.cpp:20
There is no information about the real cause. I tried to get the stack trace by forcing CUDA to run on one GPU (as suggested here) using a cell like this:
!export CUDA_LAUNCH_BLOCKING=1
But this does not seem to work; I still get the same error.
Is there another way that works with Google Colab?
Be sure that your target values run from 0 to the number of classes minus 1. For example, if you have 100 classes, your targets should be from 0 to 99; an out-of-range target index is a classic cause of this device-side assert in the loss computation.
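If your labels are not already contiguous, here is a minimal sketch of remapping them (the label values below are made up for illustration):
import torch
targets = torch.tensor([3, 7, 7, 42, 3])               # raw, non-contiguous labels
classes = targets.unique(sorted=True)                  # tensor([ 3,  7, 42])
lookup = {c.item(): i for i, c in enumerate(classes)}  # {3: 0, 7: 1, 42: 2}
targets = torch.tensor([lookup[t.item()] for t in targets])
print(targets)                                         # tensor([0, 1, 1, 2, 0])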
!export FOO=blah is usually not useful to run in a notebook because ! means run the following command in a sub-shell, so the effect of the statement is gone by the time the ! returns.
You might have more success by storing your python code in a file and then executing that file in a subshell:
In one cell:
%%writefile foo.py
[...your code...]
In the next cell:
!export CUDA_LAUNCH_BLOCKING=1; python3 foo.py
(or s/python3/python2/ if you're writing py2)
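You can verify the subshell behaviour for yourself; in this quick check (FOO is just a placeholder name), the notebook process never sees the variable:
import os
!export FOO=blah
print(os.environ.get('FOO'))  # -> None; the export died with its subshell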
Switch the Hardware Accelerator type to "None" under Runtime -> Change Runtime Type. This should give you a more meaningful error message.
The proper way to set environmental variables in Google Colab is to use os:
import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
Using the os library will allow you to set whatever environmental variables you need. Setting CUDA_LAUNCH_BLOCKING this way enables proper CUDA tracebacks in Google Colab.
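One caveat, based on how the CUDA runtime generally behaves rather than anything Colab-specific: the variable is read when CUDA initializes, so set it before the first CUDA call, ideally in a fresh runtime before importing your framework:
import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"  # set this first
import torch  # only then import/initialize anything that touches CUDA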
macOS, Python 2.7.
I am using multiprocessing, but it fails:
In [1]: from multiprocessing import Pool
In [2]: pool = Pool(30)
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
/Users/mac/Documents/python-f/ga_users_v4_multipro.py in <module>()
141
142 if __name__ == '__main__':
--> 143 a,t=main()
144
/Users/mac/Documents/python-f/ga_users_v4_multipro.py in main()
111 date_range.append(t.strftime('%Y-%m-%d'))
112
--> 113 pool = Pool(30)
114 rrr=pool.map(get_report, date_range)
115 pool.close()
/Users/mac/.edm/envs/edm/lib/python2.7/multiprocessing/__init__.pyc in Pool(processes, initializer, initargs, maxtasksperchild)
230 '''
231 from multiprocessing.pool import Pool
--> 232 return Pool(processes, initializer, initargs, maxtasksperchild)
233
234 def RawValue(typecode_or_type, *args):
/Users/mac/.edm/envs/edm/lib/python2.7/multiprocessing/pool.pyc in __init__(self, processes, initializer, initargs, maxtasksperchild)
157 self._processes = processes
158 self._pool = []
--> 159 self._repopulate_pool()
160
161 self._worker_handler = threading.Thread(
/Users/mac/.edm/envs/edm/lib/python2.7/multiprocessing/pool.pyc in _repopulate_pool(self)
221 w.name = w.name.replace('Process', 'PoolWorker')
222 w.daemon = True
--> 223 w.start()
224 debug('added worker')
225
/Users/mac/.edm/envs/edm/lib/python2.7/multiprocessing/process.pyc in start(self)
128 else:
129 from .forking import Popen
--> 130 self._popen = Popen(self)
131 _current_process._children.add(self)
132
/Users/mac/.edm/envs/edm/lib/python2.7/multiprocessing/forking.pyc in __init__(self, process_obj)
119 self.returncode = None
120
--> 121 self.pid = os.fork()
122 if self.pid == 0:
123 if 'random' in sys.modules:
OSError: [Errno 35] Resource temporarily unavailable
I searched on Google, and many say it is a known issue in Python. Has it been solved yet?
Is there anything I can do to work around it?
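Errno 35 is EAGAIN; raised by os.fork() it typically means the per-user process limit was hit. A small diagnostic sketch (not a fix) for inspecting that limit before sizing the pool:
import resource
# Soft/hard cap on processes for this user; Pool(30) plus everything else
# already running can exceed a low soft limit on macOS.
soft, hard = resource.getrlimit(resource.RLIMIT_NPROC)
print(soft, hard)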
I have created a setup to connect via SSH to multiple machines. This is my configuration file:
c = get_config()
c.IPClusterEngines.engine_launcher_class = 'SSHEngineSetLauncher'
Clusters = [36,31,1,24,10,11,4,3,6,26,7,2,9]
c.SSHEngineSetLauncher.engines = dict( [ ('hostname%02d'%x,7) for x in Clusters ] )
c.SSHEngineSetLauncher.engine_args = ['--profile-dir=~/.ipython/profile_ssh']
c.LocalControllerLauncher.controller_args = ["--ip='*'"]
I have a custom class and get the error below. The thing I cannot understand is that if I connect to the standard ipcluster profile, I get no error. Why the difference?
from IPython.parallel import Client
rc = Client() # standard
rcSSH = Client(profile='ssh') # SSH (this gives the error)
rc[:].use_dill()
rcSSH[:].use_dill()
rc[:].load_balanced_view().map_sync(customInstance.function, *args) # <- this runs fine
rcSSH[:].load_balanced_view().map_sync(customInstance.function, *args) # <- this gives the error
And the error
ImportError Traceback (most recent call last)
~/.local/lib/python2.7/site-packages/IPython/kernel/zmq/serialize.pyc in unpack_apply_message(bufs, g, copy)
171 args = []
172 for i in range(info['nargs']):
--> 173 arg, arg_bufs = unserialize_object(arg_bufs, g)
174 args.append(arg)
175 args = tuple(args)
~/.local/lib/python2.7/site-packages/IPython/kernel/zmq/serialize.pyc in unserialize_object(buffers, g)
110 # a zmq message
111 pobj = bytes(pobj)
--> 112 canned = pickle.loads(pobj)
113 if istype(canned, sequence_types) and len(canned) < MAX_ITEMS:
114 for c in canned:
~/.local/lib/python2.7/site-packages/dill/dill.pyc in loads(str)
158 """unpickle an object from a string"""
159 file = StringIO(str)
--> 160 return load(file)
161
162 # def dumpzs(obj, protocol=None):
~/.local/lib/python2.7/site-packages/dill/dill.pyc in load(file)
148 pik = Unpickler(file)
149 pik._main_module = _main_module
--> 150 obj = pik.load()
151 if type(obj).__module__ == _main_module.__name__: # point obj class to main
152 try: obj.__class__ == getattr(pik._main_module, type(obj).__name__)
/usr/lib/python2.7/pickle.pyc in load(self)
856 while 1:
857 key = read(1)
--> 858 dispatch[key](self)
859 except _Stop, stopinst:
860 return stopinst.value
/usr/lib/python2.7/pickle.pyc in load_global(self)
1088 module = self.readline()[:-1]
1089 name = self.readline()[:-1]
-> 1090 klass = self.find_class(module, name)
1091 self.append(klass)
1092 dispatch[GLOBAL] = load_global
~/.local/lib/python2.7/site-packages/dill/dill.pyc in find_class(self, module, name)
224 if (module, name) == ('__builtin__', '__main__'):
225 return self._main_module.__dict__ #XXX: above set w/save_module_dict
--> 226 return StockUnpickler.find_class(self, module, name)
227 pass
228
/usr/lib/python2.7/pickle.pyc in find_class(self, module, name)
1122 def find_class(self, module, name):
1123 # Subclasses may override this
-> 1124 __import__(module)
1125 mod = sys.modules[module]
1126 klass = getattr(mod, name)
ImportError: No module named fiberModes.GRINmediumArbPrec
EDIT:
I should mention that doing the following doesn't change anything:
dview.execute('import fiberModes.GRINmediumArbPrec')
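One hedged guess worth checking: engines launched over SSH start with a different working directory and sys.path than local engines, so a module that is importable locally only via the current directory may simply not be findable on the remote side. Pushing the package location first (the path below is hypothetical) may let the unpickling import succeed:
rcSSH[:].execute("import sys; sys.path.insert(0, '/path/to/parent/of/fiberModes')", block=True)
rcSSH[:].execute('import fiberModes.GRINmediumArbPrec', block=True)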