Running python manage.py shell raises errors - python

E:\Users\liuzhijun\workspace\mysite>python manage.py shell
---------------------------------------------------------------------------
TypeError Python 2.7.4: E:\Python27\python.exe
Mon May 20 07:22:35 2013
A problem occured executing Python code. Here is the sequence of function
calls leading up to the error, with the most recent (innermost) call last.
E:\Users\liuzhijun\workspace\mysite\manage.py in <module>()
7 #!/usr/bin/env python
8 import os
9 import sys
---> 10
global execute_from_command_line = <function execute_from_command_line at 0x02AF94F0>
global sys.argv = ['manage.py', 'shell']
11 if __name__ == "__main__":
12 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
13
14 from django.core.management import execute_from_command_line
15
16 execute_from_command_line(sys.argv)
17
E:\Python27\lib\site-packages\django\core\management\__init__.pyc in execute_from_command_line(argv=['manage.py', 'shell'])
428 )
429
430 # Import the project module. We add the parent directory to PYTHONPATH to
431 # avoid some of the path errors new users can have.
432 sys.path.append(os.path.join(project_directory, os.pardir))
433 import_module(project_name)
434 sys.path.pop()
435
436 return project_directory
437
438 def execute_from_command_line(argv=None):
439 """
440 A simple method that runs a ManagementUtility.
441 """
442 utility = ManagementUtility(argv)
--> 443 utility.execute()
444
445 def execute_manager(settings_mod, argv=None):
446 """
447 Like execute_from_command_line(), but for use by manage.py, a
448 project-specific django-admin.py utility.
449 """
450 warnings.warn(
451 "The 'execute_manager' function is deprecated, "
452 "you likely need to update your 'manage.py'; "
453 "please see the Django 1.4 release notes "
454 "(https://docs.djangoproject.com/en/dev/releases/1.4/).",
455 PendingDeprecationWarning)
456
457 setup_environ(settings_mod)
458 utility = ManagementUtility(argv)
......omit lots of codes......
Update:
E:\Python27\lib\site-packages\django\core\management\__init__.pyc in execute(self=)
367 elif args[2] == '--commands':
368 sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
369 else:
370 self.fetch_command(args[2]).print_help(self.prog_name, args[2])
371 elif subcommand == 'version':
372 sys.stdout.write(parser.get_version() + '\n')
373 # Special-cases: We want 'django-admin.py --version' and
374 # 'django-admin.py --help' to work, for backwards compatibility.
375 elif self.argv[1:] == ['--version']:
376 # LaxOptionParser already takes care of printing the version.
377 pass
378 elif self.argv[1:] in (['--help'], ['-h']):
379 parser.print_lax_help()
380 sys.stdout.write(self.main_help_text() + '\n')
381 else:
--> 382 self.fetch_command(subcommand).run_from_argv(self.argv)
383
384 def setup_environ(settings_mod, original_settings_path=None):
385 """
386 Configures the runtime environment. This can also be used by external
387 scripts wanting to set up a similar environment to manage.py.
388 Returns the project directory (assuming the passed settings module is
389 directly in the project directory).
390
391 The "original_settings_path" parameter is optional, but recommended, since
392 trying to work out the original path from the module can be problematic.
393 """
394 warnings.warn(
395 "The 'setup_environ' function is deprecated, "
396 "you likely need to update your 'manage.py'; "
397 "please see the Django 1.4 release notes "
E:\Python27\lib\site-packages\django\core\management\base.pyc in run_from_argv(self=, argv=['manage.py', 'shell'])
181 ``self.usage()``.
182
183 """
184 parser = self.create_parser(prog_name, subcommand)
185 parser.print_help()
186
187 def run_from_argv(self, argv):
188 """
189 Set up any environment changes requested (e.g., Python path
190 and Django settings), then run this command.
191
192 """
193 parser = self.create_parser(argv[0], argv[1])
194 options, args = parser.parse_args(argv[2:])
195 handle_default_options(options)
--> 196 self.execute(*args, **options.__dict__)
global s = undefined
global appname = undefined
global appname...c = undefined
197
198 def execute(self, *args, **options):
199 """
200 Try to execute this command, performing model validation if
201 needed (as controlled by the attribute
202 ``self.requires_model_validation``). If the command raises a
203 ``CommandError``, intercept it and print it sensibly to
204 stderr.
205 """
206 show_traceback = options.get('traceback', False)
207
208 # Switch to English, because django-admin.py creates database content
209 # like permissions, and those shouldn't contain any translations.
210 # But only do this if we can assume we have a working settings file,
211 # because django.utils.translation requires settings.
E:\Python27\lib\site-packages\django\core\management\base.pyc in execute(self=, *args=(), **options={'plain': None, 'pythonpath': None, 'settings': None, 'traceback': None, 'verbosity':'1'})
217 translation.activate('en-us')
218 except ImportError, e:
219 # If settings should be available, but aren't,
220 # raise the error and quit.
221 if show_traceback:
222 traceback.print_exc()
223 else:
224 sys.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
225 sys.exit(1)
226
227 try:
228 self.stdout = options.get('stdout', sys.stdout)
229 self.stderr = options.get('stderr', sys.stderr)
230 if self.requires_model_validation:
231 self.validate()
--> 232 output = self.handle(*args, **options)
global Rather = undefined
global than = undefined
global implementing = undefined
global handle = undefined
global subclasses = undefined
global must = undefined
global implement = undefined
233 if output:
234 if self.output_transaction:
235 # This needs to be imported here, because it relies on
236 # settings.
237 from django.db import connections, DEFAULT_DB_ALIAS
238 connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
239 if connection.ops.start_transaction_sql():
240 self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()) + '\n')
241 self.stdout.write(output)
242 if self.output_transaction:
243 self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;") + '\n')
244 except CommandError, e:
245 if show_traceback:
246 traceback.print_exc()
247 else:
E:\Python27\lib\site-packages\django\core\management\base.pyc in handle(self=, *args=(), **options={'plain': None, 'pythonpath': None, 'settings': None, 'traceback': None, 'verbosity': '1'})
356 """
357 A command which takes no arguments on the command line.
358
359 Rather than implementing ``handle()``, subclasses must implement
360 ``handle_noargs()``; ``handle()`` itself is overridden to ensure
361 no arguments are passed to the command.
362
363 Attempting to pass arguments will raise ``CommandError``.
364
365 """
366 args = ''
367
368 def handle(self, *args, **options):
369 if args:
370 raise CommandError("Command doesn't accept any arguments")
--> 371 return self.handle_noargs(**options)
372
373 def handle_noargs(self, **options):
374 """
375 Perform this command's actions.
376
377 """
378 raise NotImplementedError()
379
380
381
382
383
384
385
386
E:\Python27\lib\site-packages\django\core\management\commands\shell.pyc in handle_noargs(self=, **options={'plain': None, 'pythonpath': None, 'settings': None, 'traceback': None, 'verbosity': '1'})
39 pass
40 raise ImportError
41
42 def handle_noargs(self, **options):
43 # XXX: (Temporary) workaround for ticket #1796: force early loading of all
44 # models from installed apps.
45 from django.db.models.loading import get_models
46 get_models()
47
48 use_plain = options.get('plain', False)
49
50 try:
51 if use_plain:
52 # Don't bother loading IPython, because the user wants plain Python.
53 raise ImportError
---> 54 self.run_shell()
55 except ImportError:
56 import code
57 # Set up a dictionary to serve as the environment for the shell, so
58 # that tab completion works on objects that are imported at runtime.
59 # See ticket 5082.
60 imported_objects = {}
61 try: # Try activating rlcompleter, because it's handy.
62 import readline
63 except ImportError:
64 pass
65 else:
66 # We don't have to wrap the following import in a 'try', because
67 # we already know 'readline' was imported successfully.
68 import rlcompleter
69 readline.set_completer(rlcompleter.Completer(imported_objects).complete)
E:\Python27\lib\site-packages\django\core\management\commands\shell.pyc in run_shell(self=)
22 try:
23 from IPython.Shell import IPShell
24 shell = IPShell(argv=[])
25 shell.mainloop()
26 except ImportError:
27 # IPython not found at all, raise ImportError
28 raise
29
30 def bpython(self):
31 import bpython
32 bpython.embed()
33
34 def run_shell(self):
35 for shell in self.shells:
36 try:
---> 37 return getattr(self, shell)()
global t = undefined
global __name__t = undefined
38 except ImportError:
39 pass
40 raise ImportError
41
42 def handle_noargs(self, **options):
43 # XXX: (Temporary) workaround for ticket #1796: force early loading of all
44 # models from installed apps.
45 from django.db.models.loading import get_models
46 get_models()
47
48 use_plain = options.get('plain', False)
49
50 try:
51 if use_plain:
52 # Don't bother loading IPython, because the user wants plain Python.
E:\Python27\lib\site-packages\django\core\management\commands\shell.pyc in ipython(self=)
9 )
10 help = "Runs a Python interactive interpreter. Tries to use IPython, if it's available."
11 shells = ['ipython', 'bpython']
12 requires_model_validation = False
13
14 def ipython(self):
15 try:
16 from IPython import embed
17 embed()
18 except ImportError:
19 # IPython < 0.11
20 # Explicitly pass an empty list as arguments, because otherwise
21 # IPython would use sys.argv from this script.
22 try:
23 from IPython.Shell import IPShell
---> 24 shell = IPShell(argv=[])
global j = undefined
global d = undefined
global s = undefined
global t = undefined
25 shell.mainloop()
26 except ImportError:
27 # IPython not found at all, raise ImportError
28 raise
29
30 def bpython(self):
31 import bpython
32 bpython.embed()
33
34 def run_shell(self):
35 for shell in self.shells:
36 try:
37 return getattr(self, shell)()
38 except ImportError:
39 pass
E:\Python27\lib\site-packages\IPython\Shell.pyc in __init__(self=, argv=[], user_ns=None, user_global_ns=None, debug=1, shell_class=)
58 # Default timeout for waiting for multithreaded shells (in seconds)
59 GUI_TIMEOUT = 10
60
61 #-----------------------------------------------------------------------------
62 # This class is trivial now, but I want to have it in to publish a clean
63 # interface. Later when the internals are reorganized, code that uses this
64 # shouldn't have to change.
65
66 class IPShell:
67 """Create an IPython instance."""
68
69 def __init__(self,argv=None,user_ns=None,user_global_ns=None,
70 debug=1,shell_class=InteractiveShell):
71 self.IP = make_IPython(argv,user_ns=user_ns,
72 user_global_ns=user_global_ns,
---> 73 debug=debug,shell_class=shell_class)
global For = undefined
global more = undefined
global details = undefined
global see = undefined
global the = undefined
global __call__ = undefined
global method = undefined
global below. = undefined
74
75 def mainloop(self,sys_exit=0,banner=None):
76 self.IP.mainloop(banner)
77 if sys_exit:
78 sys.exit()
79
80 #-----------------------------------------------------------------------------
81 def kill_embedded(self,parameter_s=''):
82 """%kill_embedded : deactivate for good the current embedded IPython.
83
84 This function (after asking for confirmation) sets an internal flag so that
85 an embedded IPython will never activate again. This is useful to
86 permanently disable a shell that is being called inside a loop: once you've
87 figured out what you needed from it, you may then kill it and the program
88 will then continue to run without the interactive shell interfering again.
E:\Python27\lib\site-packages\IPython\ipmaker.pyc in make_IPython(argv=[], user_ns=None, user_global_ns=None, debug=1, rc_override=None, shell_class=, embedded=False, **kw={})
506 # tweaks. Basically options which affect other options. I guess this
507 # should just be written so that options are fully orthogonal and we
508 # wouldn't worry about this stuff!
509
510 if IP_rc.classic:
511 IP_rc.quick = 1
512 IP_rc.cache_size = 0
513 IP_rc.pprint = 0
514 IP_rc.prompt_in1 = '>>> '
515 IP_rc.prompt_in2 = '... '
516 IP_rc.prompt_out = ''
517 IP_rc.separate_in = IP_rc.separate_out = IP_rc.separate_out2 = '0'
518 IP_rc.colors = 'NoColor'
519 IP_rc.xmode = 'Plain'
520
--> 521 IP.pre_config_initialization()
522 # configure readline
523
524 # update exception handlers with rc file status
525 otrap.trap_out() # I don't want these messages ever.
526 IP.magic_xmode(IP_rc.xmode)
527 otrap.release_out()
528
529 # activate logging if requested and not reloading a log
530 if IP_rc.logplay:
531 IP.magic_logstart(IP_rc.logplay + ' append')
532 elif IP_rc.logfile:
533 IP.magic_logstart(IP_rc.logfile)
534 elif IP_rc.log:
535 IP.magic_logstart()
536
E:\Python27\lib\site-packages\IPython\iplib.pyc in pre_config_initialization(self=)
820 self.user_ns, # globals
821 # Skip our own frame in searching for locals:
822 sys._getframe(depth+1).f_locals # locals
823 ))
824
825 def pre_config_initialization(self):
826 """Pre-configuration init method
827
828 This is called before the configuration files are processed to
829 prepare the services the config files might need.
830
831 self.rc already has reasonable default values at this point.
832 """
833 rc = self.rc
834 try:
--> 835 self.db = pickleshare.PickleShareDB(rc.ipythondir + "/db")
global prompt = undefined
global a = undefined
global string = <module 'string' from 'E:\Python27\lib\string.pyc'>
global to = undefined
global be = undefined
global printed = undefined
global the = undefined
global user. = undefined
836 except exceptions.UnicodeDecodeError:
837 print "Your ipythondir can't be decoded to unicode!"
838 print "Please set HOME environment variable to something that"
839 print r"only has ASCII characters, e.g. c:\home"
840 print "Now it is",rc.ipythondir
841 sys.exit()
842 self.shadowhist = IPython.history.ShadowHist(self.db)
843
844 def post_config_initialization(self):
845 """Post configuration init method
846
847 This is called after the configuration files have been processed to
848 'finalize' the initialization."""
849
850 rc = self.rc
E:\Python27\lib\site-packages\IPython\Extensions\pickleshare.pyc in __init__(self=PickleShareDB('C:\Users\liuzhijun\_ipython\db'), root=u'C:\Users\liuzhijun\_ipython/db')
38 import cPickle as pickle
39 import UserDict
40 import warnings
41 import glob
42
43 def gethashfile(key):
44 return ("%02x" % abs(hash(key) % 256))[-2:]
45
46 _sentinel = object()
47
48 class PickleShareDB(UserDict.DictMixin):
49 """ The main 'connection' object for PickleShare database """
50 def __init__(self,root):
51 """ Return a db object that will manage the specied directory"""
52 self.root = Path(root).expanduser().abspath()
---> 53 if not self.root.isdir():
54 self.root.makedirs()
55 # cache has { 'key' : (obj, orig_mod_time) }
56 self.cache = {}
57
58
59 def __getitem__(self,key):
60 """ db['key'] reading """
61 fil = self.root / key
62 try:
63 mtime = (fil.stat()[stat.ST_MTIME])
64 except OSError:
65 raise KeyError(key)
66
67 if fil in self.cache and mtime == self.cache[fil][1]:
68 return self.cache[fil][0]
TypeError: _isdir() takes exactly 1 argument (0 given)
Oops, IPython crashed. We do our best to make it stable, but...
A crash report was automatically generated with the following information:
- A verbatim copy of the crash traceback.
- A copy of your input history during this session.
- Data on your current IPython configuration.
It was left in the file named:
'C:\Users\liuzhijun\_ipython\IPython_crash_report.txt'
If you can email this file to the developers, the information in it will help
them in understanding and correcting the problem.
You can mail it to: Fernando Perez at fperez.net@gmail.com
with the subject 'IPython Crash Report'.
If you want to do it now, the following command will work (under Unix):
mail -s 'IPython Crash Report' fperez.net@gmail.com < C:\Users\liuzhijun\_ipython\IPython_crash_report.txt
To ensure accurate tracking of this issue, please file a report about it at:
https://bugs.launchpad.net/ipython/+filebug
Press enter to exit:
System environment: Windows 8

Related

Mlflow "load_model" goes in deadlock

Trying to load a model from a past run in MLflow, in JupyterLab, never finishes. After waiting for hours, interrupting the run throws the state below.
---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
Input In [21], in <cell line: 2>()
1 logged_model = 'runs:/7f6932baef144fa69847ba11ef66f8e6/model/'
----> 2 loaded_model = mlflow.tensorflow.load_model(logged_model)
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/tensorflow/__init__.py:397, in load_model(model_uri, dst_path)
360 def load_model(model_uri, dst_path=None):
361 """
362 Load an MLflow model that contains the TensorFlow flavor from the specified path.
363
(...)
395 for _, output_signature in signature_definition.outputs.items()]
396 """
--> 397 local_model_path = _download_artifact_from_uri(artifact_uri=model_uri, output_path=dst_path)
398 flavor_conf = _get_flavor_configuration(local_model_path, FLAVOR_NAME)
399 _add_code_from_conf_to_system_path(local_model_path, flavor_conf)
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/tracking/artifact_utils.py:95, in _download_artifact_from_uri(artifact_uri, output_path)
92 parsed_uri = parsed_uri._replace(path=posixpath.dirname(parsed_uri.path))
93 root_uri = prefix + urllib.parse.urlunparse(parsed_uri)
---> 95 return get_artifact_repository(artifact_uri=root_uri).download_artifacts(
96 artifact_path=artifact_path, dst_path=output_path
97 )
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/store/artifact/runs_artifact_repo.py:125, in RunsArtifactRepository.download_artifacts(self, artifact_path, dst_path)
110 def download_artifacts(self, artifact_path, dst_path=None):
111 """
112 Download an artifact file or directory to a local directory if applicable, and return a
113 local path for it.
(...)
123 :return: Absolute path of the local filesystem location containing the desired artifacts.
124 """
--> 125 return self.repo.download_artifacts(artifact_path, dst_path)
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/store/artifact/artifact_repo.py:242, in ArtifactRepository.download_artifacts(self, artifact_path, dst_path)
240 # Check if the artifacts points to a directory
241 if self._is_directory(artifact_path):
--> 242 dst_local_path, inflight_downloads = async_download_artifact_dir(
243 src_artifact_dir_path=artifact_path, dst_local_dir_path=dst_path
244 )
245 else:
246 inflight_downloads = async_download_artifact(
247 src_artifact_path=artifact_path, dst_local_dir_path=dst_path
248 )
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/store/artifact/artifact_repo.py:208, in ArtifactRepository.download_artifacts.<locals>.async_download_artifact_dir(src_artifact_dir_path, dst_local_dir_path)
206 for file_info in dir_content:
207 if file_info.is_dir:
--> 208 inflight_downloads += async_download_artifact_dir(
209 src_artifact_dir_path=file_info.path,
210 dst_local_dir_path=dst_local_dir_path,
211 )[2]
212 else:
213 inflight_downloads += async_download_artifact(
214 src_artifact_path=file_info.path,
215 dst_local_dir_path=dst_local_dir_path,
216 )
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/store/artifact/artifact_repo.py:199, in ArtifactRepository.download_artifacts.<locals>.async_download_artifact_dir(src_artifact_dir_path, dst_local_dir_path)
195 local_dir = os.path.join(dst_local_dir_path, src_artifact_dir_path)
196 inflight_downloads = []
197 dir_content = [ # prevent infinite loop, sometimes the dir is recursively included
198 file_info
--> 199 for file_info in self.list_artifacts(src_artifact_dir_path)
200 if file_info.path != "." and file_info.path != src_artifact_dir_path
201 ]
202 if not dir_content: # empty dir
203 if not os.path.exists(local_dir):
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/mlflow/store/artifact/sftp_artifact_repo.py:94, in SFTPArtifactRepository.list_artifacts(self, path)
92 artifact_dir = self.path
93 list_dir = posixpath.join(artifact_dir, path) if path else artifact_dir
---> 94 if not self.sftp.isdir(list_dir):
95 return []
96 artifact_files = self.sftp.listdir(list_dir)
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/pysftp/__init__.py:652, in Connection.isdir(self, remotepath)
650 self._sftp_connect()
651 try:
--> 652 result = S_ISDIR(self._sftp.stat(remotepath).st_mode)
653 except IOError: # no such file
654 result = False
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/sftp_client.py:493, in SFTPClient.stat(self, path)
491 path = self._adjust_cwd(path)
492 self._log(DEBUG, "stat({!r})".format(path))
--> 493 t, msg = self._request(CMD_STAT, path)
494 if t != CMD_ATTRS:
495 raise SFTPError("Expected attributes")
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/sftp_client.py:822, in SFTPClient._request(self, t, *arg)
820 def _request(self, t, *arg):
821 num = self._async_request(type(None), t, *arg)
--> 822 return self._read_response(num)
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/sftp_client.py:852, in SFTPClient._read_response(self, waitfor)
850 while True:
851 try:
--> 852 t, data = self._read_packet()
853 except EOFError as e:
854 raise SSHException("Server connection dropped: {}".format(e))
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/sftp.py:201, in BaseSFTP._read_packet(self)
200 def _read_packet(self):
--> 201 x = self._read_all(4)
202 # most sftp servers won't accept packets larger than about 32k, so
203 # anything with the high byte set (> 16MB) is just garbage.
204 if byte_ord(x[0]):
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/sftp.py:185, in BaseSFTP._read_all(self, n)
183 break
184 else:
--> 185 x = self.sock.recv(n)
187 if len(x) == 0:
188 raise EOFError()
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/channel.py:699, in Channel.recv(self, nbytes)
686 """
687 Receive data from the channel. The return value is a string
688 representing the data received. The maximum amount of data to be
(...)
696 if no data is ready before the timeout set by `settimeout`.
697 """
698 try:
--> 699 out = self.in_buffer.read(nbytes, self.timeout)
700 except PipeTimeout:
701 raise socket.timeout()
File ~/.conda/envs/tensorflow/lib/python3.8/site-packages/paramiko/buffered_pipe.py:160, in BufferedPipe.read(self, nbytes, timeout)
158 while (len(self._buffer) == 0) and not self._closed:
159 then = time.time()
--> 160 self._cv.wait(timeout)
161 if timeout is not None:
162 timeout -= time.time() - then
File ~/.conda/envs/tensorflow/lib/python3.8/threading.py:302, in Condition.wait(self, timeout)
300 try: # restore state no matter what (e.g., KeyboardInterrupt)
301 if timeout is None:
--> 302 waiter.acquire()
303 gotit = True
304 else:
KeyboardInterrupt:
The mlflow tracking server is working properly for all other operations. I am able to log params, metrics, and artifacts, but I am not able to load a model or retrieve any of the artifacts.
Update:
Looks like a bug, as per https://github.com/mlflow/mlflow/issues/5656.
Please upgrade mlflow; there is an issue with version 1.26.0:
pip install mlflow==1.27.0
(assuming you are also using the affected version)
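To confirm which version is actually installed in the environment your notebook uses, a quick one-line check (a sketch; mlflow exposes its version as mlflow.__version__):
python -c "import mlflow; print(mlflow.__version__)"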

pytorch RuntimeError: CUDA error: device-side assert triggered

I have a notebook on Google Colab that fails with the following error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
93 exception = e
---> 94 raise e
95 finally: cb_handler.on_train_end(exception)
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
83 xb, yb = cb_handler.on_batch_begin(xb, yb)
---> 84 loss = loss_batch(model, xb, yb, loss_func, opt, cb_handler)
85 if cb_handler.on_batch_end(loss): break
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
24 if opt is not None:
---> 25 loss = cb_handler.on_backward_begin(loss)
26 loss.backward()
/usr/local/lib/python3.6/dist-packages/fastai/callback.py in on_backward_begin(self, loss)
223 for cb in self.callbacks:
--> 224 a = cb.on_backward_begin(**self.state_dict)
225 if a is not None: self.state_dict['last_loss'] = a
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in on_backward_begin(self, smooth_loss, **kwargs)
266 if self.pbar is not None and hasattr(self.pbar,'child'):
--> 267 self.pbar.child.comment = f'{smooth_loss:.4f}'
268
/usr/local/lib/python3.6/dist-packages/torch/tensor.py in __format__(self, format_spec)
377 if self.dim() == 0:
--> 378 return self.item().__format__(format_spec)
379 return object.__format__(self, format_spec)
RuntimeError: CUDA error: device-side assert triggered
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
<ipython-input-33-dd390b1c8108> in <module>()
----> 1 lr_find(learn)
2 learn.recorder.plot()
/usr/local/lib/python3.6/dist-packages/fastai/train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, **kwargs)
26 cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
27 a = int(np.ceil(num_it/len(learn.data.train_dl)))
---> 28 learn.fit(a, start_lr, callbacks=[cb], **kwargs)
29
30 def to_fp16(learn:Learner, loss_scale:float=512., flat_master:bool=False)->Learner:
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
160 callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
161 fit(epochs, self.model, self.loss_func, opt=self.opt, data=self.data, metrics=self.metrics,
--> 162 callbacks=self.callbacks+callbacks)
163
164 def create_opt(self, lr:Floats, wd:Floats=0.)->None:
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
93 exception = e
94 raise e
---> 95 finally: cb_handler.on_train_end(exception)
96
97 loss_func_name2activ = {'cross_entropy_loss': partial(F.softmax, dim=1), 'nll_loss': torch.exp, 'poisson_nll_loss': torch.exp,
/usr/local/lib/python3.6/dist-packages/fastai/callback.py in on_train_end(self, exception)
254 def on_train_end(self, exception:Union[bool,Exception])->None:
255 "Handle end of training, `exception` is an `Exception` or False if no exceptions during training."
--> 256 self('train_end', exception=exception)
257
258 class AverageMetric(Callback):
/usr/local/lib/python3.6/dist-packages/fastai/callback.py in __call__(self, cb_name, call_mets, **kwargs)
185 "Call through to all of the `CallbakHandler` functions."
186 if call_mets: [getattr(met, f'on_{cb_name}')(**self.state_dict, **kwargs) for met in self.metrics]
--> 187 return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
188
189 def on_train_begin(self, epochs:int, pbar:PBar, metrics:MetricFuncList)->None:
/usr/local/lib/python3.6/dist-packages/fastai/callback.py in <listcomp>(.0)
185 "Call through to all of the `CallbakHandler` functions."
186 if call_mets: [getattr(met, f'on_{cb_name}')(**self.state_dict, **kwargs) for met in self.metrics]
--> 187 return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
188
189 def on_train_begin(self, epochs:int, pbar:PBar, metrics:MetricFuncList)->None:
/usr/local/lib/python3.6/dist-packages/fastai/callbacks/lr_finder.py in on_train_end(self, **kwargs)
45 # restore the valid_dl we turned of on `__init__`
46 self.data.valid_dl = self.valid_dl
---> 47 self.learn.load('tmp')
48 if hasattr(self.learn.model, 'reset'): self.learn.model.reset()
49 print('LR Finder complete, type {learner_name}.recorder.plot() to see the graph.')
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in load(self, name, device)
202 "Load model `name` from `self.model_dir` using `device`, defaulting to `self.data.device`."
203 if device is None: device = self.data.device
--> 204 self.model.load_state_dict(torch.load(self.path/self.model_dir/f'{name}.pth', map_location=device))
205 return self
206
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in load(f, map_location, pickle_module)
356 f = open(f, 'rb')
357 try:
--> 358 return _load(f, map_location, pickle_module)
359 finally:
360 if new_fd:
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in _load(f, map_location, pickle_module)
527 unpickler = pickle_module.Unpickler(f)
528 unpickler.persistent_load = persistent_load
--> 529 result = unpickler.load()
530
531 deserialized_storage_keys = pickle_module.load(f)
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in persistent_load(saved_id)
493 if root_key not in deserialized_objects:
494 deserialized_objects[root_key] = restore_location(
--> 495 data_type(size), location)
496 storage = deserialized_objects[root_key]
497 if view_metadata is not None:
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in restore_location(storage, location)
376 elif isinstance(map_location, torch.device):
377 def restore_location(storage, location):
--> 378 return default_restore_location(storage, str(map_location))
379 else:
380 def restore_location(storage, location):
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in default_restore_location(storage, location)
102 def default_restore_location(storage, location):
103 for _, _, fn in _package_registry:
--> 104 result = fn(storage, location)
105 if result is not None:
106 return result
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in _cuda_deserialize(obj, location)
84 'to an existing device.'.format(
85 device, torch.cuda.device_count()))
---> 86 return obj.cuda(device)
87
88
/usr/local/lib/python3.6/dist-packages/torch/_utils.py in _cuda(self, device, non_blocking, **kwargs)
74 else:
75 new_type = getattr(torch.cuda, self.__class__.__name__)
---> 76 return new_type(self.size()).copy_(self, non_blocking)
77
78
RuntimeError: cuda runtime error (59) : device-side assert triggered at /pytorch/aten/src/THC/generic/THCTensorCopy.cpp:20
There is no information about the real cause. I tried to get the stack trace by forcing CUDA launches to be synchronous (as suggested here) using a cell like this:
!export CUDA_LAUNCH_BLOCKING=1
But this does not seem to work; I still get the same error.
Is there another way that works with Google Colab?
Be sure that your target values run from zero to the number of classes - 1. For example: if you have 100 classes, your targets should be in the range 0 to 99. A remapping sketch follows below.
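If your raw labels are arbitrary values, one possible way to remap them to a contiguous range (a minimal sketch for a reasonably recent PyTorch; the tensor name targets is hypothetical):
import torch

targets = torch.tensor([3, 7, 7, 42, 3])         # raw, non-contiguous labels
classes = targets.unique(sorted=True)            # tensor([ 3,  7, 42])
remapped = torch.searchsorted(classes, targets)  # tensor([0, 1, 1, 2, 0]), now contiguous from 0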
!export FOO=blah is usually not useful to run in a notebook because ! means run the following command in a sub-shell, so the effect of the statement is gone by the time the ! returns.
You might have more success by storing your python code in a file and then executing that file in a subshell:
In one cell:
%%writefile foo.py
[...your code...]
In the next cell:
!export CUDA_LAUNCH_BLOCKING=1; python3 foo.py
(or s/python3/python2/ if you're writing py2)
Switch the Hardware Accelerator type to "None" under Runtime -> Change Runtime Type. This should give you a more meaningful error message.
The proper way to set environment variables in Google Colab is to use os:
import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
Using the os library will allow you to set whatever environment variables you need. Setting CUDA_LAUNCH_BLOCKING this way enables proper CUDA tracebacks in Google Colab.
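Note that this only takes effect if the variable is set before CUDA is initialized, so run the assignment before importing torch or doing any GPU work; a minimal sketch:
import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"   # set before CUDA is initialized

import torch                               # only import torch after setting the variable
x = torch.zeros(1, device='cuda')          # kernel launches now run synchronously, so asserts surface at the failing line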

SageMath notebook is not opening

I'm trying to open Sage's notebook, but it isn't working.
I have no idea where this error came from, because the notebook was working this week. I guess it just popped up out of nowhere.
The error message is:
sage: notebook()
---------------------------------------------------------------------------
EnvironmentError Traceback (most recent call last)
<ipython-input-4-3728cb3d7c7d> in <module>()
----> 1 notebook()
/home/jerome/opt/SageMath/src/sage/misc/lazy_import.pyx in sage.misc.lazy_import.LazyImport.__call__ (/home/jerome/opt/SageMath/src/build/cythonized/sage/misc/lazy_import.c:3634)()
384 True
385 """
--> 386 return self._get_object()(*args, **kwds)
387
388 def __repr__(self):
/home/jerome/opt/SageMath/src/sage/misc/lazy_import.pyx in sage.misc.lazy_import.LazyImport._get_object (/home/jerome/opt/SageMath/src/build/cythonized/sage/misc/lazy_import.c:2241)()
244 elif self._at_startup and not startup_guard:
245 print('Option ``at_startup=True`` for lazy import {0} not needed anymore'.format(self._name))
--> 246 self._object = getattr(__import__(self._module, {}, {}, [self._name]), self._name)
247 alias = self._as_name or self._name
248 if self._deprecation is not None:
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/sagenb/notebook/notebook_object.py in <module>()
15 import time, os, shutil, signal, tempfile
16
---> 17 import notebook as _notebook
18
19 import run_notebook
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/sagenb/notebook/notebook.py in <module>()
33
34 # Sage libraries
---> 35 from sagenb.misc.misc import (pad_zeros, cputime, tmp_dir, load, save,
36 ignore_nonexistent_files, unicode_str)
37
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/sagenb/misc/misc.py in <module>()
379
380 try:
--> 381 from sage.misc.cython import cython
382 except ImportError:
383 #stub
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/sage/misc/cython.py in <module>()
28
29 # CBLAS can be one of multiple implementations
---> 30 cblas_pc = pkgconfig.parse('cblas')
31 cblas_libs = list(cblas_pc['libraries'])
32 cblas_library_dirs = list(cblas_pc['library_dirs'])
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/pkgconfig-1.1.0-py2.7.egg/pkgconfig/pkgconfig.py in parse(packages)
185
186 for package in packages.split():
--> 187 for k, v in parse_package(package).items():
188 result[k].update(v)
189
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/pkgconfig-1.1.0-py2.7.egg/pkgconfig/pkgconfig.py in parse_package(package)
158
159 # Execute the query to pkg-config and clean the result.
--> 160 out = _query(package, '--cflags --libs')
161 out = out.replace('\\"', '')
162
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/pkgconfig-1.1.0-py2.7.egg/pkgconfig/pkgconfig.py in _wrapper(*args, **kwargs)
56 return func(*args, **kwargs)
57 except OSError:
---> 58 raise EnvironmentError("pkg-config is not installed")
59
60 return _wrapper
EnvironmentError: pkg-config is not installed
If you guys can help me, I'll be very thankful!

Using the Sacred Module with IPython

I am trying to set up sacred for Python and I am going through the tutorial. I was able to install sacred using pip install sacred with no issues, but I am having trouble running the basic code:
from sacred import Experiment
ex = Experiment("hello_world")
Running this code returns a ValueError:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-25-66f549cfb192> in <module>()
1 from sacred import Experiment
2
----> 3 ex = Experiment("hello_world")
/Users/ryandevera/anaconda/lib/python2.7/site-packages/sacred/experiment.pyc in __init__(self, name, ingredients)
42 super(Experiment, self).__init__(path=name,
43 ingredients=ingredients,
---> 44 _caller_globals=caller_globals)
45 self.default_command = ""
46 self.command(print_config, unobserved=True)
/Users/ryandevera/anaconda/lib/python2.7/site-packages/sacred/ingredient.pyc in __init__(self, path, ingredients, _caller_globals)
48 self.doc = _caller_globals.get('__doc__', "")
49 self.sources, self.dependencies = \
---> 50 gather_sources_and_dependencies(_caller_globals)
51
52 # =========================== Decorators ==================================
/Users/ryandevera/anaconda/lib/python2.7/site-packages/sacred/dependencies.pyc in gather_sources_and_dependencies(globs)
204 def gather_sources_and_dependencies(globs):
205 dependencies = set()
--> 206 main = Source.create(globs.get('__file__'))
207 sources = {main}
208 experiment_path = os.path.dirname(main.filename)
/Users/ryandevera/anaconda/lib/python2.7/site-packages/sacred/dependencies.pyc in create(filename)
61 if not filename or not os.path.exists(filename):
62 raise ValueError('invalid filename or file not found "{}"'
---> 63 .format(filename))
64
65 mainfile = get_py_file_if_possible(os.path.abspath(filename))
ValueError: invalid filename or file not found "None"
I am not sure why this error is raised. The documentation does not say anything about setting up an Experiment file prior to running the code. Any help would be greatly appreciated!
The traceback indicates that the constructor for Experiment searches its namespace to find the file in which it's defined.
Thus, to make the example work, place the example code into a file and run that file directly.
If you are using ipython, then you could always try using the %%python command, which will effectively capture the code you give it into a file before running it (in a separate python process).
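For reference, a minimal script version of the tutorial code might look like this (a sketch based on the Sacred docs; @ex.automain registers the function and runs the command-line interface when the file is executed directly):
# hello_world.py -- run with: python hello_world.py
from sacred import Experiment

ex = Experiment("hello_world")

@ex.automain
def my_main():
    print("Hello world!")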
According to the docs, if you're in IPython/Jupyter, you can allow the Experiment to run in a non-reproducible interactive environment:
ex = Experiment('jupyter_ex', interactive=True)
https://sacred.readthedocs.io/en/latest/experiment.html#run-the-experiment
The docs say it nicely (TL;DR: sacred checks this for you and fails in order to warn you)
Warning
By default, Sacred experiments will fail if run in an interactive
environment like a REPL or a Jupyter Notebook. This is an intended
security measure since in these environments reproducibility cannot be
ensured. If needed, this safeguard can be deactivated by passing
interactive=True to the experiment like this:
ex = Experiment('jupyter_ex', interactive=True)
Setting interactive=True doesn't work if you run the notebook as a script through ipython.
$ ipython code.ipynb
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
Cell In[1], line 1
----> 1 ex = Experiment("image_classification", interactive=True)
2 ex.observers.append(NeptuneObserver(run=neptune_run))
File ~\miniconda3\envs\py38\lib\site-packages\sacred\experiment.py:119, in Experiment.__init__(self, name, ingredients, interactive, base_dir, additional_host_info, additional_cli_options, save_git_info)
117 elif name.endswith(".pyc"):
118 name = name[:-4]
--> 119 super().__init__(
120 path=name,
121 ingredients=ingredients,
122 interactive=interactive,
123 base_dir=base_dir,
124 _caller_globals=caller_globals,
125 save_git_info=save_git_info,
126 )
127 self.default_command = None
128 self.command(print_config, unobserved=True)
File ~\miniconda3\envs\py38\lib\site-packages\sacred\ingredient.py:75, in Ingredient.__init__(self, path, ingredients, interactive, _caller_globals, base_dir, save_git_info)
69 self.save_git_info = save_git_info
70 self.doc = _caller_globals.get("__doc__", "")
71 (
72 self.mainfile,
73 self.sources,
74 self.dependencies,
---> 75 ) = gather_sources_and_dependencies(
76 _caller_globals, save_git_info, self.base_dir
77 )
78 if self.mainfile is None and not interactive:
79 raise RuntimeError(
80 "Defining an experiment in interactive mode! "
81 "The sourcecode cannot be stored and the "
82 "experiment won't be reproducible. If you still"
83 " want to run it pass interactive=True"
84 )
File ~\miniconda3\envs\py38\lib\site-packages\sacred\dependencies.py:725, in gather_sources_and_dependencies(globs, save_git_info, base_dir)
723 def gather_sources_and_dependencies(globs, save_git_info, base_dir=None):
724 """Scan the given globals for modules and return them as dependencies."""
--> 725 experiment_path, main = get_main_file(globs, save_git_info)
727 base_dir = base_dir or experiment_path
729 gather_sources = source_discovery_strategies[SETTINGS["DISCOVER_SOURCES"]]
File ~\miniconda3\envs\py38\lib\site-packages\sacred\dependencies.py:596, in get_main_file(globs, save_git_info)
594 main = None
595 else:
--> 596 main = Source.create(globs.get("__file__"), save_git_info)
461 return Source(main_file, get_digest(main_file), repo, commit, is_dirty)
File ~\miniconda3\envs\py38\lib\site-packages\sacred\dependencies.py:382, in get_py_file_if_possible(pyc_name)
380 if pyc_name.endswith((".py", ".so", ".pyd")):
381 return pyc_name
--> 382 assert pyc_name.endswith(".pyc")
383 non_compiled_file = pyc_name[:-1]
384 if os.path.exists(non_compiled_file):
sacred==0.8.2

ipython parallel works over the default setup but not over ssh, why?

I have created a setup to connect via ssh to multiple machines. This is my configuration file:
c = get_config()
c.IPClusterEngines.engine_launcher_class = 'SSHEngineSetLauncher'
Clusters = [36,31,1,24,10,11,4,3,6,26,7,2,9]
c.SSHEngineSetLauncher.engines = dict( [ ('hostname%02d'%x,7) for x in Clusters ] )
c.SSHEngineSetLauncher.engine_args = ['--profile-dir=~/.ipython/profile_ssh']
c.LocalControllerLauncher.controller_args = ["--ip='*'"]
I have a custom class and get the error below. The thing I cannot understand is that if I connect to the standard ipcluster profile I get no error. Why the difference?
from IPython.parallel import Client
rc = Client() # standard
rcSSH = Client(profile='ssh') # SSH (this gives the error)
rc[:].use_dill()
rcSSH[:].use_dill()
rc[:].load_balanced_view().map_sync(customInstance.function, *args) # <- this runs fine
rcSSH[:].load_balanced_view().map_sync(customInstance.function, *args) # <- this gives the error
And the error
ImportError Traceback (most recent call last)
~/.local/lib/python2.7/site-packages/IPython/kernel/zmq/serialize.pyc in unpack_apply_message(bufs, g, copy)
171 args = []
172 for i in range(info['nargs']):
--> 173 arg, arg_bufs = unserialize_object(arg_bufs, g)
174 args.append(arg)
175 args = tuple(args)
~/.local/lib/python2.7/site-packages/IPython/kernel/zmq/serialize.pyc in unserialize_object(buffers, g)
110 # a zmq message
111 pobj = bytes(pobj)
--> 112 canned = pickle.loads(pobj)
113 if istype(canned, sequence_types) and len(canned) < MAX_ITEMS:
114 for c in canned:
~/.local/lib/python2.7/site-packages/dill/dill.pyc in loads(str)
158 """unpickle an object from a string"""
159 file = StringIO(str)
--> 160 return load(file)
161
162 # def dumpzs(obj, protocol=None):
~/.local/lib/python2.7/site-packages/dill/dill.pyc in load(file)
148 pik = Unpickler(file)
149 pik._main_module = _main_module
--> 150 obj = pik.load()
151 if type(obj).__module__ == _main_module.__name__: # point obj class to main
152 try: obj.__class__ == getattr(pik._main_module, type(obj).__name__)
/usr/lib/python2.7/pickle.pyc in load(self)
856 while 1:
857 key = read(1)
--> 858 dispatch[key](self)
859 except _Stop, stopinst:
860 return stopinst.value
/usr/lib/python2.7/pickle.pyc in load_global(self)
1088 module = self.readline()[:-1]
1089 name = self.readline()[:-1]
-> 1090 klass = self.find_class(module, name)
1091 self.append(klass)
1092 dispatch[GLOBAL] = load_global
~/.local/lib/python2.7/site-packages/dill/dill.pyc in find_class(self, module, name)
224 if (module, name) == ('__builtin__', '__main__'):
225 return self._main_module.__dict__ #XXX: above set w/save_module_dict
--> 226 return StockUnpickler.find_class(self, module, name)
227 pass
228
/usr/lib/python2.7/pickle.pyc in find_class(self, module, name)
1122 def find_class(self, module, name):
1123 # Subclasses may override this
-> 1124 __import__(module)
1125 mod = sys.modules[module]
1126 klass = getattr(mod, name)
ImportError: No module named fiberModes.GRINmediumArbPrec
EDIT:
I should mention that doing the following doesn't change anything:
dview.execute('import fiberModes.GRINmediumArbPrec')
