I am trying to import downloaded MNIST data into a Jupyter Notebook, but when I run the code it fails with a permissions error.
How do I solve this problem?
pip install python-mnist
from mnist import MNIST
mndata = MNIST('C:\\Users\\username\\path\\to\\the\\samples')
images, labels = mndata.load_training()
---------------------------------------------------------------------------
PermissionError Traceback (most recent call last)
<ipython-input-2-1df33381d649> in <module>
3 mndata = MNIST('C:\\Users\\username\\path\\to\\the\\samples')
4
----> 5 images, labels = mndata.load_training()
D:\Anaconda\lib\site-packages\mnist\loader.py in load_training(self)
124 def load_training(self):
125 ims, labels = self.load(os.path.join(self.path, self.train_img_fname),
--> 126 os.path.join(self.path, self.train_lbl_fname))
127
128 self.train_images = self.process_images(ims)
D:\Anaconda\lib\site-packages\mnist\loader.py in load(self, path_img, path_lbl, batch)
245 '(start_point, batch_size)')
246
--> 247 with self.opener(path_lbl, 'rb') as file:
248 magic, size = struct.unpack(">II", file.read(8))
249 if magic != 2049:
D:\Anaconda\lib\site-packages\mnist\loader.py in opener(self, path_fn, *args, **kwargs)
237 return gzip.open(path_fn + '.gz', *args, **kwargs)
238 else:
--> 239 return open(path_fn, *args, **kwargs)
240
241 def load(self, path_img, path_lbl, batch=None):
PermissionError: [Errno 13] Permission denied: 'C:\\Users\\username\\path\\to\\the\\samples\\train-labels-idx1-ubyte'
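For reference, here is a minimal check (a sketch, assuming the standard MNIST filenames that appear in the traceback) that the files exist at that path and are plain files rather than directories:
import os

path = r'C:\Users\username\path\to\the\samples'  # same folder passed to MNIST()
for fname in ('train-images-idx3-ubyte', 'train-labels-idx1-ubyte'):
    full = os.path.join(path, fname)
    # On Windows, Errno 13 can also mean the name points at a folder,
    # not just a genuine permissions problem.
    print(full, 'exists:', os.path.exists(full), 'is file:', os.path.isfile(full))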
I am trying to run the git repository https://github.com/DinoMan/speech-driven-animation in Jupyter.
I followed the steps indicated, but I ran into an error when running the example.
Here is my code and the error:
import sda
va = sda.VideoAnimator(gpu=-1, model_path="crema")  # instantiate the animator
vid, aud = va("example/image.bmp", "example/audio.wav")
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
Input In [7], in <cell line: 1>()
----> 1 vid, aud = va("example/image.bmp", "example/audio.wav")
File ~\IVERSE\sda\sda.py:229, in VideoAnimator.__call__(self, img, audio, fs, aligned)
226 frame = img
228 if not aligned:
--> 229 frame = self.preprocess_img(frame)
231 if isinstance(audio, str): # if we have a path then grab the audio clip
232 info = mediainfo(audio)
File ~\IVERSE\sda\sda.py:191, in VideoAnimator.preprocess_img(self, img)
190 def preprocess_img(self, img):
--> 191 src = self.fa.get_landmarks(img)[0][self.stablePntsIDs, :]
192 dst = self.mean_face[self.stablePntsIDs, :]
193 tform = tf.estimate_transform('similarity', src, dst) # find the transformation matrix
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\face_alignment\api.py:110, in FaceAlignment.get_landmarks(self, image_or_path, detected_faces, return_bboxes, return_landmark_score)
98 def get_landmarks(self, image_or_path, detected_faces=None, return_bboxes=False, return_landmark_score=False):
99 """Deprecated, please use get_landmarks_from_image
100
101 Arguments:
(...)
108 return_landmark_score {boolean} -- If True, return the keypoint scores along with the keypoints.
109 """
--> 110 return self.get_landmarks_from_image(image_or_path, detected_faces, return_bboxes, return_landmark_score)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\torch\autograd\grad_mode.py:27, in _DecoratorContextManager.__call__.<locals>.decorate_context(*args, **kwargs)
24 #functools.wraps(func)
25 def decorate_context(*args, **kwargs):
26 with self.clone():
---> 27 return func(*args, **kwargs)
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\face_alignment\api.py:141, in FaceAlignment.get_landmarks_from_image(self, image_or_path, detected_faces, return_bboxes, return_landmark_score)
138 image = get_image(image_or_path)
140 if detected_faces is None:
--> 141 detected_faces = self.face_detector.detect_from_image(image.copy())
143 if len(detected_faces) == 0:
144 warnings.warn("No faces were detected.")
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\face_alignment\detection\sfd\sfd_detector.py:45, in SFDDetector.detect_from_image(self, tensor_or_path)
42 def detect_from_image(self, tensor_or_path):
43 image = self.tensor_or_path_to_ndarray(tensor_or_path)
---> 45 bboxlist = detect(self.face_detector, image, device=self.device)[0]
46 bboxlist = self._filter_bboxes(bboxlist)
48 return bboxlist
File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\face_alignment\detection\sfd\detect.py:15, in detect(net, img, device)
12 # Creates a batch of 1
13 img = np.expand_dims(img, 0)
---> 15 img = torch.from_numpy(img.copy()).to(device, dtype=torch.float32)
17 return batch_detect(net, img, device)
RuntimeError: Numpy is not available
I uploaded the models via Google Drive. I tried uninstalling numpy and reinstalling a lower version (1.19), but that didn't work either; pip failed with:
note: This error originates from a subprocess, and is likely not a problem with pip.
error: metadata-generation-failed
Could you help me understand what I can do to resolve the problem, please?
Thanks
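In case it helps, here is a quick diagnostic (a minimal sketch, not a fix) of whether the installed torch build can actually see NumPy, which is what torch.from_numpy needs:
import numpy as np
import torch

print(np.__version__, torch.__version__)
# If torch was built against an incompatible NumPy, this call raises the
# same "RuntimeError: Numpy is not available" as in the traceback above.
t = torch.from_numpy(np.zeros((2, 2), dtype=np.float32))
print(t.shape)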
After successfully installing Layout Parser on Windows, I get the OSError below. I am using Layout Parser to extract content from images, but it fails as soon as I try to load a model.
Code used:
model = lp.Detectron2LayoutModel(config_path="lp://PubLayNet/mask_rcnn_X_101_32x8d_FPN_3x/config",
                                 extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.8],
                                 label_map={0: "Text", 1: "Title", 2: "List", 3: "Table", 4: "Figure"})
---------------------------------------------------------------------------
OSError Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_178664\3254664512.py in <module>
1 model = lp.Detectron2LayoutModel(config_path="lp://PubLayNet/mask_rcnn_X_101_32x8d_FPN_3x/config",
2 extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.8],
----> 3 label_map={0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"})
4 # Load the deep layout model from the layoutparser API
5 # For all the supported model, please check the Model
~\Anaconda3\envs\layout\lib\site-packages\layoutparser\models\detectron2\layoutmodel.py in __init__(self, config_path, model_path, label_map, extra_config, enforce_cpu, device)
89 config_path, model_path, allow_empty_path=True
90 )
---> 91 config_path = PathManager.get_local_path(config_path)
92
93 if label_map is None:
~\Anaconda3\envs\layout\lib\site-packages\iopath\common\file_io.py in get_local_path(self, path, force, **kwargs)
1195 handler = self.__get_path_handler(path) # type: ignore
1196 try:
-> 1197 bret = handler._get_local_path(path, force=force, **kwargs)
1198 except TypeError:
1199 bret = handler._get_local_path(path, **kwargs)
~\Anaconda3\envs\layout\lib\site-packages\layoutparser\models\detectron2\catalog.py in _get_local_path(self, path, **kwargs)
134 else:
135 raise ValueError(f"Unknown data_type {data_type}")
--> 136 return PathManager.get_local_path(model_url, **kwargs)
137
138 def _open(self, path, mode="r", **kwargs):
~\Anaconda3\envs\layout\lib\site-packages\iopath\common\file_io.py in get_local_path(self, path, force, **kwargs)
1195 handler = self.__get_path_handler(path) # type: ignore
1196 try:
-> 1197 bret = handler._get_local_path(path, force=force, **kwargs)
1198 except TypeError:
1199 bret = handler._get_local_path(path, **kwargs)
~\Anaconda3\envs\layout\lib\site-packages\iopath\common\file_io.py in _get_local_path(self, path, force, cache_dir, **kwargs)
792
793 cached = os.path.join(dirname, filename)
--> 794 with file_lock(cached):
795 if not os.path.isfile(cached):
796 logger.info("Downloading {} ...".format(path))
~\Anaconda3\envs\layout\lib\site-packages\portalocker\utils.py in __enter__(self)
155
156 def __enter__(self):
--> 157 return self.acquire()
158
159 def __exit__(self,
~\Anaconda3\envs\layout\lib\site-packages\portalocker\utils.py in acquire(self, timeout, check_interval, fail_when_locked)
237
238 # Get a new filehandler
--> 239 fh = self._get_fh()
240
241 def try_close(): # pragma: no cover
~\Anaconda3\envs\layout\lib\site-packages\portalocker\utils.py in _get_fh(self)
287 def _get_fh(self) -> typing.IO:
288 '''Get a new filehandle'''
--> 289 return open(self.filename, self.mode, **self.file_open_kwargs)
290
291 def _get_lock(self, fh: typing.IO) -> typing.IO:
OSError: [Errno 22] Invalid argument: 'C:\\Users\\vchinna/.torch/iopath_cache\\s/nau5ut6zgthunil\\config.yaml?dl=1.lock'
I am not sure whether this is some kind of lock issue or something else.
Please help.
I got a similar error too and worked around it manually on Windows.
Using your case as the example: OSError: [Errno 22] Invalid argument: 'C:\Users\vchinna/.torch/iopath_cache\s/nau5ut6zgthunil\config.yaml?dl=1.lock'
Please follow this process:
Navigate to C:\Users\vchinna/.torch/iopath_cache\s/nau5ut6zgthunil\config.yaml and open that config.yaml file.
Scroll down to WEIGHTS: https://www.dropbox.com/s/h7th27jfv19rxiy/model_final.pth?dl=1 (it should be around line 265).
Copy that link and paste it into your browser; a file named model_final.pth will be downloaded. Copy this file to your desired folder.
Now replace the path in the file: WEIGHTS: your_desired_folder/model_final.pth
Save it and run the code; it works!
But there is a small iopath workaround you may need to apply first (if you have not done it already): https://github.com/Layout-Parser/layout-parser/issues/15 (GitHub link to the issue). The resulting loading code looks roughly like the sketch below.
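Once config.yaml points at the local weights, loading the model from local files looks roughly like this (a sketch; the cache path and the folder holding model_final.pth are placeholders taken from the example above):
import layoutparser as lp

# Use the locally cached config and the manually downloaded weights,
# avoiding the lp:// resolver that produced the invalid '?dl=1.lock' filename.
model = lp.Detectron2LayoutModel(
    config_path=r'C:\Users\vchinna\.torch\iopath_cache\s\nau5ut6zgthunil\config.yaml',
    model_path=r'your_desired_folder\model_final.pth',
    extra_config=["MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.8],
    label_map={0: "Text", 1: "Title", 2: "List", 3: "Table", 4: "Figure"},
)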
I have a notebook on Google Colab that fails with the following error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
93 exception = e
---> 94 raise e
95 finally: cb_handler.on_train_end(exception)
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
83 xb, yb = cb_handler.on_batch_begin(xb, yb)
---> 84 loss = loss_batch(model, xb, yb, loss_func, opt, cb_handler)
85 if cb_handler.on_batch_end(loss): break
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in loss_batch(model, xb, yb, loss_func, opt, cb_handler)
24 if opt is not None:
---> 25 loss = cb_handler.on_backward_begin(loss)
26 loss.backward()
/usr/local/lib/python3.6/dist-packages/fastai/callback.py in on_backward_begin(self, loss)
223 for cb in self.callbacks:
--> 224 a = cb.on_backward_begin(**self.state_dict)
225 if a is not None: self.state_dict['last_loss'] = a
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in on_backward_begin(self, smooth_loss, **kwargs)
266 if self.pbar is not None and hasattr(self.pbar,'child'):
--> 267 self.pbar.child.comment = f'{smooth_loss:.4f}'
268
/usr/local/lib/python3.6/dist-packages/torch/tensor.py in __format__(self, format_spec)
377 if self.dim() == 0:
--> 378 return self.item().__format__(format_spec)
379 return object.__format__(self, format_spec)
RuntimeError: CUDA error: device-side assert triggered
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
<ipython-input-33-dd390b1c8108> in <module>()
----> 1 lr_find(learn)
2 learn.recorder.plot()
/usr/local/lib/python3.6/dist-packages/fastai/train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, **kwargs)
26 cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
27 a = int(np.ceil(num_it/len(learn.data.train_dl)))
---> 28 learn.fit(a, start_lr, callbacks=[cb], **kwargs)
29
30 def to_fp16(learn:Learner, loss_scale:float=512., flat_master:bool=False)->Learner:
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(self, epochs, lr, wd, callbacks)
160 callbacks = [cb(self) for cb in self.callback_fns] + listify(callbacks)
161 fit(epochs, self.model, self.loss_func, opt=self.opt, data=self.data, metrics=self.metrics,
--> 162 callbacks=self.callbacks+callbacks)
163
164 def create_opt(self, lr:Floats, wd:Floats=0.)->None:
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in fit(epochs, model, loss_func, opt, data, callbacks, metrics)
93 exception = e
94 raise e
---> 95 finally: cb_handler.on_train_end(exception)
96
97 loss_func_name2activ = {'cross_entropy_loss': partial(F.softmax, dim=1), 'nll_loss': torch.exp, 'poisson_nll_loss': torch.exp,
/usr/local/lib/python3.6/dist-packages/fastai/callback.py in on_train_end(self, exception)
254 def on_train_end(self, exception:Union[bool,Exception])->None:
255 "Handle end of training, `exception` is an `Exception` or False if no exceptions during training."
--> 256 self('train_end', exception=exception)
257
258 class AverageMetric(Callback):
/usr/local/lib/python3.6/dist-packages/fastai/callback.py in __call__(self, cb_name, call_mets, **kwargs)
185 "Call through to all of the `CallbakHandler` functions."
186 if call_mets: [getattr(met, f'on_{cb_name}')(**self.state_dict, **kwargs) for met in self.metrics]
--> 187 return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
188
189 def on_train_begin(self, epochs:int, pbar:PBar, metrics:MetricFuncList)->None:
/usr/local/lib/python3.6/dist-packages/fastai/callback.py in <listcomp>(.0)
185 "Call through to all of the `CallbakHandler` functions."
186 if call_mets: [getattr(met, f'on_{cb_name}')(**self.state_dict, **kwargs) for met in self.metrics]
--> 187 return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
188
189 def on_train_begin(self, epochs:int, pbar:PBar, metrics:MetricFuncList)->None:
/usr/local/lib/python3.6/dist-packages/fastai/callbacks/lr_finder.py in on_train_end(self, **kwargs)
45 # restore the valid_dl we turned of on `__init__`
46 self.data.valid_dl = self.valid_dl
---> 47 self.learn.load('tmp')
48 if hasattr(self.learn.model, 'reset'): self.learn.model.reset()
49 print('LR Finder complete, type {learner_name}.recorder.plot() to see the graph.')
/usr/local/lib/python3.6/dist-packages/fastai/basic_train.py in load(self, name, device)
202 "Load model `name` from `self.model_dir` using `device`, defaulting to `self.data.device`."
203 if device is None: device = self.data.device
--> 204 self.model.load_state_dict(torch.load(self.path/self.model_dir/f'{name}.pth', map_location=device))
205 return self
206
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in load(f, map_location, pickle_module)
356 f = open(f, 'rb')
357 try:
--> 358 return _load(f, map_location, pickle_module)
359 finally:
360 if new_fd:
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in _load(f, map_location, pickle_module)
527 unpickler = pickle_module.Unpickler(f)
528 unpickler.persistent_load = persistent_load
--> 529 result = unpickler.load()
530
531 deserialized_storage_keys = pickle_module.load(f)
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in persistent_load(saved_id)
493 if root_key not in deserialized_objects:
494 deserialized_objects[root_key] = restore_location(
--> 495 data_type(size), location)
496 storage = deserialized_objects[root_key]
497 if view_metadata is not None:
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in restore_location(storage, location)
376 elif isinstance(map_location, torch.device):
377 def restore_location(storage, location):
--> 378 return default_restore_location(storage, str(map_location))
379 else:
380 def restore_location(storage, location):
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in default_restore_location(storage, location)
102 def default_restore_location(storage, location):
103 for _, _, fn in _package_registry:
--> 104 result = fn(storage, location)
105 if result is not None:
106 return result
/usr/local/lib/python3.6/dist-packages/torch/serialization.py in _cuda_deserialize(obj, location)
84 'to an existing device.'.format(
85 device, torch.cuda.device_count()))
---> 86 return obj.cuda(device)
87
88
/usr/local/lib/python3.6/dist-packages/torch/_utils.py in _cuda(self, device, non_blocking, **kwargs)
74 else:
75 new_type = getattr(torch.cuda, self.__class__.__name__)
---> 76 return new_type(self.size()).copy_(self, non_blocking)
77
78
RuntimeError: cuda runtime error (59) : device-side assert triggered at /pytorch/aten/src/THC/generic/THCTensorCopy.cpp:20
There is no information about the real cause. I tried to get the stack trace by forcing CUDA to run on one GPU (as suggested here) using a cell like this:
!export CUDA_LAUNCH_BLOCKING=1
But this does not seem to work; I still get the same error.
Is there another way that works with Google Colab?
Make sure your target values run from zero to the number of classes minus one. For example, if you have 100 classes, your targets should be 0 to 99. A remapping sketch follows.
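If your labels are not already in that form, a minimal remapping sketch (plain PyTorch; the tensor values are made up for illustration):
import torch

targets = torch.tensor([3, 7, 7, 42, 3])        # raw, non-contiguous labels
classes = sorted(set(targets.tolist()))          # unique label values
remap = {c: i for i, c in enumerate(classes)}    # label -> 0 .. n_classes-1
targets = torch.tensor([remap[int(t)] for t in targets])
print(targets)  # tensor([0, 1, 1, 2, 0])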
!export FOO=blah is usually not useful to run in a notebook because ! means run the following command in a sub-shell, so the effect of the statement is gone by the time the ! returns.
You might have more success by storing your python code in a file and then executing that file in a subshell:
In one cell:
%%writefile foo.py
[...your code...]
In the next cell:
!export CUDA_LAUNCH_BLOCKING=1; python3 foo.py
(or s/python3/python2/ if you're writing py2)
Switch the Hardware Accelerator type to "None" under Runtime -> Change Runtime Type. This should give you a more meaningful error message.
The proper way to set environment variables in Google Colab is to use os:
import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
Using the os library will allow you to set whatever environment variables you need. Setting CUDA_LAUNCH_BLOCKING this way enables proper CUDA tracebacks in Google Colab.
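Note that the variable has to be in the environment before the first CUDA operation runs, so in practice the cell goes at the very top of the notebook (a minimal sketch):
import os
os.environ['CUDA_LAUNCH_BLOCKING'] = "1"  # set before torch touches the GPU

import torch  # import and all CUDA work happen only after the variable is set
print(torch.cuda.is_available())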
I'm trying to open the Sage notebook, but it isn't working.
I have no idea where this error came from, because the notebook was working earlier this week; it seems to have popped up out of nowhere.
The error message is:
sage: notebook()
---------------------------------------------------------------------------
EnvironmentError Traceback (most recent call last)
<ipython-input-4-3728cb3d7c7d> in <module>()
----> 1 notebook()
/home/jerome/opt/SageMath/src/sage/misc/lazy_import.pyx in sage.misc.lazy_import.LazyImport.__call__ (/home/jerome/opt/SageMath/src/build/cythonized/sage/misc/lazy_import.c:3634)()
384 True
385 """
--> 386 return self._get_object()(*args, **kwds)
387
388 def __repr__(self):
/home/jerome/opt/SageMath/src/sage/misc/lazy_import.pyx in sage.misc.lazy_import.LazyImport._get_object (/home/jerome/opt/SageMath/src/build/cythonized/sage/misc/lazy_import.c:2241)()
244 elif self._at_startup and not startup_guard:
245 print('Option ``at_startup=True`` for lazy import {0} not needed anymore'.format(self._name))
--> 246 self._object = getattr(__import__(self._module, {}, {}, [self._name]), self._name)
247 alias = self._as_name or self._name
248 if self._deprecation is not None:
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/sagenb/notebook/notebook_object.py in <module>()
15 import time, os, shutil, signal, tempfile
16
---> 17 import notebook as _notebook
18
19 import run_notebook
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/sagenb/notebook/notebook.py in <module>()
33
34 # Sage libraries
---> 35 from sagenb.misc.misc import (pad_zeros, cputime, tmp_dir, load, save,
36 ignore_nonexistent_files, unicode_str)
37
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/sagenb/misc/misc.py in <module>()
379
380 try:
--> 381 from sage.misc.cython import cython
382 except ImportError:
383 #stub
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/sage/misc/cython.py in <module>()
28
29 # CBLAS can be one of multiple implementations
---> 30 cblas_pc = pkgconfig.parse('cblas')
31 cblas_libs = list(cblas_pc['libraries'])
32 cblas_library_dirs = list(cblas_pc['library_dirs'])
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/pkgconfig-1.1.0-py2.7.egg/pkgconfig/pkgconfig.py in parse(packages)
185
186 for package in packages.split():
--> 187 for k, v in parse_package(package).items():
188 result[k].update(v)
189
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/pkgconfig-1.1.0-py2.7.egg/pkgconfig/pkgconfig.py in parse_package(package)
158
159 # Execute the query to pkg-config and clean the result.
--> 160 out = _query(package, '--cflags --libs')
161 out = out.replace('\\"', '')
162
/home/jerome/opt/SageMath/local/lib/python2.7/site-packages/pkgconfig-1.1.0-py2.7.egg/pkgconfig/pkgconfig.py in _wrapper(*args, **kwargs)
56 return func(*args, **kwargs)
57 except OSError:
---> 58 raise EnvironmentError("pkg-config is not installed")
59
60 return _wrapper
EnvironmentError: pkg-config is not installed
If you guys can help me, I'll be very thankful!
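For what it's worth, the traceback bottoms out in pkgconfig shelling out to the external pkg-config binary, so a quick check (a sketch, runnable from the same Python session) is whether that binary is on the PATH at all:
import subprocess

try:
    # pkgconfig runs the pkg-config executable; if it is missing from PATH,
    # the resulting OSError is what pkgconfig re-raises as EnvironmentError.
    out = subprocess.check_output(['pkg-config', '--version'])
    print('pkg-config found, version', out.strip())
except OSError:
    print('pkg-config is not on PATH; install it with your package manager')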
Windows 10
Jupyter version 5
Anaconda
mndrake's xgboost build installed
I am not able to import xgboost inside the kernel; it throws the following error. Any help regarding the same would be appreciated!
WindowsError Traceback (most recent call last)
<ipython-input-2-afdaff4619ce> in <module>()
----> 1 import xgboost
C:\Anaconda2\lib\site-packages\xgboost\__init__.py in <module>()
9 import os
10
---> 11 from .core import DMatrix, Booster
12 from .training import train, cv
13 from . import rabit # noqa
C:\Anaconda2\lib\site-packages\xgboost\core.py in <module>()
110
111 # load the XGBoost library globally
--> 112 _LIB = _load_lib()
113
114
C:\Anaconda2\lib\site-packages\xgboost\core.py in _load_lib()
104 if len(lib_path) == 0:
105 return None
--> 106 lib = ctypes.cdll.LoadLibrary(lib_path[0])
107 lib.XGBGetLastError.restype = ctypes.c_char_p
108 return lib
C:\Anaconda2\lib\ctypes\__init__.pyc in LoadLibrary(self, name)
441
442 def LoadLibrary(self, name):
--> 443 return self._dlltype(name)
444
445 cdll = LibraryLoader(CDLL)
C:\Anaconda2\lib\ctypes\__init__.pyc in __init__(self, name, mode, handle, use_errno, use_last_error)
363
364 if handle is None:
--> 365 self._handle = _dlopen(self._name, mode)
366 else:
367 self._handle = handle
WindowsError: [Error 126] The specified module could not be found
Create an environment variable XGBOOST_BUILD_DOC pointing at the xgboost package directory:
XGBOOST_BUILD_DOC = 'C:\Users\xxxxxx\Anaconda2\lib\site-packages\xgboost;'
or set it from Python:
import os
os.environ['XGBOOST_BUILD_DOC'] = 'C:\\Users\\xxxxxx\\Anaconda2\\lib\\site-packages\\xgboost;'
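If setting that variable alone does not fix the import, a related workaround (an assumption on my part, not part of the original answer) is to put the xgboost package directory on PATH before importing, since WindowsError 126 usually means a dependent DLL could not be found:
import os

# Hypothetical location: adjust to wherever your xgboost DLLs actually live.
xgb_dir = 'C:\\Users\\xxxxxx\\Anaconda2\\lib\\site-packages\\xgboost'
os.environ['PATH'] = xgb_dir + ';' + os.environ['PATH']

import xgboost  # retry the import once the DLL directory is on PATH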