I'm trying to set up GluonCV in a Jupyter notebook inside a virtual environment. For some reason, whenever I try to import GluonCV I get this error:
ImportError Traceback (most recent call last)
<ipython-input-2-9a2bc396118f> in <module>
----> 1 import gluoncv
~\anaconda3\envs\mxnet\lib\site-packages\gluoncv\__init__.py in <module>
10 _require_mxnet_version('1.4.0', '2.0.0')
11
---> 12 from . import data
13 from . import model_zoo
14 from . import nn
~\anaconda3\envs\mxnet\lib\site-packages\gluoncv\data\__init__.py in <module>
29 from .sampler import SplitSampler, ShuffleSplitSampler
30 from .otb.tracking import OTBTracking
---> 31 from .kitti.kitti_dataset import KITTIRAWDataset, KITTIOdomDataset
32
33 datasets = {
~\anaconda3\envs\mxnet\lib\site-packages\gluoncv\data\kitti\__init__.py in <module>
1 # pylint: disable=missing-module-docstring
----> 2 from .kitti_dataset import *
3 from .kitti_utils import *
~\anaconda3\envs\mxnet\lib\site-packages\gluoncv\data\kitti\kitti_dataset.py in <module>
19
20 from ...utils.filesystem import try_import_skimage
---> 21 from .kitti_utils import generate_depth_map
22 from .mono_dataset import MonoDataset
23
~\anaconda3\envs\mxnet\lib\site-packages\gluoncv\data\kitti\kitti_utils.py in <module>
10
11 import mxnet as mx
---> 12 from mxnet.util import is_np_array
13
14
ImportError: cannot import name 'is_np_array'
I've tried using the same files that work on Google Colaboratory, but I still get that error. I've tried reinstalling gluon and its dependencies every way I can think of. No idea what's going on. For convenience, I really need this to work locally.
I resolved this error by installing compatible versions of mxnet and gluoncv. In my case, pairing mxnet-native with gluoncv resolved the error.
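As a rough sketch of that fix (the exact version pins below are my assumption; check the compatibility notes for your gluoncv release): mxnet.util.is_np_array only ships with newer mxnet builds, so mxnet needs to be recent enough to provide it while still inside the '1.4.0'–'2.0.0' range that gluoncv's version check enforces. Something along these lines:
pip uninstall -y mxnet gluoncv
pip install "mxnet>=1.6.0,<2.0.0" gluoncv
Then, back in the notebook, verify the import chain:
import mxnet as mx
print(mx.__version__)
from mxnet.util import is_np_array  # fails on builds that predate this helper
import gluoncv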
After running runas /netonly /user:domain\username "ipython" in Windows cmd, IPython (Python 3.8.8, installed via Anaconda) launches successfully, but as soon as I start importing packages I get "DLL load failed" errors. For example, if I run import nltk or import matplotlib.pyplot I get the following error messages, respectively:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-1-1d2184025e54> in <module>
----> 1 import nltk
~\Anaconda3\lib\site-packages\nltk\__init__.py in <module>
147 from nltk.tag import *
148 from nltk.tokenize import *
--> 149 from nltk.translate import *
150 from nltk.sem import *
151 from nltk.stem import *
~\Anaconda3\lib\site-packages\nltk\translate\__init__.py in <module>
21 from nltk.translate.bleu_score import sentence_bleu as bleu
22 from nltk.translate.ribes_score import sentence_ribes as ribes
---> 23 from nltk.translate.meteor_score import meteor_score as meteor
24 from nltk.translate.metrics import alignment_error_rate
25 from nltk.translate.stack_decoder import StackDecoder
~\Anaconda3\lib\site-packages\nltk\translate\meteor_score.py in <module>
8
9
---> 10 from nltk.stem.porter import PorterStemmer
11 from nltk.corpus import wordnet
12 from itertools import chain, product
~\Anaconda3\lib\site-packages\nltk\stem\__init__.py in <module>
27 from nltk.stem.isri import ISRIStemmer
28 from nltk.stem.porter import PorterStemmer
---> 29 from nltk.stem.snowball import SnowballStemmer
30 from nltk.stem.wordnet import WordNetLemmatizer
31 from nltk.stem.rslp import RSLPStemmer
~\Anaconda3\lib\site-packages\nltk\stem\snowball.py in <module>
27 import re
28
---> 29 from nltk.corpus import stopwords
30 from nltk.stem import porter
31 from nltk.stem.util import suffix_replace, prefix_replace
~\Anaconda3\lib\site-packages\nltk\corpus\__init__.py in <module>
64 from nltk.tokenize import RegexpTokenizer
65 from nltk.corpus.util import LazyCorpusLoader
---> 66 from nltk.corpus.reader import *
67
68 abc = LazyCorpusLoader(
~\Anaconda3\lib\site-packages\nltk\corpus\reader\__init__.py in <module>
103 from nltk.corpus.reader.categorized_sents import *
104 from nltk.corpus.reader.comparative_sents import *
--> 105 from nltk.corpus.reader.panlex_lite import *
106 from nltk.corpus.reader.panlex_swadesh import *
107
~\Anaconda3\lib\site-packages\nltk\corpus\reader\panlex_lite.py in <module>
13
14 import os
---> 15 import sqlite3
16
17 from nltk.corpus.reader.api import CorpusReader
~\Anaconda3\lib\sqlite3\__init__.py in <module>
21 # 3. This notice may not be removed or altered from any source distribution.
22
---> 23 from sqlite3.dbapi2 import *
~\Anaconda3\lib\sqlite3\dbapi2.py in <module>
25 import collections.abc
26
---> 27 from _sqlite3 import *
28
29 paramstyle = "qmark"
ImportError: DLL load failed while importing _sqlite3: The specified module could not be found.
AND
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-2-864e826dab68> in <module>
----> 1 import matplotlib.pyplot
~\Anaconda3\lib\site-packages\matplotlib\__init__.py in <module>
172
173
--> 174 _check_versions()
175
176
~\Anaconda3\lib\site-packages\matplotlib\__init__.py in _check_versions()
157 # Quickfix to ensure Microsoft Visual C++ redistributable
158 # DLLs are loaded before importing kiwisolver
--> 159 from . import ft2font
160
161 for modname, minver in [
ImportError: DLL load failed while importing ft2font: The specified module could not be found.
However, imports such as import numpy or import pandas run fine.
Furthermore, when launching "ipython" without runas all imports run perfectly.
Any ideas on what's going on here?
EDIT:
I can confirm that it is not an admin-rights issue.
I installed Python independently of Anaconda, and now when I run runas /netonly /user:domain\username "C:\somenewpath\python", importing nltk and matplotlib succeeds. However, this is not ideal for two reasons: 1) I have to manually install a host of packages, and 2) I now have two separate Python installations sitting in my environment.
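In case it helps with diagnosis, this is how I'm comparing what the two sessions actually see; my working assumption (not something I've confirmed) is that under runas the Anaconda environment never gets activated, so its DLL directories such as Library\bin may be missing from PATH:
import os, sys
print(sys.executable)  # which python is actually running
for p in os.environ.get("PATH", "").split(os.pathsep):
    if "conda" in p.lower():  # list any Anaconda-related directories on PATH
        print(p)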
Even though I have installed both libraries several times, in different orders and in different virtual environments, I'm still not able to import and use certain geospatial libraries like esda and libpysal. The following error shows up:
ImportError Traceback (most recent call last)
C:\Users\SLAADM~1\AppData\Local\Temp/ipykernel_35328/2667884714.py in <module>
3 import numpy as np
4 import matplotlib.pyplot as plt
----> 5 import esda
6 import libpysal as lps
7 import pysal
c:\users\sla admin\appdata\local\programs\python\python39\lib\site-packages\esda\__init__.py in <module>
5
6 """
----> 7 from . import adbscan
8 from .gamma import Gamma
9 from .geary import Geary
c:\users\sla admin\appdata\local\programs\python\python39\lib\site-packages\esda\adbscan.py in <module>
8 import pandas
9 import numpy as np
---> 10 from libpysal.cg.alpha_shapes import alpha_shape_auto
11 from scipy.spatial import cKDTree
12 from collections import Counter
c:\users\sla admin\appdata\local\programs\python\python39\lib\site-packages\libpysal\__init__.py in <module>
25 Tools for creating and manipulating weights
26 """
---> 27 from . import cg
28 from . import io
29 from . import weights
c:\users\sla admin\appdata\local\programs\python\python39\lib\site-packages\libpysal\cg\__init__.py in <module>
9 from .sphere import *
10 from .voronoi import *
---> 11 from .alpha_shapes import *
c:\users\sla admin\appdata\local\programs\python\python39\lib\site-packages\libpysal\cg\alpha_shapes.py in <module>
22
23 try:
---> 24 import pygeos
25
26 HAS_PYGEOS = True
c:\users\sla admin\appdata\local\programs\python\python39\lib\site-packages\pygeos\__init__.py in <module>
----> 1 from .lib import GEOSException # NOQA
2 from .lib import Geometry # NOQA
3 from .lib import geos_version, geos_version_string # NOQA
4 from .lib import geos_capi_version, geos_capi_version_string # NOQA
5 from .decorators import UnsupportedGEOSOperation # NOQA
ImportError: DLL load failed while importing lib: The specified procedure could not be found.
Would really appreciate any help in making this work. Please throw any suggestions you might have at me.
Install pygeos, i.e. conda install pygeos.
It worked for me.
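Once pygeos is installed, a quick way to confirm the compiled extension actually loads (this uses the same attributes pygeos imports in the traceback above):
import pygeos
print(pygeos.geos_version_string)  # raises the same DLL error if the GEOS library still cannot be found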
I found the same issue when running example code from a couple of years ago. The pysal API has changed.
Import libpysal first, then import the esda libraries, e.g.
import libpysal
from esda.moran import Moran
from esda.smaup import Smaup
see
https://pysal.org/esda/generated/esda.Moran.html
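As a follow-up usage sketch (the weights object w and the values array y here are hypothetical placeholders, not taken from the question):
import libpysal
from esda.moran import Moran
# w = libpysal.weights.Queen.from_dataframe(gdf)  # gdf: your GeoDataFrame (hypothetical)
# y = gdf["some_column"].values                   # observations aligned with w
# mi = Moran(y, w)
# print(mi.I, mi.p_sim)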
I ran this simple code on Google Colab.
###cell 1 : `!pip install syft`
###cell 2 : `import syft as sy`
and I got this error:
ModuleNotFoundError: No module named 'syft_proto.messaging.v1.protocol_pb2'
Here is the full error message:
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-3-9aeadc8ee442> in <module>()
----> 1 import syft as sy
7 frames
/usr/local/lib/python3.6/dist-packages/syft/__init__.py in <module>()
41
42 # Import grids
---> 43 from syft.grid.private_grid import PrivateGridNetwork
44 from syft.grid.public_grid import PublicGridNetwork
45
/usr/local/lib/python3.6/dist-packages/syft/grid/private_grid.py in <module>()
9 # Syft imports
10 from syft.grid.abstract_grid import AbstractGrid
---> 11 from syft.workers.node_client import NodeClient
12 from syft.messaging.plan.plan import Plan
13 from syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor
/usr/local/lib/python3.6/dist-packages/syft/workers/node_client.py in <module>()
5
6 # Syft imports
----> 7 from syft.serde import serialize
8 from syft.messaging.plan import Plan
9 from syft.codes import REQUEST_MSG, RESPONSE_MSG
/usr/local/lib/python3.6/dist-packages/syft/serde/__init__.py in <module>()
----> 1 from syft.serde.serde import *
/usr/local/lib/python3.6/dist-packages/syft/serde/serde.py in <module>()
10 from syft.workers.abstract import AbstractWorker
11
---> 12 from syft.serde import msgpack
13
14 ## SECTION: High Level Public Functions (these are the ones you use)
/usr/local/lib/python3.6/dist-packages/syft/serde/msgpack/__init__.py in <module>()
----> 1 from syft.serde.msgpack import serde
2 from syft.serde.msgpack import native_serde
3 from syft.serde.msgpack import torch_serde
4 from syft.serde.msgpack import proto
5
/usr/local/lib/python3.6/dist-packages/syft/serde/msgpack/serde.py in <module>()
57 from syft.messaging.plan import Plan
58 from syft.messaging.plan.state import State
---> 59 from syft.messaging.protocol import Protocol
60 from syft.messaging.message import Message
61 from syft.messaging.message import Operation
/usr/local/lib/python3.6/dist-packages/syft/messaging/protocol.py in <module>()
11 from syft.workers.abstract import AbstractWorker
12 from syft.workers.base import BaseWorker
---> 13 from syft_proto.messaging.v1.protocol_pb2 import Protocol as ProtocolPB
14
15
ModuleNotFoundError: No module named 'syft_proto.messaging.v1.protocol_pb2'
I hope you can help me. Thank you.
Thank you for the answers. It works when I downgrade these two packages; it is a temporary problem on the PySyft developers' side:
!pip install syft=="0.2.2a1"
!pip install syft_proto=="0.1.1a1.post17"
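One follow-up, based on Colab behaviour rather than anything PySyft-specific: after downgrading you usually need to restart the runtime before the pinned versions are picked up, and then re-check the import:
import syft as sy  # should now import without the ModuleNotFoundError
# print(sy.__version__)  # if the package exposes __version__, expect 0.2.2a1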
It looks like the module you are trying to use is either deprecated or has a newer version. See here; the file is not active.
Try searching for a newer version of this protocol, or for a similar protocol that caters to your needs.
Edit:
It may also be that you are using an older version of Syft, so I recommend upgrading both pip and Syft. Follow the instructions here:
You can upgrade both to their latest versions, which are kept compatible:
!pip install --upgrade syft
!pip install --upgrade syft_proto
ImportError Traceback (most recent call last)
<ipython-input-1-76a01d9c502b> in <module>
----> 1 import spacy
~\Anaconda3\envs\nlp_course\lib\site-packages\spacy\__init__.py in <module>
8 from thinc.neural.util import prefer_gpu, require_gpu
9
---> 10 from .cli.info import info as cli_info
11 from .glossary import explain
12 from .about import __version__
~\Anaconda3\envs\nlp_course\lib\site-packages\spacy\cli\__init__.py in <module>
----> 1 from .download import download
2 from .info import info
3 from .link import link
4 from .package import package
5 from .profile import profile
~\Anaconda3\envs\nlp_course\lib\site-packages\spacy\cli\download.py in <module>
9
10 from ._messages import Messages
---> 11 from .link import link
12 from ..util import prints, get_package_path
13 from .. import about
~\Anaconda3\envs\nlp_course\lib\site-packages\spacy\cli\link.py in <module>
7 from ._messages import Messages
8 from ..compat import symlink_to, path2str
----> 9 from ..util import prints
10 from .. import util
11
~\Anaconda3\envs\nlp_course\lib\site-packages\spacy\util.py in <module>
25 # Import these directly from Thinc, so that we're sure we always have the
26 # same version.
---> 27 from thinc.neural._classes.model import msgpack
28 from thinc.neural._classes.model import msgpack_numpy
29
ImportError: cannot import name 'msgpack' from 'thinc.neural._classes.model' (C:\Users\salwa\Anaconda3\envs\nlp_course\lib\site-packages\thinc\neural\_classes\model.py)
The problem is with thinc, a dependency of spaCy, as you can see here: ImportError: cannot import name 'msgpack' from 'thinc.neural._classes.model'
Follow the suggestion from Ines (a core developer of spaCy), which you can find here:
It looks like you might have ended up with conflicting installations
and dependencies – for example, the latest version of spaCy, but an
older version of its dependency, Thinc. In cases like this, it often
helps to just start out with a clean environment and reinstall from
scratch.
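A minimal sketch of that clean reinstall, assuming conda is used for environments (the environment name and Python version are placeholders):
conda create -n nlp_course_clean python=3.7
conda activate nlp_course_clean
pip install -U spacy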
I have tensorflow version 1.14.0 and pip3 is also up to date. But when I do:
import tensorflow as tf
import tensorflow_hub as hub
I get this error:
AttributeError: module 'tensorflow.python.ops.resource_variable_ops' has no attribute 'UninitializedVariable'
TensorFlow Hub is at version 0.5.0. Some answers on Stack Overflow have suggested that the tensorflow version should be greater than 1.7.0; mine is, and I still don't know why hub is not being imported. The complete traceback of the error is:
AttributeError Traceback (most recent call last)
<ipython-input-24-d70b4e927300> in <module>
1 from sklearn.model_selection import train_test_split
2 import tensorflow as tf
----> 3 import tensorflow_hub as hub
~/.local/lib/python3.6/site-packages/tensorflow_hub/__init__.py in <module>
27 # error message is thrown instead of an obscure error of missing
28 # symbols at executing the imports.
---> 29 from tensorflow_hub.estimator import LatestModuleExporter
30 from tensorflow_hub.estimator import register_module_for_export
31 from tensorflow_hub.feature_column import image_embedding_column
~/.local/lib/python3.6/site-packages/tensorflow_hub/estimator.py in <module>
23 from absl import logging
24 import tensorflow as tf
---> 25 from tensorflow_hub import tf_utils
26 from tensorflow_hub import tf_v1
27
~/.local/lib/python3.6/site-packages/tensorflow_hub/tf_utils.py in <module>
31 # depending on TensorFlow internal implementation details.
32 # pylint: disable=g-direct-tensorflow-import
---> 33 from tensorflow.python.feature_column import feature_column_v2
34 # pylint: enable=g-direct-tensorflow-import
35
~/.local/lib/python3.6/site-packages/tensorflow/python/feature_column/feature_column_v2.py in <module>
164 from tensorflow.python.platform import tf_logging as logging
165 from tensorflow.python.training import checkpoint_utils
--> 166 from tensorflow.python.training.tracking import tracking
167 from tensorflow.python.util import deprecation
168 from tensorflow.python.util import nest
~/.local/lib/python3.6/site-packages/tensorflow/python/training/tracking/tracking.py in <module>
21 import weakref
22
---> 23 from tensorflow.python.eager import def_function
24 from tensorflow.python.eager import function as defun
25 from tensorflow.python.framework import dtypes
~/.local/lib/python3.6/site-packages/tensorflow/python/eager/def_function.py in <module>
38
39
---> 40 class UnliftedInitializerVariable(resource_variable_ops.UninitializedVariable):
41 """Variable which does not lift its initializer out of function context.
42
AttributeError: module 'tensorflow.python.ops.resource_variable_ops' has no attribute 'UninitializedVariable'
So far I have tried uninstalling tf completely and then installing it again, reinstalling tf hub, and updating pip. Any help will be very much appreciated!
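For reference, this is how I'm checking which TensorFlow build actually gets imported (just standard module attributes; my assumption is that a mixed installation, e.g. one copy under ~/.local and another elsewhere, could explain def_function.py expecting an attribute that resource_variable_ops does not have):
import tensorflow as tf
print(tf.__version__)  # expected: 1.14.0
print(tf.__file__)     # shows which installed copy of TensorFlow is actually on sys.path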