I want to load FaceNet in Keras but I am getting errors.
The model facenet_keras.h5 is ready, but I can't load it.
You can get facenet_keras.h5 from this link:
https://drive.google.com/drive/folders/1pwQ3H4aJ8a6yyJHZkTwtjcL4wYWQb7bn
My TensorFlow version is:
tensorflow.__version__
'2.2.0'
and when I try to load the model:
from tensorflow.keras.models import load_model
load_model('facenet_keras.h5')
I get this error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-6-2a20f38e8217> in <module>
----> 1 load_model('facenet_keras.h5')
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/saving/save.py in load_model(filepath, custom_objects, compile)
182 if (h5py is not None and (
183 isinstance(filepath, h5py.File) or h5py.is_hdf5(filepath))):
--> 184 return hdf5_format.load_model_from_hdf5(filepath, custom_objects, compile)
185
186 if sys.version_info >= (3, 4) and isinstance(filepath, pathlib.Path):
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/saving/hdf5_format.py in load_model_from_hdf5(filepath, custom_objects, compile)
175 raise ValueError('No model found in config file.')
176 model_config = json.loads(model_config.decode('utf-8'))
--> 177 model = model_config_lib.model_from_config(model_config,
178 custom_objects=custom_objects)
179
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/saving/model_config.py in model_from_config(config, custom_objects)
53 '`Sequential.from_config(config)`?')
54 from tensorflow.python.keras.layers import deserialize # pylint: disable=g-import-not-at-top
---> 55 return deserialize(config, custom_objects=custom_objects)
56
57
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/layers/serialization.py in deserialize(config, custom_objects)
103 config['class_name'] = _DESERIALIZATION_TABLE[layer_class_name]
104
--> 105 return deserialize_keras_object(
106 config,
107 module_objects=globs,
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/utils/generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
367
368 if 'custom_objects' in arg_spec.args:
--> 369 return cls.from_config(
370 cls_config,
371 custom_objects=dict(
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/network.py in from_config(cls, config, custom_objects)
984 ValueError: In case of improperly formatted config dict.
985 """
--> 986 input_tensors, output_tensors, created_layers = reconstruct_from_config(
987 config, custom_objects)
988 model = cls(inputs=input_tensors, outputs=output_tensors,
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/network.py in reconstruct_from_config(config, custom_objects, created_layers)
2017 # First, we create all layers and enqueue nodes to be processed
2018 for layer_data in config['layers']:
-> 2019 process_layer(layer_data)
2020 # Then we process nodes in order of layer depth.
2021 # Nodes that cannot yet be processed (if the inbound node
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/network.py in process_layer(layer_data)
1999 from tensorflow.python.keras.layers import deserialize as deserialize_layer # pylint: disable=g-import-not-at-top
2000
-> 2001 layer = deserialize_layer(layer_data, custom_objects=custom_objects)
2002 created_layers[layer_name] = layer
2003
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/layers/serialization.py in deserialize(config, custom_objects)
103 config['class_name'] = _DESERIALIZATION_TABLE[layer_class_name]
104
--> 105 return deserialize_keras_object(
106 config,
107 module_objects=globs,
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/utils/generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
367
368 if 'custom_objects' in arg_spec.args:
--> 369 return cls.from_config(
370 cls_config,
371 custom_objects=dict(
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/layers/core.py in from_config(cls, config, custom_objects)
988 def from_config(cls, config, custom_objects=None):
989 config = config.copy()
--> 990 function = cls._parse_function_from_config(
991 config, custom_objects, 'function', 'module', 'function_type')
992
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/layers/core.py in _parse_function_from_config(cls, config, custom_objects, func_attr_name, module_attr_name, func_type_attr_name)
1040 elif function_type == 'lambda':
1041 # Unsafe deserialization from bytecode
-> 1042 function = generic_utils.func_load(
1043 config[func_attr_name], globs=globs)
1044 elif function_type == 'raw':
~/.local/lib/python3.8/site-packages/tensorflow/python/keras/utils/generic_utils.py in func_load(code, defaults, closure, globs)
469 except (UnicodeEncodeError, binascii.Error):
470 raw_code = code.encode('raw_unicode_escape')
--> 471 code = marshal.loads(raw_code)
472 if globs is None:
473 globs = globals()
ValueError: bad marshal data (unknown type code)
Thank you.
The possible solutions to this error are shown below:
The model might have been built and saved in Python 2.x while you are using Python 3.x. The solution is to use the same Python version with which the model was built and saved.
Use the same version of Keras (and possibly TensorFlow) on which your model was built and saved.
The saved model might contain custom objects. If so, you need to load the model with code like:
new_model = tf.keras.models.load_model('model.h5', custom_objects={'CustomLayer': CustomLayer})
If you can recreate the architecture (i.e. you have the original code used to generate it), you can instantiate the model from that code and then use model.load_weights('your_model_file.hdf5') to load the weights. This isn't an option if you don't have the code used to create the original architecture.
For more details, please refer to this GitHub issue. For more on saving and loading models with custom objects, please refer to this TensorFlow documentation and this Stack Overflow answer.
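For the last option, here is a minimal sketch of rebuilding the architecture and loading only the weights; build_facenet() is a hypothetical stand-in for whatever code originally constructed the network:
def build_facenet():
    # Hypothetical placeholder: recreate the original architecture here,
    # layer by layer, using the same code that built the saved model.
    raise NotImplementedError("use the original model-building code")

model = build_facenet()
# load_weights reads only the weight arrays from the HDF5 file, skipping the
# config/Lambda deserialization step that raises the marshal error above.
model.load_weights('facenet_keras.h5')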
I changed the Python version (3.10 to 3.7) and that solved it for me.
Related
I am trying to create a feature extractor using from torchvision.models.feature_extraction import create_feature_extractor.
The model I am trying to use is from the vit_pytorch library (link: https://github.com/lucidrains/vit-pytorch). The problem I face is that when I create a model from this library:
from vit_pytorch import ViT
from torchvision.models.feature_extraction import create_feature_extractor
model = ViT(
    image_size=28,
    patch_size=7,
    num_classes=10,
    dim=16,
    depth=6,
    heads=16,
    mlp_dim=256,
    dropout=0.1,
    emb_dropout=0.1,
    channels=1,
)
random_layer_name = 'transformer.layers.1.1.fn.net.4'
feature_extractor = create_feature_extractor(model,
                                             return_nodes=[random_layer_name])
and when I call create_feature_extractor() on this model, I always get this error:
RuntimeError Traceback (most recent call last)
Cell In[17], line 2
1 # torch.fx.wrap('len')
----> 2 feature_extractor = create_feature_extractor(model,
3 return_nodes=['transformer.layers.1.1.fn.net.4'])
File ~/Mokslas/AI/venv/lib/python3.10/site-packages/torchvision/models/feature_extraction.py:485, in create_feature_extractor(model, return_nodes, train_return_nodes, eval_return_nodes, tracer_kwargs, suppress_diff_warning)
483 # Instantiate our NodePathTracer and use that to trace the model
484 tracer = NodePathTracer(**tracer_kwargs)
--> 485 graph = tracer.trace(model)
487 name = model.__class__.__name__ if isinstance(model, nn.Module) else model.__name__
488 graph_module = fx.GraphModule(tracer.root, graph, name)
File ~/Mokslas/AI/venv/lib/python3.10/site-packages/torch/fx/_symbolic_trace.py:756, in Tracer.trace(self, root, concrete_args)
749 for module in self._autowrap_search:
750 _autowrap_check(
751 patcher, module.__dict__, self._autowrap_function_ids
752 )
753 self.create_node(
754 "output",
755 "output",
--> 756 (self.create_arg(fn(*args)),),
757 {},
758 type_expr=fn.__annotations__.get("return", None),
759 )
761 self.submodule_paths = None
762 finally:
File ~/Mokslas/AI/venv/lib/python3.10/site-packages/vit_pytorch/vit.py:115, in ViT.forward(self, img)
114 def forward(self, img):
--> 115 x = self.to_patch_embedding(img)
116 b, n, _ = x.shape
118 cls_tokens = repeat(self.cls_token, '1 1 d -> b 1 d', b = b)
File ~/Mokslas/AI/venv/lib/python3.10/site-packages/torch/fx/_symbolic_trace.py:734, in Tracer.trace.<locals>.module_call_wrapper(mod, *args, **kwargs)
727 return _orig_module_call(mod, *args, **kwargs)
729 _autowrap_check(
730 patcher,
731 getattr(getattr(mod, "forward", mod), "__globals__", {}),
732 self._autowrap_function_ids,
733 )
--> 734 return self.call_module(mod, forward, args, kwargs)
File ~/Mokslas/AI/venv/lib/python3.10/site-packages/torchvision/models/feature_extraction.py:83, in NodePathTracer.call_module(self, m, forward, args, kwargs)
...
--> 396 raise RuntimeError("'len' is not supported in symbolic tracing by default. If you want "
397 "this call to be recorded, please call torch.fx.wrap('len') at "
398 "module scope")
RuntimeError: 'len' is not supported in symbolic tracing by default. If you want this call to be recorded, please call torch.fx.wrap('len') at module scope
It doesn't matter which model I choose from that library, or which layer or layers I choose to output; I always get the same error.
I have tried adding torch.fx.wrap('len'), but the same problem persisted. I know I could work around it with hook methods, but is there a way to solve this problem so that I can still use the create_feature_extractor() functionality?
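For reference, a minimal sketch of the hook-based workaround mentioned above (not the create_feature_extractor() fix asked for; the layer path is the one from the question, and get_submodule requires a reasonably recent torch):
import torch
from vit_pytorch import ViT

model = ViT(image_size=28, patch_size=7, num_classes=10, dim=16, depth=6,
            heads=16, mlp_dim=256, dropout=0.1, emb_dropout=0.1, channels=1)

features = {}

def save_output(module, inputs, output):
    # called every time the hooked module runs a forward pass
    features['transformer.layers.1.1.fn.net.4'] = output.detach()

# resolve the dotted layer path to the actual submodule and attach the hook
layer = model.get_submodule('transformer.layers.1.1.fn.net.4')
handle = layer.register_forward_hook(save_output)

model.eval()
with torch.no_grad():
    model(torch.randn(1, 1, 28, 28))  # features now holds the layer output
handle.remove()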
I am trying to train a PyTorch model using SageMaker in local mode, but whenever I call estimator.fit the code hangs indefinitely and I have to interrupt the notebook kernel. This happens both on my local machine and in SageMaker Studio. But when I use EC2, the training runs normally.
Here is the call to the estimator, and the stack trace once I interrupt the kernel:
import sagemaker
from sagemaker.pytorch import PyTorch
bucket = "bucket-name"
role = sagemaker.get_execution_role()
training_input_path = f"s3://{bucket}/dataset/path"
sagemaker_session = sagemaker.LocalSession()
sagemaker_session.config = {"local": {"local_code": True}}
output_path = "file://."
estimator = PyTorch(
    entry_point="train.py",
    source_dir="src",
    hyperparameters={"max-epochs": 1},
    framework_version="1.8",
    py_version="py3",
    instance_count=1,
    instance_type="local",
    role=role,
    output_path=output_path,
    sagemaker_session=sagemaker_session,
)
estimator.fit({"training": training_input_path})
Stack trace:
---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
<ipython-input-9-35cdd6021288> in <module>
----> 1 estimator.fit({"training": training_input_path})
/opt/conda/lib/python3.7/site-packages/sagemaker/estimator.py in fit(self, inputs, wait, logs, job_name, experiment_config)
678 self._prepare_for_training(job_name=job_name)
679
--> 680 self.latest_training_job = _TrainingJob.start_new(self, inputs, experiment_config)
681 self.jobs.append(self.latest_training_job)
682 if wait:
/opt/conda/lib/python3.7/site-packages/sagemaker/estimator.py in start_new(cls, estimator, inputs, experiment_config)
1450 """
1451 train_args = cls._get_train_args(estimator, inputs, experiment_config)
-> 1452 estimator.sagemaker_session.train(**train_args)
1453
1454 return cls(estimator.sagemaker_session, estimator._current_job_name)
/opt/conda/lib/python3.7/site-packages/sagemaker/session.py in train(self, input_mode, input_config, role, job_name, output_config, resource_config, vpc_config, hyperparameters, stop_condition, tags, metric_definitions, enable_network_isolation, image_uri, algorithm_arn, encrypt_inter_container_traffic, use_spot_instances, checkpoint_s3_uri, checkpoint_local_path, experiment_config, debugger_rule_configs, debugger_hook_config, tensorboard_output_config, enable_sagemaker_metrics, profiler_rule_configs, profiler_config, environment, retry_strategy)
572 LOGGER.info("Creating training-job with name: %s", job_name)
573 LOGGER.debug("train request: %s", json.dumps(train_request, indent=4))
--> 574 self.sagemaker_client.create_training_job(**train_request)
575
576 def _get_train_request( # noqa: C901
/opt/conda/lib/python3.7/site-packages/sagemaker/local/local_session.py in create_training_job(self, TrainingJobName, AlgorithmSpecification, OutputDataConfig, ResourceConfig, InputDataConfig, **kwargs)
184 hyperparameters = kwargs["HyperParameters"] if "HyperParameters" in kwargs else {}
185 logger.info("Starting training job")
--> 186 training_job.start(InputDataConfig, OutputDataConfig, hyperparameters, TrainingJobName)
187
188 LocalSagemakerClient._training_jobs[TrainingJobName] = training_job
/opt/conda/lib/python3.7/site-packages/sagemaker/local/entities.py in start(self, input_data_config, output_data_config, hyperparameters, job_name)
219
220 self.model_artifacts = self.container.train(
--> 221 input_data_config, output_data_config, hyperparameters, job_name
222 )
223 self.end_time = datetime.datetime.now()
/opt/conda/lib/python3.7/site-packages/sagemaker/local/image.py in train(self, input_data_config, output_data_config, hyperparameters, job_name)
200 data_dir = self._create_tmp_folder()
201 volumes = self._prepare_training_volumes(
--> 202 data_dir, input_data_config, output_data_config, hyperparameters
203 )
204 # If local, source directory needs to be updated to mounted /opt/ml/code path
/opt/conda/lib/python3.7/site-packages/sagemaker/local/image.py in _prepare_training_volumes(self, data_dir, input_data_config, output_data_config, hyperparameters)
487 os.mkdir(channel_dir)
488
--> 489 data_source = sagemaker.local.data.get_data_source_instance(uri, self.sagemaker_session)
490 volumes.append(_Volume(data_source.get_root_dir(), channel=channel_name))
491
/opt/conda/lib/python3.7/site-packages/sagemaker/local/data.py in get_data_source_instance(data_source, sagemaker_session)
52 return LocalFileDataSource(parsed_uri.netloc + parsed_uri.path)
53 if parsed_uri.scheme == "s3":
---> 54 return S3DataSource(parsed_uri.netloc, parsed_uri.path, sagemaker_session)
55 raise ValueError(
56 "data_source must be either file or s3. parsed_uri.scheme: {}".format(parsed_uri.scheme)
/opt/conda/lib/python3.7/site-packages/sagemaker/local/data.py in __init__(self, bucket, prefix, sagemaker_session)
183 working_dir = "/private{}".format(working_dir)
184
--> 185 sagemaker.utils.download_folder(bucket, prefix, working_dir, sagemaker_session)
186 self.files = LocalFileDataSource(working_dir)
187
/opt/conda/lib/python3.7/site-packages/sagemaker/utils.py in download_folder(bucket_name, prefix, target, sagemaker_session)
286 raise
287
--> 288 _download_files_under_prefix(bucket_name, prefix, target, s3)
289
290
/opt/conda/lib/python3.7/site-packages/sagemaker/utils.py in _download_files_under_prefix(bucket_name, prefix, target, s3)
314 if exc.errno != errno.EEXIST:
315 raise
--> 316 obj.download_file(file_path)
317
318
/opt/conda/lib/python3.7/site-packages/boto3/s3/inject.py in object_download_file(self, Filename, ExtraArgs, Callback, Config)
313 return self.meta.client.download_file(
314 Bucket=self.bucket_name, Key=self.key, Filename=Filename,
--> 315 ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
316
317
/opt/conda/lib/python3.7/site-packages/boto3/s3/inject.py in download_file(self, Bucket, Key, Filename, ExtraArgs, Callback, Config)
171 return transfer.download_file(
172 bucket=Bucket, key=Key, filename=Filename,
--> 173 extra_args=ExtraArgs, callback=Callback)
174
175
/opt/conda/lib/python3.7/site-packages/boto3/s3/transfer.py in download_file(self, bucket, key, filename, extra_args, callback)
305 bucket, key, filename, extra_args, subscribers)
306 try:
--> 307 future.result()
308 # This is for backwards compatibility where when retries are
309 # exceeded we need to throw the same error from boto3 instead of
/opt/conda/lib/python3.7/site-packages/s3transfer/futures.py in result(self)
107 except KeyboardInterrupt as e:
108 self.cancel()
--> 109 raise e
110
111 def cancel(self):
/opt/conda/lib/python3.7/site-packages/s3transfer/futures.py in result(self)
104 # however if a KeyboardInterrupt is raised we want want to exit
105 # out of this and propogate the exception.
--> 106 return self._coordinator.result()
107 except KeyboardInterrupt as e:
108 self.cancel()
/opt/conda/lib/python3.7/site-packages/s3transfer/futures.py in result(self)
258 # possible value integer value, which is on the scale of billions of
259 # years...
--> 260 self._done_event.wait(MAXINT)
261
262 # Once done waiting, raise an exception if present or return the
/opt/conda/lib/python3.7/threading.py in wait(self, timeout)
550 signaled = self._flag
551 if not signaled:
--> 552 signaled = self._cond.wait(timeout)
553 return signaled
554
/opt/conda/lib/python3.7/threading.py in wait(self, timeout)
294 try: # restore state no matter what (e.g., KeyboardInterrupt)
295 if timeout is None:
--> 296 waiter.acquire()
297 gotit = True
298 else:
KeyboardInterrupt:
SageMaker Studio does not natively support local mode. Studio apps are themselves Docker containers, and they would therefore require privileged access to be able to build and run Docker containers.
As an alternative solution, you can create a remote Docker host on an EC2 instance and set up Docker on your Studio app. There is quite a bit of networking and package installation involved, but the solution will enable you to use full Docker functionality. Additionally, as of version 2.80.0 of the SageMaker Python SDK, local mode is supported when you are using a remote Docker host.
sdocker, a SageMaker Studio Docker CLI extension (see this repo), can simplify deploying the above solution in two simple steps (it only works for Studio domains in VPCOnly mode), and it has an easy-to-follow example here.
UPDATE:
There is now a UI extension (see repo) which can make the experience much smoother and easier to manage.
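As a quick sanity check before relying on that, you can confirm the installed SDK version (a minimal sketch):
import sagemaker

# Local mode with a remote Docker host requires SageMaker Python SDK >= 2.80.0.
print(sagemaker.__version__)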
I have trained a model on Kaggle at this link:
https://www.kaggle.com/dcosmin/shufflenet-with-keras
using the source code from this link: https://github.com/opconty/keras-shufflenetV2/blob/master/shufflenetv2.py. After the training finished, I saved a model called ShuffleNetV2.h5 and the weights as weights.hdf5. When I try to run the code on my computer:
# model = tf.keras.models.load_model('L-CNN v4.0.h5')
# model = tf.keras.models.load_model('MobileNetV2 - 131 - 2.0.h5')
model = tf.keras.models.load_model('ShuffleNetV2 - 131.h5')
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
I need to run this code with the latest versions of Keras and TensorFlow under Python 3.7, because it has to run on a Raspberry Pi. The error:
SystemError Traceback (most recent call last)
<ipython-input-15-914c5b8863a1> in <module>
1 #model = tf.keras.models.load_model('L-CNN v4.0.h5')
2 #model = tf.keras.models.load_model('MobileNetV2 - 131 - 2.0.h5')
----> 3 model = tf.keras.models.load_model('ShuffleNetV2 - 131.h5')
4 #model=load_model('ShuffleNetV2 - 131.h5')
5 model.compile(loss='categorical_crossentropy',
C:\ProgramData\Anaconda3\envs\Computer Vision\lib\site-packages\tensorflow_core\python\keras\saving\save.py in load_model(filepath, custom_objects, compile)
144 if (h5py is not None and (
145 isinstance(filepath, h5py.File) or h5py.is_hdf5(filepath))):
--> 146 return hdf5_format.load_model_from_hdf5(filepath, custom_objects, compile)
147
148 if isinstance(filepath, six.string_types):
C:\ProgramData\Anaconda3\envs\Computer Vision\lib\site-packages\tensorflow_core\python\keras\saving\hdf5_format.py in load_model_from_hdf5(filepath, custom_objects, compile)
166 model_config = json.loads(model_config.decode('utf-8'))
167 model = model_config_lib.model_from_config(model_config,
--> 168 custom_objects=custom_objects)
169
170 # set weights
C:\ProgramData\Anaconda3\envs\Computer Vision\lib\site-packages\tensorflow_core\python\keras\saving\model_config.py in model_from_config(config, custom_objects)
53 '`Sequential.from_config(config)`?')
54 from tensorflow.python.keras.layers import deserialize # pylint: disable=g-import-not-at-top
---> 55 return deserialize(config, custom_objects=custom_objects)
56
57
C:\ProgramData\Anaconda3\envs\Computer Vision\lib\site-packages\tensorflow_core\python\keras\layers\serialization.py in deserialize(config, custom_objects)
104 module_objects=globs,
105 custom_objects=custom_objects,
--> 106 printable_module_name='layer')
C:\ProgramData\Anaconda3\envs\Computer Vision\lib\site-packages\tensorflow_core\python\keras\utils\generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
301 custom_objects=dict(
302 list(_GLOBAL_CUSTOM_OBJECTS.items()) +
--> 303 list(custom_objects.items())))
304 with CustomObjectScope(custom_objects):
305 return cls.from_config(cls_config)
C:\ProgramData\Anaconda3\envs\Computer Vision\lib\site-packages\tensorflow_core\python\keras\engine\network.py in from_config(cls, config, custom_objects)
935 """
936 input_tensors, output_tensors, created_layers = reconstruct_from_config(
--> 937 config, custom_objects)
938 model = cls(inputs=input_tensors, outputs=output_tensors,
939 name=config.get('name'))
C:\ProgramData\Anaconda3\envs\Computer Vision\lib\site-packages\tensorflow_core\python\keras\engine\network.py in reconstruct_from_config(config, custom_objects, created_layers)
1901 if layer in unprocessed_nodes:
1902 for node_data in unprocessed_nodes.pop(layer):
-> 1903 process_node(layer, node_data)
1904
1905 input_tensors = []
C:\ProgramData\Anaconda3\envs\Computer Vision\lib\site-packages\tensorflow_core\python\keras\engine\network.py in process_node(layer, node_data)
1849 if not isinstance(input_tensors, dict) and len(flat_input_tensors) == 1:
1850 input_tensors = flat_input_tensors[0]
-> 1851 output_tensors = layer(input_tensors, **kwargs)
1852
1853 # Update node index map.
C:\ProgramData\Anaconda3\envs\Computer Vision\lib\site-packages\tensorflow_core\python\keras\engine\base_layer.py in __call__(self, inputs, *args, **kwargs)
771 not base_layer_utils.is_in_eager_or_tf_function()):
772 with auto_control_deps.AutomaticControlDependencies() as acd:
--> 773 outputs = call_fn(cast_inputs, *args, **kwargs)
774 # Wrap Tensors in `outputs` in `tf.identity` to avoid
775 # circular dependencies.
C:\ProgramData\Anaconda3\envs\Computer Vision\lib\site-packages\tensorflow_core\python\keras\layers\core.py in call(self, inputs, mask, training)
844 with backprop.GradientTape(watch_accessed_variables=True) as tape,\
845 variable_scope.variable_creator_scope(_variable_creator):
--> 846 result = self.function(inputs, **kwargs)
847 self._check_variables(created_variables, tape.watched_variables())
848 return result
C:\ProgramData\Anaconda3\envs\Computer Vision\lib\site-packages\tensorflow_core\python\keras\layers\core.py in channel_shuffle(x)
SystemError: unknown opcode
I really need to fix these incompatibilities. Can you tell me whether I have to change something on Kaggle and train it again?
This error is due to a version difference. You can solve it by saving the architecture as code and the weights in an h5 file; this is compatible across versions.
This was raised in the Keras repository:
https://github.com/keras-team/keras/issues/9595
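A minimal sketch of that approach, assuming the builder function from the linked shufflenetv2.py is importable; the function name and arguments below are assumptions taken from that repo, so check the file for the exact signature:
# On the training machine (Kaggle): save only the weights.
model.save_weights('shufflenet_weights.hdf5')

# On the target machine (e.g. the Raspberry Pi): rebuild the architecture from
# source code, then load the weights into it.
from shufflenetv2 import ShuffleNetV2  # assumption: the linked file is on the path

model = ShuffleNetV2(input_shape=(224, 224, 3), classes=1000)  # hypothetical arguments
model.load_weights('shufflenet_weights.hdf5')
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])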
The opcodes in the model are not recognised by your Python interpreter. When loading the model, make sure you are running the same version of Python that was used to create and save it.
Goal: TFX -> TF Lite Converter -> Deploy models on mobile/IoT devices
I am currently learning TensorFlow Extended through its Chicago Taxi Pipeline example.
The pipeline has finished running (although through a lot of hardships) and the Pusher component has emitted a TensorFlow SavedModel file (.pb).
However, a new problem comes up here:
with TensorFlow nightly/1.13.1 (I tried both) and Python 2.7.6, I can generate, save and load a SavedModel (a model for MNIST digit data, for testing the utility) with some simple Python code such as saved_model.simple_save and saved_model.loader.load, but I keep running into errors when applying the same calls to the models the TFX Pusher emits, as follows.
(Maybe I did something wrong with the TFX pipeline?)
The code I used:
import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    tf.compat.v1.saved_model.loader.load(
        sess, ["serve"],
        "/home/tigerpaws/taxi/serving_model/taxi_simple/1553187887")  # or "/home/tigerpaws/saved_model_example/model"
    graph = tf.get_default_graph()
Error:
KeyError Traceback (most recent call last)
<ipython-input-11-a6978b82c3d2> in <module>()
1 with tf.Session(graph=tf.Graph()) as sess:
----> 2 tf.compat.v1.saved_model.loader.load(sess, ["serve"], "/home/tigerpaws/taxi/serving_model/taxi_simple/1553187887")#"/home/tigerpaws/saved_model_example/model")
3 graph=tf.get_default_graph()
/home/tigerpaws/anaconda/lib/python2.7/site-packages/tensorflow/python/util/deprecation.pyc in new_func(*args, **kwargs)
322 'in a future version' if date is None else ('after %s' % date),
323 instructions)
--> 324 return func(*args, **kwargs)
325 return tf_decorator.make_decorator(
326 func, new_func, 'deprecated',
/home/tigerpaws/anaconda/lib/python2.7/site-packages/tensorflow/python/saved_model/loader_impl.pyc in load(sess, tags, export_dir, import_scope, **saver_kwargs)
267 """
268 loader = SavedModelLoader(export_dir)
--> 269 return loader.load(sess, tags, import_scope, **saver_kwargs)
270
271
/home/tigerpaws/anaconda/lib/python2.7/site-packages/tensorflow/python/saved_model/loader_impl.pyc in load(self, sess, tags, import_scope, **saver_kwargs)
418 with sess.graph.as_default():
419 saver, _ = self.load_graph(sess.graph, tags, import_scope,
--> 420 **saver_kwargs)
421 self.restore_variables(sess, saver, import_scope)
422 self.run_init_ops(sess, tags, import_scope)
/home/tigerpaws/anaconda/lib/python2.7/site-packages/tensorflow/python/saved_model/loader_impl.pyc in load_graph(self, graph, tags, import_scope, **saver_kwargs)
348 with graph.as_default():
349 return tf_saver._import_meta_graph_with_return_elements( # pylint: disable=protected-access
--> 350 meta_graph_def, import_scope=import_scope, **saver_kwargs)
351
352 def restore_variables(self, sess, saver, import_scope=None):
/home/tigerpaws/anaconda/lib/python2.7/site-packages/tensorflow/python/training/saver.pyc in _import_meta_graph_with_return_elements(meta_graph_or_file, clear_devices, import_scope, return_elements, **kwargs)
1455 import_scope=import_scope,
1456 return_elements=return_elements,
-> 1457 **kwargs))
1458
1459 saver = _create_saver_from_imported_meta_graph(
/home/tigerpaws/anaconda/lib/python2.7/site-packages/tensorflow/python/framework/meta_graph.pyc in import_scoped_meta_graph_with_return_elements(meta_graph_or_file, clear_devices, graph, import_scope, input_map, unbound_inputs_col_name, restore_collections_predicate, return_elements)
804 input_map=input_map,
805 producer_op_list=producer_op_list,
--> 806 return_elements=return_elements)
807
808 # Restores all the other collections.
/home/tigerpaws/anaconda/lib/python2.7/site-packages/tensorflow/python/util/deprecation.pyc in new_func(*args, **kwargs)
505 'in a future version' if date is None else ('after %s' % date),
506 instructions)
--> 507 return func(*args, **kwargs)
508
509 doc = _add_deprecated_arg_notice_to_docstring(
/home/tigerpaws/anaconda/lib/python2.7/site-packages/tensorflow/python/framework/importer.pyc in import_graph_def(graph_def, input_map, return_elements, name, op_dict, producer_op_list)
397 if producer_op_list is not None:
398 # TODO(skyewm): make a copy of graph_def so we're not mutating the argument?
--> 399 _RemoveDefaultAttrs(op_dict, producer_op_list, graph_def)
400
401 graph = ops.get_default_graph()
/home/tigerpaws/anaconda/lib/python2.7/site-packages/tensorflow/python/framework/importer.pyc in _RemoveDefaultAttrs(op_dict, producer_op_list, graph_def)
157 # Remove any default attr values that aren't in op_def.
158 if node.op in producer_op_dict:
--> 159 op_def = op_dict[node.op]
160 producer_op_def = producer_op_dict[node.op]
161 # We make a copy of node.attr to iterate through since we may modify
KeyError: u'BucketizeWithInputBoundaries'
I also tried to convert the SavedModel into a GraphDef (frozen graph) so I could give the converter another try.
The conversion needs output_node_names, which I don't know;
nor could I find where the model is saved in the code (where I might have spotted the output node names).
Any ideas on the problem, or alternative approaches? Thanks in advance.
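One way to hunt for the output node names is to read the SavedModel proto directly, which sidesteps the KeyError above since nothing is imported into a graph (a minimal sketch for TF 1.x; the path is the one from the question):
from tensorflow.core.protobuf import saved_model_pb2

# Parse saved_model.pb without importing the graph into a session, so the
# unregistered BucketizeWithInputBoundaries op does not get in the way.
sm = saved_model_pb2.SavedModel()
with open('/home/tigerpaws/taxi/serving_model/taxi_simple/1553187887/saved_model.pb', 'rb') as f:
    sm.ParseFromString(f.read())

for mg in sm.meta_graphs:
    for sig_name, sig in mg.signature_def.items():
        for out_key, tensor_info in sig.outputs.items():
            # tensor names look like "node_name:0"; the node name is the part before the colon
            print(sig_name, out_key, tensor_info.name)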
Sorry for the confusion caused; the problem is actually caused by the reading of the SavedModel file.
The SavedModel contains an operation, BucketizeWithInputBoundaries, that is not defined in op_dict.
This is still on Google's TODO list, noted in comments in two of their scripts, here and here (GitHub links):
# TODO(jyzhao): BucketizeWithInputBoundaries error without this.
Importing the specified script solves the problem:
from tensorflow.contrib.boosted_trees.python.ops import quantile_ops # pylint: disable=unused-import
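In context, the import just needs to run before the load call; a sketch based on the loading code from the question:
# The unused import registers BucketizeWithInputBoundaries with TensorFlow.
from tensorflow.contrib.boosted_trees.python.ops import quantile_ops  # pylint: disable=unused-import
import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    tf.compat.v1.saved_model.loader.load(
        sess, ["serve"],
        "/home/tigerpaws/taxi/serving_model/taxi_simple/1553187887")
    graph = tf.get_default_graph()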
I installed spacy using pip and then downloaded the English model using
$ python -m spacy download en
which after downloading gave me the message
You can now load the model via spacy.load('en')
Using IPython,
import spacy
nlp=spacy.load('en')
AttributeError Traceback (most recent call last)
<ipython-input-5-a32b6d2b36d8> in <module>()
----> 1 nlp=spacy.load('en')
C:\Users\PARVATHY SARAT\Anaconda2\lib\site-packages\spacy\__init__.pyc in load(name, **overrides)
13 from .deprecated import resolve_load_name
14 name = resolve_load_name(name, **overrides)
---> 15 return util.load_model(name, **overrides)
16
17
C:\Users\PARVATHY SARAT\Anaconda2\lib\site-packages\spacy\util.pyc in load_model(name, **overrides)
102 if isinstance(name, basestring_):
103 if name in set([d.name for d in data_path.iterdir()]): # in data dir / shortcut
--> 104 return load_model_from_link(name, **overrides)
105 if is_package(name): # installed as package
106 return load_model_from_package(name, **overrides)
C:\Users\PARVATHY SARAT\Anaconda2\lib\site-packages\spacy\util.pyc in load_model_from_link(name, **overrides)
121 "Cant' load '%s'. If you're using a shortcut link, make sure it "
122 "points to a valid model package (not just a data directory)." % name)
--> 123 return cls.load(**overrides)
124
125
C:\Users\PARVATHY SARAT\Anaconda2\lib\site-packages\spacy\data\en\__init__.pyc in load(**overrides)
10
11 def load(**overrides):
---> 12 return load_model_from_init_py(__file__, **overrides)
C:\Users\PARVATHY SARAT\Anaconda2\lib\site-packages\spacy\util.pyc in load_model_from_init_py(init_file, **overrides)
165 if not model_path.exists():
166 raise ValueError("Can't find model directory: %s" % path2str(data_path))
--> 167 return load_model_from_path(data_path, meta, **overrides)
168
169
C:\Users\PARVATHY SARAT\Anaconda2\lib\site-packages\spacy\util.pyc in load_model_from_path(model_path, meta, **overrides)
148 component = nlp.create_pipe(name, config=config)
149 nlp.add_pipe(component, name=name)
--> 150 return nlp.from_disk(model_path)
151
152
C:\Users\PARVATHY SARAT\Anaconda2\lib\site-packages\spacy\language.pyc in from_disk(self, path, disable)
571 if not (path / 'vocab').exists():
572 exclude['vocab'] = True
--> 573 util.from_disk(path, deserializers, exclude)
574 return self
575
C:\Users\PARVATHY SARAT\Anaconda2\lib\site-packages\spacy\util.pyc in from_disk(path, readers, exclude)
495 for key, reader in readers.items():
496 if key not in exclude:
--> 497 reader(path / key)
498 return path
499
C:\Users\PARVATHY SARAT\Anaconda2\lib\site-packages\spacy\language.pyc in <lambda>(p)
558 path = util.ensure_path(path)
559 deserializers = OrderedDict((
--> 560 ('vocab', lambda p: self.vocab.from_disk(p)),
561 ('tokenizer', lambda p: self.tokenizer.from_disk(p, vocab=False)),
562 ('meta.json', lambda p: p.open('w').write(json_dumps(self.meta)))
vocab.pyx in spacy.vocab.Vocab.from_disk()
vectors.pyx in spacy.vectors.Vectors.from_disk()
C:\Users\PARVATHY SARAT\Anaconda2\lib\site-packages\spacy\util.pyc in from_disk(path, readers, exclude)
495 for key, reader in readers.items():
496 if key not in exclude:
--> 497 reader(path / key)
498 return path
499
vectors.pyx in spacy.vectors.Vectors.from_disk.load_keys()
C:\Users\PARVATHY SARAT\Anaconda2\lib\site-packages\numpy\lib\npyio.pyc in load(file, mmap_mode, allow_pickle, fix_imports, encoding)
389 _ZIP_PREFIX = asbytes('PK\x03\x04')
390 N = len(format.MAGIC_PREFIX)
--> 391 magic = fid.read(N)
392 fid.seek(-N, 1) # back-up
393 if magic.startswith(_ZIP_PREFIX):
AttributeError: 'WindowsPath' object has no attribute 'read'
I have the English model files (en_core_web_sm) downloaded to the working directory; am I missing something? Do I need to set a path variable? Any help is much appreciated, thanks!
If anybody else receives this error: I opened this as an issue with spaCy's developers on GitHub. I was advised to use Python 3.6 instead of 2.7 for the moment, as there is no alternative workaround for the problem. The next spaCy version should include this bugfix (I'm told).
Yes, there is a glitch involving language downloads in Anaconda environments. Here is the pending pull request:
https://github.com/explosion/spaCy/pull/1792