As the install guide says, I've installed spaCy in a conda environment with:
conda install -c conda-forge spacy
python -m spacy download en_core_web_trf
I have spacy-transformers already installed. But when I simply do:
import spacy
spacy.load("en_core_web_trf")
It shows me this error:
ValueError: [E002] Can't find factory for 'transformer' for language English (en). This usually happens when spaCy calls `nlp.create_pipe` with a custom component name that's not registered on the current language class. If you're using a Transformer, make sure to install 'spacy-transformers'. If you're using a custom component, make sure you've added the decorator `@Language.component` (for function components) or `@Language.factory` (for class components).
Available factories: attribute_ruler, tok2vec, merge_noun_chunks, merge_entities, merge_subtokens, token_splitter, parser, beam_parser, entity_linker, ner, beam_ner, entity_ruler, lemmatizer, tagger, morphologizer, senter, sentencizer, textcat, spancat, textcat_multilabel, en.lemmatizer
More info about the error:
ValueError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_11108/2648447056.py in <module>
----> 1 nlp_en = spacy.load("en_core_web_trf")
~\Anaconda3\envs\rl\lib\site-packages\spacy\__init__.py in load(name, vocab, disable, exclude, config)
49 RETURNS (Language): The loaded nlp object.
50 """
---> 51 return util.load_model(
52 name, vocab=vocab, disable=disable, exclude=exclude, config=config
53 )
~\Anaconda3\envs\rl\lib\site-packages\spacy\util.py in load_model(name, vocab, disable, exclude, config)
345 return get_lang_class(name.replace("blank:", ""))()
346 if is_package(name): # installed as package
--> 347 return load_model_from_package(name, **kwargs)
348 if Path(name).exists(): # path to model data directory
349 return load_model_from_path(Path(name), **kwargs)
~\Anaconda3\envs\rl\lib\site-packages\spacy\util.py in load_model_from_package(name, vocab, disable, exclude, config)
378 """
379 cls = importlib.import_module(name)
--> 380 return cls.load(vocab=vocab, disable=disable, exclude=exclude, config=config)
381
382
~\Anaconda3\envs\rl\lib\site-packages\en_core_web_trf\__init__.py in load(**overrides)
8
9 def load(**overrides):
---> 10 return load_model_from_init_py(__file__, **overrides)
~\Anaconda3\envs\rl\lib\site-packages\spacy\util.py in load_model_from_init_py(init_file, vocab, disable, exclude, config)
538 if not model_path.exists():
539 raise IOError(Errors.E052.format(path=data_path))
--> 540 return load_model_from_path(
541 data_path,
542 vocab=vocab,
~\Anaconda3\envs\rl\lib\site-packages\spacy\util.py in load_model_from_path(model_path, meta, vocab, disable, exclude, config)
413 overrides = dict_to_dot(config)
414 config = load_config(config_path, overrides=overrides)
--> 415 nlp = load_model_from_config(config, vocab=vocab, disable=disable, exclude=exclude)
416 return nlp.from_disk(model_path, exclude=exclude, overrides=overrides)
417
~\Anaconda3\envs\rl\lib\site-packages\spacy\util.py in load_model_from_config(config, vocab, disable, exclude, auto_fill, validate)
450 # registry, including custom subclasses provided via entry points
451 lang_cls = get_lang_class(nlp_config["lang"])
--> 452 nlp = lang_cls.from_config(
453 config,
454 vocab=vocab,
~\Anaconda3\envs\rl\lib\site-packages\spacy\language.py in from_config(cls, config, vocab, disable, exclude, meta, auto_fill, validate)
1712 # The pipe name (key in the config) here is the unique name
1713 # of the component, not necessarily the factory
-> 1714 nlp.add_pipe(
1715 factory,
1716 name=pipe_name,
~\Anaconda3\envs\rl\lib\site-packages\spacy\language.py in add_pipe(self, factory_name, name, before, after, first, last, source, config, raw_config, validate)
774 lang_code=self.lang,
775 )
--> 776 pipe_component = self.create_pipe(
777 factory_name,
778 name=name,
~\Anaconda3\envs\rl\lib\site-packages\spacy\language.py in create_pipe(self, factory_name, name, config, raw_config, validate)
639 lang_code=self.lang,
640 )
--> 641 raise ValueError(err)
642 pipe_meta = self.get_factory_meta(factory_name)
643 # This is unideal, but the alternative would mean you always need to
Are you sure you installed spacy-transformers after installing spaCy?
I am using pip:
pip install spacy-transformers
and I have no problems loading the en_core_web_trf.
For anyone who tried this solution but still did not get it to work: something the other answers did not mention (because it is trivial), but which had me staring at it for ages, is that after
!pip install spacy-transformers
you still need to place
import spacy_transformers
at the top of your code.
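In other words, after installing the package, the loading code becomes (a minimal sketch; the sample sentence is arbitrary):
import spacy
import spacy_transformers  # explicit import so the 'transformer' factory is registered

nlp = spacy.load("en_core_web_trf")
doc = nlp("This loads the transformer pipeline.")
print([(ent.text, ent.label_) for ent in doc.ents])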
# !pip install spacy
# !pip install spacy-transformers
# !python3 -m spacy download en_core_web_trf
It's working fine. If you are working in Google Colab, replace the first line with the latest spaCy version (3.2.4 at the time of writing):
# !pip install spacy==3.2.4
Everything else stays the same; by default Colab was downloading an older version. Hope this answers your question!
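Putting the pinned install together with the import fix above, a full Colab cell might look like this (a sketch; the 3.2.4 pin is only illustrative, use whatever current version you need):
!pip install spacy==3.2.4
!pip install spacy-transformers
!python3 -m spacy download en_core_web_trf

import spacy
import spacy_transformers  # explicit import, as noted in the answer above

nlp = spacy.load("en_core_web_trf")
print(nlp.pipe_names)  # 'transformer' should be listed among the components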
I'm attaching the actual error shown below. I'm using MLRun with Docker, specifically MLRun 1.2.0.
--------------------------------------------------------------------------
RunError Traceback (most recent call last)
<ipython-input-20-aab97e08b914> in <module>
1 serving_fn.with_code(body=" ") # adds the serving wrapper, not required with MLRun >= 1.0.3
----> 2 project.deploy_function(serving_fn)
/opt/conda/lib/python3.8/site-packages/mlrun/projects/project.py in deploy_function(self, function, dashboard, models, env, tag, verbose, builder_env, mock)
2307 :param mock: deploy mock server vs a real Nuclio function (for local simulations)
2308 """
-> 2309 return deploy_function(
2310 function,
2311 dashboard=dashboard,
/opt/conda/lib/python3.8/site-packages/mlrun/projects/operations.py in deploy_function(function, dashboard, models, env, tag, verbose, builder_env, project_object, mock)
344 )
345
--> 346 address = function.deploy(
347 dashboard=dashboard, tag=tag, verbose=verbose, builder_env=builder_env
348 )
/opt/conda/lib/python3.8/site-packages/mlrun/runtimes/serving.py in deploy(self, dashboard, project, tag, verbose, auth_info, builder_env)
621 logger.info(f"deploy root function {self.metadata.name} ...")
622
--> 623 return super().deploy(
624 dashboard, project, tag, verbose, auth_info, builder_env=builder_env
625 )
/opt/conda/lib/python3.8/site-packages/mlrun/runtimes/function.py in deploy(self, dashboard, project, tag, verbose, auth_info, builder_env)
550 self.status = data["data"].get("status")
551 self._update_credentials_from_remote_build(data["data"])
--> 552 self._wait_for_function_deployment(db, verbose=verbose)
553
554 # NOTE: on older mlrun versions & nuclio versions, function are exposed via NodePort
/opt/conda/lib/python3.8/site-packages/mlrun/runtimes/function.py in _wait_for_function_deployment(self, db, verbose)
620 if state != "ready":
621 logger.error("Nuclio function failed to deploy", function_state=state)
--> 622 raise RunError(f"function {self.metadata.name} deployment failed")
623
624 @min_nuclio_versions("1.5.20", "1.6.10")
RunError: function serving deployment failed
I don't have any idea what the reason behind this error is; I'm a newbie here, so could someone please help me resolve it?
I see two steps to solve the issue:
1. Relevant installation
The MLRun Community Edition in desktop Docker has to be installed under the relevant HOST_IP (not localhost or 127.0.0.1, but a stable IP address; see ipconfig) and with the relevant SHARED_DIR. See the relevant command line (on Windows):
set HOST_IP=192.168.0.150
set SHARED_DIR=c:\Apps\mlrun-data
set TAG=1.2.0
mkdir %SHARED_DIR%
docker-compose -f "c:\Apps\Desktop Docker Tools\compose.with-jupyter.yaml" up
BTW: for the YAML file, see https://docs.mlrun.org/en/latest/install/local-docker.html
2. Access to the port
When you call serving_fn.invoke, you have to open the relevant port (reported by deploy_function) on your IP address (based on the HOST_IP setting; see the first point).
Typically this port can be blocked by your firewall policy or your local antivirus, which means you have to open access to this port before the invoke call. A sketch of the call sequence follows below.
BTW: you can see more focus on the issue at https://github.com/mlrun/mlrun/issues/2102
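For reference, a minimal sketch of the deploy/invoke sequence this refers to; the project name, function name, model name and request body are illustrative placeholders, and the exact port to open is the one reported by deploy_function:
import mlrun

# names below are placeholders; use your own project / function names
project = mlrun.get_or_create_project("my-project", context="./")
serving_fn = project.get_function("serving")

# deploy the Nuclio serving function; the logged address contains the HOST_IP and the port
# that has to be reachable (firewall / antivirus permitting)
project.deploy_function(serving_fn)

# once the port is open, the endpoint can be called through the function object
resp = serving_fn.invoke("/v2/models/my-model/infer", body={"inputs": [[1, 2, 3]]})
print(resp)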
I have been trying to install PySpark on Windows since yesterday but I am constantly getting this error. It's been more than 48 hours; I have tried everything to resolve the problem and reinstalled PySpark from scratch numerous times, but still could not get it to work.
Whenever I run:
spark = SparkSession.builder.getOrCreate()
I get this error:
RuntimeError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_20592/2335384691.py in <module>
1 # create a spark session
----> 2 spark = SparkSession.builder.getOrCreate()
c:\users\bhola\appdata\local\programs\python\python38\lib\site-packages\pyspark\sql\session.py in getOrCreate(self)
226 sparkConf.set(key, value)
227 # This SparkContext may be an existing one.
--> 228 sc = SparkContext.getOrCreate(sparkConf)
229 # Do not update `SparkConf` for existing `SparkContext`, as it's shared
230 # by all sessions.
c:\users\bhola\appdata\local\programs\python\python38\lib\site-packages\pyspark\context.py in getOrCreate(cls, conf)
390 with SparkContext._lock:
391 if SparkContext._active_spark_context is None:
--> 392 SparkContext(conf=conf or SparkConf())
393 return SparkContext._active_spark_context
394
c:\users\bhola\appdata\local\programs\python\python38\lib\site-packages\pyspark\context.py in __init__(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer, conf, gateway, jsc, profiler_cls)
142 " is not allowed as it is a security risk.")
143
--> 144 SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
145 try:
146 self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
c:\users\bhola\appdata\local\programs\python\python38\lib\site-packages\pyspark\context.py in _ensure_initialized(cls, instance, gateway, conf)
337 with SparkContext._lock:
338 if not SparkContext._gateway:
--> 339 SparkContext._gateway = gateway or launch_gateway(conf)
340 SparkContext._jvm = SparkContext._gateway.jvm
341
c:\users\bhola\appdata\local\programs\python\python38\lib\site-packages\pyspark\java_gateway.py in launch_gateway(conf, popen_kwargs)
106
107 if not os.path.isfile(conn_info_file):
--> 108 raise RuntimeError("Java gateway process exited before sending its port number")
109
110 with open(conn_info_file, "rb") as info:
RuntimeError: Java gateway process exited before sending its port number
I tried the solution given in this Stack Overflow post and in this second Stack Overflow post:
export PYSPARK_SUBMIT_ARGS="--master local[2] pyspark-shell"
On my Windows system I used variable name = PYSPARK_SUBMIT_ARGS and variable value = "--master local[2] pyspark-shell".
But it's not working.
Other system variables that were set on my machine during installation are:
SPARK_HOME = D:\spark\spark-3.2.0-bin-hadoop3.2
HADOOP_HOME = D:\spark\spark-3.2.0-bin-hadoop3.2
Path = D:\spark\spark-3.2.0-bin-hadoop3.2\bin
PYSPARK_DRIVER_PYTHON = jupyter
PYSPARK_DRIVER_PYTHON_OPTS = jupyter
JAVA_HOME = C:\Program Files\Java\jdk1.8.0_301
Can anyone help me with this?
Did you download winutils.exe from https://github.com/kontext-tech/winutils? You'll need to put it in \Hadoop\bin and add the paths, etc. A sketch of the setup is below.
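For completeness, a minimal sketch of that setup done from Python before the session is created, reusing the paths from the question; the D:\hadoop location for winutils.exe is an assumption, so point HADOOP_HOME at whatever folder contains bin\winutils.exe (setting the variables permanently via System Properties works just as well):
import os
from pyspark.sql import SparkSession

# paths from the question; D:\hadoop is a placeholder for the folder holding bin\winutils.exe
os.environ["JAVA_HOME"] = r"C:\Program Files\Java\jdk1.8.0_301"
os.environ["SPARK_HOME"] = r"D:\spark\spark-3.2.0-bin-hadoop3.2"
os.environ["HADOOP_HOME"] = r"D:\hadoop"
os.environ["PATH"] = os.environ["PATH"] + r";D:\spark\spark-3.2.0-bin-hadoop3.2\bin;D:\hadoop\bin"

spark = SparkSession.builder.master("local[2]").appName("test").getOrCreate()
print(spark.version)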
I am trying to install pretrained pipelines for Spark NLP on Windows 10 with Python.
The following is the code I have tried so far in a Jupyter notebook on my local system:
! java -version
# should be Java 8 (Oracle or OpenJDK)
! conda create -n sparknlp python=3.7 -y
! conda activate sparknlp
! pip install --user spark-nlp==2.6.4 pyspark==2.4.5
from sparknlp.base import *
from sparknlp.annotator import *
from sparknlp.pretrained import PretrainedPipeline
import sparknlp
# Start Spark Session with Spark NLP
# the start() function has two parameters: gpu and spark23
# sparknlp.start(gpu=True) will start the session with GPU support
# sparknlp.start(spark23=True) is for when you have Apache Spark 2.3.x installed
spark = sparknlp.start()
# Download a pre-trained pipeline
pipeline = PretrainedPipeline('explain_document_ml', lang='en')
I am getting the following error:
explain_document_ml download started this may take some time.
Approx size to download 9.4 MB
[OK!]
---------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
~\AppData\Roaming\Python\Python37\site-packages\pyspark\sql\utils.py in deco(*a, **kw)
62 try:
---> 63 return f(*a, **kw)
64 except py4j.protocol.Py4JJavaError as e:
~\Anaconda3\envs\py37\lib\site-packages\py4j\protocol.py in get_return_value(answer, gateway_client, target_id, name)
327 "An error occurred while calling {0}{1}{2}.\n".
--> 328 format(target_id, ".", name), value)
329 else:
Py4JJavaError: An error occurred while calling z:com.johnsnowlabs.nlp.pretrained.PythonResourceDownloader.downloadPipeline.
: java.lang.IllegalArgumentException: requirement failed: Was not found appropriate resource to download for request: ResourceRequest(explain_document_ml,Some(en),public/models,2.6.4,2.4.4) with downloader: com.johnsnowlabs.nlp.pretrained.S3ResourceDownloader@2570f26e
at scala.Predef$.require(Predef.scala:224)
at com.johnsnowlabs.nlp.pretrained.ResourceDownloader$.downloadResource(ResourceDownloader.scala:345)
at com.johnsnowlabs.nlp.pretrained.ResourceDownloader$.downloadPipeline(ResourceDownloader.scala:376)
at com.johnsnowlabs.nlp.pretrained.ResourceDownloader$.downloadPipeline(ResourceDownloader.scala:371)
at com.johnsnowlabs.nlp.pretrained.PythonResourceDownloader$.downloadPipeline(ResourceDownloader.scala:474)
at com.johnsnowlabs.nlp.pretrained.PythonResourceDownloader.downloadPipeline(ResourceDownloader.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(Unknown Source)
at java.lang.reflect.Method.invoke(Unknown Source)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.lang.Thread.run(Unknown Source)
During handling of the above exception, another exception occurred:
IllegalArgumentException Traceback (most recent call last)
<ipython-input-2-d18238e76d9f> in <module>
11
12 # Download a pre-trained pipeline
---> 13 pipeline = PretrainedPipeline('explain_document_ml', lang='en')
~\Anaconda3\envs\py37\lib\site-packages\sparknlp\pretrained.py in __init__(self, name, lang, remote_loc, parse_embeddings, disk_location)
89 def __init__(self, name, lang='en', remote_loc=None, parse_embeddings=False, disk_location=None):
90 if not disk_location:
---> 91 self.model = ResourceDownloader().downloadPipeline(name, lang, remote_loc)
92 else:
93 self.model = PipelineModel.load(disk_location)
~\Anaconda3\envs\py37\lib\site-packages\sparknlp\pretrained.py in downloadPipeline(name, language, remote_loc)
58 t1.start()
59 try:
---> 60 j_obj = _internal._DownloadPipeline(name, language, remote_loc).apply()
61 jmodel = PipelineModel._from_java(j_obj)
62 finally:
~\Anaconda3\envs\py37\lib\site-packages\sparknlp\internal.py in __init__(self, name, language, remote_loc)
179 class _DownloadPipeline(ExtendedJavaWrapper):
180 def __init__(self, name, language, remote_loc):
--> 181 super(_DownloadPipeline, self).__init__("com.johnsnowlabs.nlp.pretrained.PythonResourceDownloader.downloadPipeline", name, language, remote_loc)
182
183
~\Anaconda3\envs\py37\lib\site-packages\sparknlp\internal.py in __init__(self, java_obj, *args)
127 super(ExtendedJavaWrapper, self).__init__(java_obj)
128 self.sc = SparkContext._active_spark_context
--> 129 self._java_obj = self.new_java_obj(java_obj, *args)
130 self.java_obj = self._java_obj
131
~\Anaconda3\envs\py37\lib\site-packages\sparknlp\internal.py in new_java_obj(self, java_class, *args)
137
138 def new_java_obj(self, java_class, *args):
--> 139 return self._new_java_obj(java_class, *args)
140
141 def new_java_array(self, pylist, java_class):
~\AppData\Roaming\Python\Python37\site-packages\pyspark\ml\wrapper.py in _new_java_obj(java_class, *args)
65 java_obj = getattr(java_obj, name)
66 java_args = [_py2java(sc, arg) for arg in args]
---> 67 return java_obj(*java_args)
68
69 @staticmethod
~\Anaconda3\envs\py37\lib\site-packages\py4j\java_gateway.py in __call__(self, *args)
1255 answer = self.gateway_client.send_command(command)
1256 return_value = get_return_value(
-> 1257 answer, self.gateway_client, self.target_id, self.name)
1258
1259 for temp_arg in temp_args:
~\AppData\Roaming\Python\Python37\site-packages\pyspark\sql\utils.py in deco(*a, **kw)
77 raise QueryExecutionException(s.split(': ', 1)[1], stackTrace)
78 if s.startswith('java.lang.IllegalArgumentException: '):
---> 79 raise IllegalArgumentException(s.split(': ', 1)[1], stackTrace)
80 raise
81 return deco
IllegalArgumentException: 'requirement failed: Was not found appropriate resource to download for request: ResourceRequest(explain_document_ml,Some(en),public/models,2.6.4,2.4.4) with downloader: com.johnsnowlabs.nlp.pretrained.S3ResourceDownloader@2570f26e'
This is one of the common issues with Apache Spark & Spark NLP when Java/Spark/Hadoop is not correctly set up on Windows.
You need to follow these steps correctly to avoid common issues, including failed pretrained() downloads:
Download OpenJDK from here: https://adoptopenjdk.net/?variant=openjdk8&jvmVariant=hotspot
Make sure it is 64-bit
Make sure you install it in the root, C:\java (Windows doesn't like spaces in the path).
During installation, after changing the path, select the option that sets the Path environment variable.
Download winutils from https://github.com/cdarlint/winutils/blob/master/hadoop-2.7.3/bin/winutils.exe and put it in C:\hadoop\bin.
Download Anaconda for Python 3.6 from the archive; I didn't like the new 3.8 (Apache Spark 2.4.x only works with Python 3.6 and 3.7): https://repo.anaconda.com/archive/Anaconda3-2020.02-Windows-x86_64.exe
Download Apache Spark 2.4.6 and extract it in C:\spark\
Set the env for HADOOP_HOME to C:\hadoop and SPARK_HOME to C:\spark
Set Paths for %HADOOP_HOME%\bin and %SPARK_HOME%\bin
Install C++ (again the 64 bit) https://www.microsoft.com/en-us/download/confirmation.aspx?id=14632
Create C:\temp and C:\temp\hive
Fix permissions:
C:\Users\maz>%HADOOP_HOME%\bin\winutils.exe chmod 777 /tmp/hive
C:\Users\maz>%HADOOP_HOME%\bin\winutils.exe chmod 777 /tmp/
Either create a conda env for Python 3.6, install pyspark==2.4.6, spark-nlp, and numpy, and use a Jupyter/Python console, or in the same conda env go to the Spark bin directory and run pyspark --packages com.johnsnowlabs.nlp:spark-nlp_2.11:2.6.5. A quick check that the setup works is sketched below.
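Once the environment above is in place, a short sanity check (re-using the code from the question; the sample sentence is arbitrary):
import sparknlp
from sparknlp.pretrained import PretrainedPipeline

# start a local Spark session with Spark NLP on the classpath
spark = sparknlp.start()
print(sparknlp.version(), spark.version)

# this download only succeeds when the Java / Spark / Hadoop setup above is correct
pipeline = PretrainedPipeline('explain_document_ml', lang='en')
result = pipeline.annotate("The pretrained pipeline downloaded and ran correctly.")
print(result['pos'])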
I want to detect objects in images, following this tutorial (https://medium.com/deepquestai/train-object-detection-ai-with-6-lines-of-code-6d087063f6ff). However, I receive an error message which I cannot resolve. What can I do about it, given that I cannot change the imageai source code and therefore cannot fix the error that way (https://github.com/google/tangent/issues/95)?
These are my imports:
!pip3 install tensorflow-gpu==1.13.1
!pip install imageai --upgrade
from imageai.Detection.Custom import DetectionModelTrainer
I run this code:
data_path = 'leaf-images-with-pascal-voc-annotations/'
trainer = DetectionModelTrainer()
trainer.setModelTypeAsYOLOv3()
trainer.setDataDirectory(data_directory=data_path)
trainer.setTrainConfig(object_names_array=['leaf'], batch_size=16, num_experiments=100,
train_from_pretrained_model="pretrained-yolov3.h5")
trainer.trainModel()
I tried using different versions of TensorFlow, but I receive this error message:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-28-d42b2127d681> in <module>
6 trainer.setTrainConfig(object_names_array=['leaf'], batch_size=16, num_experiments=100,
7 train_from_pretrained_model="pretrained-yolov3.h5")
----> 8 trainer.trainModel()
/opt/conda/lib/python3.6/site-packages/imageai/Detection/Custom/__init__.py in trainModel(self)
272 noobj_scale=self.__train_noobj_scale,
273 xywh_scale=self.__train_xywh_scale,
--> 274 class_scale=self.__train_class_scale,
275 )
276
/opt/conda/lib/python3.6/site-packages/imageai/Detection/Custom/__init__.py in _create_model(self, nb_class, anchors, max_box_per_image, max_grid, batch_size, warmup_batches, ignore_thresh, multi_gpu, lr, grid_scales, obj_scale, noobj_scale, xywh_scale, class_scale)
551 noobj_scale=noobj_scale,
552 xywh_scale=xywh_scale,
--> 553 class_scale=class_scale
554 )
555 else:
/opt/conda/lib/python3.6/site-packages/imageai/Detection/Custom/yolo.py in create_yolov3_model(nb_class, anchors, max_box_per_image, max_grid, batch_size, warmup_batches, ignore_thresh, grid_scales, obj_scale, noobj_scale, xywh_scale, class_scale)
292 noobj_scale,
293 xywh_scale,
--> 294 class_scale)([input_image, pred_yolo_1, true_yolo_1, true_boxes])
295
296 # Layer 83 => 86
/opt/conda/lib/python3.6/site-packages/imageai/Detection/Custom/yolo.py in __init__(self, anchors, max_grid, batch_size, warmup_batches, ignore_thresh, grid_scale, obj_scale, noobj_scale, xywh_scale, class_scale, **kwargs)
22 max_grid_h, max_grid_w = max_grid
23
---> 24 cell_x = tf.to_float(tf.reshape(tf.tile(tf.range(max_grid_w), [max_grid_h]), (1, max_grid_h, max_grid_w, 1, 1)))
25 cell_y = tf.transpose(cell_x, (0,2,1,3,4))
26 self.cell_grid = tf.tile(tf.concat([cell_x,cell_y],-1), [batch_size, 1, 1, 3, 1])
AttributeError: module 'tensorflow' has no attribute 'to_float'
I was also getting the same error while trying to use the to_float method in version 2.3.0.
It seems this method has been removed in newer versions of the library.
To get it working, I changed my code to use the cast method instead of to_float.
Below is the sample code which worked for me:
import tensorflow as tf

num = 5
# as_float = tf.to_float(num)   # removed in TF 2.x
# change the above line and use the cast method instead:
as_float = tf.cast(num, tf.float32)
as_float
There appears to be a currently unfixed issue with the ImageAI library where it is not compatible with the newest versions of TensorFlow, etc.
Using these versions worked for me:
#Currently I found these to work together:
pip install opencv-python==4.1.2.30
pip install keras==2.3.1
pip install tensorflow==1.14.0
pip install tensorflow-gpu==1.14.0
pip install imageai --upgrade
NOTE: using imageai == 2.1.5
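A quick way to confirm the environment actually matches those pins before re-running the trainer (a sketch; pkg_resources is used because imageai may not expose a __version__ attribute):
import pkg_resources
import cv2
import keras
import tensorflow as tf

# versions that reportedly work together (see the pip installs above)
print("tensorflow   :", tf.__version__)     # expect 1.14.0
print("keras        :", keras.__version__)  # expect 2.3.1
print("opencv-python:", cv2.__version__)    # expect 4.1.2
print("imageai      :", pkg_resources.get_distribution("imageai").version)  # expect 2.1.5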
macOS High Sierra, MBP 2016, in the terminal.
I'm following the directions here:
https://github.com/tensorflow/models/tree/master/research/syntaxnet
All options for ./configure were chosen as defaults (and all Python directories double-checked). All steps completed cleanly until this:
bazel test ...
# On Mac, run the following:
bazel test --linkopt=-headerpad_max_install_names \
dragnn/... syntaxnet/... util/utf8/...
I assume I'm supposed to run the latter command ("bazel test --linkopt" etc.), but interestingly I get the same result either way.
This throws about 10 errors, each of the same type ("trying to mutate a frozen object"), and concludes with tests not run, an error loading package dragnn/protos, and "Couldn't start the build".
This is the general form of the errors:
syntaxnet>> bazel test --linkopt=-headerpad_max_install_names dragnn/... syntaxnet/... util/utf8/...
ERROR: /Users/XXX/Desktop/NLP/syntaxnet/models/research/syntaxnet/dragnn/protos/BUILD:35:1:
Traceback (most recent call last):
  File "/Users/XXX/Desktop/NLP/syntaxnet/models/research/syntaxnet/dragnn/protos/BUILD", line 35
    tf_proto_library_py(name = "data_py_pb2", srcs = ["dat..."])
  File "/Users/XXX/Desktop/NLP/syntaxnet/models/research/syntaxnet/syntaxnet/syntaxnet.bzl", line 53, in tf_proto_library_py
    py_proto_library(name = name, srcs = srcs, srcs_versi...", <5 more arguments>)
  File "/private/var/tmp/_bazel_XXX/f74e5a21c3ad09aeb110d9de15110035/external/protobuf_archive/protobuf.bzl", line 374, in py_proto_library
    py_libs += [default_runtime]
trying to mutate a frozen object
ERROR: package contains errors: dragnn/protos
... [same error for various 'name = "...pb2"' files] ...
INFO: Elapsed time: 0.709s
FAILED: Build did NOT complete successfully (17 packages loaded)
ERROR: Couldn't start the build. Unable to run tests
Any idea what could be doing this? Thanks.
This error indicates a bug in the py_proto_library rule implementation.
tf_proto_library_py is defined in syntaxnet.bzl. It is a wrapper around py_proto_library, which is defined by the tf_workspace macro's protobuf_archive rule.
"protobuf_archive" downloads Protobuf 3.3.0, which contains //:protobuf.bzl with the buggy py_proto_library rule implementation: in line #374 it tries to mutate an immutable object py_libs.
Make sure you use the latest Bazel version, currently that's 0.8.1.
If the problem still persists, I suggest filing bugs with:
Protobuf, to fix the py_proto_library rule;
TensorFlow, to update their Protobuf version in tf_workspace; and
SyntaxNet, to update their TF submodule reference in //research/syntaxnet to the bug-fixed version.
As a workaround, perhaps you can patch protobuf.bzl.
The patch is to change these lines:
373 if default_runtime and not default_runtime in py_libs + deps:
374 py_libs += [default_runtime]
375
376 native.py_library(
377 name=name,
378 srcs=outs+py_extra_srcs,
379 deps=py_libs+deps,
380 imports=includes,
381 **kargs)
to these:
373 if default_runtime and not default_runtime in py_libs + deps:
374 py_libs2 = py_libs + [default_runtime]
375 else:
376 py_libs2 = py_libs
377
378 native.py_library(
379 name=name,
380 srcs=outs+py_extra_srcs,
381 deps=py_libs2+deps,
382 imports=includes,
383 **kargs)
Disclaimer: this is a "blind" fix; I have not tried whether it works.
I tried the same pattern of patch for cc_libs:
if default_runtime and not default_runtime in cc_libs:
    cc_libs2 = cc_libs + [default_runtime]
else:
    cc_libs2 = cc_libs
if use_grpc_plugin:
    cc_libs2 = cc_libs2 + ["//external:grpc_lib"]  # add to the copy, not the frozen cc_libs list
native.cc_library(
    name=name,
    srcs=gen_srcs,
    hdrs=gen_hdrs,
    deps=cc_libs2 + deps,
    includes=includes,
    **kargs)
This shows a new error, but keeps compiling. (Ubuntu 16 on Windows Subsystem for Linux; don't ask. Native TensorFlow 1.4 win x64 works, but not SyntaxNet.)
greg#FX11:/mnt/c/code/models/research/syntaxnet$ bazel test ...
ERROR: /home/greg/.cache/bazel/_bazel_greg/adb8eb0eab8b9680449366fbebe59ec2/external/org_tensorflow/tensorflow/core/kernels/BUILD:451:1: in _transitive_hdrs rule @org_tensorflow//tensorflow/core/kernels:bounds_check_lib_gather:
Traceback (most recent call last):
File "/home/greg/.cache/bazel/_bazel_greg/adb8eb0eab8b9680449366fbebe59ec2/external/org_tensorflow/tensorflow/core/kernels/BUILD", line 451
_transitive_hdrs(name = 'bounds_check_lib_gather')
File "/home/greg/.cache/bazel/_bazel_greg/adb8eb0eab8b9680449366fbebe59ec2/external/org_tensorflow/tensorflow/tensorflow.bzl", line 869, in _transitive_hdrs_impl
set()
Just changed set() to depset() and that seems to have avoided the error.
To make a long story short, I was inspired by sstrasburg's comment.
First, uninstall the freshly installed (too new) version of Bazel:
brew uninstall bazel
Download bazel 0.5.4 from here.
chmod +x bazel-0.5.4-without-jdk-installer-darwin-x86_64.sh
./bazel-0.5.4-without-jdk-installer-darwin-x86_64.sh
After that, run again:
bazel test --linkopt=-headerpad_max_install_names dragnn/... syntaxnet/... util/utf8/...
Finally, I got
Executed 57 out of 57 tests: 57 tests pass.