Replicate a dataset with dask to all workers - python

I am using Dask with the distributed scheduler, and I am trying to replicate a dataset read from a CSV file on S3 to all worker nodes. Example:
from distributed import Executor
import dask.dataframe as dd

e = Executor('127.0.0.1:8786', set_as_default=True)
df = dd.read_csv('s3://bucket/file.csv', blocksize=None)
df = e.persist(df)
e.replicate(df)
distributed.utils - ERROR - unhashable type: 'list'
Traceback (most recent call last):
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/distributed/utils.py", line 102, in f
result[0] = yield gen.maybe_future(func(*args, **kwargs))
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1015, in run
value = future.result()
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "<string>", line 3, in raise_exc_info
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1021, in run
yielded = self.gen.throw(*exc_info)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/distributed/executor.py", line 1347, in _replicate
branching_factor=branching_factor)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1015, in run
value = future.result()
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "<string>", line 3, in raise_exc_info
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1021, in run
yielded = self.gen.throw(*exc_info)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/distributed/core.py", line 444, in send_recv_from_rpc
result = yield send_recv(stream=stream, op=key, **kwargs)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1015, in run
value = future.result()
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "<string>", line 3, in raise_exc_info
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1024, in run
yielded = self.gen.send(value)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/distributed/core.py", line 345, in send_recv
six.reraise(*clean_exception(**response))
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/six.py", line 685, in reraise
raise value.with_traceback(tb)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/distributed/core.py", line 211, in handle_stream
result = yield gen.maybe_future(handler(stream, **msg))
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 1015, in run
value = future.result()
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "<string>", line 3, in raise_exc_info
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/tornado/gen.py", line 285, in wrapper
yielded = next(result)
File "/root/.miniconda/envs/dask_env/lib/python3.5/site-packages/distributed/scheduler.py", line 1324, in replicate
keys = set(keys)
TypeError: unhashable type: 'list'
Is this the correct way to replicate a dataframe? It appears that the object returned by e.persist(df) does not work with e.replicate for some reason.

This was a bug and has been resolved in https://github.com/dask/distributed/pull/473
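For reference, with the fix in place the same pattern works; a minimal sketch using the current API (Executor was later renamed Client in distributed), assuming the same scheduler address and S3 path as the question:

from distributed import Client, futures_of
import dask.dataframe as dd

client = Client('127.0.0.1:8786', set_as_default=True)
df = dd.read_csv('s3://bucket/file.csv', blocksize=None)
df = client.persist(df)

# replicate() operates on futures; futures_of() extracts the futures
# backing the persisted collection so each partition is copied to
# every worker
client.replicate(futures_of(df))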

Related

Kernel ERROR using Jupyter-matlab notebook. Seems like the kernel initialization can't find one file, how can I fix it?

I want to use Jupyter-matlab notebooks. I've downloaded Anaconda Navigator and followed the steps from this page: http://jmlilly.net/jupyter-matlab
The installation seems to have gone through properly, but when I try to open a MATLAB notebook I get a "Kernel Error". The error text (it's very unclear to me):
Traceback (most recent call last):
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\tornado\web.py", line 1704, in _execute
result = await result
File "C:\Users\lucas\anaconda3\envs\jlab\lib\asyncio\tasks.py", line 328, in __wakeup
future.result()
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\tornado\gen.py", line 769, in run
yielded = self.gen.throw(*exc_info) # type: ignore
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\notebook\services\sessions\handlers.py", line 74, in post
model = yield maybe_future(
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\tornado\gen.py", line 762, in run
value = future.result()
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\tornado\gen.py", line 769, in run
yielded = self.gen.throw(*exc_info) # type: ignore
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\notebook\services\sessions\sessionmanager.py", line 98, in create_session
kernel_id = yield self.start_kernel_for_session(session_id, path, name, type, kernel_name)
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\tornado\gen.py", line 762, in run
value = future.result()
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\tornado\gen.py", line 769, in run
yielded = self.gen.throw(*exc_info) # type: ignore
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\notebook\services\sessions\sessionmanager.py", line 110, in start_kernel_for_session
kernel_id = yield maybe_future(
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\tornado\gen.py", line 762, in run
value = future.result()
File "C:\Users\lucas\anaconda3\envs\jlab\lib\asyncio\futures.py", line 201, in result
raise self._exception
File "C:\Users\lucas\anaconda3\envs\jlab\lib\asyncio\tasks.py", line 256, in __step
result = coro.send(None)
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\notebook\services\kernels\kernelmanager.py", line 176, in start_kernel
kernel_id = await maybe_future(self.pinned_superclass.start_kernel(self, **kwargs))
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\utils.py", line 26, in wrapped
raise e
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\utils.py", line 23, in wrapped
return loop.run_until_complete(future)
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\nest_asyncio.py", line 89, in run_until_complete
return f.result()
File "C:\Users\lucas\anaconda3\envs\jlab\lib\asyncio\futures.py", line 201, in result
raise self._exception
File "C:\Users\lucas\anaconda3\envs\jlab\lib\asyncio\tasks.py", line 256, in __step
result = coro.send(None)
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\multikernelmanager.py", line 207, in _async_start_kernel
starter = ensure_async(km.start_kernel(**kwargs))
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\utils.py", line 26, in wrapped
raise e
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\utils.py", line 23, in wrapped
return loop.run_until_complete(future)
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\nest_asyncio.py", line 89, in run_until_complete
return f.result()
File "C:\Users\lucas\anaconda3\envs\jlab\lib\asyncio\futures.py", line 201, in result
raise self._exception
File "C:\Users\lucas\anaconda3\envs\jlab\lib\asyncio\tasks.py", line 256, in __step
result = coro.send(None)
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\manager.py", line 79, in wrapper
raise e
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\manager.py", line 71, in wrapper
out = await method(self, *args, **kwargs)
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\manager.py", line 381, in _async_start_kernel
await ensure_async(self._launch_kernel(kernel_cmd, **kw))
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\utils.py", line 26, in wrapped
raise e
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\utils.py", line 23, in wrapped
return loop.run_until_complete(future)
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\nest_asyncio.py", line 89, in run_until_complete
return f.result()
File "C:\Users\lucas\anaconda3\envs\jlab\lib\asyncio\futures.py", line 201, in result
raise self._exception
File "C:\Users\lucas\anaconda3\envs\jlab\lib\asyncio\tasks.py", line 256, in __step
result = coro.send(None)
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\manager.py", line 301, in _async_launch_kernel
connection_info = await self.provisioner.launch_kernel(kernel_cmd, **kw)
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\provisioning\local_provisioner.py", line 204, in launch_kernel
self.process = launch_kernel(cmd, **scrubbed_kwargs)
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\launcher.py", line 169, in launch_kernel
raise ex
File "C:\Users\lucas\anaconda3\envs\jlab\lib\site-packages\jupyter_client\launcher.py", line 157, in launch_kernel
proc = Popen(cmd, **kwargs)
File "C:\Users\lucas\anaconda3\envs\jlab\lib\subprocess.py", line 951, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "C:\Users\lucas\anaconda3\envs\jlab\lib\subprocess.py", line 1420, in _execute_child
hp, ht, pid, tid = _winapi.CreateProcess(executable, args,
FileNotFoundError: [WinError 2] The system cannot find the file specified
I've tried re-installing Anaconda, Jupyter, and matlab-kernel, and changing the environment's Python version (3.7, 3.8, 3.9), but nothing works.
Matlab version: R2021b
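The [WinError 2] is raised by Popen(cmd, **kwargs) inside launch_kernel, which means the first entry of the kernelspec's argv points at an executable that doesn't exist on this machine. A diagnostic sketch (not a fix) to see which command each installed kernel tries to launch:

from jupyter_client.kernelspec import KernelSpecManager

# print the launch command of every installed kernelspec; for the
# MATLAB kernel, argv[0] must be a path that actually exists
for name, info in KernelSpecManager().get_all_specs().items():
    print(name, info['spec']['argv'])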

spacy.load error: RuntimeError: dictionary changed size during iteration

I am loading a spaCy model as part of a step in my Dataflow streaming pipeline. To load the pre-downloaded spaCy model for a specific language I am using
nlp_model = spacy.load(SPACY_KEYS[lang])
where SPACY_KEYS is a dictionary containing the names of the models for each language (e.g. 'en': 'en_core_web_sm').
This works without any issues for the majority of the jobs run by the pipeline, but for a few iterations I am getting the following error:
Error message from worker: generic::unknown: Traceback (most recent call last):
File "apache_beam/runners/common.py", line 1232, in apache_beam.runners.common.DoFnRunner.process
File "apache_beam/runners/common.py", line 752, in apache_beam.runners.common.PerWindowInvoker.invoke_process
File "apache_beam/runners/common.py", line 870, in apache_beam.runners.common.PerWindowInvoker._invoke_process_per_window
File "apache_beam/runners/common.py", line 1368, in apache_beam.runners.common._OutputProcessor.process_outputs
File "/usr/local/lib/python3.7/site-packages/submodules/entities_and_pii_removal.py", line 259, in entities_and_PII
nlp_model = spacy.load(SPACY_KEYS[lang]) # load spacy model
File "/usr/local/lib/python3.7/site-packages/spacy/__init__.py", line 52, in load
name, vocab=vocab, disable=disable, exclude=exclude, config=config
File "/usr/local/lib/python3.7/site-packages/spacy/util.py", line 420, in load_model
return load_model_from_package(name, **kwargs) # type: ignore[arg-type]
File "/usr/local/lib/python3.7/site-packages/spacy/util.py", line 453, in load_model_from_package
return cls.load(vocab=vocab, disable=disable, exclude=exclude, config=config) # type: ignore[attr-defined]
File "/usr/local/lib/python3.7/site-packages/de_core_news_sm/__init__.py", line 10, in load
return load_model_from_init_py(__file__, **overrides)
File "/usr/local/lib/python3.7/site-packages/spacy/util.py", line 621, in load_model_from_init_py
config=config,
File "/usr/local/lib/python3.7/site-packages/spacy/util.py", line 489, in load_model_from_path
return nlp.from_disk(model_path, exclude=exclude, overrides=overrides)
File "/usr/local/lib/python3.7/site-packages/spacy/language.py", line 2042, in from_disk
util.from_disk(path, deserializers, exclude) # type: ignore[arg-type]
File "/usr/local/lib/python3.7/site-packages/spacy/util.py", line 1299, in from_disk
reader(path / key)
File "/usr/local/lib/python3.7/site-packages/spacy/language.py", line 2037, in <lambda>
p, exclude=["vocab"]
File "spacy/pipeline/trainable_pipe.pyx", line 343, in spacy.pipeline.trainable_pipe.TrainablePipe.from_disk
File "/usr/local/lib/python3.7/site-packages/spacy/util.py", line 1299, in from_disk
reader(path / key)
File "spacy/pipeline/trainable_pipe.pyx", line 333, in spacy.pipeline.trainable_pipe.TrainablePipe.from_disk.load_model
File "spacy/pipeline/trainable_pipe.pyx", line 334, in spacy.pipeline.trainable_pipe.TrainablePipe.from_disk.load_model
File "/usr/local/lib/python3.7/site-packages/thinc/model.py", line 593, in from_bytes
return self.from_dict(msg)
File "/usr/local/lib/python3.7/site-packages/thinc/model.py", line 624, in from_dict
loaded_value = deserialize_attr(default_value, value, attr, node)
File "/usr/local/lib/python3.7/functools.py", line 840, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/local/lib/python3.7/site-packages/thinc/model.py", line 804, in deserialize_attr
return srsly.msgpack_loads(value)
File "/usr/local/lib/python3.7/site-packages/srsly/_msgpack_api.py", line 27, in msgpack_loads
msg = msgpack.loads(data, raw=False, use_list=use_list)
File "/usr/local/lib/python3.7/site-packages/srsly/msgpack/__init__.py", line 76, in unpackb
for decoder in msgpack_decoders.get_all().values():
File "/usr/local/lib/python3.7/site-packages/catalogue/__init__.py", line 110, in get_all
for keys, value in REGISTRY.items():
RuntimeError: dictionary changed size during iteration
I have not been able to identify the cause of this problem. Is there a way of getting around it?
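One observation, not a confirmed diagnosis: the failing frame is catalogue iterating its global REGISTRY, which suggests two threads in the same worker process calling spacy.load concurrently while the registry is being mutated. If that is the cause, loading each model at most once per process behind a lock should avoid the race; a sketch, where get_model, _lock, and _models are hypothetical names and SPACY_KEYS is the dictionary from the question:

import threading
import spacy

_lock = threading.Lock()
_models = {}

def get_model(lang):
    # load each model at most once per process; the lock serializes
    # spacy.load so the registry isn't mutated during iteration
    with _lock:
        if lang not in _models:
            _models[lang] = spacy.load(SPACY_KEYS[lang])
    return _models[lang]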

Why does python's numba run 2D array program on Spyder but not on remote server?

I created a small program that uses Numba on a function containing a 2D array (a list of lists). The program runs fine in Spyder, but when I run the exact same program on a remote Ubuntu server, I get a long error saying "unsupported nested memory-managed object" (shown below the code).
The code:
from numba import jit

@jit(nopython=True)
def test():
    num1 = 10
    num2 = 5
    array = [[1 for i in range(num1)] for j in range(num2)]
    # array = [2 for i in range(num1)]
    sum = 0
    for i in range(num2):      # array has num2 rows
        for j in range(num1):  # ... of num1 columns each
            sum = sum + array[i][j]
    print(sum)

test()
The error:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 186, in _call_incref_decref
meminfo = data_model.get_nrt_meminfo(builder, value)
File "/usr/lib/python3/dist-packages/numba/datamodel/models.py", line 329, in get_nrt_meminfo
"unsupported nested memory-managed object")
NotImplementedError: unsupported nested memory-managed object
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/numba/errors.py", line 243, in new_error_context
yield
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 263, in lower_inst
self.storevar(val, inst.target.name)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 928, in storevar
self.decref(fetype, old)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 982, in decref
self.context.nrt.decref(self.builder, typ, val)
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 208, in decref
self._call_incref_decref(builder, typ, typ, value, "NRT_decref")
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 179, in _call_incref_decref
funcname, getters + (getter,))
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 188, in _call_incref_decref
raise NotImplementedError("%s: %s" % (root_type, str(e)))
NotImplementedError: list(list(int64)): unsupported nested memory-managed object
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "2DArrayTest.py", line 15, in <module>
test()
File "/usr/lib/python3/dist-packages/numba/dispatcher.py", line 307, in _compile_for_args
return self.compile(tuple(argtypes))
File "/usr/lib/python3/dist-packages/numba/dispatcher.py", line 579, in compile
cres = self._compiler.compile(args, return_type)
File "/usr/lib/python3/dist-packages/numba/dispatcher.py", line 80, in compile
flags=flags, locals=self.locals)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 740, in compile_extra
return pipeline.compile_extra(func)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 360, in compile_extra
return self._compile_bytecode()
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 699, in _compile_bytecode
return self._compile_core()
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 686, in _compile_core
res = pm.run(self.status)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 246, in run
raise patched_exception
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 238, in run
stage()
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 621, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 576, in _backend
lowered = lowerfn()
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 563, in backend_nopython_mode
self.flags)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 858, in native_lowering_stage
lower.lower()
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 135, in lower
self.lower_normal_function(self.fndesc)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 176, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 201, in lower_function_body
self.lower_block(block)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "/usr/lib/python3.6/contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "/usr/lib/python3/dist-packages/numba/errors.py", line 249, in new_error_context
six.reraise(type(newerr), newerr, sys.exc_info()[2])
File "/usr/lib/python3/dist-packages/numba/six.py", line 658, in reraise
raise value.with_traceback(tb)
File "/usr/lib/python3/dist-packages/numba/errors.py", line 243, in new_error_context
yield
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 263, in lower_inst
self.storevar(val, inst.target.name)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 928, in storevar
self.decref(fetype, old)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 982, in decref
self.context.nrt.decref(self.builder, typ, val)
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 208, in decref
self._call_incref_decref(builder, typ, typ, value, "NRT_decref")
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 179, in _call_incref_decref
funcname, getters + (getter,))
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 188, in _call_incref_decref
raise NotImplementedError("%s: %s" % (root_type, str(e)))
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
list(list(int64)): unsupported nested memory-managed object
File "2DArrayTest.py", line 7
[1] During: lowering "$56 = build_list(items=[])" at 2DArrayTest.py (7)
I have seen elsewhere that Numba doesn't like 2D arrays and lists of lists because of how it manages memory. Is there a way to make this work on the remote server, since it works in Spyder? What does Spyder do differently to make it work?
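If the server's Numba is too old to lower nested reflected lists (the traceback shows the failure in the nopython lowering stage), the usual workaround is a 2D NumPy array, which nopython mode supports; a sketch of an equivalent function:

import numpy as np
from numba import jit

@jit(nopython=True)
def test():
    num1 = 10
    num2 = 5
    # a contiguous 2D array instead of a list of lists
    array = np.ones((num2, num1), dtype=np.int64)
    total = 0
    for i in range(num2):
        for j in range(num1):
            total += array[i, j]
    return total

print(test())  # 50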

Pytorch lightning, tensorboard: TypeError: can't pickle _thread.lock objects

This is my error; I don't know what it means, and I really do not know what to do right now. I am using PyTorch Lightning with TensorBoard. I don't know where this error comes from; I can provide more details if you ask. I deleted several parts of the error output because Stack Overflow doesn't allow me to post so much code.
File "/home/jq/PycharmProjects/Unet/Code/Lit_train.py", line 49, in <module>
trainer.fit(model)
File "/home/jq/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 859, in fit
self.single_gpu_train(model)
File "/home/jq/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/distrib_parts.py", line 503, in single_gpu_train
self.run_pretrain_routine(model)
File "/home/jq/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/trainer.py", line 1015, in run_pretrain_routine
self.train()
File "/home/jq/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 347, in train
self.run_training_epoch()
File "/home/jq/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/training_loop.py", line 451, in run_training_epoch
self.run_evaluation(test_mode=self.testing)
File "/home/jq/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/evaluation_loop.py", line 391, in run_evaluation
self.log_metrics(log_metrics, {})
File "/home/jq/.local/lib/python3.6/site-packages/pytorch_lightning/trainer/logging.py", line 74, in log_metrics
self.logger.save()
File "/home/jq/.local/lib/python3.6/site-packages/pytorch_lightning/utilities/distributed.py", line 10, in wrapped_fn
return fn(*args, **kwargs)
File "/home/jq/.local/lib/python3.6/site-packages/pytorch_lightning/loggers/tensorboard.py", line 161, in save
save_hparams_to_yaml(hparams_file, self.hparams)
File "/home/jq/.local/lib/python3.6/site-packages/pytorch_lightning/core/saving.py", line 151, in save_hparams_to_yaml
yaml.dump(hparams, fp)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/__init__.py", line 290, in dump
return dump_all([data], stream, Dumper=Dumper, **kwds)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/__init__.py", line 278, in dump_all
dumper.represent(data)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 27, in represent
node = self.represent_data(data)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 52, in represent_data
node = self.yaml_multi_representers[data_type](self, data)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 343, in represent_object
'tag:yaml.org,2002:python/object:'+function_name, state)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 52, in represent_data
node = self.yaml_multi_representers[data_type](self, data)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 346, in represent_object
return self.represent_sequence(tag+function_name, args)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 92, in represent_sequence
node_item = self.represent_data(item)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 52, in represent_data
node = self.yaml_multi_representers[data_type](self, data)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 343, in represent_object
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 52, in represent_data
node = self.yaml_multi_representers[data_type](self, data)
File "/home/jq/.local/lib/python3.6/site-packages/yaml/representer.py", line 317, in represent_object
reduce = data.__reduce_ex__(2)
TypeError: can't pickle _thread.lock objects
Exception ignored in: <object repr() failed>
Traceback (most recent call last):
File "/home/jq/.local/lib/python3.6/site-packages/tqdm/std.py", line 1086, in __del__
File "/home/jq/.local/lib/python3.6/site-packages/tqdm/std.py", line 1293, in close
File "/home/jq/.local/lib/python3.6/site-packages/tqdm/std.py", line 1471, in display
File "/home/jq/.local/lib/python3.6/site-packages/tqdm/std.py", line 1089, in __repr__
File "/home/jq/.local/lib/python3.6/site-packages/tqdm/std.py", line 1433, in format_dict
TypeError: 'NoneType' object is not iterable
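The bottom frames show the TensorBoard logger serializing self.hparams with yaml.dump, and one of the hparams values (something that holds a _thread.lock, such as a logger, dataloader, or device handle) cannot be reduced. If that diagnosis holds, keeping only plain values in hparams avoids the crash; a sketch, where data_module is a hypothetical stand-in for whatever unpicklable object ended up in hparams (the ignore= argument exists in newer Lightning releases):

import pytorch_lightning as pl

class Model(pl.LightningModule):
    def __init__(self, lr=1e-3, data_module=None):
        super().__init__()
        # store only YAML-serializable hyperparameters; exclude the
        # object that drags a _thread.lock into self.hparams
        self.save_hyperparameters(ignore=['data_module'])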

Python client for AWS Redis Cluster

Can anyone suggest a Python client for AWS Redis with cluster mode enabled?
I'm using redis-py-cluster, but it fails.
Sample code:
from rediscluster import StrictRedisCluster
startup_nodes = [{"host": "xxxx.clustercfg.apn2.cache.amazonaws.com", "port": "6379"}]
r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, skip_full_coverage_check=False)
r.set('foo', 'bar')
value = r.get('foo')
Exception:
Traceback (most recent call last):
File "testRedisCluster.py", line 11, in
r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, skip_full_coverage_check=False)
File "/Library/Python/2.7/site-packages/rediscluster/client.py", line 181, in init
**kwargs
File "/Library/Python/2.7/site-packages/rediscluster/connection.py", line 141, in init
self.nodes.initialize()
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 228, in initialize
need_full_slots_coverage = self.cluster_require_full_coverage(nodes_cache)
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 270, in cluster_require_full_coverage
return any(node_require_full_coverage(node) for node in nodes.values())
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 270, in
return any(node_require_full_coverage(node) for node in nodes.values())
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 267, in node_require_full_coverage
return "yes" in r_node.config_get("cluster-require-full-coverage").values()
File "/Library/Python/2.7/site-packages/redis/client.py", line 715, in config_get
return self.execute_command('CONFIG GET', pattern)
File "/Library/Python/2.7/site-packages/redis/client.py", line 668, in execute_command
return self.parse_response(connection, command_name, **options)
File "/Library/Python/2.7/site-packages/redis/client.py", line 680, in parse_response
response = connection.read_response()
File "/Library/Python/2.7/site-packages/redis/connection.py", line 629, in read_response
raise response
redis.exceptions.ResponseError: unknown command 'CONFIG'
I'm using redis-py-cluster 1.3.4.
Any idea?

Change the parameter skip_full_coverage_check=False to skip_full_coverage_check=True. AWS ElastiCache disables the CONFIG command, so the full-coverage check, which calls CONFIG GET cluster-require-full-coverage on each node, fails with unknown command 'CONFIG'; skipping the check avoids the error.
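With that change, the sample from the question runs; the same endpoint placeholder is kept:

from rediscluster import StrictRedisCluster

startup_nodes = [{"host": "xxxx.clustercfg.apn2.cache.amazonaws.com", "port": "6379"}]
# skip the startup check that issues CONFIG GET, which ElastiCache forbids
r = StrictRedisCluster(startup_nodes=startup_nodes,
                       decode_responses=True,
                       skip_full_coverage_check=True)
r.set('foo', 'bar')
print(r.get('foo'))  # 'bar'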
