I am trying to get TerraClimate data from the Microsoft Planetary Computer and I am facing a timeout error. Is there a possibility of increasing the timeout? Please find the code below and the error I am facing. I am using fsspec and xarray to download spatial data from the MS Planetary Computer portal.
import fsspec
import xarray as xr
store = fsspec.get_mapper(asset.href)
data = xr.open_zarr(store, **asset.extra_fields["xarray:open_kwargs"])
clipped_data = data.sel(time=slice('2015-01-01','2019-12-31'),lon=slice(min_lon,max_lon),lat=slice(max_lat,min_lat))
parsed_data = clipped_data[['tmax', 'tmin', 'ppt', 'soil']]
lat_list = parsed_data['lat'].values.tolist()
lon_list = parsed_data['lon'].values.tolist()
filename = "Soil_Moisture_sample.csv"
for (i, j) in zip(lat_list, lon_list):
    parsed_data[["soil","tmax","tmin","ppt"]].sel(lon=i, lat=j, method="nearest").to_dataframe().to_csv(filename, mode='a', index=False, header=False)
I am getting the following error
TimeoutError Traceback (most recent call last)
File ~\Anaconda3\envs\satellite\lib\site-packages\fsspec\asyn.py:53, in _runner(event, coro, result, timeout)
52 try:
---> 53 result[0] = await coro
54 except Exception as ex:
File ~\Anaconda3\envs\satellite\lib\site-packages\fsspec\asyn.py:423, in AsyncFileSystem._cat(self, path, recursive, on_error, batch_size, **kwargs)
422 if ex:
--> 423 raise ex
424 if (
425 len(paths) > 1
426 or isinstance(path, list)
427 or paths[0] != self._strip_protocol(path)
428 ):
File ~\Anaconda3\envs\satellite\lib\asyncio\tasks.py:455, in wait_for(fut, timeout, loop)
454 if timeout is None:
--> 455 return await fut
457 if timeout <= 0:
File ~\Anaconda3\envs\satellite\lib\site-packages\fsspec\implementations\http.py:221, in HTTPFileSystem._cat_file(self, url, start, end, **kwargs)
220 async with session.get(url, **kw) as r:
--> 221 out = await r.read()
222 self._raise_not_found_for_status(r, url)
File ~\Anaconda3\envs\satellite\lib\site-packages\aiohttp\client_reqrep.py:1036, in ClientResponse.read(self)
1035 try:
-> 1036 self._body = await self.content.read()
1037 for trace in self._traces:
File ~\Anaconda3\envs\satellite\lib\site-packages\aiohttp\streams.py:375, in StreamReader.read(self, n)
374 while True:
--> 375 block = await self.readany()
376 if not block:
File ~\Anaconda3\envs\satellite\lib\site-packages\aiohttp\streams.py:397, in StreamReader.readany(self)
396 while not self._buffer and not self._eof:
--> 397 await self._wait("readany")
399 return self._read_nowait(-1)
File ~\Anaconda3\envs\satellite\lib\site-packages\aiohttp\streams.py:304, in StreamReader._wait(self, func_name)
303 with self._timer:
--> 304 await waiter
305 else:
File ~\Anaconda3\envs\satellite\lib\site-packages\aiohttp\helpers.py:721, in TimerContext.__exit__(self, exc_type, exc_val, exc_tb)
720 if exc_type is asyncio.CancelledError and self._cancelled:
--> 721 raise asyncio.TimeoutError from None
722 return None
TimeoutError:
The above exception was the direct cause of the following exception:
FSTimeoutError Traceback (most recent call last)
Input In [62], in <cell line: 3>()
1 # Flood Region Point - Thiruvanthpuram
2 filename = "Soil_Moisture_sample.csv"
----> 3 parsed_data[["soil","tmax","tmin","ppt"]].sel(lon=8.520833, lat=76.4375, method="nearest").to_dataframe().to_csv(filename,mode='a',index=False, header=False)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\dataset.py:5898, in Dataset.to_dataframe(self, dim_order)
5870 """Convert this dataset into a pandas.DataFrame.
5871
5872 Non-index variables in this dataset form the columns of the
(...)
5893
5894 """
5896 ordered_dims = self._normalize_dim_order(dim_order=dim_order)
-> 5898 return self._to_dataframe(ordered_dims=ordered_dims)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\dataset.py:5862, in Dataset._to_dataframe(self, ordered_dims)
5860 def _to_dataframe(self, ordered_dims: Mapping[Any, int]):
5861 columns = [k for k in self.variables if k not in self.dims]
-> 5862 data = [
5863 self._variables[k].set_dims(ordered_dims).values.reshape(-1)
5864 for k in columns
5865 ]
5866 index = self.coords.to_index([*ordered_dims])
5867 return pd.DataFrame(dict(zip(columns, data)), index=index)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\dataset.py:5863, in <listcomp>(.0)
5860 def _to_dataframe(self, ordered_dims: Mapping[Any, int]):
5861 columns = [k for k in self.variables if k not in self.dims]
5862 data = [
-> 5863 self._variables[k].set_dims(ordered_dims).values.reshape(-1)
5864 for k in columns
5865 ]
5866 index = self.coords.to_index([*ordered_dims])
5867 return pd.DataFrame(dict(zip(columns, data)), index=index)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\variable.py:527, in Variable.values(self)
524 #property
525 def values(self):
526 """The variable's data as a numpy.ndarray"""
--> 527 return _as_array_or_item(self._data)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\variable.py:267, in _as_array_or_item(data)
253 def _as_array_or_item(data):
254 """Return the given values as a numpy array, or as an individual item if
255 it's a 0d datetime64 or timedelta64 array.
256
(...)
265 TODO: remove this (replace with np.asarray) once these issues are fixed
266 """
--> 267 data = np.asarray(data)
268 if data.ndim == 0:
269 if data.dtype.kind == "M":
File ~\AppData\Roaming\Python\Python38\site-packages\dask\array\core.py:1696, in Array.__array__(self, dtype, **kwargs)
1695 def __array__(self, dtype=None, **kwargs):
-> 1696 x = self.compute()
1697 if dtype and x.dtype != dtype:
1698 x = x.astype(dtype)
File ~\AppData\Roaming\Python\Python38\site-packages\dask\base.py:315, in DaskMethodsMixin.compute(self, **kwargs)
291 def compute(self, **kwargs):
292 """Compute this dask collection
293
294 This turns a lazy Dask collection into its in-memory equivalent.
(...)
313 dask.base.compute
314 """
--> 315 (result,) = compute(self, traverse=False, **kwargs)
316 return result
File ~\AppData\Roaming\Python\Python38\site-packages\dask\base.py:600, in compute(traverse, optimize_graph, scheduler, get, *args, **kwargs)
597 keys.append(x.__dask_keys__())
598 postcomputes.append(x.__dask_postcompute__())
--> 600 results = schedule(dsk, keys, **kwargs)
601 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
File ~\AppData\Roaming\Python\Python38\site-packages\dask\threaded.py:89, in get(dsk, keys, cache, num_workers, pool, **kwargs)
86 elif isinstance(pool, multiprocessing.pool.Pool):
87 pool = MultiprocessingPoolExecutor(pool)
---> 89 results = get_async(
90 pool.submit,
91 pool._max_workers,
92 dsk,
93 keys,
94 cache=cache,
95 get_id=_thread_get_id,
96 pack_exception=pack_exception,
97 **kwargs,
98 )
100 # Cleanup pools associated to dead threads
101 with pools_lock:
File ~\AppData\Roaming\Python\Python38\site-packages\dask\local.py:511, in get_async(submit, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, chunksize, **kwargs)
509 _execute_task(task, data) # Re-execute locally
510 else:
--> 511 raise_exception(exc, tb)
512 res, worker_id = loads(res_info)
513 state["cache"][key] = res
File ~\AppData\Roaming\Python\Python38\site-packages\dask\local.py:319, in reraise(exc, tb)
317 if exc.__traceback__ is not tb:
318 raise exc.with_traceback(tb)
--> 319 raise exc
File ~\AppData\Roaming\Python\Python38\site-packages\dask\local.py:224, in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
222 try:
223 task, data = loads(task_info)
--> 224 result = _execute_task(task, data)
225 id = get_id()
226 result = dumps((result, id))
File ~\AppData\Roaming\Python\Python38\site-packages\dask\core.py:119, in _execute_task(arg, cache, dsk)
115 func, args = arg[0], arg[1:]
116 # Note: Don't assign the subtask results to a variable. numpy detects
117 # temporaries by their reference count and can execute certain
118 # operations in-place.
--> 119 return func(*(_execute_task(a, cache) for a in args))
120 elif not ishashable(arg):
121 return arg
File ~\AppData\Roaming\Python\Python38\site-packages\dask\array\core.py:128, in getter(a, b, asarray, lock)
123 # Below we special-case `np.matrix` to force a conversion to
124 # `np.ndarray` and preserve original Dask behavior for `getter`,
125 # as for all purposes `np.matrix` is array-like and thus
126 # `is_arraylike` evaluates to `True` in that case.
127 if asarray and (not is_arraylike(c) or isinstance(c, np.matrix)):
--> 128 c = np.asarray(c)
129 finally:
130 if lock:
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\indexing.py:459, in ImplicitToExplicitIndexingAdapter.__array__(self, dtype)
458 def __array__(self, dtype=None):
--> 459 return np.asarray(self.array, dtype=dtype)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\indexing.py:623, in CopyOnWriteArray.__array__(self, dtype)
622 def __array__(self, dtype=None):
--> 623 return np.asarray(self.array, dtype=dtype)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\indexing.py:524, in LazilyIndexedArray.__array__(self, dtype)
522 def __array__(self, dtype=None):
523 array = as_indexable(self.array)
--> 524 return np.asarray(array[self.key], dtype=None)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\backends\zarr.py:76, in ZarrArrayWrapper.__getitem__(self, key)
74 array = self.get_array()
75 if isinstance(key, indexing.BasicIndexer):
---> 76 return array[key.tuple]
77 elif isinstance(key, indexing.VectorizedIndexer):
78 return array.vindex[
79 indexing._arrayize_vectorized_indexer(key, self.shape).tuple
80 ]
File ~\Anaconda3\envs\satellite\lib\site-packages\zarr\core.py:788, in Array.__getitem__(self, selection)
786 result = self.vindex[selection]
787 else:
--> 788 result = self.get_basic_selection(pure_selection, fields=fields)
789 return result
File ~\Anaconda3\envs\satellite\lib\site-packages\zarr\core.py:914, in Array.get_basic_selection(self, selection, out, fields)
911 return self._get_basic_selection_zd(selection=selection, out=out,
912 fields=fields)
913 else:
--> 914 return self._get_basic_selection_nd(selection=selection, out=out,
915 fields=fields)
File ~\Anaconda3\envs\satellite\lib\site-packages\zarr\core.py:957, in Array._get_basic_selection_nd(self, selection, out, fields)
951 def _get_basic_selection_nd(self, selection, out=None, fields=None):
952 # implementation of basic selection for array with at least one dimension
953
954 # setup indexer
955 indexer = BasicIndexer(selection, self)
--> 957 return self._get_selection(indexer=indexer, out=out, fields=fields)
File ~\Anaconda3\envs\satellite\lib\site-packages\zarr\core.py:1247, in Array._get_selection(self, indexer, out, fields)
1241 if not hasattr(self.chunk_store, "getitems") or \
1242 any(map(lambda x: x == 0, self.shape)):
1243 # sequentially get one key at a time from storage
1244 for chunk_coords, chunk_selection, out_selection in indexer:
1245
1246 # load chunk selection into output array
-> 1247 self._chunk_getitem(chunk_coords, chunk_selection, out, out_selection,
1248 drop_axes=indexer.drop_axes, fields=fields)
1249 else:
1250 # allow storage to get multiple items at once
1251 lchunk_coords, lchunk_selection, lout_selection = zip(*indexer)
File ~\Anaconda3\envs\satellite\lib\site-packages\zarr\core.py:1939, in Array._chunk_getitem(self, chunk_coords, chunk_selection, out, out_selection, drop_axes, fields)
1935 ckey = self._chunk_key(chunk_coords)
1937 try:
1938 # obtain compressed data for chunk
-> 1939 cdata = self.chunk_store[ckey]
1941 except KeyError:
1942 # chunk not initialized
1943 if self._fill_value is not None:
File ~\Anaconda3\envs\satellite\lib\site-packages\zarr\storage.py:717, in KVStore.__getitem__(self, key)
716 def __getitem__(self, key):
--> 717 return self._mutable_mapping[key]
File ~\Anaconda3\envs\satellite\lib\site-packages\fsspec\mapping.py:137, in FSMap.__getitem__(self, key, default)
135 k = self._key_to_str(key)
136 try:
--> 137 result = self.fs.cat(k)
138 except self.missing_exceptions:
139 if default is not None:
File ~\Anaconda3\envs\satellite\lib\site-packages\fsspec\asyn.py:111, in sync_wrapper.<locals>.wrapper(*args, **kwargs)
108 #functools.wraps(func)
109 def wrapper(*args, **kwargs):
110 self = obj or args[0]
--> 111 return sync(self.loop, func, *args, **kwargs)
File ~\Anaconda3\envs\satellite\lib\site-packages\fsspec\asyn.py:94, in sync(loop, func, timeout, *args, **kwargs)
91 return_result = result[0]
92 if isinstance(return_result, asyncio.TimeoutError):
93 # suppress asyncio.TimeoutError, raise FSTimeoutError
---> 94 raise FSTimeoutError from return_result
95 elif isinstance(return_result, BaseException):
96 raise return_result
FSTimeoutError:
In the line:
store = fsspec.get_mapper(asset.href)
you can pass extra arguments to the fsspec backend, in this case HTTP; see fsspec.implementations.http.HTTPFileSystem. Here, client_kwargs are passed on to aiohttp.ClientSession, and they include an optional timeout argument. Your call might look something like:
from aiohttp import ClientTimeout
store = fsspec.get_mapper(asset.href, client_kwargs={"timeout": ClientTimeout(total=5000, connect=1000)})
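Putting that together with the code from the question, the whole open might look like the sketch below; the timeout values are only illustrative, and asset is the same object as in the question:
import fsspec
import xarray as xr
from aiohttp import ClientTimeout

# Illustrative timeouts only; tune them to your network and chunk sizes.
store = fsspec.get_mapper(
    asset.href,
    client_kwargs={"timeout": ClientTimeout(total=5000, connect=1000)},
)
data = xr.open_zarr(store, **asset.extra_fields["xarray:open_kwargs"])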
I am using a multiple-database environment with DJANGO_DB_ROUTERS, but I am facing this error:
TransactionManagementError: select_for_update cannot be used outside of a transaction.
The transaction is opened inside the Django library itself, so I can't pass using=db_name. Both reads and writes happen only on the master, but I am still getting this error:
----> 1 create_type(t())
~/setup.py in create_type(t)
1791 object_identifier=oi,
1792 tenant=t,
-> 1793 defaults=data
1794 )
1795
~/backend/venv/lib/python3.6/site-packages/django/db/models/manager.py in manager_method(self, *args, **kwargs)
80 def create_method(name, method):
81 def manager_method(self, *args, **kwargs):
---> 82 return getattr(self.get_queryset(), name)(*args, **kwargs)
83 manager_method.__name__ = method.__name__
84 manager_method.__doc__ = method.__doc__
~/backend/venv/lib/python3.6/site-packages/django/db/models/query.py in update_or_create(self, defaults, **kwargs)
501 with transaction.atomic(using=self.db):
502 try:
--> 503 obj = self.select_for_update().get(**lookup)
504 except self.model.DoesNotExist:
505 obj, created = self._create_object_from_params(lookup, params)
~/backend/venv/lib/python3.6/site-packages/django/db/models/query.py in get(self, *args, **kwargs)
395 if self.query.can_filter() and not self.query.distinct_fields:
396 clone = clone.order_by()
--> 397 num = len(clone)
398 if num == 1:
399 return clone._result_cache[0]
~/cftr/backend/venv/lib/python3.6/site-packages/django/db/models/query.py in __len__(self)
252
253 def __len__(self):
--> 254 self._fetch_all()
255 return len(self._result_cache)
256
~/backend/venv/lib/python3.6/site-packages/django/db/models/query.py in _fetch_all(self)
1177 def _fetch_all(self):
1178 if self._result_cache is None:
-> 1179 self._result_cache = list(self._iterable_class(self))
1180 if self._prefetch_related_lookups and not self._prefetch_done:
1181 self._prefetch_related_objects()
~/backend/venv/lib/python3.6/site-packages/django/db/models/query.py in __iter__(self)
51 # Execute the query. This will also fill compiler.select, klass_info,
52 # and annotations.
---> 53 results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
54 select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
55 compiler.annotation_col_map)
~/backend/venv/lib/python3.6/site-packages/django/db/models/sql/compiler.py in execute_sql(self, result_type, chunked_fetch, chunk_size)
1054 try:
1055 sql, params = self.as_sql()
-> 1056 if not sql:
1057 raise EmptyResultSet
1058 except EmptyResultSet:
~/backend/venv/lib/python3.6/site-packages/django/db/models/sql/compiler.py in as_sql(self, with_limits, with_col_aliases)
487 if self.connection.get_autocommit():
488 raise TransactionManagementError('select_for_update cannot be used outside of a transaction.')
--> 489
490 if with_limits and not self.connection.features.supports_select_for_update_with_limit:
491 raise NotSupportedError(
TransactionManagementError: select_for_update cannot be used outside of a transaction.
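For what it's worth, the error itself only says that select_for_update() must run inside transaction.atomic() opened on the same database connection the query uses. A minimal sketch of forcing that explicitly is below; the model name MyModel and the alias 'master' are placeholders, not part of the original setup:
from django.db import transaction

# Hypothetical sketch: open the atomic block and route the query to the
# same alias, so select_for_update() sees an active transaction there.
with transaction.atomic(using='master'):
    obj, created = MyModel.objects.using('master').update_or_create(
        object_identifier=oi,
        tenant=t,
        defaults=data,
    )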
I am trying to create a custom Numba type. I am having issues boxing and unboxing Numba NumPy arrays to native NumPy arrays.
I have searched online for similar issues and followed the documentation example to the best of my ability (https://numba.pydata.org/numba-doc/latest/extending/interval-example.html).
I have tried to interpret https://github.com/numba/numba/blob/master/numba/targets/boxing.py, but it is quite difficult, so I think I might be doing something small wrong.
Below is my current attempt at including a NumPy array in my custom type.
import numpy as np
import numba as nb
from numba import types, cgutils
from numba.extending import typeof_impl, type_callable, models
from numba.extending import register_model, make_attribute_wrapper, overload_attribute
from numba.extending import lower_builtin, unbox, NativeValue, box
class BMatrix(object):
    """
    An empty wrapper for a Binary Matrix
    """
    def __init__(self, m, n, row_index):  # , col_index):
        self.m = m
        self.n = n
        self.row_index = row_index
        # self.col_i = col_index

    def __repr__(self):
        return 'BMatrix(%d, %d)' % (self.m, self.n)

    @property
    def shape(self):
        return (self.m, self.n)

class BMatrixType(types.Type):
    def __init__(self):
        super(BMatrixType, self).__init__(name='BMatrix')

bmatrix_type = BMatrixType()
@typeof_impl.register(BMatrix)
def typeof_index(val, c):
    return bmatrix_type

@type_callable(BMatrix)
def type_bmatrix(context):
    def typer(m, n, row_index):
        if (isinstance(m, types.Integer)
                and isinstance(n, types.Integer)
                and isinstance(row_index, nb.types.Array)):
                # and isinstance(col_index, nb.types.Array)):
            return bmatrix_type
    return typer
@register_model(BMatrixType)
class BMatrixModel(models.StructModel):
    def __init__(self, dmm, fe_type):
        members = [
            ('m', types.int64),
            ('n', types.int64),
            ('row_index', types.Array(types.int64, 1, 'C'))
        ]
        models.StructModel.__init__(self, dmm, fe_type, members)

make_attribute_wrapper(BMatrixType, 'm', 'm')
make_attribute_wrapper(BMatrixType, 'n', 'n')
make_attribute_wrapper(BMatrixType, 'row_index', 'row_index')
@overload_attribute(BMatrixType, "shape")
def get_shape(bmatrix):
    def getter(bmatrix):
        return (bmatrix.m, bmatrix.n)
    return getter

@lower_builtin(BMatrix, types.Integer, types.Integer, types.Array)  # nb.types.Array, nb.types.Array)
def impl_bmatrix(context, builder, sig, args):
    typ = sig.return_type
    m, n, row_index = args
    bmatrix = cgutils.create_struct_proxy(typ)(context, builder)
    bmatrix.m = m
    bmatrix.n = n
    bmatrix.row_index = row_index
    return bmatrix._getvalue()
@unbox(BMatrixType)
def unbox_bmatrix(typ, obj, c):
    """
    Convert a BMatrixType object to a native interval structure.
    """
    m_obj = c.pyapi.object_getattr_string(obj, "m")
    n_obj = c.pyapi.object_getattr_string(obj, "n")
    row_index_obj = c.pyapi.object_getattr_string(obj, "row_index")
    BMatrix = cgutils.create_struct_proxy(typ)(c.context, c.builder)
    BMatrix.m = c.pyapi.long_as_longlong(m_obj)
    BMatrix.n = c.pyapi.long_as_longlong(n_obj)
    BMatrix.row_index = nb.targets.boxing.unbox_array(types.Array(types.int64, 1, 'C'),
                                                      row_index_obj, c)
    c.pyapi.decref(m_obj)
    c.pyapi.decref(n_obj)
    c.pyapi.decref(row_index_obj)
    is_error = cgutils.is_not_null(c.builder, c.pyapi.err_occurred())
    return NativeValue(BMatrix._getvalue(), is_error=is_error)
@box(BMatrixType)
def box_bmatrix(typ, val, c):
    """
    Convert a native bmatrix structure to a BMatrix object.
    """
    Bmatrix = cgutils.create_struct_proxy(typ)(c.context, c.builder, value=val)
    m_obj = c.pyapi.long_from_longlong(Bmatrix.m)
    n_obj = c.pyapi.long_from_longlong(Bmatrix.n)
    row_index_obj = nb.targets.boxing.box_array(types.Array(types.int64, 1, 'C'),
                                                Bmatrix.row_index, c)
    class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Bmatrix))
    res = c.pyapi.call_function_objargs(class_obj, (m_obj, n_obj))
    c.pyapi.decref(m_obj)
    c.pyapi.decref(n_obj)
    c.pyapi.decref(row_index_obj)
    c.pyapi.decref(class_obj)
    return res
Test cases (the error tracebacks are absolutely massive for test_2 and test_3):
@nb.jit(nopython=True)
def test_1():  # Runs
    x = BMatrix(10, 10, np.array([10,10,10]))

def test_2():  # Errors
    x = BMatrix(10, 10, np.array([10,10,10]))
    @nb.jit(nopython=True)
    def _test_2(y):
        return y
    return _test_2(x)

@nb.jit(nopython=True)
def test_3():  # Errors
    return BMatrix(10, 10, np.array([10,10,10]))

@nb.jit(nopython=True)
def test_4():
    return BMatrix(10, 10, np.array([10,10,10])).row_index
These are the errors I get when I run the test cases:
test_1() #Runs
test_2()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-52-0f6d1bdba40b> in <module>
----> 1 test_2()
<ipython-input-51-60141c9792c1> in test_2()
9 return y
10
---> 11 return _test_2(x)
12 #nb.jit(nopython=True)
13 def test_3():
//anaconda3/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
368 e.patch_message(''.join(e.args) + help_msg)
369 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 370 raise e
371
372 def inspect_llvm(self, signature=None):
//anaconda3/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
325 argtypes.append(self.typeof_pyval(a))
326 try:
--> 327 return self.compile(tuple(argtypes))
328 except errors.TypingError as e:
329 # Intercept typing error that may be due to an argument
//anaconda3/lib/python3.7/site-packages/numba/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
//anaconda3/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, sig)
657
658 self._cache_misses[sig] += 1
--> 659 cres = self._compiler.compile(args, return_type)
660 self.add_overload(cres)
661 self._cache.save_overload(sig, cres)
//anaconda3/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, args, return_type)
81 args=args, return_type=return_type,
82 flags=flags, locals=self.locals,
---> 83 pipeline_class=self.pipeline_class)
84 # Check typing error if object mode is used
85 if cres.typing_error is not None and not flags.enable_pyobject:
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
953 pipeline = pipeline_class(typingctx, targetctx, library,
954 args, return_type, flags, locals)
--> 955 return pipeline.compile_extra(func)
956
957
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in compile_extra(self, func)
375 self.lifted = ()
376 self.lifted_from = None
--> 377 return self._compile_bytecode()
378
379 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in _compile_bytecode(self)
884 """
885 assert self.func_ir is None
--> 886 return self._compile_core()
887
888 def _compile_ir(self):
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in _compile_core(self)
871 self.define_pipelines(pm)
872 pm.finalize()
--> 873 res = pm.run(self.status)
874 if res is not None:
875 # Early pipeline completion
//anaconda3/lib/python3.7/site-packages/numba/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in run(self, status)
252 # No more fallback pipelines?
253 if is_final_pipeline:
--> 254 raise patched_exception
255 # Go to next fallback pipeline
256 else:
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in run(self, status)
243 try:
244 event("-- %s" % stage_name)
--> 245 stage()
246 except _EarlyPipelineCompletion as e:
247 return e.result
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in stage_nopython_backend(self)
745 """
746 lowerfn = self.backend_nopython_mode
--> 747 self._backend(lowerfn, objectmode=False)
748
749 def stage_compile_interp_mode(self):
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)
685 self.library.enable_object_caching()
686
--> 687 lowered = lowerfn()
688 signature = typing.signature(self.return_type, *self.args)
689 self.cr = compile_result(
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in backend_nopython_mode(self)
672 self.calltypes,
673 self.flags,
--> 674 self.metadata)
675
676 def _backend(self, lowerfn, objectmode):
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in native_lowering_stage(targetctx, library, interp, typemap, restype, calltypes, flags, metadata)
1124 lower.lower()
1125 if not flags.no_cpython_wrapper:
-> 1126 lower.create_cpython_wrapper(flags.release_gil)
1127 env = lower.env
1128 call_helper = lower.call_helper
//anaconda3/lib/python3.7/site-packages/numba/lowering.py in create_cpython_wrapper(self, release_gil)
269 self.context.create_cpython_wrapper(self.library, self.fndesc,
270 self.env, self.call_helper,
--> 271 release_gil=release_gil)
272
273 def setup_function(self, fndesc):
//anaconda3/lib/python3.7/site-packages/numba/targets/cpu.py in create_cpython_wrapper(self, library, fndesc, env, call_helper, release_gil)
155 fndesc, env, call_helper=call_helper,
156 release_gil=release_gil)
--> 157 builder.build()
158 library.add_ir_module(wrapper_module)
159
//anaconda3/lib/python3.7/site-packages/numba/callwrapper.py in build(self)
120
121 api = self.context.get_python_api(builder)
--> 122 self.build_wrapper(api, builder, closure, args, kws)
123
124 return wrapper, api
//anaconda3/lib/python3.7/site-packages/numba/callwrapper.py in build_wrapper(self, api, builder, closure, args, kws)
153 innerargs.append(None)
154 else:
--> 155 val = cleanup_manager.add_arg(builder.load(obj), ty)
156 innerargs.append(val)
157
//anaconda3/lib/python3.7/site-packages/numba/callwrapper.py in add_arg(self, obj, ty)
30 """
31 # Unbox argument
---> 32 native = self.api.to_native_value(ty, obj)
33
34 # If an error occurred, go to the cleanup block for the previous argument.
//anaconda3/lib/python3.7/site-packages/numba/pythonapi.py in to_native_value(self, typ, obj)
1423 impl = _unboxers.lookup(typ.__class__, unbox_unsupported)
1424 c = _UnboxContext(self.context, self.builder, self)
-> 1425 return impl(typ, obj, c)
1426
1427 def from_native_return(self, typ, val, env_manager):
<ipython-input-45-d8ac5afde794> in unbox_bmatrix(typ, obj, c)
85 BMatrix.n = c.pyapi.long_as_longlong(n_obj)
86 BMatrix.row_index = nb.targets.boxing.unbox_array(types.Array(types.int64, 1, 'C'),
---> 87 row_index_obj, c)
88 c.pyapi.decref(m_obj)
89 c.pyapi.decref(n_obj)
//anaconda3/lib/python3.7/site-packages/numba/cgutils.py in __setattr__(self, field, value)
162 if field.startswith('_'):
163 return super(_StructProxy, self).__setattr__(field, value)
--> 164 self[self._datamodel.get_field_position(field)] = value
165
166 def __getitem__(self, index):
//anaconda3/lib/python3.7/site-packages/numba/cgutils.py in __setitem__(self, index, value)
177 ptr = self._get_ptr_by_index(index)
178 value = self._cast_member_from_value(index, value)
--> 179 if value.type != ptr.type.pointee:
180 if (is_pointer(value.type) and is_pointer(ptr.type.pointee)
181 and value.type.pointee == ptr.type.pointee.pointee):
AttributeError: Failed in nopython mode pipeline (step: nopython mode backend)
'NativeValue' object has no attribute 'type'
test_3()
KeyError Traceback (most recent call last)
//anaconda3/lib/python3.7/site-packages/numba/pythonapi.py in serialize_object(self, obj)
1403 try:
-> 1404 gv = self.module.__serialized[obj]
1405 except KeyError:
KeyError: <numba.cgutils.ValueStructProxy_BMatrix object at 0x11e693f28>
During handling of the above exception, another exception occurred:
PicklingError Traceback (most recent call last)
<ipython-input-53-8d78c7c0acee> in <module>
----> 1 test_3()
//anaconda3/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
368 e.patch_message(''.join(e.args) + help_msg)
369 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 370 raise e
371
372 def inspect_llvm(self, signature=None):
//anaconda3/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
325 argtypes.append(self.typeof_pyval(a))
326 try:
--> 327 return self.compile(tuple(argtypes))
328 except errors.TypingError as e:
329 # Intercept typing error that may be due to an argument
//anaconda3/lib/python3.7/site-packages/numba/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
//anaconda3/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, sig)
657
658 self._cache_misses[sig] += 1
--> 659 cres = self._compiler.compile(args, return_type)
660 self.add_overload(cres)
661 self._cache.save_overload(sig, cres)
//anaconda3/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, args, return_type)
81 args=args, return_type=return_type,
82 flags=flags, locals=self.locals,
---> 83 pipeline_class=self.pipeline_class)
84 # Check typing error if object mode is used
85 if cres.typing_error is not None and not flags.enable_pyobject:
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
953 pipeline = pipeline_class(typingctx, targetctx, library,
954 args, return_type, flags, locals)
--> 955 return pipeline.compile_extra(func)
956
957
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in compile_extra(self, func)
375 self.lifted = ()
376 self.lifted_from = None
--> 377 return self._compile_bytecode()
378
379 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in _compile_bytecode(self)
884 """
885 assert self.func_ir is None
--> 886 return self._compile_core()
887
888 def _compile_ir(self):
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in _compile_core(self)
871 self.define_pipelines(pm)
872 pm.finalize()
--> 873 res = pm.run(self.status)
874 if res is not None:
875 # Early pipeline completion
//anaconda3/lib/python3.7/site-packages/numba/compiler_lock.py in _acquire_compile_lock(*args, **kwargs)
30 def _acquire_compile_lock(*args, **kwargs):
31 with self:
---> 32 return func(*args, **kwargs)
33 return _acquire_compile_lock
34
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in run(self, status)
252 # No more fallback pipelines?
253 if is_final_pipeline:
--> 254 raise patched_exception
255 # Go to next fallback pipeline
256 else:
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in run(self, status)
243 try:
244 event("-- %s" % stage_name)
--> 245 stage()
246 except _EarlyPipelineCompletion as e:
247 return e.result
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in stage_nopython_backend(self)
745 """
746 lowerfn = self.backend_nopython_mode
--> 747 self._backend(lowerfn, objectmode=False)
748
749 def stage_compile_interp_mode(self):
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)
685 self.library.enable_object_caching()
686
--> 687 lowered = lowerfn()
688 signature = typing.signature(self.return_type, *self.args)
689 self.cr = compile_result(
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in backend_nopython_mode(self)
672 self.calltypes,
673 self.flags,
--> 674 self.metadata)
675
676 def _backend(self, lowerfn, objectmode):
//anaconda3/lib/python3.7/site-packages/numba/compiler.py in native_lowering_stage(targetctx, library, interp, typemap, restype, calltypes, flags, metadata)
1124 lower.lower()
1125 if not flags.no_cpython_wrapper:
-> 1126 lower.create_cpython_wrapper(flags.release_gil)
1127 env = lower.env
1128 call_helper = lower.call_helper
//anaconda3/lib/python3.7/site-packages/numba/lowering.py in create_cpython_wrapper(self, release_gil)
269 self.context.create_cpython_wrapper(self.library, self.fndesc,
270 self.env, self.call_helper,
--> 271 release_gil=release_gil)
272
273 def setup_function(self, fndesc):
//anaconda3/lib/python3.7/site-packages/numba/targets/cpu.py in create_cpython_wrapper(self, library, fndesc, env, call_helper, release_gil)
155 fndesc, env, call_helper=call_helper,
156 release_gil=release_gil)
--> 157 builder.build()
158 library.add_ir_module(wrapper_module)
159
//anaconda3/lib/python3.7/site-packages/numba/callwrapper.py in build(self)
120
121 api = self.context.get_python_api(builder)
--> 122 self.build_wrapper(api, builder, closure, args, kws)
123
124 return wrapper, api
//anaconda3/lib/python3.7/site-packages/numba/callwrapper.py in build_wrapper(self, api, builder, closure, args, kws)
174
175 retty = self._simplified_return_type()
--> 176 obj = api.from_native_return(retty, retval, env_manager)
177 builder.ret(obj)
178
//anaconda3/lib/python3.7/site-packages/numba/pythonapi.py in from_native_return(self, typ, val, env_manager)
1429 "prevented the return of " \
1430 "optional value"
-> 1431 out = self.from_native_value(typ, val, env_manager)
1432 return out
1433
//anaconda3/lib/python3.7/site-packages/numba/pythonapi.py in from_native_value(self, typ, val, env_manager)
1443
1444 c = _BoxContext(self.context, self.builder, self, env_manager)
-> 1445 return impl(typ, val, c)
1446
1447 def reflect_native_value(self, typ, val, env_manager=None):
<ipython-input-45-d8ac5afde794> in box_bmatrix(typ, val, c)
104 Bmatrix.row_index, c)
105
--> 106 class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(Bmatrix))
107 res = c.pyapi.call_function_objargs(class_obj, (m_obj, n_obj))
108 c.pyapi.decref(m_obj)
//anaconda3/lib/python3.7/site-packages/numba/pythonapi.py in serialize_object(self, obj)
1404 gv = self.module.__serialized[obj]
1405 except KeyError:
-> 1406 struct = self.serialize_uncached(obj)
1407 name = ".const.picklebuf.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
1408 gv = self.context.insert_unique_const(self.module, name, struct)
//anaconda3/lib/python3.7/site-packages/numba/pythonapi.py in serialize_uncached(self, obj)
1383 """
1384 # First make the array constant
-> 1385 data = pickle.dumps(obj, protocol=-1)
1386 assert len(data) < 2**31
1387 name = ".const.pickledata.%s" % (id(obj) if config.DIFF_IR == 0 else "DIFF_IR")
PicklingError: Failed in nopython mode pipeline (step: nopython mode backend)
Can't pickle <class 'numba.cgutils.ValueStructProxy_BMatrix'>: attribute lookup ValueStructProxy_BMatrix on numba.cgutils failed
test_4() #Runs Wrong
array([-2387225703656530210, -2387225703656530210, -2387225703656530210])
unbox_array returns a NativeValue. Inside the NativeValue is the actual value, which is what you want to assign to row_index. So just add ".value" to the end of the following line to extract the value from the NativeValue:
BMatrix.row_index = nb.targets.boxing.unbox_array(types.Array(types.int64, 1, 'C'), row_index_obj, c)
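Applied to unbox_bmatrix from the question, that assignment becomes (only the trailing .value is new):
BMatrix.row_index = nb.targets.boxing.unbox_array(types.Array(types.int64, 1, 'C'),
                                                  row_index_obj, c).value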
I'm following this introductory tutorial: https://www.mongodb.com/blog/post/getting-started-with-python-and-mongodb. I can connect to the cluster fine with the mongo shell, but not with PyMongo (Python 3.6.1, PyMongo 3.4.0). PyMongo works fine with a local MongoDB. What is the problem? Below is the exception I get:
---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
<ipython-input-22-1c9d47341338> in <module>()
----> 1 server_status_result = db.command('serverStatus')
2 pprint(server_status_result)
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/database.py in command(self, command, value, check,
allowable_errors, read_preference, codec_options, **kwargs)
489 """
490 client = self.__client
--> 491 with client._socket_for_reads(read_preference) as
(sock_info, slave_ok):
492 return self._command(sock_info, command, slave_ok,
value,
493 check, allowable_errors,
read_preference,
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/mongo_client.py in _socket_for_reads(self,
read_preference)
857 topology = self._get_topology()
858 single = topology.description.topology_type ==
TOPOLOGY_TYPE.Single
--> 859 with self._get_socket(read_preference) as sock_info:
860 slave_ok = (single and not sock_info.is_mongos) or (
861 preference != ReadPreference.PRIMARY)
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/mongo_client.py in _get_socket(self, selector)
823 server = self._get_topology().select_server(selector)
824 try:
--> 825 with server.get_socket(self.__all_credentials) as
sock_info:
826 yield sock_info
827 except NetworkTimeout:
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/server.py in get_socket(self, all_credentials,
checkout)
166 #contextlib.contextmanager
167 def get_socket(self, all_credentials, checkout=False):
--> 168 with self.pool.get_socket(all_credentials, checkout)
as sock_info:
169 yield sock_info
170
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in get_socket(self, all_credentials,
checkout)
790 sock_info = self._get_socket_no_auth()
791 try:
--> 792 sock_info.check_auth(all_credentials)
793 yield sock_info
794 except:
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in check_auth(self, all_credentials)
510
511 for credentials in cached - authset:
--> 512 auth.authenticate(credentials, self)
513 self.authset.add(credentials)
514
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/auth.py in authenticate(credentials, sock_info)
468 mechanism = credentials.mechanism
469 auth_func = _AUTH_MAP.get(mechanism)
--> 470 auth_func(credentials, sock_info)
471
472
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/auth.py in _authenticate_default(credentials,
sock_info)
448 def _authenticate_default(credentials, sock_info):
449 if sock_info.max_wire_version >= 3:
--> 450 return _authenticate_scram_sha1(credentials,
sock_info)
451 else:
452 return _authenticate_mongo_cr(credentials, sock_info)
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/auth.py in _authenticate_scram_sha1(credentials,
sock_info)
227 ('conversationId', res['conversationId']),
228 ('payload', Binary(client_final))])
--> 229 res = sock_info.command(source, cmd)
230
231 parsed = _parse_scram_response(res['payload'])
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in command(self, dbname, spec, slave_ok,
read_preference, codec_options, check, allowable_errors, check_keys,
read_concern, write_concern, parse_write_concern_error, collation)
422 # Catch socket.error, KeyboardInterrupt, etc. and close
ourselves.
423 except BaseException as error:
--> 424 self._raise_connection_failure(error)
425
426 def send_message(self, message, max_doc_size):
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in _raise_connection_failure(self, error)
550 _raise_connection_failure(self.address, error)
551 else:
--> 552 raise error
553
554 def __eq__(self, other):
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in command(self, dbname, spec, slave_ok,
read_preference, codec_options, check, allowable_errors, check_keys,
read_concern, write_concern, parse_write_concern_error, collation)
417 read_concern,
418
parse_write_concern_error=parse_write_concern_error,
--> 419 collation=collation)
420 except OperationFailure:
421 raise
/home/tim/.virtualenvs/main/lib/python3.6/site-p
ackages/pymongo/network.py in command(sock, dbname, spec, slave_ok,
is_mongos, read_preference, codec_options, check, allowable_errors,
address, check_keys, listeners, max_bson_size, read_concern,
parse_write_concern_error, collation)
108 response = receive_message(sock, 1, request_id)
109 unpacked = helpers._unpack_response(
--> 110 response, codec_options=codec_options)
111
112 response_doc = unpacked['data'][0]
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/helpers.py in _unpack_response(response, cursor_id,
codec_options)
126 # Fake the ok field if it doesn't exist.
127 error_object.setdefault("ok", 0)
--> 128 if error_object["$err"].startswith("not master"):
129 raise NotMasterError(error_object["$err"],
error_object)
130 elif error_object.get("code") == 50:
KeyError: '$err'
I believe this is an Atlas bug; I've reported it to the team. The bug is that if you fail to log in to Atlas because your username or password is incorrect, it replies in a way that makes PyMongo throw a KeyError instead of the proper OperationFailure("auth failed").
PyMongo does work with Atlas, however, if you format your connection string properly with your username and password. Make sure your username and password are URL-quoted. Substitute your username and password into this Python code:
from urllib.parse import quote_plus
print(quote_plus('MY USERNAME'))
print(quote_plus('MY PASSWORD'))
Take the output and put it into the connection string Atlas gave you, e.g. if your username is jesse@example.com and your password is "foo:bar", put those in the first part of the string, and get the rest of the string from the Atlas control panel for your account:
mongodb://jesse%40example.com:foo%3Abar@cluster0-shard-00-00-abc.mongodb.net:27017,cluster0-shard-00-01-abc.mongodb.net:27017,cluster0-shard-00-02-abc.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin
Note how "jesse@example.com" has become "jesse%40example.com", and "foo:bar" has become "foo%3Abar".
I am new to odo. I have a simple function for migrating databases:
from blaze import Data, odo, like

def migrate(source, target, table_names=None):
    src_db = Data(source)
    tbl_names = (src_db.fields, table_names)[table_names is not None]
    for tbl in tbl_names:
        display("Migrating Tablename ::" + tbl)
        odo('{0}::{1}'.format(source, tbl), '{0}::{1}'.format(target, tbl))
On running this on a SQLite database:
url_config = 'sqlite:///DB_CONFIG.db'
url_config_bcp = 'sqlite:///DB_CONFIG_BCP.db'
migrate(url_config, url_config_bcp)
I get the following error. How can I resolve it?
OperationalError: (sqlite3.OperationalError) Could not decode to UTF-8
column 'V_WIMS_WELLS$COMMENTS' with text ' 29/12/2013 : SAP issue
related , WO# was closed but still apprear in WIMS for confirmation .
WPH-41 Cut Surface CSG 9-5/8¿ on b
P.S.: I am aware that we can set text_factory explicitly in SQLAlchemy:
db = create_engine('sqlite:///{0}'.format(sqlite_name))
conn = db.raw_connection()
conn.connection.text_factory = str
However, I don't know how to fix this within the odo function.
A second attempt at writing the function, passing the metadata explicitly, results in the following error:
import sqlalchemy as sa

def migrate(source, target, table_names=None):
    src_db = Data(source)
    engine = sa.create_engine(source)
    conn = engine.raw_connection()
    conn.connection.text_factory = str
    metadata = sa.MetaData()
    metadata.bind = engine
    tbl_names = (src_db.fields, table_names)[table_names is not None]
    for tbl in tbl_names:
        display("Migrating Tablename ::" + tbl)
        odo(sa.Table(tbl, metadata), '{0}::{1}'.format(target, tbl))
Error:
---> 26 migrate(url_config, url_config_bcp)
<ipython-input-21-146061c63d09> in migrate(source, target, table_names)
21 for tbl in tbl_names:
22 display("Migrating Tablename ::" + tbl)
---> 23 odo(sa.Table(tbl, metadata), '{0}::{1}'.format(target, tbl))
25
C:\Anaconda\lib\site-packages\odo\odo.pyc in odo(source, target, **kwargs)
89 odo.append.append - Add things onto existing things
90 """
---> 91 return into(target, source, **kwargs)
C:\Anaconda\lib\site-packages\multipledispatch\dispatcher.pyc in __call__(self, *args, **kwargs)
162 self._cache[types] = func
163 try:
--> 164 return func(*args, **kwargs)
165
166 except MDNotImplementedError:
C:\Anaconda\lib\site-packages\odo\into.pyc in wrapped(*args, **kwargs)
41 raise TypeError('dshape argument is not an instance of DataShape')
42 kwargs['dshape'] = dshape
---> 43 return f(*args, **kwargs)
44 return wrapped
45
C:\Anaconda\lib\site-packages\odo\into.pyc in into_string(uri, b, dshape, **kwargs)
141
142 a = resource(uri, dshape=resource_ds, expected_dshape=dshape, **kwargs)
--> 143 return into(a, b, dshape=dshape, **kwargs)
144
145
C:\Anaconda\lib\site-packages\multipledispatch\dispatcher.pyc in __call__(self, *args, **kwargs)
162 self._cache[types] = func
163 try:
--> 164 return func(*args, **kwargs)
165
166 except MDNotImplementedError:
C:\Anaconda\lib\site-packages\odo\into.pyc in wrapped(*args, **kwargs)
41 raise TypeError('dshape argument is not an instance of DataShape')
42 kwargs['dshape'] = dshape
---> 43 return f(*args, **kwargs)
44 return wrapped
45
C:\Anaconda\lib\site-packages\odo\into.pyc in into_object(target, source, dshape, **kwargs)
129 if dshape is None:
130 dshape = discover(source)
--> 131 return append(target, source, dshape=dshape, **kwargs)
132
133
C:\Anaconda\lib\site-packages\multipledispatch\dispatcher.pyc in __call__(self, *args, **kwargs)
162 self._cache[types] = func
163 try:
--> 164 return func(*args, **kwargs)
165
166 except MDNotImplementedError:
C:\Anaconda\lib\site-packages\odo\backends\sql.pyc in append_table_to_sql_Table(t, o, **kwargs)
460 def append_table_to_sql_Table(t, o, **kwargs):
461 s = sa.select([o])
--> 462 return append(t, s, **kwargs)
463
464
C:\Anaconda\lib\site-packages\multipledispatch\dispatcher.pyc in __call__(self, *args, **kwargs)
162 self._cache[types] = func
163 try:
--> 164 return func(*args, **kwargs)
165
166 except MDNotImplementedError:
C:\Anaconda\lib\site-packages\odo\backends\sql.pyc in append_select_statement_to_sql_Table(t, o, bind, **kwargs)
470 return append(
471 t,
--> 472 convert(Iterator, o, bind=bind, **kwargs),
473 bind=bind,
474 **kwargs
C:\Anaconda\lib\site-packages\odo\core.pyc in __call__(self, *args, **kwargs)
42
43 def __call__(self, *args, **kwargs):
---> 44 return _transform(self.graph, *args, **kwargs)
45
46
C:\Anaconda\lib\site-packages\odo\core.pyc in _transform(graph, target, source, excluded_edges, ooc_types, **kwargs)
58 try:
59 for (A, B, f) in pth:
---> 60 x = f(x, excluded_edges=excluded_edges, **kwargs)
61 return x
62 except NotImplementedError as e:
C:\Anaconda\lib\site-packages\odo\backends\sql.pyc in select_or_selectable_to_frame(el, bind, **kwargs)
598 #convert.register(pd.DataFrame, (sa.sql.Select, sa.sql.Selectable), cost=200.0)
599 def select_or_selectable_to_frame(el, bind=None, **kwargs):
--> 600 columns, rows = batch(el, bind=bind)
601 row = next(rows, None)
602 if row is None:
C:\Anaconda\lib\site-packages\odo\backends\sql.pyc in batch(sel, chunksize, bind)
136 return
137 terator = rowterator(sel)
--> 138 return next(terator), concat(terator)
139
140
C:\Anaconda\lib\site-packages\odo\backends\sql.pyc in rowterator(sel, chunksize)
126 def rowterator(sel, chunksize=chunksize):
127 with getbind(sel, bind).connect() as conn:
--> 128 result = conn.execute(sel)
129 yield result.keys()
130
C:\Anaconda\lib\site-packages\sqlalchemy\engine\base.pyc in execute(self, object, *multiparams, **params)
912 type(object))
913 else:
--> 914 return meth(self, multiparams, params)
915
916 def _execute_function(self, func, multiparams, params):
C:\Anaconda\lib\site-packages\sqlalchemy\sql\elements.pyc in _execute_on_connection(self, connection, multiparams, params)
321
322 def _execute_on_connection(self, connection, multiparams, params):
--> 323 return connection._execute_clauseelement(self, multiparams, params)
324
325 def unique_params(self, *optionaldict, **kwargs):
C:\Anaconda\lib\site-packages\sqlalchemy\engine\base.pyc in _execute_clauseelement(self, elem, multiparams, params)
1008 compiled_sql,
1009 distilled_params,
-> 1010 compiled_sql, distilled_params
1011 )
1012 if self._has_events or self.engine._has_events:
C:\Anaconda\lib\site-packages\sqlalchemy\engine\base.pyc in _execute_context(self, dialect, constructor, statement, parameters, *args)
1144 parameters,
1145 cursor,
-> 1146 context)
1147
1148 if self._has_events or self.engine._has_events:
C:\Anaconda\lib\site-packages\sqlalchemy\engine\base.pyc in _handle_dbapi_exception(self, e, statement, parameters, cursor, context)
1339 util.raise_from_cause(
1340 sqlalchemy_exception,
-> 1341 exc_info
1342 )
1343 else:
C:\Anaconda\lib\site-packages\sqlalchemy\util\compat.pyc in raise_from_cause(exception, exc_info)
198 exc_info = sys.exc_info()
199 exc_type, exc_value, exc_tb = exc_info
--> 200 reraise(type(exception), exception, tb=exc_tb)
201
202 if py3k:
C:\Anaconda\lib\site-packages\sqlalchemy\engine\base.pyc in _execute_context(self, dialect, constructor, statement, parameters, *args)
1137 statement,
1138 parameters,
-> 1139 context)
1140 except Exception as e:
1141 self._handle_dbapi_exception(
C:\Anaconda\lib\site-packages\sqlalchemy\engine\default.pyc in do_execute(self, cursor, statement, parameters, context)
448
449 def do_execute(self, cursor, statement, parameters, context=None):
--> 450 cursor.execute(statement, parameters)
451
452 def do_execute_no_params(self, cursor, statement, context=None):
OperationalError: (sqlite3.OperationalError) near "FROM": syntax error [SQL: u'SELECT \nFROM "C_PATTERN"']
One thing that worked for me in pandas (which will hopefully carry over into odo) was to convert each column via df['Column'] = df['Column'].str.encode('utf-8').
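If you do fall back to pandas, a rough sketch of a table-by-table copy might look like this; the decode-with-replace text_factory is an assumption about how you want to handle the bad bytes, and the database filenames are the ones from the question:
import sqlite3
import pandas as pd
import sqlalchemy as sa

# Read through plain sqlite3 so text_factory can tolerate non-UTF-8 bytes.
src = sqlite3.connect('DB_CONFIG.db')
src.text_factory = lambda b: b.decode('utf-8', 'replace')
dst = sa.create_engine('sqlite:///DB_CONFIG_BCP.db')

tables = [row[0] for row in
          src.execute("SELECT name FROM sqlite_master WHERE type='table'")]
for tbl in tables:
    df = pd.read_sql_query('SELECT * FROM "{}"'.format(tbl), src)
    df.to_sql(tbl, dst, index=False, if_exists='replace')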
You can pass an encoding argument while migrating in odo:
odo(source, target, encoding='cp1252')
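For example, dropped into the migrate function from the question it might look like the sketch below; whether 'cp1252' is the right codec for your data is an assumption you would need to verify:
from blaze import Data
from odo import odo

def migrate(source, target, table_names=None, encoding='cp1252'):
    src_db = Data(source)
    tbl_names = table_names if table_names is not None else src_db.fields
    for tbl in tbl_names:
        print("Migrating Tablename ::" + tbl)
        # Forward the encoding hint so the SQLite text can be decoded.
        odo('{0}::{1}'.format(source, tbl),
            '{0}::{1}'.format(target, tbl),
            encoding=encoding)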