Trying to apply fftconvolve over xarray - python

I have a 3D xarray DataArray and a 1D xarray DataArray. One dimension of the 3D array has the same size as the single dimension of the other. I want to apply scipy.signal.fftconvolve along these corresponding dimensions. But I get an error. Here is some minimal example code:
import numpy as np
from scipy.signal import fftconvolve
import xarray as xr

xarr1 = xr.DataArray(np.random.random([10, 20, 500]),
                     dims=('dim1', 'dim2', 'sample'))
xarr2 = xr.DataArray(np.random.random(500),
                     dims=('sample',))
res = xr.apply_ufunc(fftconvolve, xarr1, xarr2,
                     input_core_dims=[['sample'], ['sample']],
                     kwargs={'mode': 'same'},
                     vectorize=True)
What I want to do is the equivalent of this:
res = np.zeros(xarr1.shape)
for i1 in range(xarr1.shape[0]):
    for i2 in range(xarr1.shape[1]):
        res[i1, i2, :] = fftconvolve(xarr1[i1, i2, :], xarr2, mode='same')
But I get the following error with the apply_ufunc version:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-174-4f45f68ab252> in <module>()
7 xarr2 = xr.DataArray(np.random.random(500),
8 dims=('sample',))
----> 9 res = xr.apply_ufunc(fftconvolve, xarr1, xarr2, input_core_dims=[['sample'], ['sample']], kwargs={'mode': 'same'}, vectorize=True)
~\Anaconda3\envs\dataproc\lib\site-packages\xarray\core\computation.py in apply_ufunc(func, *args, **kwargs)
932 join=join,
933 exclude_dims=exclude_dims,
--> 934 keep_attrs=keep_attrs)
935 elif any(isinstance(a, Variable) for a in args):
936 return variables_ufunc(*args)
~\Anaconda3\envs\dataproc\lib\site-packages\xarray\core\computation.py in apply_dataarray_ufunc(func, *args, **kwargs)
209
210 data_vars = [getattr(a, 'variable', a) for a in args]
--> 211 result_var = func(*data_vars)
212
213 if signature.num_outputs > 1:
~\Anaconda3\envs\dataproc\lib\site-packages\xarray\core\computation.py in apply_variable_ufunc(func, *args, **kwargs)
563 raise ValueError('unknown setting for dask array handling in '
564 'apply_ufunc: {}'.format(dask))
--> 565 result_data = func(*input_data)
566
567 if signature.num_outputs > 1:
~\Anaconda3\envs\dataproc\lib\site-packages\numpy\lib\function_base.py in __call__(self, *args, **kwargs)
2737 vargs.extend([kwargs[_n] for _n in names])
2738
-> 2739 return self._vectorize_call(func=func, args=vargs)
2740
2741 def _get_ufunc_and_otypes(self, func, args):
~\Anaconda3\envs\dataproc\lib\site-packages\numpy\lib\function_base.py in _vectorize_call(self, func, args)
2803 """Vectorized call to `func` over positional `args`."""
2804 if self.signature is not None:
-> 2805 res = self._vectorize_call_with_signature(func, args)
2806 elif not args:
2807 res = func()
~\Anaconda3\envs\dataproc\lib\site-packages\numpy\lib\function_base.py in _vectorize_call_with_signature(self, func, args)
2867
2868 for output, result in zip(outputs, results):
-> 2869 output[index] = result
2870
2871 if outputs is None:
ValueError: setting an array element with a sequence.

Specifying output_core_dims solves this:
res = xr.apply_ufunc(fftconvolve, xarr1, xarr2,
                     input_core_dims=[['sample'], ['sample']],
                     output_core_dims=[['sample']],
                     kwargs={'mode': 'same'}, vectorize=True)
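As a quick sanity check (a sketch reusing the arrays defined above), the apply_ufunc result can be compared against the explicit double loop:
expected = np.zeros(xarr1.shape)
for i1 in range(xarr1.shape[0]):
    for i2 in range(xarr1.shape[1]):
        expected[i1, i2, :] = fftconvolve(xarr1[i1, i2, :], xarr2, mode='same')

# The vectorized apply_ufunc result and the loop should agree to floating-point precision.
print(np.allclose(res, expected))  # True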

Related

TypeError: Wrong number of dimensions: expected 0, got 1 with shape (6,)

I am performing a linear regression analysis using a Bayesian method.
X = data.drop(columns='Power')
Y = data['Power']

a_0 = 1.
b_0 = 1.
mu_0 = (0., 0., 0., 0., 0., 0.)
row1 = [1., 0., 0., 0., 0., 0.]
row2 = [0., 1., 0., 0., 0., 0.]
row3 = [0., 0., 1., 0., 0., 0.]
row4 = [0., 0., 0., 1., 0., 0.]
row5 = [0., 0., 0., 0., 1., 0.]
row6 = [0., 0., 0., 0., 0., 1.]
Sigma_0 = np.array([row1, row2, row3, row4, row5, row6])

with pm.Model() as model:
    sigma_square = 1.0 / pm.Gamma('sigma square', alpha=a_0, beta=b_0)
    w = pm.MvNormal("w", mu=mu_0, cov=Sigma_0)
    likelihood = pm.Normal('likelihood', mu=np.dot(X, w), cov=sigma_square + np.dot(X.T, np.dot(Sigma_n, X)), observed=Y)
    trace = sample(3000, return_inferencedata=True)
This returns the following error:
TypeError Traceback (most recent call last)
in ()
2 sigma_square = 1.0 / pm.Gamma('sigma square', alpha=a_0, beta=b_0)
3
----> 4 w = pm.MvNormal("w", mu=mu_0, cov=Sigma_0)
5 likelihood = pm.Normal('likelihood', mu=np.dot(X,w), cov=sigma_square + np.dot(X.T ,np.dot(Sigma_n, X)), observed=Y)
6 trace = sample(3000, return_inferencedata=True)
/usr/local/lib/python3.7/dist-packages/pymc3/distributions/distribution.py in __new__(cls, name, *args, **kwargs)
120 else:
121 dist = cls.dist(*args, **kwargs)
--> 122 return model.Var(name, dist, data, total_size, dims=dims)
123
124 def __getnewargs__(self):
/usr/local/lib/python3.7/dist-packages/pymc3/model.py in Var(self, name, dist, data, total_size, dims)
1136 if getattr(dist, "transform", None) is None:
1137 with self:
-> 1138 var = FreeRV(name=name, distribution=dist, total_size=total_size, model=self)
1139 self.free_RVs.append(var)
1140 else:
/usr/local/lib/python3.7/dist-packages/pymc3/model.py in __init__(self, type, owner, index, name, distribution, total_size, model)
1667 self.distribution = distribution
1668 self.tag.test_value = (
-> 1669 np.ones(distribution.shape, distribution.dtype) * distribution.default()
1670 )
1671 self.logp_elemwiset = distribution.logp(self)
/usr/local/lib/python3.7/dist-packages/theano/graph/utils.py in __setattr__(self, attr, obj)
264
265 if getattr(self, "attr", None) == attr:
--> 266 obj = self.attr_filter(obj)
267
268 return object.__setattr__(self, attr, obj)
/usr/local/lib/python3.7/dist-packages/theano/tensor/type.py in filter(self, data, strict, allow_downcast)
180 if self.ndim != data.ndim:
181 raise TypeError(
--> 182 f"Wrong number of dimensions: expected {self.ndim},"
183 f" got {data.ndim} with shape {data.shape}."
184 )
TypeError: Wrong number of dimensions: expected 0, got 1 with shape (6,).
It seems my covariance matrix does not meet the required type/format. I don't know how to fix this.
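The traceback points at the MvNormal prior: in PyMC3, multivariate distributions generally need an explicit shape, otherwise the test value is treated as a scalar and the 6-element mu fails the dimension check. A minimal sketch of declaring the prior with a shape (the names mirror the question; the rest of the model is left aside):
import numpy as np
import pymc3 as pm

mu_0 = np.zeros(6)    # prior mean for the 6 regression weights
Sigma_0 = np.eye(6)   # prior covariance

with pm.Model() as model:
    # shape=6 tells PyMC3 that w is a length-6 random vector, so its test
    # value matches the 6-element mu and the 6x6 covariance matrix.
    w = pm.MvNormal('w', mu=mu_0, cov=Sigma_0, shape=6)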

Unit issue with MetPy's parcel_profile function

I have been working on code to plot Skew-Ts from Wyoming's weather servers. The issue I am having is that I get an error when attempting to run the parcel_profile function: it says it cannot convert from dimensionless to hectopascals. The pressure array being fed into the function, as well as the temperature and dewpoint data points, do have the appropriate units attached, though. To add to my confusion, I have the exact same code on another machine with the same library versions, and it runs fine there. Am I missing an obvious problem? Code and relevant library versions are listed below:
import metpy as mp
from metpy.units import units
import metpy.calc as mpcalc
from siphon.simplewebservice.wyoming import WyomingUpperAir
from datetime import datetime
import pandas as pd
import numpy as np

final_time = datetime(2022, 1, 21, 12)
station = 'ABQ'
df = WyomingUpperAir.request_data(final_time, station)

data_dict = {"Press": "", "Temp": "", "Dew_Point": "", "Height": "",
             "Mask": "", "Parcel": "", "Idx": "", "U": "", "V": ""}
data_dict['Press'] = df['pressure'].values * units(df.units['pressure'])
data_dict['Temp'] = df['temperature'].values * units(df.units['temperature'])
data_dict['Dew_Point'] = df['dewpoint'].values * units(df.units['dewpoint'])
data_dict['Height'] = df['height'].values * units(df.units['height'])
data_dict['U'] = df['u_wind'].values * units(df.units['u_wind'])
data_dict['V'] = df['v_wind'].values * units(df.units['v_wind'])
data_dict['Parcel'] = mpcalc.parcel_profile(data_dict['Press'],
                                            data_dict['Temp'][0],
                                            data_dict['Dew_Point'][0]).to('degC')
Error:
DimensionalityError Traceback (most recent call last)
C:\Users\####################.py in <module>
----> 1 data_dict['Parcel'] = mpcalc.parcel_profile(data_dict['Press'],
2 data_dict['Temp'][0],
3 data_dict['Dew_Point'][0]).to('degC')
~\anaconda3\envs\Met_World\lib\site-packages\metpy\xarray.py in wrapper(*args, **kwargs)
1214
1215 # Evaluate inner calculation
-> 1216 result = func(*bound_args.args, **bound_args.kwargs)
1217
1218 # Wrap output based on match and match_unit
~\anaconda3\envs\Met_World\lib\site-packages\metpy\units.py in wrapper(*args, **kwargs)
244 'that the function is being called properly.\n') + msg
245 raise ValueError(msg)
--> 246 return func(*args, **kwargs)
247
248 return wrapper
~\anaconda3\envs\Met_World\lib\site-packages\metpy\calc\thermo.py in parcel_profile(pressure, temperature, dewpoint)
737
738 """
--> 739 _, _, _, t_l, _, t_u = _parcel_profile_helper(pressure, temperature, dewpoint)
740 return concatenate((t_l, t_u))
741
~\anaconda3\envs\Met_World\lib\site-packages\metpy\calc\thermo.py in _parcel_profile_helper(pressure, temperature, dewpoint)
892
893 # If the pressure profile doesn't make it to the lcl, we can stop here
--> 894 if _greater_or_close(np.nanmin(pressure), press_lcl):
895 return (press_lower[:-1], press_lcl, units.Quantity(np.array([]), press_lower.units),
896 temp_lower[:-1], temp_lcl, units.Quantity(np.array([]), temp_lower.units))
~\anaconda3\envs\Met_World\lib\site-packages\metpy\calc\tools.py in _greater_or_close(a, value, **kwargs)
738
739 """
--> 740 return (a > value) | np.isclose(a, value, **kwargs)
741
742
~\anaconda3\envs\Met_World\lib\site-packages\pint\quantity.py in __array_ufunc__(self, ufunc, method, *inputs, **kwargs)
1721 )
1722
-> 1723 return numpy_wrap("ufunc", ufunc, inputs, kwargs, types)
1724
1725 def __array_function__(self, func, types, args, kwargs):
~\anaconda3\envs\Met_World\lib\site-packages\pint\numpy_func.py in numpy_wrap(func_type, func, args, kwargs, types)
919 if name not in handled or any(is_upcast_type(t) for t in types):
920 return NotImplemented
--> 921 return handled[name](*args, **kwargs)
~\anaconda3\envs\Met_World\lib\site-packages\pint\numpy_func.py in implementation(*args, **kwargs)
284 if input_units == "all_consistent":
285 # Match all input args/kwargs to same units
--> 286 stripped_args, stripped_kwargs = convert_to_consistent_units(
287 *args, pre_calc_units=first_input_units, **kwargs
288 )
~\anaconda3\envs\Met_World\lib\site-packages\pint\numpy_func.py in convert_to_consistent_units(pre_calc_units, *args, **kwargs)
105 """
106 return (
--> 107 tuple(convert_arg(arg, pre_calc_units=pre_calc_units) for arg in args),
108 {
109 key: convert_arg(arg, pre_calc_units=pre_calc_units)
~\anaconda3\envs\Met_World\lib\site-packages\pint\numpy_func.py in <genexpr>(.0)
105 """
106 return (
--> 107 tuple(convert_arg(arg, pre_calc_units=pre_calc_units) for arg in args),
108 {
109 key: convert_arg(arg, pre_calc_units=pre_calc_units)
~\anaconda3\envs\Met_World\lib\site-packages\pint\numpy_func.py in convert_arg(arg, pre_calc_units)
87 return arg
88 else:
---> 89 raise DimensionalityError("dimensionless", pre_calc_units)
90 elif _is_quantity(arg):
91 return arg.m
DimensionalityError: Cannot convert from 'dimensionless' to 'hectopascal'
Libraries used:
python 3.9.7
metpy 1.1.0
pandas 1.2.4
numpy 1.22.0
xarray 0.20.2
My first guess is that this is a problem with multiplying whatever, e.g., df['u_wind'].values is returning by the units. While it's nicer syntax, the more robust way is to use the Quantity constructor:
data_dict['Press'] = units.Quantity(df['pressure'].values, units(df.units['pressure']))
You can shorten all of that, though, and get the Quantity() handling for free, by using MetPy's helper metpy.units.pandas_dataframe_to_unit_arrays, which returns a dict of unit-aware arrays:
data_dict = units.pandas_dataframe_to_unit_arrays(df)
If you want the column names you were originally using, you can change them with df.rename().
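For example, one way to combine the two ideas (a sketch; the key mapping simply mirrors the names used in the question, and remapping the dict keys after conversion works just as well as renaming the DataFrame columns up front):
# Let MetPy attach units to every column at once, then remap the keys to the
# capitalized names used above (the mapping is illustrative; adjust as needed).
unit_arrays = units.pandas_dataframe_to_unit_arrays(df)
key_map = {'pressure': 'Press', 'temperature': 'Temp', 'dewpoint': 'Dew_Point',
           'height': 'Height', 'u_wind': 'U', 'v_wind': 'V'}
data_dict = {new: unit_arrays[old] for old, new in key_map.items()}

data_dict['Parcel'] = mpcalc.parcel_profile(data_dict['Press'],
                                            data_dict['Temp'][0],
                                            data_dict['Dew_Point'][0]).to('degC')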

TypeError: unhashable type: 'numpy.ndarray' with Pandas after importing data from MYSQL

I ran the same program on two different data sources (a CSV file and a MySQL database). The CSV import runs fine, but the MySQL import throws the numpy type error below.
I'm guessing the issue may lie with one of these two points:
1. Data import issues - INT, TEXT, etc.? I'm using VARCHAR for the data.
2. An issue with how matplotlib works with pandas DataFrames?
I'm new, so please treat me as one :)
import pandas as pd
import numpy as np
import matplotlib.pyplot as pp
import seaborn
from sqlalchemy import create_engine
import pymysql

engine = create_engine("mysql+pymysql://root:root@127.0.0.1:3306/babynames", echo=False)
names = pd.read_sql_query('select * from BABYNAMES', engine)
names_indexed = names.set_index(['gender', 'name', 'year']).sort_index()

def plotname(gender, name):
    data = names_indexed.loc[gender, name]
    pp.plot(data.index, data.values)

plotname('F', 'Nancy')
Error code:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-32-9d981bcf8365> in <module>()
----> 1 plotname('F','Nancy')
<ipython-input-31-85c728659ad0> in plotname(gender, name)
1 def plotname(gender, name):
2 data = allyears_indexed.loc[gender, name]
----> 3 pp.plot(data.index, data.values)
~/anaconda3/lib/python3.7/site-packages/matplotlib/pyplot.py in plot(*args, **kwargs)
3361 mplDeprecation)
3362 try:
-> 3363 ret = ax.plot(*args, **kwargs)
3364 finally:
3365 ax._hold = washold
~/anaconda3/lib/python3.7/site-packages/matplotlib/__init__.py in inner(ax, *args, **kwargs)
1865 "the Matplotlib list!)" % (label_namer, func.__name__),
1866 RuntimeWarning, stacklevel=2)
-> 1867 return func(ax, *args, **kwargs)
1868
1869 inner.__doc__ = _add_data_doc(inner.__doc__,
~/anaconda3/lib/python3.7/site-packages/matplotlib/axes/_axes.py in plot(self, *args, **kwargs)
1526 kwargs = cbook.normalize_kwargs(kwargs, _alias_map)
1527
-> 1528 for line in self._get_lines(*args, **kwargs):
1529 self.add_line(line)
1530 lines.append(line)
~/anaconda3/lib/python3.7/site-packages/matplotlib/axes/_base.py in _grab_next_args(self, *args, **kwargs)
404 this += args[0],
405 args = args[1:]
--> 406 for seg in self._plot_args(this, kwargs):
407 yield seg
408
~/anaconda3/lib/python3.7/site-packages/matplotlib/axes/_base.py in _plot_args(self, tup, kwargs)
381 x, y = index_of(tup[-1])
382
--> 383 x, y = self._xy_from_xy(x, y)
384
385 if self.command == 'plot':
~/anaconda3/lib/python3.7/site-packages/matplotlib/axes/_base.py in _xy_from_xy(self, x, y)
214 if self.axes.xaxis is not None and self.axes.yaxis is not None:
215 bx = self.axes.xaxis.update_units(x)
--> 216 by = self.axes.yaxis.update_units(y)
217
218 if self.command != 'plot':
~/anaconda3/lib/python3.7/site-packages/matplotlib/axis.py in update_units(self, data)
1467 neednew = self.converter != converter
1468 self.converter = converter
-> 1469 default = self.converter.default_units(data, self)
1470 if default is not None and self.units is None:
1471 self.set_units(default)
~/anaconda3/lib/python3.7/site-packages/matplotlib/category.py in default_units(data, axis)
113 # default_units->axis_info->convert
114 if axis.units is None:
--> 115 axis.set_units(UnitData(data))
116 else:
117 axis.units.update(data)
~/anaconda3/lib/python3.7/site-packages/matplotlib/category.py in __init__(self, data)
180 self._counter = itertools.count(start=0)
181 if data is not None:
--> 182 self.update(data)
183
184 def update(self, data):
~/anaconda3/lib/python3.7/site-packages/matplotlib/category.py in update(self, data)
197 data = np.atleast_1d(np.array(data, dtype=object))
198
--> 199 for val in OrderedDict.fromkeys(data):
200 if not isinstance(val, VALID_TYPES):
201 raise TypeError("{val!r} is not a string".format(val=val))
TypeError: unhashable type: 'numpy.ndarray'
I was right: the issue was with the data types. I created a new table replacing VARCHAR with INT, then used the same code from above, and the program worked perfectly.
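If recreating the table is not convenient, a pandas-side alternative is to cast the offending VARCHAR columns to numeric right after reading them (a sketch; the column names 'year' and 'births' are assumptions about the babynames schema, adjust them to your table):
names = pd.read_sql_query('select * from BABYNAMES', engine)

# Columns stored as VARCHAR come back as strings/objects; convert the ones that
# should be numeric so matplotlib receives numbers rather than arrays of strings.
names['year'] = pd.to_numeric(names['year'])
names['births'] = pd.to_numeric(names['births'])  # assumed value column name

names_indexed = names.set_index(['gender', 'name', 'year']).sort_index()
plotname('F', 'Nancy')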

Get output of statsmodels acf function with a Pandas rolling window

I can extract an autocorrelation value for a specific lag time with this:
df.rolling(window = 10).apply(lambda x: acf(x, nlags = 5)[5]).plot()
However, since acf is doing all those calculations anyway, I'd like to get back everything it computes, not just a single value. The idea is that I could then unpack the returned array/list into a bunch of columns and plot each one separately, without running acf so many unnecessary times. So I tried:
df.rolling(window = 10).apply(lambda x: list(acf(x, nlags = 5)))
This throws the following error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-63-e5f337100eb5> in <module>()
----> 1 df.rolling(window = 10).apply(lambda x: list(acf(x, nlags = 5)))
/Users/a/anaconda3/lib/python3.5/site-packages/pandas/core/window.py in apply(self, func, args, kwargs)
861 @Appender(_shared_docs['apply'])
862 def apply(self, func, args=(), kwargs={}):
--> 863 return super(Rolling, self).apply(func, args=args, kwargs=kwargs)
864
865 @Substitution(name='rolling')
/Users/a/anaconda3/lib/python3.5/site-packages/pandas/core/window.py in apply(self, func, args, kwargs)
619
620 return self._apply(f, func, args=args, kwargs=kwargs,
--> 621 center=False)
622
623 def sum(self, **kwargs):
/Users/a/anaconda3/lib/python3.5/site-packages/pandas/core/window.py in _apply(self, func, name, window, center, check_minp, how, **kwargs)
556
557 if values.ndim > 1:
--> 558 result = np.apply_along_axis(calc, self.axis, values)
559 else:
560 result = calc(values)
/Users/a/anaconda3/lib/python3.5/site-packages/numpy/lib/shape_base.py in apply_along_axis(func1d, axis, arr, *args, **kwargs)
89 outshape = asarray(arr.shape).take(indlist)
90 i.put(indlist, ind)
---> 91 res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
92 # if res is a number, then we have a smaller output array
93 if isscalar(res):
/Users/a/anaconda3/lib/python3.5/site-packages/pandas/core/window.py in calc(x)
553
554 def calc(x):
--> 555 return func(x, window, min_periods=self.min_periods)
556
557 if values.ndim > 1:
/Users/a/anaconda3/lib/python3.5/site-packages/pandas/core/window.py in f(arg, window, min_periods)
616 minp = _use_window(min_periods, window)
617 return algos.roll_generic(arg, window, minp, offset, func, args,
--> 618 kwargs)
619
620 return self._apply(f, func, args=args, kwargs=kwargs,
pandas/algos.pyx in pandas.algos.roll_generic (pandas/algos.c:51581)()
TypeError: a float is required
Does this mean apply-style operations with rolling can only handle floats? With groupby I have often had occasion to return lists or sets, but perhaps rolling is not so flexible?
To plot acf results you may want to try tsaplots.plot_acf():
from statsmodels.graphics import tsaplots
tsaplots.plot_acf(x, lags = 5, alpha = 0.05)
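If the goal really is to reuse every lag that acf computes per window, note that Rolling.apply only accepts functions that return a single float, so one workaround (a sketch, assuming df is a Series and keeping the window of 10 and 5 lags from above) is to loop over the windows yourself and collect the full acf output:
import pandas as pd
from statsmodels.tsa.stattools import acf

window, nlags = 10, 5
rows = {}
for end in range(window, len(df) + 1):
    win = df.iloc[end - window:end]
    # One acf call per window returns lags 0..nlags all at once.
    rows[df.index[end - 1]] = acf(win, nlags=nlags)

acf_df = pd.DataFrame.from_dict(rows, orient='index',
                                columns=['lag_%d' % k for k in range(nlags + 1)])
acf_df.plot(subplots=True)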

pymc3: Disaster example with deterministic switchpoint function

I'm trying to reproduce the coal mining disasters example with a deterministic function for the switchpoint instead of using Theano's switch function. Code:
%matplotlib inline
import matplotlib.pyplot as plt
import pymc3
import numpy as np
import theano.tensor as t
import theano

data = np.hstack((np.random.poisson(15, 1000), np.random.poisson(2, 100)))
plt.plot(data)

@theano.compile.ops.as_op(itypes=[t.lscalar, t.dscalar, t.dscalar], otypes=[t.dvector])
def rate1(sw, mu1, mu2):
    n = len(data)
    out = np.empty(n)
    out[:sw] = mu1
    out[sw:] = mu2
    return out

with pymc3.Model() as dis:
    switchpoint = pymc3.DiscreteUniform('switchpoint', lower=0, upper=len(data) - 1)
    mu1 = pymc3.Exponential('mu1', lam=1.)
    mu2 = pymc3.Exponential('mu2', lam=1.)
    disasters = pymc3.Poisson('disasters', mu=rate1, observed=data)
But this code raises an error:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
c:\program files\git\theano\theano\tensor\type.py in dtype_specs(self)
266 'complex64': (complex, 'theano_complex64', 'NPY_COMPLEX64')
--> 267 }[self.dtype]
268 except KeyError:
KeyError: 'object'

During handling of the above exception, another exception occurred:

TypeError Traceback (most recent call last)
c:\program files\git\theano\theano\tensor\basic.py in constant_or_value(x, rtype, name, ndim, dtype)
407 rval = rtype(
--> 408 TensorType(dtype=x_.dtype, broadcastable=bcastable),
409 x_.copy(),
c:\program files\git\theano\theano\tensor\type.py in __init__(self, dtype, broadcastable, name, sparse_grad)
49 self.broadcastable = tuple(bool(b) for b in broadcastable)
---> 50 self.dtype_specs() # error checking is done there
51 self.name = name
c:\program files\git\theano\theano\tensor\type.py in dtype_specs(self)
269 raise TypeError("Unsupported dtype for %s: %s"
--> 270 % (self.__class__.__name__, self.dtype))
271
TypeError: Unsupported dtype for TensorType: object

During handling of the above exception, another exception occurred:

TypeError Traceback (most recent call last)
c:\program files\git\theano\theano\tensor\basic.py in as_tensor_variable(x, name, ndim)
201 try:
--> 202 return constant(x, name=name, ndim=ndim)
203 except TypeError:
c:\program files\git\theano\theano\tensor\basic.py in constant(x, name, ndim, dtype)
421 ret = constant_or_value(x, rtype=TensorConstant, name=name, ndim=ndim,
--> 422 dtype=dtype)
423
c:\program files\git\theano\theano\tensor\basic.py in constant_or_value(x, rtype, name, ndim, dtype)
416 except Exception:
--> 417 raise TypeError("Could not convert %s to TensorType" % x, type(x))
418
TypeError: ('Could not convert FromFunctionOp{rate1} to TensorType', )

During handling of the above exception, another exception occurred:

AsTensorError Traceback (most recent call last)
<ipython-input-...> in <module>()
14 mu2 = pymc3.Exponential('mu2',lam=1.)
15 #rate1 = pymc3.switch(switchpoint >= np.arange(len(data)), mu1,mu2)
---> 16 disasters=pymc3.Poisson('disasters', mu=rate1, observed = data)
C:\Users\User\Anaconda3\lib\site-packages\pymc3\distributions\distribution.py in __new__(cls, name, *args, **kwargs)
19 if isinstance(name, str):
20 data = kwargs.pop('observed', None)
---> 21 dist = cls.dist(*args, **kwargs)
22 return model.Var(name, dist, data)
23 elif name is None:
C:\Users\User\Anaconda3\lib\site-packages\pymc3\distributions\distribution.py in dist(cls, *args, **kwargs)
32 def dist(cls, *args, **kwargs):
33 dist = object.__new__(cls)
---> 34 dist.__init__(*args, **kwargs)
35 return dist
36
C:\Users\User\Anaconda3\lib\site-packages\pymc3\distributions\discrete.py in __init__(self, mu, *args, **kwargs)
185 super(Poisson, self).__init__(*args, **kwargs)
186 self.mu = mu
--> 187 self.mode = floor(mu).astype('int32')
188
189 def random(self, point=None, size=None, repeat=None):
c:\program files\git\theano\theano\gof\op.py in __call__(self, *inputs, **kwargs)
598 """
599 return_list = kwargs.pop('return_list', False)
--> 600 node = self.make_node(*inputs, **kwargs)
601
602 if config.compute_test_value != 'off':
c:\program files\git\theano\theano\tensor\elemwise.py in make_node(self, *inputs)
540 using DimShuffle.
541 """
--> 542 inputs = list(map(as_tensor_variable, inputs))
543 shadow = self.scalar_op.make_node(
544 *[get_scalar_type(dtype=i.type.dtype).make_variable()
c:\program files\git\theano\theano\tensor\basic.py in as_tensor_variable(x, name, ndim)
206 except Exception:
207 str_x = repr(x)
--> 208 raise AsTensorError("Cannot convert %s to TensorType" % str_x, type(x))
209
210 # this has a different name, because _as_tensor_variable is the
AsTensorError: ('Cannot convert FromFunctionOp{rate1} to TensorType', )
How do I handle this?
The second issue: when I use the pymc3.switch function like this:
with pymc3.Model() as dis:
    switchpoint = pymc3.DiscreteUniform('switchpoint', lower=0, upper=len(data) - 1)
    mu1 = pymc3.Exponential('mu1', lam=1.)
    mu2 = pymc3.Exponential('mu2', lam=1.)
    rate1 = pymc3.switch(switchpoint >= np.arange(len(data)), mu1, mu2)
    disasters = pymc3.Poisson('disasters', mu=rate1, observed=data)
And then try to sample:
with dis:
    step1 = pymc3.NUTS([mu1, mu2])
    step2 = pymc3.Metropolis([switchpoint])
    trace = pymc3.sample(10000, step=[step1, step2])
I get an error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
c:\program files\git\theano\theano\compile\function_module.py in __call__(self, *args, **kwargs)
858 try:
--> 859 outputs = self.fn()
860 except Exception:
TypeError: expected type_num 9 (NPY_INT64) got 7
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-4-3247d908f897> in <module>()
2 step1 = pymc3.NUTS([mu1, mu2])
3 step2 = pymc3.Metropolis([switchpoint])
----> 4 trace = pymc3.sample(10000, step = [step1,step2])
C:\Users\User\Anaconda3\lib\site-packages\pymc3\sampling.py in sample(draws, step, start, trace, chain, njobs, tune, progressbar, model, random_seed)
153 sample_args = [draws, step, start, trace, chain,
154 tune, progressbar, model, random_seed]
--> 155 return sample_func(*sample_args)
156
157
C:\Users\User\Anaconda3\lib\site-packages\pymc3\sampling.py in _sample(draws, step, start, trace, chain, tune, progressbar, model, random_seed)
162 progress = progress_bar(draws)
163 try:
--> 164 for i, strace in enumerate(sampling):
165 if progressbar:
166 progress.update(i)
C:\Users\User\Anaconda3\lib\site-packages\pymc3\sampling.py in _iter_sample(draws, step, start, trace, chain, tune, model, random_seed)
244 if i == tune:
245 step = stop_tuning(step)
--> 246 point = step.step(point)
247 strace.record(point)
248 yield strace
C:\Users\User\Anaconda3\lib\site-packages\pymc3\step_methods\compound.py in step(self, point)
11 def step(self, point):
12 for method in self.methods:
---> 13 point = method.step(point)
14 return point
C:\Users\User\Anaconda3\lib\site-packages\pymc3\step_methods\arraystep.py in step(self, point)
116 bij = DictToArrayBijection(self.ordering, point)
117
--> 118 apoint = self.astep(bij.map(point))
119 return bij.rmap(apoint)
120
C:\Users\User\Anaconda3\lib\site-packages\pymc3\step_methods\metropolis.py in astep(self, q0)
123
124
--> 125 q_new = metrop_select(self.delta_logp(q,q0), q, q0)
126
127 if q_new is q:
c:\program files\git\theano\theano\compile\function_module.py in __call__(self, *args, **kwargs)
869 node=self.fn.nodes[self.fn.position_of_error],
870 thunk=thunk,
--> 871 storage_map=getattr(self.fn, 'storage_map', None))
872 else:
873 # old-style linkers raise their own exceptions
c:\program files\git\theano\theano\gof\link.py in raise_with_op(node, thunk, exc_info, storage_map)
312 # extra long error message in that case.
313 pass
--> 314 reraise(exc_type, exc_value, exc_trace)
315
316
C:\Users\User\Anaconda3\lib\site-packages\six.py in reraise(tp, value, tb)
656 value = tp()
657 if value.__traceback__ is not tb:
--> 658 raise value.with_traceback(tb)
659 raise value
660
c:\program files\git\theano\theano\compile\function_module.py in __call__(self, *args, **kwargs)
857 t0_fn = time.time()
858 try:
--> 859 outputs = self.fn()
860 except Exception:
861 if hasattr(self.fn, 'position_of_error'):
TypeError: expected type_num 9 (NPY_INT64) got 7
Apply node that caused the error: Elemwise{Composite{Switch(GE(i0, i1), i2, i3)}}(InplaceDimShuffle{x}.0, TensorConstant{[ 0 1..1098 1099]}, InplaceDimShuffle{x}.0, InplaceDimShuffle{x}.0)
Toposort index: 11
Inputs types: [TensorType(int64, (True,)), TensorType(int32, vector), TensorType(float64, (True,)), TensorType(float64, (True,))]
Inputs shapes: [(1,), (1100,), (1,), (1,)]
Inputs strides: [(4,), (4,), (8,), (8,)]
Inputs values: [array([549]), 'not shown', array([ 1.07762995]), array([ 1.01502801])]
Outputs clients: [[Elemwise{eq,no_inplace}(Elemwise{Composite{Switch(GE(i0, i1), i2, i3)}}.0, TensorConstant{(1,) of 0}), Elemwise{Composite{Switch(GE(i0, i1), ((Switch(i2, i3, (i4 * log(i0))) - i5) - i0), i3)}}[(0, 0)](Elemwise{Composite{Switch(GE(i0, i1), i2, i3)}}.0, TensorConstant{(1,) of 0}, InplaceDimShuffle{x}.0, TensorConstant{(1,) of -inf}, TensorConstant{[ 13. 13... 0. 1.]}, TensorConstant{[ 22.55216... ]})]]
HINT: Re-running with most Theano optimization disabled could give you a back-trace of when this node was created. This can be done with by setting the Theano flag 'optimizer=fast_compile'. If that does not work, Theano optimizations can be disabled with 'optimizer=None'.
HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.
Being a simple analyst, do I need to learn all this Theano machinery to be able to work on my statistical problems? Is the new gradient-based MCMC sampler the only thing that should motivate a switch from PyMC2 to PyMC3?
For your first question, it looks like you're trying to pass the Theano function itself as a variable. You need to call the function with the other variables as arguments, which will then return a Theano variable. Try changing your line to
disasters=pymc3.Poisson('disasters', mu=rate1(switchpoint, mu1, mu2), observed = data)
I couldn't reproduce the error in your second part; the sampling worked just fine for me.
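For reference, here is a sketch of the first model with that change applied (the as_op-wrapped rate1 provides no gradient, so gradient-based samplers such as NUTS cannot be used for variables that flow through it; Metropolis or Slice steps work):
import numpy as np
import pymc3
import theano
import theano.tensor as t

data = np.hstack((np.random.poisson(15, 1000), np.random.poisson(2, 100)))

@theano.compile.ops.as_op(itypes=[t.lscalar, t.dscalar, t.dscalar], otypes=[t.dvector])
def rate1(sw, mu1, mu2):
    out = np.empty(len(data))
    out[:sw] = mu1
    out[sw:] = mu2
    return out

with pymc3.Model() as dis:
    switchpoint = pymc3.DiscreteUniform('switchpoint', lower=0, upper=len(data) - 1)
    mu1 = pymc3.Exponential('mu1', lam=1.)
    mu2 = pymc3.Exponential('mu2', lam=1.)
    # Call rate1 so that Poisson receives a Theano tensor, not the Op object itself.
    disasters = pymc3.Poisson('disasters', mu=rate1(switchpoint, mu1, mu2), observed=data)

with dis:
    # No gradients are available through the as_op function, so use
    # non-gradient step methods for all variables.
    step1 = pymc3.Slice([mu1, mu2])
    step2 = pymc3.Metropolis([switchpoint])
    trace = pymc3.sample(10000, step=[step1, step2])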
