Pandas pivot_table AssertionError: `result` has not been initialized - python

df:
avg        count  date                 val   prop      unit     distance  d-atmp   d-clouds  d-dewpoint
0.0786107  12     2014-10-03 00:00:00  22    atmp      (Deg C)  24829.6   24829.6  nan       nan
0.0786107  12     2014-10-03 00:00:00  0     clouds    (oktas)  22000.6   nan      22000.6   nan
0.0786107  12     2014-10-03 00:00:00  32    dewpoint  (Deg C)  21344.1   nan      nan       21344.1
0.0684246  6      2014-10-04 00:00:00  21.5  atmp      (Deg C)  26345.1   26345.1  nan       nan

cols = ['avg', 'date', 'count', 'd-atmp', 'd-clouds', 'd-dewpoint']
d = pd.pivot_table(df, index=cols, columns=['prop', 'unit'], values='val', aggfunc=max)
Ideal result:

date                 countObs  avg        d-atmp   atmp (Deg C)  d-clouds  clouds (oktas)  d-dewpoint  dewpoint (Deg C)
2014-10-03 00:00:00  12        0.0786107  24829.6  22            22000.6   0               21344.1     32
2014-10-04 00:00:00  6         0.0684246  26345.1  21.5          nan       nan             nan         nan
Error:

---------------------------------------------------------------------------
NotImplementedError                       Traceback (most recent call last)
~/.local/lib/python3.9/site-packages/pandas/core/groupby/generic.py in array_func(values)
   1067     try:
-> 1068         result = self.grouper._cython_operation(
   1069             "aggregate", values, how, axis=data.ndim - 1, min_count=min_count

~/.local/lib/python3.9/site-packages/pandas/core/groupby/ops.py in _cython_operation(self, kind, values, how, axis, min_count, **kwargs)
    998     ngroups = self.ngroups
--> 999     return cy_op.cython_operation(
   1000         values=values,

...

~/.local/lib/python3.9/site-packages/pandas/core/groupby/ops.py in _get_cython_function(cls, kind, how, dtype, is_numeric)
    169     # raise NotImplementedError here rather than TypeError later
--> 170     raise NotImplementedError(
    171         f"function is not implemented for this dtype: "

NotImplementedError: function is not implemented for this dtype: [how->mean,dtype->object]

During handling of the above exception, another exception occurred:

AssertionError                            Traceback (most recent call last)
<ipython-input-119-b64b487d2810> in <module>
      5 # o
      6 # cols += []
----> 7 d = pd.pivot_table(x, index=cols, columns=['osmcObsProperty', 'unit'], values='val') #, aggfunc=max #np.mean or max appear similar , dropna=False
      8
      9 d.reset_index(inplace=True)

~/.local/lib/python3.9/site-packages/pandas/core/reshape/pivot.py in pivot_table(data, values, index, columns, aggfunc, fill_value, margins, dropna, margins_name, observed, sort)
---> 95     table = __internal_pivot_table(
     96         data,
     97         values,

~/.local/lib/python3.9/site-packages/pandas/core/reshape/pivot.py in __internal_pivot_table(data, values, index, columns, aggfunc, fill_value, margins, dropna, margins_name, observed, sort)
    164     grouped = data.groupby(keys, observed=observed, sort=sort)
--> 165     agged = grouped.agg(aggfunc)
    166     if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
    167         agged = agged.dropna(how="all")

~/.local/lib/python3.9/site-packages/pandas/core/groupby/generic.py in aggregate(self, func, engine, engine_kwargs, *args, **kwargs)
    978     op = GroupByApply(self, func, args, kwargs)
--> 979     result = op.agg()
    980     if not is_dict_like(func) and result is not None:
    981         return result

...

~/.local/lib/python3.9/site-packages/pandas/core/groupby/groupby.py in mean(self, numeric_only)
-> 1687     result = self._cython_agg_general(
   1688         "mean",
   1689         alt=lambda x: Series(x).mean(numeric_only=numeric_only),

~/.local/lib/python3.9/site-packages/pandas/core/groupby/generic.py in _cython_agg_general(self, how, alt, numeric_only, min_count)
-> 1082     new_mgr = data.grouped_reduce(array_func, ignore_failures=True)

~/.local/lib/python3.9/site-packages/pandas/core/internals/managers.py in grouped_reduce(self, func, ignore_failures)
-> 1235     applied = sb.apply(func)

~/.local/lib/python3.9/site-packages/pandas/core/groupby/generic.py in array_func(values)
   1074     # try to python agg
   1075     # TODO: shouldn't min_count matter?
-> 1076     result = self._agg_py_fallback(values, ndim=data.ndim, alt=alt)

~/.local/lib/python3.9/site-packages/pandas/core/groupby/groupby.py in _agg_py_fallback(self, values, ndim, alt)
-> 1398     res_values = self.grouper.agg_series(ser, alt, preserve_dtype=True)

~/.local/lib/python3.9/site-packages/pandas/core/groupby/ops.py in agg_series(self, obj, func, preserve_dtype)
-> 1049     result = self._aggregate_series_fast(obj, func)

~/.local/lib/python3.9/site-packages/pandas/core/groupby/ops.py in _aggregate_series_fast(self, obj, func)
   1073     sgrouper = libreduction.SeriesGrouper(obj, func, ids, ngroups)
-> 1074     result, _ = sgrouper.get_result()

~/.local/lib/python3.9/site-packages/pandas/_libs/reduction.pyx in pandas._libs.reduction.SeriesGrouper.get_result()

AssertionError: `result` has not been initialized.

IIUC, you can use groupby + agg:
out = df.groupby('date', as_index=False).agg(max)
Output:

date                 avg        count  val   prop      unit     distance  d-atmp   d-clouds  d-dewpoint
2014-10-03 00:00:00  0.0786107  12     32    dewpoint  (oktas)  24829.6   24829.6  22000.6   21344.1
2014-10-04 00:00:00  0.0684246  6      21.5  atmp      (Deg C)  26345.1   26345.1  nan       nan
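
To make that reproducible, here is a minimal, self-contained sketch; the values are taken from the sample table above, and the frame name df is an assumption (the question builds it elsewhere):

import pandas as pd
import numpy as np

df = pd.DataFrame({
    'avg': [0.0786107, 0.0786107, 0.0786107, 0.0684246],
    'count': [12, 12, 12, 6],
    'date': pd.to_datetime(['2014-10-03', '2014-10-03', '2014-10-03', '2014-10-04']),
    'val': [22, 0, 32, 21.5],
    'prop': ['atmp', 'clouds', 'dewpoint', 'atmp'],
    'unit': ['(Deg C)', '(oktas)', '(Deg C)', '(Deg C)'],
    'distance': [24829.6, 22000.6, 21344.1, 26345.1],
    'd-atmp': [24829.6, np.nan, np.nan, 26345.1],
    'd-clouds': [np.nan, 22000.6, np.nan, np.nan],
    'd-dewpoint': [np.nan, np.nan, 21344.1, np.nan],
})

out = df.groupby('date', as_index=False).agg(max)

Note that agg(max) also takes the lexicographic maximum of the string columns, which is why the first output row shows dewpoint together with (oktas).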

You could pivot; then use groupby + max:
cols = ['avg', 'date', 'count', 'd-atmp', 'd-clouds', 'd-dewpoint']
tmp = df.pivot(index=cols, columns=['prop', 'unit'], values='val')
tmp.columns = tmp.columns.map(' '.join)
out = tmp.reset_index().groupby('date', as_index=False).max()\
         [['date', 'count', 'avg', 'd-atmp', 'atmp (Deg C)', 'd-clouds',
           'clouds (oktas)', 'd-dewpoint', 'dewpoint (Deg C)']]
Output:
date count avg d-atmp atmp (Deg C) d-clouds clouds (oktas) d-dewpoint dewpoint (Deg C)
0 2014-10-03 00:00:00 12 0.078611 24829.6 22.0 22000.6 0.0 21344.1 32.0
1 2014-10-04 00:00:00 6 0.068425 26345.1 21.5 NaN NaN NaN NaN
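
For context, the AssertionError in the question is a secondary failure: pivot_table defaults to aggfunc='mean', and the inner NotImplementedError shows it was asked to average object-dtype data (how->mean, dtype->object). A hedged sketch of the kind of check and cast that avoids that path, reusing the column names from the question:

# see which columns are object dtype, then coerce the numeric one(s) before pivoting
print(df.dtypes)
df['val'] = pd.to_numeric(df['val'], errors='coerce')

cols = ['avg', 'date', 'count', 'd-atmp', 'd-clouds', 'd-dewpoint']
d = pd.pivot_table(df, index=cols, columns=['prop', 'unit'], values='val', aggfunc='max')

Also note that NaNs in the index columns (d-atmp, d-clouds, d-dewpoint) cause those rows to be dropped by the underlying groupby, which is another reason the groupby-based answers above are the simpler route to the ideal result.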

Related

Resampling of categorical column in pandas data frame

I need some help figuring this out; I have been trying a few things without success. I have a pandas data frame shown below (at the end).
The data is available at irregular intervals (the frequency is not fixed). I want to resample it at a fixed frequency, e.g. every 1 minute. If the column is a float, taking the mean every minute works fine:
df1.resample('1T',base = 1).mean()
but since the data is categorical, a mean doesn't make sense; I also tried sum, which doesn't make sense for this sampling either. What I essentially need is the most frequent value of the column within each 1-minute bin. To do this I applied the following custom function to the values that fall in each minute when resampling:
def custome_mod(arraylike):
    vals, counts = np.unique(arraylike, return_counts=True)
    return np.argwhere(counts == np.max(counts))

df1.resample('1T', base=1).apply(custome_mod)
The output I am expecting is a data frame at 1-minute frequency, holding the value with the maximum count among the data that falls in that minute.
For some reason it does not seem to work and gives me an error. I have been trying to debug it for a very long time. Can somebody please provide some input / check the code?
The error I get is the following:
ValueError: zero-size array to reduction operation maximum which has no identity
ValueError Traceback (most recent call last)
/databricks/python/lib/python3.7/site-packages/pandas/core/groupby/generic.py in aggregate(self, func, *args, **kwargs)
264 try:
--> 265 return self._python_agg_general(func, *args, **kwargs)
266 except (ValueError, KeyError):
/databricks/python/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in _python_agg_general(self, func, *args, **kwargs)
935
--> 936 result, counts = self.grouper.agg_series(obj, f)
937 assert result is not None
/databricks/python/lib/python3.7/site-packages/pandas/core/groupby/ops.py in agg_series(self, obj, func)
862 grouper = libreduction.SeriesBinGrouper(obj, func, self.bins, dummy)
--> 863 return grouper.get_result()
864
pandas/_libs/reduction.pyx in pandas._libs.reduction.SeriesBinGrouper.get_result()
pandas/_libs/reduction.pyx in pandas._libs.reduction._BaseGrouper._apply_to_group()
pandas/_libs/reduction.pyx in pandas._libs.reduction._check_result_array()
ValueError: Function does not reduce
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
/databricks/python/lib/python3.7/site-packages/pandas/core/resample.py in _groupby_and_aggregate(self, how, grouper, *args, **kwargs)
358 # Check if the function is reducing or not.
--> 359 result = grouped._aggregate_item_by_item(how, *args, **kwargs)
360 else:
/databricks/python/lib/python3.7/site-packages/pandas/core/groupby/generic.py in _aggregate_item_by_item(self, func, *args, **kwargs)
1171 try:
-> 1172 result[item] = colg.aggregate(func, *args, **kwargs)
1173
/databricks/python/lib/python3.7/site-packages/pandas/core/groupby/generic.py in aggregate(self, func, *args, **kwargs)
268 # see see test_groupby.test_basic
--> 269 result = self._aggregate_named(func, *args, **kwargs)
270
/databricks/python/lib/python3.7/site-packages/pandas/core/groupby/generic.py in _aggregate_named(self, func, *args, **kwargs)
453 if isinstance(output, (Series, Index, np.ndarray)):
--> 454 raise ValueError("Must produce aggregated value")
455 result[name] = output
ValueError: Must produce aggregated value
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<command-36984414005459> in <module>
----> 1 df1.resample('1T',base = 1).apply(custome_mod)
/databricks/python/lib/python3.7/site-packages/pandas/core/resample.py in aggregate(self, func, *args, **kwargs)
283 how = func
284 grouper = None
--> 285 result = self._groupby_and_aggregate(how, grouper, *args, **kwargs)
286
287 result = self._apply_loffset(result)
/databricks/python/lib/python3.7/site-packages/pandas/core/resample.py in _groupby_and_aggregate(self, how, grouper, *args, **kwargs)
380 # we have a non-reducing function
381 # try to evaluate
--> 382 result = grouped.apply(how, *args, **kwargs)
383
384 result = self._apply_loffset(result)
/databricks/python/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in apply(self, func, *args, **kwargs)
733 with option_context("mode.chained_assignment", None):
734 try:
--> 735 result = self._python_apply_general(f)
736 except TypeError:
737 # gh-20949
/databricks/python/lib/python3.7/site-packages/pandas/core/groupby/groupby.py in _python_apply_general(self, f)
749
750 def _python_apply_general(self, f):
--> 751 keys, values, mutated = self.grouper.apply(f, self._selected_obj, self.axis)
752
753 return self._wrap_applied_output(
/databricks/python/lib/python3.7/site-packages/pandas/core/groupby/ops.py in apply(self, f, data, axis)
204 # group might be modified
205 group_axes = group.axes
--> 206 res = f(group)
207 if not _is_indexed_like(res, group_axes):
208 mutated = True
<command-36984414005658> in custome_mod(arraylike)
1 def custome_mod(arraylike):
2 vals, counts = np.unique(arraylike, return_counts=True)
----> 3 return (np.argwhere(counts == np.max(counts)))
<__array_function__ internals> in amax(*args, **kwargs)
/databricks/python/lib/python3.7/site-packages/numpy/core/fromnumeric.py in amax(a, axis, out, keepdims, initial, where)
2666 """
2667 return _wrapreduction(a, np.maximum, 'max', axis, None, out,
-> 2668 keepdims=keepdims, initial=initial, where=where)
2669
2670
/databricks/python/lib/python3.7/site-packages/numpy/core/fromnumeric.py in _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs)
88 return reduction(axis=axis, out=out, **passkwargs)
89
---> 90 return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
91
92
ValueError: zero-size array to reduction operation maximum which has no identity
Sample Dataframe and expected Output
Sample Df
6/3/2021 1:19:05 0
6/3/2021 1:19:15 1
6/3/2021 1:19:26 1
6/3/2021 1:19:38 1
6/3/2021 1:20:06 0
6/3/2021 1:20:16 0
6/3/2021 1:20:36 1
6/3/2021 1:21:09 1
6/3/2021 1:21:19 1
6/3/2021 1:21:45 0
6/4/2021 1:19:15 0
6/4/2021 1:19:25 0
6/4/2021 1:19:36 0
6/4/2021 1:19:48 1
6/4/2021 1:22:26 1
6/4/2021 1:22:36 0
6/4/2021 1:22:46 0
6/5/2021 2:20:19 0
6/5/2021 2:20:21 1
6/5/2021 2:20:40 0
Expected Output
6/3/2021 1:19 1
6/3/2021 1:20 0
6/3/2021 1:21 1
6/4/2021 1:19 0
6/4/2021 1:22 0
6/5/2021 2:20 0
Notice that the original data frame has data at an irregular frequency (sometimes every 5 seconds, sometimes every 20 seconds, etc.). The expected output is shown above: I need the data every 1 minute (resampled to minutes instead of the original irregular seconds), and the categorical column should hold the most frequent value during that minute. For example, in the original data the 19th minute has four data points and the most frequent value is 1; the 20th minute has three data points and the most frequent is 0; the 21st minute has three data points and the most frequent is 1. The data I am working with has 20 million rows, so this is also an effort to reduce the data dimension.
After getting the expected output I would group by the column and count. That count will be in minutes, so I will know how long this column was 1 (in time).
Update after your edit:
out = df.set_index(pd.to_datetime(df.index).floor('T')) \
        .groupby(level=0)['category'] \
        .apply(lambda x: x.value_counts().idxmax())
print(out)
# Output
2021-06-03 01:19:00 1
2021-06-03 01:20:00 0
2021-06-03 01:21:00 1
2021-06-04 01:19:00 0
2021-06-04 01:22:00 0
2021-06-05 02:20:00 0
Name: category, dtype: int64
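
As an aside, the original ValueError ("zero-size array to reduction operation maximum") comes from resample creating empty bins for the minutes with no rows, which np.max cannot reduce. A sketch that stays with resample but guards against empty groups (assuming the single column is named category, as above):

import numpy as np

def per_minute_mode(s):
    if s.empty:                       # resample emits empty bins for missing minutes
        return np.nan
    return s.value_counts().idxmax()  # most frequent value in that minute

out = df['category'].resample('1T').apply(per_minute_mode)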
Old answer
# I used 'D' instead of 'T'
>>> df.set_index(df.index.floor('D')).groupby(level=0).count()
category
2021-06-03 6
2021-06-04 2
2021-06-06 1
2021-06-08 1
2021-06-25 1
2021-06-29 6
2021-06-30 3
# OR
>>> df.set_index(df.index.floor('D')).groupby(level=0).sum()
category
2021-06-03 2
2021-06-04 0
2021-06-06 1
2021-06-08 1
2021-06-25 0
2021-06-29 3
2021-06-30 1

Python Pandas apply qcut to grouped by level 0 of multi-index in multi-index dataframe

I have a multi-index dataframe in pandas (date and entity_id), and for each date/entity I have observations of a number of variables (A, B, ...). My goal is to create a dataframe with the same shape but where the values are replaced by their decile scores.
My test data looks like this:
I want to apply qcut to each column, grouped by level 0 of the multi-index; the issue I have is creating the result DataFrame.
This code
def qcut_sub_index(df_with_sub_index):
    # create empty return value same shape as passed dataframe
    df_return = pd.DataFrame()
    for date, sub_df in df_with_sub_index.groupby(level=0):
        df_return = df_return.append(pd.DataFrame(pd.qcut(sub_df, 10, labels=False, duplicates='drop')))
    print(df_return)
    return df_return

print(df_values.apply(lambda x: qcut_sub_index(x), axis=0))
returns
A
as_at_date entity_id
2008-01-27 2928 0
2932 3
3083 6
3333 9
2008-02-27 2928 3
2935 9
3333 0
3874 6
2008-03-27 2928 1
2932 2
2934 0
2936 9
2937 4
2939 9
2940 7
2943 3
2944 0
2945 8
2946 6
2947 5
2949 4
B
as_at_date entity_id
2008-01-27 2928 9
2932 6
3083 0
3333 3
2008-02-27 2928 6
2935 0
3333 3
3874 9
2008-03-27 2928 0
2932 9
2934 2
2936 8
2937 7
2939 6
2940 3
2943 1
2944 4
2945 9
2946 5
2947 4
2949 0
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-104-72ff0e6da288> in <module>
11
12
---> 13 print(df_values.apply(lambda x: qcut_sub_index(x), axis=0))
~\Anaconda3\lib\site-packages\pandas\core\frame.py in apply(self, func, axis, raw, result_type, args, **kwds)
7546 kwds=kwds,
7547 )
-> 7548 return op.get_result()
7549
7550 def applymap(self, func) -> "DataFrame":
~\Anaconda3\lib\site-packages\pandas\core\apply.py in get_result(self)
178 return self.apply_raw()
179
--> 180 return self.apply_standard()
181
182 def apply_empty_result(self):
~\Anaconda3\lib\site-packages\pandas\core\apply.py in apply_standard(self)
272
273 # wrap results
--> 274 return self.wrap_results(results, res_index)
275
276 def apply_series_generator(self) -> Tuple[ResType, "Index"]:
~\Anaconda3\lib\site-packages\pandas\core\apply.py in wrap_results(self, results, res_index)
313 # see if we can infer the results
314 if len(results) > 0 and 0 in results and is_sequence(results[0]):
--> 315 return self.wrap_results_for_axis(results, res_index)
316
317 # dict of scalars
~\Anaconda3\lib\site-packages\pandas\core\apply.py in wrap_results_for_axis(self, results, res_index)
369
370 try:
--> 371 result = self.obj._constructor(data=results)
372 except ValueError as err:
373 if "arrays must all be same length" in str(err):
~\Anaconda3\lib\site-packages\pandas\core\frame.py in __init__(self, data, index, columns, dtype, copy)
466
467 elif isinstance(data, dict):
--> 468 mgr = init_dict(data, index, columns, dtype=dtype)
469 elif isinstance(data, ma.MaskedArray):
470 import numpy.ma.mrecords as mrecords
~\Anaconda3\lib\site-packages\pandas\core\internals\construction.py in init_dict(data, index, columns, dtype)
281 arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays
282 ]
--> 283 return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
284
285
~\Anaconda3\lib\site-packages\pandas\core\internals\construction.py in arrays_to_mgr(arrays, arr_names, index, columns, dtype, verify_integrity)
76 # figure out the index, if necessary
77 if index is None:
---> 78 index = extract_index(arrays)
79 else:
80 index = ensure_index(index)
~\Anaconda3\lib\site-packages\pandas\core\internals\construction.py in extract_index(data)
385
386 if not indexes and not raw_lengths:
--> 387 raise ValueError("If using all scalar values, you must pass an index")
388
389 if have_series:
ValueError: If using all scalar values, you must pass an index
so something is preventing the second application of the lambda function.
I'd appreciate your help, thanks for taking a look.
p.s. if this can be done implicitly without using apply, I would love to hear it. Thanks.
Your solution appears overcomplicated, and the terminology is non-standard: multi-indexes have levels, so this is qcut() by level 0 of the multi-index (there are no "sub-frames" in pandas). Bringing it all together:
use the **kwargs approach to pass arguments to assign() for all columns in the data frame
groupby(level=0) is as_at_date
transform() to get a row back for every entry in the index
import datetime as dt
import numpy as np
import pandas as pd

s = 12
df = pd.DataFrame({"as_at_date": np.random.choice(pd.date_range(dt.date(2020,1,27), periods=3, freq="M"), s),
                   "entity_id": np.random.randint(2900, 3500, s),
                   "A": np.random.random(s),
                   "B": np.random.random(s)*(10**np.random.randint(8,10,s))
                  }).sort_values(["as_at_date","entity_id"])
df = df.set_index(["as_at_date","entity_id"])
df2 = df.assign(**{c: df.groupby(level=0)[c].transform(lambda x: pd.qcut(x, 10, labels=False))
                   for c in df.columns})
df
A B
as_at_date entity_id
2020-01-31 2926 0.770121 2.883519e+07
2943 0.187747 1.167975e+08
2973 0.371721 3.133071e+07
3104 0.243347 4.497294e+08
3253 0.591022 7.796131e+08
3362 0.810001 6.438441e+08
2020-02-29 3185 0.690875 4.513044e+08
3304 0.311436 4.561929e+07
2020-03-31 2953 0.325846 7.770111e+08
2981 0.918461 7.594753e+08
3034 0.133053 6.767501e+08
3355 0.624519 6.318104e+07
df2
A B
as_at_date entity_id
2020-01-31 2926 7 0
2943 0 3
2973 3 1
3104 1 5
3253 5 9
3362 9 7
2020-02-29 3185 9 9
3304 0 0
2020-03-31 2953 3 9
2981 9 6
3034 0 3
3355 6 0
Using concat inside an iteration over the original dataframe does the trick, but is there a smarter way to do this?
Thanks.
def qcut_sub_index(df_with_sub_index):
    # create empty return value same shape as passed dataframe
    df_return = pd.DataFrame()
    for date, sub_df in df_with_sub_index.groupby(level=0):
        df_return = df_return.append(pd.DataFrame(pd.qcut(sub_df, 10, labels=False,
                                                          duplicates='drop')))
    return df_return

df_x = pd.DataFrame()
for (columnName, columnData) in df_values.iteritems():
    df_x = pd.concat([df_x, qcut_sub_index(columnData)], axis=1, join="outer")

df_x
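
As an aside, the DataFrame.append used above was deprecated and has been removed in pandas 2.0; a sketch of the same per-date qcut collected with pd.concat instead (df_values as in the question):

def qcut_sub_index(col):
    parts = [pd.qcut(sub, 10, labels=False, duplicates='drop')
             for _, sub in col.groupby(level=0)]   # one decile Series per as_at_date
    return pd.concat(parts)

df_x = pd.concat({name: qcut_sub_index(col) for name, col in df_values.items()}, axis=1)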

Select filter an alphanumeric column by the dates it contains

I have a dataframe df and I want to get the rows where column Election is equal to a specific year.
Unnamed: 0 Map Level Precinct ID Precinct Name Election Invalid Ballots (%) More Ballots Than Votes (#) More Votes Than Ballots (#) Total Voter Turnout (#) Total Voter Turnout (%) ... Average votes per minute (17:00-20:00) CDM ED FG GD LP NR UNM Results others
0 0 Precinct 1 63-1 2008 Parliamentary 0.0 0.0 0.0 749 62.11 ... 1.01 0.0 0.0 0.0 0.0 0.0 0.0 77.17 United National Movement 22.83
1 1 Precinct 10 63-10 2008 Parliamentary 0.0 0.0 0.0 419 70.42 ... 0.61 0.0 0.0 0.0 0.0 0.0 0.0 71.12 United National Movement 28.87
...
136 159 Precinct 8 63-1 2013 Presidential 1.75 0.0 0.0 506 50.75 ... 0.52 2.96 0.20 0.00 0.00 1.19 0.00 0.00 Giorgi Margvelashvili 95.65
137 160 Precinct 9 63-10 2013 Presidential 2.50 0.0 0.0 625 48.04 ... 0.66 1.92 0.80 0.00 0.00 1.60 0.00 0.00 Giorgi Margvelashvili 95.68
Let's say I want the 2008 election, so I wrote the following function:
def results_precinct_election(precinct, election_year):
    df['Election'] = df['Election'].astype(int)
    df_election = df.loc[df['Election'] == election_year]
    x = df_election[["Christian-Democratic Movement", "European Democrats", "Free Georgia", "Georgian Dream", "Labour Party", "New Right", "United National Movement", "others"]]
But I received:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-203-317aa5d54b6d> in <module>
----> 1 x = results_precinct_election("63-1", 2008)
2 y = results_precinct_election("63-1", 2013)
3 random.seed(0)
4 beta = estimate_beta()
<ipython-input-202-1cd2d166f35a> in results_precinct_election(precinct, election)
1 # I want a line of a given precinct for a given election
2 def results_precinct_election(precinct,election):
----> 3 df['Election'] = df['Election'].astype(int)
4 df_election = df.loc[df['Election'] == election and df['Precinct Name'] == precinct]
5 x = df_election[["Christian-Democratic Movement","European Democrats","Free Georgia","Georgian Dream","Labour Party","New Right","United National Movement","others"]]
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\generic.py in astype(self, dtype, copy, errors, **kwargs)
5689 # else, only a single dtype is given
5690 new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,
-> 5691 **kwargs)
5692 return self._constructor(new_data).__finalize__(self)
5693
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\internals\managers.py in astype(self, dtype, **kwargs)
529
530 def astype(self, dtype, **kwargs):
--> 531 return self.apply('astype', dtype=dtype, **kwargs)
532
533 def convert(self, **kwargs):
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\internals\managers.py in apply(self, f, axes, filter, do_integrity_check, consolidate, **kwargs)
393 copy=align_copy)
394
--> 395 applied = getattr(b, f)(**kwargs)
396 result_blocks = _extend_blocks(applied, result_blocks)
397
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\internals\blocks.py in astype(self, dtype, copy, errors, values, **kwargs)
532 def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs):
533 return self._astype(dtype, copy=copy, errors=errors, values=values,
--> 534 **kwargs)
535
536 def _astype(self, dtype, copy=False, errors='raise', values=None,
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\internals\blocks.py in _astype(self, dtype, copy, errors, values, **kwargs)
631
632 # _astype_nansafe works fine with 1-d only
--> 633 values = astype_nansafe(values.ravel(), dtype, copy=True)
634
635 # TODO(extension)
C:\ProgramData\Anaconda3\lib\site-packages\pandas\core\dtypes\cast.py in astype_nansafe(arr, dtype, copy, skipna)
681 # work around NumPy brokenness, #1987
682 if np.issubdtype(dtype.type, np.integer):
--> 683 return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
684
685 # if we have a datetime/timedelta array of objects
pandas/_libs/lib.pyx in pandas._libs.lib.astype_intsafe()
ValueError: invalid literal for int() with base 10: '2008 Parliamentary - Majoritarian'
The problem is that the column contains some non-numeric values.
One possible solution is to convert them to NaNs:
df['Election'] = pd.to_numeric(df['Election'], errors='coerce')
Or extract the year numbers; rows with no number again become missing values:
df['Election'] = df['Election'].str.extract(r'(\d+)').astype(int)
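
Putting it together, a sketch of the full helper (the party-result column names are copied from the question); it also combines the two conditions with &, since chaining Series comparisons with Python's `and`, as in the traceback, raises an error:

def results_precinct_election(precinct, election_year):
    # extract a 4-digit year from strings like '2008 Parliamentary - Majoritarian'
    years = pd.to_numeric(df['Election'].str.extract(r'(\d{4})', expand=False),
                          errors='coerce')
    mask = (years == election_year) & (df['Precinct Name'] == precinct)
    return df.loc[mask, ["Christian-Democratic Movement", "European Democrats",
                         "Free Georgia", "Georgian Dream", "Labour Party",
                         "New Right", "United National Movement", "others"]]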

DateTimeIndex.to_period raises a ValueError exception for many offset aliases

I am trying to solve a very simple problem, but am running into a wall.
I have a simple dataframe with a DatetimeIndex, as follows:
df=pd.DataFrame(
index=pd.date_range(
start='2017-01-01',
end='2017-03-04', closed=None),
data=np.arange(63), columns=['val']).rename_axis(index='date')
In [179]: df
Out[179]:
val
date
2017-01-01 0
2017-01-02 1
2017-01-03 2
2017-01-04 3
2017-01-05 4
... ...
2017-02-28 58
2017-03-01 59
2017-03-02 60
2017-03-03 61
2017-03-04 62
[63 rows x 1 columns]
I wish to summarize the values by periods of weekly, semi-monthly, monthly etc.
So I tried:
In [180]: df.to_period('W').groupby('date').sum()
Out[180]:
val
date
2016-12-26/2017-01-01 0
2017-01-02/2017-01-08 28
2017-01-09/2017-01-15 77
2017-01-16/2017-01-22 126
2017-01-23/2017-01-29 175
2017-01-30/2017-02-05 224
2017-02-06/2017-02-12 273
2017-02-13/2017-02-19 322
2017-02-20/2017-02-26 371
2017-02-27/2017-03-05 357
That works fine for offset aliases like Y, M, D, W, T, S, L, U, N.
But fails for SM, SMS and others listed here: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases
It raises a ValueError exception:
In [181]: df.to_period('SMS').groupby('date').sum()
---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
pandas/_libs/tslibs/frequencies.pyx in pandas._libs.tslibs.frequencies._period_str_to_code()

KeyError: 'SMS-15'

During handling of the above exception, another exception occurred:

ValueError                                Traceback (most recent call last)
<ipython-input-181-6779559a0596> in <module>
----> 1 df.to_period('SMS').groupby('date').sum()

~/.virtualenvs/py36/lib/python3.6/site-packages/pandas/core/frame.py in to_period(self, freq, axis, copy)
   8350         axis = self._get_axis_number(axis)
   8351         if axis == 0:
-> 8352             new_data.set_axis(1, self.index.to_period(freq=freq))
   8353         elif axis == 1:
   8354             new_data.set_axis(0, self.columns.to_period(freq=freq))

~/.virtualenvs/py36/lib/python3.6/site-packages/pandas/core/accessor.py in f(self, *args, **kwargs)
     91     def _create_delegator_method(name):
     92         def f(self, *args, **kwargs):
---> 93             return self._delegate_method(name, *args, **kwargs)
     94
     95     f.__name__ = name

~/.virtualenvs/py36/lib/python3.6/site-packages/pandas/core/indexes/datetimelike.py in _delegate_method(self, name, *args, **kwargs)
    812     def _delegate_method(self, name, *args, **kwargs):
--> 813         result = operator.methodcaller(name, *args, **kwargs)(self._data)
    814         if name not in self._raw_methods:
    815             result = Index(result, name=self.name)

~/.virtualenvs/py36/lib/python3.6/site-packages/pandas/core/arrays/datetimes.py in to_period(self, freq)
   1280         freq = get_period_alias(freq)
   1281
-> 1282         return PeriodArray._from_datetime64(self._data, freq, tz=self.tz)
   1283
   1284     def to_perioddelta(self, freq):

~/.virtualenvs/py36/lib/python3.6/site-packages/pandas/core/arrays/period.py in _from_datetime64(cls, data, freq, tz)
    273         PeriodArray[freq]
    274         """
--> 275         data, freq = dt64arr_to_periodarr(data, freq, tz)
    276         return cls(data, freq=freq)

~/.virtualenvs/py36/lib/python3.6/site-packages/pandas/core/arrays/period.py in dt64arr_to_periodarr(data, freq, tz)
    914         data = data._values
    915
--> 916     base, mult = libfrequencies.get_freq_code(freq)
    917     return libperiod.dt64arr_to_periodarr(data.view("i8"), base, tz), freq

pandas/_libs/tslibs/frequencies.pyx in pandas._libs.tslibs.frequencies.get_freq_code()
pandas/_libs/tslibs/frequencies.pyx in pandas._libs.tslibs.frequencies.get_freq_code()
pandas/_libs/tslibs/frequencies.pyx in pandas._libs.tslibs.frequencies.get_freq_code()
pandas/_libs/tslibs/frequencies.pyx in pandas._libs.tslibs.frequencies._period_str_to_code()

ValueError: Invalid frequency: SMS-15
I am using python 3.6.5, pandas version '0.25.1'
Use DataFrame.resample here:
print (df.resample('W').sum())
val
date
2017-01-01 0
2017-01-08 28
2017-01-15 77
2017-01-22 126
2017-01-29 175
2017-02-05 224
2017-02-12 273
2017-02-19 322
2017-02-26 371
2017-03-05 357
print (df.resample('SM').sum())
val
date
2016-12-31 91
2017-01-15 344
2017-01-31 555
2017-02-15 663
2017-02-28 300
print (df.resample('SMS').sum())
val
date
2017-01-01 91
2017-01-15 374
2017-02-01 525
2017-02-15 721
2017-03-01 242
Alternatives with groupby and Grouper:
print (df.groupby(pd.Grouper(freq='W')).sum())
print (df.groupby(pd.Grouper(freq='SM')).sum())
print (df.groupby(pd.Grouper(freq='SMS')).sum())

How to resample a Pandas multi-index data frame via methods depending on the column name

Here is a Pandas v0.14.0 data frame with multi-index columns.
> import pandas as pd
> import numpy as np
>
> rng = pd.date_range('1/1/2001', periods=6, freq='H')
> mi = [(dt, i) for dt in rng for i in range(2)]
> f = pd.DataFrame(np.random.randn(len(mi), 2),
>                  index=pd.MultiIndex.from_tuples(mi, names=['time', 'extra']),
>                  columns=['A', 'B'])
> g = f.unstack('extra')
> g
A B
extra 0 1 0 1
time
2001-01-01 00:00:00 -0.169742 0.390842 -0.017884 1.043376
2001-01-01 01:00:00 -0.184442 -0.102512 -0.013702 0.675290
2001-01-01 02:00:00 0.244708 -0.360740 1.059269 -0.330537
2001-01-01 03:00:00 -2.275161 -1.782581 0.754368 -0.157851
2001-01-01 04:00:00 -0.554282 0.310691 0.917221 -0.114459
2001-01-01 05:00:00 0.599133 0.904824 1.858538 1.319041
I can resample g successfully using one method across all columns, e.g. by g.resample('6H', how=np.sum). How can I resample g with different methods for each column, e.g. by summing the 'A' columns and averaging the 'B' columns?
I tried the following, which works for non-multi-index columns, but got an error.
> g.resample('6H', how={'A': np.sum, 'B': np.mean})
KeyError Traceback (most recent call last)
<ipython-input-217-b1a72fd62178> in <module>()
4 g = f.unstack('extra')
5 print(g)
----> 6 g.resample('6H', how={'A': np.sum, 'B': np.mean})
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/generic.py in resample(self, rule, how, axis, fill_method, closed, label, convention, kind, loffset, limit, base)
2834 fill_method=fill_method, convention=convention,
2835 limit=limit, base=base)
-> 2836 return sampler.resample(self).__finalize__(self)
2837
2838 def first(self, offset):
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/tseries/resample.py in resample(self, obj)
81
82 if isinstance(ax, DatetimeIndex):
---> 83 rs = self._resample_timestamps()
84 elif isinstance(ax, PeriodIndex):
85 offset = to_offset(self.freq)
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/tseries/resample.py in _resample_timestamps(self)
252 # downsample
253 grouped = obj.groupby(grouper, axis=self.axis)
--> 254 result = grouped.aggregate(self._agg_method)
255 else:
256 # upsampling shortcut
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/groupby.py in aggregate(self, arg, *args, **kwargs)
2402 colg = SeriesGroupBy(obj[col], selection=col,
2403 grouper=self.grouper)
-> 2404 result[col] = colg.aggregate(agg_how)
2405 keys.append(col)
2406
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/groupby.py in aggregate(self, func_or_funcs, *args, **kwargs)
2078 cyfunc = _intercept_cython(func_or_funcs)
2079 if cyfunc and not args and not kwargs:
-> 2080 return getattr(self, cyfunc)()
2081
2082 if self.grouper.nkeys > 1:
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/groupby.py in mean(self)
668 self._set_selection_from_grouper()
669 f = lambda x: x.mean(axis=self.axis)
--> 670 return self._python_agg_general(f)
671
672 def median(self):
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/groupby.py in _python_agg_general(self, func, *args, **kwargs)
1012 # iterate through "columns" ex exclusions to populate output dict
1013 output = {}
-> 1014 for name, obj in self._iterate_slices():
1015 try:
1016 result, counts = self.grouper.agg_series(obj, f)
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/groupby.py in _iterate_slices(self)
650
651 def _iterate_slices(self):
--> 652 yield self.name, self._selected_obj
653
654 def transform(self, func, *args, **kwargs):
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/lib.so in pandas.lib.cache_readonly.__get__ (pandas/lib.c:37563)()
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/groupby.py in _selected_obj(self)
461 return self.obj
462 else:
--> 463 return self.obj[self._selection]
464
465 def _set_selection_from_grouper(self):
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/frame.py in __getitem__(self, key)
1682 return self._getitem_multilevel(key)
1683 else:
-> 1684 return self._getitem_column(key)
1685
1686 def _getitem_column(self, key):
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/frame.py in _getitem_column(self, key)
1689 # get column
1690 if self.columns.is_unique:
-> 1691 return self._get_item_cache(key)
1692
1693 # duplicate columns & possible reduce dimensionaility
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/generic.py in _get_item_cache(self, item)
1050 res = cache.get(item)
1051 if res is None:
-> 1052 values = self._data.get(item)
1053 res = self._box_item_values(item, values)
1054 cache[item] = res
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/internals.py in get(self, item)
2535
2536 if not isnull(item):
-> 2537 loc = self.items.get_loc(item)
2538 else:
2539 indexer = np.arange(len(self.items))[isnull(self.items)]
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/core/index.py in get_loc(self, key)
1154 loc : int if unique index, possibly slice or mask if not
1155 """
-> 1156 return self._engine.get_loc(_values_from_object(key))
1157
1158 def get_value(self, series, key):
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/index.so in pandas.index.IndexEngine.get_loc (pandas/index.c:3650)()
/Users/araichev/anaconda/envs/py3k/lib/python3.3/site-packages/pandas/index.so in pandas.index.IndexEngine.get_loc (pandas/index.c:3577)()
KeyError: 'B'
If you start with f, you can use a groupby with a TimeGrouper to do the resample "manually":
In [11]: grp = f.groupby(pd.TimeGrouper('6H', level=0))
In [12]: grp['A'].sum()
Out[12]:
0
2001-01-01 -1.805954
Freq: 6H, Name: A, dtype: float64
In [13]: grp['B'].mean()
Out[13]:
0
2001-01-01 -0.461053
Freq: 6H, Name: B, dtype: float64
If you want to separate based on the extra add that to the groupby*:
In [21]: grp2 = f.groupby([pd.TimeGrouper('6H', level=0),
f.index.get_level_values('extra')])
In [22]: grp2['A'].sum()
Out[22]:
0 extra
2001-01-01 0 2.030321
1 -3.836275
Name: A, dtype: float64
In [23]: grp2['B'].mean()
Out[23]:
0 extra
2001-01-01 0 -0.554839
1 -0.367267
Name: B, dtype: float64
*Note: groupby with a column and a TimeGrouper won't work in versions prior to 0.14.
To get from g to f you can reshape with stack:
In [31]: f2 = g.stack(level=1) # Note: use stack to get f from g
and back from the results above to a similar format:
In [32]: pd.DataFrame({'A': grp['A'].sum(), 'B': grp['B'].mean()})
Out[32]:
A B
0 extra
2001-01-01 0 -2.762064 -0.269427
1 -2.006839 -0.026213
In [33]: _.unstack(level=1)
Out[33]:
A B
extra 0 1 0 1
0
2001-01-01 -2.762064 -2.006839 -0.269427 -0.026213
Another approach, which might be "simpler", in that you actually do the resample, is to make the dict from the columns:
In [41]: dict(zip(g.columns,
map({'A': 'sum', 'B': 'mean'}.get,
[x[0] for x in g.columns])))
Out[41]: {('A', 0): 'sum', ('A', 1): 'sum', ('B', 0): 'mean', ('B', 1): 'mean'}
In [42]: g.resample('6H', _)
Out[42]:
A B A B
1 0 0 1
time
2001-01-01 -3.836275 -0.554839 2.030321 -0.367267
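
On current pandas, pd.TimeGrouper is gone (pd.Grouper took its place) and aggregate accepts a per-column dict, so the same result can be sketched more directly from f:

# sum the A column and average the B column per 6-hour bin, grouping the 'time' level
res = f.groupby(pd.Grouper(freq='6H', level='time')).agg({'A': 'sum', 'B': 'mean'})

For g, the dict of column tuples built in Out[41] should work as the argument to g.resample('6H').agg(...) in the same way.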
