Find begin and end index of consecutive ones in pandas dataframe - python

I have the following dataframe:
A B C
0 1 1 1
1 0 1 0
2 1 1 1
3 1 0 1
4 1 1 0
5 1 1 0
6 0 1 1
7 0 1 0
for which I want to know the start and end index of every stretch where a column's value is 1 for 3 or more consecutive rows. Desired outcome:
Column From To
A 2 5
B 0 2
B 4 7
First I filter out the ones that are not part of a run of 3 or more consecutive values:
filtered_df = df.copy().apply(filter, threshold=3)
where
def filter(col, threshold=3):
    mask = col.groupby((col != col.shift()).cumsum()).transform('count').lt(threshold)
    mask &= col.eq(1)
    col.update(col.loc[mask].replace(1, 0))
    return col
filtered_df now looks like:
A B C
0 0 1 0
1 0 1 0
2 1 1 0
3 1 0 0
4 1 1 0
5 1 1 0
6 0 1 0
7 0 1 0
If the dataframe had only one column of zeros and ones, the result could be achieved as in How to use pandas to find consecutive same data in time series. However, I am struggling to do something similar for multiple columns at once.
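For reference, a minimal sketch of that single-column technique (my own restatement of the linked approach, applied to column A; the variable names are mine):
import pandas as pd

s = pd.Series([1, 0, 1, 1, 1, 1, 0, 0])        # column A from the example
run_id = (s != s.shift()).cumsum()             # label each consecutive run
runs = pd.DataFrame({'val': s, 'run': run_id, 'idx': s.index})
ones = runs[runs['val'].eq(1)]                 # keep only the 1-rows
spans = ones.groupby('run')['idx'].agg(['first', 'last', 'count'])
print(spans[spans['count'] >= 3][['first', 'last']])
#      first  last
# run
# 3        2     5
The answers below generalize this run-labelling idea to all columns at once.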

Use DataFrame.pipe to apply a function to the whole DataFrame.
The first solution gets the first and last index of each run of consecutive 1s per column, appends the output to a list, and concatenates at the end:
def f(df, threshold=3):
    out = []
    for col in df.columns:
        m = df[col].eq(1)
        g = (df[col] != df[col].shift()).cumsum()[m]
        mask = g.groupby(g).transform('count').ge(threshold)
        filt = g[mask].reset_index()
        output = filt.groupby(col)['index'].agg(['first', 'last'])
        output.insert(0, 'col', col)
        out.append(output)
    return pd.concat(out, ignore_index=True)
Or first reshape with unstack and then apply the same logic:
def f(df, threshold=3):
    df1 = df.unstack().rename_axis(('col', 'idx')).reset_index(name='val')
    m = df1['val'].eq(1)
    g = (df1['val'] != df1.groupby('col')['val'].shift()).cumsum()
    mask = g.groupby(g).transform('count').ge(threshold) & m
    return (df1[mask].groupby([df1['col'], g])['idx']
                     .agg(['first', 'last'])
                     .reset_index(level=1, drop=True)
                     .reset_index())
filtered_df = df.pipe(f, threshold=3)
print (filtered_df)
col first last
0 A 2 5
1 B 0 2
2 B 4 7
filtered_df = df.pipe(f, threshold=2)
print (filtered_df)
col first last
0 A 2 5
1 B 0 2
2 B 4 7
3 C 2 3

You can use rolling to create a window over the data frame. Then you can apply all your conditions and shift the window back to its start location:
length = 3
window = df.rolling(length)
mask = (window.min() == 1) & (window.max() == 1)
mask = mask.shift(1 - length)
print(mask)
which prints:
A B C
0 False True False
1 False False False
2 True False False
3 True False False
4 False True False
5 False True False
6 NaN NaN NaN
7 NaN NaN NaN
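The mask marks the rows where a full window of ones begins, so a run longer than length yields several consecutive True values. As a hedged follow-up sketch (my addition, reusing mask and length from above), the True runs can be collapsed into from/to pairs:
import pandas as pd

rows = []
for col in mask.columns:
    m = mask[col].fillna(False).astype(bool)
    run = (m != m.shift()).cumsum()            # label runs of equal values
    for _, grp in m[m].groupby(run):           # iterate over the True runs
        rows.append({'Column': col,
                     'From': grp.index[0],
                     'To': grp.index[-1] + length - 1})
print(pd.DataFrame(rows))
#   Column  From  To
# 0      A     2   5
# 1      B     0   2
# 2      B     4   7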

Related

Comparing the value of a column with the previous value of a new column using Apply in Python (Pandas)

I have a dataframe with these values in column A:
A = [0, 5, 1, 7, 0, 2, 1, 3, 0]
df = pd.DataFrame(A, columns=['A'])
A
0 0
1 5
2 1
3 7
4 0
5 2
6 1
7 3
8 0
I need to create a new column (called B) and populate it using the following conditions:
Condition 1: If the value of A is equal to 0 then, the value of B must be 0.
Condition 2: If the value of A is not 0 then I compare its value to the previous value of B. If A is higher than the previous value of B then I take A, otherwise I take B.
The result should be this:
A B
0 0 0
1 5 5
2 1 5
3 7 7
4 0 0
5 2 2
6 1 2
7 3 3
The dataset is huge and using loops would be too slow. I would need to solve this without using loops or the pandas .loc accessor. Could anyone help me solve this using the apply function? I have tried different things without success.
Thanks a lot.
One way to do this, I guess, could be the following:
def do_your_stuff(row):
    global value
    # fancy stuff here
    value = row["b"]
    [...]

value = df.iloc[0]['B']
df["C"] = df.apply(lambda row: do_your_stuff(row), axis=1)
Try this:
df['B'] = df['A'].shift()
df['B'] = df.apply(lambda x:0 if x.A == 0 else x.A if x.A > x.B else x.B, axis=1)
Use .shift() to shift your column one cell down, and check whether the previous value is larger and the current one is not 0. Then use .mask() to replace those values with the previous one while the condition holds.
from io import StringIO
import pandas as pd
wt = StringIO("""A
0 0
1 2
2 3
3 1
4 2
5 7
6 0
""")
df = pd.read_csv(wt, sep=r'\s+', engine='python')
df
A
0 0
1 2
2 3
3 1
4 2
5 7
6 0
def func(df, col):
    df['B'] = df[col].mask(cond=((df[col].shift(1) > df[col]) & (df[col] != 0)),
                           other=df[col].shift(1))
    if col == 'B':
        while ((df[col].shift(1) > df[col]) & (df[col] != 0)).any():
            df['B'] = df[col].mask(cond=((df[col].shift(1) > df[col]) & (df[col] != 0)),
                                   other=df[col].shift(1))
    return df
(df.pipe(func, 'A').pipe(func, 'B'))
Output:
A B
0 0 0
1 2 2
2 3 3
3 1 3
4 2 3
5 7 7
6 0 0
Based on Achille's solution, I solved it this way:
import pandas as pd

A = [0, 2, 3, 0, 2, 7, 2, 3, 2, 20, 1, 0, 2, 5, 4, 3, 1]
df = pd.DataFrame(A, columns=['A'])
df['B'] = 0

def function(row):
    global value
    global prev
    if row['A'] == 0:
        value = 0
    elif row['A'] > value:
        value = row['A']
    else:
        value = prev
    prev = value
    return value

value = df.iloc[0]['B']
prev = value
df["B"] = df.apply(lambda row: function(row), axis=1)
df
output:
A B
0 0 0
1 2 2
2 3 3
3 0 0
4 2 2
5 7 7
6 2 7
7 3 7
8 2 7
9 20 20
10 1 20
11 0 0
12 2 2
13 5 5
14 4 5
15 3 5
16 1 5
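For what it's worth, the question asked for a loop-free solution, and the rule amounts to a cumulative maximum that resets at every zero in A. A vectorized sketch (my addition, not one of the posted answers) that reproduces the output above:
import pandas as pd

A = [0, 2, 3, 0, 2, 7, 2, 3, 2, 20, 1, 0, 2, 5, 4, 3, 1]
df = pd.DataFrame(A, columns=['A'])

segment = df['A'].eq(0).cumsum()               # each zero in A starts a new segment
df['B'] = df.groupby(segment)['A'].cummax()    # running max within each segment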

Select rows in pandas where any of six column are not all zero

Here's what the pandas table looks like (screenshot omitted):
The rows marked red have all six of the relevant column values set to 0. I want to select only the non-red rows and filter the red ones out.
I can't seem to figure out whether there's a built-in or easy way to do it.
Use a boolean mask as suggested by @Ch3steR, and use .iloc or .loc to select a subset of columns:
# Minimal sample
>>> df
A B C D E F G H I J
0 4 0 0 0 0 0 0 1 3 2 # Drop
1 4 6 4 0 0 0 0 0 0 0 # Keep
# .iloc version: select the first 7 columns
>>> df[df.iloc[:, :7].eq(0).sum(1).lt(6)]
A B C D E F G H I J
1 4 6 4 0 0 0 0 0 0 0
# .loc version: select columns from A to G
>>> df[df.loc[:, 'A':'G'].eq(0).sum(1).lt(6)]
A B C D E F G H I J
1 4 6 4 0 0 0 0 0 0 0
Step by Step:
# Is value equal to 0
>>> df.loc[:, 'A':'G'].eq(0)
A B C D E F G
0 False True True True True True True
1 False False False True True True True
# Sum of boolean, if there are 3 True, the sum will be 3
# sum(1) <- 1 is for axis, the sum per row
>>> df.loc[:, 'A':'G'].eq(0).sum(1)
0 6 # 6 zeros
1 4 # 4 zeros
dtype: int64
# Are there less than 6 zeros ?
>>> df.loc[:, 'A':'G'].eq(0).sum(1).lt(6)
0 False
1 True
dtype: bool
# If yes, I keep row else I drop it
>>> df[df.loc[:, 'A':'G'].eq(0).sum(1).lt(6)]
A B C D E F G H I J
1 4 6 4 0 0 0 0 0 0 0

Concatenate column names by using the binary values in the columns

Currently, I have a dataframe as follows:
date A B C
02/19/2020 0 0 0
02/20/2020 0 0 0
02/21/2020 1 1 1
02/22/2020 0 1 0
02/23/2020 0 1 1
02/24/2020 0 0 1
02/25/2020 1 0 1
02/26/2020 1 0 0
The binary columns contain integers. The "date" column is a DateTime object. I want to create a new categorical column based on the binary columns, as follows:
date A B C new
02/19/2020 0 0 0 "None"
02/20/2020 0 0 0 "None"
02/21/2020 1 1 1 A+B+C
02/22/2020 0 1 0 B
02/23/2020 0 1 1 B+C
02/24/2020 0 0 1 C
02/25/2020 1 0 1 A+C
02/26/2020 1 0 0 A
How can I achieve this?
Use DataFrame.dot for matrix multiplication with the column names: omit the first column by position with DataFrame.iloc, add a separator to each remaining column name, and finally strip the trailing separator by indexing with str[:-1]:
df['new'] = df.iloc[:, 1:].dot(df.columns[1:] + '+').str[:-1]
#set empty string to None
df.loc[df['new'].eq(''), 'new'] = None
print (df)
date A B C new
0 02/19/2020 0 0 0 None
1 02/20/2020 0 0 0 None
2 02/21/2020 1 1 1 A+B+C
3 02/22/2020 0 1 0 B
4 02/23/2020 0 1 1 B+C
5 02/24/2020 0 0 1 C
6 02/25/2020 1 0 1 A+C
7 02/26/2020 1 0 0 A
If possible, use NaN instead of None:
df['new'] = df.iloc[:, 1:].dot(df.columns[1:] + '+').str[:-1].replace('', np.nan)
print (df)
date A B C new
0 02/19/2020 0 0 0 NaN
1 02/20/2020 0 0 0 NaN
2 02/21/2020 1 1 1 A+B+C
3 02/22/2020 0 1 0 B
4 02/23/2020 0 1 1 B+C
5 02/24/2020 0 0 1 C
6 02/25/2020 1 0 1 A+C
7 02/26/2020 1 0 0 A
Or, if possible, set the first column as a DatetimeIndex and use:
df1 = df.set_index('date')
df1['new'] = df1.dot(df1.columns + '+').str[:-1]
df1.loc[df1['new'].eq(''), 'new'] = None
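To see why the dot trick works, here is a quick illustrative check (my addition): multiplying a 0/1 row by the suffixed column names repeats each name zero or one times, and the dot product concatenates the pieces:
import pandas as pd

row = pd.DataFrame({'A': [1], 'B': [0], 'C': [1]})
print(row.dot(row.columns + '+'))              # 0    A+C+
print(row.dot(row.columns + '+').str[:-1])     # 0    A+C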
You can also iterate over the DataFrame to calculate the new column's values and then add it.
This is a basic example:
new_column = []
for i, row in df.iterrows():
    row_val = None
    if row["A"]:
        if row_val:
            row_val += "+A"
        else:
            row_val = "A"
    if row["B"]:
        if row_val:
            row_val += "+B"
        else:
            row_val = "B"
    if row["C"]:
        if row_val:
            row_val += "+C"
        else:
            row_val = "C"
    if row_val is None:
        row_val = "None"
    new_column.append(row_val)
df["new_column_name"] = new_column

Creating a matrix of 0 and 1 that maintains its shape

I am attempting to create a matrix of 1s where every 2nd column's value is greater than the previous column's value, and 0s where it is less. When I use np.where it just flattens the result; I want to keep the first and last columns and the original shape.
df = pd.DataFrame(np.random.randn(8, 4), columns=['A', 'B', 'C', 'D'])
newdf = pd.DataFrame()
for x in df.columns[1::2]:
    if bool(df.iloc[:, df.columns.get_loc(x)] <=
            df.iloc[:, df.columns.get_loc(x) - 1]):
        newdf.append(1)
    else:
        newdf.append(0)
This question was a little vague, but I will answer a question that I think gets at the heart of what you are asking:
Say you start with a matrix:
df1 = pd.DataFrame(np.random.randn(8, 4),columns=['A', 'B', 'C', 'D'])
Which creates:
A B C D
0 2.464130 0.796172 -1.406528 0.332499
1 -0.370764 -0.185119 -0.514149 0.158218
2 -2.164707 0.888354 0.214550 1.334445
3 2.019189 0.910855 0.582508 -0.861778
4 1.574337 -1.063037 0.771726 -0.196721
5 1.091648 0.407703 0.406509 -1.052855
6 -1.587963 -1.730850 0.168353 -0.899848
7 0.225723 0.042629 2.152307 -1.086585
Now you can use DataFrame.shift() to shift the entire matrix and then check the resulting columns element by element in one step. For example:
df1.shift(-1)
Creates:
A B C D
0 -0.370764 -0.185119 -0.514149 0.158218
1 -2.164707 0.888354 0.214550 1.334445
2 2.019189 0.910855 0.582508 -0.861778
3 1.574337 -1.063037 0.771726 -0.196721
4 1.091648 0.407703 0.406509 -1.052855
5 -1.587963 -1.730850 0.168353 -0.899848
6 0.225723 0.042629 2.152307 -1.086585
7 NaN NaN NaN NaN
And now you can compare the shifted frame against the original to get a new boolean matrix, like so:
df2 = df1.shift(-1) > df1
which returns:
A B C D
0 False False True False
1 False True True True
2 True True True False
3 False False True True
4 False True False False
5 False False False True
6 True True True False
7 False False False False
To complete your question, we convert the True/False values to 1/0:
df2 = df2.astype(int)
Which returns:
A B C D
0 0 0 1 0
1 0 1 1 1
2 1 1 1 0
3 0 0 1 1
4 0 1 0 0
5 0 0 0 1
6 1 1 1 0
7 0 0 0 0
In one line:
df2 = (df1.shift(-1)>df1).replace({True:1,False:0})

Pandas: How to add number of the row within grouped rows

So I have a DataFrame:
>>> df2
text
0 0 a
0 1 b
0 2 c
0 3 d
1 4 e
1 5 f
1 6 g
2 7 h
2 8 1
How do I create another column that contains a counter for each row within the level-0 index?
I have tried the following code (I need to get the df['counter'] column):
current_index = ''
for index, row in df.iterrows():
    if index[0] != current_index:
        current_index = index[0]
        df[(df.index == current_index)]['counter'] = np.arange(len(df[(df.index == current_index)].index))
and the following code as well:
df2 = pd.DataFrame()
for group, df0 in df1.groupby('level_0_column'):
    df0 = df0.sort_values(by=['level_1_column'])
    df0['counter'] = list(df0.reset_index().index.values + 1)
    df2 = df2.append(df0)
I have around 650K rows in the DataFrame... it goes into an infinite loop. Please advise.
I believe you're looking for groupby along the 0th column index + cumcount:
df['counter'] = df.groupby(level=0).cumcount() + 1
df
text counter
0 0 a 1
1 b 2
2 c 3
3 d 4
1 4 e 1
5 f 2
6 g 3
2 7 h 1
8 1 2
