Different results from interpolation if the same data is interpolated with a time index - python

I get different results from interpolation when the same data is interpolated with a time index. How can that be?
The pandas docs say:
The ‘krogh’, ‘piecewise_polynomial’, ‘spline’, ‘pchip’ and ‘akima’ methods
are wrappers around the respective SciPy implementations of similar names.
These use the actual numerical values of the index. For more information
on their behavior, see the SciPy documentation and SciPy tutorial.
The sub-methods of interpolate(method=...) where I noticed this strange behavior are (among others):
['krogh', 'spline', 'pchip', 'akima', 'cubicspline']
Reproducible sample (with comparison):
import numpy as np, pandas as pd
from math import isclose

# inputs:
no_timeindex = False       # reset both dataframes' indices to numerical indices, for comparison.
no_timeindex_for_B = True  # reset only the index of approach B (the second dataframe) to a numerical index; the other one keeps its datetime index, for comparison.
holes = True               # create a datetime index that skips the timestamps that would normally sit at locations 6, 7, 12, 14, 17, instead of a perfectly regular one.
o_ = 2                     # order parameter for interpolation.
method_ = 'cubicspline'
#------------------+
n = np.nan
arr = [n,n,10000000000 ,10,10,10000,10,10, 10,40,4,4,9,4,4,n,n,n,4,4,4,4,4,4,18,400000000,4,4,4,n,n,n,n,n,n,n,4,4,4,5,6000000000,4,5,4,5,4,3,n,n,n,n,n,n,n,n,n,n,n,n,n,4,n,n,n,n,n,n,n,n,n,n,n,n,n,n,2,n,n,n,10,1000000000,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,n,1,n,n,n,n,n,n,n,n,n]
#--------------------------------------------------------------------------------+
# first approach: datetime index
df = pd.DataFrame(arr)  # create dataframe from array.
if holes:  # create a datetime index that skips the timestamps that would normally be at locations 6, 7, 12, 14, 17.
    ix = pd.date_range("01.01.2000", periods=len(df)+(2+5), freq="T")[2:]
    to_drop = [ix[6], ix[7], ix[12], ix[14], ix[17]]
    ix = ix.drop(to_drop)
    df.index = ix
else:  # create a perfectly regular datetime index without any holes.
    ix = pd.date_range("01.01.2000", periods=len(df)+2, freq="T")[2:]
    df.index = ix
# if wanted, drop the datetime index and use integer indices instead
if no_timeindex == True:
    df.reset_index(inplace=True, drop=True)
df = df.interpolate(method=method_, order=o_, limit_area='inside')  # interpolate.
df.index = ix  # set index equal to the second approach, for comparing later.
A = df.copy(deep=True)  # create a copy, to compare the result with the second approach later.
#------------------------------+
# second approach: numerical index instead of a datetime index
df = pd.DataFrame(arr)  # create dataframe from array.
if holes:  # create a datetime index that skips the timestamps that would normally be at locations 6, 7, 12, 14, 17.
    ix = pd.date_range("01.01.2000", periods=len(df)+(2+5), freq="T")[2:]
    to_drop = [ix[6], ix[7], ix[12], ix[14], ix[17]]
    ix = ix.drop(to_drop)
    df.index = ix
else:  # create a perfectly regular datetime index without any holes.
    ix = pd.date_range("01.01.2000", periods=len(df)+2, freq="T")[2:]
    df.index = ix
# if wanted, drop the datetime index and use integer indices instead
if no_timeindex == True or no_timeindex_for_B == True:
    df.reset_index(inplace=True, drop=True)
df = df.interpolate(method=method_, order=o_, limit_area='inside')  # interpolate.
df.index = ix  # set index equal to the first approach, for comparing later.
B = df.copy(deep=True)  # create a copy, to compare the result with the first approach later.
#--------------------------------------------------------------------------------+
# compare:
if A.equals(B) == False:
    # if the values aren't equal, count the ones that aren't.
    i = 0
    for x, y in zip(A[A.columns[0]], B[B.columns[0]]):
        if x != y and not (np.isnan(x) and np.isnan(y)):
            print(x, " ?= ", y, " ", (x == y), abs(x - y))
            i += 1
    # if there are no different values, ...
    if i == 0:
        print(" both are the same. ")
    else:  # if there are different values, ...
        # count those different values that are NOT almost the same.
        not_almost = 0
        for x, y in zip(A[A.columns[0]], B[B.columns[0]]):
            if not (np.isnan(x) and np.isnan(y)):
                if isclose(x, y, abs_tol=0.000001) == False:
                    not_almost += 1
        # if all values are almost the same, ...
        if not_almost == 0:
            print(" both are not, but almost the same. ")
        else:
            print(" both are definitely not the same. ")
else:
    print(" both are the same. ")
This shouldn't be the case, since the pandas docs state otherwise. Why does it happen anyway?
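For illustration, the quoted docs say these SciPy-backed methods use the actual numerical values of the index as x-coordinates, so an integer index (0, 1, 2, ...) and a datetime index with holes describe differently spaced x-grids, and the fitted splines differ. A minimal sketch with toy values (not the array above; requires SciPy for 'cubicspline'):
import numpy as np
import pandas as pd

# Toy values: four known points, two interior NaNs to fill.
vals = [1.0, np.nan, 3.0, np.nan, 10.0, 2.0]

# Integer index -> x-coordinates 0, 1, 2, 3, 4, 5
a = pd.Series(vals).interpolate(method='cubicspline', limit_area='inside')

# Datetime index with a hole between the 4th and 5th timestamps -> unevenly spaced x-coordinates
idx = pd.to_datetime(['2000-01-01 00:00', '2000-01-01 00:01', '2000-01-01 00:02',
                      '2000-01-01 00:03', '2000-01-01 00:10', '2000-01-01 00:11'])
b = pd.Series(vals, index=idx).interpolate(method='cubicspline', limit_area='inside')

print(a.to_numpy())
print(b.to_numpy())  # the filled-in values differ, because the x-spacing differs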

Related

Find minima and maxima of DataFrame by chronological order

I have a pandas data frame from which I extract minima and maxima values. It works well so far, but the problem is: how can I place them into a single list by Date (chronological order)? They are currently separated into two lists, and I only want one list of price values with them in chronological order.
import pandas as pd
import numpy as np
import yfinance
from scipy.signal import argrelextrema
import matplotlib.dates as mpl_dates

def extract_data():
    ticker = 'GBPJPY=X'
    ticker = yfinance.Ticker(ticker)
    start_date = '2022-09-25'
    end_date = '2022-10-08'
    df = ticker.history(interval='1h', start=start_date, end=end_date)
    df['Date'] = pd.to_datetime(df.index)
    df['Date'] = df['Date'].apply(mpl_dates.date2num)
    df = df.loc[:, ['Date', 'Open', 'High', 'Low', 'Close']]
    # Call function to find min/max extrema
    find_extrema(df)

def find_extrema(df):
    n = 10  # number of points to be checked before and after
    # Find local peaks
    df['min'] = df.iloc[argrelextrema(df.Close.values, np.less_equal,
                                      order=n)[0]]['Close']
    df['max'] = df.iloc[argrelextrema(df.Close.values, np.greater_equal,
                                      order=n)[0]]['Close']
    min_values_list = []
    max_values_list = []
    # Add min values to list
    for item in df['min']:
        check_NaN = np.isnan(item)  # check if the value is empty
        if check_NaN == True:
            pass
        else:
            min_values_list.append(item)
    # Add max values to list
    for item in df['max']:
        check_NaN = np.isnan(item)  # check if the value is empty
        if check_NaN == True:
            pass
        else:
            max_values_list.append(item)
    print(f"Min: {min_values_list}")
    print(f"Max: {max_values_list}")

extract_data()
Option 1
First, use df.to_numpy to convert the min and max columns to a np.array.
Then get rid of all the NaN values by selecting from the array with a boolean mask built by applying np.logical_not to np.isnan(arr).
arr = df[['min','max']].to_numpy()
value_list = arr[np.logical_not(np.isnan(arr))].tolist()
print(value_list)
[159.7030029296875,
154.8979949951172,
160.7830047607422,
165.43800354003906,
149.55799865722656,
162.80499267578125,
156.6529998779297,
164.31900024414062,
156.125,
153.13499450683594,
161.3520050048828,
156.9340057373047,
162.52200317382812,
155.7740020751953,
160.98500061035156,
161.83700561523438]
Option 2
Rather more cumbersome:
n = 10
# get the indices for `min` and `max` in two arrays
_min = argrelextrema(df.Close.values, np.less_equal, order=n)[0]
_max = argrelextrema(df.Close.values, np.greater_equal, order=n)[0]
# create columns (assuming you need this for other purposes as well)
df['min'] = df.iloc[_min]['Close']
df['max'] = df.iloc[_max]['Close']
# create lists for `min` and `max`
min_values_list = df['min'].dropna().tolist()
max_values_list = df['max'].dropna().tolist()
# join the lists
value_list2 = min_values_list + max_values_list
value_idxs = _min.tolist() + _max.tolist()
# finally, sort `value_list2` based on `value_idxs`
value_list2 = [x for _, x in sorted(zip(value_idxs, value_list2))]
# check if result is the same:
value_list2 == value_list
# True
Assuming that you have max and min columns, what about something like this?
df['max_or_min'] = np.where(df['max'].notna(), df['max'], df['min'])
min_max_values = df['max_or_min'].dropna().values.tolist()
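If you also want each extremum paired with its timestamp (the question asks for chronological order by date), a small variation on the line above keeps the index instead of stripping it with .values; this sketch assumes df still carries the datetime index from yfinance:
extrema = df['max_or_min'].dropna()          # Series of prices, still indexed by date and already chronological
min_max_values = extrema.tolist()            # same flat list of prices as above
min_max_with_dates = list(extrema.items())   # [(Timestamp, price), ...] pairs, if the dates are needed too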

python - "merge based on a partial match" - Improving performance of function

I have the below script, which aims to create a "merge based on a partial match" functionality, since this is not possible with the normal .merge() function to the best of my knowledge.
The below works / returns the desired result, but unfortunately it's incredibly slow, to the point that it's almost unusable where I need it.
I've been looking around at other Stack Overflow posts that contain similar problems, but haven't yet been able to find a faster solution.
Any thoughts on how this could be accomplished would be appreciated!
import pandas as pd
df1 = pd.DataFrame(['https://wwww.example.com/hi', 'https://wwww.example.com/tri',
                    'https://wwww.example.com/bi', 'https://wwww.example.com/hihibi'],
                   columns=['pages'])
df2 = pd.DataFrame(['hi', 'bi', 'geo'],
                   columns=['ngrams'])
def join_on_partial_match(full_values=None, matching_criteria=None):
    # Changing columns name with index number
    full_values.columns.values[0] = "full"
    matching_criteria.columns.values[0] = "ngram_match"
    # Creating matching column so all rows match on join
    full_values['join'] = 1
    matching_criteria['join'] = 1
    dfFull = full_values.merge(matching_criteria, on='join').drop('join', axis=1)
    # Dropping the 'join' column we created to join the 2 tables
    matching_criteria = matching_criteria.drop('join', axis=1)
    # identifying matching and returning bool values based on whether match exists
    dfFull['match'] = dfFull.apply(lambda x: x.full.find(x.ngram_match), axis=1).ge(0)
    # filtering dataset to only 'True' rows
    final = dfFull[dfFull['match'] == True]
    final = final.drop('match', axis=1)
    return final

join = join_on_partial_match(full_values=df1, matching_criteria=df2)
print(join)
>> full ngram_match
0 https://wwww.example.com/hi hi
7 https://wwww.example.com/bi bi
9 https://wwww.example.com/hihibi hi
10 https://wwww.example.com/hihibi bi
For anyone who is interested - I ended up figuring out 2 ways to do this.
The first returns all matches (i.e., it duplicates the input value and pairs it with every partial match).
The second only returns the first match.
Both are extremely fast. I just ended up using a pretty simple masking script:
import time

def partial_match_join_all_matches_returned(full_values=None, matching_criteria=None):
    """The partial_match_join_all_matches_returned() function takes two series objects and returns a dataframe with all matching values (duplicating the full value).
    Args:
        full_values = None: This is the series that contains the full values for the matching pair.
        matching_criteria = None: This is the series that contains the partial values for the matching pair.
    Returns:
        A dataframe with 2 columns - 'full' and 'match'.
    """
    start_join1 = time.time()
    matching_criteria = matching_criteria.to_frame("match")
    full_values = full_values.to_frame("full")
    full_values = full_values.drop_duplicates()
    output = []
    for n in matching_criteria['match']:
        mask = full_values['full'].str.contains(n, case=False, na=False)
        df = full_values[mask]
        df_copy = df.copy()
        df_copy['match'] = n
        # df = df.loc[n, 'match']
        output.append(df_copy)
    final = pd.concat(output)
    end_join1 = (time.time() - start_join1)
    end_join1 = str(round(end_join1, 2))
    len_join1 = len(final)
    return final

def partial_match_join_first_match_returned(full_values=None, matching_criteria=None):
    """The partial_match_join_first_match_returned() function takes two series objects and returns a dataframe with the first matching value.
    Args:
        full_values = None: This is the series that contains the full values for the matching pair.
        matching_criteria = None: This is the series that contains the partial values for the matching pair.
    Returns:
        A dataframe with 2 columns - 'full' and 'match'.
    """
    start_singlejoin = time.time()
    matching_criteria = matching_criteria.to_frame("match")
    full_values = full_values.to_frame("full").drop_duplicates()
    output = []
    for n in matching_criteria['match']:
        mask = full_values['full'].str.contains(n, case=False, na=False)
        df = full_values[mask]
        df_copy = df.copy()
        df_copy['match'] = n
        # leaves us with only the 1st of each URL
        df_copy = df_copy.drop_duplicates(subset=['full'])
        output.append(df_copy)
    final = pd.concat(output)
    end_singlejoin = (time.time() - start_singlejoin)
    end_singlejoin = str(round(end_singlejoin, 2))
    len_singlejoin = len(final)
    return final
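For completeness, a small usage sketch with the df1/df2 frames from the question (my addition, not part of the original answer); both helpers expect Series, so the relevant columns are passed in:
all_matches = partial_match_join_all_matches_returned(full_values=df1['pages'],
                                                      matching_criteria=df2['ngrams'])
first_matches = partial_match_join_first_match_returned(full_values=df1['pages'],
                                                        matching_criteria=df2['ngrams'])
print(all_matches)
print(first_matches)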

How to fill missing dates in a time series

Here's what my data looks like:
There are daily records, except for a gap from 2017-06-12 to 2017-06-16.
df2['timestamp'] = pd.to_datetime(df['timestamp'])
df2['timestamp'] = df2['timestamp'].map(lambda x:
datetime.datetime.strftime(x,'%Y-%m-%d'))
df2 = df2.convert_objects(convert_numeric = True)
df2 = df2.groupby('timestamp', as_index = False).sum()
I need to fill this missing gap and others with values for all fields (e.g. timestamp, temperature, humidity, light, pressure, speed, battery_voltage, etc...).
How can I accomplish this with Pandas?
This is what I have done before
weektime = pd.date_range(start='06/04/2017', end='12/05/2017', freq='W-SUN')
df['week'] = 'nan'
df['weektemp'] = 'nan'
df['weekhumidity'] = 'nan'
df['weeklight'] = 'nan'
df['weekpressure'] = 'nan'
df['weekspeed'] = 'nan'
df['weekbattery_voltage'] = 'nan'
for i in range(0, len(weektime)):
    df['week'][i+1] = weektime[i]
    df['weektemp'][i+1] = df['temperature'].iloc[7*i+1:7*i+7].sum()
    df['weekhumidity'][i+1] = df['humidity'].iloc[7*i+1:7*i+7].sum()
    df['weeklight'][i+1] = df['light'].iloc[7*i+1:7*i+7].sum()
    df['weekpressure'][i+1] = df['pressure'].iloc[7*i+1:7*i+7].sum()
    df['weekspeed'][i+1] = df['speed'].iloc[7*i+1:7*i+7].sum()
    df['weekbattery_voltage'][i+1] = df['battery_voltage'].iloc[7*i+1:7*i+7].sum()
    i = i + 1
The sum values are not correct, because the value for 2017-06-17 includes the sum of 2017-06-12 to 2017-06-16, and I do not want to add those again. Also, this is not the only gap in the period; I want to fill all of them.
Here is a function I wrote that might be helpful to you. It looks for inconsistent jumps in time and fills them in. After using this function, try using a linear interpolation function (pandas has a good one) to fill in your null data values. Note: Numpy arrays are much faster to iterate over and manipulate than Pandas dataframes, which is why I switch between the two.
import numpy as np
import pandas as pd

data_arr = np.array(your_df)
periodicity = 'daily'

def fill_gaps(data_arr, periodicity):
    rows = data_arr.shape[0]
    data_no_gaps = np.copy(data_arr)  # avoid altering the thing you're iterating over
    data_no_gaps_idx = 0
    for row_idx in np.arange(1, rows):  # iterate once for each row (except the first record; nothing to compare)
        oldtimestamp_str = str(data_arr[row_idx-1, 0])
        oldtimestamp = np.datetime64(oldtimestamp_str)
        currenttimestamp_str = str(data_arr[row_idx, 0])
        currenttimestamp = np.datetime64(currenttimestamp_str)
        period = currenttimestamp - oldtimestamp
        if period != np.timedelta64(900, 's') and period != np.timedelta64(3600, 's') and period != np.timedelta64(86400, 's'):
            if periodicity == 'quarterly':
                desired_period = 900
            elif periodicity == 'hourly':
                desired_period = 3600
            elif periodicity == 'daily':
                desired_period = 86400
            periods_missing = int(period / np.timedelta64(desired_period, 's'))
            for missing in np.arange(1, periods_missing):
                new_time_orig = str(oldtimestamp + missing*(np.timedelta64(desired_period, 's')))
                new_time = new_time_orig.replace('T', ' ')
                data_no_gaps = np.insert(data_no_gaps, (data_no_gaps_idx + missing),
                                         np.array((new_time, np.nan, np.nan, np.nan, np.nan, np.nan)), 0)  # INSERT VALUES YOU WANT IN THE NEW ROW
            data_no_gaps_idx += (periods_missing-1)  # increment the index (zero-based => -1) in accordance with added rows
        data_no_gaps_idx += 1  # allow the index to change as we iterate over the original data array (main for loop)
    # create a dataframe:
    data_arr_no_gaps = pd.DataFrame(data=data_no_gaps, index=None, columns=['Time', 'temp', 'humidity', 'light', 'pressure', 'speed'])
    return data_arr_no_gaps
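As mentioned above, once the gaps exist as rows of NaN you can hand the frame back to pandas for the interpolation step. A sketch of that follow-up (my addition; your_df and the column names follow the assumptions in the function above):
filled = fill_gaps(np.array(your_df), 'daily')
filled = filled.set_index('Time')
# the inserted rows are all-NaN; make the columns numeric, then interpolate linearly
filled = filled.apply(pd.to_numeric, errors='coerce').interpolate(method='linear')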
Fill time gaps and nulls
Use the function below to ensure the expected date sequence exists, and then use forward fill to fill in the nulls.
import pandas as pd
import os
def fill_gaps_and_nulls(df, freq='1D'):
    '''
    General steps:
        A) check for extra dates (out of expected frequency/sequence)
        B) check for missing dates (based on expected frequency/sequence)
        C) use forward fill to fill nulls
        D) use backward fill to fill remaining nulls
        E) append to file
    '''
    # rename the timestamp column to 'date'
    df = df.rename(columns={"timestamp": "date"})
    # sort to make indexing faster
    df = df.sort_values(by=['date'], inplace=False)
    # create an artificial index of dates at frequency = freq, with the same beginning and ending as the original data
    all_dates = pd.date_range(start=df.date.min(), end=df.date.max(), freq=freq)
    # record column names
    df_cols = df.columns
    # delete ffill_df.csv so we can begin anew
    try:
        os.remove('ffill_df.csv')
    except FileNotFoundError:
        pass
    # check for extra dates and/or dates out of order; print a warning statement for the log
    extra_dates = set(df.date).difference(all_dates)
    # if there are extra dates (outside of the expected sequence/frequency), deal with them
    if len(extra_dates) > 0:
        #############################
        # INSERT DESIRED BEHAVIOR HERE
        print('WARNING: Extra date(s):\n\t{}\n\t Shifting highlighted date(s) back by 1 day'.format(extra_dates))
        for date in extra_dates:
            # shift extra dates back one day
            df.date[df.date == date] = date - pd.Timedelta(days=1)
        #############################
    # check the artificial date index against df to identify missing gaps in time and fill them with nulls
    gaps = all_dates.difference(set(df.date))
    print('\n-------\nWARNING: Missing dates: {}\n-------\n'.format(gaps))
    # if there are time gaps, deal with them
    if len(gaps) > 0:
        # initialize a df of the correct size, filled with nulls
        gaps_df = pd.DataFrame(index=gaps, columns=df_cols.drop('date'))  # len(index) sets the number of rows
        # give the index a name
        gaps_df.index.name = 'date'
        # add the region and type (r and t are defined elsewhere in the original script)
        gaps_df.region = r
        gaps_df.type = t
        # remove that index so gaps_df and df are compatible
        gaps_df.reset_index(inplace=True)
        # append gaps_df to df
        new_df = pd.concat([df, gaps_df])
        # sort on date
        new_df.sort_values(by='date', inplace=True)
        # fill nulls
        new_df.fillna(method='ffill', inplace=True)
        new_df.fillna(method='bfill', inplace=True)
        # append to file
        new_df.to_csv('ffill_df.csv', mode='a', header=False, index=False)
    return df_cols, regions, types, all_dates  # regions and types also come from the surrounding script

How to time-efficiently remove values next to 'NaN' values?

I'm trying to remove wrong values from my data (a series of 15 mln values, 700 MB). The values to be removed are the values next to 'nan' values, e.g.:
Series: /1/,nan,/2/,3,/4/,nan,nan,nan,/8/,9
Numbers surrounded by slashes, i.e. /1/,/2/,/4/,/8/, are the values which should be removed.
The problem is that it takes way too long to compute this with the following code that I have:
%%time
import numpy as np
import pandas as pd

# sample data
speed = np.random.uniform(0, 25, 15000000)
next_speed = speed[1:]

# create a dataframe
data_dict = {'speed': speed[:-1],
             'next_speed': next_speed}
df = pd.DataFrame(data_dict)

# calculate difference between the current speed and the next speed
list_of_differences = []
for i in df.index:
    difference = df.next_speed[i] - df.speed[i]
    list_of_differences.append(difference)
df['difference'] = list_of_differences

# add 'nan' to data in form of a string.
for i in range(len(df.difference)):
    # arbitrary condition
    if df.difference[i] < -2:
        df.difference[i] = 'nan'

#########################################
# THE TIME-INEFFICIENT LOOP
# remove wrong values before and after 'nan'.
for i in range(len(df)):
    # check if the value is a number to skip computations of the following "if" cases
    if not(isinstance(df.difference[i], str)):
        continue
    # case 1: where there's only one 'nan' surrounded by values.
    # Without this case the algo will miss some wrong values because 'nan' will be removed
    # Example of a series: /1/,nan,/2/,3,4,nan,nan,nan,8,9
    # A number surrounded by slashes e.g. /1/ is a value to be removed
    if df.difference[i] == 'nan' and df.difference[i-1] != 'nan' and df.difference[i+1] != 'nan':
        df.difference[i-1] = 'wrong'
        df.difference[i+1] = 'wrong'
    # case 2: where the following values are 'nan': /1/, nan, nan, 4
    # E.g.: /1/, nan,/2/,3,/4/,nan,nan,nan,8,9
    elif df.difference[i] == 'nan' and df.difference[i+1] == 'nan':
        df.difference[i-1] = 'wrong'
    # case 3: where the next value is NOT 'nan': wrong, nan, nan, 4
    # E.g.: /1/, nan,/2/,3,/4/,nan,nan,nan,/8/,9
    elif df.difference[i] == 'nan' and df.difference[i+1] != 'nan':
        df.difference[i+1] = 'wrong'
How to make it more time-efficient?
This is still a work in progress for me. I knocked 100x off your dummy data size to get down to something I could stand to wait for.
I also added this code at the top of my version:
import time
current_milli_time = lambda: int(round(time.time() * 1000))

def mark(s):
    print("[{}] {}".format(current_milli_time()/1000, s))
This just prints a string with a time-mark in front of it, to see what's taking so long.
With that done, in your 'difference' column computation, you can replace the manual list generation with a vector operation. This code:
df = pd.DataFrame(data_dict)
mark("Got DataFrame")

# calculate difference between the current speed and the next speed
list_of_differences = []
for i in df.index:
    difference = df.next_speed[i] - df.speed[i]
    list_of_differences.append(difference)
df['difference'] = list_of_differences
mark("difference 1")

df['difference2'] = df['next_speed'] - df['speed']
mark('difference 2')
print(df[:10])
Produces this output:
[1490943913.921] Got DataFrame
[1490943922.094] difference 1
[1490943922.096] difference 2
next_speed speed difference difference2
0 18.008314 20.182982 -2.174669 -2.174669
1 14.736095 18.008314 -3.272219 -3.272219
2 5.352993 14.736095 -9.383102 -9.383102
3 5.854199 5.352993 0.501206 0.501206
4 2.003826 5.854199 -3.850373 -3.850373
5 12.736061 2.003826 10.732236 10.732236
6 2.512623 12.736061 -10.223438 -10.223438
7 18.224716 2.512623 15.712093 15.712093
8 14.023848 18.224716 -4.200868 -4.200868
9 15.991590 14.023848 1.967741 1.967741
Notice that the two difference columns are the same, but the second version took about 8 seconds less time. (Presumably 800 seconds when you have 100x more data.)
I did the same thing in the 'nanify' process:
df.difference2[df.difference2 < -2] = np.nan
The idea here is that many of the binary operators actually generate either a placeholder, or a Series or vector. And that can be used as an index, so that df.difference2 < -2 becomes (in essence) a list of the places where that condition is true, and you can then index either df (the whole table) or any of the columns of df, like df.difference2, using that index. It's a fast shorthand for the otherwise-slow python for loop.
Update
Okay, finally, here is a version that vectorizes the "Time-inefficient Loop". I'm just pasting the whole thing in at the bottom, for copying.
The premise is that the Series.isnull() method returns a boolean Series (column) that is true if the contents are "missing" or "invalid" or "bogus." Generally, this means NaN, but it also recognizes Python None, etc.
The tricky part, in pandas, is shifting that column up or down by one to reflect "around"-ness.
That is, I want another boolean column, where col[n-1] is true if col[n] is null. That's my "before a nan" column. And likewise, I want another column where col[n+1] is true if col[n] is null. That's my "after a nan" column.
It turns out I had to take the damn thing apart! I had to reach in and extract the underlying numpy array using the Series.values attribute, so that the pandas index would be discarded. Then a new index is created, starting at 0, and everything works again. (If you don't strip the index, the columns "remember" what their numbers are supposed to be. So even if you delete column[0], the column doesn't shift down. Instead, it knows "I am missing my [0] value, but everyone else is still in the right place!")
Anyway, with that figured out, I was able to build three columns (needlessly - they could probably be parts of an expression) and then merge them together into a fourth column that indicates what you want: the column is True when the row is before, on, or after a nan value.
missing = df.difference2.isnull()
df['is_nan'] = missing
df['before_nan'] = np.append(missing[1:].values, False)
df['after_nan'] = np.insert(missing[:-1].values, 0, False)
df['around_nan'] = df.is_nan | df.before_nan | df.after_nan
Here's the whole thing:
import numpy as np
import pandas as pd
import time

current_milli_time = lambda: int(round(time.time() * 1000))

def mark(s):
    print("[{}] {}".format(current_milli_time()/1000, s))

# sample data
speed = np.random.uniform(0, 25, 150000)
next_speed = speed[1:]

# create a dataframe
data_dict = {'speed': speed[:-1],
             'next_speed': next_speed}
df = pd.DataFrame(data_dict)
mark("Got DataFrame")

# calculate difference between the current speed and the next speed
list_of_differences = []
#for i in df.index:
#    difference = df.next_speed[i]-df.speed[i]
#    list_of_differences.append(difference)
#df['difference'] = list_of_differences
#mark("difference 1")
df['difference'] = df['next_speed'] - df['speed']
mark('difference 2')
df['difference2'] = df['next_speed'] - df['speed']

# add 'nan' to data in form of a string.
#for i in range(len(df.difference)):
#    # arbitrary condition
#    if df.difference[i] < -2:
#        df.difference[i] = 'nan'
df.difference[df.difference < -2] = np.nan
mark('nanify')
df.difference2[df.difference2 < -2] = np.nan
mark('nanify 2')

missing = df.difference2.isnull()
df['is_nan'] = missing
df['before_nan'] = np.append(missing[1:].values, False)
df['after_nan'] = np.insert(missing[:-1].values, 0, False)
df['around_nan'] = df.is_nan | df.before_nan | df.after_nan
mark('looped')

#########################################
# THE TIME-INEFFICIENT LOOP
# remove wrong values before and after 'nan'.
for i in range(len(df)):
    # check if the value is a number to skip computations of the following "if" cases
    if not(isinstance(df.difference[i], str)):
        continue
    # case 1: where there's only one 'nan' surrounded by values.
    # Without this case the algo will miss some wrong values because 'nan' will be removed
    # Example of a series: /1/,nan,/2/,3,4,nan,nan,nan,8,9
    # A number surrounded by slashes e.g. /1/ is a value to be removed
    if df.difference[i] == 'nan' and df.difference[i-1] != 'nan' and df.difference[i+1] != 'nan':
        df.difference[i-1] = 'wrong'
        df.difference[i+1] = 'wrong'
    # case 2: where the following values are 'nan': /1/, nan, nan, 4
    # E.g.: /1/, nan,/2/,3,/4/,nan,nan,nan,8,9
    elif df.difference[i] == 'nan' and df.difference[i+1] == 'nan':
        df.difference[i-1] = 'wrong'
    # case 3: where the next value is NOT 'nan': wrong, nan, nan, 4
    # E.g.: /1/, nan,/2/,3,/4/,nan,nan,nan,/8/,9
    elif df.difference[i] == 'nan' and df.difference[i+1] != 'nan':
        df.difference[i+1] = 'wrong'
mark('time-inefficient loop done')
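With the is_nan / before_nan / after_nan columns in place, the actual removal the question asks for is one more masked operation; a sketch (my addition), depending on whether you want the flagged values blanked or the rows dropped entirely:
# blank out the values that sit next to a NaN ...
df.loc[df.before_nan | df.after_nan, 'difference2'] = np.nan
# ... or drop every row that is a NaN or adjacent to one
cleaned = df[~df.around_nan]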
I am assuming that you don't want either the 'nan' values or the wrong values, and that there are not many NaN values compared to the size of the data. Please try this:
nan_idx = df[df['difference']=='nan'].index.tolist()
from copy import deepcopy
drop_list = deepcopy(nan_idx)
for i in nan_idx:
    if (i+1) not in drop_list and (i+1) < len(df):
        drop_list.append(i+1)
    if (i-1) not in drop_list and (i-1) >= 0:
        drop_list.append(i-1)
df = df.drop(df.index[drop_list])
If nan is not a string but an actual NaN (i.e. a missing value), then use this to get its indexes:
nan_idx = df[pd.isnull(df['difference'])].index.tolist()
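Equivalently (a sketch of the same neighbour-flagging idea, not part of the answer above), Series.shift builds the drop mask without a Python loop, assuming the placeholder really is NaN rather than the string 'nan':
is_nan = df['difference'].isnull()
drop_mask = is_nan | is_nan.shift(1, fill_value=False) | is_nan.shift(-1, fill_value=False)
df = df[~drop_mask]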

Python looping and Pandas rank/index quirk

This question pertains to one posted here:
Sort dataframe rows independently by values in another dataframe
In the linked question, I utilize a Pandas Dataframe to sort each row independently using values in another Pandas Dataframe. The function presented there works perfectly every single time it is directly called. For example:
import pandas as pd
import numpy as np
import os

## Generate example dataset
d1 = {}
d2 = {}
d3 = {}
d4 = {}

## generate data:
np.random.seed(5)
for col in list("ABCDEF"):
    d1[col] = np.random.randn(12)
    d2[col+'2'] = np.random.random_integers(0, 100, 12)
    d3[col+'3'] = np.random.random_integers(0, 100, 12)
    d4[col+'4'] = np.random.random_integers(0, 100, 12)
t_index = pd.date_range(start='2015-01-31', periods=12, freq="M")

# place data into dataframes
dat1 = pd.DataFrame(d1, index=t_index)
dat2 = pd.DataFrame(d2, index=t_index)
dat3 = pd.DataFrame(d3, index=t_index)
dat4 = pd.DataFrame(d4, index=t_index)

## Functions
def sortByAnthr(X, Y, Xindex, Reverse=False):
    # order the subset of X.index by Y
    ordrX = [x for (x, y) in sorted(zip(Xindex, Y), key=lambda pair: pair[1], reverse=Reverse)]
    return(ordrX)

def OrderRow(row, df):
    ordrd_row = df.ix[row.dropna().name, row.dropna().values].tolist()
    return(ordrd_row)

def r_selectr(dat2, dat1, n, Reverse=False):
    ordr_cols = dat1.apply(lambda x: sortByAnthr(x, dat2.loc[x.name, :], x.index, Reverse), axis=1).iloc[:, -n:]
    ordr_cols.columns = list(range(0, n))  # assign interpretable column names
    ordr_r = ordr_cols.apply(lambda x: OrderRow(x, dat1), axis=1)
    return([ordr_cols, ordr_r])

## Call functions
ordr_cols2, ordr_r = r_selectr(dat2, dat1, 5)

## print output:
print("Ordering set:\n", dat2.iloc[-2:, :])
print("Original set:\n", dat1.iloc[-2:, :])
print("Column ordr:\n", ordr_cols2.iloc[-2:, :])
As can be checked, the columns of dat1 are correctly ordered according to the values in dat2.
However, when called from a loop over dataframes, it does not rank/index correctly and produces completely dubious results. Although I am not quite able to recreate the problem using the reduced version presented here, the idea should be the same.
## Loop test:
out_list = []
data_dicts = {'dat2': dat2, 'dat3': dat3, 'dat4': dat4}
for i in range(3):
    # this outer for loop supplies different parameter values to a wrapper
    # function that calls r_selectr.
    for key in data_dicts.keys():
        ordr_cols, _ = r_selectr(data_dicts[key], dat1, 5)
        out_list.append(ordr_cols)
        # do stuff here

# print output:
print("Ordering set:\n", dat3.iloc[-2:, :])
print("Column ordr:\n", ordr_cols2.iloc[-2:, :])
In my code (almost completely analogous to the example given here), the ordr_cols are no longer ordered correctly for any of the sorting data frames.
I currently solve the issue by separating the ordering and indexing operations with r_selectr into two separate functions. That, for some reason, resolves the issue though I have no idea why.
