Angle Interpolation - Python

I am trying to interpolate angles that are stored in a list:
Dir   DirOffset
0     109.6085
30    77.5099
60    30.5287
90    -10.2748
120   -75.359
150   -147.6015
180   -162.7055
210   21.0103
240   3.5502
270   -11.5475
300   -39.8371
330   -109.5473
360   109.6085
I have written code to interpolate the angle (it repeatedly computes the circular mean of the bracketing angles, bisecting until it homes in on the value to interpolate), but it takes a long time. Please help me if someone has faster, shorter code.
from cmath import rect, phase
from math import radians, degrees

# Calculate the circular mean of a list of angles (in degrees)
def mean_angle(degArray):
    return degrees(phase(sum(rect(1, radians(d)) for d in degArray) / len(degArray)))

# Calculate the interpolated angle
def Interpolate_angle(Dir, DirOffset, ValuetoInterpolate):
    # Create lower and higher bins around ValuetoInterpolate
    DirLBin = round(float(ValuetoInterpolate) / 30, 0) * 30
    DirHBin = round(float(ValuetoInterpolate + 15) / 30, 0) * 30
    # Check if ValuetoInterpolate lies between the lower and higher bin
    if DirLBin == DirHBin:
        DirLBin = DirHBin - 30
        if DirLBin <= ValuetoInterpolate <= DirHBin:
            DBin = [float(DirLBin), float(DirHBin)]
            Doff = [DirOffset[Dir.index(DirLBin)], DirOffset[Dir.index(DirHBin)]]
        else:
            DirHBin = DirLBin + 30
            DBin = [float(DirLBin), float(DirHBin)]
            Doff = [DirOffset[Dir.index(DirLBin)], DirOffset[Dir.index(DirHBin)]]
    else:
        DBin = [float(DirLBin), float(DirHBin)]
        Doff = [DirOffset[Dir.index(DirLBin)], DirOffset[Dir.index(DirHBin)]]
    # Run 50 bisection iterations, taking circular means, to home in on ValuetoInterpolate
    for i in range(51):
        DMean = mean_angle(DBin)
        DOMean = mean_angle(Doff)
        if DMean < 0:
            DMean = 360 + DMean
        if DBin[0] <= ValuetoInterpolate <= DMean:
            DBin = [float(DBin[0]), float(DMean)]
            Doff = [float(Doff[0]), float(DOMean)]
        else:
            DBin = [float(DMean), float(DBin[1])]
            Doff = [float(DOMean), float(Doff[1])]
    return DOMean

Dir = list(range(0, 370, 30))
DirOffset = [109.6085, 77.5099, 30.5287, -10.2748, -75.359, -147.6015, -162.7055, 21.0103, 3.5502, -11.5475, -39.8371, -109.5473, 109.6085]
ValuetoInterpolate = 194.4
print(Interpolate_angle(Dir, DirOffset, ValuetoInterpolate))

I found the solution to the above question after searching Stack Overflow, then modified it slightly to fit my requirement. The solution might be useful for someone who needs it.
I interpolated the degrees using the function below for each directional bin (0, 30, 60, ..., 360; 360 and 0 degrees are the same), stored the results in a dictionary to create a pandas DataFrame, left-joined it with the main DataFrame, and processed further.
Calling InterpolateDegrees(109.6085, 77.5099) returns an interpolated array of DirectionOffset values for 0 to 30 degrees at an interval of 0.1 (0.0, 0.1, 0.2, ..., 29.7, 29.8, 29.9).
import numpy as np
from math import fabs

def InterpolateDegrees(start, end, BinSector=12):
    BinAngle = 360 / BinSector
    amount = np.arange(0, 1, (1 / (BinAngle / 0.1)))
    dif = fabs(end - start)
    if dif > 180:
        if end > start:
            start += 360
        else:
            end += 360
    # Interpolate it
    value = (start + ((end - start) * amount))
    # Wrap it
    rzero = 360
    Arr = np.where((value >= 0) & (value <= 360), (value), (value % rzero))
    return Arr
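As a usage sketch of the dictionary/DataFrame assembly described above (the dictionary keying and column names are my assumptions, not part of the original):

import pandas as pd

Dir = list(range(0, 370, 30))
DirOffset = [109.6085, 77.5099, 30.5287, -10.2748, -75.359, -147.6015,
             -162.7055, 21.0103, 3.5502, -11.5475, -39.8371, -109.5473, 109.6085]

records = {}
for lo, off_lo, off_hi in zip(Dir[:-1], DirOffset[:-1], DirOffset[1:]):
    # 300 interpolated offsets per 30-degree bin, keyed by direction at 0.1 steps
    for step, value in enumerate(InterpolateDegrees(off_lo, off_hi)):
        records[round(lo + step * 0.1, 1)] = value

lookup = pd.DataFrame(list(records.items()), columns=['Dir', 'DirOffset'])
# `lookup` can now be left-joined onto the main DataFrame on the 'Dir' column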

Here is a Pandas/NumPy-based solution for interpolating an angle series that contains NaN data.
import pandas as pd
import numpy as np

def interpolate_degrees(series: pd.Series) -> pd.Series:
    # Don't modify in place
    series = series.copy()
    # Convert to radians
    a = np.radians(series)
    # Unwrap the non-NaN values so the series is continuous across 360/0
    a[~np.isnan(a)] = np.unwrap(a[~np.isnan(a)])
    series.update(a)
    # Interpolate the unwrapped values
    interpolated = series.interpolate()
    # Wrap back into 0 - 2*pi
    wrapped = (interpolated + 2 * np.pi) % (2 * np.pi)
    # Convert back to degrees
    degrees = np.degrees(wrapped)
    series.update(degrees)
    return series
Usage:
angle = [350, np.nan, 355, np.nan, 359, np.nan, 1, np.nan, 10]
df = pd.DataFrame(data=angle, columns=['angle'])
df['interpolated'] = interpolate_degrees(df.angle)
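For a quick sanity check, the gaps should fill in smoothly through the 360/0 degree boundary. The values below are hand-computed from the function above, so treat them as indicative rather than program output:

# angle:        350.0  NaN    355.0  NaN    359.0  NaN  1.0  NaN  10.0
# interpolated: 350.0  352.5  355.0  357.0  359.0  0.0  1.0  5.5  10.0
print(df)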


np.where() to eliminate data, where coordinates are too close to each other

I'm doing aperture photometry on a cluster of stars, and to get easier detection of background signal, I want to only look at stars further apart than n pixels (n=16 in my case).
I have 2 arrays, xs and ys, with the x- and y-values of all the stars' coordinates:
Using np.where, I'm supposed to find the indices of all stars where the distance to every other star is >= n.
So far, my method has been a for-loop:
import numpy as np

# Lists of coordinates w. values between 0 and 2000 for 5000 stars
xs = np.random.rand(5000) * 2000
ys = np.random.rand(5000) * 2000

# for-loop, wherein the np.where statement in question is situated
n = 16
for i in range(len(xs)):
    index = np.where(np.sqrt(pow(xs[i] - xs, 2) + pow(ys[i] - ys, 2)) >= n)
Due to the stars being clustered pretty closely together, I expected a severe reduction in data; however, even when I tried n=1000 I still had around 4000 data points left.
Using just numpy (and part of the answer here):
X = np.random.rand(5000, 2) * 2000
XX = np.einsum('ij, ij ->i', X, X)
D_squared = XX[:, None] + XX - 2 * X.dot(X.T)
np.fill_diagonal(D_squared, np.inf)  # ignore each star's zero distance to itself
out = np.where(D_squared.min(axis=0) > n**2)
Using scipy.spatial.distance.pdist:
from scipy.spatial.distance import pdist, squareform

D_squared = squareform(pdist(X, metric='sqeuclidean'))
np.fill_diagonal(D_squared, np.inf)  # again, mask out the zero self-distances
out = np.where(D_squared.min(axis=0) > n**2)
Using a KDTree for maximum speed:
from scipy.spatial import KDTree
X_tree = KDTree(X)
in_radius = np.array(list(X_tree.query_pairs(n))).flatten()
out = np.where(~np.in1d(np.arange(X.shape[0]), in_radius))
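In all three variants, out holds the indices of the well-separated stars; a usage sketch (the variable name X_far is mine):

# `out` is the tuple returned by np.where; index with it to keep only isolated stars
X_far = X[out]  # shape (k, 2)
print(f"{len(X_far)} of {len(X)} stars are at least n={n} px from every other star")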
np.random.seed(seed=1)
xs = np.random.rand(5000, 1) * 2000
ys = np.random.rand(5000, 1) * 2000
n = 16

mask = (xs >= 0)  # start with every star unmasked
for i in range(len(xs)):
    if mask[i]:
        # mask all stars within n of star i, then restore star i itself
        index = np.where(np.sqrt(pow(xs[i] - xs, 2) + pow(ys[i] - ys, 2)) <= n)
        mask[index] = False
        mask[i] = True
x = xs[mask]
y = ys[mask]
print(len(x))
4220
You can use np.subtract.outer for creating the pairwise comparisons. Then you check for each row whether the distance is below 16 for exactly one item (which is the comparison with the particular star itself):
distances = np.sqrt(
    np.subtract.outer(xs, xs)**2
    + np.subtract.outer(ys, ys)**2
)
indices = np.nonzero(np.sum(distances < 16, axis=1) == 1)

Place points with variable density

Assume that you have an NxM matrix, with values in the range [0, 100]. What I'd like to do is place points with a density (inversely) proportional to the values in that area.
For example, here's a 2D Gaussian field, inverted s.t. the centroid has a value of 0, and the perimeter is at 100:
I'd like to pack the points so that they appear somewhat similar to this image:
Note how there is a radial spread outwards.
My attempt looks a little different :( ...
What I attempt to do is (i) generate a boolean array of the same shape and size, and (ii) move through the rows and columns. If the value of the boolean array at some point is True, then pass; otherwise, add a [row, col] point to a list and cover the boolean array with True in a radius proportional to the value in the Gaussian array.
The choice of a Gaussian for this example isn't important; the fundamental idea is: given a floating-point matrix, how can one place points with a density proportional to those values?
Any help very much appreciated :)
import matplotlib.pyplot as plt
import numpy as np
from math import exp

def gaussian(x, y, x0, y0, A=10.0, sigma_x=10.0, sigma_y=10.0):
    return A - A * exp(-((x - x0)**2 / (2 * sigma_x**2) + (y - y0)**2 / (2 * sigma_y**2)))

def generate_grid(width=100, height=100):
    grid = np.empty((width, height))
    for x in range(0, width):
        for y in range(0, height):
            grid[x][y] = gaussian(x, y, width / 2, height / 2, A=100.0)
    return grid

def cover_array(a, row, col, radius):
    nRows = np.shape(a)[0]
    nCols = np.shape(a)[1]
    mid = round(radius / 2)
    half_radius = int(round(radius))
    for x in range(-half_radius, half_radius):
        for y in range(-half_radius, half_radius):
            if row + x >= 0 and x + row < nRows and col + y >= 0 and y + col < nCols:
                if (x - mid)**2 + (y - mid)**2 <= radius**2:
                    a[row + x][col + y] = True

def pack_points(grid):
    points = []
    nRows = np.shape(grid)[0]
    nCols = np.shape(grid)[1]
    maxDist = 50.0
    minDist = 0.0
    maxEdge = 10.0
    minEdge = 5.0
    grid_min = 0.0
    grid_max = 100.0
    row = 0
    col = 0
    arrayCovered = np.zeros((nRows, nCols))
    while True:
        if row >= nRows:
            return np.array(points)
        if arrayCovered[row][col] == False:
            radius = maxEdge * ((grid[row][col] - grid_min) / (grid_max - grid_min))
            cover_array(arrayCovered, row, col, radius)
            points.append([row, col])
        col += 1
        if col >= nCols:
            row += 1
            col = 0

grid = generate_grid()
plt.imshow(grid)
plt.show()
points = pack_points(grid)
plt.scatter(points[:, 0], points[:, 1])
plt.show()
Here is a cheap and simple method, although it requires hand-setting an amount parameter:
import numpy as np
import matplotlib.pyplot as plt

def gaussian(x, y, x0, y0, A=10.0, sigma_x=10.0, sigma_y=10.0):
    return A - A * np.exp(-((x - x0)**2 / (2 * sigma_x**2) + (y - y0)**2 / (2 * sigma_y**2)))

def distribute_points(data, amount=1):
    # Bernoulli thinning: keep each cell with probability proportional to 1/data
    p = amount * (1 / data)
    r = np.random.random(p.shape)
    return np.where(p > r)

ii, jj = np.mgrid[-10:10:.1, -10:10:.1]
data = gaussian(ii, jj, 0, 0)
px, py = distribute_points(data, amount=.03)
plt.imshow(data)
plt.scatter(px, py, marker='.', c='#ff000080')
plt.xticks([])
plt.yticks([])
plt.xlim([0, len(ii)])
plt.ylim([0, len(jj)])
Result:
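One caveat worth noting: the inverted Gaussian is exactly 0 at the centroid, so 1 / data divides by zero there. A small guard avoids the runtime warning; this is a sketch, and the eps value is an arbitrary assumption:

def distribute_points_safe(data, amount=1, eps=1e-9):
    # Same Bernoulli thinning as above, but clamp `data` away from zero first
    p = amount * (1 / np.maximum(data, eps))
    r = np.random.random(p.shape)
    return np.where(p > r)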

numpy to generate discrete probability distribution

I'm following a code example I found at http://docs.scipy.org/doc/scipy/reference/tutorial/stats.html#subclassing-rv-discrete for implementing a random number generator for discrete values of a normal distribution. The exact example (not surprisingly) works quite well, but if I modify it to allow only left- or right-tailed results, the distribution around 0 is too low (bin zero should contain more values). I must have hit a boundary condition, but am unable to work it out. Am I missing something?
This is the result of counting the random numbers per bin:
np.bincount(rvs) [1082 2069 1833 1533 1199 837 644 376 218 111 55 20 12 7 2 2]
This is the histogram:
from scipy import stats
import numpy as np

np.random.seed(42)

def draw_discrete_gaussian(rng, tail='both'):
    # number of integer support points of the distribution minus 1
    npoints = rng if tail == 'both' else rng * 2
    npointsh = npoints // 2
    npointsf = float(npoints)
    # bounds for the truncated normal
    nbound = 4
    # actual bounds of truncated normal
    normbound = (1 + 1 / npointsf) * nbound
    # integer grid
    grid = np.arange(-npointsh, npointsh + 2, 1)
    # bin limits for the truncnorm
    gridlimitsnorm = (grid - 0.5) / npointsh * nbound
    # used later in the analysis
    gridlimits = grid - 0.5
    grid = grid[:-1]
    probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
    gridint = grid
    normdiscrete = stats.rv_discrete(values=(gridint, np.round(probs, decimals=7)), name='normdiscrete')
    # print('mean = %6.4f, variance = %6.4f, skew = %6.4f, kurtosis = %6.4f' % normdiscrete.stats(moments='mvsk'))
    rnd_val = normdiscrete.rvs()
    if tail == 'both':
        return rnd_val
    if tail == 'left':
        return -abs(rnd_val)
    elif tail == 'right':
        return abs(rnd_val)

rng = 15
tail = 'right'
rvs = [draw_discrete_gaussian(rng, tail=tail) for i in range(10000)]

if tail == 'both':
    rng_min = rng / -2.0
    rng_max = rng / 2.0
elif tail == 'left':
    rng_min = -rng
    rng_max = 0
elif tail == 'right':
    rng_min = 0
    rng_max = rng

gridlimits = np.arange(rng_min - .5, rng_max + 1.5, 1)
print(gridlimits)
f, l = np.histogram(rvs, bins=gridlimits)

# cheap way of creating a histogram
import matplotlib.pyplot as plt
%matplotlib inline
bins, edges = f, l
left, right = edges[:-1], edges[1:]
X = np.array([left, right]).T.flatten()
Y = np.array([bins, bins]).T.flatten()
# print('rvs', rvs)
print('np.bincount(rvs)', np.bincount(rvs))
plt.plot(X, Y)
plt.show()
I'll try to answer my own question, based on comments from @user333700 and @user235711:
I insert the following into the method, just before the normdiscrete = ... line:
if tail == 'right':
    gridint = gridint[npointsh:]
    probs = probs[npointsh:]
    s = probs.sum()
    probs = probs / s
elif tail == 'left':
    gridint = gridint[0:npointsh]
    probs = probs[0:npointsh]
    s = probs.sum()
    probs = probs / s
The resulting histograms look much nicer:

Relative Strength Index in python pandas

I am new to pandas. What is the best way to calculate the relative strength part in the RSI indicator in pandas? So far I got the following:
from pylab import *
import pandas as pd
import numpy as np

def Datapull(Stock):
    try:
        df = (pd.io.data.DataReader(Stock, 'yahoo', start='01/01/2010'))
        return df
        print('Retrieved', Stock)
        time.sleep(5)
    except Exception as e:
        print('Main Loop', str(e))

def RSIfun(price, n=14):
    delta = price['Close'].diff()
    # ----------- this is the part I am stuck on:
    dUp =
    dDown =
    RolUp = pd.rolling_mean(dUp, n)
    RolDown = pd.rolling_mean(dDown, n).abs()
    RS = RolUp / RolDown
    rsi = 100.0 - (100.0 / (1.0 + RS))
    return rsi

Stock = 'AAPL'
df = Datapull(Stock)
RSIfun(df)
Am I doing it correctly so far? I am having trouble with the difference part of the equation, where you separate out the upward and downward calculations.
It is important to note that there are various ways of defining the RSI. It is commonly defined in at least two ways: using a simple moving average (SMA) as above, or using an exponential moving average (EMA). Here's a code snippet that calculates various definitions of RSI and plots them for comparison. I'm discarding the first row after taking the difference, since it is always NaN by definition.
Note that when using EMA one has to be careful: since it includes a memory going back to the beginning of the data, the result depends on where you start! For this reason, typically people will add some data at the beginning, say 100 time steps, and then cut off the first 100 RSI values.
In the plot below, one can see the difference between the RSI calculated using SMA and EMA: the SMA one tends to be more sensitive. Note that the RSI based on EMA has its first finite value at the first time step (which is the second time step of the original period, due to discarding the first row), whereas the RSI based on SMA has its first finite value at the 14th time step. This is because by default rolling_mean() only returns a finite value once there are enough values to fill the window.
import datetime
from typing import Callable

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas_datareader.data as web

# Window length for moving average
length = 14

# Dates
start, end = '2010-01-01', '2013-01-27'

# Get data
data = web.DataReader('AAPL', 'yahoo', start, end)
# Get just the adjusted close
close = data['Adj Close']

# Define function to calculate the RSI
def calc_rsi(over: pd.Series, fn_roll: Callable) -> pd.Series:
    # Get the difference in price from previous step
    delta = over.diff()
    # Get rid of the first row, which is NaN since it did not have a previous row to calculate the differences
    delta = delta[1:]

    # Make the positive gains (up) and negative gains (down) Series
    up, down = delta.clip(lower=0), delta.clip(upper=0).abs()

    roll_up, roll_down = fn_roll(up), fn_roll(down)
    rs = roll_up / roll_down
    rsi = 100.0 - (100.0 / (1.0 + rs))

    # Avoid division-by-zero if `roll_down` is zero
    # This prevents inf and/or nan values.
    rsi[:] = np.select([roll_down == 0, roll_up == 0, True], [100, 0, rsi])
    rsi.name = 'rsi'

    # Assert range
    valid_rsi = rsi[length - 1:]
    assert ((0 <= valid_rsi) & (valid_rsi <= 100)).all()
    # Note: rsi[:length - 1] is excluded from the assertion above because it is NaN for SMA.
    return rsi

# Calculate RSI using MA of choice
# Reminder: Provide >= `1 + length` extra data points!
rsi_ema = calc_rsi(close, lambda s: s.ewm(span=length).mean())
rsi_sma = calc_rsi(close, lambda s: s.rolling(length).mean())
rsi_rma = calc_rsi(close, lambda s: s.ewm(alpha=1 / length).mean())  # Approximates TradingView.

# Compare graphically
plt.figure(figsize=(8, 6))
rsi_ema.plot(), rsi_sma.plot(), rsi_rma.plot()
plt.legend(['RSI via EMA/EWMA', 'RSI via SMA', 'RSI via RMA/SMMA/MMA (TradingView)'])
plt.show()
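As a sketch of the warm-up trick mentioned above (the 100-step pad is the suggestion from the text; the exact slicing is my assumption):

# Fetch ~100 extra rows before the window you care about, compute the RSI on the
# padded series, then drop the warm-up region where the EMA still "remembers" its start.
pad = 100
rsi_padded = calc_rsi(close, lambda s: s.ewm(span=length).mean())
rsi_usable = rsi_padded[pad:]  # keep only values past the warm-up period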
dUp = delta[delta > 0]
dDown = delta[delta < 0]
You also need something like:
RolUp = RolUp.reindex_like(delta, method='ffill')
RolDown = RolDown.reindex_like(delta, method='ffill')
otherwise RS = RolUp / RolDown will not do what you desire
Edit: it seems this is a more accurate way of calculating RS:
# dUp= delta[delta > 0]
# dDown= delta[delta < 0]
# dUp = dUp.reindex_like(delta, fill_value=0)
# dDown = dDown.reindex_like(delta, fill_value=0)
dUp, dDown = delta.copy(), delta.copy()
dUp[dUp < 0] = 0
dDown[dDown > 0] = 0
RolUp = pd.rolling_mean(dUp, n)
RolDown = pd.rolling_mean(dDown, n).abs()
RS = RolUp / RolDown
My answer is tested on StockCharts sample data.
StockChart RSI info
def RSI(series, period):
    delta = series.diff().dropna()
    u = delta * 0
    d = u.copy()
    u[delta > 0] = delta[delta > 0]
    d[delta < 0] = -delta[delta < 0]
    u[u.index[period-1]] = np.mean(u[:period])  # first value is the average of the first `period` gains
    u = u.drop(u.index[:(period-1)])
    d[d.index[period-1]] = np.mean(d[:period])  # first value is the average of the first `period` losses
    d = d.drop(d.index[:(period-1)])
    rs = pd.DataFrame.ewm(u, com=period-1, adjust=False).mean() / \
         pd.DataFrame.ewm(d, com=period-1, adjust=False).mean()
    return 100 - 100 / (1 + rs)

# sample data from StockCharts
data = pd.Series([44.34, 44.09, 44.15, 43.61,
                  44.33, 44.83, 45.10, 45.42,
                  45.84, 46.08, 45.89, 46.03,
                  45.61, 46.28, 46.28, 46.00,
                  46.03, 46.41, 46.22, 45.64])
print(RSI(data, 14))

# output
14    70.464135
15    66.249619
16    66.480942
17    69.346853
18    66.294713
19    57.915021
I too had this question and was working down the rolling_apply path that Jev took. However, when I tested my results, they didn't match up against the commercial stock charting programs I use, such as StockCharts.com or thinkorswim. So I did some digging and discovered that when Welles Wilder created the RSI, he used a smoothing technique now referred to as Wilder Smoothing. The commercial services above use Wilder Smoothing rather than a simple moving average to calculate the average gains and losses.
I'm new to Python (and Pandas), so I'm wondering if there's some brilliant way to refactor out the for loop below to make it faster. Maybe someone else can comment on that possibility.
I hope you find this useful.
More info here.
def get_rsi_timeseries(prices, n=14):
    # RSI = 100 - (100 / (1 + RS))
    # where RS = (Wilder-smoothed n-period average of gains / Wilder-smoothed n-period average of -losses)
    # Note that losses above should be positive values
    # Wilder smoothing = ((previous smoothed avg * (n-1)) + current value to average) / n
    # For the very first "previous smoothed avg" (aka the seed value), we start with a straight average.
    # Therefore, our first RSI value will be for the n+2nd period:
    #     0: first delta is nan
    #     1:
    #     ...
    #     n: lookback period for first Wilder smoothing seed value
    #     n+1: first RSI

    # First, calculate the gain or loss from one price to the next. The first value is nan so replace with 0.
    deltas = (prices - prices.shift(1)).fillna(0)

    # Calculate the straight average seed values.
    # The first delta is always zero, so we will use a slice of the first n deltas starting at 1,
    # and filter only deltas > 0 to get gains and deltas < 0 to get losses
    avg_of_gains = deltas[1:n+1][deltas > 0].sum() / n
    avg_of_losses = -deltas[1:n+1][deltas < 0].sum() / n

    # Set up pd.Series container for RSI values
    rsi_series = pd.Series(0.0, deltas.index)

    # Now calculate RSI using the Wilder smoothing method, starting with the n+1 delta.
    up = lambda x: x if x > 0 else 0
    down = lambda x: -x if x < 0 else 0
    i = n + 1
    for d in deltas[n+1:]:
        avg_of_gains = ((avg_of_gains * (n - 1)) + up(d)) / n
        avg_of_losses = ((avg_of_losses * (n - 1)) + down(d)) / n
        if avg_of_losses != 0:
            rs = avg_of_gains / avg_of_losses
            rsi_series[i] = 100 - (100 / (1 + rs))
        else:
            rsi_series[i] = 100
        i += 1
    return rsi_series
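A usage sketch with the StockCharts sample data quoted in the answer above (assuming pd and np are already imported):

import numpy as np
import pandas as pd

prices = pd.Series([44.34, 44.09, 44.15, 43.61, 44.33, 44.83, 45.10, 45.42,
                    45.84, 46.08, 45.89, 46.03, 45.61, 46.28, 46.28, 46.00,
                    46.03, 46.41, 46.22, 45.64])
rsi = get_rsi_timeseries(prices)
print(rsi.tail())  # the first n+1 entries stay 0.0 by construction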
You can use rolling_apply in combination with a subfunction to make a clean function like this:
def rsi(price, n=14):
    ''' rsi indicator '''
    gain = (price - price.shift(1)).fillna(0)  # calculate price gain vs. previous day; first row's nan is filled with 0
    def rsiCalc(p):
        # subfunction for calculating rsi for one lookback period
        avgGain = p[p > 0].sum() / n
        avgLoss = -p[p < 0].sum() / n
        rs = avgGain / avgLoss
        return 100 - 100 / (1 + rs)
    # run for all periods with rolling_apply
    return pd.rolling_apply(gain, n, rsiCalc)
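Note that pd.rolling_apply was removed in later pandas releases; a sketch of the modern equivalent (assuming pandas >= 1.0):

def rsi_modern(price, n=14):
    gain = price.diff().fillna(0)
    def rsiCalc(p):
        # p is a raw numpy window of n gains/losses
        avgGain = p[p > 0].sum() / n
        avgLoss = -p[p < 0].sum() / n
        return 100 - 100 / (1 + avgGain / avgLoss)
    # raw=True passes plain ndarrays to rsiCalc, matching the old rolling_apply behaviour
    return gain.rolling(n).apply(rsiCalc, raw=True)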
# Relative Strength Index
# Avg(PriceUp)/(Avg(PriceUp)+Avg(PriceDown))*100
# Where: PriceUp(t) = 1*(Price(t)-Price(t-1)) {Price(t)-Price(t-1) > 0};
#        PriceDown(t) = -1*(Price(t)-Price(t-1)) {Price(t)-Price(t-1) < 0};
# Change the formula for your own requirement
def rsi(values):
    up = values[values > 0].mean()
    down = -1 * values[values < 0].mean()
    return 100 * up / (up + down)

stock['RSI_6D'] = stock['Momentum_1D'].rolling(center=False, window=6).apply(rsi)
stock['RSI_12D'] = stock['Momentum_1D'].rolling(center=False, window=12).apply(rsi)
Momentum_1D = Pt - P(t-1), where P is the closing price and t is the date.
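The Momentum_1D column used above is not constructed in the snippet; a plausible definition (an assumption on my part, given a stock DataFrame with a 'Close' column) follows directly from the formula:

# Hypothetical: one-day momentum as the difference between consecutive closes
stock['Momentum_1D'] = stock['Close'].diff().fillna(0)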
You can get a massive speed-up of Bill's answer by using numba: 100 loops over a 20k-row series takes 113 seconds plain versus 0.28 seconds with numba. Numba excels at loops and arithmetic.
import numpy as np
import numba as nb

@nb.jit(fastmath=True, nopython=True)
def calc_rsi(array, deltas, avg_gain, avg_loss, n):
    # Use the Wilder smoothing method
    up = lambda x: x if x > 0 else 0
    down = lambda x: -x if x < 0 else 0
    i = n + 1
    for d in deltas[n+1:]:
        avg_gain = ((avg_gain * (n - 1)) + up(d)) / n
        avg_loss = ((avg_loss * (n - 1)) + down(d)) / n
        if avg_loss != 0:
            rs = avg_gain / avg_loss
            array[i] = 100 - (100 / (1 + rs))
        else:
            array[i] = 100
        i += 1
    return array

def get_rsi(array, n=14):
    deltas = np.append([0], np.diff(array))
    avg_gain = np.sum(deltas[1:n+1].clip(min=0)) / n
    avg_loss = -np.sum(deltas[1:n+1].clip(max=0)) / n
    array = np.empty(deltas.shape[0])
    array.fill(np.nan)
    array = calc_rsi(array, deltas, avg_gain, avg_loss, n)
    return array

rsi = get_rsi(your_array_or_series, 14)
def rsi_Indictor(close, n_days):
    rsi_series = pd.DataFrame(close)

    # Change = close[i] - close[i-1]
    rsi_series["Change"] = (rsi_series["Close"] - rsi_series["Close"].shift(1)).fillna(0)

    # Upward Movement
    rsi_series["Upword Movement"] = (rsi_series["Change"][rsi_series["Change"] > 0])
    rsi_series["Upword Movement"] = rsi_series["Upword Movement"].fillna(0)

    # Downward Movement
    rsi_series["Downword Movement"] = (abs(rsi_series["Change"])[rsi_series["Change"] < 0]).fillna(0)
    rsi_series["Downword Movement"] = rsi_series["Downword Movement"].fillna(0)

    # Average Upward Movement
    # The first value is the mean of the first n_days elements.
    rsi_series["Average Upword Movement"] = 0.00
    rsi_series["Average Upword Movement"][n_days] = rsi_series["Upword Movement"][1:n_days+1].mean()
    # From the second value onwards
    for i in range(n_days+1, len(rsi_series), 1):
        rsi_series["Average Upword Movement"][i] = (rsi_series["Average Upword Movement"][i-1]*(n_days-1) + rsi_series["Upword Movement"][i]) / n_days

    # Average Downward Movement
    # The first value is the mean of the first n_days elements.
    rsi_series["Average Downword Movement"] = 0.00
    rsi_series["Average Downword Movement"][n_days] = rsi_series["Downword Movement"][1:n_days+1].mean()
    # From the second value onwards
    for i in range(n_days+1, len(rsi_series), 1):
        rsi_series["Average Downword Movement"][i] = (rsi_series["Average Downword Movement"][i-1]*(n_days-1) + rsi_series["Downword Movement"][i]) / n_days

    # Relative Strength
    rsi_series["Relative Strength"] = (rsi_series["Average Upword Movement"] / rsi_series["Average Downword Movement"]).fillna(0)
    # RSI
    rsi_series["RSI"] = 100 - 100 / (rsi_series["Relative Strength"] + 1)
    return rsi_series.round(2)
For More Information
Just to add to the above, you can also do this using the finta package.
Reference: https://github.com/peerchemist/finta/tree/master/examples
import pandas as pd
from finta import TA
import matplotlib.pyplot as plt
ohlc = pd.read_csv("C:\\WorkSpace\\Python\\ta-lib\\intraday_5min_IBM.csv", index_col="timestamp", parse_dates=True)
ohlc['RSI']= TA.RSI(ohlc)
It is not really necessary to calculate the mean: since the two averages are divided, the common denominator cancels and only the sums are needed, so we can use Series.cumsum:
def rsi(serie, n):
    diff_serie = serie.diff()
    cumsum_incr = diff_serie.where(lambda x: x.gt(0), 0).cumsum()
    cumsum_decr = diff_serie.where(lambda x: x.lt(0), 0).abs().cumsum()
    rs_serie = cumsum_incr.div(cumsum_decr)
    rsi = rs_serie.mul(100).div(rs_serie.add(1)).fillna(0)
    return rsi
Less code here, but it seems to work for me:
df['Change'] = (df['Close'].shift(-1)-df['Close']).shift(1)
df['ChangeAverage'] = df['Change'].rolling(window=2).mean()
df['ChangeAverage+'] = df.apply(lambda x: x['ChangeAverage'] if x['ChangeAverage'] > 0 else 0,axis=1).rolling(window=14).mean()
df['ChangeAverage-'] = df.apply(lambda x: x['ChangeAverage'] if x['ChangeAverage'] < 0 else 0,axis=1).rolling(window=14).mean()*-1
df['RSI'] = 100-(100/(1+(df['ChangeAverage+']/df['ChangeAverage-'])))

Mask a circular sector in a numpy array

I have a code that slices a numpy array into a circle. I wish to recover only the values included in a certain range of angles from the circle and mask the array. For example: mask the original array with the (x,y) positions comprised between 0 and 45 degrees of the circle.
Is there a pythonic way of doing so?
Here's my (simplified) original code:
import numpy as np
matrix = np.zeros((500,500))
x = 240
y = 280
radius = 10
mask=np.ogrid[x-radius:x+radius+1,y-radius:y+radius+1]
matrix[mask]
Thanks in advance
Edit: I omitted that radius can vary.
I would do this by converting from cartesian to polar coordinates and constructing boolean masks for the circle and for the range of angles you want:
import numpy as np

def sector_mask(shape, centre, radius, angle_range):
    """
    Return a boolean mask for a circular sector. The start/stop angles in
    `angle_range` should be given in clockwise order.
    """
    x, y = np.ogrid[:shape[0], :shape[1]]
    cx, cy = centre
    tmin, tmax = np.deg2rad(angle_range)

    # ensure stop angle > start angle
    if tmax < tmin:
        tmax += 2 * np.pi

    # convert cartesian --> polar coordinates
    r2 = (x - cx) * (x - cx) + (y - cy) * (y - cy)
    theta = np.arctan2(x - cx, y - cy) - tmin

    # wrap angles between 0 and 2*pi
    theta %= (2 * np.pi)

    # circular mask
    circmask = r2 <= radius * radius

    # angular mask
    anglemask = theta <= (tmax - tmin)

    return circmask * anglemask
For example:
from matplotlib import pyplot as pp
from scipy.misc import lena
matrix = lena()
mask = sector_mask(matrix.shape,(200,100),300,(0,50))
matrix[~mask] = 0
pp.imshow(matrix)
pp.show()
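Note that scipy.misc.lena has been removed from recent SciPy releases, so here is a sketch of the same demo with a stand-in image (scipy.datasets requires SciPy >= 1.10; any 2-D array works):

from matplotlib import pyplot as pp
from scipy import datasets

matrix = datasets.ascent().copy()  # 512x512 grayscale test image
mask = sector_mask(matrix.shape, (200, 100), 300, (0, 50))
matrix[~mask] = 0
pp.imshow(matrix)
pp.show()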
Same approach for centered circles in square matrices:
import numpy as num  # this answer aliases numpy as `num`

def circleMask(mat, r=0):
    if mat.shape[0] != mat.shape[1]:
        raise TypeError('Matrix has to be square')
    if not isinstance(r, int):
        raise TypeError('Radius has to be of type int')
    s = mat.shape[0]
    d = num.abs(num.arange(-s/2 + s%2, s/2 + s%2))
    dm = num.sqrt(d[:, num.newaxis]**2 + d[num.newaxis, :]**2)
    return num.logical_and(dm >= r - .5, dm < r + .5)
Looping over this implicit function is costly, though!
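For instance, a radial profile built by calling circleMask once per integer radius; this per-radius loop is the costly part (the example array is mine):

import numpy as num

mat = num.random.rand(100, 100)
# Mean value on each one-pixel-wide ring around the centre
radial_profile = [mat[circleMask(mat, r)].mean() for r in range(50)]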
