I'm using this code to calculate pivot points.
def pivots_low(osc, LBR, LBL):
    pivots = []
    for i in range(len(osc) - LBR):
        pivots.append(0)
        pivot = True
        if i > LBL:
            for j in range(1, LBR + 1):
                if osc[i] >= osc[i + j]:
                    pivot = False
            for j in range(1, LBL + 1):
                if osc[i] > osc[i - j]:
                    pivot = False
            if pivot is True:
                pivots[len(pivots) - 1] = osc[i]
    for i in range(LBR):
        pivots.append(0)
    return pivots
This returns a list with 0 where there is no pivot and the value of the pivot where there is one.
When comparing the results to TradingView (a downloaded csv with pivot points), the only time it matches exactly is when lookback left and right are both 5. Otherwise it deviates in the total number of pivots and in the location of some of them.
But using this code to calculate pivot highs:
def pivots_high(osc, LBR, LBL):
    pivots = []
    for i in range(len(osc) - LBR):
        pivots.append(0)
        pivot = True
        if i > LBL:
            for j in range(1, LBL + 1):
                if osc[i] < osc[i - j]:
                    pivot = False
            for j in range(1, LBR + 1):
                if osc[i] <= osc[i + j]:
                    pivot = False
            if pivot is True:
                pivots[len(pivots) - 1] = osc[i]
    for i in range(LBR):
        pivots.append(0)
    return pivots
the results are perfect regardless of the lookback values. But the code is almost exactly the same apart from the comparisons.
What is going wrong here? This is day 3 of having this problem and I just can't fix it.
To Reproduce:
Load Data:
Full_Data = pd.read_csv(file)
Use this simple function to check matches between the calculated pivots and the TradingView pivots.
def match_pivs(data, pivs_h, pivs_l):  # data is a DataFrame loaded from the TradingView csv
    global lblh
    global lbrh
    global lbll
    global lbrl
    start = lbrh
    if lbrl > lbrh:
        start = lbrl
    match_h = 0
    tot_hd = 0
    tot_hp = 0
    match_l = 0
    tot_ld = 0
    tot_lp = 0
    for i in range(start, len(data)):
        if data['PivHigh'][i] != 0 and pivs_h[i - lbrh] != 0:
            match_h += 1
        if data['PivLow'][i] != 0 and pivs_l[i - lbrl] != 0:
            match_l += 1
        if data['PivHigh'][i] != 0:
            tot_hd += 1
        if data['PivLow'][i] != 0:
            tot_ld += 1
        if pivs_h[i] != 0:
            tot_hp += 1
        if pivs_l[i] != 0:
            tot_lp += 1
    print('PivsLow ' + str(tot_lp))
    print('DataLows ' + str(tot_ld))
    print('MatchesL ' + str(match_l))
    print('PivsHigh ' + str(tot_hp))
    print('DataHighs ' + str(tot_hd))
    print('MatchesH ' + str(match_h))
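For completeness, the four lookback globals need to exist before match_pivs is called; the values below are placeholders (the Pine script further down defaults to 10):
lblh, lbrh = 10, 10  # lookbacks used for pivots_high
lbll, lbrl = 10, 10  # lookbacks used for pivots_low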
and to get the csv from TradingView:
//@version=5
indicator("Data Script", overlay=true, max_labels_count=500)
lengthGroupTitle = "Pivot Lengths"  // group title string (assumed; it was not shown in the original snippet)
leftLenL = input.int(title="Pivot Low", defval=10, minval=1, inline="Pivot Low", group=lengthGroupTitle)
rightLenL = input.int(title="/", defval=10, minval=1, inline="Pivot Low", group=lengthGroupTitle)
leftLenH = input.int(title="Pivot High", defval=10, minval=1, inline="Pivot High", group=lengthGroupTitle)
rightLenH = input.int(title="/", defval=10, minval=1, inline="Pivot High", group=lengthGroupTitle)
ph = ta.pivothigh(leftLenH, rightLenH)
pl = ta.pivotlow(leftLenL, rightLenL)
plth = 0.0  // declared so the := reassignments below compile
pltl = 0.0
if not na(ph)
    plth := ph
else
    plth := 0.0
if not na(pl)
    pltl := pl
else
    pltl := 0.0
plot(plth, 'PivHigh')
plot(pltl, 'PivLow')
then just download csv with this script loaded.
Run the program with these three lines:
pl = pivots_low(Full_Data['low'], lbll, lbrl)
ph = pivots_high(Full_Data['high'], lbrh, lblh)
match_pivs(Full_Data, ph, pl)
Finally found a way.
I still have no idea why that code does not work, but I've written a different version that seems to match the TradingView data 100%.
def checkhl(data_back, data_forward, hl):
    # data_back holds the LBL bars before the candidate plus the candidate itself (last element);
    # data_forward holds the LBR bars after it.
    if hl == 'high' or hl == 'High':
        ref = data_back[len(data_back) - 1]
        for i in range(len(data_back) - 1):
            if ref < data_back[i]:
                return 0
        for i in range(len(data_forward)):
            if ref <= data_forward[i]:
                return 0
        return 1
    if hl == 'low' or hl == 'Low':
        ref = data_back[len(data_back) - 1]
        for i in range(len(data_back) - 1):
            if ref > data_back[i]:
                return 0
        for i in range(len(data_forward)):
            if ref >= data_forward[i]:
                return 0
        return 1

def pivot(osc, LBL, LBR, highlow):
    left = []
    right = []
    pivots = []
    for i in range(len(osc)):
        pivots.append(0.0)
        if i < LBL + 1:
            left.append(osc[i])
        if i > LBL:
            right.append(osc[i])
        if i > LBL + LBR:
            # slide the two windows forward by one bar and test the candidate
            left.append(right[0])
            left.pop(0)
            right.pop(0)
            if checkhl(left, right, highlow):
                pivots[i - LBR] = osc[i - LBR]
    return pivots
then just do:
pivots_low = pivot(data, lbl, lbr, 'low')
pivots_high = pivot(data, lbl, lbr, 'high')
All the pivots will be at the actual position where they occur, not lbr bars later; everywhere else the value will be 0.0.
I'm not sure if this is efficient or not, but it seems to work.
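For reference, a shorter NumPy sketch of the same window rule (my own, untested against TradingView; it assumes the same tie handling as checkhl above, i.e. ties allowed on the look-back side and strict comparison on the look-forward side):
import numpy as np

def pivot_np(osc, LBL, LBR, highlow):
    # A high must be >= every one of the LBL bars before it and > every one of the
    # LBR bars after it (mirrored for lows), matching checkhl() above.
    osc = np.asarray(osc, dtype=float)
    out = np.zeros(len(osc))
    for i in range(LBL, len(osc) - LBR):
        left = osc[i - LBL:i]
        right = osc[i + 1:i + 1 + LBR]
        if highlow == 'high':
            if (osc[i] >= left).all() and (osc[i] > right).all():
                out[i] = osc[i]
        else:
            if (osc[i] <= left).all() and (osc[i] < right).all():
                out[i] = osc[i]
    return out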
I am trying to calculate RSI using simple functions.
The general formula for it is:
RSI = 100/(1+RS), where RS = (exponential moving average of gains) / (exponential moving average of losses).
Here is what I am getting:
[screenshot: my RSI plot]
Here is how it should look:
[screenshot: reference RSI plot]
I have double-checked and even triple-checked everything, but I can't find any mistake.
So I need your help; I know the question is very simple, but I have no idea where I have made the mistake.
The general idea of RSI is that it should be low where the price is low and high where the price is high, yet no matter what I try, mine comes out upside down.
def EMA(close_price_arr, n):
    a = (2/n + 1)
    EMA_n = np.empty((1, len(close_price_arr)))
    for i in range(len(close_price_arr)):
        if i < n:
            # creating NaN values where it is impossible to calculate the EMA, to drop them later after joining the whole database
            EMA_n[0, i] = 'NaN'
        if i >= n:
            # calculating nominator and denominator of the EMA
            nominator_ema = 0
            denominator_ema = 0
            for j in range(n):
                nominator_ema += close_price_arr[i - j] * a**(j)
                denominator_ema += a**(j)
            EMA_n[0, i] = nominator_ema / denominator_ema
    return EMA_n
def gains(close_price_arr):
    gain_arr = np.empty((len(close_price_arr) - 1))
    for i in range(len(close_price_arr)):
        if i == 0:
            pass
        if i >= 1:
            if close_price_arr[i] > close_price_arr[i - 1]:
                gain_arr[i - 1] = (close_price_arr[i] - close_price_arr[i - 1])
            else:
                gain_arr[i - 1] = 0
    return gain_arr

def losses(close_price_arr):
    loss_arr = np.empty((len(close_price_arr) - 1))
    for i in range(len(close_price_arr)):
        if i == 0:
            pass
        if i >= 1:
            if close_price_arr[i] < close_price_arr[i - 1]:
                loss_arr[i - 1] = abs(close_price_arr[i] - close_price_arr[i - 1])
            else:
                loss_arr[i - 1] = 0
    return loss_arr

def RSI(gain_arr, loss_arr, n):
    EMA_u = EMA(gain_arr, n)
    EMA_d = EMA(loss_arr, n)
    EMA_diff = EMA_u / EMA_d
    x, y = EMA_diff.shape
    print(x, y)
    RSI_n = np.empty((1, y))
    for i in range(y):
        if EMA_diff[0, i] == 'NaN':
            RSI_n[0, i] = 'NaN'
            print(i)
        else:
            RSI_n[0, i] = 100 / (1 + EMA_diff[0, i])
    return RSI_n
import os
import numpy as np
import pandas as pd
from contextlib import contextmanager

@contextmanager
def show_complete_array():
    oldoptions = np.get_printoptions()
    np.set_printoptions(threshold=np.inf)
    try:
        yield
    finally:
        np.set_printoptions(**oldoptions)
np.set_printoptions(linewidth=3000)
pd.set_option('display.max_columns', None)
# Specifying the root folder, file folder and file
FILE = 'TVC_SILVER, 5.csv'
FOLDER = 'src'
PROJECT_ROOT_DIR = '.'
csv_path = os.path.join(PROJECT_ROOT_DIR, FOLDER, FILE)
# reading csv
price_data = pd.read_csv(csv_path, delimiter=',')
price_data_copy = price_data.copy()
price_data_nodate = price_data.copy().drop('time', axis=1)
price_data_np = price_data_nodate.to_numpy(dtype='float32')
close_price = price_data_np[:, 3]
EMA15 = EMA(close_price_arr=close_price, n=15)
EMA55 = EMA(close_price_arr=close_price, n=55)
gain = gains(close_price_arr=close_price)
loss = losses(close_price_arr=close_price)
RSI14 = RSI(gain_arr=gain, loss_arr=loss, n=14)
Try this:
"""dataset is a dataframe"""
def RSI(dataset, n=14):
delta = dataset.diff()
dUp, dDown = delta.copy(), delta.copy()
dUp[dUp < 0] = 0
dDown[dDown > 0] = 0
RolUp = pd.Series(dUp).rolling(window=n).mean()
RolDown = pd.Series(dDown).rolling(window=n).mean().abs()
RS = RolUp / RolDown
rsi= 100.0 - (100.0 / (1.0 + RS))
return rsi
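A minimal usage sketch (my own; the csv path and the 'close' column name are assumptions based on the question):
import pandas as pd

# Hypothetical usage of the RSI() above; file path and column name are assumptions.
price_data = pd.read_csv('./src/TVC_SILVER, 5.csv')
rsi14 = RSI(price_data['close'], n=14)
print(rsi14.tail())
Note that the last line of RSI() computes 100.0 - (100.0 / (1.0 + RS)); using 100 / (1 + RS) on its own produces exactly the inverted curve described in the question.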
I have a function that does the following:
It inserts class values 1, 2, 3 based on timestamps. This works as expected, and in the first iteration of the first for-loop I get the following class distribution:
mapping: {'Seizure': 1, 'Preictal': 2, 'Interictal': 3}
value counts:
3.0 3150000
2.0 450000
1.0 28000
Name: class, dtype:
So I have this number of rows for each class.
However, in the second for-loop I iterate through the same list of timestamps and want to subset the data between the timestamps, with some conditions based on the classes I inserted in the first for-loop.
This is the result for the same timestamps, e.g. the first iteration:
len sz: 28000
len prei: 450000
len pre int: 29700000
logging
len post int: 1485499
How on earth do pre int and post int (the interictal class) get counts this high? They don't correspond at all to the number of interictal rows in the first output.
Here is my function.
def insert_class_col(dataframe, sz_info_list, date_converter, save_filename, save_path, file_sample_rate, file_channel):
    print(f"sz_info_list: {sz_info_list}")
    if "class" not in dataframe.columns:
        dataframe.insert(0, "class", np.nan)
    file_channel.extend(['timestamp', 'class'])
    dataframe = dataframe[file_channel]
    # Insert class attributes so that seizure, preictal and interictal do not overlap.
    for index, container in enumerate(sz_info_list):
        delay = container.delay * 1000
        duration = container.duration * 1000
        sz_start = date_converter(container.time_emu) + delay
        sz_end = sz_start + duration
        print(f"sz_start index = {sz_start}")
        print(f"sz_end: {sz_end}")
        preictal_start = sz_start - (15 * 60 * 1000)
        interictal_start = sz_start - (1 * 60 * 60 * 1000)
        interictal_end = sz_end + (1 * 60 * 60 * 1000)
        dataframe['timestamp'] = pd.to_numeric(dataframe['timestamp'])
        # if the data is seizure, tag it seizure
        # if the data is preictal, tag it preictal/interictal, but not inside seizure data
        dataframe.loc[(dataframe['timestamp'] >= sz_start) & (dataframe['timestamp'] < sz_end), "class"] = class_mapping['Seizure']
        dataframe.loc[(dataframe['class'] != class_mapping['Seizure']) & (dataframe['timestamp'] >= preictal_start) & (dataframe['timestamp'] < sz_start), "class"] = class_mapping['Preictal']
        dataframe.loc[(dataframe['class'] != class_mapping['Seizure']) & (dataframe['class'] != class_mapping['Preictal']) & (dataframe['timestamp'] >= interictal_start) & (dataframe['timestamp'] < interictal_end), "class"] = class_mapping['Interictal']
        print(f"mapping: {class_mapping} \n value counts: \n{dataframe['class'].value_counts()}")
    print(f"Beginning current number of class in df {dataframe['class'].value_counts()}")

    # Saving to csv
    for index, container in enumerate(sz_info_list):
        delay = container.delay * 1000
        duration = container.duration * 1000
        sz_start = date_converter(container.time_emu) + delay
        sz_end = sz_start + duration
        print(f"sz_start index = {sz_start}")
        print(f"sz_end: {sz_end}")
        preictal_start = sz_start - (15 * 60 * 1000)
        interictal_start = sz_start - (1 * 60 * 60 * 1000)
        interictal_end = sz_end + (1 * 60 * 60 * 1000)
        dataframe['timestamp'] = pd.to_numeric(dataframe['timestamp'])
        # INSERTING SEIZURE CLASS
        sz_df = dataframe[(dataframe['timestamp'] >= sz_start) & (dataframe['timestamp'] < sz_end)].copy()
        print(f"len sz: {len(sz_df)}")
        #df_save_compress(f"Seizure_{index}_{save_filename}", save_path + "/Seizure", sz_df)
        #logging_info_txt(f"Seizure_{index}_{save_filename}", save_path, file_sample_rate, file_channel)
        # INSERTING PREICTAL
        prei_df = dataframe[(dataframe['timestamp'] >= preictal_start) & (dataframe['timestamp'] < sz_start) & (dataframe['class'] != class_mapping["Seizure"])].copy()
        print(f"len prei: {len(prei_df)}")
        #df_save_compress(f"Preictal_{index}_{save_filename}", save_path + "/Preictal", prei_df)
        #logging_info_txt(f"Preictal_{index}_{save_filename}", save_path, file_sample_rate, file_channel)
        # INSERTING INTERICTAL
        pre_int_df = dataframe[(dataframe['timestamp'] >= interictal_start) & (dataframe['timestamp'] < preictal_start) & (dataframe['class'] != class_mapping["Seizure"]) | (dataframe['class'] != class_mapping["Preictal"])].copy()
        print(f"len pre int: {len(pre_int_df)}")
        #df_save_compress(f"PreInt_{index}_{save_filename}", save_path + "/Interictal", pre_int_df)
        logging_info_txt(f"PreInt_{index}_{save_filename}", save_path, file_sample_rate, file_channel)
        post_int_df = dataframe[(dataframe['timestamp'] >= sz_end) & (dataframe['timestamp'] < interictal_end) & (dataframe['class'] != class_mapping["Seizure"]) & (dataframe['class'] != class_mapping["Preictal"])].copy()
        print(f"len post int: {len(post_int_df)}")
        #df_save_compress(f"PostInt_{index}_{save_filename}", save_path + "/Interictal", post_int_df)
        logging_info_txt(f"PostInt_{index}_{save_filename}", save_path, file_sample_rate, file_channel)
        #print(f"after = len df: {len(dataframe)} values class: \n {dataframe['class'].value_counts()}")
        # clean up
        del pre_int_df, post_int_df, sz_df, prei_df
        gc.collect()
Notice that pre int, which is interictal, is 29700000, while according to the printed class counts it should be lower than 3150000.
Any ideas about this pandas behavior?
Edit: #richardec answered the question, see the comments.
I am trying to convert arrays that contain both Inches and Feet into Inches. The feet are denoted with a single quote (') and inches are denoted with double quotes ("). The data comes in the following forms:
[8'x10']
[60\" x 72\"]
[5'x8',5x8,60\"x92\"]
[8'10\"x12']
What I want:
["96x120"]
["60x72"]
["60x96","60x96","60x96","60x92"]
["106x144"]
What I have:
def ft_inch(numbers):
    if str(numbers).find("x") > -1:
        numbers = numbers.replace('[', '').replace('"', '').replace('[', '').replace(']', '')
        try:
            nom = numbers.split("x")[0]
            nom = nom.replace(r'\\|\"|\]|\[', '')
            nom_one = nom.split("'")[0]
            nom_two = nom.split("'")[1]
            den = numbers.split("x")[1]
            den = den.replace(r'\\|\"|\[|\]', '')
            den_one = den.split("'")[0]
            den_two = den.split("'")[1]
            ft = int(nom_one) * 12
            inch = nom_two.replace(r'\"| |\\', '')
            try:
                inch = int(inch)
            except:
                print('B')
            tmp = int(ft) + int(inch)
            fts = int(den_one) * 12
            inchs = den_two.replace(r'\"| |\\', '')
            try:
                inchs = int(inchs)
            except:
                print('B')
            tmp_two = int(fts) + int(inch)
            return f'["{tmp}x{tmp_two}"]'
        except:
            return numbers
    else:
        return numbers

x = "[5'1x8'1]"
ft_inch(x)
This works for a single array as long as it has both feet and inches, but it fails if it is only feet, e.g. [8'x8']. If anyone has a simpler solution, please let me know.
A regex-based approach:
import re

inputs = [["8'1x10'1"], ["60\" x 72\""], ["5'x8'", "5x8", "60\"x92\""], ["8'10\"x12'"]]
for inpt in inputs:
    sub_output = []
    for measurement in inpt:
        m = re.match(r"(\d+['\"]?)(\d+['\"]?)?x(\d+['\"]?)(\d+['\"]?)?",
                     "".join(measurement.split()))
        groups = [m.groups()[:2], m.groups()[2:]]
        result_inches = [0, 0]
        for i, group in enumerate(groups):
            for raw_val in group:
                if raw_val == None:
                    continue
                if '"' in raw_val:
                    result_inches[i] += int(raw_val[:-1])
                elif "'" in raw_val:
                    result_inches[i] += int(raw_val[:-1]) * 12
                else:
                    result_inches[i] += int(raw_val) * 12
        sub_output.append(result_inches)
    print([f"{x}x{y}" for x, y in sub_output])
Output:
['108x132']
['60x72']
['60x96', '60x96', '60x92']
['106x144']
I saw your edit and included the ["8'1x10'1"] case :)
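If the raw data really arrives as the bracketed strings shown in the question (e.g. [5'x8',5x8,60"x92"]), a small glue step can turn each one into the list form used above (a sketch; it assumes each value is a plain string, not a Python list):
# Hypothetical pre-processing for the bracketed-string form from the question.
raw = "[5'x8',5x8,60\"x92\"]"
measurements = raw.strip('[]').split(',')   # -> ["5'x8'", '5x8', '60"x92"']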
I rewrote the entire thing, but this seems to work:
input_ = ["8'x10'", "60\" x 72\"", "5'x8'", "5x8", "60\"x92\"", "8'10\"x12'", "8'1x10'1\""]
inches_only = []
for s in input_:
    s = s.replace(" ", "")  # str.replace returns a new string, so reassign it
    sides = s.split("x")
    new_sides = []
    for side in sides:
        inches = 0
        split1 = side.split("'")
        if len(split1) > 1 or (len(split1) == 1 and not side.__contains__('"')):
            inches = int(split1[0]) * 12
        split2 = side.split('"')
        if len(split2) > 1:
            inches += int(split2[0].split("'")[-1])
        elif len(split2) == 1 and len(split1) > 1 and len(split1[1]) > 0:
            inches += int(split1[1])
        new_sides.append(str(inches) + '"')
    inches_only.append("x".join(new_sides))
print(inches_only)
Output:
['96"x120"', '60"x72"', '60"x96"', '60"x96"', '60"x92"', '106"x144"', '97"x121"']
Good afternoon.
I declared the ADX indicator function (following this video: Python: Average Directional Index (ADX) 2 Directional Movement System Calculation - https://www.youtube.com/watch?v=joOWm-GcHTw).
An error occurs when it runs: "TypeError: 'builtin_function_or_method' object is not subscriptable"
on this line:
TRDate,TrueRange = TR(date[x],closep[x],highp[x],lowp[x],openp[x],closep[x-1])
TypeError: 'builtin_function_or_method' object is not subscriptable
I will be glad of any help.
Thanks.
The code is below.
def TR(d, c, h, l, o, yc):
    x = h - l
    y = abs(h - yc)
    z = abs(l - yc)
    if y <= x >= z:
        TR = x
    elif x <= y >= z:
        TR = y
    elif x <= z >= y:
        TR = z
    return d, TR
def DM(d, o, h, l, c, yo, yh, yl, yc):
    moveUp = h - yh
    moveDown = yl - l
    if 0 < moveUp > moveDown:
        PDM = moveUp
    else:
        PDM = 0
    if 0 < moveDown > moveUp:
        NDM = moveDown
    else:
        NDM = 0
    return d, PDM, NDM
def calcDIs(date, openp, highp, lowp, closep, openpy, highpy, lowpy, closepy, tf):
    x = 1
    TRDates = []
    TrueRanges = []
    PosDMs = []
    NegDMs = []
    while x < len(date):
        TRDate, TrueRange = TR(date[x], closep[x], highp[x], lowp[x], openp[x], closep[x-1])  # << error occurs here
        TRDates.append(TRDate)
        TrueRanges.append(TrueRange)
        DMdate, PosDM, NegDM = DM(date[x], openp[x], highp[x], lowp[x], closep[x], openp[x-1], highp[x-1], lowp[-1], closep[x-1])  # << I assume the same error will occur here
        PosDMs.append(PosDM)
        NegDMs.append(NegDM)
        x += 1
    expPosDM = ExpMovingAverage(PosDMs, 14)
    expNegDM = ExpMovingAverage(NegDMs, 14)
    ATR = ExpMovingAverage(TrueRanges, 14)
    xx = 0
    PDIs = []
    NDIs = []
    while xx < len(ATR):
        PDI = 100 * (expPosDM[xx] / ATR[xx])
        PDIs.append(PDI)
        NDI = 100 * (expNegDM[xx] / ATR[xx])
        NDIs.append(NDI)
        xx += 1
    return PDIs, NDIs
I'm sorry, I did not insert the last part of the code.
def ADX(date, openp, highp, lowp, closep, openpy, highpy, lowpy, closepy, tf, tfy):
    PositiveDI, NegativeDI = calcDIs(date, openp, highp, lowp, closep, openpy, highpy, lowpy, closepy, tf)
    xxx = 0
    DXs = []
    while xxx < len(date[1:]):
        DX = 100 * (abs(PositiveDI[xxx] - NegativeDI[xxx])
                    / (PositiveDI[xxx] + NegativeDI[xxx]))
        DXs.append(DX)
        xxx += 1
    ADX = ExpMovingAverage(DXs, 14)
    return PositiveDI, NegativeDI, ADX
And accordingly the call:
PositiveDI, NegativeDI, ADX = ta.ADX(data,open,high,low,closes,open,high,low,closes,14,14)
Here the candle data is received through the Binance API - https://api.binance.com/api/v1/klines?symbol=LTCBTC&interval=5m
e.g.:
[
  [
    1499040000000,      // Open time
    "0.01634790",       // Open
    "0.80000000",       // High
    "0.01575800",       // Low
    "0.01577100",       // Close
    "148976.11427815",  // Volume
    1499644799999,      // Close time
    "2434.19055334",    // Quote asset volume
    308,                // Number of trades
    "1756.87402397",    // Taker buy base asset volume
    "28.46694368",      // Taker buy quote asset volume
    "17928899.62484339" // Ignore
  ]
]
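A minimal sketch of turning that kline response into plain lists of floats before calling the indicator functions (my own; it assumes the requests library and the field order shown above):
import requests

# Hypothetical loader: fetch the klines and split the columns into float lists.
url = "https://api.binance.com/api/v1/klines?symbol=LTCBTC&interval=5m"
klines = requests.get(url).json()

dates  = [k[0] for k in klines]          # open time in milliseconds
openp  = [float(k[1]) for k in klines]   # open
highp  = [float(k[2]) for k in klines]   # high
lowp   = [float(k[3]) for k in klines]   # low
closep = [float(k[4]) for k in klines]   # close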
Can you show all your import statements?
Also can you change your date variable to something else?
Good afternoon. I did as suggested, but it did not help.
def calcDIs(cur_date, openp, highp, lowp, closep, openpy, highpy, lowpy, closepy, tf):
    x = 1
    TRDates = []
    TrueRanges = []
    PosDMs = []
    NegDMs = []
    while x < len(cur_date):
        TRDate, TrueRange = TR(cur_date[x], closep[x], highp[x], lowp[x], openp[x], closep[x-1])
Still the same error:
TRDate,TrueRange = TR(cur_date[x],closep[x],highp[x],lowp[x],openp[x],closep[x-1])
TypeError: 'builtin_function_or_method' object is not subscriptable