I'm getting the error 'DataFrame' object has no attribute 'Buy' and I can't get rid of it even though I've checked everything:
import pandas as pd
import ta
import numpy as np
import time
from binance import Client
client = Client(api_key, api_secret)
def getminutedata(symbol, interval, lookback):
    frame = pd.DataFrame(client.get_historical_klines(symbol, interval, lookback + ' min ago UTC'))
    frame = frame.iloc[:, :6]
    frame.columns = ['Time', 'Open', 'High', 'Low', 'Close', 'Volume']
    frame = frame.set_index('Time')
    frame.index = pd.to_datetime(frame.index, unit='ms')
    frame = frame.astype(float)
    return frame
df = getminutedata('ADAUSDT', '1m', '100')
def applytechnicals(df):
    df['%K'] = ta.momentum.stoch(df.High, df.Low, df.Close, window=14, smooth_window=3)
    df['%D'] = df['%K'].rolling(3).mean()
    df['rsi'] = ta.momentum.rsi(df.Close, window=14)
    df['macd'] = ta.trend.macd_diff(df.Close)
    df.dropna(inplace=True)
applytechnicals(df)
class Signals:
    def __init__(self, df, lags):
        self.df = df
        self.lags = lags

    def gettrigger(self):
        dfx = pd.DataFrame()
        for i in range(self.lags + 1):
            mask = (self.df['%K'].shift(i) < 20) & (self.df['%D'].shift(i) < 20)
            dfx = dfx.append(mask, ignore_index=True)
        return dfx.sum(axis=0)

    def decide(self):
        self.df['trigger'] = np.where(self.gettrigger(), 1, 0)
        self.df['Buy'] = np.where((self.df.trigger) &
                                  (self.df['%K'].between(20, 80)) & (self.df['%D'].between(20, 80)) &
                                  (self.df.rsi > 50) & (self.df.macd > 0), 1, 0)
inst = Signals(df, 100)
inst.decide()
strategy('ADAUSDT', 4)
When I run strategy('ADAUSDT', 4) I get the output "current Close is x.xx" and then the error 'DataFrame' object has no attribute 'Buy'.
You did not post the code that actually raises the error, but judging from the error message, the 'Buy' column lookup should be inside the iloc, not outside.
Could you explain in more detail what you are trying to do with the iloc?
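Since strategy() was not posted, the following is only a guess at what it might look like; the signature, the qty parameter, and the print statements are hypothetical reconstructions. What it is meant to illustrate is that df only has a Buy column after applytechnicals() and decide() have been run on that same DataFrame, so re-fetching a fresh frame and reading Buy before decide() raises exactly this AttributeError.

def strategy(pair, qty, lags=100):
    # hypothetical reconstruction of the unposted strategy() function
    df = getminutedata(pair, '1m', '100')
    applytechnicals(df)            # adds %K, %D, rsi and macd
    inst = Signals(df, lags)
    inst.decide()                  # without this call df has no 'Buy' column -> AttributeError
    print(f"current Close is {df.Close.iloc[-1]}")
    if df.iloc[-1].Buy:            # read the Buy flag of the most recent candle
        print(f"buy {qty} {pair}")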
I'm trying to add to this code a function that would calculate vwap by date, but it isn't working:
def get_ohlc(pair, interval=1, since='last'):
    endpoint = 'https://api.kraken.com/0/public/OHLC'
    payLoad = {
        'pair': pair,
        'interval': interval,
        'since': since
    }
    response = requests.get(endpoint, payLoad)
    data = response.json()
    OHLC = data['result'][pair]
    data = pd.DataFrame.from_records(OHLC, columns=['Time', 'Open', 'High', 'Low', 'Close', 'vwap', 'volume', 'count'])
    data['Time'] = pd.to_datetime(data['Time'], unit='s')
    data['Date'] = data['Time'].dt.date
    data.set_index('Time', inplace=True)
    data = data.drop(['vwap', 'count'], axis=1)
    data['Open'] = data.Open.astype(float)
    data['High'] = data.High.astype(float)
    data['Low'] = data.Low.astype(float)
    data['Close'] = data.Close.astype(float)
    data['volume'] = data.volume.astype(float)
    data['Vwap'] = data.groupby('Date', group_keys=False).apply(Vwap)
    return data
def Vwap(data):
    H = data.High
    L = data.Low
    C = data.Close
    V = data.volume
    return data.assign(Vwap=(V * ((H + L + C) / 3)).cumsum() / V.cumsum())
I get the following error:
ValueError: Wrong number of items passed 7, placement implies 1
In my view, you have been mixing the "responsibilities" in your code:
the Vwap function should only take care of the calculation part;
you can create the vwap column in the get_ohlc function (which, by the way, is doing too many things in my view - maybe I would split the download from the manipulation of the data).
Anyway, this is how I would write a quick solution to your problem:
import requests
import pandas as pd

def get_ohlc(pair, interval=1, since='last'):
    endpoint = 'https://api.kraken.com/0/public/OHLC'
    payLoad = {
        'pair': pair,
        'interval': interval,
        'since': since
    }
    response = requests.get(endpoint, payLoad)
    data = response.json()
    OHLC = data['result'][pair]
    data = pd.DataFrame.from_records(OHLC, columns=['Time', 'Open', 'High', 'Low', 'Close', 'vwap', 'volume', 'count'])
    data['Time'] = pd.to_datetime(data['Time'], unit='s')
    data['Date'] = data['Time'].dt.date
    data.set_index('Time', inplace=True)
    data = data.drop(['vwap', 'count'], axis=1)
    data['Open'] = data.Open.astype(float)
    data['High'] = data.High.astype(float)
    data['Low'] = data.Low.astype(float)
    data['Close'] = data.Close.astype(float)
    data['volume'] = data.volume.astype(float)
    data = data.assign(vwap=data.groupby('Date', group_keys=False).apply(vwap_func))
    return data

def vwap_func(data):
    H = data["High"]
    L = data["Low"]
    C = data["Close"]
    V = data["volume"]
    res = (V * (H + L + C) / 3).cumsum() / V.cumsum()
    return res.to_frame()

data = get_ohlc(pair="XXBTZUSD")
print(data)
As you can see, there is no need to call vwap_func at the end, given that it is applied already in your get_ohlc function
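For what it's worth, the original ValueError ("Wrong number of items passed 7, placement implies 1") comes from Vwap returning the whole DataFrame produced by data.assign(...) while the assignment data['Vwap'] = data.groupby('Date', group_keys=False).apply(Vwap) expects something one column wide. A minimal change that keeps the original structure, assuming the rest of get_ohlc stays exactly as posted, would be to return only the VWAP Series from the helper:

def Vwap(data):
    # return just the per-group VWAP Series, not a whole DataFrame,
    # so it can be placed into the single 'Vwap' column
    typical_price = (data.High + data.Low + data.Close) / 3
    return (data.volume * typical_price).cumsum() / data.volume.cumsum()

# then, inside get_ohlc (unchanged otherwise):
# data['Vwap'] = data.groupby('Date', group_keys=False).apply(Vwap)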
I'm trying to make a trading bot that gives a buy signal when macdh turns positive from negative. I get the macdh values, but when I add the if condition I get a TypeError.
The error is:
if df['macdh'].iloc[i]>0 and df['macdh'].iloc[-2]<0:
TypeError: 'NoneType' object is not subscriptable
import requests
import json
import pandas as pd
from stockstats import StockDataFrame as Sdf

class TradingModel:
    def __init__(self, symbol):
        self.symbol = symbol
        self.df = self.getData()

    def getData(self):
        # define URL
        base = 'https://api.binance.com'
        endpoint = '/api/v3/klines'
        params = '?&symbol=' + self.symbol + '&interval=4h'
        url = base + endpoint + params
        data = requests.get(url)
        dictionary = json.loads(data.text)
        # put in dataframe and clean-up
        df = pd.DataFrame.from_dict(dictionary)
        df = df.drop(range(6, 12), axis=1)
        col_names = ['time', 'open', 'high', 'low', 'close', 'volume']
        df.columns = col_names
        for col in col_names:
            df[col] = df[col].astype(float)
        stock = Sdf.retype(df)
        df['macdh'] = stock['macdh']
        df['macds'] = stock['macds']
        df['macd'] = stock['macd']
        print(df)

    def strategy(self):
        df = self.df
        buy_signals = []
        for i in range(1, len(df['close'])):
            if df['macdh'][i] > 0 and df['macdh'].iloc[-2] < 0:
                buy_signals.append(df['time'][i], df['low'][i])

def Main():
    symbol = "BTCUSDT"
    model = TradingModel(symbol)
    model.strategy()

if __name__ == '__main__':
    Main()
When I added return df, it worked:
        df['macdh'] = stock['macdh']
        df['macds'] = stock['macds']
        df['macd'] = stock['macd']
        return df
NB My code runs if copied
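As a side note on the stated goal (a buy signal when macdh crosses from negative to positive): the posted loop compares the current bar with .iloc[-2], which always points at the second-to-last row of the whole frame rather than the previous bar. A sketch of a per-bar crossover check, assuming the same column names as above (the tuple stored in buy_signals is only an illustration):

    def strategy(self):
        df = self.df
        buy_signals = []
        for i in range(1, len(df)):
            # crossover: previous bar negative, current bar positive
            if df['macdh'].iloc[i] > 0 and df['macdh'].iloc[i - 1] < 0:
                buy_signals.append((df['time'].iloc[i], df['low'].iloc[i]))
        return buy_signals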
I wrote a simple script to backtest cryptocurrencies using the poloniex API.
First I request the data from the API and turn it into a dataframe called data.
Then I take the data I want and make a new dataframe called df.
A function trade must then be run on each row in df; simply put, if the price is above the rolling mean it buys, and it sells if below. This data is then saved in log.
I am having trouble applying this function to each row in df.
I had great success using the line log = df.apply(lambda x: trade(x['date'], x['close'], x['MA']), axis=1), BUT surprisingly it works when BTC_ETH is used in the API call and not for others, i.e. BTC_FCT or BTC_DOGE, despite the data being identical in form. Using ETH results in the creation of a DataFrame (which is what I want); DOGE and FCT create a Series.
First question: how can I run my trade function on each row and create a new df log with the results?
Bonus question: even though the data types are the same, why does it work for ETH but not for DOGE/FCT?
import numpy as np
from pandas import Series, DataFrame
import pandas as pd

API = 'https://poloniex.com/public?command=returnChartData&currencyPair=BTC_FCT&start=1435699200&end=9999999999&period=86400'
data = pd.read_json(API)

df = pd.DataFrame(columns = {'date','close','MA'})
df.MA = pd.rolling_mean(data.close, 30)
df.close = data.close
df.date = data.date
df = df.truncate(before=29)

def print_full(x):
    pd.set_option('display.max_rows', len(x))
    print(x)
    pd.reset_option('display.max_rows')

log = pd.DataFrame(columns = ['Date', 'type', 'profit', 'port_value'])
port = {'coin': 0, 'BTC': 1}

def trade(date, close, MA):
    if MA < close and port['coin'] == 0:
        coins_bought = port['BTC'] / MA
        port['BTC'] = 0
        port['coin'] = coins_bought
        d = {'Date': date, 'type': 'buy', 'coin_value': port['coin'], 'btc_value': port['BTC']}
        return pd.Series(d)
    elif MA > close and port['BTC'] == 0:
        coins_sold = port['coin'] * MA
        port['coin'] = 0
        port['BTC'] = coins_sold
        d = {'Date': date, 'type': 'sell', 'coin_value': port['coin'], 'btc_value': port['BTC']}
        print()
        return pd.Series(d)

log = df.apply(lambda x: trade(x['date'], x['close'], x['MA']), axis=1)
log = log.dropna()
print_full(log)
EDIT:
I solved the problem: I fixed it by appending the dicts to a list and then using the DataFrame.from_dict() method to create the log dataframe. My code, just to clarify:
data_list = []

def trade(date, close, MA):  # , port):
    # d = {'Data': close}
    # test_log = test_log.append(d, ignore_index=True)
    if MA < close and port['coin'] == 0:
        coins_bought = port['BTC'] / MA
        port['BTC'] = 0
        port['coin'] = coins_bought
        d = {'Date': date, 'type': 'buy', 'coin_value': port['coin'], 'btc_value': port['BTC']}
        data_list.append(d)
        # return pd.Series(d)
    elif MA > close and port['BTC'] == 0:
        coins_sold = port['coin'] * MA
        port['coin'] = 0
        port['BTC'] = coins_sold
        d = {'Date': date, 'type': 'sell', 'coin_value': port['coin'], 'btc_value': port['BTC']}
        data_list.append(d)
        # return pd.Series(d)

df.apply(lambda x: trade(x['date'], x['close'], x['MA']), axis=1)

for key, value in port.items():
    print(key, value)

log = pd.DataFrame.from_dict(data_list)
log = log.dropna()
The problem is that you are not always returning a value in trade, which is confusing Pandas. Try this:
import numpy as np
from pandas import Series, DataFrame
import pandas as pd

API = 'https://poloniex.com/public?command=returnChartData&currencyPair=BTC_FCT&start=1435699200&end=9999999999&period=86400'
data = pd.read_json(API)

df = pd.DataFrame(columns = {'date','close','MA'})
df.MA = pd.rolling_mean(data.close, 30)
df.close = data.close
df.date = data.date
df = df.truncate(before=29)

def print_full(x):
    pd.set_option('display.max_rows', len(x))
    print(x)
    pd.reset_option('display.max_rows')

log = pd.DataFrame(columns = ['Date', 'type', 'profit', 'port_value'])
port = {'coin': 0, 'BTC': 1}
def trade(date, close, MA):
    d = {'Date': date, 'type': '', 'coin_value': np.nan, 'btc_value': np.nan}
    if MA < close and port['coin'] == 0:
        coins_bought = port['BTC'] / MA
        port['BTC'] = 0
        port['coin'] = coins_bought
        d['type'] = 'buy'
        d['coin_value'] = port['coin']
        d['btc_value'] = port['BTC']
    elif MA > close and port['BTC'] == 0:
        coins_sold = port['coin'] * MA
        port['coin'] = 0
        port['BTC'] = coins_sold
        d['type'] = 'sell'
        d['coin_value'] = port['coin']
        d['btc_value'] = port['BTC']
    return pd.Series(d)

log = df.apply(lambda x: trade(x['date'], x['close'], x['MA']), axis=1)
log = log.dropna()
print_full(log)
However, as I mentioned in the comment, passing a function with side-effects to apply is not a good idea according to the documentation, and in fact I think it may not produce the correct result in your case.
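To avoid passing a function with side effects to apply at all, one option (similar in spirit to the approach in the question's EDIT) is to loop over the rows with itertuples, collect the trade records in a plain list, and build log from that list at the end. This is only a sketch, assuming the df, port, and column names defined above; the records list is an illustrative name:

port = {'coin': 0, 'BTC': 1}   # reset the portfolio before re-running
records = []

def trade(date, close, MA):
    if MA < close and port['coin'] == 0:
        port['coin'] = port['BTC'] / MA
        port['BTC'] = 0
        records.append({'Date': date, 'type': 'buy',
                        'coin_value': port['coin'], 'btc_value': port['BTC']})
    elif MA > close and port['BTC'] == 0:
        port['BTC'] = port['coin'] * MA
        port['coin'] = 0
        records.append({'Date': date, 'type': 'sell',
                        'coin_value': port['coin'], 'btc_value': port['BTC']})

# plain loop instead of df.apply, so apply never sees the side effects
for row in df.itertuples():
    trade(row.date, row.close, row.MA)

log = pd.DataFrame(records)   # always a DataFrame, regardless of the pair
print_full(log)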
I have a pandas DataFrame and I want to add another column to it, but I can't do it with append, add or insert.
I'm trying to replicate the portfolio data with pandas' built-in functions, because this script doesn't give me correct data if the period I request is shorter than ~1.5 years, while I need to be able to fetch data even for as little as two days. So here's the script that I want to rewrite:
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
ls_symbols = ["AAPL", "GLD", "GOOG", "$SPX", "XOM"]
dt_start = dt.datetime(2006, 1, 1)
dt_end = dt.datetime(2010, 12, 31)
dt_timeofday = dt.timedelta(hours=16)
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
c_dataobj = da.DataAccess('Yahoo')
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
ldf_data = c_dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
d_data = dict(zip(ls_keys, ldf_data)) is what I want to replicate (since the script above doesn't fetch the data that I want), but I need to figure out a way to append a new column to the DataFrames in my dict. Here is my script:
from pandas.io.data import DataReader, DataFrame
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.DataAccess as da
import datetime as dt

def get_historical_data(symbol, source, date_from, date_to):
    global data_validator
    symbol_data = {}
    ls_keys = ['Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close']
    for key in ls_keys:
        symbol_data[key] = DataFrame({})
    dataframe_open = DataFrame({})
    for item in symbol:
        print 'Fetching data for:', item
        current_data = DataReader(str(item), source, date_from, date_to)
        dataframe_open = {item: current_data['Open']}
        if len(symbol_data['Open'].columns) == 0:
            symbol_data['Open'] = DataFrame(dataframe_open)
        else:
            # I want to add the new column here but can't seem to do this.
            # symbol_data['Open'].loc[:item] = DataFrame(dataframe_open)
            pass
    return symbol_data
P.S. I call the func with these parameters for testing purposes:
test = get_historical_data(['SPY', 'DIA'], 'yahoo', datetime(2015,1,1), datetime(2015,1,31))
Does the following help? Have not tested yet, but should work in principle... Just put the data in arrays of equal length and construct the data frame from that.
def get_historical_data(symbols=[], source=None, date_from=None, date_to=None):
    global data_validator
    symbol_data = {}
    ls_keys = ['Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close']
    data = []
    for item in ls_keys:
        data.append(DataReader(str(item), source, date_from, date_to))
    symbol_dataframe = DataFrame(data=data, columns=ls_keys)

    # symbol_dataframe = DataFrame()
    # for key in ls_keys:
    #     symbol_data[key] = DataFrame({})
    # dataframe_open = DataFrame({})
    # for item in symbols:
    '''print 'Fetching data for:', item
    current_data = DataReader(str(item), source, date_from, date_to)
    dataframe_open = {item : current_data['Open']}
    #print(dataframe_open)
    if len(symbol_data['Open'].columns) == 0:
        symbol_data['Open'] = DataFrame(dataframe_open)
    else:
        #i want to add the new column here but can't seem to do this.
        symbol_data['Open'] = DataFrame(dataframe_open)
        symbol_data.head()
    '''
    return symbol_dataframe
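For the specific problem in the question (adding each symbol's column to the per-key DataFrames in symbol_data), plain column assignment is usually enough. The sketch below is only an untested illustration using the names from the question, not a replacement for either version above:

def get_historical_data(symbols, source, date_from, date_to):
    # one DataFrame per price field, each gaining one column per symbol
    ls_keys = ['Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close']
    symbol_data = {key: DataFrame() for key in ls_keys}
    for item in symbols:
        current_data = DataReader(str(item), source, date_from, date_to)
        for key in ls_keys:
            # adding a column is just an assignment; rows are aligned by date index
            symbol_data[key][item] = current_data[key]
    return symbol_data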
This is the portion of the code that's causing trouble:
import pandas as pd
import re

df
df.columns = ['Campaigns', 'Impressions', 'Attempts', 'Spend']
Campaigns = df['Campaigns']
IDs = []
for c in Campaigns:
    num = re.search(r'\d{6}', c).group()
    IDs.append(num)
pieces = [df, pd.DataFrame(IDs)]
frame = pd.concat(pieces, axis=1, join='outer', ignore_index=False)
frame['ID'] = frame[0]
del frame[0]
frame
This is the error:
Error: 'NoneType' object has no attribute 'group'
When I try things individually in ipython everything works, for example:
in>> test = 'YP_WON2_SP8_115436'
in>> num = re.search(r'\d{6}',test)
in>> num.group()
out>> '115436'
I've tried splitting up the code as above and it still throws the same error.
Fixed the code:
df
df.columns = ['Campaigns', 'Impressions', 'Attempts', 'Spend']
Campaigns = df['Campaigns']
ID = []
for c in Campaigns:
    m = re.search(r'\d{6}', c)
    if m:
        num = m.group()
        ID.append(num)
    else:
        ID.append('No ID')
pieces = [df, pd.DataFrame(ID)]
frame = pd.concat(pieces, axis=1, join='outer', ignore_index=False)
frame['ID'] = frame[0]
del frame[0]
frame
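The root cause is that re.search returns None when a campaign name contains no six-digit run, and None has no .group(), hence the guard above. As a side note, the same result can be obtained without the explicit loop using pandas string methods; a small sketch assuming the same df (the 'No ID' filler simply mirrors the fixed code):

# extract the first 6-digit run from each campaign name; rows without a
# match come back as NaN, which we fill to mirror the loop above
df['ID'] = df['Campaigns'].str.extract(r'(\d{6})', expand=False).fillna('No ID')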