How to use index in python list? - python

I am trying to run the pyethereum code, but while analyzing the code in
pyethereum/ethereum/hybrid_casper/consensus.py
I can't work out where the 'NULL_SENDER' value is defined or how this state.config['NULL_SENDER'] lookup is executed.
key, account = state.config['NULL_SENDER'], privtoaddr(state.config['NULL_SENDER'])

Let's look through all the code.
from ethereum import utils, transactions
from ethereum.common import update_block_env_variables
from ethereum.messages import apply_transaction
from ethereum.hybrid_casper import casper_utils
from ethereum.utils import privtoaddr

# Block initialization state transition
def initialize(state, block=None):
    config = state.config
    state.txindex = 0
    state.gas_used = 0
    state.bloom = 0
    state.receipts = []
    if block is not None:
        update_block_env_variables(state, block)
    # Initalize the next epoch in the Casper contract
    if state.block_number % state.config['EPOCH_LENGTH'] == 0 and state.block_number != 0:
        key, account = state.config['NULL_SENDER'], privtoaddr(state.config['NULL_SENDER'])
        data = casper_utils.casper_translator.encode('initialize_epoch', [state.block_number // state.config['EPOCH_LENGTH']])
        transaction = transactions.Transaction(state.get_nonce(account), 0, 3141592,
                                               state.config['CASPER_ADDRESS'], 0, data).sign(key)
        success, output = apply_transaction(state, transaction)
        assert success
    if state.is_DAO(at_fork_height=True):
        for acct in state.config['CHILD_DAO_LIST']:
            state.transfer_value(
                acct,
                state.config['DAO_WITHDRAWER'],
                state.get_balance(acct))
    if state.is_METROPOLIS(at_fork_height=True):
        state.set_code(utils.normalize_address(
            config["METROPOLIS_STATEROOT_STORE"]), config["METROPOLIS_GETTER_CODE"])
        state.set_code(utils.normalize_address(
            config["METROPOLIS_BLOCKHASH_STORE"]), config["METROPOLIS_GETTER_CODE"])
This module is imported and used by other parts of the codebase, for example https://github.com/ethereum/pyethereum/blob/develop/ethereum/hybrid_casper/chain.py
If you look at how the function is called in chain.py, the state parameter is filled with self.state, which is assigned as self.state = self.mk_poststate_of_blockhash(self.db.get(b'head_hash')).
That call returns a State object (defined in ethereum.state), whose config attribute behaves like a dictionary. So state.config['NULL_SENDER'] is simply fetching the value stored in the configuration under the key 'NULL_SENDER'.
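As a minimal sketch of what that lookup amounts to (the values below are made-up placeholders, not pyethereum's real defaults; the real value is set wherever the library builds its default config, not in consensus.py itself):
# state.config behaves like a dict, so this is an ordinary key lookup
config = {
    'NULL_SENDER': b'\x01' * 32,   # placeholder 32-byte private key (assumed value)
    'EPOCH_LENGTH': 50,            # placeholder epoch length (assumed value)
}
key = config['NULL_SENDER']        # same shape of lookup as state.config['NULL_SENDER']
print(len(key), 'byte key:', key.hex())
So finding where 'NULL_SENDER' is defined means finding where that key is inserted into the default configuration dictionary, not anything inside the initialize function.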

Related

I want to build an alert for an EMA indicator for a specific list of crypto pairs

Here is my first file, the configuration:
### configuration details
TELEGRAM_TOKEN = '' # telegram bot token
TELEGRAM_CHANNEL ='' # channel id
INTERVAL = '1m' # binance time interval
SHORT_EMA = 7 # short interval for ema
LONG_EMA = 21 # long interval for ema
Here is my second file:
import requests
import talib
import time
import numpy as np
import websocket
from config import TELEGRAM_TOKEN, TELEGRAM_CHANNEL, INTERVAL, SHORT_EMA, LONG_EMA

def streamKline(currency, interval):
    websocket.enableTrace(False)
    socket = f'wss://stream.binance.com:9443/ws/{currency}#kline_{interval}'
    ws = websocket.WebSocketApp(socket)
    ws.run_forever()

# SYMBOLS TO LOOK FOR ALERTS
SYMBOLS = [
    "ETHUSDT",
    "BTCUSDT",
    "ATOMUSDT",
    "BNBUSDT",
    "FTMBUSD",
    "ENJUSDT",
    "WAXPUSDT"
]

# sending alerts to telegram
def send_message(message):
    url = "https://api.telegram.org/bot{}/sendMessage?chat_id={}&text={}&parse_mode=markdown".format(TELEGRAM_TOKEN, TELEGRAM_CHANNEL, message)
    res = requests.get(url); print(url)
    return res

# getting klines data to process
def streamKline(symbol):
    data = socket.streamKline(symbol=symbol, interval=INTERVAL, limit=300)  # more data means more precision but at the trade off between speed and time
    return_data = []
    # taking closing data for each kline
    for each in data:
        return_data.append(float(each[4]))  # 4 is the index of the closing data in each kline
    return np.array(return_data)  # returning as numpy array for better precision and performance

def main():
    # making a infinite loop that keeps checking for condition
    while True:
        # looping through each coin
        for each in SYMBOLS:
            data = streamKline(each)
            ema_short = talib.EMA(data, int(SHORT_EMA))
            ema_long = talib.EMA(data, int(LONG_EMA))
            last_ema_short = ema_short[-2]
            last_ema_long = ema_long[-2]
            ema_short = ema_short[-1]
            ema_long = ema_long[-1]
            # conditions for alerts
            if (ema_short > ema_long and last_ema_short < last_ema_long):
                message = each + "bullcoming " + str(SHORT_EMA) + " over " + str(LONG_EMA); print(each, "alert came")
                send_message(message)
            time.sleep(0.5)

# calling the function
if __name__ == "__main__":
    main()
The config part is all set up; the problem is in the second file, when getting the kline data. The error looks like this:
data = socket.streamKline(symbol=symbol,interval=INTERVAL,limit=300) # more data means more precision but at the trade off between speed and time
NameError: name 'socket' is not defined
I just don't know how to fix it. I want to build an EMA alert that can send me a message when I am not watching the chart, but this approach does not seem to work. I have tried many times and watched many videos, but I am just a beginner and not improving at all.
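The NameError happens because socket only exists as a local variable inside the first streamKline function; the second streamKline definition shadows the first and then calls socket.streamKline, which is not defined anywhere. As a minimal sketch of one way around it (my assumption, not part of the original code: fetching the candles from Binance's public REST endpoint /api/v3/klines instead of the websocket):
import requests
import numpy as np

def get_closes(symbol, interval='1m', limit=300):
    """Fetch recent klines over REST and return the close prices as a numpy array."""
    url = 'https://api.binance.com/api/v3/klines'
    resp = requests.get(url, params={'symbol': symbol, 'interval': interval, 'limit': limit})
    resp.raise_for_status()
    return np.array([float(k[4]) for k in resp.json()])  # index 4 of each kline is the close price

print(get_closes('BTCUSDT')[-5:])
The EMA crossover check in main() could then call get_closes(each) in place of streamKline(each).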

Alice blue api module problem with Strategy Creation

from alice_blue import *
import pdb
import datetime
import time
import pandas as pd

access_token = '1JekMg.ezoSy4HW8rn3zjUzpRWR2fA2P9c6yQLjGWHdnCvXtOs'
alice = AliceBlue(username='', password='', access_token='5ENG1JekMg.ezoSy4HW8rn3zjUzpRWR2fA2P9c6yQLjGWHdnCvXtOs', master_contracts_to_download=['NSE', 'NFO'])
traded_stocks = []
socket_opened = False

def event_handler_quote_update(message):
    print(f"quote update {message}")
    name = message['instrument'].symbol
    openx = message['open']
    high = message['high']
    low = message['low']
    close = message['close']
    ltp = message['ltp']
    cap = 100000
    curr_time = datetime.datetime.now()
    crt_time = curr_time.time()
    breakout = datetime.time(18, 30)
    if (crt_time >= breakout) and (name not in traded_stocks) and (ltp >= high):
        print(f"buy: {name} , ltp: {ltp}")
        traded_stocks.append(name)
        message = alice.place_order(transaction_type=TransactionType.Buy, instrument=alice.get_instrument_by_symbol('NSE', name), quantity=1, order_type=OrderType.Limit, product_type=ProductType.BracketOrder, price=high, trigger_price=None, stop_loss=low, square_off=float(format((high - low), ".1f")), trailing_sl=None, is_amo=False)
        time.sleep(10)  # bare sleep() is not defined here; use time.sleep
    else:
        print(f"time not satisfied is {curr_time}")

def open_callback():
    global socket_opened
    socket_opened = True

alice.start_websocket(subscribe_callback=event_handler_quote_update,
                      socket_open_callback=open_callback,
                      run_in_background=True)
while (socket_opened == False):
    pass
instrument = [alice.get_instrument_by_symbol('NSE', 'ICICIBANK')]
while True:
    alice.subscribe(instrument, LiveFeedType.MARKET_DATA)
I'm trying to code an Open Range Breakout strategy with this alice-blue module (https://pypi.org/project/alice-blue/),
but I can't work out what concept to use.
Can anyone suggest code based on the linked alice-blue documentation and the data above, please?
I tried placing the order at a specific time alone, but the code runs and fires orders continuously.
I want a suggestion with a function where the order is placed only once for a certain script.
Step 1: Create a cron job at 9:30 AM and Store the Highest Price in DB.
Step 2: Create another cron job for every 15 mins --> Compare LTP > High
Step 3: If the condition is true --> Place an order and store the order ID & instrument
Step 4: Create one more cron job for exit signal --> compare with Target or SL price ---> Place Exit Order
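As a rough sketch of those steps (assuming an in-memory dict instead of a real database and the third-party schedule package for the timed jobs; the broker calls are left as placeholders rather than real alice-blue calls):
import time
import schedule

state = {"high": None, "order_placed": False, "order_id": None}  # stand-in for a DB

def record_opening_range_high():
    # Step 1 (09:30): store the opening-range high
    state["high"] = 100.0  # placeholder for a value fetched from the broker

def check_breakout():
    # Steps 2-3 (every 15 min): place the order only once
    if state["high"] is None or state["order_placed"]:
        return
    ltp = 101.0  # placeholder for the live LTP
    if ltp > state["high"]:
        state["order_id"] = "dummy-id"  # placeholder for alice.place_order(...)
        state["order_placed"] = True
        print("breakout order placed at", ltp)

schedule.every().day.at("09:30").do(record_opening_range_high)
schedule.every(15).minutes.do(check_breakout)

while True:
    schedule.run_pending()
    time.sleep(1)
Step 4 (the exit check) would be one more scheduled job that compares the position against the target or stop-loss price before placing the exit order.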

Using concurrent.futures within a for statement

I store QueryText within a pandas dataframe. Once I've loaded all the queries in, I want to run an analysis against each query. Currently I have ~50k to evaluate, so doing it one by one will take a long time.
So I wanted to implement concurrent.futures. How do I take the individual QueryText stored within fullAnalysis, pass it to concurrent.futures, and return the output as a variable?
Here is my entire code:
import pandas as pd
import time
import gensim
import sys
import warnings
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import as_completed

fullAnalysis = pd.DataFrame()

def fetch_data(jFile='ProcessingDetails.json'):
    print("Fetching data...please wait")
    # read JSON file for latest dictionary file name
    baselineDictionaryFileName = 'Dictionary/Dictionary_05-03-2020.json'
    # copy data to pandas dataframe
    labelled_data = pd.read_json(baselineDictionaryFileName)
    # Add two more columns to get the most similar text and score
    labelled_data['SimilarText'] = ''
    labelled_data['SimilarityScore'] = float()
    print("Data fetched from " + baselineDictionaryFileName + " and there are " + str(labelled_data.shape[0]) + " rows to be evalauted")
    return labelled_data

def calculateScore(inputFunc):
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    model = gensim.models.Word2Vec.load('w2v_model_bigdata')
    inp = inputFunc
    print(inp)
    out = dict()
    strEvaluation = inp.split("most_similar ", 1)[1]
    # while inp != 'quit':
    split_inp = inp.split()
    try:
        if split_inp[0] == 'help':
            pass
        elif split_inp[0] == 'similarity' and len(split_inp) >= 3:
            pass
        elif split_inp[0] == 'most_similar' and len(split_inp) >= 2:
            for pair in model.most_similar(positive=[split_inp[1]]):
                out.update({pair[0]: pair[1]})
    except KeyError as ke:
        # print(str(ke) + "\n")
        inp = input()
    return out

def main():
    with ThreadPoolExecutor(max_workers=5) as executor:
        for i in range(len(fullAnalysis)):
            text = fullAnalysis['QueryText'][i]
            arg = 'most_similar' + ' ' + text
            # for item in executor.map(calculateScore, arg):
            output = executor.map(calculateScore, arg)
    return output

if __name__ == "__main__":
    fullAnalysis = fetch_data()
    results = main()
    print(f'results: {results}')
The Python Global Interpreter Lock or GIL allows only one thread to hold control of the Python interpreter. Since your function calculateScore might be cpu-bound and requires the interpreter to execute its byte code, you may be gaining little by using threading. If, on the other hand, it were doing mostly I/O operations, it would be giving up the GIL for most of its running time allowing other threads to run. But that does not seem to be the case here. You probably should be using the ProcessPoolExecutor from concurrent.futures (try it both ways and see):
def main():
    with ProcessPoolExecutor(max_workers=None) as executor:
        the_futures = {}
        for i in range(len(fullAnalysis)):
            text = fullAnalysis['QueryText'][i]
            arg = 'most_similar' + ' ' + text
            future = executor.submit(calculateScore, arg)
            the_futures[future] = i  # map future to request
        for future in as_completed(the_futures):  # results as they become available, not necessarily in the order of submission
            i = the_futures[future]  # the original index
            result = future.result()  # the result
If you omit the max_workers parameter (or specify a value of None) from the ProcessPoolExecutor constructor, the default will be the number of processors you have on your machine (not a bad default). There is no point in specifying a value larger than the number of processors you have.
If you do not need to tie the future back to the original request, then the_futures can just be a list to which you append each future. But simplest yet is to not even bother with the as_completed method:
def main():
    with ProcessPoolExecutor(max_workers=5) as executor:
        the_futures = []
        for i in range(len(fullAnalysis)):
            text = fullAnalysis['QueryText'][i]
            arg = 'most_similar' + ' ' + text
            future = executor.submit(calculateScore, arg)
            the_futures.append(future)
        # wait for the completion of all the results and return them all:
        results = [f.result() for f in the_futures]  # results in creation order
        return results
It should be mentioned that the code that launches the ProcessPoolExecutor should be in a block governed by if __name__ == '__main__':. If it isn't, you will get into a recursive loop with each subprocess launching the ProcessPoolExecutor. But that appears to be the case here. Perhaps you meant to use the ProcessPoolExecutor all along?
Also:
I don't know what the line ...
model = gensim.models.Word2Vec.load('w2v_model_bigdata')
... in function calculateScore does. It may be the one I/O-bound statement. But it appears to be something that does not vary from call to call. If that is the case and model is not being modified in the function, shouldn't this statement be moved out of the function and computed just once? Then this function would clearly run faster (and be clearly CPU-bound).
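One way to do that with processes, assuming Python 3.7+ where ProcessPoolExecutor accepts an initializer argument, is to load the model once per worker rather than once per call (a sketch, not the poster's code):
from concurrent.futures import ProcessPoolExecutor
import gensim

_model = None  # one copy per worker process

def _load_model():
    global _model
    _model = gensim.models.Word2Vec.load('w2v_model_bigdata')

def calculateScore(arg):
    # the initializer has already loaded _model, so each call only does the lookup
    word = arg.split('most_similar ', 1)[1].split()[0]
    try:
        return dict(_model.most_similar(positive=[word]))
    except KeyError:
        return {}

def run(args):
    with ProcessPoolExecutor(initializer=_load_model) as executor:
        return list(executor.map(calculateScore, args))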
Also:
The exception block ...
except KeyError as ke:
#print(str(ke) + "\n")
inp = input()
... is puzzling. You are inputting a value that will never be used right before returning. If this is to pause execution, there is no error message being output.
With Booboo's assistance, I was able to update the code to include ProcessPoolExecutor. Here is my updated code. Overall, processing has been sped up by more than 60%.
I did run into a processing issue and found a topic on BrokenProcessPool that addresses it.
output = {}
thePool = {}

def main(labelled_data, dictionaryRevised):
    args = sys.argv[1:]
    with ProcessPoolExecutor(max_workers=None) as executor:
        for i in range(len(labelled_data)):
            text = labelled_data['QueryText'][i]
            arg = 'most_similar' + ' ' + text
            output = winprocess.submit(
                executor, calculateScore, arg
            )
            thePool[output] = i  # original index for future to request
        for output in as_completed(thePool):  # results as they become available, not necessarily in the order of submission
            i = thePool[output]  # the original index
            text = labelled_data['QueryText'][i]
            result = output.result()  # the result
            maximumKey = max(result.items(), key=operator.itemgetter(1))[0]
            maximumValue = result.get(maximumKey)
            labelled_data['SimilarText'][i] = maximumKey
            labelled_data['SimilarityScore'][i] = maximumValue
    return labelled_data, dictionaryRevised

if __name__ == "__main__":
    start = time.perf_counter()
    print("Starting to evaluate Query Text for labelling...")
    output_Labelled_Data, output_dictionary_revised = preProcessor()
    output, dictionary = main(output_Labelled_Data, output_dictionary_revised)
    finish = time.perf_counter()
    print(f'Finished in {round(finish-start, 2)} second(s)')

Multiprocessing function not writing to file or printing

I'm working on a Raspberry Pi (3 B+) making a data collection device and I'm
trying to spawn a process to record the data coming in and write it to a file. I have a function for the writing that works fine when I call it directly.
When I call it using the multiprocess approach however, nothing seems to happen. I can see in task monitors in Linux that the process does in fact get spawned but no file gets written, and when I try to pass a flag to it to shut down it doesn't work, meaning I end up terminating the process and nothing seems to have happened.
I've been over this every which way and can't see what I'm doing wrong; does anyone else? In case it's relevant, these are functions inside a parent class, and one of the functions is meant to spawn another as a thread.
Code I'm using:
from datetime import datetime, timedelta
import csv
from drivers.IMU_SEN0 import IMU_SEN0
import multiprocessing, os

class IMU_data_logger:
    _output_filename = ''
    _csv_headers = []
    _accelerometer_headers = ['Accelerometer X', 'Accelerometer Y', 'Accelerometer Z']
    _gyroscope_headers = ['Gyroscope X', 'Gyroscope Y', 'Gyroscope Z']
    _magnetometer_headers = ['Bearing']
    _log_accelerometer = False
    _log_gyroscope = False
    _log_magnetometer = False
    IMU = None
    _writer = []
    _run_underway = False
    _process = []
    _stop_value = 0

    def __init__(self, output_filename='/home/pi/blah.csv', log_accelerometer=True, log_gyroscope=True, log_magnetometer=True):
        """data logging device
        NOTE! Multiple instances of this class should not use the same IMU devices simultaneously!"""
        self._output_filename = output_filename
        self._log_accelerometer = log_accelerometer
        self._log_gyroscope = log_gyroscope
        self._log_magnetometer = log_magnetometer

    def __del__(self):
        # TODO Update this
        if self._run_underway:  # If there's still a run underway, end it first
            self.end_recording()

    def _set_up(self):
        self.IMU = IMU_SEN0(self._log_accelerometer, self._log_gyroscope, self._log_magnetometer)
        self._set_up_headers()

    def _set_up_headers(self):
        """Set up the headers of the CSV file based on the header substrings at top and the input flags on what will be measured"""
        self._csv_headers = []
        if self._log_accelerometer is not None:
            self._csv_headers += self._accelerometer_headers
        if self._log_gyroscope is not None:
            self._csv_headers += self._gyroscope_headers
        if self._log_magnetometer is not None:
            self._csv_headers += self._magnetometer_headers

    def _record_data(self, frequency, stop_value):
        self._set_up()  # Run setup in thread
        """Record data function, which takes a recording frequency, in herz, as an input"""
        previous_read_time = datetime.now() - timedelta(1, 0, 0)
        self._run_underway = True  # Note that a run is now going
        Period = 1 / frequency  # Period, in seconds, of a recording based on the input frequency
        print("Writing output data to", self._output_filename)
        with open(self._output_filename, 'w', newline='') as outcsv:
            self._writer = csv.writer(outcsv)
            self._writer.writerow(self._csv_headers)  # Write headers to file
            while stop_value.value == 0:  # While a run continues
                if datetime.now() - previous_read_time >= timedelta(0, 1, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    print("run underway value", self._run_underway)
                if datetime.now() - previous_read_time >= timedelta(0, Period, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    previous_read_time = datetime.now()  # Update previous readtime
                    next_row = []
                    if self._log_accelerometer:
                        # Get values in m/s^2
                        axes = self.IMU.read_accelerometer_values()
                        next_row += [axes['x'], axes['y'], axes['z']]
                    if self._log_gyroscope:
                        # Read gyro values
                        gyro = self.IMU.read_gyroscope_values()
                        next_row += [gyro['x'], gyro['y'], gyro['z']]
                    if self._log_magnetometer:
                        # Read magnetometer value
                        b = self.IMU.read_magnetometer_bearing()
                        next_row += b
                    self._writer.writerow(next_row)
            # Close the csv when done
            outcsv.close()

    def start_recording(self, frequency_in_hz):
        # Create recording process
        self._stop_value = multiprocessing.Value('i', 0)
        self._process = multiprocessing.Process(target=self._record_data, args=(frequency_in_hz, self._stop_value))
        # Start recording process
        self._process.start()
        print(datetime.now().strftime("%H:%M:%S.%f"), "Data logging process spawned")
        print("Logging Accelerometer:", self._log_accelerometer)
        print("Logging Gyroscope:", self._log_gyroscope)
        print("Logging Magnetometer:", self._log_magnetometer)
        print("ID of data logging process: {}".format(self._process.pid))

    def end_recording(self, terminate_wait=2):
        """Function to end the recording multithread that's been spawned.
        Args: terminate_wait: This is the time, in seconds, to wait after attempting to shut down the process before terminating it."""
        # Get process id
        id = self._process.pid
        # Set stop event for process
        self._stop_value.value = 1
        self._process.join(terminate_wait)  # Wait two seconds for the process to terminate
        if self._process.is_alive():  # If it's still alive after waiting
            self._process.terminate()
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "needed to be terminated.")
        else:
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "successfully ended itself.")
====================================================================
ANSWER: For anyone following up here, it turns out the problem was my use of the VS Code debugger which apparently doesn't work with multiprocessing and was somehow preventing the success of the spawned process. Many thanks to Tomasz Swider below for helping me work through issues and, eventually, find my idiocy. The help was very deeply appreciated!!
I can see a few things wrong in your code:
First, stop_value == 0 will not work, since multiprocessing.Value('i', 0) is not equal to 0; change that line to
while stop_value.value == 0
Second, you never update previous_read_time, so it will write the readings as fast as it can and you will run out of disk space quickly.
Third, use time.sleep(): what you are doing is called busy looping, and it is bad because it wastes CPU cycles needlessly.
Fourth, terminating with self._stop_value = 1 will probably not work; there must be another way to set that value, perhaps self._stop_value.value = 1.
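As a small sketch of the first, third and fourth points together (dummy reads stand in for the IMU calls):
import multiprocessing
import time

def read_sensors():
    return [0.0, 0.0, 0.0]  # placeholder for the real IMU reads

def record_loop(stop_value, period):
    """Poll roughly every `period` seconds without spinning the CPU."""
    while stop_value.value == 0:     # compare against .value, not the Value object itself
        row = read_sensors()
        print(row)                   # stands in for self._writer.writerow(row)
        time.sleep(period)           # yield the CPU instead of busy looping

if __name__ == '__main__':
    stop = multiprocessing.Value('i', 0)
    proc = multiprocessing.Process(target=record_loop, args=(stop, 0.05))
    proc.start()
    time.sleep(1)
    stop.value = 1                   # set .value to signal the process to stop
    proc.join()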
Well, here is a piece of example code, based on the code you provided, that works just fine:
import csv
import multiprocessing
import time
from datetime import datetime, timedelta
from random import randint

class IMU(object):
    @staticmethod
    def read_accelerometer_values():
        return dict(x=randint(0, 100), y=randint(0, 100), z=randint(0, 10))

class Foo(object):
    def __init__(self, output_filename):
        self._output_filename = output_filename
        self._csv_headers = ['xxxx', 'y', 'z']
        self._log_accelerometer = True
        self.IMU = IMU()

    def _record_data(self, frequency, stop_value):
        # self._set_up()  # Run setup functions for the data collection device and store it in the self.IMU variable
        """Record data function, which takes a recording frequency, in herz, as an input"""
        previous_read_time = datetime.now() - timedelta(1, 0, 0)
        self._run_underway = True  # Note that a run is now going
        Period = 1 / frequency  # Period, in seconds, of a recording based on the input frequency
        print("Writing output data to", self._output_filename)
        with open(self._output_filename, 'w', newline='') as outcsv:
            self._writer = csv.writer(outcsv)
            self._writer.writerow(self._csv_headers)  # Write headers to file
            while stop_value.value == 0:  # While a run continues
                if datetime.now() - previous_read_time >= timedelta(0, 1, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    print("run underway value", self._run_underway)
                if datetime.now() - previous_read_time >= timedelta(0, Period, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    next_row = []
                    if self._log_accelerometer:
                        # Get values in m/s^2
                        axes = self.IMU.read_accelerometer_values()
                        next_row += [axes['x'], axes['y'], axes['z']]
                    previous_read_time = datetime.now()
                    self._writer.writerow(next_row)
            # Close the csv when done
            outcsv.close()

    def start_recording(self, frequency_in_hz):
        # Create recording process
        self._stop_value = multiprocessing.Value('i', 0)
        self._process = multiprocessing.Process(target=self._record_data, args=(frequency_in_hz, self._stop_value))
        # Start recording process
        self._process.start()
        print(datetime.now().strftime("%H:%M:%S.%f"), "Data logging process spawned")
        print("ID of data logging process: {}".format(self._process.pid))

    def end_recording(self, terminate_wait=2):
        """Function to end the recording multithread that's been spawned.
        Args: terminate_wait: This is the time, in seconds, to wait after attempting to shut down the process before terminating it."""
        # Get process id
        id = self._process.pid
        # Set stop event for process
        self._stop_value.value = 1
        self._process.join(terminate_wait)  # Wait two seconds for the process to terminate
        if self._process.is_alive():  # If it's still alive after waiting
            self._process.terminate()
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "needed to be terminated.")
        else:
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "successfully ended itself.")

if __name__ == '__main__':
    foo = Foo('/tmp/foometer.csv')
    foo.start_recording(20)
    time.sleep(5)
    print('Ending recording')
    foo.end_recording()

Optimize output of a script by varying input parameters

I have written a script that uses the code below, and I would like to optimize rsi_high and rsi_low to get the best sharpe_ratio:
#
import numpy
import talib as ta

global rsi_high, rsi_low
rsi_high = 63
rsi_low = 41

def myTradingSystem(DATE, OPEN, HIGH, LOW, CLOSE, VOL, exposure, equity, settings):
    ''' This system uses trend following techniques to allocate capital into the desired equities'''
    nMarkets = CLOSE.shape[1]  # SHAPE OF NUMPY ARRAY
    result, rsi_pos = numpy.apply_along_axis(rsicalc, axis=0, arr=CLOSE)
    pos = numpy.asarray(rsi_pos, dtype=numpy.float64)
    return pos, settings

def rsicalc(num):
    # print rsi_high
    try:
        rsival = ta.RSI(numpy.array(num, dtype='f8'), timeperiod=14)
        if rsival[14] > rsi_high:
            pos_rsi = 1
        elif rsival[14] < rsi_low:
            pos_rsi = -1
        else:
            pos_rsi = 0
    except:
        rsival = 0
        pos_rsi = 0
    return rsival, pos_rsi

def mySettings():
    ''' Define your trading system settings here '''
    settings = {}
    # Futures Contracts
    settings['markets'] = ['CASH', 'F_AD', 'F_BO', 'F_BP', 'F_C', 'F_CC', 'F_CD',
                           'F_CL', 'F_CT', 'F_DX', 'F_EC', 'F_ED', 'F_ES', 'F_FC', 'F_FV', 'F_GC',
                           'F_HG', 'F_HO', 'F_JY', 'F_KC', 'F_LB', 'F_LC', 'F_LN', 'F_MD', 'F_MP',
                           'F_NG', 'F_NQ', 'F_NR', 'F_O', 'F_OJ', 'F_PA', 'F_PL', 'F_RB', 'F_RU',
                           'F_S', 'F_SB', 'F_SF', 'F_SI', 'F_SM', 'F_TU', 'F_TY', 'F_US', 'F_W',
                           'F_XX', 'F_YM']
    settings['slippage'] = 0.05
    settings['budget'] = 1000000
    settings['beginInSample'] = '19900101'
    settings['endInSample'] = '19931231'
    settings['lookback'] = 504
    return settings

# Evaluate trading system defined in current file.
if __name__ == '__main__':
    import quantiacsToolbox
    results = quantiacsToolbox.runts(__file__, plotEquity=False)
    sharpe_ratio = results['stats']['sharpe']
I suspect that something like scipy's minimize function would do the trick, but I am having trouble understanding how to package my script into a usable form.
I have tried putting everything in a function and then running all the code through a number of loops, incrementing the values each time, but there must be a more elegant way of doing this.
Apologies for posting all my code, but I thought it would help if a responder wanted to reproduce my setup, and it gives anyone new to quantiacs a real example of the same issue.
Thanks for your help in advance!
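One way to package it, assuming a callable run_backtest(rsi_high, rsi_low) that sets the globals, runs quantiacsToolbox.runts and returns the Sharpe ratio (the body below is only a dummy stand-in for that call), is a coarse grid search, which is often more robust than scipy.optimize.minimize on a noisy, integer-valued parameter space:
import itertools

def run_backtest(rsi_high, rsi_low):
    # dummy objective standing in for: set the globals, run quantiacsToolbox.runts(__file__),
    # and return results['stats']['sharpe']
    return -((rsi_high - 65) ** 2 + (rsi_low - 40) ** 2) / 100.0

best = None
for high, low in itertools.product(range(55, 76, 2), range(30, 51, 2)):
    if low >= high:
        continue  # keep rsi_low below rsi_high
    sharpe = run_backtest(high, low)
    if best is None or sharpe > best[0]:
        best = (sharpe, high, low)

print("best sharpe %.3f at rsi_high=%d, rsi_low=%d" % best)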
