numpy.where makes code slow - python

I have the following block of code:
def hasCleavage(tags, pair, fragsize):
    limit = int(fragsize["mean"] + fragsize["sd"] * 4)
    if pair.direction == "F1R2" or pair.direction == "R2F1":
        x1 = np.where((tags[pair.chr_r1] >= pair.r1["pos"]) & (tags[pair.chr_r1] <= pair.r1["pos"]+limit))[0]
        x2 = np.where((tags[pair.chr_r2] <= pair.r2["pos"]+pair.frside) & (tags[pair.chr_r2] >= pair.r2["pos"]+pair.frside-limit))[0]
    elif pair.direction == "F1F2" or pair.direction == "F2F1":
        x1 = np.where((tags[pair.chr_r1] >= pair.r1["pos"]) & (tags[pair.chr_r1] <= pair.r1["pos"]+limit))[0]
        x2 = np.where((tags[pair.chr_r2] >= pair.r2["pos"]) & (tags[pair.chr_r2] <= pair.r2["pos"]+limit))[0]
    elif pair.direction == "R1R2" or pair.direction == "R2R1":
        x1 = np.where((tags[pair.chr_r1] <= pair.r1["pos"]+pair.frside) & (tags[pair.chr_r1] >= pair.r1["pos"]+pair.frside-limit))[0]
        x2 = np.where((tags[pair.chr_r2] <= pair.r2["pos"]+pair.frside) & (tags[pair.chr_r2] >= pair.r2["pos"]+pair.frside-limit))[0]
    else: # F2R1 or R1F2
        x1 = np.where((tags[pair.chr_r2] >= pair.r2["pos"]) & (tags[pair.chr_r2] <= pair.r2["pos"]+limit))[0]
        x2 = np.where((tags[pair.chr_r1] <= pair.r1["pos"]+pair.frside) & (tags[pair.chr_r1] >= pair.r1["pos"]+pair.frside-limit))[0]
    if x1.size > 0 and x2.size > 0:
        return True
    else:
        return False
My script takes 16 minutes to finish. It calls hasCleavage millions of times, once per row of the file it reads. When I put a return True right above the limit variable (so np.where is never called), the script takes 5 minutes.
tags is a dictionary of numpy arrays holding numbers in ascending order.
Do you have any suggestions to improve performance?
EDIT:
tags = {'JH584302.1': array([ 351, 1408, 2185, 2378, 2740, 2904, 3364, 3657,
4240, 5324, 5966, 5977, 5986, 6488, 6531, 6847,
6961, 6973, 6991, 7107, 7383, 7395, 7557, 7569,
9178, 10077, 10456, 10471, 11271, 11466, 12311, 12441,
12598, 13051, 13123, 13859, 14167, 14672, 15156, 15252,
15268, 15273, 15694, 15786, 16361, 17073, 17293, 17454])
}
fragsize = {'sd': 130.29407997430428, 'mean': 247.56636}
And pair is an object of a custom class
<__main__.Pair object at 0x17129ad0>
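Since each array in tags is sorted ascending, one common speed-up is to replace the np.where scans with np.searchsorted, which locates the window boundaries by binary search instead of building a boolean mask over the whole array. A minimal sketch (the helper name is mine, not part of the original code):
import numpy as np

def has_tag_in_window(arr, lo, hi):
    # arr is assumed to be sorted ascending; True if any value lies in [lo, hi]
    left = np.searchsorted(arr, lo, side='left')
    right = np.searchsorted(arr, hi, side='right')
    return right > left

# e.g. for the F1R2 / R2F1 branch:
# x1_found = has_tag_in_window(tags[pair.chr_r1], pair.r1["pos"], pair.r1["pos"] + limit)
# x2_found = has_tag_in_window(tags[pair.chr_r2], pair.r2["pos"] + pair.frside - limit, pair.r2["pos"] + pair.frside)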

Pivot function results differ from TradingView

I'm using this code to calculate pivot points.
def pivots_low(osc, LBR, LBL):
    pivots = []
    for i in range(len(osc) - LBR):
        pivots.append(0)
        pivot = True
        if i > LBL:
            for j in range(1, LBR + 1):
                if osc[i] >= osc[i + j]:
                    pivot = False
            for j in range(1, LBL + 1):
                if osc[i] > osc[i - j]:
                    pivot = False
            if pivot is True:
                pivots[len(pivots) - 1] = osc[i]
    for i in range(LBR):
        pivots.append(0)
    return pivots
This returns an array with 0's where there are no pivots and the value of the pivot where there is one.
When comparing the results to TradingView (a downloaded CSV with pivot points), the only time it matches exactly is when lookback left and right are both 5. Otherwise it deviates in the number of total pivots and the location of some.
But using this code to calculate pivot highs:
def pivots_high(osc, LBR, LBL):
    pivots = []
    for i in range(len(osc) - LBR):
        pivots.append(0)
        pivot = True
        if i > LBL:
            for j in range(1, LBL + 1):
                if osc[i] < osc[i - j]:
                    pivot = False
            for j in range(1, LBR + 1):
                if osc[i] <= osc[i + j]:
                    pivot = False
            if pivot is True:
                pivots[len(pivots) - 1] = osc[i]
    for i in range(LBR):
        pivots.append(0)
    return pivots
the results are perfect regardless of the lookback values. But the code is almost exactly the same apart from the comparisons.
What is going wrong here? This is day 3 of having this problem and I just can't fix it.
To Reproduce:
Load Data:
Full_Data = pd.read_csv(file)
use this simple function to check matches between calculated pivots and TradingView pivots.
def match_pivs(data, pivs_h, pivs_l):  # data is a DataFrame loaded from the TradingView csv
    global lblh
    global lbrh
    global lbll
    global lbrl
    start = lbrh
    if lbrl > lbrh:
        start = lbrl
    match_h = 0
    tot_hd = 0
    tot_hp = 0
    match_l = 0
    tot_ld = 0
    tot_lp = 0
    for i in range(start, len(data)):
        if data['PivHigh'][i] != 0 and pivs_h[i-lbrh] != 0:
            match_h += 1
        if data['PivLow'][i] != 0 and pivs_l[i-lbrl] != 0:
            match_l += 1
        if data['PivHigh'][i] != 0:
            tot_hd += 1
        if data['PivLow'][i] != 0:
            tot_ld += 1
        if pivs_h[i] != 0:
            tot_hp += 1
        if pivs_l[i] != 0:
            tot_lp += 1
    print('PivsLow ' + str(tot_lp))
    print('DataLows ' + str(tot_ld))
    print('MatchesL ' + str(match_l))
    print('PivsHigh ' + str(tot_hp))
    print('DataHighs ' + str(tot_hd))
    print('MatchesH ' + str(match_h))
and to get csv from TradingView:
//@version=5
indicator("Data Script", overlay=true, max_labels_count=500)
lengthGroupTitle = "Pivot lengths"  // group title constant (missing from the posted snippet)
leftLenL = input.int(title="Pivot Low", defval=10, minval=1, inline="Pivot Low", group=lengthGroupTitle)
rightLenL = input.int(title="/", defval=10, minval=1, inline="Pivot Low", group=lengthGroupTitle)
leftLenH = input.int(title="Pivot High", defval=10, minval=1, inline="Pivot High", group=lengthGroupTitle)
rightLenH = input.int(title="/", defval=10, minval=1, inline="Pivot High", group=lengthGroupTitle)
ph = ta.pivothigh(leftLenH, rightLenH)
pl = ta.pivotlow(leftLenL, rightLenL)
plth = 0.0
pltl = 0.0
if not na(ph)
    plth := ph
else
    plth := 0.0
if not na(pl)
    pltl := pl
else
    pltl := 0.0
plot(plth, 'PivHigh')
plot(pltl, 'PivLow')
then just download csv with this script loaded.
Run program with these three lines:
pl = pivots_low(Full_Data['low'], lbll, lbrl)
ph = pivots_high(Full_Data['high'], lbrh, lblh)
match_pivs(Full_Data, ph, pl)
Finally found a way.
I still have no idea why that code does not work, but I've put together a different approach that seems to match the TradingView data 100%.
def checkhl(data_back, data_forward, hl):
    if hl == 'high' or hl == 'High':
        ref = data_back[len(data_back)-1]
        for i in range(len(data_back)-1):
            if ref < data_back[i]:
                return 0
        for i in range(len(data_forward)):
            if ref <= data_forward[i]:
                return 0
        return 1
    if hl == 'low' or hl == 'Low':
        ref = data_back[len(data_back)-1]
        for i in range(len(data_back)-1):
            if ref > data_back[i]:
                return 0
        for i in range(len(data_forward)):
            if ref >= data_forward[i]:
                return 0
        return 1
def pivot(osc, LBL, LBR, highlow):
    left = []
    right = []
    pivots = []
    for i in range(len(osc)):
        pivots.append(0.0)
        if i < LBL + 1:
            left.append(osc[i])
        if i > LBL:
            right.append(osc[i])
        if i > LBL + LBR:
            left.append(right[0])
            left.pop(0)
            right.pop(0)
            if checkhl(left, right, highlow):
                pivots[i - LBR] = osc[i - LBR]
    return pivots
then just do:
pivots_low = pivot(data, lbl, lbr, 'low')
pivots_high = pivot(data, lbl, lbr, 'high')
All the pivots will be at the actual positions where they occur, not lbr bars later; everywhere else the value will be 0.0.
I'm not sure if this is efficient or not but it seems to work.
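If efficiency becomes a concern, the same left/right comparison rules as checkhl can be expressed with pandas rolling windows instead of Python loops. A sketch for the 'high' case, assuming osc is array-like (this mirrors the logic above; it is not TradingView's own implementation):
import numpy as np
import pandas as pd

def pivots_high_vec(osc, LBL, LBR):
    s = pd.Series(osc, dtype=float)
    # highest value over the LBL bars strictly to the left of each bar
    left_max = s.shift(1).rolling(LBL, min_periods=LBL).max()
    # highest value over the LBR bars strictly to the right of each bar
    right_max = s.shift(-LBR).rolling(LBR, min_periods=LBR).max()
    # same rules as checkhl('high'): >= everything on the left, > everything on the right
    is_pivot = (s >= left_max) & (s > right_max)
    return np.where(is_pivot, s, 0.0)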

append answer from pcb in while loop

upper_bound = 0x1200
lower_bound = 0x0
msg_to_send = rcvD.all_strck.MxFEAxiRegMsg.copy()
modem_snr_list = []
while True:
    modem_snr_list.clear()
    running_value = (upper_bound + lower_bound) // 2
    msg_to_send["data"] = running_value
    # rcvD.send_the_message("MxFEAxiRegMsg", rcvD.all_strck.MxFEAxiRegMsg)
    rcvD.send_the_message("MxFEAxiRegMsg", msg_to_send)
    time.sleep(2)
    fm.rcv_the_packets(wait_for_optcode=38)
    fm.rcv_the_packets(wait_for_optcode=90)
    modem_sync = rcvD.all_strck.modemParamMsg["modemSync"]
    modem_freq = rcvD.all_strck.modemParamMsg["modemEstFreq"]
    modem_snr = rcvD.all_strck.modemParamMsg["modemSnr"]
    modem_snr_list.append(modem_snr)
    average_snr = sum(modem_snr_list) / len(modem_snr_list)
    print(f"modem snr list is {modem_snr_list}\n modem snr average is {average_snr}")
    sent_pack = rcvD.all_strck.dataStatusRepMsg["sentMsgCnt"]
    receive_pack = rcvD.all_strck.dataStatusRepMsg["rcvMsgCnt"]
    print(
        f"msg_sent: {msg_to_send} \n running_value: {running_value}\n upper_bound: {upper_bound}\n lower_bound: {lower_bound}\n modem_snr: {modem_snr}\n modem_freq: {modem_freq}\n modem_sync: {modem_sync}\n "
        f"sent_packets: {sent_pack}\n receive_packets: {receive_pack}")
    if -1 < modem_snr < 1 and modem_sync and modem_freq < 1000 and sent_pack == receive_pack:
        break
    if modem_snr < -1:
        lower_bound = running_value + 1
    if modem_snr > 1:
        upper_bound = running_value - 1
    if upper_bound < lower_bound:
        print("FAIL")
        exit(1)
Hi all, I need to catch the answer from the PCB (opcode 38). Because of timing, I would prefer to send the data (msg_to_send), then build a list that appends the received data, calculate the average, and after all this proceed to the if statements (using average_snr instead of modem_snr). After that, if I need to go back to the while loop to adjust the data, I will need to clear the previous list and repeat the cycle.
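A minimal sketch of that idea inside the loop, reusing the names from the snippet above (the number of answers to average per step, NUM_SAMPLES, is an assumption):
NUM_SAMPLES = 5  # assumption: how many opcode-38 answers to average per bisection step

# inside the while True loop, after rcvD.send_the_message(...) and time.sleep(2):
modem_snr_list.clear()
for _ in range(NUM_SAMPLES):
    fm.rcv_the_packets(wait_for_optcode=38)  # catch the answer from the PCB (opcode 38)
    modem_snr_list.append(rcvD.all_strck.modemParamMsg["modemSnr"])
average_snr = sum(modem_snr_list) / len(modem_snr_list)
# ...then use average_snr instead of modem_snr in the break and bound-update checks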

Filtering and saving subset of pandas

I have a function that does the following:
It inserts class values 1, 2, 3 based on timestamps. This works as expected, and in the first iteration of the first for-loop I get the following class distribution:
mapping: {'Seizure': 1, 'Preictal': 2, 'Interictal': 3}
value counts:
3.0 3150000
2.0 450000
1.0 28000
Name: class, dtype:
So I have this number of rows for each class.
However, in the second for-loop I iterate through the same list of timestamps and want to subset the data between the timestamps, including some conditions based on the classes I inserted in the first for-loop.
This is the result for the same timestamps, e.g. the first iteration:
len sz: 28000
len prei: 450000
len pre int: 29700000
logging
len post int: 1485499
How on earth do pre int and post int (the interictal class) get counts this high? They don't correspond at all to the number of interictal rows from the first loop.
Here is my function.
def insert_class_col(dataframe, sz_info_list, date_converter, save_filename, save_path, file_sample_rate, file_channel):
    print(f"sz_info_list: {sz_info_list}")
    if "class" not in dataframe.columns:
        dataframe.insert(0, "class", np.nan)
    file_channel.extend(['timestamp', 'class'])
    dataframe = dataframe[file_channel]
    # Insert class attributes to ensure that seizure, preictal, interictal do not overlap.
    for index, container in enumerate(sz_info_list):
        delay = container.delay * 1000
        duration = container.duration * 1000
        sz_start = date_converter(container.time_emu) + delay
        sz_end = sz_start + duration
        print(f"sz_start index = {sz_start}")
        print(f"sz_end: {sz_end}")
        preictal_start = sz_start - (15 * 60 * 1000)
        interictal_start = sz_start - (1 * 60 * 60 * 1000)
        interictal_end = sz_end + (1 * 60 * 60 * 1000)
        dataframe['timestamp'] = pd.to_numeric(dataframe['timestamp'])
        # if the data is seizure, tag it seizure
        # if the data is preictal, tag it preictal/interictal, but not inside seizure data
        dataframe.loc[(dataframe['timestamp'] >= sz_start) & (dataframe['timestamp'] < sz_end), "class"] = class_mapping['Seizure']
        dataframe.loc[(dataframe['class'] != class_mapping['Seizure']) & (dataframe['timestamp'] >= preictal_start) & (dataframe['timestamp'] < sz_start), "class"] = class_mapping['Preictal']
        dataframe.loc[(dataframe['class'] != class_mapping['Seizure']) & (dataframe['class'] != class_mapping['Preictal']) & (dataframe['timestamp'] >= interictal_start) & (dataframe['timestamp'] < interictal_end), "class"] = class_mapping['Interictal']
        print(f"mapping: {class_mapping} \n value counts: \n{dataframe['class'].value_counts()}")
        print(f"Beginning current number of class in df {dataframe['class'].value_counts()}")
    # Saving to csv
    for index, container in enumerate(sz_info_list):
        delay = container.delay * 1000
        duration = container.duration * 1000
        sz_start = date_converter(container.time_emu) + delay
        sz_end = sz_start + duration
        print(f"sz_start index = {sz_start}")
        print(f"sz_end: {sz_end}")
        preictal_start = sz_start - (15 * 60 * 1000)
        interictal_start = sz_start - (1 * 60 * 60 * 1000)
        interictal_end = sz_end + (1 * 60 * 60 * 1000)
        dataframe['timestamp'] = pd.to_numeric(dataframe['timestamp'])
        # INSERTING SEIZURE CLASS
        sz_df = dataframe[(dataframe['timestamp'] >= sz_start) & (dataframe['timestamp'] < sz_end)].copy()
        print(f"len sz: {len(sz_df)}")
        #df_save_compress(f"Seizure_{index}_{save_filename}", save_path + "/Seizure", sz_df)
        #logging_info_txt(f"Seizure_{index}_{save_filename}", save_path, file_sample_rate, file_channel)
        # INSERTING PREICTAL
        prei_df = dataframe[(dataframe['timestamp'] >= preictal_start) & (dataframe['timestamp'] < sz_start) & (dataframe['class'] != class_mapping["Seizure"])].copy()
        print(f"len prei: {len(prei_df)}")
        #df_save_compress(f"Preictal_{index}_{save_filename}", save_path + "/Preictal", prei_df)
        #logging_info_txt(f"Preictal_{index}_{save_filename}", save_path, file_sample_rate, file_channel)
        # INSERTING INTERICTAL
        pre_int_df = dataframe[(dataframe['timestamp'] >= interictal_start) & (dataframe['timestamp'] < preictal_start) & (dataframe['class'] != class_mapping["Seizure"]) | (dataframe['class'] != class_mapping["Preictal"])].copy()
        print(f"len pre int: {len(pre_int_df)}")
        #df_save_compress(f"PreInt_{index}_{save_filename}", save_path + "/Interictal", pre_int_df)
        logging_info_txt(f"PreInt_{index}_{save_filename}", save_path, file_sample_rate, file_channel)
        post_int_df = dataframe[(dataframe['timestamp'] >= sz_end) & (dataframe['timestamp'] < interictal_end) & (dataframe['class'] != class_mapping["Seizure"]) & (dataframe['class'] != class_mapping["Preictal"])].copy()
        print(f"len post int: {len(post_int_df)}")
        #df_save_compress(f"PostInt_{index}_{save_filename}", save_path + "/Interictal", post_int_df)
        logging_info_txt(f"PostInt_{index}_{save_filename}", save_path, file_sample_rate, file_channel)
        #print(f"after = len df: {len(dataframe)} values class: \n {dataframe['class'].value_counts()}")
        # clean up
        del pre_int_df, post_int_df, sz_df, prei_df
        gc.collect()
Notice that pre int, which is interictal, is 29700000, while from the printed class counts it should be lower than 3150000.
Any ideas about this pandas behavior?
richardec answered the question; see the comments.
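For reference, the inflated counts are consistent with how the pre_int_df filter groups its conditions: & binds tighter than |, so the final | term is OR-ed with the whole timestamp window. A small self-contained illustration (whether this is exactly what the accepted comment points out is an assumption):
import pandas as pd

df = pd.DataFrame({"timestamp": [1, 2, 3, 4], "class": [1.0, 3.0, 2.0, 3.0]})

# Grouped as written above: any row whose class != 2 passes, regardless of the timestamp bounds.
loose = df[(df["timestamp"] >= 2) & (df["timestamp"] < 3) & (df["class"] != 1) | (df["class"] != 2)]

# With the class conditions AND-ed in, the timestamp window always applies.
strict = df[(df["timestamp"] >= 2) & (df["timestamp"] < 3) & (df["class"] != 1) & (df["class"] != 2)]

print(len(loose), len(strict))  # -> 3 1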

Inverse line graph year count matplotlib pandas python

I'm trying to create a line plot of the count of three different groups, i.e. desktop, mobile & tablet, with the x-axis showing the years 2014, 2015 and 2016, but I am getting an error.
My code is currently:
#year-by-year change
desktop14 = od.loc[(od.Account_Year_Week >= 201401) & (od.Account_Year_Week <= 201453) & (od.online_device_type_detail == "DESKTOP"), "Gross_Demand_Pre_Credit"]
desktop15 = od.loc[(od.Account_Year_Week >= 201501) & (od.Account_Year_Week <= 201553) & (od.online_device_type_detail == "DESKTOP"), "Gross_Demand_Pre_Credit"]
desktop16 = od.loc[(od.Account_Year_Week >= 201601) & (od.Account_Year_Week <= 201653) & (od.online_device_type_detail == "DESKTOP"), "Gross_Demand_Pre_Credit"]
mobile14 = od.loc[(od.Account_Year_Week >= 201401) & (od.Account_Year_Week <= 201453) & (od.online_device_type_detail == "MOBILE"), "Gross_Demand_Pre_Credit"]
mobile15 = od.loc[(od.Account_Year_Week >= 201501) & (od.Account_Year_Week <= 201553) & (od.online_device_type_detail == "MOBILE"), "Gross_Demand_Pre_Credit"]
mobile16 = od.loc[(od.Account_Year_Week >= 201601) & (od.Account_Year_Week <= 201653) & (od.online_device_type_detail == "MOBILE"), "Gross_Demand_Pre_Credit"]
tablet14 = od.loc[(od.Account_Year_Week >= 201401) & (od.Account_Year_Week <= 201453) & (od.online_device_type_detail == "TABLET"), "Gross_Demand_Pre_Credit"]
tablet15 = od.loc[(od.Account_Year_Week >= 201501) & (od.Account_Year_Week <= 201553) & (od.online_device_type_detail == "TABLET"), "Gross_Demand_Pre_Credit"]
tablet16 = od.loc[(od.Account_Year_Week >= 201601) & (od.Account_Year_Week <= 201653) & (od.online_device_type_detail == "TABLET"), "Gross_Demand_Pre_Credit"]
devicedata = [["Desktop", desktop14.count(), desktop15.count(), desktop16.count()], ["Mobile", mobile14.count(), mobile15.count(), mobile16.count()], ["Tablet", tablet14.count(), tablet15.count(), tablet16.count()]]
df = pd.DataFrame(devicedata, columns=["Device", "2014", "2015", "2016"]).set_index("Device")
plt.show()
I want to make each of the lines the device types, with the x-axis showing the change in year. How do I do this (essentially swapping the axes)?
Any help is greatly appreciated.
Just do
df.transpose().plot()
The result will be something like this: one line per device type, with 2014, 2015 and 2016 on the x-axis.
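For what it's worth, the nine .loc slices can also be collapsed into one groupby before transposing; a sketch assuming Account_Year_Week is numeric in the form YYYYWW:
# derive the year from Account_Year_Week (e.g. 201401 -> 2014) and count rows per device and year
od["Year"] = od["Account_Year_Week"] // 100
counts = (od[od["Year"].isin([2014, 2015, 2016])]
          .groupby(["online_device_type_detail", "Year"])["Gross_Demand_Pre_Credit"]
          .count()
          .unstack("Year"))
counts.transpose().plot()
plt.show()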

TypeError: 'instancemethod' object is unsubscriptable in python

I'm working on my Python script to get the list of buttons, and I have a problem with the code. When I press the down arrow button on the keyboard, I get an error: TypeError: 'instancemethod' object is unsubscriptable
The error points to this line:
for channel in channels[page_no*7:page_no*7+7]:
Here is the full code:
#get actioncodes from keyboard.xml
ACTION_MOVE_LEFT = 1
ACTION_MOVE_RIGHT = 2
ACTION_MOVE_UP = 3
ACTION_MOVE_DOWN = 4
CHANNELS_PER_PAGE = 7

class MyClass(xbmcgui.WindowXML):
    def __new__(cls):
        return super(MyClass, cls).__new__(cls, 'script-tvguide-mainmenu.xml', ADDON.getAddonInfo('path'))

    def __init__(self):
        self._timel = []
        self.thread = None
        self.buttonList = []
        self.last_page = False

    def All_Channels(self):
        yellow_flag = True
        global __killthread__
        self.getControl(4202).setLabel("0%")
        try:
            # DOWNLOAD THE XML SOURCE HERE
            url = ADDON.getSetting('allchannel.url')
            data = ''
            response = urllib2.urlopen(url)
            meta = response.info()
            file_size = int(meta.getheaders("Content-Length")[0])
            file_size_dl = 0
            block_size = 2048
            while True and not __killthread__:
                mbuffer = response.read(block_size)
                if not mbuffer:
                    break
                file_size_dl += len(mbuffer)
                data += mbuffer
                state = int(file_size_dl * 10.0 / file_size)
                self.getControl(4202).setLabel(str(state) + '%')
            else:
                if __killthread__:
                    raise AbortDownload('downloading')
            del response
            # CREATE DATABASE
            profilePath = xbmc.translatePath(os.path.join('special://userdata/addon_data/script.tvguide', 'source.db'))
            if os.path.exists(profilePath):
                os.remove(profilePath)
            con = database.connect(profilePath)
            cur = con.cursor()
            cur.execute('CREATE TABLE programs(channel TEXT, title TEXT, start_date TIMESTAMP, stop_date TIMESTAMP, description TEXT)')
            con.commit()
            # Get the loaded data
            total_count = data.count('programme')/2
            tv_elem = ElementTree.parse(StringIO.StringIO(data)).getroot()
            cur = con.cursor()
            count = 1
            channels = OrderedDict()
            for channel in tv_elem.findall('channel'):
                channel_name = channel.find('display-name').text
                for program in channel.findall('programme'):
                    if __killthread__:
                        raise AbortDownload('filling')
                    title = program.find('title').text
                    start_time = program.get("start")
                    stop_time = program.get("stop")
                    cur.execute("INSERT INTO programs(channel, title, start_date, stop_date)" + " VALUES(?, ?, ?, ?)", [channel_name, title, start_time, stop_time])
                    status = 10 + int(float(count)/float(total_count) * 90.0)
                    self.getControl(4202).setLabel(str(status) + '%')
                    xbmc.sleep(10)
                    count += 1
            con.commit()
            print 'Channels have been successfully stored into the database!'
            self.getControl(4202).setLabel('100%')
            xbmc.sleep(3000)
            # Set the date and time row
            current_time = time.time() # now (in seconds)
            half_hour = current_time + 60*30 # now + 30 minutes
            one_hour = current_time + 60*60 # now + 60 minutes
            for t in [current_time, half_hour, one_hour]:
                if (0 <= datetime.datetime.now().minute <= 29):
                    self.getControl(4204).setLabel(time.strftime("%I").lstrip('0') + ':00' + time.strftime("%p"))
                    self.getControl(4205).setLabel(time.strftime("%I").lstrip('0') + ':30' + time.strftime("%p"))
                    self.getControl(4206).setLabel(time.strftime("%I" + ":00%p", time.localtime(t)).lstrip("0"))
                else:
                    self.getControl(4204).setLabel(time.strftime("%I").lstrip('0') + ':30' + time.strftime("%p"))
                    self.getControl(4205).setLabel(time.strftime("%I" + ":00%p", time.localtime(t)).lstrip("0"))
                    self.getControl(4206).setLabel(time.strftime("%I" + ":30%p", time.localtime(t)).lstrip("0"))
            #Pull the data from the database
            channelList = list()
            database_path = xbmc.translatePath(os.path.join('special://userdata/addon_data/script.tvguide', 'source.db'))
            if os.path.exists(database_path):
                #get the channels list
                cur.execute('SELECT channel FROM programs WHERE channel GROUP BY channel')
                for row in cur:
                    channels = row[0].encode('ascii')
                    channelList.append(channels)
                # set the channels text
                for index in range(0, CHANNELS_PER_PAGE):
                    channel = channelList[index]
                    channel_index = index
                    if channel is not None:
                        pass
                        #self.getControl(4207 + index).setLabel(channel)
                        #self.button.setLabel(channel, 'font14', '0xFFFFFFFF', '0xFFFF3300', '0xFF000000')
                    #get the programs list
                    cur.execute('SELECT channel, title, start_date, stop_date FROM programs WHERE channel=?', [channel])
                    programList = list()
                    programs = cur.fetchall()
                    start_pos = 368 # indent for first program
                    for row in programs:
                        program = row[1].encode('ascii'), str(row[2]), str(row[3])
                        title = row[1].encode('ascii')
                        program_start_date = str(row[2])
                        program_end_date = str(row[3])
                        #convert the date formats into minutes
                        minutes_start = self.parseDateTimeToMinutesSinceEpoch(program_start_date)
                        minutes_end = self.parseDateTimeToMinutesSinceEpoch(program_end_date)
                        minutes_length = minutes_end - minutes_start
                        program_length = minutes_length
                        program_notification = program
                        programs_top_backup = 0
                        programs_top = 315
                        program_height = 34.5
                        program_gap = 2.5
                        position_start = start_pos
                        position_top = programs_top + channel_index * (program_height + program_gap)
                        if 10 <= program_length < 60:
                            program_width = 342.5
                        elif 60 <= program_length < 90:
                            program_width = 690
                        elif 90 <= program_length < 105:
                            program_width = 1050
                        elif 105 <= program_length < 120:
                            program_width = 1400
                        elif 120 <= program_length < 150:
                            program_width = 1750
                        elif 150 <= program_length < 180:
                            program_width = 2100
                        elif 180 <= program_length < 210:
                            program_width = 2450
                        elif 210 <= program_length < 240:
                            program_width = 2800
                        elif 240 <= program_length < 270:
                            program_width = 3150
                        elif 270 <= program_length < 300:
                            program_width = 3500
                        elif 300 <= program_length < 330:
                            program_width = 3850
                        elif 330 <= program_length < 360:
                            program_width = 4200
                        elif 360 <= program_length < 390:
                            program_width = 3250
                        elif 390 <= program_length < 420:
                            program_width = 4550
                        elif 420 <= program_length < 450:
                            program_width = 4900
                        elif 450 <= program_length < 480:
                            program_width = 5250
                        start_pos += program_width + 2 * program_gap
                        if program_width > 1:
                            if yellow_flag:
                                if program_notification:
                                    button_nofocus = 'changelang_yellow.png'
                                    button_focus = 'channels_bar1.png'
                                else:
                                    button_nofocus = 'changelang_yellow.png'
                                    button_focus = 'channels_bar1.png'
                                yellow_flag = False
                                text_color = '0xFF000000'
                            else:
                                if program_notification:
                                    button_nofocus = 'channels_bar1.png'
                                    button_focus = 'channels_yellow.png'
                                else:
                                    button_nofocus = 'channels_bar1.png'
                                    button_focus = 'channels_yellow.png'
                                text_color = '0xFFFFFFFF'
                        if program_width < 1:
                            program_title = ''
                        else:
                            program_title = '[B]' + title + '[/B]'

    def showepg(self, channels, page_no):
        self.last_page = False
        self.removeControls(self.buttonList)
        self.buttonList = []
        page_no = 0
        self.button = [[0 for x in xrange(20)] for x in xrange(20)]
        self.pdata = [[dict() for x in xrange(20)] for x in xrange(20)]
        row = 0
        for channel in channels[page_no*7:page_no*7+7]:
            #get the list of buttons in array
            print channel
            self.pdata[row][0]['url'] = channel['url']
            self.pdata[row][0]['cname'] = xbmcgui.ControlLabel(0, self.startPos + 17 + (row * row_height), 100, row_height, channel['callSign'])
            self.pdata[row][0]['cicon'] = channel['thumbnail'].replace('\\', '')
            self.pdata[row][0]['cimage'] = xbmcgui.ControlImage(100, self.startPos + (row * row_height), logo_width, logo_width, self.pdata[row][0]['cicon'])
            self.buttonList.append(self.pdata[row][0]['cimage'])
            self.buttonList.append(self.pdata[row][0]['cname'])
            events = channel['events']
            col = 0
            coffset = 0
            for event in events:
                try:
                    self.pdata[row][col]['desc'] = '%s - %s\n%s' % (event['startTimeDisplay'], event['endTimeDisplay'], str(event['program']['description']))
                except:
                    self.pdata[row][col]['desc'] = ""
                self.pdata[row][col]['duration'] = str(event['duration'])
                self.pdata[row][col]['eptitle'] = '%s - %s : %s' % (event['startTimeDisplay'], event['endTimeDisplay'], event['eptitle'])
                cwidth = int((float(event['percentWidth']) / 100) * progs_width)
                self.button[row][col] = xbmcgui.ControlButton(poffset + coffset, self.startPos + (row * row_height), cwidth, row_height, event['program']['title'])
                self.buttonList.append(self.button[row][col])
                coffset = coffset + cwidth
                col = col + 1
            row = row + 1
            if row == MAXIMUMROW:
                break
        self.addControls(self.buttonList)
        if row == 0:
            self.current_page = 0
            self.showepg(channels, 0) # hack to display first page after last page - could be problem for empty epg
            return
        elif row < MAXIMUMROW:
            self.last_page = True
        maxrow = row
        for row in range(maxrow + 1):
            for col in range(20):
                if self.button[row][col] == 0:
                    break
                else:
                    if row < maxrow-1:
                        self.button[row][col].controlDown(self.button[row+1][0])
                    if row == maxrow-1:
                        if maxrow == MAXIMUMROW:
                            self.button[row][col].controlDown(self.button[row][col])
                    if col > 0:
                        self.button[row][col].controlLeft(self.button[row][col-1])
                        self.button[row][col-1].controlRight(self.button[row][col])
                    if row > 0:
                        self.button[row][col].controlUp(self.button[row-1][0])
                    if row == 0:
                        self.button[row][col].controlUp(self.button[row][col])
        self.topRow = True
        self.bottomRow = False
        control = self.button[0][0]
        self.setFocus(control)
        self.updateEpg(control)

    def onAction(self, action):
        self.current_page = 0
        self.last_page = False
        if action == ACTION_MOVE_DOWN:
            if allchannels_enabled:
                if self.last_page:
                    self.current_page = 0
                else:
                    self.current_page += 1
                self.showepg(self.All_Channels, self.current_page)
                return
I don't understand what the error means. I'm trying to pass the channels to build the button controls using self.All_Channels when the down arrow button is pressed.
Does anyone know why I get this error and how to fix it?
The error means you're using [] on something that doesn't support it; this can be reproduced with (for example): None[0].
Now, the specific problem in your code appears to be:
self.showepg(self.All_Channels, self.current_page)
What this does is pass the function self.All_Channels itself to the self.showepg function; this is why you're seeing instancemethod in the error. What you probably want to do is add parentheses here:
self.showepg(self.All_Channels(), self.current_page)
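A minimal standalone example of the same error, for anyone who wants to see it in isolation (Python 2, to match the code above):
class Demo(object):
    def channels(self):
        return ['a', 'b', 'c']

d = Demo()
print d.channels()[0:2]  # works: ['a', 'b']
print d.channels[0:2]    # TypeError: 'instancemethod' object is unsubscriptable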
