MyHDL Signals inside functions not showing up in VCD

Should I be able to see these in the generated VCD file?
@always(clk.posedge)
def MentorCluster():
    j = Signal(0)
    mentorq0, mentorq1, mentorq2, mentorq3 = [[Signal(0) for j in range(10)] for i in range(4)]
I can see all the signals I created at the top level, but not the ones local to the function.
Here is the code I used to generate the VCD:
def simulate(timesteps):
    traceSignals.timescale = "1ps"
    tb = traceSignals(avaca)
    sim = Simulation(tb)
    sim.run(timesteps)
    sim.quit()

# simulate for 2000 ticks (picoseconds) -- very ambitious to do all this in 2 ns!
simulate(2000)

Signals created inside an `always` block will not only fail to show up in the .vcd, they won't work at all.
Here is a small test program to demonstrate this.
# import reconstructed from the names used below
from myhdl import (block, Signal, intbv, always_seq, instance, instances,
                   delay, StopSimulation)
@block
def f1(clk, sigin, sigout):
    # this is the place to declare Signals and ListOfSignals
    sigind1 = Signal(intbv(0)[4:])
    mentorq0, mentorq1, mentorq2, mentorq3 = [[Signal(bool(0)) for j in range(10)] for i in range(4)]

    @always_seq(clk.posedge, reset=None)
    def f1s():
        # declaring Signals and ListOfSignals here won't work
        # sigind1 = Signal(intbv(0)[4:])
        # mentorq0, mentorq1, mentorq2, mentorq3 = [[Signal(bool(0)) for j in range(10)] for i in range(4)]
        sigind1.next = sigin
        mentorq0[0].next = sigind1[0]
        mentorq1[0].next = sigind1[1]
        mentorq2[0].next = sigind1[2]
        mentorq3[0].next = sigind1[3]
        for i in range(1, 10):
            mentorq0[i].next = mentorq0[i - 1]
            mentorq1[i].next = mentorq1[i - 1]
            mentorq2[i].next = mentorq2[i - 1]
            mentorq3[i].next = mentorq3[i - 1]
        sigout.next[0] = mentorq0[9]
        sigout.next[1] = mentorq1[9]
        sigout.next[2] = mentorq2[9]
        sigout.next[3] = mentorq3[9]

    return f1s
if __name__ == '__main__':
    import random
    random.seed('We want repeatable randomness')

    @block
    def tb_f1():
        clk = Signal(bool(0))
        sigin = Signal(intbv(0)[4:])
        sigout = Signal(intbv(0)[4:])
        tCK = 10

        dut = f1(clk, sigin, sigout)

        @instance
        def genclk():
            while True:
                clk.next = not clk
                yield delay(int(tCK // 2))

        @instance
        def stimulus():
            yield delay(int(tCK * 3.5))
            for __ in range(10):
                sigin.next = random.randint(1, 15)
                yield delay(tCK)
            yield delay(tCK * 20)
            raise StopSimulation

        return instances()

    # finally, run the simulation with tracing enabled
    dft = tb_f1()
    dft.config_sim(trace=True)
    dft.run_sim()
If we instead declare the Signals inside the always block (the commented-out lines in f1s), sigout will remain 0: signals have to be created at elaboration time, when the block function itself runs, not inside the generator that runs on every clock edge.

Related

Threading and Matplotlib

I have created an application which basically generates graphs and displays them. Because some processes take a long time, I added a loading bar, which requires the use of threading.
The first run works, but displaying a heat graph a subsequent time causes "RuntimeError: main thread is not in main loop".
Without threading it works perfectly fine. I would like to keep the threading and still be able to display graphs repeatedly.
Sorry for the messy code.
class App(customtkinter.CTk):
    def generate_heatmap(self):
        print('Generate HeatMap')
        brightness = []
        motion = []
        for k in figs_b.keys():
            l = figs_b[k][1]
            brightness.append(sum(l) / len(l))
        for s in figs_m.keys():
            print(s)
            l = figs_m[s][1]
            motion.append(max(l))
        data = pd.read_csv(filedialog.askopenfilename())
        print(data)
        data.insert(2, 'Color', avg_c)
        data.insert(3, 'Motion', motion)
        data.insert(4, 'Brightness', brightness)
        sns.heatmap(data.corr(), annot=True, cmap='viridis', vmin=-1, vmax=1)
        plt.show()

    def loading(self):
        if self.t.is_alive():
            print('EXECUTE LOAD')
            self.main_frame.grid_forget()
            self.loading_frame.grid(row=0, column=1, rowspan=4, columnspan=4, sticky="nsew")
            self.loading_frame.grid_propagate(False)
            self.loading_lbl.pack_propagate(False)
            extract_image_from_gif()
            self.play_gif()
            self.update()
        print('RESET LOAD')
        self.loading_frame.grid_forget()
        self.main_frame.grid(row=0, column=1, rowspan=4, columnspan=4, sticky="nsew")
        self.update()

    def play_gif(self):
        global countx, cur_img
        try:
            print('in')
            countx += 2
            cur_img = customtkinter.CTkImage(lst_image[countx], size=(800, 200))
            self.loading_lbl.configure(image=cur_img)
            self.update()
            self.after(gif_duration, self.play_gif)
        except Exception as e:
            # print(e)
            if self.t.is_alive():
                countx = 0
                self.after(gif_duration, self.play_gif)
            else:
                return

    def openfile(self, force=''):
        name = force
        decision = ''
        if force == '':
            decision = filedialog.askopenfilename().split('/')
            name = decision[len(decision) - 1].split('.')[0]
            if name == '':
                return
        self.btn_list.append(customtkinter.CTkButton(self.btn_frame, text=name,
                                                     command=lambda: self.change_analysis(name)))
        for x, btn in enumerate(self.btn_list):
            btn.grid(row=x + 1, column=0, padx=10, pady=10)
        print('*' * 20)
        self.t = th.Thread(target=self.getGraph, args=(decision, name,))
        self.t.start()
        self.loading()
        self.t.join()
        self.change_analysis(name)
        print('*' * 20)

    def getGraph(self, decision, name):
        if os.path.exists(f'Frames/{name}') and decision != '':
            return
        elif decision != '':
            frame_extraction('\\'.join(decision), name)
        red = []
        green = []
        blue = []
        files = os.listdir(f'Frames/{name}')
        sorted_files = [file.replace('f_', '') for file in files]
        sorted_files = sorted(sorted_files, key=lambda x: int(os.path.splitext(x)[0]))
        for frame in sorted_files:
            rgb = color_extraction(f'Frames/{name}/f_{frame}')
            r, g, b = rgb[0]
            red.append(r)
            green.append(g)
            blue.append(b)
            # print(f'Frame: {frame} RGB:{rgb[0]}')
        frames = [(int(os.path.splitext(x)[0])) * 20 for x in sorted_files]
        figs_c[name] = generate_graph(red, green, blue, frames)
        if decision != '':
            figs_m[name] = motion_analysis(name)
            figs_b[name] = brightness_graph(name)
            print(figs_b[name])
            self.save_data()
        avg_r = round(sum(red) / len(red))
        avg_g = round(sum(green) / len(green))
        avg_b = round(sum(blue) / len(blue))
        avg_c.append(rgbtoint32([avg_r, avg_g, avg_b]))
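The error comes from driving Tkinter and Matplotlib from the worker thread: both expect all GUI work to happen on the main thread. A minimal standalone sketch of the usual pattern (an illustration, not the poster's app): the thread only computes, and the main thread polls it with after() and does the plotting itself.
import threading
import tkinter as tk
import matplotlib.pyplot as plt

def heavy_computation(result):
    # stand-in for getGraph(): pure number crunching, no GUI calls
    result['data'] = [x * x for x in range(10)]

def start_job(root):
    result = {}
    t = threading.Thread(target=heavy_computation, args=(result,))
    t.start()

    def poll():
        if t.is_alive():
            root.after(100, poll)        # keep the Tk main loop responsive
        else:
            plt.plot(result['data'])     # plot on the main thread only
            plt.show()

    root.after(100, poll)

root = tk.Tk()
tk.Button(root, text="Run", command=lambda: start_job(root)).pack()
root.mainloop()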

Is there any way these two almost identical functions can be combined into one general function?

I have these two functions that are really similar except for the format of the log each one receives and returns: one parses and returns 4 values while the other returns 3.
Is there any way I can make one general function for these two? Thank you.
> - Borrow book: B#<day>#<Student Name>#<Book name>#<days borrowed for>
> - Return book: R#<day>#<Student Name>#<Book name>
def read_borrow_log(log):
    borrow_day = []
    borrow_student = []
    borrow_book = []
    borrow_duration = []
    for line in log:
        hash_func = line.find("#")
        hash_day = line.find("#", hash_func + 1)
        hash_student = line.find("#", hash_day + 1)
        hash_book = line.find("#", hash_student + 1)
        borrow_day.append(int(line[(hash_func + 1):(hash_day)]))
        borrow_student.append(line[(hash_day + 1):(hash_student)])
        borrow_book.append(line[(hash_student + 1):(hash_book)])
        borrow_duration.append(line[(hash_book + 1):])
    return borrow_day, borrow_student, borrow_book, borrow_duration

def read_return_log(log):
    return_day = []
    return_student = []
    return_book = []
    for line in log:
        hash_func = line.find("#")
        hash_day = line.find("#", hash_func + 1)
        hash_student = line.find("#", hash_day + 1)
        return_day.append(int(line[(hash_func + 1):(hash_day)]))
        return_student.append(line[(hash_day + 1):(hash_student)])
        return_book.append(line[(hash_student + 1):])
    return return_day, return_student, return_book

def main():
    borrow_day, borrow_student, borrow_book, borrow_duration = read_borrow_log(borrow_log)
    return_day, return_student, return_book = read_return_log(return_log)
Try using Python's built-in string split:
def extract_log_parts(log):
    recs = []
    for line in log:
        recs.append(line.split('#'))
    # we want the record *columns* -- transpose the table
    return tuple(map(list, zip(*recs)))
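A minimal usage sketch for the question's two logs (note that split() keeps the leading B/R code as an extra first column and returns every field as a string, so the day column needs an int conversion):
_, borrow_day, borrow_student, borrow_book, borrow_duration = extract_log_parts(borrow_log)
_, return_day, return_student, return_book = extract_log_parts(return_log)

# the find()-based versions returned the day as int; split leaves it as text
borrow_day = [int(d) for d in borrow_day]
return_day = [int(d) for d in return_day]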
One thing you might do is make the 'extra' work happen only when a certain optional parameter is passed in, as shown:
def read_borrow_log(log, borrow_log=True):
    borrow_day = []
    borrow_student = []
    borrow_book = []
    if borrow_log is True:
        borrow_duration = []
    for line in log:
        hash_func = line.find("#")
        hash_day = line.find("#", hash_func + 1)
        hash_student = line.find("#", hash_day + 1)
        if borrow_log is True:
            hash_book = line.find("#", hash_student + 1)
        borrow_day.append(int(line[(hash_func + 1):(hash_day)]))
        borrow_student.append(line[(hash_day + 1):(hash_student)])
        if borrow_log is True:
            borrow_book.append(line[(hash_student + 1):(hash_book)])
            borrow_duration.append(line[(hash_book + 1):])
        else:
            borrow_book.append(line[(hash_student + 1):])
    if borrow_log is True:
        return borrow_day, borrow_student, borrow_book, borrow_duration
    else:
        return borrow_day, borrow_student, borrow_book

def main():
    borrow_day, borrow_student, borrow_book, borrow_duration = read_borrow_log(borrow_log)
    return_day, return_student, return_book = read_borrow_log(return_log, borrow_log=False)
However, you might want to rethink the naming convention, since this function now does more than one thing, which is bad for documentation purposes (and it is generally bad practice to have a function do more than one thing, bad enough that I would downvote my own answer if I could).

Python multiprocessing multiple iterations

I am trying to use multiprocessing to speed up my data processing. I am working on a machine with 6 cores. I want to iterate over a table of 12 million rows, and for each row iterate over several time steps doing a calculation (executing a function).
This is the line I would like to split up so that it runs in parallel on different cores:
test = [rowiteration(i, output, ini_cols, cols) for i in a] # this should run in parallel
I tried something with
from multiprocessing import Pool
but I did not manage to pass the arguments of the function and the iterator.
I would appreciate any ideas; I am new to Python.
This is what I have:
import os
import pyreadr
import pandas as pd
import numpy as np
import time
from datetime import timedelta
import functools
from pathlib import Path

def read_data():
    current_path = os.getcwd()
    myfile = os.path.join(str(Path(current_path).parents[0]), 'dummy.RData')
    result = pyreadr.read_r(myfile)
    pc = result["pc"]
    u = result["u"]
    return pc, u

# add one column per time step
def prepare_output_structure(pc):
    ini_cols = pc.columns
    pc = pc.reindex(columns=[*pc.columns, *np.arange(0, 11), 'cat'], fill_value=0)
    pc.reset_index(level=0, inplace=True)
    # print(pc.columns, pc.shape, pc.dtypes)
    return pc, ini_cols

def conjunction(*conditions):
    return functools.reduce(np.logical_and, conditions)

def timeloop(t_final: int, count_final: int, tipo):
    if tipo == 'A':
        count_ini = 35
    else:  # 'B'
        count_ini = 30
    yy_list = []
    for t in np.arange(0, 11):
        yy = ((count_final - count_ini) / t_final) * t + count_ini
        yy_list.append(int(yy))
    return yy_list

def rowiteration(i, output, ini_cols, cols):
    c_2: bool = pc.loc[i, 'tipo'] == u.iloc[:, 0].str[:1]  # first character of category, e.g. 'A1'
    c_5: bool = pc.loc[i, 't_final'] >= u.iloc[:, 1]  # t_min (u)
    c_6: bool = pc.loc[i, 't_final'] <= (u.iloc[:, 2])  # t_max (u)
    pc.loc[i, 'cat'] = u[conjunction(c_2, c_5, c_6)].iloc[0, 0]
    pc.iloc[i, (0 + (len(ini_cols)) + 1):(10 + (len(ini_cols)) + 2)] = timeloop(int(pc.loc[i, 't_final']), int(pc.loc[i, 'count_final']), pc.loc[i, 'tipo'])
    out = pd.DataFrame(pc.iloc[i, :])
    out = pd.DataFrame(out.transpose(), columns=cols)
    output = output.append(out.iloc[0, :])
    return output

if __name__ == '__main__':
    start_time = time.time()
    pc, u = read_data()
    nrowpc = len(pc.index)
    a = np.arange(0, nrowpc)  # rows of table pc
    # print(a, nrowpc, len(pc.index))
    pc, ini_cols = prepare_output_structure(pc)
    cols = pc.columns
    output = pd.DataFrame()
    test = [rowiteration(i, output, ini_cols, cols) for i in a]  # this should run in parallel
    pc2 = pd.concat(test, ignore_index=True)
    pc2 = pc2.iloc[:, np.r_[5, (len(ini_cols) + 1):(len(pc2.columns))]]
    print(pc2.head())
    elapsed_time_secs = time.time() - start_time
    msg = "Execution took: %s secs (Wall clock time)" % timedelta(seconds=elapsed_time_secs)
    print(msg)
Replace your [rowiteration(i, output, ini_cols, cols) for i in a] with:
from multiprocessing import Pool

n_cpu = 10  # set this to the number of CPU threads you want to use
with Pool(processes=n_cpu) as pool:
    ret = pool.starmap(rowiteration,
                       [(i, output, ini_cols, cols) for i in a])
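One caveat the snippet leaves implicit: on platforms that spawn workers (e.g. Windows), the Pool has to be created under the if __name__ == '__main__': guard, and the global pc and u that rowiteration reads must be made available inside each worker. A sketch of one way to wire that up, using the Pool's standard initializer/initargs arguments (the surrounding structure is an assumption, not part of the answer):
def init_worker(pc_, u_):
    # make the shared tables visible as globals inside each worker process
    global pc, u
    pc, u = pc_, u_

if __name__ == '__main__':
    pc, u = read_data()
    pc, ini_cols = prepare_output_structure(pc)
    cols = pc.columns
    output = pd.DataFrame()
    a = np.arange(0, len(pc.index))

    with Pool(processes=n_cpu, initializer=init_worker, initargs=(pc, u)) as pool:
        test = pool.starmap(rowiteration, [(i, output, ini_cols, cols) for i in a])

    pc2 = pd.concat(test, ignore_index=True)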
Here is an approach that I think solves the problem and that only sends what is necessary to the worker processes. I haven't tested this as is (which would be difficult without the data your code reads in), but this is the basic idea:
import multiprocessing as mp

p = mp.Pool(processes=mp.cpu_count())

# Note that you already define the static cols and ini_cols
# in global scope, so you don't need to pass them to the Pool.

# ... Other functions you've defined ...

def rowiteration(row):
    c_2: bool = row['tipo'] == u.iloc[:, 0].str[:1]
    c_5: bool = row['t_final'] >= u.iloc[:, 1]
    c_6: bool = row['t_final'] <= (u.iloc[:, 2])
    row['cat'] = u[conjunction(c_2, c_5, c_6)].iloc[0, 0]
    row[(0 + (len(ini_cols)) + 1):(10 + (len(ini_cols)) + 2)] = timeloop(int(row['t_final']), int(row['count_final']), row['tipo'])
    return row

out = []
for row in p.imap_unordered(rowiteration, [r for _, r in pc.iterrows()]):
    row.index = cols
    out.append(row)

pc2 = pd.DataFrame(out).reset_index(drop=True)

Python multiprocessing doesn't wait for all elements to be done

I have the following code
global total_pds
total_pds = []

ksplit = wr.s3.list_objects(pred_path)
ksplit = list(ksplit)

def process(x):
    dk = wr.s3.read_parquet(path=pred_path + x, dataset=False)
    return dk

def log_result(result):
    print(len(total_pds), end=' ')
    total_pds.append(result)

def error_back(error):
    print('error', error)

pool = mp.Pool(processes=4, maxtasksperchild=10)
dcms_info = [pool.apply_async(process, args=(spl,), callback=log_result, error_callback=error_back) for spl in ksplit]
for x in dcms_info:
    x.wait()
pool.close()
pool.join()
dataset = pd.concat(total_pds, ignore_index=True)
The last element throws this error:
error("'i' format requires -2147483648 <= number <= 2147483647")
Thank you
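That struct error typically means a single object returned by a worker pickles to more than 2 GiB, which is more than multiprocessing can push back through its result pipe on older Python versions. A hedged sketch of one common workaround (illustrative only; out_path and the temp directory are arbitrary choices): have each worker write its DataFrame to local disk and return just the path, then concatenate in the parent.
import os
import tempfile

def process(x):
    dk = wr.s3.read_parquet(path=pred_path + x, dataset=False)
    # write the result locally and return only the (small) path,
    # so nothing close to the 2 GiB pickle limit goes through the pipe
    out_path = os.path.join(tempfile.gettempdir(), x.replace('/', '_') + '.parquet')
    dk.to_parquet(out_path)
    return out_path

with mp.Pool(processes=4, maxtasksperchild=10) as pool:
    paths = pool.map(process, ksplit)

dataset = pd.concat((pd.read_parquet(p) for p in paths), ignore_index=True)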

How to add a member function to an existing Python object?

Previously I created a lot of Python objects of class A, and I would now like to add a new method, plotting_in_PC_space_with_coloring_option() (it plots some data held in the object), to class A and call it on those old objects.
An example is:
import copy
import numpy as np
from math import *
from pybrain.structure import *
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets.supervised import SupervisedDataSet
import pickle
import matplotlib.pyplot as plt  # used by plotting_in_PC_space_with_coloring_option below
import neural_network_related
class A(object):
    """the neural network for simulation"""
    '''
    todo:
    - find boundary
    - get_angles_from_coefficients
    '''

    def __init__(self,
                 index,  # the index of the current network
                 list_of_coor_data_files,  # accept multiple files of training data
                 energy_expression_file,  # input, output files
                 preprocessing_settings=None,
                 connection_between_layers=None, connection_with_bias_layers=None,
                 PCs=None,  # principal components
                 ):
        self._index = index
        self._list_of_coor_data_files = list_of_coor_data_files
        self._energy_expression_file = energy_expression_file
        self._data_set = []
        for item in list_of_coor_data_files:
            self._data_set += self.get_many_cossin_from_coordiantes_in_file(item)
        self._preprocessing_settings = preprocessing_settings
        self._connection_between_layers = connection_between_layers
        self._connection_with_bias_layers = connection_with_bias_layers
        self._node_num = [8, 15, 2, 15, 8]
        self._PCs = PCs

    def save_into_file(self, filename=None):
        if filename is None:
            filename = "network_%s.pkl" % str(self._index)  # by default, name it with its index
        with open(filename, 'wb') as my_file:
            pickle.dump(self, my_file, pickle.HIGHEST_PROTOCOL)
        return
    def get_cossin_from_a_coordinate(self, a_coordinate):
        num_of_coordinates = len(a_coordinate) / 3
        a_coordinate = np.array(a_coordinate).reshape(num_of_coordinates, 3)
        diff_coordinates = a_coordinate[1:num_of_coordinates, :] - a_coordinate[0:num_of_coordinates - 1, :]  # bond vectors
        diff_coordinates_1 = diff_coordinates[0:num_of_coordinates - 2, :]
        diff_coordinates_2 = diff_coordinates[1:num_of_coordinates - 1, :]
        normal_vectors = np.cross(diff_coordinates_1, diff_coordinates_2)
        normal_vectors_normalized = np.array(map(lambda x: x / sqrt(np.dot(x, x)), normal_vectors))
        normal_vectors_normalized_1 = normal_vectors_normalized[0:num_of_coordinates - 3, :]
        normal_vectors_normalized_2 = normal_vectors_normalized[1:num_of_coordinates - 2, :]
        diff_coordinates_mid = diff_coordinates[1:num_of_coordinates - 2]  # bond vectors in the middle (first and last removed); they should be perpendicular to adjacent normal vectors
        cos_of_angles = range(len(normal_vectors_normalized_1))
        sin_of_angles_vec = range(len(normal_vectors_normalized_1))
        sin_of_angles = range(len(normal_vectors_normalized_1))  # initialization
        for index in range(len(normal_vectors_normalized_1)):
            cos_of_angles[index] = np.dot(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
            sin_of_angles_vec[index] = np.cross(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
            sin_of_angles[index] = sqrt(np.dot(sin_of_angles_vec[index], sin_of_angles_vec[index])) * np.sign(sum(sin_of_angles_vec[index]) * sum(diff_coordinates_mid[index]))
        return cos_of_angles + sin_of_angles

    def get_many_cossin_from_coordinates(self, coordinates):
        return map(self.get_cossin_from_a_coordinate, coordinates)

    def get_many_cossin_from_coordiantes_in_file(self, filename):
        coordinates = np.loadtxt(filename)
        return self.get_many_cossin_from_coordinates(coordinates)

    def mapminmax(self, my_list):  # for preprocessing in network
        my_min = min(my_list)
        my_max = max(my_list)
        mul_factor = 2.0 / (my_max - my_min)
        offset = (my_min + my_max) / 2.0
        result_list = np.array(map(lambda x: (x - offset) * mul_factor, my_list))
        return (result_list, (mul_factor, offset))  # also return the parameters for processing
    def get_mapminmax_preprocess_result_and_coeff(self, data=None):
        if data is None:
            data = self._data_set
        data = np.array(data)
        data = np.transpose(data)
        result = []
        params = []
        for item in data:
            temp_result, preprocess_params = self.mapminmax(item)
            result.append(temp_result)
            params.append(preprocess_params)
        return (np.transpose(np.array(result)), params)

    def mapminmax_preprocess_using_coeff(self, input_data=None, preprocessing_settings=None):
        # try begin
        if preprocessing_settings is None:
            preprocessing_settings = self._preprocessing_settings
        temp_setttings = np.transpose(np.array(preprocessing_settings))
        result = []
        for item in input_data:
            item = np.multiply(item - temp_setttings[1], temp_setttings[0])
            result.append(item)
        return result
        # try end
    def get_expression_of_network(self, connection_between_layers=None, connection_with_bias_layers=None):
        if connection_between_layers is None:
            connection_between_layers = self._connection_between_layers
        if connection_with_bias_layers is None:
            connection_with_bias_layers = self._connection_with_bias_layers
        node_num = self._node_num
        expression = ""
        # first part: network
        for i in range(2):
            expression = '\n' + expression
            mul_coef = connection_between_layers[i].params.reshape(node_num[i + 1], node_num[i])
            bias_coef = connection_with_bias_layers[i].params
            for j in range(np.size(mul_coef, 0)):
                temp_expression = 'layer_%d_unit_%d = tanh( ' % (i + 1, j)
                for k in range(np.size(mul_coef, 1)):
                    temp_expression += ' %f * layer_%d_unit_%d +' % (mul_coef[j, k], i, k)
                temp_expression += ' %f);\n' % (bias_coef[j])
                expression = temp_expression + expression  # order of expressions matters in OpenMM
        # second part: definition of inputs
        index_of_backbone_atoms = [2, 5, 7, 9, 15, 17, 19]
        for i in range(len(index_of_backbone_atoms) - 3):
            index_of_coss = i
            index_of_sins = i + 4
            expression += 'layer_0_unit_%d = (raw_layer_0_unit_%d - %f) * %f;\n' % \
                (index_of_coss, index_of_coss, self._preprocessing_settings[index_of_coss][1], self._preprocessing_settings[index_of_coss][0])
            expression += 'layer_0_unit_%d = (raw_layer_0_unit_%d - %f) * %f;\n' % \
                (index_of_sins, index_of_sins, self._preprocessing_settings[index_of_sins][1], self._preprocessing_settings[index_of_sins][0])
            expression += 'raw_layer_0_unit_%d = cos(dihedral_angle_%d);\n' % (index_of_coss, i)
            expression += 'raw_layer_0_unit_%d = sin(dihedral_angle_%d);\n' % (index_of_sins, i)
            expression += 'dihedral_angle_%d = dihedral(p%d, p%d, p%d, p%d);\n' % \
                (i, index_of_backbone_atoms[i], index_of_backbone_atoms[i + 1], index_of_backbone_atoms[i + 2], index_of_backbone_atoms[i + 3])
        return expression

    def write_expression_into_file(self, out_file=None):
        if out_file is None:
            out_file = self._energy_expression_file
        expression = self.get_expression_of_network()
        with open(out_file, 'w') as f_out:
            f_out.write(expression)
        return
    def get_mid_result(self, input_data=None, connection_between_layers=None, connection_with_bias_layers=None):
        if input_data is None:
            input_data = self._data_set
        if connection_between_layers is None:
            connection_between_layers = self._connection_between_layers
        if connection_with_bias_layers is None:
            connection_with_bias_layers = self._connection_with_bias_layers
        node_num = self._node_num
        temp_mid_result = range(4)
        mid_result = []
        # preprocessing has to be done first
        for item in self.mapminmax_preprocess_using_coeff(input_data, self._preprocessing_settings):
            for i in range(4):
                mul_coef = connection_between_layers[i].params.reshape(node_num[i + 1], node_num[i])  # fix node_num
                bias_coef = connection_with_bias_layers[i].params
                previous_result = item if i == 0 else temp_mid_result[i - 1]
                temp_mid_result[i] = np.dot(mul_coef, previous_result) + bias_coef
                if i != 3:  # the last output layer is linear, while the others are tanh layers
                    temp_mid_result[i] = map(tanh, temp_mid_result[i])
            mid_result.append(copy.deepcopy(temp_mid_result))  # note that deepcopy is needed here
        return mid_result

    def get_PC_and_save_it_to_network(self):
        '''get PCs and save the result into _PCs'''
        mid_result = self.get_mid_result()
        self._PCs = [item[1] for item in mid_result]
        return
    def train(self):
        ####################### set up autoencoder begin #######################
        node_num = self._node_num
        in_layer = LinearLayer(node_num[0], "IL")
        hidden_layers = [TanhLayer(node_num[1], "HL1"), TanhLayer(node_num[2], "HL2"), TanhLayer(node_num[3], "HL3")]
        bias_layers = [BiasUnit("B1"), BiasUnit("B2"), BiasUnit("B3"), BiasUnit("B4")]
        out_layer = LinearLayer(node_num[4], "OL")
        layer_list = [in_layer] + hidden_layers + [out_layer]
        molecule_net = FeedForwardNetwork()
        molecule_net.addInputModule(in_layer)
        for item in (hidden_layers + bias_layers):
            molecule_net.addModule(item)
        molecule_net.addOutputModule(out_layer)
        connection_between_layers = range(4)
        connection_with_bias_layers = range(4)
        for i in range(4):
            connection_between_layers[i] = FullConnection(layer_list[i], layer_list[i + 1])
            connection_with_bias_layers[i] = FullConnection(bias_layers[i], layer_list[i + 1])
            molecule_net.addConnection(connection_between_layers[i])  # connect two neighboring layers
            molecule_net.addConnection(connection_with_bias_layers[i])
        molecule_net.sortModules()  # internal initialization that makes the network usable
        ####################### set up autoencoder end #######################
        trainer = BackpropTrainer(molecule_net, learningrate=0.002, momentum=0.4, verbose=False, weightdecay=0.1, lrdecay=1)
        data_set = SupervisedDataSet(node_num[0], node_num[4])
        sincos = self._data_set
        (sincos_after_process, self._preprocessing_settings) = self.get_mapminmax_preprocess_result_and_coeff(data=sincos)
        for item in sincos_after_process:  # is it needed?
            data_set.addSample(item, item)
        trainer.trainUntilConvergence(data_set, maxEpochs=50)
        self._connection_between_layers = connection_between_layers
        self._connection_with_bias_layers = connection_with_bias_layers
        print("Done!\n")
        return
    def create_sge_files_for_simulation(self, potential_centers=None):
        if potential_centers is None:
            potential_centers = self.get_boundary_points()
        neural_network_related.create_sge_files(potential_centers)
        return

    def get_boundary_points(self, list_of_points=None, num_of_bins=5):
        if list_of_points is None:
            list_of_points = self._PCs
        x = [item[0] for item in list_of_points]
        y = [item[1] for item in list_of_points]
        temp = np.histogram2d(x, y, bins=[num_of_bins, num_of_bins])
        hist_matrix = temp[0]
        # add a border of zeros around this region
        hist_matrix = np.insert(hist_matrix, num_of_bins, np.zeros(num_of_bins), 0)
        hist_matrix = np.insert(hist_matrix, 0, np.zeros(num_of_bins), 0)
        hist_matrix = np.insert(hist_matrix, num_of_bins, np.zeros(num_of_bins + 2), 1)
        hist_matrix = np.insert(hist_matrix, 0, np.zeros(num_of_bins + 2), 1)
        hist_matrix = (hist_matrix != 0).astype(int)
        sum_of_neighbors = np.zeros(np.shape(hist_matrix))  # number of neighboring bins occupied by some points
        for i in range(np.shape(hist_matrix)[0]):
            for j in range(np.shape(hist_matrix)[1]):
                if i != 0: sum_of_neighbors[i, j] += hist_matrix[i - 1][j]
                if j != 0: sum_of_neighbors[i, j] += hist_matrix[i][j - 1]
                if i != np.shape(hist_matrix)[0] - 1: sum_of_neighbors[i, j] += hist_matrix[i + 1][j]
                if j != np.shape(hist_matrix)[1] - 1: sum_of_neighbors[i, j] += hist_matrix[i][j + 1]
        bin_width_0 = temp[1][1] - temp[1][0]
        bin_width_1 = temp[2][1] - temp[2][0]
        min_coor_in_PC_space_0 = temp[1][0] - 0.5 * bin_width_0  # multiply by 0.5 since we want the center of the grid cell
        min_coor_in_PC_space_1 = temp[2][0] - 0.5 * bin_width_1
        potential_centers = []
        for i in range(np.shape(hist_matrix)[0]):
            for j in range(np.shape(hist_matrix)[1]):
                if hist_matrix[i, j] == 0 and sum_of_neighbors[i, j] != 0:  # no points in this bin, but there are points in neighboring bins
                    temp_potential_center = [round(min_coor_in_PC_space_0 + i * bin_width_0, 2), round(min_coor_in_PC_space_1 + j * bin_width_1, 2)]
                    potential_centers.append(temp_potential_center)
        return potential_centers
    # this method was added after those old objects of A were created
    def plotting_in_PC_space_with_coloring_option(self,
                                                  list_of_coordinate_files_for_plotting=None,  # accept multiple files
                                                  color_option='pure'):
        '''
        by default we plot the training data, but external data input is also allowed
        '''
        if list_of_coordinate_files_for_plotting is None:
            PCs_to_plot = self._PCs
        else:
            temp_sincos = []
            for item in list_of_coordinate_files_for_plotting:
                temp_sincos += self.get_many_cossin_from_coordiantes_in_file(item)
            temp_mid_result = self.get_mid_result(input_data=temp_sincos)
            PCs_to_plot = [item[1] for item in temp_mid_result]
        (x, y) = ([item[0] for item in PCs_to_plot], [item[1] for item in PCs_to_plot])
        # coloring
        if color_option == 'pure':
            coloring = 'red'
        elif color_option == 'step':
            coloring = range(len(x))
        fig, ax = plt.subplots()
        ax.scatter(x, y, c=coloring)
        ax.set_xlabel("PC1")
        ax.set_ylabel("PC2")
        plt.show()
        return
But it seems that plotting_in_PC_space_with_coloring_option() was not bound to those old objects. Is there any way to fix this? (I do not want to recreate the objects, since creation involves CPU-intensive calculation and would take a very long time.)
Thanks!
Something like this:
class A:
    def q(self):
        print(1)

a = A()

def f(self):
    print(2)

setattr(A, 'f', f)
a.f()
This is called a monkey patch.
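Since setattr(A, 'f', f) patches the class itself, every existing and future instance of A (including old objects loaded from pickle files) sees the new method. If you only wanted to patch a single existing object, types.MethodType binds a function to one instance (a small sketch, not part of the original answer):
import types

def g(self):
    print(3)

a.g = types.MethodType(g, a)  # only this one instance gets g
a.g()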
