ID3 Algorithm in Python

I am trying to plot a decision tree using ID3 in Python. I am really new to Python and couldn't understand the implementation of the following code. I need to know how I can apply this code to my data.
from math import log
import operator

def entropy(data):
    # Shannon entropy of the class labels (the last column of each record)
    entries = len(data)
    labels = {}
    for feat in data:
        label = feat[-1]
        if label not in labels:
            labels[label] = 0
        labels[label] += 1
    entropy = 0.0
    for key in labels:
        probability = float(labels[key]) / entries
        entropy -= probability * log(probability, 2)
    return entropy

def split(data, axis, val):
    # keep the records whose feature `axis` equals `val`, with that feature removed
    newData = []
    for feat in data:
        if feat[axis] == val:
            reducedFeat = feat[:axis]
            reducedFeat.extend(feat[axis + 1:])
            newData.append(reducedFeat)
    return newData

def choose(data):
    # pick the feature with the highest information gain
    features = len(data[0]) - 1
    baseEntropy = entropy(data)
    bestInfoGain = 0.0
    bestFeat = -1
    for i in range(features):
        featList = [ex[i] for ex in data]
        uniqueVals = set(featList)
        newEntropy = 0.0
        for value in uniqueVals:
            newData = split(data, i, value)
            probability = len(newData) / float(len(data))
            newEntropy += probability * entropy(newData)
        infoGain = baseEntropy - newEntropy
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeat = i
    return bestFeat

def majority(classList):
    # majority vote among the remaining class labels
    classCount = {}
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]

def tree(data, labels):
    # recursively build the ID3 tree as nested dicts
    classList = [ex[-1] for ex in data]
    if classList.count(classList[0]) == len(classList):
        return classList[0]            # all records share one class: leaf
    if len(data[0]) == 1:
        return majority(classList)     # no features left: majority-vote leaf
    bestFeat = choose(data)
    bestFeatLabel = labels[bestFeat]
    theTree = {bestFeatLabel: {}}
    del labels[bestFeat]
    featValues = [ex[bestFeat] for ex in data]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]
        theTree[bestFeatLabel][value] = tree(split(data, bestFeat, value), subLabels)
    return theTree
So what I did after this is the following:
infile=open("SData.csv","r")
data=infile.read()
tree(data)
The error I get says that 1 argument is missing: the labels argument, and this is where I don't know what to pass. I tried the variable for which I want to build the decision tree, but it doesn't work:
tree(data,MinTemp)
Here I get the error "MinTemp is not defined".
Please help me out and let me know what I should do to get a look at the tree.
The following is part of the data, and I want to generate a tree for MinTemp:
MinTemp,Rainfall,Tempat9,RHat9,CAat9,WSat9
high,no,mild,normal,overcast,weak
high,no,mild,normal,cloudy,weak
high,no,mild,normal,cloudy,mild
high,yes,mild,high,cloudy,weak
high,yes,mild,high,cloudy,mild
medium,yes,mild,high,cloudy,mild
high,no,mild,high,overcast,weak
high,no,mild,normal,sunny,weak
high,no,hot,normal,sunny,weak
high,no,hot,normal,overcast,weak
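tree() needs two arguments: the dataset as a list of rows and a list of the feature names. infile.read() returns one long string rather than a list of rows, and MinTemp has to appear inside the labels list as a string, not as a bare name. A minimal sketch of the glue code (assuming SData.csv is comma-separated with the header row shown above; since the code treats the last column as the class to predict, MinTemp is moved to the end):

import csv

with open("SData.csv") as infile:
    rows = list(csv.reader(infile))
header, records = rows[0], rows[1:]

# the tree() code above predicts the LAST column, so move MinTemp there
target = header.index("MinTemp")
labels = [h for h in header if h != "MinTemp"]
data = [row[:target] + row[target + 1:] + [row[target]] for row in records]

# tree() mutates labels, so hand it a copy
print(tree(data, labels[:]))

The printed result is the nested-dict representation of the tree; drawing it graphically needs a separate plotting routine.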

Related

Python ValueError: negative dimensions are not allowed

I am implementing a genetic algorithm, but I am facing an error after the first generation with the message: ValueError: negative dimensions are not allowed.
I actually changed the nfilters parameter from nfilters=[74,27,23] to nfilters=[64,128,256]; I don't know if the error is due to these parameters.
I declared my Sequential subclass as follows:
# imports implied by the snippet (not shown in the original question);
# num_classes, datagen, early_stopping_monitor, batch_size and the data
# arrays are defined elsewhere in the asker's script
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dropout, Dense
import matplotlib.pyplot as plt

class CNN(Sequential):
    def __init__(self, nfilters, sfilters):
        super().__init__()
        tf.random.set_seed(0)
        self.add(Conv2D(nfilters[0], kernel_size=(sfilters[0], sfilters[0]), padding='same', activation='relu', input_shape=(50, 50, 3)))
        self.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        self.add(Conv2D(nfilters[1], kernel_size=(sfilters[1], sfilters[1]), padding='same', activation='relu'))
        self.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        self.add(Conv2D(nfilters[2], kernel_size=(sfilters[2], sfilters[2]), padding='same', activation='relu'))
        self.add(Conv2D(nfilters[2], kernel_size=(sfilters[2], sfilters[2]), padding='same', activation='relu'))
        self.add(Flatten())
        self.add(Dropout(0.5))
        self.add(Dense(128, activation='relu'))
        self.add(Dropout(0.5))
        self.add(Dense(128, activation='relu'))
        self.add(Dense(num_classes, activation='sigmoid'))
        self.compile(loss=keras.losses.binary_crossentropy,
                     optimizer=tf.optimizers.Adam(learning_rate=0.001),
                     metrics=['accuracy'])

nfilters = [64, 128, 256]  # nfilters = [74,27,23]
sfilters = [9, 3, 2]       # sfilters = [9,3,2]
Then my Genetic class is declared as follows:
class Genetic:
    def __init__(self, pop_size, nlayers, max_nfilters, max_sfilters):
        self.pop_size = pop_size
        self.nlayers = nlayers
        self.max_nfilters = max_nfilters
        self.max_sfilters = max_sfilters
        self.max_acc = 0
        self.best_arch = np.zeros((1, 6))
        self.gen_acc = []

    def generate_population(self):
        np.random.seed(0)
        pop_nlayers = np.random.randint(1, self.max_nfilters, (self.pop_size, self.nlayers))
        pop_sfilters = np.random.randint(1, self.max_sfilters, (self.pop_size, self.nlayers))
        pop_total = np.concatenate((pop_nlayers, pop_sfilters), axis=1)
        return pop_total

    def select_parents(self, pop, nparents, fitness):
        parents = np.zeros((nparents, pop.shape[1]))
        for i in range(nparents):
            best = np.argmax(fitness)
            parents[i] = pop[best]
            fitness[best] = -99999
        return parents

    def crossover(self, parents):
        nchild = self.pop_size - parents.shape[0]
        nparents = parents.shape[0]
        child = np.zeros((nchild, parents.shape[1]))
        for i in range(nchild):
            first = i % nparents
            second = (i + 1) % nparents
            child[i, :2] = parents[first][:2]
            child[i, 2] = parents[second][2]
            child[i, 3:5] = parents[first][3:5]
            child[i, 5] = parents[second][5]
        return child

    def mutation(self, child):
        for i in range(child.shape[0]):
            val = np.random.randint(1, 6)
            ind = np.random.randint(1, 4) - 1
            if child[i][ind] + val > 100:
                child[i][ind] -= val
            else:
                child[i][ind] += val
            val = np.random.randint(1, 4)
            ind = np.random.randint(4, 7) - 1
            if child[i][ind] + val > 20:
                child[i][ind] -= val
            else:
                child[i][ind] += val
        return child

    def fitness(self, pop, X, Y, epochs):
        pop_acc = []
        for i in range(pop.shape[0]):
            nfilters = pop[i][0:3]
            sfilters = pop[i][3:]
            model = CNN(nfilters, sfilters)
            #H = model.fit_generator(datagen.flow(X,Y,batch_size=256),epochs=epochs,callbacks=[early_stopping_monitor])
            H = model.fit_generator(datagen.flow(X, Y, batch_size=256), steps_per_epoch=len(X_trainRusReshaped) / batch_size, epochs=epochs, validation_data=(X_testRusReshaped, Y_testRusHot), callbacks=[early_stopping_monitor])
            acc = H.history['accuracy']
            pop_acc.append(max(acc) * 100)
        if max(pop_acc) > self.max_acc:
            self.max_acc = max(pop_acc)
            self.best_arch = pop[np.argmax(pop_acc)]
        self.gen_acc.append(max(pop_acc))
        return pop_acc

    def smooth_curve(self, factor, gen):
        smoothed_points = []
        for point in self.gen_acc:
            if smoothed_points:
                prev = smoothed_points[-1]
                smoothed_points.append(prev * factor + point * (1 - factor))
            else:
                smoothed_points.append(point)
        plt.plot(range(gen + 1), smoothed_points, 'g', label='Smoothed training acc')
        plt.xticks(np.arange(gen + 1))
        plt.legend()
        plt.title('Fitness Accuracy vs Generations')
        plt.xlabel('Generations')
        plt.ylabel('Fitness (%)')
        plt.show()
        plt.savefig('smoothCurve.png')
When I launch these lines of code, I get the error after 20 epochs on the first generation:
# Starting the genetic algorithm
pop_size = 2         # 10
nlayers = 3          # 3
max_nfilters = 500   # 100
max_sfilters = 20
epochs = 20
num_generations = 2  # 10
genCNN = Genetic(pop_size, nlayers, max_nfilters, max_sfilters)
pop = genCNN.generate_population()
for i in range(num_generations + 1):
    pop_acc = genCNN.fitness(pop, X_trainRusReshaped, Y_trainRusHot, epochs)
    print('Best Accuracy at the generation {}: {}'.format(i, genCNN.max_acc))
    parents = genCNN.select_parents(pop, 5, pop_acc.copy())
    child = genCNN.crossover(parents)
    child = genCNN.mutation(child)
    pop = np.concatenate((parents, child), axis=0).astype('int')
Any idea where this error is coming from? I tried to increase max_nfilters from 100 to 500, but that did not solve anything.
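Judging only from the code shown, the likely source is the mismatch between pop_size = 2 and the nparents = 5 passed to select_parents: crossover() then computes nchild = self.pop_size - parents.shape[0] = 2 - 5 = -3, and np.zeros((-3, 6)) raises exactly ValueError: negative dimensions are not allowed. The error shows up only after the first generation's 20 epochs because fitness() runs all the training before crossover() is reached. A minimal reproduction of that failure mode:

import numpy as np

pop_size, nparents = 2, 5      # the values used in the launch code above
nchild = pop_size - nparents   # -3: more parents selected than the population holds
child = np.zeros((nchild, 6))  # ValueError: negative dimensions are not allowed

Keeping nparents strictly below pop_size (for example nparents = max(1, pop_size // 2)) keeps nchild non-negative; increasing max_nfilters has no effect on this.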

Adaptive DBSCAN achievement

I am doing DBSCAN clustering in Python, and I want to make it adaptive: it should return the number of clusters after calculating its eps and MinPts parameters by itself. Below is my code.
import math
import copy
import numpy as np
import pandas as pd
from sklearn.cluster import DBSCAN

def loadDataSet(fileName, splitChar='\t'):
    dataSet = []
    with open(fileName) as fr:
        for line in fr.readlines():
            curline = line.strip().split(splitChar)
            fltline = list(map(float, curline))
            dataSet.append(fltline)
    return dataSet

def dist(a, b):
    return math.sqrt(math.pow(a[0] - b[0], 2) + math.pow(a[1] - b[1], 2))

def returnDk(matrix, k):
    Dk = []
    for i in range(len(matrix)):
        Dk.append(matrix[i][k])
    return Dk

def returnDkAverage(Dk):
    sum = 0
    for i in range(len(Dk)):
        sum = sum + Dk[i]
    return sum / len(Dk)

def CalculateDistMatrix(dataset):
    DistMatrix = [[0 for j in range(len(dataset))] for i in range(len(dataset))]
    for i in range(len(dataset)):
        for j in range(len(dataset)):
            DistMatrix[i][j] = dist(dataset[i], dataset[j])
    return DistMatrix

def returnEpsCandidate(dataSet):
    DistMatrix = CalculateDistMatrix(dataSet)
    tmp_matrix = copy.deepcopy(DistMatrix)
    for i in range(len(tmp_matrix)):
        tmp_matrix[i].sort()
    EpsCandidate = []
    for k in range(1, len(dataSet)):
        Dk = returnDk(tmp_matrix, k)
        DkAverage = returnDkAverage(Dk)
        EpsCandidate.append(DkAverage)
    return EpsCandidate

def returnMinptsCandidate(DistMatrix, EpsCandidate):
    MinptsCandidate = []
    for k in range(len(EpsCandidate)):
        tmp_eps = EpsCandidate[k]
        tmp_count = 0
        for i in range(len(DistMatrix)):
            for j in range(len(DistMatrix[i])):
                if DistMatrix[i][j] <= tmp_eps:
                    tmp_count = tmp_count + 1
        MinptsCandidate.append(tmp_count / len(dataSet))
    return MinptsCandidate

def returnClusterNumberList(dataset, EpsCandidate, MinptsCandidate):
    np_dataset = np.array(dataset)
    ClusterNumberList = []
    for i in range(len(EpsCandidate)):
        clustering = DBSCAN(eps=EpsCandidate[i], min_samples=MinptsCandidate[i]).fit(np_dataset)
        num_clustering = max(clustering.labels_)
        ClusterNumberList.append(num_clustering)
    return ClusterNumberList

if __name__ == '__main__':
    data = pd.read_csv('/Users/Desktop/Mic/recorder_test1/New folder/MFCCresultsforclustering/MFCCresultsforclustering.csv')
    dataSet = data.iloc[:, 0:13].values
    EpsCandidate = returnEpsCandidate(dataSet)
    DistMatrix = CalculateDistMatrix(dataSet)
    MinptsCandidate = returnMinptsCandidate(DistMatrix, EpsCandidate)
    ClusterNumberList = returnClusterNumberList(dataSet, EpsCandidate, MinptsCandidate)
    print(EpsCandidate)
    print(MinptsCandidate)
    print('cluster number list is')
    print(ClusterNumberList)
However, with my data set the output cluster number list is all [-1]s, i.e. every point is labelled noise. I am wondering where the mistake is. Is my general direction right? If not, how can I achieve adaptive DBSCAN clustering?
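One thing stands out in the code above, for what it's worth: dist() uses only the first two coordinates (a[0], a[1]), while DBSCAN is run on all 13 MFCC features, so eps and MinPts are estimated with a different metric than the one DBSCAN actually applies, and an eps that is too small for the 13-dimensional distances turns every point into noise (label -1). A common adaptive recipe is the k-distance elbow: sort each point's distance to its k-th nearest neighbour and take eps near the knee of that curve, with min_samples fixed around twice the dimensionality. A sketch of that approach (the function name, knee heuristic, and parameter choices are illustrative assumptions, not from the original code):

import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors

def adaptive_dbscan(X, min_samples=None):
    # rule of thumb: min_samples around 2 * dimensionality
    if min_samples is None:
        min_samples = 2 * X.shape[1]
    # sorted distance of every point to its k-th nearest neighbour (k = min_samples)
    nn = NearestNeighbors(n_neighbors=min_samples).fit(X)
    dists, _ = nn.kneighbors(X)
    kdist = np.sort(dists[:, -1])
    # crude knee estimate: index of the largest second difference
    knee = np.argmax(np.diff(kdist, 2)) + 1
    eps = kdist[knee]
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(X)
    return labels, eps

The number of clusters is then len(set(labels)) - (1 if -1 in labels else 0); plotting kdist and eyeballing the elbow is still worthwhile, since the second-difference knee is only a rough stand-in for visual inspection.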

How to plot character state changes from a presence-absence matrix onto a phylogeny

I am trying to assign character state changes from a presence-absence matrix to a phylogeny.
I have tried assigning each character to its leaf node and then, if the leaf node's sister has the same character, reassigning the character to the parent node (working back until all nodes are assigned). I am using a dummy dataset to try to achieve this:
Matrix
>Dme_001
1110000000000111
>Dme_002
1110000000000011
>Cfa_001
0110000000000011
>Mms_001
0110000000000011
>Hsa_001
0110000000000010
>Ptr_002
0110000000000011
>Mmu_002
0110000000000011
>Hsa_002
0110000000000011
>Ptr_001
0110000000000011
>Mmu_001
0110000000000011
Phylogeny
((Dme_001,Dme_002),(((Cfa_001,Mms_001),((Hsa_001,Ptr_001),Mmu_001)),(Ptr_002,(Hsa_002,Mmu_002))));
I assign internal node names using ete3, so my output should be:
BranchID   CharacterState   Change
Node_1:    0                0->1
Hsa_001:   15               1->0
Because my code assigns character states based on sisters, when a loss is encountered it messes up the output, so that:
BranchID   CharacterState   Change
Node_1:    0                0->1
Node_3     15               0->1
Node_5     15               0->1
Node_8     15               0->1
Could someone please help me with this? I'm coding in Python and developing tunnel vision. Thanks in advance.
My code:
from ete3 import PhyloTree
from collections import Counter
import itertools

PAM = open('PAM', 'r')
gene_tree = '((Dme_001,Dme_002),(((Cfa_001,Mms_001),((Hsa_001,Ptr_001),Mmu_001)),(Ptr_002,(Hsa_002,Mmu_002))));'
NodeIDs = []
tree = PhyloTree(gene_tree)
edge = 0
# name the internal nodes Node_0, Node_1, ... and collect all node names
for node in tree.traverse():
    if not node.is_leaf():
        node.name = "Node_%d" % edge
        edge += 1
        NodeIDs.append(node.name)
    if node.is_leaf():
        NodeIDs.append(node.name)

# read the presence-absence matrix into {taxon: list of '0'/'1' characters}
f = open('PAM', 'r')
taxa = []
pap = []
for line in f:
    term = line.strip().split('\t')
    taxa.append(term[0])
    p = [p for p in term[1]]
    pap.append(p)
statesD = dict(zip(taxa, pap))

def PlotCharacterStates():
    Plots = []
    events = []
    for key, value in statesD.items():
        count = -1
        for s in value:
            count += 1
            if s == CharacterState:
                a = key, count
                events.append(a)
    Round3_events = []
    while len(events) > 0:
        for rel in Relationships:
            node_store = []
            sis_store = []
            for event in events:
                if rel[0] == event[0]:
                    node_store.append(event[1])
                if rel[1] == event[0]:
                    sis_store.append(event[1])
            if (len(node_store) > 0) and (len(sis_store) > 0):
                place = rel, node_store, sis_store
                Round3_events.append(place)
        moved = []
        for placement in Round3_events:
            intercept = (set(placement[1]) & set(placement[2]))
            node_plot = (set(placement[1]) - set(placement[2]))
            sis_plot = (set(placement[2]) - set(placement[1]))
            if len(node_plot) > 0:
                for x in node_plot:
                    y = placement[0][0], x
                    Plots.append(y)
                    moved.append(y)
            if len(sis_plot) > 0:
                for x in sis_plot:
                    y = placement[0][1], x
                    Plots.append(y)
                    moved.append(y)
            if len(intercept) > 0:
                for x in intercept:
                    y = placement[0][2], x
                    y1 = placement[0][0], x
                    y2 = placement[0][1], x
                    moved.append(y1)
                    moved.append(y2)
                    events.append(y)
        for event in events:
            if event[0] == "Node_0":
                Plots.append(event)
                moved.append(event)
        events2 = (set(events) - set(moved))
        events = []
        for event in events2:
            events.append(event)
    pl = set(Plots)
    Plots = []
    for p in pl:
        Plots.append(p)
    print(CharacterState, Plots)

'''
assign sisters to leaves, internals
'''
e = []
round1b_e = []
round2a_e = []
placements = []
Relationships = []
Rounds = []
for node in tree.traverse():
    sisters = node.get_sisters()
    parent = node.up
    cycle1 = []
    if node.is_leaf():
        for sister in sisters:
            if sister.is_leaf():
                round1a = ["Round1a", node.name, sister.name, parent.name]
                node_names = node.name, sister.name
                Rounds.append(round1a)
                e.append(node_names)
                x = node.name, sister.name, parent.name, "leaf-leaf"
                Relationships.append(x)
            if not sister.is_leaf():
                round1b = ["Round1b", node.name, sister.name, parent.name]
                node_names = node.name, sister.name
                Rounds.append(round1b)
                round1b_e.append(node_names)
                x = node.name, sister.name, parent.name, "node-leaf"
                Relationships.append(x)
    elif not node.is_leaf():
        if not node.is_root():
            for sister in sisters:
                if not sister.is_leaf():
                    node_names = node.name, sister.name
                    round2a_e.append(node_names)
                    x = node.name, sister.name, parent.name, "node-node"
                    Relationships.append(x)

# collect the distinct character states ('0', '1') present in the matrix
x = []
CharacterStates = []
for key, value in statesD.items():
    for value in value:
        x.append(value)
y = sorted(set(x))
for x in y:
    CharacterStates.append(x)
for CharacterState in CharacterStates:
    PlotCharacterStates()
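For the loss problem described above, a small-parsimony pass is usually more robust than pairwise sister comparisons: a Fitch-style bottom-up traversal gives each internal node the intersection of its children's state sets (or the union when the intersection is empty), and a top-down pass then fixes one state per node and reports every branch where the state differs from the parent, which captures both gains (0->1) and losses (1->0). A sketch of that idea for a single binary character with ete3 (the helper name, tie-breaking rule, and output format are my own assumptions, not from the original code):

from ete3 import PhyloTree

def fitch_changes(tree, leaf_states):
    # leaf_states: dict mapping leaf name -> '0' or '1' for one matrix column
    sets = {}
    # bottom-up (Fitch) pass: intersection where possible, otherwise union
    for node in tree.traverse("postorder"):
        if node.is_leaf():
            sets[node] = {leaf_states[node.name]}
        else:
            child_sets = [sets[c] for c in node.children]
            inter = set.intersection(*child_sets)
            sets[node] = inter if inter else set.union(*child_sets)
    # top-down pass: fix states and record branches where they change
    changes = []
    for node in tree.traverse("preorder"):
        if node.is_root():
            node.add_feature("state", min(sets[node]))  # arbitrary tie-break at the root
        else:
            parent_state = node.up.state
            state = parent_state if parent_state in sets[node] else min(sets[node])
            node.add_feature("state", state)
            if state != parent_state:
                changes.append((node.name, "%s->%s" % (parent_state, state)))
    return changes

Running this once per column of the presence-absence matrix (the values in statesD) and printing the returned changes yields one gain/loss event per branch instead of the duplicated placements shown above.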

Python genetic algorithm does not converge to maximum or minimum

Hello, I need help making a genetic algorithm converge to a maximum or minimum value.
I wrote code to find the sentence with the maximum ASCII sum, but my code does not converge to the maximum; the value just oscillates ("yo-yos"), as in this picture:
matplotlib output
I share my code:
import random
import statistics

EVOLUTION = []
words = [
    ["Un", "Des", "Une", "On", "Elle"],
    ["a", "eu", "avait", "est", "était", "fut"],
    ["soif", "rouge"]
]

def individual(data):
    #return tuple(random.choice(range(len(feature))) for feature in data)
    return tuple(random.choice(range(len(feature))) for feature in data)

def population(data, initial=100):
    return [individual(data) for i in range(initial)]

def fitness(individual, data):
    chaine = sentence(individual, words)
    somme = 0
    for caractere in chaine:
        somme = somme + ord(caractere)
    print(chaine)
    print(somme)
    EVOLUTION.append(somme)
    return somme
    #return sum(data[i][individual[i]] for i in range(len(individual)))

def grade(population, data):
    fit = [fitness(ind, data) for ind in population]
    return statistics.mean(fit)

def mutate(ind, data):
    gene = random.randrange(0, len(ind))
    clone = list(ind)
    clone[gene] = random.randrange(0, len(data[gene]))
    #print(sentence(tuple(clone),words))
    return tuple(clone)

def cross(mother, father):
    return tuple(round(statistics.mean(genes)) for genes in zip(mother, father))

def sentence(individual, words):
    return ' '.join([words[i][individual[i]] for i in range(len(words))])

def evolve(population, data, retain=0.0, random_select=0.00, mutation_rate=0.00):
    def cmp_ind(ind):
        return fitness(ind, data)
    sorted_population = sorted(population, key=cmp_ind, reverse=True)
    len_retained = round(len(population) * retain)
    retained = sorted_population[:len_retained]
    random_selected = [
        ind
        for ind in sorted_population[len_retained:]
        if random.random() <= random_select
    ]
    mutated = [
        mutate(ind, data)
        for ind in sorted_population[len_retained:]
        if random.random() <= mutation_rate
    ]
    children = [
        cross(random.choice(sorted_population),
              random.choice(sorted_population))
        for i in range(len(population) - len(random_selected) - len(mutated))
    ]
    return random_selected + mutated + children

if __name__ == '__main__':
    data = [[len(w) for w in ws] for ws in words]
    initial_population = population(data, 30)
    next_population = initial_population
    max_iter = 3
    for i in range(max_iter):
        next_population = evolve(next_population, data)
    sorted_population = sorted(next_population, key=lambda x: fitness(x, data))
    best_individual = sorted_population[0]
    print("best solution :")
    chaine = sentence(best_individual, words)
    somme = 0
    for caractere in chaine:
        somme = somme + ord(caractere)
    print(chaine)
    print(somme)
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    plt.plot(EVOLUTION)
    plt.savefig('myfig')
I want my fitness function to find higher-scoring solutions.
Thanks in advance for your help.
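Two details in the code above work against convergence, as far as I can tell. First, evolve() computes retained but never includes it in the returned population, and the defaults retain=0.0, random_select=0.0, mutation_rate=0.0 disable selection entirely, so each generation is rebuilt from crossovers of randomly chosen parents and the score just oscillates. Second, best_individual = sorted_population[0] takes the lowest-scoring individual, because that final sort is ascending. A sketch of an evolve() with elitism that should make the score climb (the parameter values are illustrative, not canonical):

def evolve(population, data, retain=0.2, random_select=0.05, mutation_rate=0.05):
    # sort best-first and keep the elite so good genes survive between generations
    sorted_population = sorted(population, key=lambda ind: fitness(ind, data), reverse=True)
    len_retained = round(len(population) * retain)
    retained = sorted_population[:len_retained]
    random_selected = [ind for ind in sorted_population[len_retained:]
                       if random.random() <= random_select]
    mutated = [mutate(ind, data) for ind in sorted_population[len_retained:]
               if random.random() <= mutation_rate]
    n_children = len(population) - len(retained) - len(random_selected) - len(mutated)
    # breed preferentially from the retained elite
    children = [cross(random.choice(retained), random.choice(retained))
                for _ in range(n_children)]
    return retained + random_selected + mutated + children

With the ascending final sort, the best individual is sorted_population[-1]; alternatively, use max(next_population, key=lambda x: fitness(x, data)).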

How to add a member function to an existing Python object?

Previously I created a lot of Python objects of class A, and I would now like to add a new function plotting_in_PC_space_with_coloring_option() (its purpose is to plot some data held in the object) to class A and call it on those old objects.
An example is:
import copy
import numpy as np
from math import *
from pybrain.structure import *
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets.supervised import SupervisedDataSet
import pickle
import matplotlib.pyplot as plt  # needed by plotting_in_PC_space_with_coloring_option() below
import neural_network_related

class A(object):
    """the neural network for simulation"""
    '''
    todo:
    - find boundary
    - get_angles_from_coefficients
    '''
    def __init__(self,
                 index,                     # the index of the current network
                 list_of_coor_data_files,   # accept multiple files of training data
                 energy_expression_file,    # input, output files
                 preprocessing_settings = None,
                 connection_between_layers = None, connection_with_bias_layers = None,
                 PCs = None,                # principal components
                 ):
        self._index = index
        self._list_of_coor_data_files = list_of_coor_data_files
        self._energy_expression_file = energy_expression_file
        self._data_set = []
        for item in list_of_coor_data_files:
            self._data_set += self.get_many_cossin_from_coordiantes_in_file(item)
        self._preprocessing_settings = preprocessing_settings
        self._connection_between_layers = connection_between_layers
        self._connection_with_bias_layers = connection_with_bias_layers
        self._node_num = [8, 15, 2, 15, 8]
        self._PCs = PCs

    def save_into_file(self, filename = None):
        if filename is None:
            filename = "network_%s.pkl" % str(self._index)  # by default naming with its index
        with open(filename, 'wb') as my_file:
            pickle.dump(self, my_file, pickle.HIGHEST_PROTOCOL)
        return

    def get_cossin_from_a_coordinate(self, a_coordinate):
        num_of_coordinates = len(a_coordinate) / 3
        a_coordinate = np.array(a_coordinate).reshape(num_of_coordinates, 3)
        diff_coordinates = a_coordinate[1:num_of_coordinates, :] - a_coordinate[0:num_of_coordinates - 1, :]  # bond vectors
        diff_coordinates_1 = diff_coordinates[0:num_of_coordinates - 2, :]
        diff_coordinates_2 = diff_coordinates[1:num_of_coordinates - 1, :]
        normal_vectors = np.cross(diff_coordinates_1, diff_coordinates_2)
        normal_vectors_normalized = np.array(map(lambda x: x / sqrt(np.dot(x, x)), normal_vectors))
        normal_vectors_normalized_1 = normal_vectors_normalized[0:num_of_coordinates - 3, :]
        normal_vectors_normalized_2 = normal_vectors_normalized[1:num_of_coordinates - 2, :]
        diff_coordinates_mid = diff_coordinates[1:num_of_coordinates - 2]  # these are bond vectors in the middle (remove the first and last one), they should be perpendicular to adjacent normal vectors
        cos_of_angles = range(len(normal_vectors_normalized_1))
        sin_of_angles_vec = range(len(normal_vectors_normalized_1))
        sin_of_angles = range(len(normal_vectors_normalized_1))  # initialization
        for index in range(len(normal_vectors_normalized_1)):
            cos_of_angles[index] = np.dot(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
            sin_of_angles_vec[index] = np.cross(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
            sin_of_angles[index] = sqrt(np.dot(sin_of_angles_vec[index], sin_of_angles_vec[index])) * np.sign(sum(sin_of_angles_vec[index]) * sum(diff_coordinates_mid[index]))
        return cos_of_angles + sin_of_angles

    def get_many_cossin_from_coordinates(self, coordinates):
        return map(self.get_cossin_from_a_coordinate, coordinates)

    def get_many_cossin_from_coordiantes_in_file(self, filename):
        coordinates = np.loadtxt(filename)
        return self.get_many_cossin_from_coordinates(coordinates)

    def mapminmax(self, my_list):  # for preprocessing in network
        my_min = min(my_list)
        my_max = max(my_list)
        mul_factor = 2.0 / (my_max - my_min)
        offset = (my_min + my_max) / 2.0
        result_list = np.array(map(lambda x: (x - offset) * mul_factor, my_list))
        return (result_list, (mul_factor, offset))  # also return the parameters for processing

    def get_mapminmax_preprocess_result_and_coeff(self, data=None):
        if data is None:
            data = self._data_set
        data = np.array(data)
        data = np.transpose(data)
        result = []
        params = []
        for item in data:
            temp_result, preprocess_params = self.mapminmax(item)
            result.append(temp_result)
            params.append(preprocess_params)
        return (np.transpose(np.array(result)), params)

    def mapminmax_preprocess_using_coeff(self, input_data=None, preprocessing_settings=None):
        # try begin
        if preprocessing_settings is None:
            preprocessing_settings = self._preprocessing_settings
        temp_setttings = np.transpose(np.array(preprocessing_settings))
        result = []
        for item in input_data:
            item = np.multiply(item - temp_setttings[1], temp_setttings[0])
            result.append(item)
        return result
        # try end

    def get_expression_of_network(self, connection_between_layers=None, connection_with_bias_layers=None):
        if connection_between_layers is None:
            connection_between_layers = self._connection_between_layers
        if connection_with_bias_layers is None:
            connection_with_bias_layers = self._connection_with_bias_layers
        node_num = self._node_num
        expression = ""
        # first part: network
        for i in range(2):
            expression = '\n' + expression
            mul_coef = connection_between_layers[i].params.reshape(node_num[i + 1], node_num[i])
            bias_coef = connection_with_bias_layers[i].params
            for j in range(np.size(mul_coef, 0)):
                temp_expression = 'layer_%d_unit_%d = tanh( ' % (i + 1, j)
                for k in range(np.size(mul_coef, 1)):
                    temp_expression += ' %f * layer_%d_unit_%d +' % (mul_coef[j, k], i, k)
                temp_expression += ' %f);\n' % (bias_coef[j])
                expression = temp_expression + expression  # order of expressions matter in OpenMM
        # second part: definition of inputs
        index_of_backbone_atoms = [2, 5, 7, 9, 15, 17, 19]
        for i in range(len(index_of_backbone_atoms) - 3):
            index_of_coss = i
            index_of_sins = i + 4
            expression += 'layer_0_unit_%d = (raw_layer_0_unit_%d - %f) * %f;\n' % \
                (index_of_coss, index_of_coss, self._preprocessing_settings[index_of_coss][1], self._preprocessing_settings[index_of_coss][0])
            expression += 'layer_0_unit_%d = (raw_layer_0_unit_%d - %f) * %f;\n' % \
                (index_of_sins, index_of_sins, self._preprocessing_settings[index_of_sins][1], self._preprocessing_settings[index_of_sins][0])
            expression += 'raw_layer_0_unit_%d = cos(dihedral_angle_%d);\n' % (index_of_coss, i)
            expression += 'raw_layer_0_unit_%d = sin(dihedral_angle_%d);\n' % (index_of_sins, i)
            expression += 'dihedral_angle_%d = dihedral(p%d, p%d, p%d, p%d);\n' % \
                (i, index_of_backbone_atoms[i], index_of_backbone_atoms[i+1], index_of_backbone_atoms[i+2], index_of_backbone_atoms[i+3])
        return expression

    def write_expression_into_file(self, out_file = None):
        if out_file is None: out_file = self._energy_expression_file
        expression = self.get_expression_of_network()
        with open(out_file, 'w') as f_out:
            f_out.write(expression)
        return

    def get_mid_result(self, input_data=None, connection_between_layers=None, connection_with_bias_layers=None):
        if input_data is None: input_data = self._data_set
        if connection_between_layers is None: connection_between_layers = self._connection_between_layers
        if connection_with_bias_layers is None: connection_with_bias_layers = self._connection_with_bias_layers
        node_num = self._node_num
        temp_mid_result = range(4)
        mid_result = []
        # first need to do preprocessing
        for item in self.mapminmax_preprocess_using_coeff(input_data, self._preprocessing_settings):
            for i in range(4):
                mul_coef = connection_between_layers[i].params.reshape(node_num[i + 1], node_num[i])  # fix node_num
                bias_coef = connection_with_bias_layers[i].params
                previous_result = item if i == 0 else temp_mid_result[i - 1]
                temp_mid_result[i] = np.dot(mul_coef, previous_result) + bias_coef
                if i != 3:  # the last output layer is a linear layer, while others are tanh layers
                    temp_mid_result[i] = map(tanh, temp_mid_result[i])
            mid_result.append(copy.deepcopy(temp_mid_result))  # note that should use deepcopy
        return mid_result

    def get_PC_and_save_it_to_network(self):
        '''get PCs and save the result into _PCs
        '''
        mid_result = self.get_mid_result()
        self._PCs = [item[1] for item in mid_result]
        return

    def train(self):
        ####################### set up autoencoder begin #######################
        node_num = self._node_num
        in_layer = LinearLayer(node_num[0], "IL")
        hidden_layers = [TanhLayer(node_num[1], "HL1"), TanhLayer(node_num[2], "HL2"), TanhLayer(node_num[3], "HL3")]
        bias_layers = [BiasUnit("B1"), BiasUnit("B2"), BiasUnit("B3"), BiasUnit("B4")]
        out_layer = LinearLayer(node_num[4], "OL")
        layer_list = [in_layer] + hidden_layers + [out_layer]
        molecule_net = FeedForwardNetwork()
        molecule_net.addInputModule(in_layer)
        for item in (hidden_layers + bias_layers):
            molecule_net.addModule(item)
        molecule_net.addOutputModule(out_layer)
        connection_between_layers = range(4)
        connection_with_bias_layers = range(4)
        for i in range(4):
            connection_between_layers[i] = FullConnection(layer_list[i], layer_list[i+1])
            connection_with_bias_layers[i] = FullConnection(bias_layers[i], layer_list[i+1])
            molecule_net.addConnection(connection_between_layers[i])  # connect two neighbor layers
            molecule_net.addConnection(connection_with_bias_layers[i])
        molecule_net.sortModules()  # this is some internal initialization process to make this module usable
        ####################### set up autoencoder end #######################
        trainer = BackpropTrainer(molecule_net, learningrate=0.002, momentum=0.4, verbose=False, weightdecay=0.1, lrdecay=1)
        data_set = SupervisedDataSet(node_num[0], node_num[4])
        sincos = self._data_set
        (sincos_after_process, self._preprocessing_settings) = self.get_mapminmax_preprocess_result_and_coeff(data = sincos)
        for item in sincos_after_process:  # is it needed?
            data_set.addSample(item, item)
        trainer.trainUntilConvergence(data_set, maxEpochs=50)
        self._connection_between_layers = connection_between_layers
        self._connection_with_bias_layers = connection_with_bias_layers
        print("Done!\n")
        return

    def create_sge_files_for_simulation(self, potential_centers = None):
        if potential_centers is None:
            potential_centers = self.get_boundary_points()
        neural_network_related.create_sge_files(potential_centers)
        return

    def get_boundary_points(self, list_of_points = None, num_of_bins = 5):
        if list_of_points is None: list_of_points = self._PCs
        x = [item[0] for item in list_of_points]
        y = [item[1] for item in list_of_points]
        temp = np.histogram2d(x, y, bins=[num_of_bins, num_of_bins])
        hist_matrix = temp[0]
        # add a set of zeros around this region
        hist_matrix = np.insert(hist_matrix, num_of_bins, np.zeros(num_of_bins), 0)
        hist_matrix = np.insert(hist_matrix, 0, np.zeros(num_of_bins), 0)
        hist_matrix = np.insert(hist_matrix, num_of_bins, np.zeros(num_of_bins + 2), 1)
        hist_matrix = np.insert(hist_matrix, 0, np.zeros(num_of_bins + 2), 1)
        hist_matrix = (hist_matrix != 0).astype(int)
        sum_of_neighbors = np.zeros(np.shape(hist_matrix))  # number of neighbors occupied with some points
        for i in range(np.shape(hist_matrix)[0]):
            for j in range(np.shape(hist_matrix)[1]):
                if i != 0: sum_of_neighbors[i, j] += hist_matrix[i - 1][j]
                if j != 0: sum_of_neighbors[i, j] += hist_matrix[i][j - 1]
                if i != np.shape(hist_matrix)[0] - 1: sum_of_neighbors[i, j] += hist_matrix[i + 1][j]
                if j != np.shape(hist_matrix)[1] - 1: sum_of_neighbors[i, j] += hist_matrix[i][j + 1]
        bin_width_0 = temp[1][1] - temp[1][0]
        bin_width_1 = temp[2][1] - temp[2][0]
        min_coor_in_PC_space_0 = temp[1][0] - 0.5 * bin_width_0  # multiply by 0.5 since we want the center of the grid
        min_coor_in_PC_space_1 = temp[2][0] - 0.5 * bin_width_1
        potential_centers = []
        for i in range(np.shape(hist_matrix)[0]):
            for j in range(np.shape(hist_matrix)[1]):
                if hist_matrix[i, j] == 0 and sum_of_neighbors[i, j] != 0:  # no points in this block but there are points in neighboring blocks
                    temp_potential_center = [round(min_coor_in_PC_space_0 + i * bin_width_0, 2), round(min_coor_in_PC_space_1 + j * bin_width_1, 2)]
                    potential_centers.append(temp_potential_center)
        return potential_centers

    # this function is added after those old objects of A were created
    def plotting_in_PC_space_with_coloring_option(self,
                                                  list_of_coordinate_files_for_plotting=None,  # accept multiple files
                                                  color_option='pure'):
        '''
        by default, we are using training data, and we also allow external data input
        '''
        if list_of_coordinate_files_for_plotting is None:
            PCs_to_plot = self._PCs
        else:
            temp_sincos = []
            for item in list_of_coordinate_files_for_plotting:
                temp_sincos += self.get_many_cossin_from_coordiantes_in_file(item)
            temp_mid_result = self.get_mid_result(input_data = temp_sincos)
            PCs_to_plot = [item[1] for item in temp_mid_result]
        (x, y) = ([item[0] for item in PCs_to_plot], [item[1] for item in PCs_to_plot])
        # coloring
        if color_option == 'pure':
            coloring = 'red'
        elif color_option == 'step':
            coloring = range(len(x))
        fig, ax = plt.subplots()
        ax.scatter(x, y, c=coloring)
        ax.set_xlabel("PC1")
        ax.set_ylabel("PC2")
        plt.show()
        return
But it seems that plotting_in_PC_space_with_coloring_option() was not bound to those old objects. Is there any way to fix this? (I do not want to recreate the objects, since creation involves a CPU-intensive calculation and would take a very long time.)
Thanks!
Something like this:
class A:
    def q(self): print(1)

a = A()

def f(self): print(2)

setattr(A, 'f', f)
a.f()
This is called a monkey patch.
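Method lookup happens on the class at call time, so patching A makes the new method visible to every existing instance, including instances restored later with pickle.load() (pickle stores only instance state and re-binds it to the current class definition, so the patched method is found). To attach a function to just one instance rather than the whole class, types.MethodType binds it to that object. A quick illustration (class and function names here are placeholders, not from the question):

import types

class A:
    pass

a, b = A(), A()

def plot(self):
    print("plotting", self)

A.plot = plot  # class-level patch: every instance of A sees it
a.plot()
b.plot()

def special(self):
    print("only this object")

a.special = types.MethodType(special, a)  # instance-level patch: only a has it
a.special()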
