Problems with float overflow in backpropagation - python

After training on just a few rows, some numbers grow so large that they overflow float precision.
I'm new to machine learning and to Python (my college teacher recommended using Python), and this is my first Stack Overflow question.
I googled first, of course, but I didn't find anything that helped.
This post, http://jrusev.github.io/post/hacking-mnist/, looks interesting, but I can't compare it to my code because of my lack of experience. (I've always worked as a front-end developer.)
import pandas as pd
import numpy as np

# Global variables
outputDictionary = {'0':[1,0,0,0,0,0,0,0,0,0], '1':[0,2,0,0,0,0,0,0,0,0],
                    '2':[0,0,1,0,0,0,0,0,0,0], '3':[0,0,0,1,0,0,0,0,0,0], '4':[0,0,0,0,1,0,0,0,0,0],
                    '5':[0,0,0,0,0,1,0,0,0,0], '6':[0,0,0,0,0,0,1,0,0,0], '7':[0,0,0,0,0,0,0,1,0,0],
                    '8':[0,0,0,0,0,0,0,0,1,0], '9':[0,0,0,0,0,0,0,0,0,1] }
learningRate = 0.2
middleLayerSize = 100
outputSize = 10
inputSize = 11
v = np.random.uniform(-1.00, 1.00, (inputSize, middleLayerSize))   # [rows, middleLayerSize]
w = np.random.uniform(-1.00, 1.00, (middleLayerSize, outputSize))  # [middleLayerSize, outputSize]
errors = []
inputCsv = pd.read_csv('a.csv')
inputData = []

# Functions
def prepareData():
    for row in inputCsv.itertuples(index=False):
        arrRow = list(row)
        for i in range(len(arrRow)):
            if(i != 0):
                arrRow[i] = float(arrRow[i]) / 255
        inputData.append(arrRow)

def train(maxEpochs):
    global errors
    global graph
    global inputData
    for epoch in range(maxEpochs):
        errorCount = 0
        print('Period ' + str(epoch))
        for row in inputData:
            expectedNumber = row.pop(0)
            expectedNumberObj = outputDictionary[str(expectedNumber)]
            zIn = calcZIn(row)
            zOutput = calcDelta(zIn, middleLayerSize)
            yIn = calcYIn(zOutput)
            yOutput = calcDelta(yIn, outputSize)
            validate = validadeOutput(expectedNumberObj, yOutput)
            if(validate == False):
                errorCount += 1
                propagateError(expectedNumberObj, row, yOutput, zOutput, zIn, yIn)
        errors.append(errorCount)
        print(errorCount)

def calcZIn(row):
    result = []
    for j in range(middleLayerSize):
        result.append(0)
        for i in range(inputSize):
            result[j] += v[i,j] * row[i]
    return result

def calcYIn(zOutput):
    result = []
    for j in range(outputSize):
        result.append(0)
        for i in range(middleLayerSize):
            result[j] += w[i,j] * zOutput[i]
    return result

def calcDelta(arr, arrSize):
    deltas = []
    for i in range(arrSize):
        deltas.append(activationFunction(arr[i]))
    return deltas

def activationFunction(x):
    return 1.0 / (1.0 + np.exp(-x))

def validadeOutput(targetObj, yOutput):
    for i in range(len(yOutput)):
        if(targetObj[i] != yOutput[i]):
            return False
    return True

def propagateError(expectedArr, row, yOutput, zOutput, zIn, yIn):
    errorY = calcError(expectedArr, yOutput, yIn, outputSize)
    errorW = calcWeightCorrection(errorY, zOutput, middleLayerSize, outputSize)
    sumError = sumDelta(errorY, w, middleLayerSize, outputSize)
    errorZ = calcError(sumError, zOutput, zIn, middleLayerSize)
    errorV = calcWeightCorrection(errorZ, row, inputSize, middleLayerSize)
    updateWeight(w, errorW, middleLayerSize, outputSize)
    updateWeight(v, errorV, inputSize, middleLayerSize)

def calcError(expectedArr, outputArr, inArr, size):
    error = []
    for i in range(size):
        error.append((expectedArr[i] - outputArr[i]) * inArr[i] * (1.0 - inArr[i]))
    return error

def calcWeightCorrection(error, output, lenght1, length2):
    delta = []
    for i in range(lenght1):
        delta.append([])
        for j in range(length2):
            delta[i].append(learningRate * error[j] * output[i])
    return delta

def sumDelta(error, weights, lenght1, length2):
    delta = []
    for i in range(lenght1):
        deltaValue = 0.0
        for j in range(length2):
            deltaValue += error[j] * weights[i][j]
        delta.append(deltaValue)
    return delta

def updateWeight(weight, delta, lenght1, length2):
    # print(lenght1)
    # print(length2)
    for i in range(lenght1):
        for j in range(length2):
            # print(str(i) + ' - ' + str(j))
            weight[i][j] += delta[i][j]

# Execution
prepareData()
train(1)
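
One likely overflow point in code like this is the sigmoid: np.exp(-x) overflows float64 once the argument magnitude exceeds roughly 709, which can happen when the weights and the weighted sums grow large. Purely as an illustration (a sketch, not part of the original code), a numerically safer activation clips its argument before exponentiating:

import numpy as np

def stable_sigmoid(x):
    # Clip the argument so np.exp never sees a magnitude large enough
    # to overflow float64 (np.exp overflows a little above 709).
    x = np.clip(x, -500.0, 500.0)
    return 1.0 / (1.0 + np.exp(-x))

Lowering the learning rate or scaling the inputs (as prepareData already does by dividing by 255) also tends to keep the sums inside a safe range.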

Related

Python - While trying to calculate RSI (Relative Strength Index, a stock indicator), my results are "upside down" and shifted

I am trying to calculate RSI using simple functions.
The general formula for it is:
RSI = 100/(1+RS), where RS = exponential moving average of gains / exponential moving average of losses.
Here is what I am getting:
[screenshot: the RSI curve my code produces]
Here is how it should look:
[screenshot: the expected RSI curve]
I have double- and even triple-checked everything, but I can't find the mistake, so I need your help; the question is probably simple, but I have no idea where I went wrong.
The general idea of RSI is that it should be low where the price is low and high where the price is high, yet no matter what I try, mine comes out upside down.
import os
import numpy as np
import pandas as pd
from contextlib import contextmanager

def EMA(close_price_arr, n):
    a = (2/n + 1)
    EMA_n = np.empty((1, len(close_price_arr)))
    nominator_ema = 0      # initialise accumulators (needed for the first i >= n iteration)
    denominator_ema = 0
    for i in range(len(close_price_arr)):
        if i < n:
            # creating NaN values where it is impossible to calculate EMA,
            # to drop them later after connecting the whole database
            EMA_n[0, i] = 'NaN'
        if i >= n:
            # calculating nominator and denominator of EMA
            for j in range(n):
                nominator_ema += close_price_arr[i - j] * a**(j)
                denominator_ema += a**(j)
            EMA_n[0, i] = nominator_ema / denominator_ema
            nominator_ema = 0
            denominator_ema = 0
    return EMA_n

def gains(close_price_arr):
    gain_arr = np.empty((len(close_price_arr) - 1))
    for i in range(len(close_price_arr)):
        if i == 0:
            pass
        if i >= 1:
            if close_price_arr[i] > close_price_arr[i - 1]:
                gain_arr[i - 1] = (close_price_arr[i] - close_price_arr[i - 1])
            else:
                gain_arr[i - 1] = 0
    return gain_arr

def losses(close_price_arr):
    loss_arr = np.empty((len(close_price_arr) - 1))
    for i in range(len(close_price_arr)):
        if i == 0:
            pass
        if i >= 1:
            if close_price_arr[i] < close_price_arr[i - 1]:
                loss_arr[i - 1] = abs(close_price_arr[i] - close_price_arr[i - 1])
            else:
                loss_arr[i - 1] = 0
    return loss_arr

def RSI(gain_arr, loss_arr, n):
    EMA_u = EMA(gain_arr, n)
    EMA_d = EMA(loss_arr, n)
    EMA_diff = EMA_u / EMA_d
    x, y = EMA_diff.shape
    print(x, y)
    RSI_n = np.empty((1, y))
    for i in range(y):
        if EMA_diff[0, i] == 'NaN':
            RSI_n[0, i] = 'NaN'
            print(i)
        else:
            RSI_n[0, i] = 100 / (1 + EMA_diff[0, i])
    return RSI_n

@contextmanager
def show_complete_array():
    oldoptions = np.get_printoptions()
    np.set_printoptions(threshold=np.inf)
    try:
        yield
    finally:
        np.set_printoptions(**oldoptions)

np.set_printoptions(linewidth=3000)
pd.set_option('display.max_columns', None)

# Specifying root folder, file folder and file
FILE = 'TVC_SILVER, 5.csv'
FOLDER = 'src'
PROJECT_ROOT_DIR = '.'
csv_path = os.path.join(PROJECT_ROOT_DIR, FOLDER, FILE)

# reading csv
price_data = pd.read_csv(csv_path, delimiter=',')
price_data_copy = price_data.copy()
price_data_nodate = price_data.copy().drop('time', axis=1)
price_data_np = price_data_nodate.to_numpy(dtype='float32')
close_price = price_data_np[:, 3]

EMA15 = EMA(close_price_arr=close_price, n=15)
EMA55 = EMA(close_price_arr=close_price, n=55)
gain = gains(close_price_arr=close_price)
loss = losses(close_price_arr=close_price)
RSI14 = RSI(gain_arr=gain, loss_arr=loss, n=14)
Try this:
"""dataset is a dataframe"""
def RSI(dataset, n=14):
delta = dataset.diff()
dUp, dDown = delta.copy(), delta.copy()
dUp[dUp < 0] = 0
dDown[dDown > 0] = 0
RolUp = pd.Series(dUp).rolling(window=n).mean()
RolDown = pd.Series(dDown).rolling(window=n).mean().abs()
RS = RolUp / RolDown
rsi= 100.0 - (100.0 / (1.0 + RS))
return rsi
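
A possible way to wire this into the data loaded above (a sketch only; it assumes the CSV exposes a 'close' column, which may not match the actual headers in 'TVC_SILVER, 5.csv'):

# Hypothetical usage of the answer's RSI() on the question's DataFrame
rsi14 = RSI(price_data['close'], n=14)
print(rsi14.tail())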

Mpi4py: printing and plotting during execution

I recently started working with MPI in order to accelerate some code (an SPH/gravity simulation).
So far it seems to be working: the positions of my particles change between the beginning and the end of the program, and Task Manager shows several Python processes at work...
But I have two problems:
1. I can't print text during the execution of the program; the text only appears once it has finished.
2. I'm unable to create a graph using matplotlib.
In both cases, neither Python nor MPI returns any error. My guess, for the text at least, is that it only gets printed once the mpiexec run ends.
Thanks for any insight!
Here is the code; I run it with !mpiexec -n 8 python mpi4py_test.py:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from mpi4py import MPI
class particule:
def __init__(self, h, pos, vel = [0, 0], m = 1, P = 0, density = 0, acc = [0, 0]):
self.x, self.y = pos
self.u, self.v = vel
self.acc_x, self.acc_y = acc
self.m = m
self.h = h # kernel lenght
self.P = P # Pressure
self.density = density
def kernel(part_a, part_b ):
" monaghan cubic spline "
cst = 1 / (np.pi * part_a.h**3)
dist = np.sqrt((part_b.x-part_a.x)**2 + (part_b.y-part_a.y)**2 )
r = dist / h
tmp = 0
if r < 1:
tmp = cst * (1 - 3/2* r**2 + 3/4*r**3)
elif r < 2:
tmp = cst * (1/4*(2-r)**3)
return tmp
def grad_kernel(part_a, part_b):
cst = 1 / (np.pi * part_a.h**3)
dist = np.sqrt((part_b.x-part_a.x)**2 + (part_b.y-part_a.y)**2 )
r = dist / part_a.h
tmp = 0
if r < 1:
tmp = cst * (9/4 * r**2 - 3*r)
elif r < 2:
tmp = cst * (-3/4*(2-r)**2)
return tmp
class hash_grid:
def __init__(self, cell_size):
self.cell_size = cell_size
self.cell = {}
self.part_list = []
def key(self, part):
return (part.x//self.cell_size,
part.y//self.cell_size)
def add(self, part):
tmp = self.key(part)
self.cell.setdefault(tmp, []).append(part)
self.part_list.append(part)
def neighbours(self, part):
idx = self.key(part)
cell_N = None
cell_S = None
cell_E = None
cell_W = None
cell_NE = None
cell_NW = None
cell_SE = None
cell_SW = None
if (idx[0], idx[1]+1) in self.cell: cell_N = (idx[0], idx[1]+1)
if (idx[0], idx[1]-1) in self.cell: cell_S = (idx[0], idx[1]-1)
if (idx[0]+1, idx[1]) in self.cell: cell_E = (idx[0]+1, idx[1])
if (idx[0]-1, idx[1]) in self.cell: cell_W = (idx[0]-1, idx[1])
if (idx[0]+1, idx[1]+1) in self.cell: cell_NE = (idx[0]+1, idx[1]+1)
if (idx[0]-1, idx[1]+1) in self.cell: cell_NW = (idx[0]-1, idx[1]+1)
if (idx[0]+1, idx[1]-1) in self.cell: cell_SE = (idx[0]+1, idx[1]-1)
if (idx[0]-1, idx[1]-1) in self.cell: cell_SW = (idx[0]-1, idx[1]-1)
return [value for cel in (idx, cell_N, cell_S, cell_E, cell_W, cell_NE, cell_NW, cell_SE, cell_SW) if cel!=None for value in self.cell.get(cel) ]
def split(to_split, nb_chunk):
"take a list and split it most evenly possible"
result = []
q, r = divmod(len(to_split), nb_chunk)
curr = 0
last = 0
for i in range(nb_chunk):
if r>0:
last = curr + q + 1
result.append(to_split[curr: last])
r = r-1
curr = last
else:
last = curr + q
result.append(to_split[curr: last])
curr = last
return result
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
if rank == 0:
n_part = 2000
h = 2
grid = hash_grid(h)
points = np.zeros((n_part, 2))
sim_range = 20
dt = 0.005
n_iter = 2500
k = 5
for _i in range(n_part):
pos = np.random.uniform(-sim_range, sim_range, 2)
vel = np.random.uniform(-.2, .2, 2)
p = particule(h, pos, vel)
grid.add(p)
points[_i, :] = pos
for part in grid.part_list:
part.density = 0
part.P = 0
for p in grid.neighbours(part):
part.density = part.density + p.m * kernel(part, p)
part.P = k * ( part.density )**2 # - density_0)
data = split(grid.part_list,8)
for t in range(10):
if rank == 0 :
"1 - verlet 1/2 ; serial"
for i, part in enumerate(grid.part_list):
part.u, part.v = part.u + part.acc_x * dt/2, part.v + part.acc_y * dt/2
part.x, part.y = part.x + part.u * dt, part.y + part.v * dt
"2 - update grid ; serial"
jnk = grid.part_list
del grid
grid=hash_grid(h)
for p in jnk:
grid.add(p)
grid_bcast = grid
data_b = grid
chunk_of_part_list = split(grid.part_list,8)
else:
grid_bcast=None
chunk_of_part_list = None
data_b=None
grid_bcast = comm.bcast(grid_bcast, root=0)
chunk_of_part_list = comm.scatter(chunk_of_part_list, root=0)
"3 - get acc ; parallel"
for part in chunk_of_part_list:
part.acc_x = 0
part.acc_y = 0
for p in grid_bcast.neighbours(part):
if p != part:
r = np.sqrt((p.x-part.x)**2 + (p.y-part.y)**2)
if r==0: pass
else:
part.acc_x = part.acc_x - p.m * (part.P/part.density**2 + p.P/p.density**2) * grad_kernel(part, p) * (p.x - part.x)/r
part.acc_y = part.acc_y - p.m * (part.P/part.density**2 + p.P/p.density**2) * grad_kernel(part, p) * (p.y - part.y)/r
dist = np.sqrt(part.x**2+part.y**2)
part.acc_x = part.acc_x - .5 * part.x -1*part.u
part.acc_y = part.acc_y - .5 * part.y -1*part.v
chunk_of_part_list = comm.gather(chunk_of_part_list,root=0)
"4 - verlet 2/2 ; serial"
if rank == 0:
grid.part_list = list(matplotlib.cbook.flatten(chunk_of_part_list))
for i, part in enumerate(grid.part_list):
part.u, part.v = part.u + part.acc_x * dt/2, part.v + part.acc_y * dt/2
points[i,0], points[i,1] = part.x, part.y # for the figure
if rank==0:
print(t, flush=True)
if rank == 0:
print('point 0', points[0,:])
fig = plt.figure()
ax = fig.add_subplot()
sc = ax.scatter(points[:, 0], points[:, 1],s=3)
ax.set_aspect('equal', 'box')
ax.set_xlim(-sim_range,sim_range)
ax.set_ylim(-sim_range,sim_range)
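
No answer is recorded above, but two things commonly suggested for this kind of symptom are worth trying (a sketch under assumptions, not a verified fix for this exact code): force stdout to flush so text shows up while mpiexec is still running, and save figures to files instead of expecting an interactive window from MPI-launched processes.

# Sketch: flushed printing and file-based plotting under mpiexec (assumes mpi4py is installed).
import matplotlib
matplotlib.use("Agg")          # render to files; no GUI is needed under mpiexec
import matplotlib.pyplot as plt
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

print("rank %d starting" % rank, flush=True)   # flush=True makes the text appear immediately
# (alternatively, launch with: mpiexec -n 8 python -u mpi4py_test.py)

if rank == 0:
    fig, ax = plt.subplots()
    ax.scatter([0, 1, 2], [0, 1, 4], s=3)
    fig.savefig("points_rank0.png")            # save to a file instead of plt.show()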

Optimizing a complex algorithm

I know this is not an ideal place for questions of this scope, but I'm not sure where else to ask this or how to break it down. I've been working on a function for the past couple of weeks; it runs, but for it to be feasible for my purposes I need to speed it up 200-300x.
I have an image array in which all pixels of similar color have been averaged and set to that average value. I also have a 2D array of the same height and width that labels each unique, non-contiguous feature of the image.
Using these, I need to assess the size of each feature and its level of contrast to each of its neighbors. These values are used in an equation, and if the output of that equation is below a certain threshold, that feature is merged with its most similar neighbor.
I've uploaded the image and the feature label array (written out with numpy.savetxt()) to OneDrive and attached the links below.
Code:
def textureRemover(pix, labeledPix, ratio = 1.0):
numElements = numpy.amax(labeledPix)
maxSize = numpy.count_nonzero(labeledPix)
MAXIMUMCONTRAST = 443.405
for regionID in range(numElements):
start = time.clock()
regionID += 1
if regionID not in labeledPix:
continue
#print(regionID)
#print((regionID / numElements) * 100, '%')
neighborIDs = getNeighbors(labeledPix, regionID)
if 0 in neighborIDs:
neighborIDs.remove(0) #remove white value
regionMask = labeledPix == regionID
region = pix[regionMask]
size = numpy.count_nonzero(regionMask)
contrastMin = (ratio - (size / maxSize)) * MAXIMUMCONTRAST
regionMean = region.mean(axis = 0)
if len(neighborIDs) > 200:
contrast = numpy.zeros(labeledPix.shape)
contrast[labeledPix!=0] = numpy.sqrt(numpy.sum((regionMean - pix[labeledPix!=0])**2, axis = -1))
significantMask = (contrast < contrastMin)
significantContrasts = list(numpy.unique(contrast[significantMask]))
significantNeighbors = {}
for significantContrast in significantContrasts:
minContrast = min(significantContrasts)
if labeledPix[contrast == minContrast][0] in neighborIDs:
significantNeighbors[minContrast] = labeledPix[contrast == minContrast][0]
else:
significantContrasts.pop(significantContrasts.index(minContrast))
else:
significantNeighbors = {}
for neighborID in neighborIDs:
neighborMask = labeledPix == neighborID
neighbor = pix[neighborMask]
neighborMean = neighbor.mean(axis = 0)
contrast = numpy.sqrt(numpy.sum((regionMean - neighborMean)**2, axis = -1))
if contrast < contrastMin:
significantNeighbors[contrast] = neighborID
if significantNeighbors:
contrasts = significantNeighbors.keys()
minContrast = min(contrasts)
minNeighbor = significantNeighbors[minContrast]
neighborMask = labeledPix == minNeighbor
neighborSize = numpy.count_nonzero(neighborMask)
if neighborSize <= size:
labeledPix[neighborMask] = regionID
pix[neighborMask] = regionMean
else:
labeledPix[regionMask] = minNeighbor
pix[regionMask] = pix[neighborMask].mean(axis = 0)
print(time.clock() - start)
return pix
pix (OneDrive link)
labeledPix (OneDrive link)
I know I'm asking for a lot of help, but I've been stuck on this for a few weeks and am unsure what else I can do. Any help will be greatly appreciated!
Here is an optimized version of most of your logic (I underestimated how much work that would be...). I skipped the >200 branch and used fake data because I couldn't access your links. With your >200 branch switched off, your code and mine appear to give the same result, and mine is quite a bit faster on the fake example.
Sample output:
original
26.056154000000003
optimized
0.763613000000003
equal
True
Code:
import numpy as np
from numpy.lib.stride_tricks import as_strided
def mockdata(m, n, k):
colors = np.random.random((m, n, 3))
i, j = np.ogrid[:m, :n]
labels = np.round(k*k * (np.sin(0.05 * i) + np.sin(0.05 * j)**2)).astype(int) % k
return colors, labels
DIAG_NEIGHBORS = True
MAXIMUMCONTRAST = 443.405
def textureRemover2(pix, labeledPix, ratio=1.0):
start = time.clock()
pix, labeledPix = pix.copy(), labeledPix.copy()
pixf, labeledPixf = pix.reshape(-1, 3), labeledPix.ravel()
m, n = labeledPix.shape
s, t = labeledPix.strides
# find all sizes in O(n)
sizes = np.bincount(labeledPixf)
n_ids = len(sizes)
# make index for quick access to labeled areas
lblidx = np.split(np.argsort(labeledPixf), np.cumsum(sizes[:-1]))
lblidx[0] = None
# find all mean colors in O(n)
regionMeans = np.transpose([np.bincount(labeledPix.ravel(), px)
/ np.maximum(sizes, 1)
for px in pix.reshape(-1, 3).T])
# find all neighbors in O(n)
horz = set(frozenset(p) for bl in as_strided(labeledPix, (m,n-1,2), (s,t,t))
for p in bl)
vert = set(frozenset(p) for bl in as_strided(labeledPix, (m-1,n,2), (s,t,s))
for p in bl)
nb = horz|vert
if DIAG_NEIGHBORS:
dwnrgt = set(frozenset(p) for bl in as_strided(
labeledPix, (m-1,n-1,2), (s,t,s+t)) for p in bl)
dwnlft = set(frozenset(p) for bl in as_strided(
labeledPix[::-1], (m-1,n-1,2), (-s,t,t-s)) for p in bl)
nb = nb|dwnrgt|dwnlft
nb = {p for p in nb if len(p) == 2 and not 0 in p}
nb_dict = {}
for a, b in nb:
nb_dict.setdefault(a, set()).add(b)
nb_dict.setdefault(b, set()).add(a)
maxSize = labeledPix.size - sizes[0]
for id_ in range(1, n_ids):
nbs = list(nb_dict.get(id_, set()))
if not nbs:
continue
d = regionMeans[id_] - regionMeans[nbs]
d = np.einsum('ij,ij->i', d, d)
mnd = np.argmin(d)
if d[mnd] < ((ratio - sizes[id_]/maxSize) * MAXIMUMCONTRAST)**2:
mn = nbs[mnd]
lrg, sml = (id_, mn) if sizes[id_] >= sizes[mn] else (mn, id_)
sizes[lrg], sizes[sml] = sizes[lrg] + sizes[sml], 0
for nb in nb_dict[sml]:
nb_dict[nb].remove(sml)
nb_dict[nb].add(lrg)
nb_dict[lrg].update(nb_dict[sml])
nb_dict[lrg].remove(lrg)
nb_dict[sml] = set()
pixf[lblidx[sml]] = regionMeans[lrg]
labeledPixf[lblidx[sml]] = lrg
lblidx[lrg], lblidx[sml] = np.r_[lblidx[lrg],lblidx[sml]], None
print(time.clock() - start)
return pix
from scipy.ndimage.morphology import binary_dilation
import time
STRUCTEL = np.ones((3,3), int) if DIAG_NEIGHBORS else np.array([[0,1,0],[1,1,1],[0,1,0]], int)
def getNeighbors(labeledPix, regionID):
nb = set(labeledPix[binary_dilation(labeledPix == regionID, structure=STRUCTEL)])
nb.remove(regionID)
return sorted(nb)
numpy = np
def textureRemover(pix, labeledPix, ratio = 1.0):
pix, labeledPix = pix.copy(), labeledPix.copy()
numElements = numpy.amax(labeledPix)
maxSize = numpy.count_nonzero(labeledPix)
MAXIMUMCONTRAST = 443.405
start = time.clock()
for regionID in range(numElements):
regionID += 1
if regionID not in labeledPix:
continue
#print(regionID)
#print((regionID / numElements) * 100, '%')
neighborIDs = getNeighbors(labeledPix, regionID)
if 0 in neighborIDs:
neighborIDs.remove(0) #remove white value
regionMask = labeledPix == regionID
region = pix[regionMask]
size = numpy.count_nonzero(regionMask)
contrastMin = (ratio - (size / maxSize)) * MAXIMUMCONTRAST
regionMean = region.mean(axis = 0)
if len(neighborIDs) > 20000:
contrast = numpy.zeros(labeledPix.shape)
contrast[labeledPix!=0] = numpy.sqrt(numpy.sum((regionMean - pix[labeledPix!=0])**2, axis = -1))
significantMask = (contrast < contrastMin)
significantContrasts = list(numpy.unique(contrast[significantMask]))
significantNeighbors = {}
for significantContrast in significantContrasts:
minContrast = min(significantContrasts)
if labeledPix[contrast == minContrast][0] in neighborIDs:
significantNeighbors[minContrast] = labeledPix[contrast == minContrast][0]
else:
significantContrasts.pop(significantContrasts.index(minContrast))
else:
significantNeighbors = {}
for neighborID in neighborIDs:
neighborMask = labeledPix == neighborID
neighbor = pix[neighborMask]
neighborMean = neighbor.mean(axis = 0)
contrast = numpy.sqrt(numpy.sum((regionMean - neighborMean)**2, axis = -1))
if contrast < contrastMin:
significantNeighbors[contrast] = neighborID
if significantNeighbors:
contrasts = significantNeighbors.keys()
minContrast = min(contrasts)
minNeighbor = significantNeighbors[minContrast]
neighborMask = labeledPix == minNeighbor
neighborSize = numpy.count_nonzero(neighborMask)
if neighborSize <= size:
labeledPix[neighborMask] = regionID
pix[neighborMask] = regionMean
else:
labeledPix[regionMask] = minNeighbor
pix[regionMask] = pix[neighborMask].mean(axis = 0)
print(time.clock() - start)
return pix
data = mockdata(200, 200, 1000)
print('original')
res0 = textureRemover(*data)
print('optimized')
res2 = textureRemover2(*data)
print('equal')
print(np.allclose(res0, res2))

How to add a member function to an existing Python object?

Previously I created a lot of Python objects of class A, and I would now like to add a new method, plotting_in_PC_space_with_coloring_option() (its purpose is to plot some data held in the object), to class A and call it on those old objects.
An example is:
import copy
import numpy as np
from math import *
from pybrain.structure import *
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.datasets.supervised import SupervisedDataSet
import pickle
import neural_network_related
class A(object):
"""the neural network for simulation"""
'''
todo:
- find boundary
- get_angles_from_coefficients
'''
def __init__(self,
index, # the index of the current network
list_of_coor_data_files, # accept multiple files of training data
energy_expression_file, # input, output files
preprocessing_settings = None,
connection_between_layers = None, connection_with_bias_layers = None,
PCs = None, # principal components
):
self._index = index
self._list_of_coor_data_files = list_of_coor_data_files
self._energy_expression_file = energy_expression_file
self._data_set = []
for item in list_of_coor_data_files:
self._data_set += self.get_many_cossin_from_coordiantes_in_file(item)
self._preprocessing_settings = preprocessing_settings
self._connection_between_layers = connection_between_layers
self._connection_with_bias_layers = connection_with_bias_layers
self._node_num = [8, 15, 2, 15, 8]
self._PCs = PCs
def save_into_file(self, filename = None):
if filename is None:
filename = "network_%s.pkl" % str(self._index) # by default naming with its index
with open(filename, 'wb') as my_file:
pickle.dump(self, my_file, pickle.HIGHEST_PROTOCOL)
return
def get_cossin_from_a_coordinate(self, a_coordinate):
num_of_coordinates = len(a_coordinate) / 3
a_coordinate = np.array(a_coordinate).reshape(num_of_coordinates, 3)
diff_coordinates = a_coordinate[1:num_of_coordinates, :] - a_coordinate[0:num_of_coordinates - 1,:] # bond vectors
diff_coordinates_1=diff_coordinates[0:num_of_coordinates-2,:];diff_coordinates_2=diff_coordinates[1:num_of_coordinates-1,:]
normal_vectors = np.cross(diff_coordinates_1, diff_coordinates_2);
normal_vectors_normalized = np.array(map(lambda x: x / sqrt(np.dot(x,x)), normal_vectors))
normal_vectors_normalized_1 = normal_vectors_normalized[0:num_of_coordinates-3, :];normal_vectors_normalized_2 = normal_vectors_normalized[1:num_of_coordinates-2,:];
diff_coordinates_mid = diff_coordinates[1:num_of_coordinates-2]; # these are bond vectors in the middle (remove the first and last one), they should be perpendicular to adjacent normal vectors
cos_of_angles = range(len(normal_vectors_normalized_1))
sin_of_angles_vec = range(len(normal_vectors_normalized_1))
sin_of_angles = range(len(normal_vectors_normalized_1)) # initialization
for index in range(len(normal_vectors_normalized_1)):
cos_of_angles[index] = np.dot(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
sin_of_angles_vec[index] = np.cross(normal_vectors_normalized_1[index], normal_vectors_normalized_2[index])
sin_of_angles[index] = sqrt(np.dot(sin_of_angles_vec[index], sin_of_angles_vec[index])) * np.sign(sum(sin_of_angles_vec[index]) * sum(diff_coordinates_mid[index]));
return cos_of_angles + sin_of_angles
def get_many_cossin_from_coordinates(self, coordinates):
return map(self.get_cossin_from_a_coordinate, coordinates)
def get_many_cossin_from_coordiantes_in_file (self, filename):
coordinates = np.loadtxt(filename)
return self.get_many_cossin_from_coordinates(coordinates)
def mapminmax(self, my_list): # for preprocessing in network
my_min = min(my_list)
my_max = max(my_list)
mul_factor = 2.0 / (my_max - my_min)
offset = (my_min + my_max) / 2.0
result_list = np.array(map(lambda x : (x - offset) * mul_factor, my_list))
return (result_list, (mul_factor, offset)) # also return the parameters for processing
def get_mapminmax_preprocess_result_and_coeff(self,data=None):
if data is None:
data = self._data_set
data = np.array(data)
data = np.transpose(data)
result = []; params = []
for item in data:
temp_result, preprocess_params = self.mapminmax(item)
result.append(temp_result)
params.append(preprocess_params)
return (np.transpose(np.array(result)), params)
def mapminmax_preprocess_using_coeff(self, input_data=None, preprocessing_settings=None):
# try begin
if preprocessing_settings is None:
preprocessing_settings = self._preprocessing_settings
temp_setttings = np.transpose(np.array(preprocessing_settings))
result = []
for item in input_data:
item = np.multiply(item - temp_setttings[1], temp_setttings[0])
result.append(item)
return result
# try end
def get_expression_of_network(self, connection_between_layers=None, connection_with_bias_layers=None):
if connection_between_layers is None:
connection_between_layers = self._connection_between_layers
if connection_with_bias_layers is None:
connection_with_bias_layers = self._connection_with_bias_layers
node_num = self._node_num
expression = ""
# first part: network
for i in range(2):
expression = '\n' + expression
mul_coef = connection_between_layers[i].params.reshape(node_num[i + 1], node_num[i])
bias_coef = connection_with_bias_layers[i].params
for j in range(np.size(mul_coef, 0)):
temp_expression = 'layer_%d_unit_%d = tanh( ' % (i + 1, j)
for k in range(np.size(mul_coef, 1)):
temp_expression += ' %f * layer_%d_unit_%d +' % (mul_coef[j, k], i, k)
temp_expression += ' %f);\n' % (bias_coef[j])
expression = temp_expression + expression # order of expressions matter in OpenMM
# second part: definition of inputs
index_of_backbone_atoms = [2, 5, 7, 9, 15, 17, 19];
for i in range(len(index_of_backbone_atoms) - 3):
index_of_coss = i
index_of_sins = i + 4
expression += 'layer_0_unit_%d = (raw_layer_0_unit_%d - %f) * %f;\n' % \
(index_of_coss, index_of_coss, self._preprocessing_settings[index_of_coss][1], self._preprocessing_settings[index_of_coss][0])
expression += 'layer_0_unit_%d = (raw_layer_0_unit_%d - %f) * %f;\n' % \
(index_of_sins, index_of_sins, self._preprocessing_settings[index_of_sins][1], self._preprocessing_settings[index_of_sins][0])
expression += 'raw_layer_0_unit_%d = cos(dihedral_angle_%d);\n' % (index_of_coss, i)
expression += 'raw_layer_0_unit_%d = sin(dihedral_angle_%d);\n' % (index_of_sins, i)
expression += 'dihedral_angle_%d = dihedral(p%d, p%d, p%d, p%d);\n' % \
(i, index_of_backbone_atoms[i], index_of_backbone_atoms[i+1],index_of_backbone_atoms[i+2],index_of_backbone_atoms[i+3])
return expression
def write_expression_into_file(self, out_file = None):
if out_file is None: out_file = self._energy_expression_file
expression = self.get_expression_of_network()
with open(out_file, 'w') as f_out:
f_out.write(expression)
return
def get_mid_result(self, input_data=None, connection_between_layers=None, connection_with_bias_layers=None):
if input_data is None: input_data = self._data_set
if connection_between_layers is None: connection_between_layers = self._connection_between_layers
if connection_with_bias_layers is None: connection_with_bias_layers = self._connection_with_bias_layers
node_num = self._node_num
temp_mid_result = range(4)
mid_result = []
# first need to do preprocessing
for item in self.mapminmax_preprocess_using_coeff(input_data, self._preprocessing_settings):
for i in range(4):
mul_coef = connection_between_layers[i].params.reshape(node_num[i + 1], node_num[i]) # fix node_num
bias_coef = connection_with_bias_layers[i].params
previous_result = item if i == 0 else temp_mid_result[i - 1]
temp_mid_result[i] = np.dot(mul_coef, previous_result) + bias_coef
if i != 3: # the last output layer is a linear layer, while others are tanh layers
temp_mid_result[i] = map(tanh, temp_mid_result[i])
mid_result.append(copy.deepcopy(temp_mid_result)) # note that should use deepcopy
return mid_result
def get_PC_and_save_it_to_network(self):
'''get PCs and save the result into _PCs
'''
mid_result = self.get_mid_result()
self._PCs = [item[1] for item in mid_result]
return
def train(self):
####################### set up autoencoder begin #######################
node_num = self._node_num
in_layer = LinearLayer(node_num[0], "IL")
hidden_layers = [TanhLayer(node_num[1], "HL1"), TanhLayer(node_num[2], "HL2"), TanhLayer(node_num[3], "HL3")]
bias_layers = [BiasUnit("B1"),BiasUnit("B2"),BiasUnit("B3"),BiasUnit("B4")]
out_layer = LinearLayer(node_num[4], "OL")
layer_list = [in_layer] + hidden_layers + [out_layer]
molecule_net = FeedForwardNetwork()
molecule_net.addInputModule(in_layer)
for item in (hidden_layers + bias_layers):
molecule_net.addModule(item)
molecule_net.addOutputModule(out_layer)
connection_between_layers = range(4); connection_with_bias_layers = range(4)
for i in range(4):
connection_between_layers[i] = FullConnection(layer_list[i], layer_list[i+1])
connection_with_bias_layers[i] = FullConnection(bias_layers[i], layer_list[i+1])
molecule_net.addConnection(connection_between_layers[i]) # connect two neighbor layers
molecule_net.addConnection(connection_with_bias_layers[i])
molecule_net.sortModules() # this is some internal initialization process to make this module usable
####################### set up autoencoder end #######################
trainer = BackpropTrainer(molecule_net, learningrate=0.002,momentum=0.4,verbose=False, weightdecay=0.1, lrdecay=1)
data_set = SupervisedDataSet(node_num[0], node_num[4])
sincos = self._data_set
(sincos_after_process, self._preprocessing_settings) = self.get_mapminmax_preprocess_result_and_coeff(data = sincos)
for item in sincos_after_process: # is it needed?
data_set.addSample(item, item)
trainer.trainUntilConvergence(data_set, maxEpochs=50)
self._connection_between_layers = connection_between_layers
self._connection_with_bias_layers = connection_with_bias_layers
print("Done!\n")
return
def create_sge_files_for_simulation(self,potential_centers = None):
if potential_centers is None:
potential_centers = self.get_boundary_points()
neural_network_related.create_sge_files(potential_centers)
return
def get_boundary_points(self, list_of_points = None, num_of_bins = 5):
if list_of_points is None: list_of_points = self._PCs
x = [item[0] for item in list_of_points]
y = [item[1] for item in list_of_points]
temp = np.histogram2d(x,y, bins=[num_of_bins, num_of_bins])
hist_matrix = temp[0]
# add a set of zeros around this region
hist_matrix = np.insert(hist_matrix, num_of_bins, np.zeros(num_of_bins), 0)
hist_matrix = np.insert(hist_matrix, 0, np.zeros(num_of_bins), 0)
hist_matrix = np.insert(hist_matrix, num_of_bins, np.zeros(num_of_bins + 2), 1)
hist_matrix = np.insert(hist_matrix, 0, np.zeros(num_of_bins +2), 1)
hist_matrix = (hist_matrix != 0).astype(int)
sum_of_neighbors = np.zeros(np.shape(hist_matrix)) # number of neighbors occupied with some points
for i in range(np.shape(hist_matrix)[0]):
for j in range(np.shape(hist_matrix)[1]):
if i != 0: sum_of_neighbors[i,j] += hist_matrix[i - 1][j]
if j != 0: sum_of_neighbors[i,j] += hist_matrix[i][j - 1]
if i != np.shape(hist_matrix)[0] - 1: sum_of_neighbors[i,j] += hist_matrix[i + 1][j]
if j != np.shape(hist_matrix)[1] - 1: sum_of_neighbors[i,j] += hist_matrix[i][j + 1]
bin_width_0 = temp[1][1]-temp[1][0]
bin_width_1 = temp[2][1]-temp[2][0]
min_coor_in_PC_space_0 = temp[1][0] - 0.5 * bin_width_0 # multiply by 0.5 since we want the center of the grid
min_coor_in_PC_space_1 = temp[2][0] - 0.5 * bin_width_1
potential_centers = []
for i in range(np.shape(hist_matrix)[0]):
for j in range(np.shape(hist_matrix)[1]):
if hist_matrix[i,j] == 0 and sum_of_neighbors[i,j] != 0: # no points in this block but there are points in neighboring blocks
temp_potential_center = [round(min_coor_in_PC_space_0 + i * bin_width_0, 2), round(min_coor_in_PC_space_1 + j * bin_width_1, 2)]
potential_centers.append(temp_potential_center)
return potential_centers
# this function is added after those old objects of A were created
def plotting_in_PC_space_with_coloring_option(self,
list_of_coordinate_files_for_plotting=None, # accept multiple files
color_option='pure'):
'''
by default, we are using training data, and we also allow external data input
'''
if list_of_coordinate_files_for_plotting is None:
PCs_to_plot = self._PCs
else:
temp_sincos = []
for item in list_of_coordinate_files_for_plotting:
temp_sincos += self.get_many_cossin_from_coordiantes_in_file(item)
temp_mid_result = self.get_mid_result(input_data = temp_sincos)
PCs_to_plot = [item[1] for item in temp_mid_result]
(x, y) = ([item[0] for item in PCs_to_plot], [item[1] for item in PCs_to_plot])
# coloring
if color_option == 'pure':
coloring = 'red'
elif color_option == 'step':
coloring = range(len(x))
fig, ax = plt.subplots()
ax.scatter(x,y, c=coloring)
ax.set_xlabel("PC1")
ax.set_ylabel("PC2")
plt.show()
return
But it seems that plotting_in_PC_space_with_coloring_option() was not bound to those old objects. Is there any way to fix this? (I do not want to recreate the objects, since creating them involves CPU-intensive calculation and would take a very long time.)
Thanks!
Something like this:
class A:
    def q(self):
        print(1)

a = A()

def f(self):
    print(2)

setattr(A, 'f', f)
a.f()
This is called a monkey patch.
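
Because setattr puts f on the class itself, every existing instance of A (including objects loaded back from old pickle files) immediately gains the new method. If instead you ever want to attach a method to a single existing object rather than to the whole class, a sketch of that variant (not part of the original answer) looks like this:

import types

class A:
    pass

a = A()

def g(self):
    print("bound to this one instance only")

a.g = types.MethodType(g, a)   # bind g to this particular instance
a.g()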

FloatingPointError: overflow encountered in double_scalars

I've set up numpy.seterr as follows:
np.seterr(invalid='raise', over ='raise', under='raise')
And I'm getting the following error:
c = beta[j,i] + oneminusbeta[j,i]
FloatingPointError: overflow encountered in double_scalars
I've checked what beta[j,i] and oneminusbeta[j,i] are at the point of crash, and these are their values:
beta: -131.340389182
oneminusbeta: 0.0
Please note that this addition (beta[j,i] + oneminusbeta[j,i]) had already run for thousands of iterations of a loop (which performs image classification) before crashing at this point.
How can I deal with this? Is it necessary to change the type of the numpy arrays?
This is how I've initialized them:
beta = np.empty([m,n])
oneminusbeta = np.empty([m,n])
Is it possible to cast the individual values before adding them up, rather than changing the entire array declarations? Or is this even a serious issue? Would it be safe to simply turn off the numpy.seterr configuration and let the calculations go ahead without raising the error?
Edit
Someone suggested below, and I suspected as well, that the values being added shouldn't cause an overflow. Then how can I find out where the overflow is really happening?
This is my code:
epthreshold = 709
enthreshold = -708
f.write("weights["+str(i)+", " + str(j)+"] = math.exp(beta: " +str(beta[j,i])+ " + oneminusbeta: " + str(oneminusbeta[j,i])+")\n" )
c = beta[j,i] + oneminusbeta[j,i]
weights[i,j] = math.exp(np.clip(c, enthreshold, epthreshold))
And when I check my log file, this is the line I get:
weights[5550, 13] = math.exp(beta: -131.340389182 + oneminusbeta: 0.0)
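
To narrow down where the overflow really happens, one option (an illustration, not taken from the original post) is to raise errors only around one suspect statement at a time with np.errstate, so the traceback points at the exact expression instead of at a whole region governed by a global np.seterr:

import numpy as np

# Hypothetical, self-contained illustration using the values from the log line above.
beta = np.array([[-131.340389182]])
oneminusbeta = np.array([[0.0]])
j, i = 0, 0

with np.errstate(over='raise', under='ignore', invalid='raise'):
    c = beta[j, i] + oneminusbeta[j, i]   # this particular sum does not overflow
    print(c)

# Wrapping individual numpy expressions one at a time isolates the operation that
# actually overflows; note that calls to math.exp raise Python's own OverflowError
# for arguments above ~709, regardless of numpy's error settings.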
Edit 2
Here's the rest of my code, where variables n,m and H have already been initialized to integer values:
import numba
import numpy as np
import statsmodels.api as sm
weights = np.empty([n,m])
for curr_n in range(n):
for curr_m in range(m):
weights[curr_n,curr_m] = 1.0/(n)
beta = np.empty([m,n])
oneminusbeta = np.empty([m,n])
for curr_class in range(m):
for curr_sample in range(n):
beta[curr_class,curr_sample] = 1./m
epthreshold = 709 # positive exponential threshold
enthreshold = -708
for h in range(H):
print "Boosting round %d ... " % h
z = np.empty([n,m])
for j in range(m): # computing working responses and weights, Step 2(a)(i)
for i in range(no_samples):
i_class = y[i] #get the correct class for the current sample
if h == 0:
z[i,j] = (int(j==i_class) - beta[j,i])/((beta[j,i])*(1. - beta[j,i]))
weights[i,j] = beta[j,i]*(1. - beta[j,i])
else:
if j == i_class:
z[i,j] = math.exp(np.clip(-beta[j,i],enthreshold, epthreshold))
else:
z[i,j] = -math.exp(np.clip(oneminusbeta[j,i], enthreshold, epthreshold))
f.write("weights["+str(i)+", " + str(j)+"] = math.exp(beta: " +str(beta[j,i])+ " + oneminusbeta: " + str(oneminusbeta[j,i])+")\n" )
c = beta[j,i] + oneminusbeta[j,i]
weights[i,j] = math.exp(np.clip(c, enthreshold, epthreshold))
g_h = np.zeros([1,1])
j = 0
# Calculating regression coefficients per class
# building the parameters per j class
for y1_w in zip(z.T, weights.T):
y1, w = y1_w
temp_g = sm.WLS(y1, X, w).fit() # Step 2(a)(ii)
if np.allclose(g_h,0):
g_h = temp_g.params
else:
g_h = np.c_[g_h, temp_g.params]
j = j + 1
if np.allclose(g,0):
g = g_h
else:
g = g + g_h # Step(2)(a)(iii)
# now set g(x), function coefficients according to new formula, step (2)(b)
sum_g = g.sum(axis=1)
for j in range(m):
diff = (g[:,j] - ((1./m) * sum_g))
g[:,j] = ((m-1.)/m) * diff
g_per_round[h,:,j] = g[:,j]
#Now computing beta, Step 2(c)...."
Q = 0.
e = 0.
for j in range(m):
# Calculating beta and oneminusbeta for class j
aj = 0.0
for i in range(no_samples):
i_class = y[i]
X1 = X[i].reshape(1, no_features)
g1 = g[:,j].reshape(no_features, 1)
gc = g[:,i_class].reshape(no_features, 1)
dot = 1. + float(np.dot(X1, g1)) - float(np.dot(X1,gc))
aj = dot
sum_e = 0.
a_q = []
a_q.append(0.)
for j2 in range(m): # calculating sum of e's except for all j except where j=i_class
if j2 != i_class: # g based on j2, not necessarily g1?
g2 = g[:,j2].reshape(no_features, 1)
dot1 = 1. + float(np.dot(X1, g2)) - float(np.dot(X1,gc))
e2 = math.exp(np.clip(dot1,enthreshold, epthreshold))
sum_e = sum_e + e2
a_q.append(dot1)
if (int(j==i_class) == 1):
a_q_arr = np.array(a_q)
alpha = np.array(a_q_arr[1:])
Q = mylogsumexp(f,a_q_arr, 1, 0)
sumalpha = mylogsumexp(f,alpha, 1, 0)
beta[j,i] = -Q
oneminusbeta[j,i] = sumalpha - Q
else:
alpha = a_q
alpha = np.array(alpha[1:])
a_q_arr = np.array(a_q)
Q = mylogsumexp(f,a_q_arr, 0, aj)
sumalpha = log(math.exp(np.clip(Q, enthreshold, epthreshold)) - math.exp(np.clip(aj, enthreshold, epthreshold)))
beta[j,i] = aj - Q
oneminusbeta[j,i] = sumalpha - Q
and the function mylogsumexp is:
import sys
import math
import numpy as np
from numpy import asarray, rollaxis

def mylogsumexp(f, a, is_class, maxaj, axis=None, b=None):
    np.seterr(over="raise", under="raise", invalid="raise")
    threshold = -sys.float_info.max
    maxthreshold = sys.float_info.max
    epthreshold = 709   # positive exponential threshold
    enthreshold = -708
    a = asarray(a)
    if axis is None:
        a = a.ravel()
    else:
        a = rollaxis(a, axis)
    if is_class == 1:
        a_max = a.max(axis=0)
    else:
        a_max = maxaj
    #bnone = " none "
    if b is not None:
        a_max = maxaj
        b = asarray(b)
        if axis is None:
            b = b.ravel()
        else:
            b = rollaxis(b, axis)
        a = np.clip(a - a_max, enthreshold, epthreshold)
        midout = np.sum(np.exp(a), axis=0)
        midout = 1.0 + np.clip(midout - math.exp(a_max), threshold, maxthreshold)
        out = np.log(midout)
    else:
        a = np.clip(a - a_max, enthreshold, epthreshold)
        out = np.log(np.sum(np.exp(a)))
        out += a_max
    if out == float("inf"):
        out = maxthreshold
    if out == float("-inf"):
        out = threshold
    return out
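
As a side note (not part of the original post), if the intent of mylogsumexp is the standard log-sum-exp trick, scipy ships a well-tested implementation that does the max-shifting internally and supports optional per-term weights; something along these lines could replace much of the hand-rolled clipping:

import numpy as np
from scipy.special import logsumexp

a = np.array([1000.0, 1001.0, 1002.0])
print(logsumexp(a))                                # ~1002.41, computed without overflowing
print(logsumexp(a, b=np.array([1.0, 2.0, 1.0])))   # b gives per-term weights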
