I have nonuniformly sampled data to which I am trying to apply a Gaussian filter. I am using Python's NumPy library for this. The data is of XY type; here is what it looks like:
[[ -0.96 390.63523024]
[ -1.085 390.68523024]
[ -1.21 390.44023023]
...
[-76.695 390.86023024]
[-77.105 392.51023024]
[-77.155 392.10023024]]
And here is a link to the whole *.npz file.
Here is my approach:
I start by defining a Gaussian function
Then I scan the data along the X axis with a while loop
Within each step of the loop:
I select a portion of data that is within two cutoff lengths
shift the X axis of the selected data portion to make it symmetrical around 0
evaluate the Gaussian function at every point, multiply by the corresponding Y values, sum, and divide by the number of elements
Move to the next point
Here is what the code looks like:
import numpy as np
import matplotlib.pyplot as plt

xy = np.load('1D_data.npz')['arr_0']

def g_func(xx, w=1.0):
    a = 0.47 * w
    return (1 / a) * np.exp((xx / a) ** 2 * (-np.pi))

x, y, x_, y_ = xy[:, 0], xy[:, 1], [], []
counter, xi, ww = 0, x[0], 1.0

# scan along the X axis (X is descending, so move down towards its minimum)
while xi > np.amin(x):
    # select the data portion within two cutoff lengths below the current point
    curr_x = x[(x < xi) & (x >= xi - 2 * ww)]
    g, ysel = [], []
    for i, els in enumerate(curr_x):
        # shift the selected X values so the window is symmetric around 0
        xil = els - curr_x[0] + abs(curr_x[0] - curr_x[-1]) / 2
        g.append(g_func(xil, ww))
        ysel.append(y[counter + i])
    # weight, sum and divide by the number of elements
    y_.append(np.sum(np.multiply(g, ysel)) / len(g))
    x_.append(xi)
    counter += 1
    xi = x[counter]

plt.plot(x, y, '-k')
plt.plot(x_, y_, '-r')
plt.show()
The output doesn't look right, though (see the figure below). Even discarding the edges, the convolution is very noisy and the values do not seem to correspond to the data. What am I possibly doing wrong?
You made one mistake in your code:
Before multiplying g with y_sel, y_sel is not centered.
The reason y_sel should be centered is that we want to add the relative differences, weighted by the Gaussian, to the entry at the center. If you multiply g with y_sel directly, the Gaussian weights not just the neighboring entries within the window but also the value of the center entry itself, which changes the function values dramatically. For example, with y values around 390, weighting the raw y values directly rescales that ~390 baseline by whatever the weights happen to sum to (after the division by the number of points), which is why the result drifts away from the data; centering first and adding the mean back afterwards preserves the baseline.
Below is my solution using NumPy:
def g_func(xx, w=1.0):
    mean = np.mean(xx)
    a = 0.47 * w
    return (1 / a) * np.exp(((xx - mean) / a) ** 2 * (-np.pi))

def get_convolution(array, half_window_size):
    array = np.concatenate((np.repeat(array[0], half_window_size),
                            array,
                            np.repeat(array[-1], half_window_size)))
    window_inds = [list(range(ind - half_window_size, ind + half_window_size + 1))
                   for ind in range(half_window_size, len(array) - half_window_size)]
    return np.take(array, window_inds)
xy = np.load('1D_data.npz')['arr_0']
x, y = xy[:, 0], xy[:, 1]
half_window_size = 4
x_conv = np.apply_along_axis(g_func,axis=1,arr=get_convolution(x,half_window_size=half_window_size))
y_conv = get_convolution(y,half_window_size=half_window_size)
y_mean = np.mean(y_conv,axis=1)
y_centered = y_conv - y_mean[:,None]
smoothed = np.sum(x_conv*y_centered,axis=1) / (half_window_size*2) + y_mean
fig,ax = plt.subplots(figsize=(10,6))
ax.plot(x, y, '-k')
ax.plot(x, smoothed, '-r')
Running the code, the output is:
UPDATE
In order to unify w with half_window_size, here is one possibility: the idea is to let the standard deviation of the Gaussian be 2*half_window_size (the code below uses len(xx), the window length, which is 2*half_window_size + 1).
def g_func(xx):
    std = len(xx)
    mean = np.mean(xx)
    return 1 / (std * np.sqrt(2 * np.pi)) * np.exp(-1 / 2 * ((xx - mean) / std) ** 2)

def get_convolution(array, half_window_size):
    array = np.concatenate((np.repeat(array[0], half_window_size),
                            array,
                            np.repeat(array[-1], half_window_size)))
    window_inds = [list(range(ind - half_window_size, ind + half_window_size + 1))
                   for ind in range(half_window_size, len(array) - half_window_size)]
    return np.take(array, window_inds)
xy = np.load('1D_data.npz')['arr_0']
x, y = xy[:, 0], xy[:, 1]
half_window_size = 4
x_conv = np.apply_along_axis(g_func,axis=1,arr=get_convolution(x,half_window_size=half_window_size))
y_conv = get_convolution(y,half_window_size=half_window_size)
y_mean = np.mean(y_conv,axis=1)
y_centered = y_conv - y_mean[:,None]
smoothed = np.sum(x_conv*y_centered,axis=1) / (half_window_size*2) + y_mean
fig,ax = plt.subplots(figsize=(10,6))
ax.plot(x, y, '-k')
ax.plot(x, smoothed, '-r')
I have an almost fully connected graph in Python with roughly 3k nodes and 9M edges. Each node in this graph is represented by a point in R^3, and each edge represents the distance between its two endpoints. I tried iterating through all of the edges and just plotting them with matplotlib using a 3D projection, something like this:
fig, ax = plt.subplots(ncols=len(network.positions) - 1, subplot_kw={'projection': '3d'})
for idx in range(len(network.positions) - 1):
    x = network.positions[idx][0, :].numpy()
    y = network.positions[idx][1, :].numpy()
    z = network.positions[idx][2, :].numpy()
    ax[idx].scatter(x, y, z, alpha=.5, s=.01, label="layer " + str(idx + 1), c='r')
    x = network.positions[idx + 1][0, :].numpy()
    y = network.positions[idx + 1][1, :].numpy()
    z = network.positions[idx + 1][2, :].numpy()
    ax[idx].scatter(x, y, z, alpha=.5, s=.01, label="layer " + str(idx + 2), c='b')
    for idx_1 in tqdm(range(int(network.positions[idx].shape[1]))):
        for idx_2 in range(int(network.positions[idx + 1].shape[1])):
            x_1, y_1, z_1 = network.positions[idx][:, idx_1]
            x_2, y_2, z_2 = network.positions[idx][:, idx_2]
            ax[idx].plot([x_1, x_2], [y_1, y_2], [z_1, z_2])
However, this is terribly inefficient because of the many calls to plt.plot. I need a way to do something similar but weight the color or transparency of each line by a corresponding weight.
Thanks,
Ameet
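One way to avoid the per-edge calls to plt.plot is to build all edges between two layers as one batch of segments and hand them to a single Line3DCollection, giving each segment its own RGBA color so the alpha channel carries the weight. Below is a minimal sketch of that idea, not taken from the original post: the plot_layer_edges helper and the weights array are hypothetical stand-ins, and each entry of network.positions is assumed to be a (3, N) array as in the code above.

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.art3d import Line3DCollection

def plot_layer_edges(ax, pos_a, pos_b, weights):
    """Draw every edge between two point clouds as one collection.

    pos_a: (3, N) array, pos_b: (3, M) array (same layout as network.positions);
    weights: assumed (N, M) array, used here only to set per-edge transparency.
    """
    n, m = pos_a.shape[1], pos_b.shape[1]
    # Build an (N*M, 2, 3) array of segments: every point in A paired with every point in B.
    a = np.broadcast_to(pos_a.T[:, None, :], (n, m, 3))
    b = np.broadcast_to(pos_b.T[None, :, :], (n, m, 3))
    segments = np.stack([a, b], axis=2).reshape(-1, 2, 3)

    # One RGBA color per edge; the normalized weight goes into the alpha channel.
    w = weights.ravel().astype(float)
    alpha = (w - w.min()) / (w.max() - w.min() + 1e-12)
    colors = np.zeros((len(w), 4))
    colors[:, 2] = 1.0      # blue lines
    colors[:, 3] = alpha    # transparency encodes the weight

    ax.add_collection3d(Line3DCollection(segments, colors=colors, linewidths=0.2))

# Hypothetical usage with random stand-ins for two consecutive layers:
rng = np.random.default_rng(0)
pos_a, pos_b = rng.random((3, 30)), rng.random((3, 40))
weights = rng.random((30, 40))

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(*pos_a, c='r', s=5)
ax.scatter(*pos_b, c='b', s=5)
plot_layer_edges(ax, pos_a, pos_b, weights)
plt.show()

With ~3k nodes per layer this still means millions of segments in memory, so thresholding to the strongest edges may be necessary, but the overhead of millions of separate plt.plot calls goes away.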
First of all, yes, I've searched and tried on my own, but I cannot find anything useful, and therefore I'm stuck. I want to plot contourf for a neural network in Keras.
# X = X axis data, Y = Y axis data, Z = predictions (prob of being 1 of each (X, Y)row data)
X.shape = (1701, 1)
Y.shape = (1701, 1)
Z.shape = (1701, )
First of all, I've seen that many code examples that use contourf order its elements from lowest to highest value. Is this necessary?
Second, as contourf user guide says,
X and Y must both be 2-D with the same shape as Z, or they must both be 1-D such that len(X) == M is the number of columns in Z and len(Y) == N is the number of rows in Z.
so my Z has to have Z.shape = (1701, 1701), or I have to reshape X and Y each into a 2-D array, right?
Which one would be the better option?
PS: I've tried to reshape to Z.shape = (1701, 1701), but I do not know how to produce that shape. The idea is to create a plot like the image in this link: decision boundary
EDIT
What I would like to know is how to reshape Z if Z is a length-1701 array of values with 0 <= value <= 1, i.e., from Z.shape = (1701,) to Z.shape = (1701, 1701).
Combine the X and Y into a single 2d feature vector X:
# Imports assumed for this snippet (standalone Keras; use tensorflow.keras if preferred)
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense

X = np.vstack((X.reshape((1701, )), Y.reshape((1701, )))).T # X.shape = (1701, 2)
n_input_dim = X.shape[1]
model = Sequential()
model.add(Dense(4, input_dim=n_input_dim, activation='elu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X, y, verbose=0, epochs=1000)
# Plot decision space
plt.figure(figsize=(4, 4), dpi=150)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.05))
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, alpha=0.4)
plt.scatter(X[:, 0], X[:, 1], c=y, s=20, edgecolor='k')
On sample data X, y = make_moons(100, noise=0.3, random_state=0) (make_moons comes from sklearn.datasets), the plot would be:
In an ML course I'm taking, I have 100 entries of data, and I'm using them in a perceptron algorithm.
What I want is to show a plot like this one.
As you can see above, we have the data represented by points in red and blue, and the different calculated lines that minimize the error. This is the output that I want. Here is my data and my code.
data.csv
0.78051,-0.063669,1
0.28774,0.29139,1
0.40714,0.17878,1
0.2923,0.4217,1
0.50922,0.35256,1
0.27785,0.10802,1
0.27527,0.33223,1
0.43999,0.31245,1
0.33557,0.42984,1
0.23448,0.24986,1
0.0084492,0.13658,1
0.12419,0.33595,1
0.25644,0.42624,1
0.4591,0.40426,1
0.44547,0.45117,1
0.42218,0.20118,1
0.49563,0.21445,1
0.30848,0.24306,1
0.39707,0.44438,1
0.32945,0.39217,1
0.40739,0.40271,1
0.3106,0.50702,1
0.49638,0.45384,1
0.10073,0.32053,1
0.69907,0.37307,1
0.29767,0.69648,1
0.15099,0.57341,1
0.16427,0.27759,1
0.33259,0.055964,1
0.53741,0.28637,1
0.19503,0.36879,1
0.40278,0.035148,1
0.21296,0.55169,1
0.48447,0.56991,1
0.25476,0.34596,1
0.21726,0.28641,1
0.67078,0.46538,1
0.3815,0.4622,1
0.53838,0.32774,1
0.4849,0.26071,1
0.37095,0.38809,1
0.54527,0.63911,1
0.32149,0.12007,1
0.42216,0.61666,1
0.10194,0.060408,1
0.15254,0.2168,1
0.45558,0.43769,1
0.28488,0.52142,1
0.27633,0.21264,1
0.39748,0.31902,1
0.5533,1,0
0.44274,0.59205,0
0.85176,0.6612,0
0.60436,0.86605,0
0.68243,0.48301,0
1,0.76815,0
0.72989,0.8107,0
0.67377,0.77975,0
0.78761,0.58177,0
0.71442,0.7668,0
0.49379,0.54226,0
0.78974,0.74233,0
0.67905,0.60921,0
0.6642,0.72519,0
0.79396,0.56789,0
0.70758,0.76022,0
0.59421,0.61857,0
0.49364,0.56224,0
0.77707,0.35025,0
0.79785,0.76921,0
0.70876,0.96764,0
0.69176,0.60865,0
0.66408,0.92075,0
0.65973,0.66666,0
0.64574,0.56845,0
0.89639,0.7085,0
0.85476,0.63167,0
0.62091,0.80424,0
0.79057,0.56108,0
0.58935,0.71582,0
0.56846,0.7406,0
0.65912,0.71548,0
0.70938,0.74041,0
0.59154,0.62927,0
0.45829,0.4641,0
0.79982,0.74847,0
0.60974,0.54757,0
0.68127,0.86985,0
0.76694,0.64736,0
0.69048,0.83058,0
0.68122,0.96541,0
0.73229,0.64245,0
0.76145,0.60138,0
0.58985,0.86955,0
0.73145,0.74516,0
0.77029,0.7014,0
0.73156,0.71782,0
0.44556,0.57991,0
0.85275,0.85987,0
0.51912,0.62359,0
And now this is my code. The first part:
import numpy as np
import pandas as pd
# Setting the random seed, feel free to change it and see different solutions.
np.random.seed(42)
import matplotlib.pyplot as plt
def stepFunction(t):
    return 1 if t >= 0 else 0

def prediction(X, W, b):
    return stepFunction((np.matmul(X, W) + b)[0])
# TODO: Fill in the code below to implement the perceptron trick.
# INPUTS
# data X, the labels y,
# the weights W (as an array), and the bias b.
# The function should update the weights and bias W, b, according to the
# perceptron algorithm, and return W and b.
def perceptronStep(X, y, W, b, learn_rate=0.01):
    for i in range(len(X)):
        y_hat = prediction(X[i], W, b)
        if y[i] - y_hat == 1:
            W[0] += X[i][0] * learn_rate
            W[1] += X[i][1] * learn_rate
            b += learn_rate
        elif y[i] - y_hat == -1:
            W[0] -= X[i][0] * learn_rate
            W[1] -= X[i][1] * learn_rate
            b -= learn_rate
    return W, b
# This function runs the perceptron algorithm repeatedly on the dataset,
# and returns a few of the boundary lines obtained in the iterations,
# for plotting purposes.
# Feel free to play with the learning rate and the num_epochs,
# and see your results plotted below.
def trainPerceptronAlgorithm(X, y, learn_rate=0.01, num_epochs=25):
    x_min, x_max = min(X.T[0]), max(X.T[0])
    y_min, y_max = min(X.T[1]), max(X.T[1])
    W = np.array(np.random.rand(2, 1))
    b = np.random.rand(1)[0] + x_max
    # These are the solution lines that get plotted below.
    boundary_lines = []
    for i in range(num_epochs):
        # In each epoch, we apply the perceptron step.
        W, b = perceptronStep(X, y, W, b, learn_rate)
        # Here I have a doubt. Why, if y = W0*x1 + W1*x2 + b,
        # we can get x2 = y/W1 - (W0*x1)/W1 - b/W1.
        # If we remove y/W1 we just get the intercept and slope,
        # but why are we not using the last term y/W1?
        boundary_lines.append((-W[0] / W[1], -b / W[1]))
    return boundary_lines
# Get data and plot the points
data = pd.read_csv('data.csv', header = None)
X = data.iloc[:, :2].values
y = data.iloc[:, -1].values
x1 = X[:, 0]
x2 = X[:, 1]
color = ['red' if value == 1 else 'blue' for value in y]
plt.scatter(x1, x2, marker='o', color=color)
plt.xlabel('X1 input feature')
plt.ylabel('X2 input feature')
plt.title('Perceptron regression for X1, X2')
plt.show()
When you run this code you correctly get:
So now I want to plot, in the same figure, the lines that represent the best function for each iteration. For that, I commented out the last line above (plt.show()) and did:
# So now let's plot the lines that represent the best function for each iteration
boundary_lines = trainPerceptronAlgorithm(X, y)
x_lin = np.linspace(0, 1, 100)
for line in boundary_lines:
    Θo, Θ1 = line
    Θ1 = Θ1[0]
    Θo = Θo[0]
    # TODO: The equation of the boundary line is
    # y = W0*x1 + W1*x2 + b
    # so we can get x2 = y/W1 - (W0*x1)/W1 - b/W1.
    # If we remove y/W1 we just get the intercept and slope:
    # boundary_lines.append((-W[0] / W[1], -b / W[1]))
    # plt.axes([-0.5, -0.5, 1.5, 1.5])
    plt.plot(x_lin, (Θ1 * x_lin / Θo))
plt.draw()
plt.pause(5)
input("Press enter to continue")
plt.close()
But that does not get me the expected result.
Why doesn't this get the expected result?
The mistake is in plt.plot(x_lin, (Θ1 * x_lin / Θo)) where instead of Θ1 * x_lin / Θo you should have Θo * x_lin + Θ1.
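For reference, here is the short derivation behind that formula, written in the question's own notation: the decision boundary is where the score is zero, i.e. 0 = W0*x1 + W1*x2 + b, so x2 = -(W0/W1)*x1 - b/W1. This also resolves the doubt in the code comments: the y/W1 term disappears because on the boundary y = 0. Since trainPerceptronAlgorithm appends (-W[0]/W[1], -b/W[1]), Θo is the slope and Θ1 is the intercept, giving the line Θo * x_lin + Θ1.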
fig, ax = plt.subplots(1, 1, figsize=(8, 5))
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.scatter(x1, x2, marker='o', color=color)
for i, line in enumerate(boundary_lines):
    Θo, Θ1 = line
    if i == len(boundary_lines) - 1:
        c, ls, lw = 'k', '-', 2
    else:
        c, ls, lw = 'g', '--', 1.5
    ax.plot(x_lin, Θo * x_lin + Θ1, c=c, ls=ls, lw=lw)
plt.show()
Result:
I am trying to follow the book by Daume:
http://ciml.info/dl/v0_99/ciml-v0_99-ch04.pdf (page 43).
I want to fit a vanilla perceptron model in Python using NumPy, without using the scikit-learn library.
The algorithm is given in the book.
How can we implement this model in practice?
So far I have learned how to read the data and labels:
def read_data(infile):
    data = np.loadtxt(infile)
    X = data[:, :-1]
    Y = data[:, -1]
    return X, Y
Any help will be appreciated!
One way I figured out is this:
(Better ideas are always welcome!!)
#!python
# -*- coding: utf-8 -*-
"""
Perceptron Algorithm.

#author: Bhishan Poudel
#date: Oct 31, 2017
"""
# Imports
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import norm
import os, shutil

np.random.seed(100)


def read_data(infile):
    data = np.loadtxt(infile)
    X = data[:, :-1]
    Y = data[:, -1]
    return X, Y
def plot_boundary(X, Y, w, epoch):
    try:
        plt.style.use('seaborn-darkgrid')
        # plt.style.use('ggplot')
        # plt.style.available
    except:
        pass

    # Get data for two classes
    idxN = np.where(np.array(Y) == -1)
    idxP = np.where(np.array(Y) == 1)
    XN = X[idxN]
    XP = X[idxP]

    # plot two classes
    plt.scatter(XN[:, 0], XN[:, 1], c='b', marker='_', label="Negative class")
    plt.scatter(XP[:, 0], XP[:, 1], c='r', marker='+', label="Positive class")
    # plt.plot(XN[:,0],XN[:,1],'b_', markersize=8, label="Negative class")
    # plt.plot(XP[:,0],XP[:,1],'r+', markersize=8, label="Positive class")
    plt.title("Perceptron Algorithm iteration: {}".format(epoch))

    # plot decision boundary orthogonal to w
    # w = [w0, w1, w2]; the first term w0 is the bias (it was prepended in perceptron_sgd).
    if len(w) == 3:
        a = -w[0] / w[1]  # x-intercept of w0 + w1*x + w2*y = 0
        b = -w[0] / w[2]  # y-intercept
        xx = [0, a]
        yy = [b, 0]
        plt.plot(xx, yy, '--g', label='Decision Boundary')

    if len(w) == 2:
        x2 = [w[0], w[1], -w[1], w[0]]
        x3 = [w[0], w[1], w[1], -w[0]]
        x2x3 = np.array([x2, x3])
        XX, YY, U, V = list(zip(*x2x3))
        ax = plt.gca()
        ax.quiver(XX, YY, U, V, scale=1, color='g')

    # Add labels
    plt.xlabel('X')
    plt.ylabel('Y')

    # limits
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)

    # lines from origin
    plt.axhline(y=0, color='k', linestyle='--', alpha=0.2)
    plt.axvline(x=0, color='k', linestyle='--', alpha=0.2)
    plt.grid(True)
    plt.legend(loc=1)
    plt.show()

    # Always close the plot
    plt.close()
def predict(X, w):
    return np.sign(np.dot(X, w))
def plot_contour(X, Y, w, mesh_stepsize):
    try:
        plt.style.use('seaborn-darkgrid')
        # plt.style.use('ggplot')
        # plt.style.available
    except:
        pass

    # Get data for two classes
    idxN = np.where(np.array(Y) == -1)
    idxP = np.where(np.array(Y) == 1)
    XN = X[idxN]
    XP = X[idxP]

    # plot two classes with + and - sign
    fig, ax = plt.subplots()
    ax.set_title('Perceptron Algorithm')
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.plot(XN[:, 0], XN[:, 1], 'b_', markersize=8, label="Negative class")
    plt.plot(XP[:, 0], XP[:, 1], 'y+', markersize=8, label="Positive class")
    plt.legend()

    # create a mesh for the contour plot
    # We first make a meshgrid (rectangle full of pts) from xmin to xmax and ymin to ymax.
    # We then predict the label for each grid point and color it.
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1

    # Get 2D array for grid axes xx and yy (shape = 700, 1000)
    # xx has 700 rows.
    # xx[0] has 1000 values.
    xx, yy = np.meshgrid(np.arange(x_min, x_max, mesh_stepsize),
                         np.arange(y_min, y_max, mesh_stepsize))

    # Get 1d array for x and y axes
    xxr = xx.ravel()  # shape (700000,)
    yyr = yy.ravel()  # shape (700000,)

    # ones vector
    # ones = np.ones(xxr.shape[0])  # shape (700000,)
    ones = np.ones(len(xxr))  # shape (700000,)

    # Predict the score
    Xvals = np.c_[ones, xxr, yyr]
    scores = predict(Xvals, w)

    # Plot contour plot
    scores = scores.reshape(xx.shape)
    ax.contourf(xx, yy, scores, cmap=plt.cm.Paired)
    # print("xx.shape = {}".format(xx.shape))                # (700, 1000)
    # print("scores.shape = {}".format(scores.shape))        # (700, 1000)
    # print("scores[0].shape = {}".format(scores[0].shape))  # (1000,)

    # show the plot
    plt.savefig("Perceptron.png")
    plt.show()
    plt.close()
def perceptron_sgd(X, Y, epochs):
    """
    X: data matrix without bias.
    Y: target
    """
    # add bias to X's first column
    ones = np.ones(X.shape[0]).reshape(X.shape[0], 1)
    X1 = np.append(ones, X, axis=1)
    w = np.zeros(X1.shape[1])
    final_iter = epochs

    for epoch in range(epochs):
        print("\n")
        print("epoch: {} {}".format(epoch, '-' * 30))
        misclassified = 0
        for i, x in enumerate(X1):
            y = Y[i]
            h = np.dot(x, w) * y
            if h <= 0:
                w = w + x * y
                misclassified += 1
                print('misclassified? yes  w: {}'.format(w))
            else:
                print('misclassified? no   w: {}'.format(w))

        if misclassified == 0:
            final_iter = epoch
            break

    return w, final_iter
def gen_lin_separable_data(data, data_tr, data_ts, data_size):
    mean1 = np.array([0, 2])
    mean2 = np.array([2, 0])
    cov = np.array([[0.8, 0.6], [0.6, 0.8]])
    X1 = np.random.multivariate_normal(mean1, cov, size=int(data_size / 2))
    y1 = np.ones(len(X1))
    X2 = np.random.multivariate_normal(mean2, cov, size=int(data_size / 2))
    y2 = np.ones(len(X2)) * -1

    with open(data, 'w') as fo, \
         open(data_tr, 'w') as fo1, \
         open(data_ts, 'w') as fo2:
        for i in range(len(X1)):
            line = '{:5.2f} {:5.2f} {:5.0f} \n'.format(X1[i][0], X1[i][1], y1[i])
            line2 = '{:5.2f} {:5.2f} {:5.0f} \n'.format(X2[i][0], X2[i][1], y2[i])
            fo.write(line)
            fo.write(line2)
        for i in range(len(X1) - 20):
            line = '{:5.2f} {:5.2f} {:5.0f} \n'.format(X1[i][0], X1[i][1], y1[i])
            line2 = '{:5.2f} {:5.2f} {:5.0f} \n'.format(X2[i][0], X2[i][1], y2[i])
            fo1.write(line)
            fo1.write(line2)
        for i in range((len(X1) - 20), len(X1)):
            line = '{:5.2f} {:5.2f} {:5.0f} \n'.format(X1[i][0], X1[i][1], y1[i])
            line2 = '{:5.2f} {:5.2f} {:5.0f} \n'.format(X2[i][0], X2[i][1], y2[i])
            fo2.write(line)
            fo2.write(line2)
def main():
    """Run main function."""
    # generate linearly separable data
    data = 'data.txt'
    data_tr = 'data_train.txt'
    data_ts = 'data_test.txt'
    data_size = 200
    gen_lin_separable_data(data, data_tr, data_ts, data_size)

    # read data
    epochs = 20
    X_train, Y_train = read_data(data_tr)
    X_test, Y_test = read_data(data_ts)

    # fit perceptron
    w, final_iter = perceptron_sgd(X_train, Y_train, epochs)
    print('w = ', w)
    plot_boundary(X_test, Y_test, w, final_iter)

    # contour plot
    mesh_stepsize = 0.01
    plot_contour(X_test, Y_test, w, mesh_stepsize)


if __name__ == "__main__":
    main()
The decision boundary looks like this:
There is an implementation of the perceptron in my recent repo: NP_ML. The example result is: