I'm plotting contours to visually represent my neural network's classification of a given dataset. I've noticed black dots appearing as the model trains through its epochs. What do they represent?
The code is as follows (I've skipped explaining how the neural network was constructed):
# Plotting Function
def plot_model(sess, model, xy, labels, feature_lambda, title=''):
    from pandas import DataFrame
    xx, yy = np.meshgrid(np.linspace(-1.2, 1.2, 400), np.linspace(-1.2, 1.2, 400))
    prediction = sess.run(model, feed_dict={X: np.array([feature_lambda(xxval, yyval) for xxval, yyval in zip(xx.flatten(), yy.flatten())])})
    Z = prediction.reshape(xx.shape)
    df = DataFrame(dict(x=xy[:, 0], y=xy[:, 1], label=labels.flatten()))
    markers = {0: 'bs', 1: 'r^'}
    _, ax = plt.subplots(figsize=(5, 5))
    cs = ax.contourf(xx, yy, Z, 20, cmap='coolwarm', alpha=.9)
    ax.clabel(cs, colors='gray')
    cs = ax.contour(xx, yy, Z, cmap='gray', levels=[0, 0.5, 1.0], linestyles='--', linewidths=2)
    ax.clabel(cs, colors='k')
    predictions = sess.run(model, feed_dict={X: xy})
    for k, xy0 in df[['x', 'y']].iterrows():
        x0, y0 = xy0.values
        plt.plot(x0, y0, markers[labels[k][0]], mec='k')
    ax.set_xlim([-1.1, 1.1])
    ax.set_ylim([-1.1, 1.1])
    plt.grid(linestyle='--', alpha=0.5)
    plt.title(title)
    plt.show()
# Generate data
xy, labels = make_blobs(n_samples=200, center_box=(-1, 1), centers=6, cluster_std=0.1, random_state=20)
labels = labels % 2
labels = labels.reshape(-1, 1)
ftr_fn = lambda x, y: [x, y]
features = np.array([ftr_fn(xval, yval) for xval, yval in xy])
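The network construction itself is omitted; roughly, the training loop below assumes placeholders X and y, a prediction op yhat, a loss, and a train op. A minimal hypothetical stand-in (the layer size, activation, and optimizer here are placeholders, not the actual ones):
# hypothetical stand-in for the omitted graph; only the names X, y, yhat,
# loss, train and MaxEpoch matter to the loop below
MaxEpoch = 25  # assumed from the final 'Epoch 25' plot
X = tf.placeholder(tf.float32, [None, 2], name='X')
y = tf.placeholder(tf.float32, [None, 1], name='y')
W1 = tf.Variable(tf.random_normal([2, 8]))  # 8 hidden units: an arbitrary choice
b1 = tf.Variable(tf.zeros([8]))
hidden = tf.nn.tanh(tf.matmul(X, W1) + b1)
W2 = tf.Variable(tf.random_normal([8, 1]))
b2 = tf.Variable(tf.zeros([1]))
yhat = tf.sigmoid(tf.matmul(hidden, W2) + b2)  # passed to plot_model as 'model'
loss = tf.losses.log_loss(labels=y, predictions=yhat)
train = tf.train.GradientDescentOptimizer(0.5).minimize(loss)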
# Stochastic Method
batch_size = 25
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for epoch in range(MaxEpoch):
    if epoch % 5 == 0:
        curr_loss = sess.run(loss, feed_dict={X: features, y: labels})
        plot_model(sess, yhat, xy, labels, ftr_fn, 'Epoch {} \n (loss={:.3f})'.format(epoch, curr_loss))
    for X_batch, y_batch in generate_batches(batch_size, features_shuffled, labels_shuffled):
        sess.run(train, feed_dict={X: X_batch, y: y_batch})
loss_final = sess.run(loss, feed_dict={X: features, y: labels})
plot_model(sess, yhat, xy, labels, ftr_fn, 'Epoch 25 \n (loss={:.3f})'.format(loss_final))
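The shuffling and the batch generator were also omitted; a minimal sketch of what the loop assumes (hypothetical, matching the calls above):
def generate_batches(batch_size, features, labels):
    # yield consecutive (features, labels) mini-batches
    for start in range(0, len(features), batch_size):
        yield features[start:start + batch_size], labels[start:start + batch_size]

shuffle_idx = np.random.permutation(len(features))  # one shuffled copy of the data
features_shuffled = features[shuffle_idx]
labels_shuffled = labels[shuffle_idx]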
[Figures: the contour plot at Epoch 0; at Epoch 10, with black dots in the bottom right corner; and at Epoch 25, with black dots along the bottom.]
Related
I have generated a balanced dataset of 4000 examples, 2000 for the negative class and 2000 for the positive one. Then I built a neural net with a single hidden layer of 3 neurons with a ReLU activation function, and an output layer with a sigmoid. The cost function is a standard cross-entropy function and I chose Adam as the optimizer. Using minibatches of 15 examples, after 1000 epochs of training the final accuracy is 96.37%, so I am assuming that the model is doing well on the test set. But when I want to display the decision boundary, this is what I get:
I cannot figure out whether the problem is a code error or the model just needs more training. The script I'm using for this:
# implement a neural network that finds a decision boundary under a
# constraint on the second hidden layer with tensorflow
import numpy as np
from sklearn.utils import shuffle
from sklearn.preprocessing import normalize
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tf_utils import random_mini_batches
import matplotlib.pyplot as plt
def generate_dataset():
    np.random.seed(2)
    # positive class samples
    d1_x = np.random.normal(5, 10, 1000)
    d1_y = np.random.normal(5, 2, 1000)
    d2_x = np.random.normal(40, 20, 1000)
    d2_y = np.random.normal(2, 1, 1000)
    # negative class samples
    d3_x = np.random.normal(60, 5, 2000)
    d3_y = np.random.normal(10, 1, 2000)
    plt.scatter(d1_x, d1_y, color='b')
    plt.scatter(d2_x, d2_y, color='b')
    plt.scatter(d3_x, d3_y, color='r')
    Y = np.zeros((4000, 1))
    d_x = np.concatenate([d1_x, d2_x, d3_x])
    d_y = np.concatenate([d1_y, d2_y, d3_y])
    d_x = d_x.reshape(d_x.shape[0], 1)
    d_y = d_y.reshape(d_y.shape[0], 1)
    X = np.concatenate([d_x, d_y], axis=1)
    Y[2000:] = 1
    return X, Y
# define a tensorflow model 5-3-1 with two hidden layers and a scalar output
costs = []
print_cost = True
learning_rate = .0009
minibatch_size = 15
num_epochs = 1000
XX, YY = generate_dataset()
XX, YY = shuffle(XX, YY)
X_norm = normalize(XX)
X_train, X_test, y_train, y_test = train_test_split(X_norm, YY, test_size=0.2, random_state=42)
X_train = np.transpose(X_train)
y_train = np.transpose(y_train)
X_test = np.transpose(X_test)
y_test = np.transpose(y_test)
# define train and test sets
m = XX.shape[1] # input dimension
n = YY.shape[1] # output dimension
X = tf.placeholder(tf.float32, shape = [m, None], name = 'X')
y = tf.placeholder(tf.float32, shape = [n, None], name = 'y')
# model parameters
n1 = 3 # output dimension of the first hidden layer
#n2 = 4 # output dimension of the second hidden layer
#n3 = 2
W1 = tf.get_variable("W1", [n1, m], initializer=tf.contrib.layers.xavier_initializer(seed=1))
b1 = tf.get_variable("b1", [n1, 1], initializer=tf.zeros_initializer)
#W2 = tf.get_variable("W2", [n2, n1], initializer=tf.contrib.layers.xavier_initializer(seed=1))
#b2 = tf.get_variable("b2", [n2, 1], initializer=tf.zeros_initializer)
#W3 = tf.get_variable("W3", [n3, n2], initializer=tf.contrib.layers.xavier_initializer(seed=1))
#b3 = tf.get_variable("b3", [n3, 1], initializer=tf.zeros_initializer)
W4 = tf.get_variable("W4", [n, n1], initializer=tf.contrib.layers.xavier_initializer(seed=1))
b4 = tf.get_variable("b4", [n, 1], initializer=tf.zeros_initializer)
# forward propagation
z1 = tf.add(tf.matmul(W1, X), b1)
a1 = tf.nn.relu(z1)
#z2 = tf.add(tf.matmul(W2, a1), b2)
#a2 = tf.nn.relu(z2)
#z3 = tf.add(tf.matmul(W3, a2), b3)
#a3 = tf.nn.relu(z3)
z4 = tf.add(tf.matmul(W4, a1), b4)
pred = tf.nn.sigmoid(z4)
# cost function
cost = tf.reduce_mean(tf.losses.log_loss(labels=y, predictions=pred))
# the probability estimate given by the model is what goes into the loss formula, not the net input z
# ADAM optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# metrics
correct_prediction = tf.less_equal(tf.abs(pred - y), 0.5)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
init = tf.global_variables_initializer()
with tf.Session() as sess:
    seed = 1
    sess.run(init)
    for epoch in range(num_epochs):
        epoch_cost = 0
        seed += 1
        num_minibatches = int(X_train.shape[0] / minibatch_size)
        minibatches = random_mini_batches(X_train, y_train, minibatch_size, seed)
        for minibatch in minibatches:
            (minibatch_X, minibatch_Y) = minibatch
            _, minibatch_cost = sess.run([optimizer, cost], feed_dict={X: minibatch_X, y: minibatch_Y})
            epoch_cost += minibatch_cost / minibatch_size
        # Print the cost every epoch
        if print_cost == True and epoch % 100 == 0:
            print("Cost after epoch %i: %f" % (epoch, epoch_cost))
        if print_cost == True and epoch % minibatch_size == 0:
            costs.append(epoch_cost)
    #plt.plot(costs)
    #plt.show()
    cp, val_accuracy = sess.run([correct_prediction, accuracy], feed_dict={X: X_test, y: y_test})
    # plot the cost
    # plt.plot(np.squeeze(costs))
    # plt.ylabel('cost')
    # plt.xlabel('iterations (per fives)')
    # plt.title("Learning rate =" + str(learning_rate))
    # plt.show()
    cmap = plt.get_cmap('Paired')
    # Define region of interest by data limits
    xmin, xmax = min(XX[:, 0]) - 1, max(XX[:, 0]) + 1
    ymin, ymax = min(XX[:, 1]) - 1, max(XX[:, 1]) + 1
    steps = 100
    x_span = np.linspace(xmin, xmax, steps)
    y_span = np.linspace(ymin, ymax, steps)
    xx, yy = np.meshgrid(x_span, y_span)
    A = np.concatenate([[xx.ravel()], [yy.ravel()]], axis=0)
    A = normalize(A, axis=0)
    # Make predictions across region of interest
    predictions = sess.run(pred, feed_dict={X: A})
    # Plot decision boundary in region of interest
    z = predictions.reshape(xx.shape)
    plt.contourf(xx, yy, z, cmap=cmap, alpha=.5)
    plt.show()
    # Get predicted labels on training data and plot
    #train_labels = model.predict(X)
    #ax.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap, lw=0)
I have trained a logistic model whose decision boundary looks like this:
using the following code that I found online:
x_min, x_max = xbatch[:, 0].min() - .5, xbatch[:, 0].max() + .5
y_min, y_max = xbatch[:, 1].min() - .5, xbatch[:, 1].max() + .5
h = 0.05
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
X = np.vstack( ( xx.reshape(1, np.product(xx.shape)), yy.reshape(1, np.product(yy.shape)) ) ).T
# Predict the function value for the whole grid
z1 = np.dot(X, w1_pred)+b1_pred
h1 = 1 / (1 + np.exp(-z1))
z2 = np.dot(h1, w2_pred)+b2_pred
y_hat = 1 / (1 + np.exp(-z2))
pred = np.round(y_hat)
Z = pred.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z)
plt.scatter(xbatch[:, 0], xbatch[:, 1], c=ybatch, s=40, edgecolors="grey", alpha=0.9)
My question is this:
Is there a way to plot the decision line without meshgrid or contour?
I would like to just plot the wavy sigmoid decision line on the graph, without the colours or contours, so it looks like this:
Using contour with levels=[0.5] on the sigmoid output should work.
A synthetic training set:
train_X = np.random.multivariate_normal([2.2, 2.2], [[0.1,0],[0,0.1]], 150)
train_Y = np.zeros(150)
train_X = np.concatenate((train_X, np.random.multivariate_normal([1.4, 1.3], [[0.05,0],[0,0.3]], 50)), axis=0)
train_Y = np.concatenate((train_Y, np.ones(50)))
train_X = np.concatenate((train_X, np.random.multivariate_normal([1.3, 2.9], [[0.05,0],[0,0.05]], 50)), axis=0)
train_Y = np.concatenate((train_Y, np.ones(50)))
train_X = np.concatenate((train_X, np.random.multivariate_normal([2.5, 0.95], [[0.1,0],[0,0.1]], 50)), axis=0)
train_Y = np.concatenate((train_Y, np.ones(50)))
An example model:
x = tf.placeholder(tf.float32, [None, 2])
y = tf.placeholder(tf.float32, [None,1])
#Input to hidden units
w_i_h = tf.Variable(tf.truncated_normal([2, 2],mean=0, stddev=0.1))
b_i_h = tf.Variable(tf.zeros([2]))
hidden = tf.sigmoid(tf.matmul(x, w_i_h) + b_i_h)
#hidden to output
w_h_o = tf.Variable(tf.truncated_normal([2, 1],mean=0, stddev=0.1))
b_h_o = tf.Variable(tf.zeros([1]))
logits = tf.sigmoid(tf.matmul(hidden, w_h_o) + b_h_o)
cost = tf.reduce_mean(tf.square(logits-y))
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(cost)
correct_prediction = tf.equal(tf.sign(logits-0.5), tf.sign(y-0.5))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
#Initialize all variables
init = tf.global_variables_initializer()
#Launch the graph
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(3000):
        _, c = sess.run([optimizer, cost], feed_dict={x: train_X, y: np.reshape(train_Y, (train_Y.shape[0], 1))})
        if epoch % 1000 == 0:
            print('Epoch: %d' % (epoch+1), 'cost = {:0.4f}'.format(c), end='\r')
    acc = sess.run([accuracy], feed_dict={x: train_X, y: np.reshape(train_Y, (train_Y.shape[0], 1))})
    print('\n Accuracy:', acc)
    xx, yy = np.mgrid[0:3.5:0.1, 0:3.5:0.1]
    grid = np.c_[xx.ravel(), yy.ravel()]
    pred_1 = sess.run([logits], feed_dict={x: grid})
Z = np.array(pred_1).reshape(xx.shape)
plt.contour(xx, yy, Z, levels=[0.5], cmap='gray')
plt.scatter(train_X[:,0], train_X[:,1], s=20, c=train_Y, cmap='jet', vmin=0, vmax=1)
plt.show()
The output:
I am using TensorFlow to do a linear regression, and I am facing a problem:
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (8,6)
data = pd.read_csv('./data.csv')
xs = data["A"][:100]
ys = data["B"][:100]
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')
W = tf.Variable(tf.random_normal([1]),name = 'weight')
b = tf.Variable(tf.random_normal([1]),name = 'bias')
Y_pred = tf.add(tf.multiply(X,W), b)
sample_num = xs.shape[0]
loss = tf.reduce_sum(tf.pow(Y_pred - Y,2))/sample_num
learning_rate = 0.0001
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
n_samples = xs.shape[0]
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(100):
        for x, y in zip(xs, ys):
            _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
    W, b = sess.run([W, b])
plt.plot(xs, ys, 'bo', label='Real data')
plt.plot(xs, xs*W + b, 'r', label='Predicted data')
plt.legend()
plt.show()
The data.csv is here.
The plot is diametrically opposed to what I expected:
So, what is the problem? I am a beginner with Python and TensorFlow, and I just can't figure it out.
As Nipun mentioned, try AdamOptimizer instead of GradientDescentOptimizer.
You will often find that AdamOptimizer is a better optimizer than GradientDescentOptimizer and reaches the minimum much faster.
It does so by adapting the learning rate instead of keeping it constant (0.0001 in your case).
Also, the more epochs you train for, the better the model (leaving over-fitting aside).
Since your learning rate and number of epochs are too small, your regression model hasn't converged. You may therefore need to increase the learning rate and use tf.train.AdamOptimizer.
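Concretely, the optimizer change boils down to one line (shown here with the learning rate used in the full example below):
# Adam adapts the per-parameter step size, unlike plain gradient descent
optimizer = tf.train.AdamOptimizer(learning_rate=2).minimize(loss)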
Here I set the learning rate to 2, epochs=10000 and got the following graph.
Here is the code, with comments where necessary.
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (8, 6)
data = pd.read_csv('./data.csv')
xs = data["A"][:100]
ys = data["B"][:100]
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')
W = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
Y_pred = tf.add(tf.multiply(X, W), b)
loss = tf.reduce_mean(tf.pow(Y_pred - Y, 2))
learning_rate = 2 #increase the learning rate
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)#use the AdamOptimizer
BATCH_SIZE = 8 #Batch Size define here
n_samples = xs.shape[0]
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(10000): # increase the number of epochs
        # mini-batch gradient descent
        for start, end in zip(range(0, n_samples, BATCH_SIZE),
                              range(BATCH_SIZE, n_samples + 1, BATCH_SIZE)):
            _, l = sess.run([optimizer, loss], feed_dict={X: xs[start:end], Y: ys[start:end]})
    prediction = sess.run(Y_pred, feed_dict={X: xs})
    #W, b = sess.run([W, b])
plt.plot(xs, ys, 'bo', label='Real data')
plt.plot(xs, prediction, 'r', label='Predicted data')
plt.legend()
plt.show()
Also, you can use mini-batch gradient descent to accelerate convergence, as in the code above.
Moreover, you can increase the number of epochs and the learning rate further to get an optimal result.
Hope this helps.
I am trying to create a graph showing the correlation between mini batch accuracy and validation accuracy of a neural net.
But instead, I have a crazy graph that is flickering at a super high frequency and is zoomed in on a very small portion of the graph.
Here is my code:
num_nodes=1024
batch_size = 128
beta = 0.01
def animate(i):
    graph_data = open('NeuralNetData.txt','r').read()
    lines = graph_data.split('\n')
    xs = []
    ys = []
    for line in lines:
        if len(line) > 1:
            x, y = line.split(',')
            xs.append(x)
            ys.append(y)
    ax1.clear()
    ax1.plot(xs, ys, label='validation accuracy')
    ax1.legend(loc='lower right')
    ax1.set_ylabel("Accuracy(%)", fontsize=15)
    ax1.set_xlabel("Images Seen", fontsize=15)
    ax1.set_title("Neural Network Accuracy Data\nStochastic Gradient Descent", fontsize=10)
    plt.show()
def animate2(i):
    graph_data = open('NeuralNetData2.txt','r').read()
    lines = graph_data.split('\n')
    xs = []
    ys = []
    for line in lines:
        if len(line) > 1:
            x, y = line.split(',')
            xs.append(x)
            ys.append(y)
    ax1.plot(xs, ys, label='mini-batch accuracy')
    ax1.legend(loc='lower right')
    plt.tight_layout()
    plt.show()
style.use('fivethirtyeight')
#Creating Graph
fig = plt.figure(figsize=(50,50))
ax1 = fig.add_subplot(1,1,1)
#1 hidden layer using RELUs and trying regularization techniques
with graph.as_default():
    # Input data. For the training data, we use a placeholder that will be fed
    # at run time with a training minibatch.
    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))
    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
    tf_valid_dataset = tf.constant(valid_dataset)
    tf_test_dataset = tf.constant(test_dataset)
    # Variables.
    weights_1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_nodes]))
    biases_1 = tf.Variable(tf.zeros([num_nodes]))
    weights_2 = tf.Variable(tf.truncated_normal([num_nodes, num_labels]))
    biases_2 = tf.Variable(tf.zeros([num_labels]))
    # Training computation.
    logits_1 = tf.matmul(tf_train_dataset, weights_1) + biases_1
    relu_layer = tf.nn.relu(logits_1)
    logits_2 = tf.matmul(relu_layer, weights_2) + biases_2
    # Normal loss function
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_2, labels=tf_train_labels))
    # Loss function with L2 regularization with beta=0.01
    regularizers = tf.nn.l2_loss(weights_1) + tf.nn.l2_loss(weights_2)
    loss = tf.reduce_mean(loss + beta * regularizers)
    # Optimizer.
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
    # Predictions for the training data
    train_prediction = tf.nn.softmax(logits_2)
    # Predictions for validation data
    logits_1 = tf.matmul(tf_valid_dataset, weights_1) + biases_1
    relu_layer = tf.nn.relu(logits_1)
    logits_2 = tf.matmul(relu_layer, weights_2) + biases_2
    valid_prediction = tf.nn.softmax(logits_2)
    # Predictions for test data
    logits_1 = tf.matmul(tf_test_dataset, weights_1) + biases_1
    relu_layer = tf.nn.relu(logits_1)
    logits_2 = tf.matmul(relu_layer, weights_2) + biases_2
    test_prediction = tf.nn.softmax(logits_2)
num_steps = 3001
open("NeuralNetData.txt","w").close()
open("NeuralNetData.txt","a+")
open("NeuralNetData2.txt","w+").close()
open("NeuralNetData2.txt","a+")
with tf.Session(graph=graph) as session:
    tf.global_variables_initializer().run()
    print("Initialized")
    for step in range(num_steps):
        f = open("NeuralNetData.txt", "a")
        t = open("NeuralNetData2.txt", "a")
        # Pick an offset within the training data, which has been randomized.
        # Note: we could use better randomization across epochs.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        images_seen = step * batch_size
        # Generate a minibatch.
        batch_data = train_dataset[offset:(offset + batch_size), :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        # Prepare a dictionary telling the session where to feed the minibatch.
        # The key of the dictionary is the placeholder node of the graph to be fed,
        # and the value is the numpy array to feed to it.
        feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
        _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (images_seen % 1000 == 0):
            print("Minibatch loss at step {}: {}".format(step, l))
            print("Minibatch accuracy: {:.1f}".format(accuracy(predictions, batch_labels)))
            print("Validation accuracy: {:.1f}".format(accuracy(valid_prediction.eval(), valid_labels)))
            x = str(images_seen)
            y = str(accuracy(valid_prediction.eval(), valid_labels))
            f.write(x + ',' + y + '\n')
            f.close()
            r = str(accuracy(predictions, batch_labels))
            t.write(x + ',' + r + '\n')
            t.close()
        ani = animation.FuncAnimation(fig, animate, interval=1000)
        ani2 = animation.FuncAnimation(fig, animate2, interval=1000)
    print("Test accuracy: {:.1f}".format(accuracy(test_prediction.eval(), test_labels)))
First, don't call plt.show() inside an updating function that is called by FuncAnimation. Instead, it should probably be called exactly once, at the end of the script.
Second, it seems you are using two different FuncAnimations which work on the same axes (ax1). One of those clears the axes. So what may happen is that the plot is updated by one function while it is cleared by the other - the outcome is probably close to chaos.
Third, you are creating 6002 FuncAnimations instead of only one or two, because they are created inside the training loop (3001 steps, two animations per step). Each of them will operate on the same axes. So if the above already produced chaos, this will produce it 6002 times over.
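Putting those three points together, a minimal sketch of the intended structure (the file names and labels are taken from the code above; the rest is illustrative):
import matplotlib.pyplot as plt
import matplotlib.animation as animation

fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)

def read_xy(path):
    # parse the "x,y" lines the training loop appends
    xs, ys = [], []
    with open(path) as fh:
        for line in fh:
            if ',' in line:
                a, b = line.strip().split(',')
                xs.append(float(a))  # floats, so the axes scale numerically
                ys.append(float(b))
    return xs, ys

def animate(i):
    # a single update function: clear once, then draw both curves
    ax1.clear()
    for path, label in [('NeuralNetData.txt', 'validation accuracy'),
                        ('NeuralNetData2.txt', 'mini-batch accuracy')]:
        xs, ys = read_xy(path)
        ax1.plot(xs, ys, label=label)
    ax1.legend(loc='lower right')
    ax1.set_xlabel("Images Seen")
    ax1.set_ylabel("Accuracy(%)")

# ... run the training loop here; it only appends to the two files
# (for live updates it would have to run in a separate thread or process) ...

ani = animation.FuncAnimation(fig, animate, interval=1000)  # created exactly once
plt.show()  # called exactly once, at the end of the script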
I am an extreme beginner at TensorFlow, and I was tasked to do a simple linear regression using my CSV data, which contains 2 columns, Height & State of Charge (SoC), where both values are floats.
In the CSV file, Height is the first column and SoC is the second.
Using Height, I'm supposed to predict SoC.
I'm completely lost as to what I have to add in the "Fit all training data" portion of the code. I've looked at other linear regression models and their code is mind-boggling, such as this one:
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        sess.run(training_step, feed_dict={X: train_x, Y: train_y})
        cost_history = np.append(cost_history, sess.run(cost, feed_dict={X: train_x, Y: train_y}))
    # calculate mean square error
    pred_y = sess.run(y_, feed_dict={X: test_x})
    mse = tf.reduce_mean(tf.square(pred_y - test_y))
    print("MSE: %.4f" % sess.run(mse))
    # plot cost
    plt.plot(range(len(cost_history)), cost_history)
    plt.axis([0, training_epochs, 0, np.max(cost_history)])
    plt.show()
    fig, ax = plt.subplots()
    ax.scatter(test_y, pred_y)
    ax.plot([test_y.min(), test_y.max()], [test_y.min(), test_y.max()], 'k--', lw=3)
    ax.set_xlabel('Measured')
    ax.set_ylabel('Predicted')
    plt.show()
I've just been able to get data from my CSV file without error using this guide:
TensorFlow: Reading and using data from CSV file
Full Code:
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
rng = np.random
from numpy import genfromtxt
from sklearn.datasets import load_boston
# Parameters
learning_rate = 0.01
training_epochs = 1000
display_step = 50
n_samples = 221
X = tf.placeholder("float") # create symbolic variables
Y = tf.placeholder("float")
filename_queue = tf.train.string_input_producer(["battdata.csv"],shuffle=False)
reader = tf.TextLineReader(skip_header_lines=1)
key, value = reader.read(filename_queue)
# Default values, in case of empty columns. Also specifies the type of the
# decoded result.
record_defaults = [[1.], [1.]]
col1, col2 = tf.decode_csv(value, record_defaults=record_defaults)
features = tf.stack([col1])
# Set model weights
W = tf.Variable(rng.randn(), name="weight")
b = tf.Variable(rng.randn(), name="bias")
# Construct a linear model
pred = tf.add(tf.multiply(col1, W), b) # XW + b <- y = mx + b where W is gradient, b is intercept
# Mean squared error
cost = tf.reduce_sum(tf.pow(pred-col2, 2))/(2*n_samples)
# Gradient descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # Start populating the filename queue.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    sess.run(init)
    # Fit all training data
    for epoch in range(training_epochs):
        _, cost_value = sess.run([optimizer, cost])
        for (x, y) in zip(col2, col1):
            sess.run(optimizer, feed_dict={X: x, Y: y})
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost, feed_dict={X: col2, Y: col1})
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))
    print("Optimization Finished!")
    training_cost = sess.run(cost, feed_dict={X: col2, Y: col1})
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
    # Graphic display
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.plot(train_X, sess.run(W) * col2 + sess.run(b), label='Fitted line')
    plt.legend()
    plt.show()
    coord.request_stop()
    coord.join(threads)
Error:
INFO:tensorflow:Error reported to Coordinator: Attempted to use a closed Session.
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input> in <module>()
      8 for epoch in range(training_epochs):
      9     _, cost_value = sess.run([optimizer, cost])
---> 10 for (x, y) in zip(col2, col1):
     11     sess.run(optimizer, feed_dict={X: x, Y: y})
     12
C:\Users\Shiina\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\framework\ops.py in __iter__(self)
    514         TypeError: when invoked.
    515         """
--> 516         raise TypeError("'Tensor' object is not iterable.")
    517
    518     def __bool__(self):
TypeError: 'Tensor' object is not iterable.
The error is because you are trying to iterate over tensors in for (x, y) in zip(col2, col1), which is not allowed. The other issue with the code is that you have an input pipeline queue set up, and then you are also trying to feed in through feed_dict, which is wrong. Your training part should look something like this:
with tf.Session() as sess:
    # Start populating the filename queue.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    sess.run(init)
    # Fit all training data
    for epoch in range(training_epochs):
        _, cost_value = sess.run([optimizer, cost])
        # Display logs per epoch step
        if (epoch+1) % display_step == 0:
            c = sess.run(cost)
            print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c),
                  "W=", sess.run(W), "b=", sess.run(b))
    print("Optimization Finished!")
    training_cost = sess.run(cost)
    print("Training cost=", training_cost, "W=", sess.run(W), "b=", sess.run(b), '\n')
    # Plot data after completing training
    train_X = []
    train_Y = []
    for i in range(input_size):  # your input data size, to loop through the data once
        X, Y = sess.run([col1, pred])  # call pred to get predictions with the updated weights
        train_X.append(X)
        train_Y.append(Y)
    # Graphic display
    plt.plot(train_X, train_Y, 'ro', label='Original data')
    plt.legend()
    plt.show()
    coord.request_stop()
    coord.join(threads)