How to pass input to placeholders? - python

import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import pandas as pd
import pylab as pl
import numpy as np
plt.rcParams['figure.figsize'] = (20, 6)
df1 = pd.read_csv("TrainData.csv")
df2 = pd.read_csv("TestData.csv")
train_data_X = np.asanyarray(df1['ENGINE SIZE'])
train_data_Y = np.asanyarray(df1['CO2 EMISSIONS'])
test_data_X = np.asanyarray(df2['ENGINE SIZE'])
test_data_Y = np.asanyarray(df2['CO2 EMISSIONS'])
W = tf.Variable(20.0, name= 'Weight')
b = tf.Variable(30.0, name= 'Bias')
X = tf.placeholder(tf.float32, name= 'Input')
Y = tf.placeholder(tf.float32, name= 'Output')
Y = W*X + b
loss = tf.reduce_mean(tf.square(Y - train_data_Y))
optimizer = tf.train.GradientDescentOptimizer(0.05)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
loss_values = []
train_data = []
for step in range(100):
    _, loss_val, a_val, b_val = sess.run([train, loss, W, b], feed_dict={X: train_data_X, Y: train_data_Y})
    loss_values.append(loss_val)
    if step % 5 == 0:
        print(step, loss_val, a_val, b_val)
        train_data.append([a_val, b_val])
plt.plot(loss_values, 'ro')
plt.show()
I am trying to make a linear regression model that predicts CO2 emissions from engine size. I am using the above code in TensorFlow.
1) When I use this code, the Weight and Bias remain unchanged. What is the problem in the code?
2) Also, if I want both engine size and mileage as inputs, what code changes should be made?
Thanks in advance.

There were a few mistakes in the code, which are mentioned below:
You were overwriting the placeholder Y with Y = W*X + b, and then later feeding data to that same name (feed_dict={X: train_data_X, Y: train_data_Y}). You should use a separate tensor for the prediction (not the placeholder you feed data into) and compute the loss from that. The required change has been made; check prediction = W*X + b in the code below.
You were passing the complete data to feed_dict at once (feed_dict={X: train_data_X, Y: train_data_Y}); the corrected code instead feeds one data point at a time (feed_dict={X: x, Y: y}).
The code below, with the needed corrections, should work fine.
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import pandas as pd
import pylab as pl
import numpy as np
plt.rcParams['figure.figsize'] = (20, 6)
df1 = pd.read_csv("TrainData.csv")
df2 = pd.read_csv("TestData.csv")
train_data_X = np.asanyarray(df1['ENGINE SIZE'])
train_data_Y = np.asanyarray(df1['CO2 EMISSIONS'])
test_data_X = np.asanyarray(df2['ENGINE SIZE'])
test_data_Y = np.asanyarray(df2['CO2 EMISSIONS'])
W = tf.Variable(20.0, name= 'Weight')
b = tf.Variable(30.0, name= 'Bias')
X = tf.placeholder(tf.float32, name= 'Input')
Y = tf.placeholder(tf.float32, name= 'Output')
prediction= W*X + b
loss = tf.reduce_mean(tf.square(prediction - Y))
optimizer = tf.train.GradientDescentOptimizer(0.05)
train = optimizer.minimize(loss)
loss_values = []
train_data = []
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for step in range(100):
        for (x, y) in zip(train_data_X, train_data_Y):
            _, loss_val, a_val, b_val = sess.run([train, loss, W, b], feed_dict={X: x, Y: y})
            loss_values.append(loss_val)
        if step % 5 == 0:
            print(step, loss_val, a_val, b_val)
            train_data.append([a_val, b_val])
plt.plot(loss_values, 'ro')
plt.show()
Note: Because of the incorrect choice of loss function, your loss keeps increasing with every step.
I have mentioned a loss function below which might work for your data. I am not sure what your data looks like, but you can give it a try if you want and let me know if it worked.
n_samples = train_data_X.shape[0]
loss = tf.reduce_sum(tf.pow(prediction - Y, 2)) / (2 * n_samples)
Response to your second query.
Assuming that your data has a column named MILEAGE, you can make the changes below to train_data_X and test_data_X. The rest of the code stays largely the same as above, except that the weight and prediction need a small adaptation for two inputs (see the sketch after the snippet).
train_data_X = np.asanyarray(df1[['ENGINE SIZE','MILEAGE']])
train_data_Y = np.asanyarray(df1['CO2 EMISSIONS'])
test_data_X = np.asanyarray(df2[['ENGINE SIZE','MILEAGE']])
test_data_Y = np.asanyarray(df2['CO2 EMISSIONS'])
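Note that W above is a single scalar, so it cannot weight two input columns separately. A minimal sketch of adapting the weight, placeholder, and prediction for two features, assuming each row of train_data_X now holds [ENGINE SIZE, MILEAGE] and the rest (loss, optimizer, training loop) stays as in the corrected code:
W = tf.Variable(tf.ones([2]), name='Weight')               # one weight per feature
b = tf.Variable(30.0, name='Bias')
X = tf.placeholder(tf.float32, shape=[2], name='Input')    # one row: [engine size, mileage]
Y = tf.placeholder(tf.float32, name='Output')
prediction = tf.reduce_sum(W * X) + b                      # dot product of weights and features
# the inner training loop then feeds one row at a time:
#   for (x_row, y_val) in zip(train_data_X, train_data_Y):
#       sess.run(train, feed_dict={X: x_row, Y: y_val})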

Related

TensorFlow: are all the basic operations in Python overridden in TensorFlow?

I am new to TensorFlow and am trying to write a loss function (squared loss) using basic Python operators, but it is not working. Can anyone tell me where I went wrong? Thanks in advance.
n = x_data.shape[0]
L = (Y_pred-y)**2
loss = (1/n)*tf.reduce_sum(L)
I get loss = 0.0 when I run the corresponding session:
_, _m, _c, _l = session.run([optimizer, m, c, loss], feed_dict={x: x_data, y: y_data})
(y is a placeholder.) Meanwhile, this version works just fine:
loss = tf.reduce_mean(tf.squared_difference(Y_pred, y))
Complete Code:
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#downloading dataset
!wget -nv -O /resources/data/PierceCricketData.csv https://ibm.box.com/shared/static/reyjo1hk43m2x79nreywwfwcdd5yi8zu.csv
df = pd.read_csv("/resources/data/PierceCricketData.csv")
df.head()
%matplotlib inline
x_data, y_data = (df["Chirps"].values,df["Temp"].values)
plt.plot(x_data, y_data, 'ro')
# label the axis
plt.xlabel("# Chirps per 15 sec")
plt.ylabel("Temp in Farenhiet")
x = tf.placeholder(tf.float32, shape=x_data.shape)
y = tf.placeholder(tf.float32, shape=y_data.shape)
m = tf.Variable(3.0, name='m')
c = tf.Variable(2.0, name='c')
Y_pred = m*x+c
n = x_data.shape[0]
L = (Y_pred - y)**2
loss = (1/n)*tf.reduce_sum(L)
# loss = tf.reduce_mean(tf.squared_difference(Y_pred,y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)
session = tf.Session()
session.run(tf.global_variables_initializer())
convergenceTolerance = 0.0001
previous_m = np.inf
previous_c = np.inf
steps = {}
steps['m'] = []
steps['c'] = []
losses=[]
for k in range(100000):
    _, _m, _c, _l = session.run([optimizer, m, c, loss], feed_dict={x: x_data, y: y_data})
    steps['m'].append(_m)
    steps['c'].append(_c)
    losses.append(_l)
    if (np.abs(previous_m - _m) <= convergenceTolerance) or (np.abs(previous_c - _c) <= convergenceTolerance):
        print "Finished by Convergence Criterion"
        print k
        print _l
        break
    previous_m = _m
    previous_c = _c
print(losses)
Output I get is [0.0, 0.0]
Why?
Here is the official TensorFlow implementation of mean_squared_error :
from tensorflow.python.framework import ops, math_ops

@tf_export("losses.mean_squared_error")
def mean_squared_error(labels, predictions, weights=1.0, scope=None,
                       loss_collection=ops.GraphKeys.LOSSES,
                       reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
    if labels is None:
        raise ValueError("labels must not be None.")
    if predictions is None:
        raise ValueError("predictions must not be None.")
    with ops.name_scope(scope, "mean_squared_error", (predictions, labels, weights)) as scope:
        predictions = math_ops.to_float(predictions)
        labels = math_ops.to_float(labels)
        predictions.get_shape().assert_is_compatible_with(labels.get_shape())
        losses = math_ops.squared_difference(predictions, labels)
        return compute_weighted_loss(losses, weights, scope, loss_collection, reduction=reduction)
As you can see from their source code, you should make sure that the tensors have the same dtype. Hope that answers your question.
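As a further sanity check, here is a minimal sketch of the hand-written squared loss with consistent float types, assuming Y_pred, y, and x_data are defined as in the question; casting the sample count to float32 also rules out integer division in the final scaling:
n_float = tf.cast(x_data.shape[0], tf.float32)   # sample count as a float32 tensor
L = tf.square(Y_pred - y)                        # elementwise squared error
loss = tf.reduce_sum(L) / n_float                # float division throughout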

ValueError: Cannot feed value of shape (200,) for Tensor 'Placeholder_32:0', which has shape '(?, 1)'

I am new to TensorFlow. This code is just for a simple neural network.
I think the problem may come from:
x_data = np.linspace(-0.5,0.5,200)[:np.newaxis]
I tried writing it without [:np.newaxis], but it looks the same.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
x_data = np.linspace(-0.5,0.5,200)[:np.newaxis]
noise = np.random.normal(0,0.02,x_data.shape)
y_data = np.square(x_data) + noise
x = tf.placeholder(tf.float32,[None,1])
y = tf.placeholder(tf.float32,[None,1])
Weights_L1 = tf.Variable(tf.random_normal([1,10]))
biases_L1 = tf.Variable(tf.zeros([1,10]))
Wx_plus_b_L1 = tf.matmul(x,Weights_L1) + biases_L1
L1 = tf.nn.tanh(Wx_plus_b_L1)
Weights_L2 = tf.Variable(tf.random_normal([10,1]))
biases_L2 = tf.Variable(tf.zeros([1,1]))
Wx_plus_b_L2 = tf.matmul(L1,Weights_L2) + biases_L2
prediction = tf.nn.tanh(Wx_plus_b_L2)
loss = tf.reduce_mean(tf.square(y-prediction))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(2000):
        sess.run(train_step, feed_dict={x: x_data, y: y_data})
    prediction_value = sess.run(prediction, feed_dict={x: x_data})

plt.figure()
plt.scatter(x_data, y_data)
plt.plot(x_data, prediction_value, 'r-', lw=5)
plt.show()
The defined placeholders (both x and y) are 2-dimensional, so you should reshape the input arrays to rank 2. Try adding this:
x_data = x_data.reshape([-1,1])
y_data = y_data.reshape([-1,1])
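If the intent of the original [:np.newaxis] slice was to add the trailing axis directly, the missing piece is most likely a comma; a small sketch of that equivalent fix:
x_data = np.linspace(-0.5, 0.5, 200)[:, np.newaxis]   # shape (200, 1) instead of (200,)
noise = np.random.normal(0, 0.02, x_data.shape)
y_data = np.square(x_data) + noise                    # y_data is then also (200, 1)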

What causes overfitting in the algorithm

import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
#reproducible random seed
seed = 1
np.random.seed(seed)
#Import and normalize the data
df = pd.read_csv('creditcard.csv')
#Exploring the data
# print df.head()
# print df.describe()
# print df.isnull().sum()
# count_class = pd.value_counts(df['Class'])
# count_class.plot(kind = 'bar')
# plt.title('Fraud class histogram')
# plt.xlabel('class')
# plt.ylabel('Frequency')
# plt.show()
# print('Clearly the data is totally unbalanced!')
#to normalize the amount column
# data['normAmount'] = StandardScaler().fit_transform(data['Amount'].reshape(-1, 1))
df['normAmount'] = StandardScaler().fit_transform(df['Amount'].values.reshape(-1, 1))
df = df.drop(['Time','V28','V27','V26','V25','V24','V23','V22','V20','V15','V13','V8','Amount'], axis =1)
X = df.iloc[:,df.columns!='Class']
Y = df.iloc[:,df.columns=='Class']
# number of records in the minority class
number_record_fraud = len(df[df.Class==1])
fraud_indices = np.array(df[df.Class==1].index)
#picking normal class
normal_indices = np.array(df[df.Class==0].index)
#select random x(number_record_fraud) numbers from normal_indices
random_normal_indices = np.random.choice(normal_indices,number_record_fraud,replace=False)
random_normal_indices = np.array(random_normal_indices)
#under sample data
under_sample_indices = np.concatenate([fraud_indices,random_normal_indices])
under_sample_data = df.iloc[under_sample_indices,:]
X_undersample = under_sample_data.iloc[:,under_sample_data.columns!='Class']
Y_undersample = under_sample_data.iloc[:,under_sample_data.columns=='Class']
# split data into train and test dataset
X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size = 0.3)
X_train_undersample,X_test_undersample,Y_train_undersample,Y_test_undersample = train_test_split(X_undersample,Y_undersample,test_size=0.3)
#parameters
learning_rate = 0.05
training_epoch = 10
batch_size = 43
display_step = 1
#tf graph input
x = tf.placeholder(tf.float32,[None,18])
y = tf.placeholder(tf.float32,[None,1])
#set model weights
w = tf.Variable(tf.zeros([18,1]))
b = tf.Variable(tf.zeros([1]))
#construct model
pred = tf.nn.softmax(tf.matmul(x,w) + b) #softmax activation
#minimize error using cross entropy
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred),reduction_indices=1))
#Gradient descent
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
#initializing variables
init = tf.global_variables_initializer()
#launch the graph
with tf.Session() as sess:
    sess.run(init)
    # training cycle
    for epoch in range(training_epoch):
        total_batch = len(X_train_undersample)/batch_size
        avg_cost = 0
        # loop over all the batches
        for batch in range(total_batch):
            batch_xs = X_train.iloc[(batch)*batch_size:(batch+1)*batch_size]
            batch_ys = Y_train.iloc[(batch)*batch_size:(batch+1)*batch_size]
            # run optimizer and cost operation
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            avg_cost += c/total_batch
        correct_prediction = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        # display log per epoch step
        if (epoch+1) % display_step == 0:
            train_accuracy, newCost = sess.run([accuracy, cost], feed_dict={x: X_test, y: Y_test})
            print "test_set_accuracy:", accuracy.eval({x: X_test_undersample, y: Y_test_undersample})*100
            print "whole_set_accuracy:", accuracy.eval({x: X, y: Y})*100
            # print train_accuracy
            # print "cost", newCost
            print
    print 'optimization finished.'
Things I've tried to figure out what's causing it:
Tried changing train dataset length.
Dropped some not needed fields.
Tried putting validation blocks.
Dataset :link
There can be multiple reasons why it is overfitting, and likewise multiple ways to debug and fix it. It's hard to tell just from the code, because it also depends on the data, but here are some common reasons as well as fixes:
Too small a dataset: adding more data is a common overfitting fix.
Too complex a model: if you have many features or complex polynomial features, try reducing complexity using feature selection.
Add regularization: I don't see regularization in your code; try adding it (a sketch follows below).
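For example, a minimal sketch of adding L2 regularization to the cost defined in the question; the regularization strength here is a hypothetical value you would tune on validation data:
l2_lambda = 0.01   # hypothetical regularization strength
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1)) + l2_lambda*tf.nn.l2_loss(w)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)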

Same code on mnist from two different data sources, but very different results

I followed the TensorFlow tutorial and my code yields a ~90% prediction rate, as expected.
However, I tried the same code with the MNIST data set from Kaggle at the link below,
https://www.kaggle.com/c/digit-recognizer
but the prediction rate is completely off.
I carefully checked the training data and label formats, but could not figure out where the difference comes from. Please help me with this problem.
import tensorflow as tf
import pandas as pd
import math
import pylab as plt
import numpy as np
df = pd.read_csv("data/train.csv")
data1 = df.iloc[:,1:].values.astype(np.float32).copy()
label_ = (df.iloc[:,0].values.astype(np.int32).copy())
label1 = np.zeros([label_.shape[0],10]).astype(np.float32)
label1[np.arange(label_.shape[0]),label_] = 1
print(data1.shape)
print(label1[:3,:])
print(label_[:3])
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
graph = tf.Graph()
optimizer, loss, prediction = None, None, None
x,y = None, None
with graph.as_default():
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])
    hidden = 100
    W1 = tf.Variable(tf.truncated_normal([784, hidden]))
    b1 = tf.Variable(tf.zeros([hidden]))
    W2 = tf.Variable(tf.truncated_normal([hidden, 10]))
    b2 = tf.Variable(tf.zeros([10]))
    hidden = tf.nn.relu(tf.matmul(x, W1) + b1)
    logits = tf.matmul(hidden, W2) + b2
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y))
    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
    prediction = tf.nn.softmax(logits)
    test_prediction = tf.nn.softmax(logits)
def accuracy(predict, label):
    return 100*np.sum(np.argmax(predict,1) == np.argmax(label,1))/float(label.shape[0])

with tf.Session(graph=graph) as sess:
    tf.initialize_all_variables().run()
    dd = mnist.train.images
    ll = mnist.train.labels
    #dd = data1
    #ll = label1
    for step in range(10000):
        s = step*100 % (dd.shape[0])
        e = (step+1)*100 % (dd.shape[0])
        if s > e:
            s = (step+1)*100 % (dd.shape[0])
            e = (step+2)*100 % (dd.shape[0])
        data = dd[s:e, :]
        label = ll[s:e, :]
        _, l, predict = sess.run([optimizer, loss, prediction], feed_dict={x: data, y: label})
        if step % 100 == 0:
            print("step at %i: loss at %f and accuracy of %.2f%%" % (step, l, accuracy(predict, label)))

Tensorflow logistic regression different output

Hello, I am trying to perform logistic regression using TensorFlow (sorry if my code looks dumb). I have written the cost function once in NumPy and once in TensorFlow, and I am getting different results for the same starting weights. Could someone help me?
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.datasets.samples_generator import make_blobs
DataSize=1000
data, y = make_blobs(n_samples=1000, centers=2, n_features=2,random_state=1,center_box=(-5.0,5.0))
plt.scatter(data[:,0],data[:,1])
plt.show(block=False)
x=np.linspace(-1,5,1000)
b=np.ones([1,1])
W=np.ones([2,1])
asd=W*x.T+b
pred=np.dot(data,W)+b
plt.plot(x,asd[0])
plt.show(block=False)
result=((1))/(1+np.exp(-pred))
s=np.log(result)
J=-(y.T.dot(s)+(1-y).T.dot(1-s))/1000
print ("cost in numpy",J)
#
with tf.variable_scope("scopi", reuse=True):
    X = tf.placeholder(tf.float32)
    Y = tf.placeholder(tf.float32)
    b = tf.Variable(tf.ones((1,1)), name="bias")
    W = tf.Variable(tf.ones((1,2)), name="weights")
    ypred = W*X + b
    hx = tf.reduce_sum(tf.sigmoid(ypred), reduction_indices=1)
    #cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
    J = -tf.reduce_sum(tf.mul(tf.transpose(Y), hx) + tf.mul(tf.transpose(1-Y), (1-hx)))/1000
    opti = tf.train.AdamOptimizer(0.1).minimize(J)

with tf.Session() as session:
    session.run(tf.initialize_all_variables())
    h = session.run(J, feed_dict={X: data, Y: y})
    print ("cost in tensorflow", h)
    # epoch = 100
    # for i in range(epoch):
    #     for j in range(DataSize):
    #         session.run(opti, feed_dict={X: data[j], Y: y[j]})
    #     if i % 10 == 0:
    #         a = session.run(J, feed_dict={X: data, Y: y})
    #         print ("cost ", a)
Sample output of the two cost computations:
('cost in numpy', array([ 2.37780175])) ('cost in tensorflow', 0.073667422)
You are initializing the weights in this line:
session.run(tf.initialize_all_variables())
After that line you can set the values explicitly with something like this:
session.run(tf.assign(b, tf.ones((1,1))))
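For example, a minimal sketch that pins both variables to known values right after initialization, with shapes taken from the question's definitions of W and b:
session.run(tf.initialize_all_variables())
session.run([tf.assign(W, tf.ones((1, 2))),   # weights, shape (1, 2)
             tf.assign(b, tf.ones((1, 1)))])  # bias, shape (1, 1)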
