Logits and labels must be same size - python

I'm trying to create a neural network that takes 13 features as input from multiple CSV files, one file at a time, and measures accuracy after each iteration. Here is my code snippet:
import tensorflow as tf
import numpy as np
from tensorflow.contrib.layers import fully_connected
import os
import pandas as pd
n_inputs = 13
n_hidden1 = 30
n_hidden2 = 10
n_outputs = 2
learning_rate = 0.01
n_epochs = 40
batch_size = 1
patient_id = os.listdir('./subset_numerical')
output = pd.read_csv('output.csv')
sepsis_pat = output['output'].tolist()
X = tf.placeholder(tf.float32, shape=[None, n_inputs], name="X")
y = tf.placeholder(tf.int64, shape=[None], name="y")
def data_processor(n):
    id = pd.read_csv('./subset_numerical/' + patient_id[n])
    id_input = np.array([id['VALUE'].tolist()])
    for s in sepsis_pat:
        if str(s) == str(patient_id[n].split('.')[0]):
            a = 1
    try:
        if a == 1:
            a = 0
            return [id_input, np.array([1])]
    except:
        return [id_input, np.array([0])]

def test_set():
    id_combined = []
    out = []
    for p in range(300, len(patient_id)):
        try:
            id1 = pd.read_csv('./subset_numerical/' + patient_id[p])
            id_input1 = np.array(id1['VALUE'].tolist())
            id_combined.append(id_input1)
            for s in sepsis_pat:
                if str(s) == str(patient_id[p].split('.')[0]):
                    a = 1
            try:
                if a == 1:
                    a = 0
                    out.append([1, 0])
            except:
                out.append([0, 1])
        except:
            pass
    return [np.array(id_combined), np.array(out)]
# Declaration of hidden layers and calculation of loss goes here
# Construction phase begins
with tf.name_scope("dnn"):
hidden1 = fully_connected(X, n_hidden1, scope="hidden1")
hidden2 = fully_connected(hidden1, n_hidden2, scope="hidden2")
logits = fully_connected(hidden2, n_outputs, scope="outputs", activation_fn=None) # We will apply softmax here later
# Calculating loss
with tf.name_scope("loss"):
xentropy = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
# Training with gradient descent optimizer
with tf.name_scope("train"):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
# Measuring accuracy
with tf.name_scope("eval"):
correct = tf.nn.in_top_k(logits, y, 1)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
accuracy_summary = tf.summary.scalar('accuracy', accuracy)
# Variable initialization and saving model goes here
# Construction is finished. Let's get this to work.
with tf.Session() as sess:
init.run()
for epoch in range(n_epochs):
a = 0
for iteration in range(300 // batch_size):
X_batch, y_batch = data_processor(iteration)
sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
X_test, y_test = test_set()
acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
print(epoch, "Train accuracy:", acc_train, "Test accuracy:", acc_test)
save_path = saver.save(sess, "./my_model_final.ckpt")
But I'm stuck with this error:
logits and labels must be same size: logits_size=[1,2] labels_size=[1,1]
The error seems to occur at this line:
correct = tf.nn.in_top_k(logits, y, 1)
What am I doing wrong?

Based on the error log you provided, the problem is in this line of your code:
xentropy = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)
Ensure that both of them have the same shape and dtype.
The shape should be of the form [batch_size, num_classes], and the dtype should be float16, float32, or float64. Check the documentation of softmax_cross_entropy_with_logits for more details.
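For example, with n_outputs = 2 the labels fed to that loss need shape [batch_size, 2]. A minimal sketch of one-hot encoding the integer labels (the sample values are purely illustrative):
import numpy as np
# Hypothetical integer labels for one batch, shape [batch_size]
y_int = np.array([1, 0, 1])
# One-hot encode so the labels match the logits shape [batch_size, 2]
y_one_hot = np.eye(2, dtype=np.float32)[y_int]  # shape [3, 2]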

Since you've defined n_outputs = 2, the shape of logits is [?, 2] (? means batch size), while the shape of y is just [?]. To apply the softmax loss function, the labels have to be compatible with the logits.
Solution: either one-hot encode y so it has shape [batch_size, n_outputs], or switch to tf.nn.sparse_softmax_cross_entropy_with_logits, which accepts integer class labels of shape [batch_size].
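A minimal sketch of the sparse variant, assuming the rest of your graph stays as posted (this is how I would expect it to slot in, not a tested drop-in):
# y stays an int64 placeholder of shape [None]; logits keep shape [None, n_outputs]
xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)
loss = tf.reduce_mean(xentropy, name="loss")
# tf.nn.in_top_k(logits, y, 1) already expects integer labels, so the accuracy part can stay unchanged.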

Related

TensorFlow model's cost is constantly at 0

So I've been learning TensorFlow with this Computer Vision project and I'm not sure if I understand it well enough. I think I got the session part right, although the graph seems to be the issue here. Here is my code:
def model_train(placeholder_dimensions, filter_dimensions, strides, learning_rate, num_epochs, minibatch_size, print_cost = True):
    # for training purposes
    tf.reset_default_graph()
    # create datasets
    train_set, test_set = load_dataset()  # custom function and custom-made dataset
    X_train = np.array([ex[0] for ex in train_set])
    Y_train = np.array([ex[1] for ex in train_set])
    X_test = np.array([ex[0] for ex in test_set])
    Y_test = np.array([ex[1] for ex in test_set])
    # convert to one-hot encodings
    Y_train = tf.one_hot(Y_train, depth = 10)
    Y_test = tf.one_hot(Y_test, depth = 10)
    m = len(train_set)
    costs = []
    tf.reset_default_graph()
    graph = tf.get_default_graph()
    with graph.as_default():
        # create placeholders
        X, Y = create_placeholders(*placeholder_dimensions)
        # initialize parameters
        parameters = initialize_parameters(filter_dimensions)
        # forward propagate
        Z4 = forward_propagation(X, parameters, strides)
        # compute cost
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = Z4, labels = Y))
        # define optimizer for backpropagation that minimizes the cost function
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
        # initialize variables
        init = tf.global_variables_initializer()
        # start session
        with tf.Session() as sess:
            sess.run(init)
            for epoch in range(num_epochs):
                minibatch_cost = 0.
                num_minibatches = int(m / minibatch_size)
                # get random minibatch
                minibatches = random_minibatches(np.array([X_train, Y_train]), minibatch_size)
                for minibatch in minibatches:
                    minibatch_X, minibatch_Y = minibatch
                    _ , temp_cost = sess.run([optimizer, cost], {X: minibatch_X, Y: minibatch_Y})
                    minibatch_cost += temp_cost / num_minibatches
                if print_cost == True and epoch % 5 == 0:
                    print('Cost after epoch %i: %f' %(epoch, minibatch_cost))
                if print_cost == True:
                    costs.append(minibatch_cost)
            # plot the costs
            plot_cost(costs, learning_rate)
            # calculate correct predictions
            prediction = tf.argmax(Z4, 1)
            correct_prediction = tf.equal(prediction, tf.argmax(Y, 1))
            # calculate accuracy on test set
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
            train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
            test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
            print('Training set accuracy:', train_accuracy)
            print('Test set accuracy:', test_accuracy)
    return parameters
where the initialize_parameters and forward_propagation functions are as follows:
def initialize_parameters(filter_dimensions):
    # initialize weight parameters for convolution layers
    W1 = tf.get_variable('W1', shape = filter_dimensions['W1'])
    W2 = tf.get_variable('W2', shape = filter_dimensions['W2'])
    parameters = {'W1': W1, 'W2': W2}
    return parameters

def forward_propagation(X, parameters, strides):
    with tf.variable_scope('model1'):
        # first block
        Z1 = tf.nn.conv2d(X, parameters['W1'], strides['conv1'], padding = 'VALID')
        A1 = tf.nn.relu(Z1)
        P1 = tf.nn.max_pool(A1, ksize = strides['pool1'], strides = strides['pool1'], padding = 'VALID')
        # second block
        Z2 = tf.nn.conv2d(P1, parameters['W2'], strides['conv2'], padding = 'VALID')
        A2 = tf.nn.relu(Z2)
        P2 = tf.nn.max_pool(A2, ksize = strides['pool2'], strides = strides['pool2'], padding = 'VALID')
        # flatten
        F = tf.contrib.layers.flatten(P2)
        # dense block
        Z3 = tf.contrib.layers.fully_connected(F, 50)
        A3 = tf.nn.relu(Z3)
        # output
        Z4 = tf.contrib.layers.fully_connected(A3, 10, activation_fn = None)
    return Z4
I have previous experience with Keras, yet I can't find what the problem is here.
I would check 2 things first:
#convert to one-hot encodings
Y_train = tf.one_hot(Y_train, depth = 10)
Y_test = tf.one_hot(Y_test, depth = 10)
Check whether this code outputs what you expect: tf.one_hot returns a symbolic Tensor, not a NumPy array, so Y_train and Y_test are no longer plain arrays after these lines.
And second: check the model initialization, again, to see whether it looks like you expect.
Just my 2 cents
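As a concrete way to do that first check, here is a minimal sketch that builds the one-hot labels as plain NumPy arrays instead of tf.one_hot tensors, so the values can still be passed through feed_dict later (np.eye is my choice here, not from the original code):
import numpy as np
# Hypothetical integer labels in the range 0-9
Y_train = np.array([3, 0, 7, 9])
# One-hot encode as a NumPy array of shape (num_examples, 10)
Y_train = np.eye(10, dtype=np.float32)[Y_train]
print(Y_train.shape)  # (4, 10)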

TensorFlow "InvalidArgumentError" for value fed to placeholder via feed_dict

I"m working through the an example problem with TensorFlow (working with placeholders specifically) and don't understand why I'm receiving (what appears to be) a shape/type error when I'm fairly confident those are what they should be.
I've tried playing around with the various float types in X_batch & y_batch, tried changing the size from being "None" (unspecified) to what I will be passing in (100), none of which have worked
import tensorflow as tf
import numpy as np
from sklearn.datasets import fetch_california_housing
def fetch_batch(epoch, batch_index, batch_size, X, y):
    np.random.seed(epoch * batch_index)
    indices = np.random.randint(m, size=batch_size)
    X_batch = X[indices]
    y_batch = y[indices]
    return X_batch.astype('float32'), y_batch.astype('float32')

if __name__ == "__main__":
    housing = fetch_california_housing()
    m, n = housing.data.shape
    # standardizing input data
    standardized_housing = (housing.data - np.mean(housing.data)) / np.std(housing.data)
    std_housing_bias = np.c_[np.ones((m, 1)), standardized_housing]
    # using the size "n+1" to account for the bias term
    X = tf.placeholder(tf.float32, shape=(None, n+1), name='X')
    y = tf.placeholder(tf.float32, shape=(None, 1), name='y')
    theta = tf.Variable(tf.random_uniform([n + 1, 1], -1, 1), dtype=tf.float32, name='theta')
    y_pred = tf.matmul(X, theta, name='predictions')
    error = y_pred - y
    mse = tf.reduce_mean(tf.square(error), name='mse')
    n_epochs = 1000
    learning_rate = 0.01
    batch_size = 100
    n_batches = int(np.ceil(m / batch_size))
    # using the Gradient Descent Optimizer class from tensorflow's optimizer selection
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    training_op = optimizer.minimize(mse)
    # creates a node in the computational graph that initializes all variables when it is run
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(n_epochs):
            for batch_index in range(n_batches):
                X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size, std_housing_bias,
                                               housing.target.reshape(-1, 1))
                print(X_batch.shape, X_batch.dtype, y_batch.shape, y_batch.dtype)
                sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
            if epoch % 100 == 0:
                print(f"Epoch {epoch} MSE = {mse.eval()}")
        best_theta = theta.eval()
    print("Mini Batch Gradient Descent Beta Estimates")
    print(best_theta)
The error I'm getting is:
InvalidArgumentError: You must feed a value for placeholder tensor 'X' with dtype float and shape [?,9]
[[node X (defined at /Users/marshallmcquillen/Scripts/lab.py:25) ]]
I've added a print statement showing the X_batch and y_batch properties, and they are what I expect them to be, but it still isn't working.
The mse you want to evaluate also depends on the placeholders X and y, therefore you need to provide a feed_dict for it as well. You can fix it by changing the line to
if epoch % 100 == 0:
    print(f"Epoch {epoch} MSE = {mse.eval(feed_dict={X: X_batch, y: y_batch})}")
But since you are trying to evaluate the model, it is more reasonable to use a test dataset. So ideally it would be
if epoch % 100 == 0:
    print(f"Epoch {epoch} MSE = {mse.eval(feed_dict={X: X_test, y: y_test})}")

Error training Cifar-10 Model Tensorflow - Accuracy is 0 and will not optimize and losses not reported

I am currently trying to train my model to categorize the CIFAR-10 dataset. I read the data like this:
def convert_images(raw):
    raw_float = np.array(raw, dtype = float)
    images = raw_float.reshape([-1,3,32,32])
    images = images.transpose([0,2,3,1])
    return images

def load_data(filename):
    data = unpickle(filename)
    raw_images = data[b'data']
    labels = np.array(data[b'labels'])
    images = convert_images(raw_images)
    return images, labels

def load_training_data():
    images = np.zeros(shape=[50000,32,32,3], dtype = float)
    labels = np.zeros(shape = [50000], dtype = int)
    begin = 0
    for i in range(5):
        filename = "data_batch_" + str(i+1)
        images_batch, labels_batch = load_data(filename)
        num_images = len(images_batch)
        end = begin + num_images
        images[begin:end, :] = images_batch
        labels[begin:end] = labels_batch
        begin = end
    return images, labels, OneHotEncoder(categorical_features=labels, n_values=10)
What this does is reshape the data so that it is a 4D array with 32x32x3 values for the pixels and RGB colors. I define my model like this (I first reshape X to be a row vector because the 4D array creates errors):
X = tf.placeholder(tf.float32, [None,32,32,3])
Y_labeled = tf.placeholder(tf.int32, [None])
data = load_training_data()
with tf.name_scope('dnn'):
    XX = tf.reshape(X, [-1,3072])
    hidden1 = tf.layers.dense(XX, 300, name = 'hidden1', activation = tf.nn.relu)
    hidden2 = tf.layers.dense(hidden1, 200, name = 'hidden2', activation = tf.nn.relu)
    hidden3 = tf.layers.dense(hidden2, 200, name = 'hidden3', activation = tf.nn.relu)
    hidden4 = tf.layers.dense(hidden3, 100, name = 'hidden4', activation = tf.nn.relu)
    logits = tf.layers.dense(hidden4, 10, name = 'outputs')
with tf.name_scope('loss'):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = (Y_labeled), logits = logits)
    loss = tf.reduce_mean(cross_entropy, name = 'loss')
learning_rate = 0.01
with tf.name_scope('train'):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    training_op = optimizer.minimize(loss)
with tf.name_scope('eval'):
    correct = tf.nn.in_top_k(logits, Y_labeled, 1)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
init = tf.global_variables_initializer()
batch_size = 100
n_epochs = 50
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        for iteration in range(50000 // batch_size):
            X_batch = data[0][iteration*batch_size:(iteration+1)*batch_size]
            y_batch = data[1][iteration*batch_size:(iteration+1)*batch_size]
            #X_batch, y_batch = data.train.next_batch(batch_size)
            sess.run(training_op, feed_dict = {X: X_batch, Y_labeled: y_batch})
        acc_train = accuracy.eval(feed_dict = {X: X_batch, Y_labeled: y_batch})
        print(epoch, "train accuracy:", acc_train, "loss", loss)
I want to define a simple model that has 4 hidden layers. When I run this it compiles with no errors and starts "training", but the accuracy is 0.0 and it does not print any losses. I am not sure if the error is in my calculation of accuracy and loss or in my definition of the model.
There seems to be a problem with the way you feed your labels. When you create the placeholder Y_labeled = tf.placeholder(tf.int32, [None, 10]) it expects a vector of dimension 10 per example, but later when you create the label numpy tensor labels = np.zeros(shape = [50000], dtype = int) each label is a scalar.
This is why you get this error: the placeholder needs to be fed with a tensor of shape (batch_size, 10), but you feed it with one of shape (batch_size,).
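For reference, the two consistent pairings in TF1 are: an integer label placeholder of shape [None] with tf.nn.sparse_softmax_cross_entropy_with_logits (what the posted code uses), or a one-hot placeholder of shape [None, 10] with tf.nn.softmax_cross_entropy_with_logits. A minimal sketch of the latter (names are illustrative, not from the original code):
Y_labeled = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=Y_labeled, logits=logits)
loss = tf.reduce_mean(cross_entropy, name='loss')
# feed one-hot labels for this placeholder, e.g. np.eye(10)[y_batch]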

AdadeltaOptimizer Example code

Does anyone have an example of code that uses tf.train.AdadeltaOptimizer with good results?
I have a TF graph that was originally set up with tf.train.AdamOptimizer and is working well. When I replace it with AdadeltaOptimizer with the default params, it gives lousy results.
I used CUDA 7.5.
Below is example code which works with the 'AdadeltaOptimizer' optimizer; it also works with 'Adam'. The main difference between them is that 'Adam' is fairly insensitive to the learning rate, while 'Adadelta' is sensitive to it.
I advise you to read more about optimization algorithms (like here).
In your own example, just try to make the learning rate smaller or bigger (this is called 'hyperparameter tuning').
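For instance (a minimal sketch, assuming a cost tensor already exists in your graph; the specific values are only illustrative, not recommendations):
# Adadelta with an explicit, larger learning rate and its hyperparameters spelled out;
# the default learning rate of tf.train.AdadeltaOptimizer is quite small, which is one
# common reason it appears not to learn when swapped in for Adam
optimizer = tf.train.AdadeltaOptimizer(learning_rate=1.0, rho=0.95, epsilon=1e-6)
training_op = optimizer.minimize(cost)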
Note:
From my experience, 'Adam' is a very good optimizer for RNNs, better than 'AdaDelta' (using the example code, 'Adam' achieves a better score much faster). On the other hand, for CNNs, SGD+Momentum works best.
Code that learns MNIST classification using a Bi-LSTM:
# Mnist classification using Bi-LSTM
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np

mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
learning_rate = 0.01
training_epochs = 100
batch_size = 64
seq_length = 28
heigh_image = 28
hidden_size = 128
class_numer = 10

input = tf.placeholder(tf.float32, [None, None, heigh_image])
target = tf.placeholder(tf.float32, [None, class_numer])
seq_len = tf.placeholder(tf.int32, [None])

def fulconn_layer(input_data, output_dim, activation_func=None):
    input_dim = int(input_data.get_shape()[1])
    W = tf.Variable(tf.random_normal([input_dim, output_dim]))
    b = tf.Variable(tf.random_normal([output_dim]))
    if activation_func:
        return activation_func(tf.matmul(input_data, W) + b)
    else:
        return tf.matmul(input_data, W) + b

with tf.name_scope("BiLSTM"):
    with tf.variable_scope('forward'):
        lstm_fw_cell = tf.nn.rnn_cell.LSTMCell(hidden_size, forget_bias=1.0, state_is_tuple=True)
    with tf.variable_scope('backward'):
        lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(hidden_size, forget_bias=1.0, state_is_tuple=True)
    outputs, states = tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fw_cell, cell_bw=lstm_bw_cell, inputs=input, sequence_length=seq_len, dtype=tf.float32, scope="BiLSTM")

# As we have a Bi-LSTM, we have two outputs which are not connected, so merge them
outputs = tf.concat(2, outputs)
# As we want to do classification, we only need the last output from the LSTM
last_output = outputs[:,0,:]
# Create the final classification layer
yhat = fulconn_layer(last_output, class_numer)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(yhat, target))
optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate).minimize(cost)  # AdamOptimizer
accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(target, 1), tf.argmax(yhat, 1)), tf.float32))

gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.3)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_opts)) as session:
    session.run(tf.initialize_all_variables())
    print("Start Learning")
    for epoch in range(training_epochs):
        for i in range(int(mnist.train.num_examples/batch_size)):
            x_batch, y_batch = mnist.train.next_batch(batch_size)
            x_batch = x_batch.reshape([batch_size, seq_length, heigh_image])
            train_seq_len = np.ones(batch_size) * seq_length
            session.run([optimizer], feed_dict={input: x_batch, target: y_batch, seq_len: train_seq_len})
        train_accuracy = session.run(accuracy, feed_dict={input: x_batch, target: y_batch, seq_len: train_seq_len})
        x_test = mnist.test.images.reshape([-1, seq_length, heigh_image])
        y_test = mnist.test.labels
        test_seq_len = np.ones(x_test.shape[0]) * seq_length
        test_accuracy = session.run(accuracy, feed_dict={input: x_test, target: y_test, seq_len: test_seq_len})
        print("epoch: %d, train_accuracy: %3f, test_accuracy: %3f" % (epoch, train_accuracy, test_accuracy))

How do I change the symbolic variable (tf.Variable) in tensorflow?

I have written myself a TensorFlow class as below, but I ran into a problem when trying to manually set some weights to zero after training in the function refine_init_weight. In this function I try to set all numbers below some value to zero and see how the accuracy rate changes. The problem is, when I rerun self.sess.run(self.accuracy, feed_dict={self.var_X: self.X_test, self.var_Y: self.y_test}), its value does not seem to change accordingly. I am just wondering where I should change the symbolic variable (the accuracy depends on the weights I changed) in this case?
import tensorflow as tf
import numpy as np
from nncomponents import *
from helpers import *
from sda import StackedDenoisingAutoencoder

class DeepFeatureSelection:

    def __init__(self, X_train, X_test, y_train, y_test, weight_init='sda', hidden_dims=[100, 100, 100], epochs=1000,
                 lambda1=0.001, lambda2=1.0, alpha1=0.001, alpha2=0.0, learning_rate=0.1, optimizer='FTRL'):
        # Initiate the input layer
        # Get the dimension of the input X
        n_sample, n_feat = X_train.shape
        n_classes = len(np.unique(y_train))
        self.epochs = epochs
        # Store up original value
        self.X_train = X_train
        self.y_train = one_hot(y_train)
        self.X_test = X_test
        self.y_test = one_hot(y_test)
        # Two variables with undetermined length are created
        self.var_X = tf.placeholder(dtype=tf.float32, shape=[None, n_feat], name='x')
        self.var_Y = tf.placeholder(dtype=tf.float32, shape=[None, n_classes], name='y')
        self.input_layer = One2OneInputLayer(self.var_X)
        self.hidden_layers = []
        layer_input = self.input_layer.output
        # Initialize the network weights
        weights, biases = init_layer_weight(hidden_dims, X_train, weight_init)
        print(type(weights[0]))
        # Create hidden layers
        for init_w, init_b in zip(weights, biases):
            self.hidden_layers.append(DenseLayer(layer_input, init_w, init_b))
            layer_input = self.hidden_layers[-1].output
        # Final classification layer, variable Y is passed
        self.softmax_layer = SoftmaxLayer(self.hidden_layers[-1].output, n_classes, self.var_Y)
        n_hidden = len(hidden_dims)
        # regularization terms on coefficients of input layer
        self.L1_input = tf.reduce_sum(tf.abs(self.input_layer.w))
        self.L2_input = tf.nn.l2_loss(self.input_layer.w)
        # regularization terms on weights of hidden layers
        L1s = []
        L2_sqrs = []
        for i in xrange(n_hidden):
            L1s.append(tf.reduce_sum(tf.abs(self.hidden_layers[i].w)))
            L2_sqrs.append(tf.nn.l2_loss(self.hidden_layers[i].w))
        L1s.append(tf.reduce_sum(tf.abs(self.softmax_layer.w)))
        L2_sqrs.append(tf.nn.l2_loss(self.softmax_layer.w))
        self.L1 = tf.add_n(L1s)
        self.L2_sqr = tf.add_n(L2_sqrs)
        # Cost with two regularization terms
        self.cost = self.softmax_layer.cost \
            + lambda1*(1.0-lambda2)*0.5*self.L2_input + lambda1*lambda2*self.L1_input \
            + alpha1*(1.0-alpha2)*0.5 * self.L2_sqr + alpha1*alpha2*self.L1
        # FTRL optimizer is used to produce more zeros
        # self.optimizer = tf.train.FtrlOptimizer(learning_rate=learning_rate).minimize(self.cost)
        self.optimizer = optimize(self.cost, learning_rate, optimizer)
        self.accuracy = self.softmax_layer.accuracy
        self.y = self.softmax_layer.y

    def train(self, batch_size=100):
        sess = tf.Session()
        self.sess = sess
        sess.run(tf.initialize_all_variables())
        for i in xrange(self.epochs):
            x_batch, y_batch = get_batch(self.X_train, self.y_train, batch_size)
            sess.run(self.optimizer, feed_dict={self.var_X: x_batch, self.var_Y: y_batch})
            if i % 2 == 0:
                l = sess.run(self.cost, feed_dict={self.var_X: x_batch, self.var_Y: y_batch})
                print('epoch {0}: global loss = {1}'.format(i, l))
        self.selected_w = sess.run(self.input_layer.w)
        print("Train accuracy:", sess.run(self.accuracy, feed_dict={self.var_X: self.X_train, self.var_Y: self.y_train}))
        print("Test accuracy:", sess.run(self.accuracy, feed_dict={self.var_X: self.X_test, self.var_Y: self.y_test}))
        print(self.selected_w)
        print(len(self.selected_w[self.selected_w == 0]))
        print("Final test accuracy:", sess.run(self.accuracy, feed_dict={self.var_X: self.X_test, self.var_Y: self.y_test}))

    def refine_init_weight(self, threshold=0.001):
        refined_w = np.copy(self.selected_w)
        refined_w[refined_w < threshold] = 0
        self.input_layer.w.assign(refined_w)
        print("Test accuracy refined:", self.sess.run(self.accuracy, feed_dict={self.var_X: self.X_test, self.var_Y: self.y_test}))
(I'll just repost my comment as an answer)
You need to run the assign operation you created, otherwise it is just added to the graph and never executed.
assign_op = self.input_layer.w.assign(refined_w)
self.sess.run(assign_op)
If you want to do this in Tensorflow you could create a boolean mask of the weight variable with tf.greater and tf.less, convert this mask to tf.float32 and multiply it with the weight array.
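A minimal sketch of that masking idea in graph-mode TF1 (w and threshold are illustrative names here, not the ones from the original class):
import tensorflow as tf
w = tf.Variable(tf.random_normal([100, 50]), name='w')
threshold = 0.001
# Keep weights whose magnitude exceeds the threshold: above +threshold or below -threshold
keep_mask = tf.logical_or(tf.greater(w, threshold), tf.less(w, -threshold))
# Convert the boolean mask to float32 and zero out the small weights via an assign op
zero_small_weights_op = w.assign(w * tf.cast(keep_mask, tf.float32))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(zero_small_weights_op)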
