This question already has answers here:
Custom TensorFlow metric: true positive rate at given false positive rate (2 answers)
Closed 1 year ago.
I'm trying to add some metrics to a BERT-style model, but I'm struggling with tf.metrics. For most metrics it's straightforward: you can use tf.metrics.mean. For a metric like the false positive rate it's not. I know there are tf.metrics.false_positives and tf.metrics.true_negatives, but since each tf.metrics function also returns an associated update op, you can't just do fpr = fp / (fp + tn). How does one go about this?
Here is the code:
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.metrics_impl import _aggregate_across_towers
from tensorflow.python.ops.metrics_impl import true_negatives
from tensorflow.python.ops.metrics_impl import false_positives
from tensorflow.python.ops.metrics_impl import _remove_squeezable_dimensions
def false_positive_rate(labels,
                        predictions,
                        weights=None,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
    if context.executing_eagerly():
        raise RuntimeError('false_positive_rate is not supported '
                           'when eager execution is enabled.')

    with variable_scope.variable_scope(name, 'false_alarm',
                                       (predictions, labels, weights)):
        predictions, labels, weights = _remove_squeezable_dimensions(
            predictions=math_ops.cast(predictions, dtype=dtypes.bool),
            labels=math_ops.cast(labels, dtype=dtypes.bool),
            weights=weights)

        false_p, false_positives_update_op = false_positives(
            labels,
            predictions,
            weights,
            metrics_collections=None,
            updates_collections=None,
            name=None)
        true_n, true_negatives_update_op = true_negatives(
            labels,
            predictions,
            weights,
            metrics_collections=None,
            updates_collections=None,
            name=None)

        def compute_false_positive_rate(true_n, false_p, name):
            # FPR = FP / (FP + TN), guarded against division by zero
            return array_ops.where(
                math_ops.greater(true_n + false_p, 0),
                math_ops.div(false_p, true_n + false_p), 0, name)

        def once_across_towers(_, true_n, false_p):
            return compute_false_positive_rate(true_n, false_p, 'value')

        false_positive_rate = _aggregate_across_towers(
            metrics_collections, once_across_towers, true_n, false_p)

        update_op = compute_false_positive_rate(true_negatives_update_op,
                                                false_positives_update_op,
                                                'update_op')

        if updates_collections:
            ops.add_to_collections(updates_collections, update_op)

        return false_positive_rate, update_op
Related
I'm trying to build a sequential neural network with Keras. I generate a dataset by plugging random inputs into a known function and train my model on this dataset, long enough to reach a steady loss. Then I ask the model to predict the x_train values, but instead of predicting something close to y_train, it returns the same value regardless of the input x. This value also happens to be the average of the y_train values. I don't understand what I'm doing wrong and why this is happening.
I'm using the following function for training the model:
def train_model(x_train, y_train, batch_size, input_size, layer_sizes,
                activations, optimizer, epochs, loss='MeanSquaredError'):
    assert len(layer_sizes) == len(activations)
    n_layers = len(layer_sizes)
    model = Sequential()
    model.add(LayerNormalization(input_dim=input_size))
    model.add(Dense(layer_sizes[0], kernel_regularizer='l2',
                    kernel_initializer='ones', activation=activations[0],
                    input_dim=input_size, name='layer1'))
    for i in range(1, n_layers):
        model.add(Dense(layer_sizes[i], kernel_initializer='ones',
                        activation=activations[i], name=f'layer{i+1}'))
    model.compile(
        optimizer=optimizer,
        loss=loss,  # MeanSquaredLogarithmicError
    )
    print(model.summary())
    history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)
    loss_history = history.history['loss']
    plt.scatter(x=np.arange(1, epochs + 1), y=loss_history)
    plt.show()
    return model
I then created an arbitrary function (just for test purposes) as:
def func(x1, x2, x3, x4):
    y = (x1**3 + (x2 * x3 + 2)) / (x4 + x2 * x1)
    return y
and made a random dataset with this function:
def random_points_in_range(n, ranges):
    points = np.empty((n, len(ranges)))
    for i, element in enumerate(ranges):
        start = min(element[1], element[0])
        interval = abs(element[1] - element[0])
        rand_check = np.random.rand(n)
        randoms = (rand_check * interval) + start
        points[:, i] = randoms.T
    return points

def generate_random_dataset(n=200, ranges=[(0, 10), (0, 10), (0, 10), (0, 10)]):
    x_dataset = random_points_in_range(n, ranges)
    y_dataset = np.empty(n)
    for i in range(n):
        x1, x2, x3, x4 = x_dataset[i]
        y_dataset[i] = func(x1, x2, x3, x4)
    return x_dataset, y_dataset
I then train a model with these functions:
x_train,y_train = generate_random_dataset()
layer_sizes = [6,8,10,10,1]
activations = [LeakyReLU(),'relu','swish','relu','linear']
opt = Adam(learning_rate=0.001)
epochs = 3000
model=train_model(x_train,y_train,5,4,layer_sizes,activations,opt,epochs,loss='MeanSquaredError')
If you want to run the code, these are the imports you need:
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
import random
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LayerNormalization
from tensorflow.keras.layers import LeakyReLU  # used in the activations list above
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
I am using a TF2 (2.3.0) NN to approximate the function y which solves the ODE: y' + 3y = 0
I have defined a custom loss class and function in which I am trying to differentiate the single output with respect to the single input so that the equation holds, provided that y_true is zero:
from tensorflow.keras.losses import Loss
import tensorflow as tf
class CustomLossOde(Loss):
    def __init__(self, x, model, name='ode_loss'):
        super().__init__(name=name)
        self.x = x
        self.model = model

    def call(self, y_true, y_pred):
        with tf.GradientTape() as tape:
            tape.watch(self.x)
            y_p = self.model(self.x)
        dy_dx = tape.gradient(y_p, self.x)
        loss = tf.math.reduce_mean(tf.square(dy_dx + 3 * y_pred - y_true))
        return loss
but running the following NN:
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras import Input
from custom_loss_ode import CustomLossOde
num_samples = 1024
x_train = 4 * (tf.random.uniform((num_samples, )) - 0.5)
y_train = tf.zeros((num_samples, ))
inputs = Input(shape=(1,))
x = Dense(16, 'tanh')(inputs)
x = Dense(8, 'tanh')(x)
x = Dense(4)(x)
y = Dense(1)(x)
model = Model(inputs=inputs, outputs=y)
loss = CustomLossOde(model.input, model)
model.compile(optimizer=Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.99),loss=loss)
model.run_eagerly = True
model.fit(x_train, y_train, batch_size=16, epochs=30)
For now I am getting 0 loss from the first epoch, which doesn't make any sense.
I have printed both y_true and y_pred from within the function and they seem OK, so I suspect that the problem is in the gradient, which I didn't succeed to print.
Appreciate any help
Defining a custom loss with the high-level Keras API is a bit difficult in this case. I would instead write the training loop from scratch, as it allows finer-grained control over what you can do.
I took inspiration from these two guides:
Advanced Automatic Differentiation
Writing a training loop from scratch
Basically, I used the fact that multiple tapes can be nested seamlessly. I use one to compute the loss function and the other to compute the gradients to be propagated by the optimizer.
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras import Input
num_samples = 1024
x_train = 4 * (tf.random.uniform((num_samples, )) - 0.5)
y_train = tf.zeros((num_samples, ))
inputs = Input(shape=(1,))
x = Dense(16, 'tanh')(inputs)
x = Dense(8, 'tanh')(x)
x = Dense(4)(x)
y = Dense(1)(x)
model = Model(inputs=inputs, outputs=y)
# using the high level tf.data API for data handling
x_train = tf.reshape(x_train,(-1,1))
dataset = tf.data.Dataset.from_tensor_slices((x_train,y_train)).batch(1)
opt = Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.99)
for step, (x, y_true) in enumerate(dataset):
    # we need to convert x to a Variable if we want the tape to be
    # able to compute the gradient of the output with respect to x
    x_variable = tf.Variable(x)
    with tf.GradientTape() as model_tape:
        with tf.GradientTape() as loss_tape:
            loss_tape.watch(x_variable)
            y_pred = model(x_variable)
        dy_dx = loss_tape.gradient(y_pred, x_variable)
        loss = tf.math.reduce_mean(tf.square(dy_dx + 3 * y_pred - y_true))
    grad = model_tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(grad, model.trainable_variables))
    if step % 20 == 0:
        print(f"Step {step}: loss={loss.numpy()}")
I have defined a custom metric for tensorflow.keras to compute the macro F1 score after every epoch as follows:
from tensorflow import argmax as tf_argmax
from sklearn.metrics import f1_score

def macro_f1(y_true, y_pred):
    # labels are one-hot encoded, so convert
    # [1,0,0] to 0, [0,1,0] to 1 and [0,0,1] to 2,
    # then pass these arrays to sklearn's f1_score.
    y_true = tf_argmax(y_true, axis=1)
    y_pred = tf_argmax(y_pred, axis=1)
    return f1_score(y_true, y_pred, average='macro')
and using it during model compilation
model_4.compile(loss='categorical_crossentropy',
                optimizer=Adam(lr=init_lr, decay=init_lr / num_epochs),
                metrics=[Recall(name='recall'),  # weighted_f1,
                         macro_f1])
and when I try to fit like this:
history_model_4 = model_4.fit(train_image_generator.flow(x=train_imgs, y=train_targets, batch_size=batch_size),
                              validation_data=(val_imgs, val_targets),
                              epochs=num_epochs,
                              class_weight=mask_weights_train,
                              callbacks=[model_save_cb, early_stop_cb, epoch_times_cb],
                              verbose=2)
this is the error:
OperatorNotAllowedInGraphError: in user code:

    /usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py:806 train_function *
        return step_function(self, iterator)
    <ipython-input-57-a890ea61878e>:6 macro_f1 *
        return f1_score(y_true, y_pred, average='macro')
    /usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:1095 f1_score *
        return fbeta_score(y_true, y_pred, 1, labels=labels,
    /usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:1217 fbeta_score *
        _, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
    /usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:1478 precision_recall_fscore_support *
        labels = _check_set_wise_labels(y_true, y_pred, average, labels,
    /usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:1301 _check_set_wise_labels *
        y_type, y_true, y_pred = _check_targets(y_true, y_pred)
    /usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:80 _check_targets *
        check_consistent_length(y_true, y_pred)
    /usr/local/lib/python3.6/dist-packages/sklearn/utils/validation.py:209 check_consistent_length *
        uniques = np.unique(lengths)
    <__array_function__ internals>:6 unique **
    /usr/local/lib/python3.6/dist-packages/numpy/lib/arraysetops.py:263 unique
        ret = _unique1d(ar, return_index, return_inverse, return_counts)
    /usr/local/lib/python3.6/dist-packages/numpy/lib/arraysetops.py:311 _unique1d
        ar.sort()
    /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py:877 __bool__
        self._disallow_bool_casting()
    /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py:487 _disallow_bool_casting
        "using a `tf.Tensor` as a Python `bool`")
    /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/ops.py:474 _disallow_when_autograph_enabled
        " indicate you are trying to use an unsupported feature.".format(task))

    OperatorNotAllowedInGraphError: using a `tf.Tensor` as a Python `bool` is not allowed: AutoGraph did convert this function. This might indicate you are trying to use an unsupported feature.
What caused this error, and how do I fix it so that I can use this as one of my evaluation metrics at the end of every epoch?
EDIT 1:
Note: all of this has been done in a Jupyter notebook; I have added ">>>" markers to separate lines.
# getting a batch to pass to model
>>> a_batch = train_image_generator.flow(x=train_imgs, y=train_targets, batch_size=batch_size).next()
# checking its type to ensure that it's what I thought it was
>>> type(a_batch)
# passing the batch to the model
>>> logits = model_4(a_batch)
# checking the type of output
>>> type(logits)
tensorflow.python.framework.ops.EagerTensor
# extracting only the passed targets to calculate f1-score
>>> _, dummy_targets = a_batch
# checking its type
>>> type(dummy_targets)
numpy.ndarray
>>> macro_f1(y_true=dummy_targets, y_pred=logits)
0.0811965811965812
sklearn is not TensorFlow code, and it is always recommended to avoid arbitrary Python code that has to execute inside TF's execution graph.
TensorFlow Addons already has an implementation of the F1 score (tfa.metrics.F1Score), so change your code to use that instead of your custom metric.
Make sure you pip install tensorflow-addons first, and then:
import tensorflow_addons as tfa
model_4.compile(loss='categorical_crossentropy',
                optimizer=Adam(lr=init_lr, decay=init_lr / num_epochs),
                metrics=[Recall(name='recall'),  # weighted_f1,
                         # num_classes must match the label dimension (3 for the one-hot labels above)
                         tfa.metrics.F1Score(num_classes=3, average='macro')])
I have a binary classification problem with categories background (bg) = 0, signal (sig) = 1, for which I am training NNs. For monitoring purposes, I am trying to implement a custom metric in Keras with TensorFlow backend that does the following:
1) Calculate the threshold on my NN output which would result in a false positive rate (classifying bg as signal) of X (in this case X = 0.02, but it could be anything).
2) Calculate the true positive rate at this threshold.
Given numpy arrays y_true, y_pred, I would write a function like:
def eff_at_2percent_metric(y_true, y_pred):
    # Find list of bg events
    bg_list = np.argwhere(y_true < 0.5)
    # Order by the NN output
    ordered_bg_predictions = np.flip(np.sort(y_pred[bg_list]), axis=0)
    # Find the threshold with 2% false positive rate
    threshold = ordered_bg_predictions[int(0.02 * len(ordered_bg_predictions))]
    # Find list of signal events
    sig_list = np.argwhere(y_true > 0.5)
    # Order these by NN output
    ordered_sig_predictions = np.sort(y_pred[sig_list])
    # Find true positive rate with this threshold
    sig_eff = 1 - np.searchsorted(ordered_sig_predictions, threshold) / len(ordered_sig_predictions)
    return sig_eff
Of course, this does not work because to implement a custom metric, y_true and y_pred are supposed to be TensorFlow tensors rather than numpy arrays. Is there any way I can make this work correctly?
There's a metric for sensitivity at specificity, which I believe is equivalent (specificity is one minus FPR).
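In recent TensorFlow versions this is exposed directly as a Keras metric. A minimal sketch, assuming TF 2.x and a compiled binary classifier called model (the names here are illustrative):

import tensorflow as tf

# TPR evaluated at the threshold where specificity is 0.98,
# i.e. where the false positive rate is 0.02
tpr_at_2pct_fpr = tf.keras.metrics.SensitivityAtSpecificity(specificity=0.98)

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[tpr_at_2pct_fpr])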
You can implement your own metric, and here is an example for the false positive rate:
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.metrics_impl import _aggregate_across_towers
from tensorflow.python.ops.metrics_impl import true_negatives
from tensorflow.python.ops.metrics_impl import false_positives
from tensorflow.python.ops.metrics_impl import _remove_squeezable_dimensions
def false_positive_rate(labels,
                        predictions,
                        weights=None,
                        metrics_collections=None,
                        updates_collections=None,
                        name=None):
    if context.executing_eagerly():
        raise RuntimeError('false_positive_rate is not supported '
                           'when eager execution is enabled.')

    with variable_scope.variable_scope(name, 'false_alarm',
                                       (predictions, labels, weights)):
        predictions, labels, weights = _remove_squeezable_dimensions(
            predictions=math_ops.cast(predictions, dtype=dtypes.bool),
            labels=math_ops.cast(labels, dtype=dtypes.bool),
            weights=weights)

        false_p, false_positives_update_op = false_positives(
            labels,
            predictions,
            weights,
            metrics_collections=None,
            updates_collections=None,
            name=None)
        true_n, true_negatives_update_op = true_negatives(
            labels,
            predictions,
            weights,
            metrics_collections=None,
            updates_collections=None,
            name=None)

        def compute_false_positive_rate(true_n, false_p, name):
            # FPR = FP / (FP + TN), guarded against division by zero
            return array_ops.where(
                math_ops.greater(true_n + false_p, 0),
                math_ops.div(false_p, true_n + false_p), 0, name)

        def once_across_towers(_, true_n, false_p):
            return compute_false_positive_rate(true_n, false_p, 'value')

        false_positive_rate = _aggregate_across_towers(
            metrics_collections, once_across_towers, true_n, false_p)

        update_op = compute_false_positive_rate(true_negatives_update_op,
                                                false_positives_update_op,
                                                'update_op')

        if updates_collections:
            ops.add_to_collections(updates_collections, update_op)

        return false_positive_rate, update_op
You can adapt the code to the true positive rate.
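As a rough usage sketch in TF 1.x graph mode (the placeholders and values below are illustrative, not part of the original answer):

import tensorflow as tf

labels = tf.placeholder(tf.bool, shape=[None])
predictions = tf.placeholder(tf.bool, shape=[None])

fpr, fpr_update_op = false_positive_rate(labels, predictions)

with tf.Session() as sess:
    # the metric counters live in local variables
    sess.run(tf.local_variables_initializer())
    sess.run(fpr_update_op,
             feed_dict={labels: [False, False, True, True],
                        predictions: [True, False, True, False]})
    # one false positive and one true negative among the negatives -> FPR = 0.5
    print(sess.run(fpr))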
I want to train, evaluate the accuracy and eventually predict with my model. This is my first time using high level APIs such as tf.estimator.
I'm getting a value error from estimator.train(train_input_fn):
'ValueError: features should be a dictionary of `Tensor's. Given type: '
I'm not sure what is going on here. My model is taking 3 inputs and producing a binary output from one neuron.
Before this error I was getting an error about the requested shape not equal to the actual shape, or something along those lines. I fixed it by reducing the batchSize down to 1, instead of 100. I'm sure this isn't going to do so well when it comes to training though.
Any ideas? Here's my code:
import tensorflow as tf
import numpy as np
import sys
sys.path.insert(0, '/Users/blairburns/Documents/DeepLearning/BackgroundColourPredictor/Dataset/Testing/')
sys.path.insert(0, '/Users/blairburns/Documents/DeepLearning/BackgroundColourPredictor/Dataset/Training/')
#other files
from TestDataNormaliser import *
from TrainDataNormaliser import *
learning_rate = 0.01
trainingIteration = 15
batchSize = 1
displayStep = 2
#Layers using tf.layers
def get_logits(features):
    l1 = tf.layers.dense(features, 3, activation=tf.nn.relu)
    l2 = tf.layers.dense(l1, 4, activation=tf.nn.relu)
    l3 = tf.layers.dense(l2, 1, activation=None)
    a = l3
    return a

# cost function
def get_loss(a, labels):
    # cross_entropy = tf.reduce_mean(-tf.reduce_sum(y * tf.log(a)))
    return tf.nn.sigmoid_cross_entropy_with_logits(logits=a, labels=labels)
    # cross_entropy = tf.reduce_mean((l3 - y)**2)
    # cross_entropy = -tf.reduce_sum(y*tf.log(a)) - tf.reduce_sum((1-y)*tf.log(1-a))

# optimizer
def get_train_op(loss):
    learning_rate = 1e-3
    optimizer = tf.train.RMSPropOptimizer(learning_rate)
    return optimizer.minimize(loss, global_step=tf.train.get_global_step())

# training
####
def get_inputs(feature_data, label_data, batch_size, n_epochs=None, shuffle=True):
    dataset = tf.data.Dataset.from_tensor_slices(
        (feature_data, label_data))
    dataset = dataset.repeat(n_epochs)
    if shuffle:
        dataset = dataset.shuffle(len(feature_data))
    dataset = dataset.batch(batch_size)
    features, labels = dataset.make_one_shot_iterator().get_next()
    return features, labels

def model_fn(features, labels, mode):
    a = get_logits(features)
    loss = get_loss(a, labels)
    train_op = get_train_op(loss)
    predictions = tf.greater(a, 0)
    accuracy = tf.metrics.accuracy(labels, predictions)
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        eval_metric_ops={'Accuracy': accuracy},
        predictions=predictions
    )

def train_input_fn():
    return get_inputs(
        trainArrayValues,
        trainArrayLabels,
        batchSize
    )

def eval_input_fn():
    return get_inputs(
        testArrayValues,
        testArrayLabels,
        batchSize,
        n_epochs=1,
        shuffle=False
    )

model_dir = './savedModel'
estimator = tf.estimator.LinearRegressor(feature_columns=[model_fn, model_dir])
# estimator.train(train_input_fn, max_steps=1)
estimator.train(train_input_fn)
estimator.evaluate(eval_input_fn)
Your problem is this line:
estimator = tf.estimator.LinearRegressor(feature_columns=[model_fn, model_dir])
You need to set the feature_columns argument to an array of feature columns. A feature column tells the estimator about the data you're feeding it.
It looks like all your input data is numeric, so I'd call tf.feature_column.numeric_column to create your feature column(s). The documentation is here. For example, the following code creates a numeric feature column containing x-coordinates:
xcol = tf.feature_column.numeric_column('x')
If all your estimator needs are x-coordinates, then you could create the estimator with the following code:
estimator = tf.estimator.LinearRegressor(feature_columns=[xcol])
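Since canned estimators expect the features returned by the input function to be a dict keyed by the feature column names, a minimal end-to-end sketch (with made-up data and names, assuming TF 1.x) could look like this:

import numpy as np
import tensorflow as tf

# made-up data purely for illustration
x_data = np.random.rand(100).astype(np.float32)
y_data = 2.0 * x_data + 1.0

def my_train_input_fn():
    # the feature dict key 'x' must match the feature column name below
    dataset = tf.data.Dataset.from_tensor_slices(({'x': x_data}, y_data))
    return dataset.shuffle(100).repeat().batch(32)

xcol = tf.feature_column.numeric_column('x')
estimator = tf.estimator.LinearRegressor(feature_columns=[xcol])
estimator.train(my_train_input_fn, max_steps=200)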