Training a Deep Neural Network Using TensorFlow - python

I am trying to implement the VGG16 network using TensorFlow.
To test the model, I want to classify a dataset of images.
I started by creating train_data with tensorflow.data.Dataset:
<TensorSliceDataset element_spec=(TensorSpec(shape=(215, 160, 3), dtype=tf.float64, name=None), TensorSpec(shape=(20,), dtype=tf.float64, name=None))>
(there are 20 output classes)
I created a custom 2D conv layer:
`class CustomConv2d(Layer):
    def __init__(self, filters, kernel_size, padding, name):
        super(CustomConv2d, self).__init__()
        self.conv = Conv2D(filters=filters,
                           kernel_size=kernel_size,
                           activation='relu',
                           padding=padding,
                           name=name,
                           )
        self.batchN = BatchNormalization()

    def call(self, x, training=True):
        output = self.conv(x)
        # Pass the training flag through so BatchNormalization only uses batch
        # statistics during training.
        output = self.batchN(output, training=training)
        return output`
Then I created a subclassed model:
`class VGG16(Model):
    def __init__(self, input_shape, NUM_OF_CLASSES=20, dropout_parameters=0.5):
        super(VGG16, self).__init__()
        self.dropout = Dropout(dropout_parameters)
        ### First Conv Block
        self.conv_11 = Conv2D(filters=53,
                              kernel_size=(3, 3),
                              activation='relu',
                              padding='same',
                              name='conv11',
                              input_shape=input_shape
                              )
        self.conv_12 = CustomConv2d(64, (3, 3), padding='same', name='conv_12')
        self.maxpool = MaxPool2D(pool_size=(2, 2), padding='same')
        ## Second Conv Block
        self.conv21 = CustomConv2d(64, (3, 3), padding='same', name='conv_21')
        self.conv22 = CustomConv2d(64, (3, 3), padding='same', name='conv_22')
        ## Third Conv Block
        self.conv31 = CustomConv2d(256, (3, 3), padding='same', name='conv_31')
        self.conv32 = CustomConv2d(256, (3, 3), padding='same', name='conv_32')
        self.conv33 = CustomConv2d(256, (3, 3), padding='same', name='conv_33')
        ## Fourth Conv Block
        self.conv41 = CustomConv2d(512, (3, 3), padding='same', name='conv_41')
        self.conv42 = CustomConv2d(512, (3, 3), padding='same', name='conv_42')
        self.conv43 = CustomConv2d(512, (3, 3), padding='same', name='conv_43')
        ## Fifth Conv Block
        self.conv51 = CustomConv2d(512, (3, 3), padding='same', name='conv_51')
        self.conv52 = CustomConv2d(512, (3, 3), padding='same', name='conv_52')
        self.conv53 = CustomConv2d(512, (3, 3), padding='same', name='conv_53')
        #####
        self.flatten = Flatten()
        self.dense1 = Dense(1024, activation='relu', name='Dense_1')
        self.dense2 = Dense(512, activation='relu', name='Dense_2')
        self.dense3 = Dense(NUM_OF_CLASSES, activation='softmax', name='Dense_3')

    def call(self, x, training=True):
        x = self.maxpool(self.conv_12(self.conv_11(x)))
        x = self.maxpool(self.conv22(self.conv21(x)))
        x = self.maxpool(self.conv33(self.conv32(self.conv31(x))))
        x = self.maxpool(self.conv43(self.conv42(self.conv41(x))))
        x = self.maxpool(self.conv53(self.conv52(self.conv51(x))))
        x = self.flatten(x)
        x = self.dense3(self.dense2(self.dense1(x)))
        return x

model = VGG16((215, 160, 3, 1))`
By the way, I don't know why I have to put 1 at the end of the shape.
After compiling the model, when I try to fit the data to the model I get this error:
`ValueError: in user code:
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 1021, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 1000, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.8/dist-packages/keras/engine/training.py", line 859, in train_step
y_pred = self(x, training=True)
File "/usr/local/lib/python3.8/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
ValueError: Exception encountered when calling layer "vgg16_2" (type VGG16).
in user code:
File "/tmp/ipykernel_49926/980605695.py", line 38, in call *
x = self.maxpool(self.conv_12(self.conv_11(x)))
File "/usr/local/lib/python3.8/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler **
raise e.with_traceback(filtered_tb) from None
File "/usr/local/lib/python3.8/dist-packages/keras/engine/input_spec.py", line 228, in assert_input_compatibility
raise ValueError(f'Input {input_index} of layer "{layer_name}" '
ValueError: Input 0 of layer "conv11" is incompatible with the layer: expected min_ndim=4, found ndim=3. Full shape received: (215, 160, 3)
Call arguments received:
• x=tf.Tensor(shape=(215, 160, 3), dtype=float32)
• training=True`
I thought maybe there is a problem with the shape of the Dataset.

Edit:
The problem was solved after splitting the data into batches with:
trainDataset = trainDataset.shuffle(buffer_size=20).prefetch(buffer_size=15).batch(32)
But now the training is very slow. Is it normal for it to take about 5 seconds per image of the batch?

It should be
model = VGG16((215, 160, 3))
because the Conv2D input_shape parameter takes 3 values: (height, width, channels).
You can then call the model on an input like model(np.ones((1, 215, 160, 3))), where 1 is the batch size.
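Putting that together with the batching from the edit, a minimal end-to-end sketch (the loss and optimizer choices and the AUTOTUNE prefetching are illustrative assumptions, not part of the original post; batching is usually applied before prefetching):
import tensorflow as tf

model = VGG16((215, 160, 3))   # (height, width, channels), no batch dimension here

# Batch the dataset so each element passed to fit() has shape (batch, 215, 160, 3).
trainDataset = (trainDataset
                .shuffle(buffer_size=20)
                .batch(32)
                .prefetch(tf.data.AUTOTUNE))

model.compile(optimizer='adam',
              loss='categorical_crossentropy',   # assuming one-hot labels of length 20
              metrics=['accuracy'])
model.fit(trainDataset, epochs=10)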

Related

Input 0 of layer "dense_16" is incompatible with the layer: expected min_ndim=2, found ndim=1. Full shape received: (None,)

I'm trying to fit some probability distributions using Keras and TensorFlow.
This is the code where I'm getting the error:
def build_env_model(learning_rate):
    InputLayer = Input(shape=(5,))
    Layer_1 = Dense(16, activation='tanh')(InputLayer)
    Layer_2 = Dense(16, activation='tanh')(Layer_1)
    position_mean = Dense(1, activation="linear")(Layer_2)
    position_sigma = Dense(1, activation=lambda x: tf.nn.elu(x) + 1)(Layer_2)
    velocity_mean = Dense(1, activation="linear")(Layer_2)
    velocity_sigma = Dense(1, activation=lambda x: tf.nn.elu(x) + 1)(Layer_2)
    angle_mean = Dense(1, activation="linear")(Layer_2)
    angle_sigma = Dense(1, activation=lambda x: tf.nn.elu(x) + 1)(Layer_2)
    angular_velocity_mean = Dense(1, activation="linear")(Layer_2)
    angular_velocity_sigma = Dense(1, activation=lambda x: tf.nn.elu(x) + 1)(Layer_2)
    reward_mean = Dense(1, activation=lambda x: tf.nn.elu(x) + 1)(Layer_2)
    reward_sigma = Dense(1, activation=lambda x: tf.nn.elu(x) + 1)(Layer_2)
    y_real = Input(shape=(5,))
    lossF = cost(position_mean, position_sigma, velocity_mean, velocity_sigma,
                 angle_mean, angle_sigma, angular_velocity_mean, angular_velocity_sigma,
                 reward_mean, reward_sigma, y_real)
    model = Model(inputs=[InputLayer, y_real],
                  outputs=[position_mean, position_sigma, velocity_mean, velocity_sigma,
                           angle_mean, angle_sigma, angular_velocity_mean,
                           angular_velocity_sigma, reward_mean, reward_sigma])
    model.add_loss(lossF)
    adamOptimizer = adam_v2.Adam(learning_rate=learning_rate)
    model.compile(optimizer=adamOptimizer, metrics=['mse'])
    return model
Training the model:
def update_env_model(self, state, action, reward, next_state, done):
    state = state[0]
    next_state = next_state[0]
    position, velocity, angle, angular_velocity = state
    ns_position, ns_velocity, ns_angle, ns_angular_velocity = next_state
    inp1 = np.array([position, velocity, angle, angular_velocity, action])
    inp2 = np.array([ns_position, ns_velocity, ns_angle, ns_angular_velocity, reward])
    self.env_model.fit([inp1, inp2], verbose=0)
Error:
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1051, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1040, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1030, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 889, in train_step
y_pred = self(x, training=True)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/usr/local/lib/python3.7/dist-packages/keras/engine/input_spec.py", line 228, in assert_input_compatibility
raise ValueError(f'Input {input_index} of layer "{layer_name}" '
ValueError: Exception encountered when calling layer "model_1" (type Functional).
Input 0 of layer "dense_16" is incompatible with the layer: expected min_ndim=2, found ndim=1. Full shape received: (None,)
Call arguments received by layer "model_1" (type Functional):
• inputs=('tf.Tensor(shape=(None,), dtype=float32)', 'tf.Tensor(shape=(None,), dtype=float32)')
• training=True
• mask=None
I'm stuck at this error and am not very well versed in TensorFlow. Thanks in advance for helping.
I tried passing the argument ndmin as 1 and 2 as well, but it didn't work.
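A minimal sketch of what the ndim complaint points at (my assumption, not something confirmed in the post): Input(shape=(5,)) expects batched arrays of shape (batch_size, 5), while inp1 and inp2 above are 1-D arrays of shape (5,). Adding an explicit batch dimension would make the shapes line up:
# Hypothetical adjustment inside update_env_model: reshape to (1, 5) so each
# input carries a batch dimension of 1.
inp1 = np.array([position, velocity, angle, angular_velocity, action]).reshape(1, 5)
inp2 = np.array([ns_position, ns_velocity, ns_angle, ns_angular_velocity, reward]).reshape(1, 5)
self.env_model.fit([inp1, inp2], verbose=0)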

ValueError: Can not squeeze dim[1], expected a dimension of 1, got 128

I am trying to build a transformer neural network for a text classification problem. I am following this tutorial but tweaking the encoder to include a layer with a softmax activation function.
This is the code that I changed:
class Encoder(tf.keras.layers.Layer):
    def __init__(self,
                 *,
                 num_layers,
                 d_model,            # Input/output dimensionality.
                 num_attention_heads,
                 dff,                # Inner-layer dimensionality.
                 input_vocab_size,   # Input vocabulary size.
                 dropout_rate=0.1
                 ):
        super().__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        # Embeddings + positional encoding.
        self.pos_embedding = PositionalEmbedding(input_vocab_size, d_model)
        # Encoder layers.
        self.enc_layers = [
            EncoderLayer(
                d_model=d_model,
                num_attention_heads=num_attention_heads,
                dff=dff,
                dropout_rate=dropout_rate)
            for _ in range(num_layers)]
        # Dropout.
        self.dropout = tf.keras.layers.Dropout(dropout_rate)
        self.flatten_layer = tf.keras.layers.GlobalAveragePooling1D()
        self.dense_layer_1 = tf.keras.layers.Dense(256, activation='relu')
        self.dense_layer_2 = tf.keras.layers.Dense(32, activation='relu')
        self.final_layer = tf.keras.layers.Dense(2, activation='softmax')

    # Masking.
    def compute_mask(self, x, previous_mask=None):
        return self.pos_embedding.compute_mask(x, previous_mask)

    def call(self, x, training):
        seq_len = tf.shape(x)[1]
        # Sum up embeddings and positional encoding.
        mask = self.compute_mask(x)
        x = self.pos_embedding(x)  # Shape `(batch_size, input_seq_len, d_model)`.
        # Add dropout.
        x = self.dropout(x, training=training)
        # N encoder layers.
        for i in range(self.num_layers):
            x = self.enc_layers[i](x, training, mask)
        # experimental thing, may not work
        x = self.flatten_layer(x)
        x = self.dense_layer_1(x)
        x = self.dense_layer_2(x)
        # x = tf.keras.layers.Dense(32, activation='relu')(x)
        x = self.final_layer(x)
        return x  # Shape `(batch_size, 2)` after the added classification head.
I am using the Twitter sentiment analysis dataset to classify positive or negative sentences.
My sequence length is 128.
The shape of my features is (43012, 128), and the shape of the labels is (43012, 1).
The model definition is:
model = tf.keras.Sequential([
    tf.keras.layers.InputLayer(input_shape=(128)),
    Encoder(num_layers=4, d_model=128, dff=512, num_attention_heads=8,
            input_vocab_size=tokenizers.en.get_vocab_size())
])
Which I have compiled as:
model.compile(
    optimizer="adam",
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"]
)
But when I try to train my model
history = model.fit(
    x=new_features,
    y=new_labels,
    epochs=200,
    batch_size=64,
    validation_split=0.2
)
it is giving me the following error:
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1160, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1146, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1135, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 994, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1053, in compute_loss
y, y_pred, sample_weight, regularization_losses=self.losses
File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 265, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 159, in __call__
losses, sample_weight, reduction=reduction
File "/usr/local/lib/python3.7/dist-packages/keras/utils/losses_utils.py", line 350, in compute_weighted_loss
) = squeeze_or_expand_dimensions(losses, None, sample_weight)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/losses_utils.py", line 224, in squeeze_or_expand_dimensions
sample_weight = tf.squeeze(sample_weight, [-1])
ValueError: Can not squeeze dim[1], expected a dimension of 1, got 128 for '{{node sparse_categorical_crossentropy/weighted_loss/Squeeze}} = Squeeze[T=DT_FLOAT, squeeze_dims=[-1]](Cast)' with input shapes: [?,128].
I tested the model with a random input without training and it works fine.
input = np.random.rand(5, 128)
x = model((input))
x.shape # (5, 2) as expected
Why am I getting this error and how can I fix this?
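One possible explanation (an assumption based on the traceback, not something stated in the post): Encoder.compute_mask propagates the (batch_size, 128) padding mask from PositionalEmbedding to the layer's output, and Keras then applies that mask as a per-timestep weight inside the loss. Since the added classification head reduces the output to (batch_size, 2), a mask whose second dimension is 128 can no longer be squeezed. A sketch that keeps the mask for the attention layers but stops it from reaching the loss:
    def call(self, x, training):
        # Build the padding mask for the attention layers directly from the
        # embedding instead of relying on Keras mask propagation.
        attention_mask = self.pos_embedding.compute_mask(x)
        x = self.pos_embedding(x)
        x = self.dropout(x, training=training)
        for i in range(self.num_layers):
            x = self.enc_layers[i](x, training, attention_mask)
        x = self.flatten_layer(x)
        x = self.dense_layer_1(x)
        x = self.dense_layer_2(x)
        return self.final_layer(x)

    def compute_mask(self, x, previous_mask=None):
        # The head emits one prediction per sequence, so no per-timestep mask
        # should be attached to the output.
        return None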

Tensorflow: Input 0 of layer "conv1d_17" is incompatible with the layer: expected min_ndim=3, found ndim=2. Full shape received: (None, 16)

Although I could find similar questions, I have not been able to fix this issue.
I am trying to classify text, but got this error:
Input 0 of layer "conv1d_17" is incompatible with the layer: expected min_ndim=3, found ndim=2. Full shape received: (None, 16)
# Training a Text Classification Model using a Convolutional Layer
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
df = pd.read_csv("Financeiro_Teste_V3.csv")
df.head(3)
df.info()
df = df.dropna()
X = df.loc[:, "HISTÓRICO"]
X
y = df.loc[:, ['PROCESSO']]
y.head(7)
training_sentences, testing_sentences, training_labels, testing_labels = train_test_split(X, y, random_state=0, train_size = .75)
training_sentences = training_sentences.to_numpy()
testing_sentences = testing_sentences.to_numpy()
# testing_sentences
#for x in testing_sentences:
# print(type(x), x)
## Data preprocessing
import numpy as np
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
vocab_size = 10000
max_length = 120
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"
# Initialize the Tokenizer class
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
# Generate the word index dictionary
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
# Generate and pad the training sequences
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
# Generate and pad the testing sequences
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
# Convert the labels lists into numpy arrays
training_labels = np.array(training_labels)
testing_labels = np.array(testing_labels)
## Build and Compile the Model
import tensorflow as tf
# Parameters
embedding_dim = 16
filters = 128
kernel_size = 1
dense_dim = 6
# Model Definition with Conv1D
model_conv = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.Conv1D(filters, kernel_size, activation='relu'),
    tf.keras.layers.GlobalMaxPooling1D(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(9, activation='softmax')
])
# Set the training parameters
model_conv.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),optimizer='adam',metrics=['accuracy'])
# Print the model summary
model_conv.summary()
## Train the Model
NUM_EPOCHS = 20
# Train the model
history_conv = model_conv.fit(training_padded, training_labels, epochs=NUM_EPOCHS, validation_data=(testing_padded, testing_labels))
import matplotlib.pyplot as plt
# Plot Utility
def plot_graphs(history, string):
    plt.plot(history.history[string])
    plt.plot(history.history['val_'+string])
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, 'val_'+string])
    plt.show()
# Plot the accuracy and loss history
plot_graphs(history_conv, 'accuracy')
plot_graphs(history_conv, 'loss')
model_conv.predict(testing_sentences)
WARNING:tensorflow:Model was constructed with shape (None, 120) for input KerasTensor(type_spec=TensorSpec(shape=(None, 120), dtype=tf.float32, name='embedding_3_input'), name='embedding_3_input', description="created by layer 'embedding_3_input'"), but it was called on an input with incompatible shape (None,).
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-69-98046d2f0914> in <module>
----> 1 model_conv.predict(testing_sentences)
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1145 except Exception as e: # pylint:disable=broad-except
1146 if hasattr(e, "ag_error_metadata"):
-> 1147 raise e.ag_error_metadata.to_exception(e)
1148 else:
1149 raise
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1801, in predict_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1790, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1783, in run_step **
outputs = model.predict_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1751, in predict_step
return self(x, training=False)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/usr/local/lib/python3.7/dist-packages/keras/engine/input_spec.py", line 228, in assert_input_compatibility
raise ValueError(f'Input {input_index} of layer "{layer_name}" '
ValueError: Exception encountered when calling layer "sequential_3" (type Sequential).
Input 0 of layer "conv1d_3" is incompatible with the layer: expected min_ndim=3, found ndim=2. Full shape received: (None, 16)

How to fit image data correctly to a model in python?

I am trying to train a CNN model, but I really don't understand how to do it properly. I am still learning about this kind of thing, so I'm really lost. I have already tried a few things but still cannot get my head around it. Can someone explain to me how to do it properly? When I try to fit the training data to the model, this error pops up.
WARNING:tensorflow:Model was constructed with shape (None, 224, 224, 3) for input KerasTensor(type_spec=TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name='input_1'), name='input_1', description="created by layer 'input_1'"), but it was called on an input with incompatible shape (None,).
Traceback (most recent call last):
File "G:/Skripsi/Program/training.py", line 80, in <module>
train.train()
File "G:/Skripsi/Program/training.py", line 70, in train
model.fit(self.x_train, self.y_train, epochs=2, verbose=1)
File "G:\Skripsi\Program\venv\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "G:\Skripsi\Program\venv\lib\site-packages\tensorflow\python\framework\func_graph.py", line 1129, in autograph_handler
raise e.ag_error_metadata.to_exception(e)
ValueError: in user code:
File "G:\Skripsi\Program\venv\lib\site-packages\keras\engine\training.py", line 878, in train_function *
return step_function(self, iterator)
File "G:\Skripsi\Program\venv\lib\site-packages\keras\engine\training.py", line 867, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "G:\Skripsi\Program\venv\lib\site-packages\keras\engine\training.py", line 860, in run_step **
outputs = model.train_step(data)
File "G:\Skripsi\Program\venv\lib\site-packages\keras\engine\training.py", line 808, in train_step
y_pred = self(x, training=True)
File "G:\Skripsi\Program\venv\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "G:\Skripsi\Program\venv\lib\site-packages\keras\engine\input_spec.py", line 227, in assert_input_compatibility
raise ValueError(f'Input {input_index} of layer "{layer_name}" '
ValueError: Exception encountered when calling layer "model" (type Functional).
Input 0 of layer "conv2d" is incompatible with the layer: expected min_ndim=4, found ndim=1. Full shape received: (None,)
Call arguments received:
• inputs=tf.Tensor(shape=(None,), dtype=int32)
• training=True
• mask=None
This is my code for training the model:
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from densenet201 import DenseNet201
import tensorflow as tf
import pandas as pd
import numpy as np
import cv2
import os

dataset_folder = "./datasets/train_datasets"


class TrainingPreprocessing:

    @staticmethod
    def preprocessing_train(path):
        images = cv2.imread(path, 3)
        images_resize = cv2.resize(src=images, dsize=(224, 224), interpolation=cv2.INTER_LINEAR)
        images_normalize = cv2.normalize(images_resize, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,
                                         dtype=cv2.CV_32F)
        return images_normalize.reshape(224, 224, 3)


class Training:

    @staticmethod
    def load_data():
        """Loads and Preprocess dataset"""
        train_labels_encode = []
        train_labels = []
        train_data = []
        file_list = os.listdir(dataset_folder)
        for folder in file_list:
            file_list2 = os.listdir(str(dataset_folder) + '/' + str(folder))
            for images in file_list2:
                train_labels_encode.append(folder)
                train_labels.append(folder)
                train_data.append(np.array(TrainingPreprocessing.preprocessing_train(
                    str(dataset_folder) + '/' + str(folder) + '/' + str(images)
                )))
        labels = np.array(train_labels)
        data = np.array(train_data)
        return labels, data

    def split_data(self):
        """Split the preprocessed dataset to train and test data"""
        x, y = self.load_data()
        self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(x, y, test_size=0.20, random_state=0)
        print('Training data shape : ', self.x_train.shape, self.y_train.shape)
        print('Testing data shape : ', self.x_test.shape, self.y_test.shape)

    def train(self):
        """Compile and fit the DenseNet model"""
        input_shape = 224, 224, 3
        number_classes = 2
        model = DenseNet201.densenet(input_shape, number_classes)
        model.summary()
        model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=["accuracy"])
        model.fit(self.x_train, self.y_train, epochs=2, verbose=1)
        model.save_weights('densenet201_best_model.h5', overwrite=True)
        loss, accuracy = model.evaluate(self.x_test, self.y_test)
        print("[INFO] accuracy: {:.2f}%".format(accuracy * 100))


train = Training()
train.split_data()
train.train()
And this is the code for the CNN network:
from tensorflow.keras.layers import AveragePooling2D, GlobalAveragePooling2D, MaxPool2D
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Dense
from tensorflow.keras.layers import ReLU, concatenate, Dropout
from tensorflow.keras.models import Model
import tensorflow.keras.layers as layers
import tensorflow.keras.backend as K
import tensorflow as tf


class DenseNet201:

    def densenet(image_shape, number_classes, growth_rate=32):

        def batch_relu_conv(x, growth_rate, kernel=1, strides=1):
            x = BatchNormalization()(x)
            x = ReLU()(x)
            x = Conv2D(growth_rate, kernel, strides=strides, padding='same', kernel_initializer="he_uniform")(x)
            return x

        def dense_block(x, repetition):
            for _ in range(repetition):
                y = batch_relu_conv(x, 4 * growth_rate)
                y = batch_relu_conv(y, growth_rate, 3)
                x = concatenate([y, x])
            return x

        def transition_layer(x):
            x = batch_relu_conv(x, K.int_shape(x)[-1] // 2)
            x = AveragePooling2D(2, strides=2, padding='same')(x)
            return x

        inputs = Input(image_shape)
        x = Conv2D(64, 7, strides=2, padding='same', kernel_initializer="he_uniform")(inputs)
        x = MaxPool2D(3, strides=2, padding='same')(x)
        for repetition in [6, 12, 48, 32]:
            d = dense_block(x, repetition)
            x = transition_layer(d)
        x = GlobalAveragePooling2D()(d)
        output = Dense(number_classes, activation='softmax')(x)
        model = Model(inputs, output)
        return model
It seems you inverted data and labels (x and y) in the function
def load_data(), which returns: return labels, data
I think you are calling model.fit(self.x_train, self.y_train, epochs=2, verbose=1)
with labels first and then data, hence the model complaining about not getting the expected data shape.
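A minimal sketch of the swap described above (assuming the rest of the Training class is unchanged), shown as two equivalent options:
# Option 1: in load_data(), return the images first and the labels second.
return data, labels

# Option 2: keep the original return order and swap the unpacking in split_data().
y, x = self.load_data()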

CNN prediction using tensorflow

I'm quite new to python and tensorflow, but already managed to build, train and validate a CNN with my own database of images saved as tf.records.
Now I want the model to read in a single picture and predict in real time. Therefore, I wanted to modify my validation script by getting rid of the parser (which decoded my images saved as tf.records) and by not batching the input images, since I only want to predict one. Somehow I always get the following error:
TypeError: Value passed to parameter 'input' has DataType uint8 not in list of allowed values: float16, bfloat16, float32, float64
I took a closer look at the script I used to create the tf.records and compared them to the parser I used in the scripts for training and validation, but wasn't able to find the mistake.
I would be thankful, if you could help me to find the mistake or show me an easier way to predict the classes with an already trained CNN.
import tensorflow as tf
import cv2

num_classes = 2
crop_top = 5
crop_bottom = 10
crop_sides = 5
img_size_height = 80
img_size_width = 100
model_dir = "./2cv_128fc"


def load_image():
    img = cv2.imread('./dir_pred_img/img_2.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.resize(img, (img_size_width + (2 * crop_sides), img_size_height + crop_top + crop_bottom),
                     interpolation=cv2.INTER_CUBIC)
    img = img[crop_top:(img_size_height + crop_top), crop_sides:(img_size_width + crop_sides)]
    features = {'image': img}
    return features


def conv_nn(input_layer):
    conv_1 = tf.layers.conv2d(inputs=input_layer, name='conv_layer_1', filters=32, kernel_size=3, padding='same',
                              activation=tf.nn.relu)
    pool_1 = tf.layers.max_pooling2d(inputs=conv_1, pool_size=2, strides=2)
    conv_2 = tf.layers.conv2d(inputs=pool_1, name='conv_layer_2', filters=32, kernel_size=3, padding='same',
                              activation=tf.nn.relu)
    pool_2 = tf.layers.max_pooling2d(inputs=conv_2, pool_size=2, strides=2)
    flatten = tf.contrib.layers.flatten(pool_2)
    fc_layer = tf.layers.dense(inputs=flatten, name='fully_connected_layer', units=128, activation=tf.nn.relu)
    fc_layer = tf.layers.dropout(fc_layer, rate=0.5, noise_shape=None, seed=None)
    output_layer = tf.layers.dense(inputs=fc_layer, name='output_layer', units=num_classes)
    return output_layer


def model_fn(features):
    input_layer = features["image"]
    input_layer = tf.identity(input_layer, name="input_tensor")
    input_layer = tf.reshape(input_layer, [-1, img_size_height, img_size_width, 1])  # 1. tensor 2. shape
    input_layer = tf.identity(input_layer, name="input_tensor_reshaped")
    logits = conv_nn(input_layer)
    pred = tf.nn.softmax(logits=logits)
    return pred


model = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir)
prediction = list(model.predict(input_fn=load_image))
print(prediction[0])
full error message:
WARNING:tensorflow:Input graph does not use tf.data.Dataset or contain a QueueRunner. That means predict yields forever. This is probably a mistake.
Traceback (most recent call last):
File "C:/Users/Dell/PycharmProjects/create_data/pred_img.py", line 54, in <module>
prediction = list(model.predict(input_fn=load_image))
File "C:\Users\Dell\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\estimator\estimator.py", line 577, in predict
features, None, model_fn_lib.ModeKeys.PREDICT, self.config)
File "C:\Users\Dell\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\estimator\estimator.py", line 1195, in _call_model_fn
model_fn_results = self._model_fn(features=features, **kwargs)
File "C:/Users/Dell/PycharmProjects/create_data/pred_img.py", line 47, in model_fn
logits = conv_nn(input_layer)
File "C:/Users/Dell/PycharmProjects/create_data/pred_img.py", line 27, in conv_nn
activation=tf.nn.relu)
File "C:\Users\Dell\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\layers\convolutional.py", line 417, in conv2d
return layer.apply(inputs)
File "C:\Users\Dell\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\engine\base_layer.py", line 817, in apply
return self.__call__(inputs, *args, **kwargs)
File "C:\Users\Dell\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\layers\base.py", line 374, in __call__
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
File "C:\Users\Dell\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\engine\base_layer.py", line 757, in __call__
outputs = self.call(inputs, *args, **kwargs)
File "C:\Users\Dell\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\keras\layers\convolutional.py", line 194, in call
outputs = self._convolution_op(inputs, self.kernel)
File "C:\Users\Dell\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\ops\nn_ops.py", line 868, in __call__
return self.conv_op(inp, filter)
File "C:\Users\Dell\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\ops\nn_ops.py", line 520, in __call__
return self.call(inp, filter)
File "C:\Users\Dell\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\ops\nn_ops.py", line 204, in __call__
name=self.name)
File "C:\Users\Dell\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\ops\gen_nn_ops.py", line 1043, in conv2d
data_format=data_format, dilations=dilations, name=name)
File "C:\Users\Dell\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 609, in _apply_op_helper
param_name=input_name)
File "C:\Users\Dell\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 60, in _SatisfiesTypeConstraint
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
TypeError: Value passed to parameter 'input' has DataType uint8 not in list of allowed values: float16, bfloat16, float32, float64
The following code describes how you can implement this with Keras.
import keras
from keras.layers import Input, Convolution2D, MaxPooling2D, Cropping2D, Dense, Flatten, Dropout
from keras.preprocessing import image
from keras.models import Model
from keras.optimizers import Adam
import cv2
import numpy as np

# parameters
num_classes = 2
crop_top = 5
crop_bottom = 10
crop_sides = 5
img_size_height = 80
img_size_width = 100
channels = 3
input_shape = (img_size_height, img_size_width, channels)
activation = 'relu'
learning_rate = 0.0001

if num_classes == 2:
    loss = 'binary_crossentropy'
else:
    loss = 'categorical_crossentropy'

test_image = image.load_img('./data/img.png')
test_image = image.img_to_array(test_image)
input_shape = test_image.shape


def model(input_shape=input_shape):
    inputs = Input(shape=input_shape)
    # cropping=((pixels_from_top, pixels_from_bottom), (pixels_from_left, pixels_from_right))
    cropping = Cropping2D(cropping=((crop_top, crop_bottom), (crop_sides, crop_sides)))(inputs)
    conv_1 = Convolution2D(32, kernel_size=(3, 3), padding='same', activation=activation)(cropping)
    pool_1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv_1)
    conv_2 = Convolution2D(32, kernel_size=(3, 3), padding='same', activation=activation)(pool_1)
    pool_2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv_2)
    flatten = Flatten()(pool_2)
    dense_1 = Dense(128, activation=activation, name='fully_connected_layer')(flatten)
    dropout = Dropout(0.5)(dense_1)
    outputs = Dense(num_classes, activation='softmax')(dropout)
    model = Model(inputs=inputs, outputs=outputs)
    adam = Adam(lr=learning_rate)
    model.compile(optimizer=adam, loss=loss, metrics=['acc', 'mse', 'mae'])
    model.summary()
    return model


test_image = np.expand_dims(test_image, axis=0)
model = model(input_shape)
# note that without training we will only get random results
prediction = model.predict_on_batch(test_image)
print(prediction)
I found a solution.
I added the following lines to the end of def load_image() and returned dataset instead of features.
dataset = tf.data.Dataset.from_tensors(features)
return dataset
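Since the original TypeError was about the uint8 dtype rather than the input pipeline, casting the image to float32 inside load_image() before building the dataset may also be needed (an assumption on my part, not part of the posted solution):
# inside load_image(), before building the features dict
img = img.astype('float32')   # hypothetical cast: conv2d rejects uint8 inputs
features = {'image': img}
dataset = tf.data.Dataset.from_tensors(features)
return dataset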
