I am trying to run the CNN code from "Deep Learning Coordinated Beamforming"; the predefined code is shown below.
from __future__ import division
import os
os.environ["KERAS_BACKEND"] = "theano"   # must be set before Keras is imported
os.environ["THEANO_FLAGS"] = "device=gpu%d" % (1)
import keras
import numpy as np
import theano as th
import theano.tensor as T
from keras.utils import np_utils
import keras.models as models
from keras.layers.core import Reshape,Dense,Dropout,Activation
from keras.optimizers import adam
from scipy.io import loadmat, savemat
import os.path
from keras import backend as K
# Model training function
def train(In_train, Out_train, In_test, Out_test,
          nb_epoch, batch_size, dr,
          num_hidden_layers, nodes_per_layer,
          loss_fn, n_BS, n_beams):
    in_shp = list(In_train.shape[1:])
    AP_models = []
    for idx in range(0, n_BS*n_beams-2, n_beams):
        idx_str = str(idx / n_beams + 1)
        model = models.Sequential()
        model.add(Dense(nodes_per_layer, activation='relu', init='he_normal',
                        name="dense" + idx_str + "1", input_shape=in_shp))
        model.add(Dropout(dr))
        for h in range(num_hidden_layers):
            model.add(Dense(nodes_per_layer, activation='relu',
                            init='he_normal', name="dense" + idx_str + "h" + str(h)))
            model.add(Dropout(dr))
        model.add(Dense(n_beams, activation='relu', init='he_normal',
                        name="dense" + idx_str + "o"))
        model.compile(loss=loss_fn, optimizer='adam')
        model.summary()

        # perform training ...
        earlyStoppingCallback = \
            keras.callbacks.EarlyStopping(monitor='val_loss',
                                          patience=5,
                                          verbose=0,
                                          mode='auto')
        filepath = 'DLCB_code_output/Results_mmWave_ML' + str(idx)
        history = model.fit(In_train,
                            Out_train[:, idx:idx + n_beams],
                            batch_size=batch_size,
                            nb_epoch=nb_epoch,
                            verbose=2,
                            validation_data=(In_test, Out_test[:, idx:idx + n_beams]),
                            callbacks=[
                                keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss',
                                                                verbose=0, save_best_only=True,
                                                                mode='auto'),
                                keras.callbacks.EarlyStopping(monitor='val_loss', patience=5,
                                                              verbose=0, mode='auto')
                            ])

        # re-load the best weights once training is finished
        model.load_weights(filepath)
        AP_models.append(model)
    return AP_models
# Reading input and output sets generated from MATLAB
In_set_file=loadmat('DLCB_dataset/DLCB_input.mat')
Out_set_file=loadmat('DLCB_dataset/DLCB_output.mat')
In_set=In_set_file['DL_input']
Out_set=Out_set_file['DL_output']
# Parameter initialization
num_user_tot=In_set.shape[0]
n_DL_size=[.001,.05,.1,.15,.2,.25,.3,.35,.4,.45,.5,.55,.6,.65,.7,.75,.8]
count=0
num_tot_TX=4
num_beams=128
for DL_size_ratio in n_DL_size:
    print(DL_size_ratio)
    count = count + 1
    DL_size = int(num_user_tot*DL_size_ratio)
    np.random.seed(2016)
    n_examples = DL_size
    num_train = int(DL_size * 0.8)
    num_test = int(num_user_tot*.2)
    train_index = np.random.choice(range(0, num_user_tot), size=num_train, replace=False)
    rem_index = set(range(0, num_user_tot)) - set(train_index)
    test_index = list(set(np.random.choice(list(rem_index), size=num_test, replace=False)))
    In_train = In_set[train_index]
    In_test = In_set[test_index]
    Out_train = Out_set[train_index]
    Out_test = Out_set[test_index]

    # Learning model parameters
    nb_epoch = 10
    batch_size = 100
    dr = 0.05  # dropout rate
    num_hidden_layers = 4
    nodes_per_layer = In_train.shape[1]
    loss_fn = 'mean_squared_error'

    # Model training
    AP_models = train(In_train, Out_train, In_test, Out_test,
                      nb_epoch, batch_size, dr,
                      num_hidden_layers, nodes_per_layer,
                      loss_fn, num_tot_TX, num_beams)

    # Model running/testing
    DL_Result = {}
    for id in range(0, num_tot_TX, 1):
        beams_predicted = AP_models[id].predict(In_test, batch_size=10, verbose=0)
        DL_Result['TX'+str(id+1)+'Pred_Beams'] = beams_predicted
        DL_Result['TX'+str(id+1)+'Opt_Beams'] = Out_test[:, id*num_beams:(id+1)*num_beams]
    DL_Result['user_index'] = test_index
    savemat('DLCB_code_output/DL_Result'+str(count), DL_Result)
UnboundLocalError: local variable 'batch_index' referenced before assignment
As you can see above, training fails with an error message about 'batch_index'. I searched the internet and I think there is a problem with local/global variables; however, I don't use a variable named 'batch_index' anywhere in my code. I am running Windows, Python 3.6.8, Keras 2.3.1 and TensorFlow 2.0.0, and I don't know how I should deal with this problem.
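From what I can tell (an unverified suggestion, not part of the original code), 'batch_index' is a local variable inside Keras itself: in keras/engine/training_arrays.py it is only assigned while iterating over batches, so if model.fit ever receives an empty training array the loop body never runs and the later reference to batch_index fails. Since num_train = int(DL_size * 0.8) can round down to zero for the smallest DL_size_ratio values, a guard like this would rule that case out:

# Sketch (assumed to sit inside the 'for DL_size_ratio in n_DL_size:' loop,
# right after num_train is computed): skip ratios whose 80% training split
# rounds down to zero samples, since fitting on an empty array never
# assigns batch_index inside Keras's internal batch loop.
if num_train == 0:
    print("Skipping DL_size_ratio=%s: empty training split" % DL_size_ratio)
    continue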
Related
I am trying to train a model but ran into an issue converting between NumPy arrays and a data loader.
Please help me check this code.
First, I prepare the data and convert it using the DataGenerator class:
import os
import numpy as np
import matplotlib.pyplot as plt
import pickle
import tensorflow as tf
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras import Input
from keras.layers import Flatten, Conv2D, MaxPool2D, Conv1D, GlobalMaxPooling1D, Dropout, Dense, BatchNormalization
from phobert_embeding import text2ids
from dataloader import DataGenerator
classes = ['__label__sống_trẻ', '__label__thời_sự', '__label__công_nghệ', '__label__sức_khỏe', '__label__giáo_dục', '__label__xe_360', '__label__thời_trang', '__label__du_lịch', '__label__âm_nhạc', '__label__xuất_bản', '__label__nhịp_sống', '__label__kinh_doanh', '__label__pháp_luật', '__label__ẩm_thực', '__label__thế_giới', '__label__thể_thao', '__label__giải_trí', '__label__phim_ảnh']
print("classes:", classes)
def load_text(filename):
    labels, ids = [], []
    with open(filename, 'r', encoding="utf-8") as f:
        c = 0
        for line in f:
            # print(c)
            line = line.strip().lower().split(" ", 1)
            label = line[0]
            text = line[1]
            # print(label)
            tkz = text2ids(text)
            ids.append(tkz)
            labels.append(classes.index(label))
            c += 1
    ids = tf.keras.preprocessing.sequence.pad_sequences(ids, maxlen=256, dtype="int32",
                                                        value=0, truncating="post", padding="post")
    ids[:, -1] = 2
    # print(ids[0])
    return ids, labels
ids_train, y_train = load_text("train.txt")
ids_test, y_test = load_text("test.txt")
print(len(ids_train))
print(len(y_train))
print(len(ids_test))
print(len(y_test))
# exit()
# with open("ids_train", 'wb') as f:
# pickle.dump(ids_train, f)
# with open("y_train", 'wb') as f:
# pickle.dump(y_train, f)
# with open("ids_test", 'wb') as f:
# pickle.dump(ids_test, f)
# with open("y_test", 'wb') as f:
# pickle.dump(y_test, f)
# exit()
# print("LOAD DATA DONE")
# with open("ids_train", 'rb') as f:
# ids_train = pickle.load(f)
# with open("y_train", 'rb') as f:
# y_train = pickle.load(f)
# with open("ids_test", 'rb') as f:
# ids_test = pickle.load(f)
# with open("y_test", 'rb') as f:
# y_test = pickle.load(f)
# print(ids_train)
# print("LOAD DATA FROM FILE DONE")
# exit()
train_generator = DataGenerator(ids_train, y_train, batch_size=8, n_classes=len(classes))
valid_generator = DataGenerator(ids_test, y_test, batch_size=8, n_classes=len(classes), shuffle=False)
print("train_generator:",len(train_generator))
print("valid_generator:",len(valid_generator))
model = Sequential()
model.add(Conv1D(128, kernel_size=5, input_shape=(256,768)))
# model.add(GlobalMaxPooling1D())
model.add(Conv1D(64, kernel_size=3, activation='relu'))
model.add(GlobalMaxPooling1D())
# model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(len(classes), activation='softmax'))
# model = Sequential()
# model.add(Dense(64, activation='relu'))
# model.add(BatchNormalization())
# model.add(Dropout(0.2))
# model.add(Dense(len(classes), activation='softmax'))
# model=multi_gpu_model(model, gpus=2)
# define the model
# model = tf.keras.Sequential([
# tf.keras.Input(shape=(256,768)),
# tf.compat.v1.keras.layers.CuDNNLSTM(64, return_sequences=False),
# tf.keras.layers.Flatten(),
# tf.keras.layers.Dense(64, activation='relu'),
# tf.keras.layers.BatchNormalization(),
# tf.keras.layers.Dropout(0.2),
# tf.keras.layers.Dense(len(classes))
# ])
After configuring the model, I compile and train it:
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
print(model.summary())
# # fit the model
model.fit_generator(train_generator, epochs=10, verbose=1, validation_data=valid_generator)
Here is the DataGenerator class, which I call to convert the arrays of data into a dataset:
import numpy as np
import keras
from phobert_embeding import get_emb_vector
from tensorflow.python.keras.utils.data_utils import Sequence
from keras.utils.np_utils import to_categorical
class DataGenerator(Sequence):
    'Generates data for Keras'
    def __init__(self, ids, labels, batch_size=16, max_seq_len=256, feature_len=768,
                 n_classes=18, shuffle=True):
        self.max_seq_len = max_seq_len
        self.feature_len = feature_len
        self.batch_size = batch_size
        self.labels = labels
        self.ids = ids
        self.n_classes = n_classes
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.floor(len(self.ids) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        X, y = self.__data_generation(indexes)
        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.ids))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def __data_generation(self, idx_temp):
        'Generates data containing batch_size samples'  # X : (n_samples, *dim, n_channels)
        # Initialization
        X = np.empty((self.batch_size, self.max_seq_len, self.feature_len))
        y = np.empty((self.batch_size), dtype=int)
        # Generate data
        for i, idx in enumerate(idx_temp):
            X[i,] = get_emb_vector(self.ids[idx])
            # Store class
            y[i] = self.labels[idx]
        # X = X[:,:,:,np.newaxis]
        # y = to_categorical(y, num_classes=self.n_classes)
        # print(y.shape)
        # exit()
        return X, y
The problem may be in the DataGenerator class or in how I call model.fit_generator; I have tried many solutions found online, but they all failed.
To run the model, please see the code here:
https://colab.research.google.com/drive/1utctbcsUqAN1ySfhFizzu3Cw6g9VcWnq?usp=sharing
I also tried Keras Hub for this case, and the issue still happens. I think the problem comes from the dataset processing.
I still have no idea about this case. Please help me.
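A minimal sanity check that may help narrow this down (my sketch, not part of the original code): pull one batch straight from the generator and compare it with what the model and loss expect, i.e. X of shape (8, 256, 768) and, for sparse_categorical_crossentropy, y of shape (8,) holding integer class indices.

# Sketch: inspect a single batch from the generator before training.
X_batch, y_batch = train_generator[0]
print(X_batch.shape, X_batch.dtype)   # expected: (8, 256, 768), float
print(y_batch.shape, y_batch.dtype)   # expected: (8,), int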
I trained a Keras model and saved it as a .pb file with the script below.
import sys
import os
import tensorflow.compat.v1 as tf
from keras import backend as K
from tensorflow.python.keras.backend import set_session
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dropout, Flatten, Dense, Activation
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras import callbacks
import time
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
    """
    Freezes the state of a session into a pruned computation graph.

    Creates a new computation graph where variable nodes are replaced by
    constants taking their current value in the session. The new graph will be
    pruned so subgraphs that are not necessary to compute the requested
    outputs are removed.
    @param session The TensorFlow session to be frozen.
    @param keep_var_names A list of variable names that should not be frozen,
        or None to freeze all the variables in the graph.
    @param output_names Names of the relevant graph outputs.
    @param clear_devices Remove the device directives from the graph for better portability.
    @return The frozen graph definition.
    """
    graph = session.graph
    with graph.as_default():
        freeze_var_names = list(set(v.op.name for v in tf.global_variables())
                                .difference(keep_var_names or []))
        output_names = output_names or []
        output_names += [v.op.name for v in tf.global_variables()]
        input_graph_def = graph.as_graph_def()
        if clear_devices:
            for node in input_graph_def.node:
                node.device = ''
        frozen_graph = tf.graph_util.convert_variables_to_constants(
            session, input_graph_def, output_names, freeze_var_names)
        return frozen_graph
start = time.time()
tf.compat.v1.disable_eager_execution()
DEV = False
argvs = sys.argv
argc = len(argvs)
if argc > 1 and (argvs[1] == "--development" or argvs[1] == "-d"):
    DEV = True
epochs = 2
train_data_path = 'data/train'
validation_data_path = 'data/test'
"""
Parameters
"""
img_width, img_height = 150, 150
batch_size = 32
samples_per_epoch = 24
validation_steps = 24
nb_filters1 = 32
nb_filters2 = 64
conv1_size = 3
conv2_size = 2
pool_size = 2
classes_num = 2
lr = 0.0004
model = Sequential()
model.add(Convolution2D(nb_filters1, conv1_size, conv1_size, padding ="same", input_shape=(img_width, img_height, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(Convolution2D(nb_filters2, conv2_size, conv2_size, padding ="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(Flatten())
model.add(Dense(256))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(classes_num, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(learning_rate=lr),
metrics=['accuracy'])
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
validation_data_path,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='categorical')
"""
Tensorboard log
"""
log_dir = './tf-log/'
tb_cb = callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0)
cbks = [tb_cb]
model.fit(
train_generator,
epochs=epochs,
validation_data=validation_generator,
callbacks=cbks)
target_dir = './models/'
if not os.path.exists(target_dir):
os.mkdir(target_dir)
model.save('./mymodels/model')
pred_node_names = [None]
pred = [None]
for i in range(1):
    pred_node_names[i] = "output_node" + str(i)
    pred[i] = tf.identity(model.outputs[i], name=pred_node_names[i])
frozen_graph = freeze_session(tf.keras.backend.get_session(), output_names=[out.op.name for out in model.outputs])
tf.train.write_graph(frozen_graph, './', 'xor.pbtxt', as_text=True)
tf.train.write_graph(frozen_graph, './', 'xor.pb', as_text=False)

# Calculate execution time
end = time.time()
dur = end - start
if dur < 60:
    print("Execution Time:", dur, "seconds")
elif dur > 60 and dur < 3600:
    dur = dur / 60
    print("Execution Time:", dur, "minutes")
else:
    dur = dur / (60*60)
    print("Execution Time:", dur, "hours")
After that, I want to load the model in OpenCV to run predictions, but readNetFromTensorflow() fails with the following error:
[libprotobuf ERROR D:\a\opencv-python\opencv-python\opencv\3rdparty\protobuf\src\google\protobuf\text_format.cc:292] Error parsing text-format opencv_tensorflow.GraphDef: 700:9: Unknown enumeration value of "DT_VARIANT" for field "type".
Traceback (most recent call last):
File "C:\Users\admin\Downloads\Image-Classification-by-Keras-and-Tensorflow-master\Using Tensorflow\classify_opencv.py", line 4, in <module>
tensorflowNet = cv2.dnn.readNetFromTensorflow('xor.pb','xor.pbtxt')
cv2.error: OpenCV(4.5.4-dev) D:\a\opencv-python\opencv-python\opencv\modules\dnn\src\tensorflow\tf_io.cpp:54: error: (-2:Unspecified error) FAILED: ReadProtoFromTextFile(param_file, param). Failed to parse GraphDef file: xor.pbtxt in function 'cv::dnn::ReadTFNetParamsFromTextFileOrDie
I went through the .pbtxt file, and it indeed contains the DT_VARIANT type, which I guess is not defined in the bundled third-party protobuf.
How do I solve this error?
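One observation, offered as an assumption rather than a verified fix: inside freeze_session, the line output_names += [v.op.name for v in tf.global_variables()] forces every global variable, including optimizer and dataset-iterator state, into the frozen graph, and such resource-style nodes are a typical source of DT_VARIANT attributes; freezing only the real model outputs (i.e. dropping that line) may keep them out of the .pb/.pbtxt. A small sketch to see which nodes actually carry DT_VARIANT:

import tensorflow.compat.v1 as tf
from google.protobuf import text_format

# Sketch: parse the text-format graph written above and list every node
# whose attributes include DT_VARIANT, to check whether they belong to
# optimizer/iterator state rather than to the model itself.
graph_def = tf.GraphDef()
with open('xor.pbtxt') as f:
    text_format.Merge(f.read(), graph_def)
for node in graph_def.node:
    for attr_name, attr in node.attr.items():
        if attr.type == tf.dtypes.variant.as_datatype_enum:
            print(node.name, node.op, attr_name)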
I am training a model for an image classification problem; I have 1000 images classified into 4 classes. When training the model I get a TypeError. I have reviewed my code several times and cannot find where the error is; if possible, could someone please suggest a reason for it? I am posting the code and the generated error below for reference.
"""
from tensorflow.keras.layers import Input, Concatenate, Dropout, Flatten, Dense, GlobalAveragePooling2D, Conv2D
from tensorflow.keras import backend as K
#from tensorflow.keras.utils import np_utils
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import optimizers
from tensorflow.keras.metrics import top_k_categorical_accuracy
from tensorflow.keras.models import Sequential, Model, load_model
import tensorflow as tf
from tensorflow.keras.initializers import he_uniform
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, TensorBoard, EarlyStopping, CSVLogger, ReduceLROnPlateau
#from tensorflow.compat.keras.backend import KTF
#import keras.backend.tensorflow_backend as KTF
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.inception_v3 import InceptionV3
import os
import matplotlib.pylab as plt
import numpy as np
import pandas as pd
#import numpy as np, Pillow, skimage, imageio, matplotlib
#from scipy.misc import imresize
from skimage.transform import resize
from tqdm import tqdm
# Path to class files
classes_file = "/home/DEV/Downloads/IMAGE_1000_CLASSES"
data_classes = pd.read_csv(classes_file, header=None)
# Instances with targets
targets = data_classes[1].tolist()
# Split data according to their classes
class_0 = data_classes[data_classes[1] == 0]
class_1 = data_classes[data_classes[1] == 1]
class_2 = data_classes[data_classes[1] == 2]
class_3 = data_classes[data_classes[1] == 3]
# Holdout split train/test set (Other options are k-folds or leave-one-out)
split_proportion = 0.9
split_size_0 = int(len(class_0)*split_proportion)
split_size_1 = int(len(class_1)*split_proportion)
split_size_2 = int(len(class_2)*split_proportion)
split_size_3 = int(len(class_3)*split_proportion)
new_class_0_train = np.random.choice(len(class_0), split_size_0, replace=False)
new_class_0_train = class_0.iloc[new_class_0_train]
new_class_0_test = ~class_0.iloc[:][0].isin(new_class_0_train.iloc[:][0])
new_class_0_test = class_0[new_class_0_test]
new_class_1_train = np.random.choice(len(class_1), split_size_1, replace=False)
new_class_1_train = class_1.iloc[new_class_1_train]
new_class_1_test = ~class_1.iloc[:][0].isin(new_class_1_train.iloc[:][0])
new_class_1_test = class_1[new_class_1_test]
new_class_2_train = np.random.choice(len(class_2), split_size_2, replace=False)
new_class_2_train = class_2.iloc[new_class_2_train]
new_class_2_test = ~class_2.iloc[:][0].isin(new_class_2_train.iloc[:][0])
new_class_2_test = class_2[new_class_2_test]
new_class_3_train = np.random.choice(len(class_3), split_size_3, replace=False)
new_class_3_train = class_3.iloc[new_class_3_train]
new_class_3_test = ~class_3.iloc[:][0].isin(new_class_3_train.iloc[:][0])
new_class_3_test = class_3[new_class_3_test]
x_train_list = pd.concat(
[new_class_0_train, new_class_1_train, new_class_2_train, new_class_3_train])
x_test_list = pd.concat(
[new_class_0_test, new_class_1_test, new_class_2_test, new_class_3_test])
# Load files
imagePath = "/home/DEV/Downloads/IMAGE_SET_1000/"
x_train = []
y_train = []
for index, row in tqdm(x_train_list.iterrows(), total=x_train_list.shape[0]):
    try:
        loadedImage = plt.imread(imagePath + str(row[0]) + ".jpg")
        x_train.append(loadedImage)
        y_train.append(row[1])
    except:
        # Try with .png file format if images are not properly loaded
        try:
            loadedImage = plt.imread(imagePath + str(row[0]) + ".png")
            x_train.append(loadedImage)
            y_train.append(row[1])
        except:
            # Print file names whenever it is impossible to load image files
            print(imagePath + str(row[0]))

x_test = []
y_test = []
for index, row in tqdm(x_test_list.iterrows(), total=x_test_list.shape[0]):
    try:
        loadedImage = plt.imread(imagePath + str(row[0]) + ".jpg")
        x_test.append(loadedImage)
        y_test.append(row[1])
    except:
        # Try with .png file format if images are not properly loaded
        try:
            loadedImage = plt.imread(imagePath + str(row[0]) + ".png")
            x_test.append(loadedImage)
            y_test.append(row[1])
        except:
            # Print file names whenever it is impossible to load image files
            print(imagePath + str(row[0]))

img_width, img_height = 139, 139
index = 0
for image in tqdm(x_train):
    # aux = imresize(image, (img_width, img_height, 3), "bilinear")
    aux = resize(image, (img_width, img_height))
    x_train[index] = aux / 255.0  # Normalization
    index += 1

index = 0
for image in tqdm(x_test):
    # aux = imresize(image, (img_width, img_height, 3), "bilinear")
    aux = resize(image, (img_width, img_height))
    x_test[index] = aux / 255.0  # Normalization
    index += 1
os.environ["KERAS_BACKEND"] = "tensorflow"
RANDOM_STATE = 42
def get_session(gpu_fraction=0.8):
    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    if num_threads:
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))
    else:
        return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
#KTF.set_session(get_session())
#k.tensorflow.set_session(get_session())

def precision(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision

def recall(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall

def fbeta_score(y_true, y_pred, beta=1):
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')
    # Set F-score as 0 if there are no true positives (sklearn-like).
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta ** 2
    fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
    return fbeta_score
nb_classes = 4
final_model = []
# Option = InceptionV3
model = InceptionV3(weights="imagenet", include_top=False,
input_shape=(img_width, img_height, 3), classifier_activation="softmax")
model.summary()
# Creating new outputs for the model
x = model.output
x = Flatten()(x)
#x = GlobalAveragePooling2D()(x)
x = Dense(512, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(512, activation="relu")(x)
x = Dropout(0.5)(x)
predictions = Dense(nb_classes, activation='softmax')(x)
final_model = Model(inputs=model.input, outputs=predictions)
# Metrics
learningRate = 0.001
#optimizer = model.compile(optimizer= 'adam' , loss= tensorflow.keras.losses.binary_crossentropy, metrics=['accuracy'])
optimizer = tf.keras.optimizers.SGD(learning_rate=learningRate, momentum=0.88, nesterov=True)
#optimizer = tf.keras.Adam(learning_rate=learningRate, momentum=0.88, nesterov=True)
# Compiling the model...
final_model.compile(loss="categorical_crossentropy", optimizer=optimizer,
metrics=["accuracy", fbeta_score])
#x_train = np.array(x_train)
x_train = np.asarray(x_train).astype(np.float32)
#x_test = np.array(x_test)
x_test = np.asarray(x_test).astype(np.float32)
# Defining targets...
y_train = np.concatenate([np.full((new_class_0_train.shape[0]), 0), np.full((new_class_1_train.shape[0]), 1),
np.full((new_class_2_train.shape[0]), 2), np.full((new_class_3_train.shape[0]), 3)])
y_test = np.concatenate([np.full((new_class_0_test.shape[0]), 0), np.full((new_class_1_test.shape[0]), 1),
np.full((new_class_2_test.shape[0]), 2), np.full((new_class_3_test.shape[0]), 3)])
#y_train = np_utils.to_categorical(y_train)
y_train = tf.keras.utils.to_categorical(y_train)
#y_test = np_utils.to_categorical(y_test)
y_test = tf.keras.utils.to_categorical(y_test)
modelFilename = "./model_inception.h5"
trainingFilename = "./training.csv"
nb_train_samples = y_train.shape[0]
nb_test_samples = y_test.shape[0]
#epochs = 10000
epochs = 1000
batch_size = 24
trainingPatience = 200
decayPatience = trainingPatience / 4
# Setting the data generator...
train_datagen = ImageDataGenerator(
horizontal_flip=True,
fill_mode="reflect",
zoom_range=0.2
)
train_generator = train_datagen.flow(x_train, y_train, batch_size=batch_size)
# Saving the model
checkpoint = ModelCheckpoint(modelFilename,
monitor='val_acc',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
save_freq=1)
adaptativeLearningRate = ReduceLROnPlateau(monitor='val_acc',
factor=0.5,
patience=decayPatience,
verbose=1,
mode='auto',
min_delta=0.0001,
cooldown=0,
min_lr=1e-8)
early = EarlyStopping(monitor='val_acc',
min_delta=0,
patience=trainingPatience,
verbose=1,
mode='auto')
csv_logger = CSVLogger(trainingFilename, separator=",", append=False)
# Callbacks
callbacks = [checkpoint, early, csv_logger, adaptativeLearningRate]
# Training of the model
final_model.fit(train_generator,
steps_per_epoch=nb_train_samples / batch_size,
epochs=epochs,
shuffle=True,
validation_data=(x_test, y_test),
validation_steps=nb_test_samples / batch_size,
callbacks=callbacks)
Error:
File "/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 986, in wrapper
raise e.ag_error_metadata.to_exception(e)
TypeError: in user code:
/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py:855 train_function *
return step_function(self, iterator)
/home/DEV/Downloads/codes/Classification_model.py:212 fbeta_score *
if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/autograph/operators/control_flow.py:1170 if_stmt
_tf_if_stmt(cond, body, orelse, get_state, set_state, symbol_names, nouts)
/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/autograph/operators/control_flow.py:1216 _tf_if_stmt
final_cond_vars = control_flow_ops.cond(
/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/util/dispatch.py:206 wrapper
return target(*args, **kwargs)
/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/util/deprecation.py:535 new_func
return func(*args, **kwargs)
/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/control_flow_ops.py:1254 cond
return cond_v2.cond_v2(pred, true_fn, false_fn, name)
/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/ops/cond_v2.py:90 cond_v2
false_graph = func_graph_module.func_graph_from_py_func(
/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py:999 func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/autograph/operators/control_flow.py:1213 aug_orelse
_verify_tf_cond_vars(new_body_vars_[0], new_orelse_vars, symbol_names)
/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/autograph/operators/control_flow.py:365 _verify_tf_cond_vars
nest.map_structure(
/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/util/nest.py:867 map_structure
structure[0], [func(*x) for x in entries],
/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/util/nest.py:867 <listcomp>
structure[0], [func(*x) for x in entries],
/home/DEV/anaconda3/lib/python3.8/site-packages/tensorflow/python/autograph/operators/control_flow.py:335 verify_single_cond_var
raise TypeError(
TypeError: 'retval_' has dtype int32 in the main branch, but dtype float32 in the else branch
As the error indicates, you returned int32 (return 0) in the main branch but float32 (return fbeta_score) in the else branch, and this happened in the fbeta_score function.
So change return 0 to return 0.0, as shown below.
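Applied to the fbeta_score function from the question, only the early return changes:

def fbeta_score(y_true, y_pred, beta=1):
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')
    # Return 0.0 (float) so both branches of the conditional produce the
    # same dtype under tf.function/autograph.
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0.0
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    bb = beta ** 2
    return (1 + bb) * (p * r) / (bb * p + r + K.epsilon())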
I am training ResNet-50 to classify 9 classes, using the following transfer-learning code to train the model.
The train and test loss and accuracy seem fine, but when I test the network on new images I see lots of mistakes.
I feel that the model is not learning well. Could you let me know what is wrong with my approach, and how I can solve this problem?
NUM_CLASSES = 9
CHANNELS = 3
IMAGE_RESIZE = 224
RESNET50_POOLING_AVERAGE = 'avg'
DENSE_LAYER_ACTIVATION = 'softmax'
OBJECTIVE_FUNCTION = 'categorical_crossentropy'
LOSS_METRICS = ['accuracy']
NUM_EPOCHS = 100
EARLY_STOP_PATIENCE = 3
STEPS_PER_EPOCH_TRAINING = 10
STEPS_PER_EPOCH_VALIDATION = 10
BATCH_SIZE_TRAINING = 100
BATCH_SIZE_VALIDATION = 100
BATCH_SIZE_TESTING = 1
from tensorflow.python.keras.applications import ResNet50
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense
resnet_weights_path = '/path/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
model = Sequential()
model.add(ResNet50(include_top = False, pooling = RESNET50_POOLING_AVERAGE, weights = resnet_weights_path))
model.add(Dense(NUM_CLASSES, activation = DENSE_LAYER_ACTIVATION))
model.layers[0].trainable = False
from tensorflow.python.keras import optimizers
sgd = optimizers.SGD(lr = 0.01, decay = 1e-6, momentum = 0.9, nesterov = True)
model.compile(optimizer = sgd, loss = OBJECTIVE_FUNCTION, metrics = LOSS_METRICS)
model.summary()
from tensorflow.python.keras import optimizers
sgd = optimizers.SGD(lr = 0.01, decay = 1e-6, momentum = 0.9, nesterov = True)
model.compile(optimizer = sgd, loss = OBJECTIVE_FUNCTION, metrics = LOSS_METRICS)
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
image_size = IMAGE_RESIZE
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = data_generator.flow_from_directory(
'/path train folder/train',
target_size=(image_size, image_size),
batch_size=BATCH_SIZE_TRAINING,
class_mode='categorical')
validation_generator = data_generator.flow_from_directory(
'/path test folder/test',
target_size=(image_size, image_size),
batch_size=BATCH_SIZE_VALIDATION,
class_mode='categorical')
(BATCH_SIZE_TRAINING, len(train_generator), BATCH_SIZE_VALIDATION, len(validation_generator))
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
cb_early_stopper = EarlyStopping(monitor = 'val_loss', patience = EARLY_STOP_PATIENCE)
cb_checkpointer = ModelCheckpoint(filepath = '/path/best.hdf5', monitor = 'val_loss', save_best_only = True, mode = 'auto')
fit_history = model.fit_generator(
train_generator,
steps_per_epoch=STEPS_PER_EPOCH_TRAINING,
epochs = NUM_EPOCHS,
validation_data=validation_generator,
validation_steps=STEPS_PER_EPOCH_VALIDATION,
callbacks=[cb_checkpointer, cb_early_stopper]
)
model.load_weights("/path/best.hdf5")
model.save('transfer_resnet.h5')
print(fit_history.history.keys())
This can have many reasons.
For one, it is possible that your data set is too small or not varied enough.
What you can try is to add a few more Dense layers in the top section, as sketched below.
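A minimal sketch of that suggestion, reusing the constants from the question (the extra layer widths of 512 and 256 are illustrative choices, not prescribed values):

model = Sequential()
model.add(ResNet50(include_top=False, pooling=RESNET50_POOLING_AVERAGE,
                   weights=resnet_weights_path))
# Extra Dense layers in the top section, as suggested above
model.add(Dense(512, activation='relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(NUM_CLASSES, activation=DENSE_LAYER_ACTIVATION))
model.layers[0].trainable = False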