Low accuracy on captcha recognition - python

I am generating captchas of at most 4 digits with the following method:
# Imports assumed by this snippet: numpy, scipy.misc and the `captcha` package.
import numpy as np
from scipy import misc
from captcha.image import ImageCaptcha

def genData(n=30000, max_digs=4, width=150):
    capgen = ImageCaptcha()
    data = []
    target = []
    for i in range(n):
        x = np.random.randint(0, 10 ** max_digs)
        img = misc.imread(capgen.generate(str(x)))
        img = np.mean(img, axis=2)[:, :width]  # grayscale, cropped to a fixed width
        data.append(img.flatten())
        target.append(x)
    return np.array(data), np.array(target)
Then I process the data as follows:
train_data, train_target = genData()
test_data, test_target = genData(1000)
train_data = train_data.reshape(train_data.shape[0], 1, 150, 60)
test_data = test_data.reshape(test_data.shape[0], 1, 150, 60)
train_data = train_data.astype('float32')
test_data = test_data.astype('float32')
train_data /= 255
test_data /= 255
My model structure is as follows:
def get_model():
    # create model
    model = Sequential()
    model.add(Conv2D(30, (5, 5), input_shape=(1, 150, 60), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(15, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(50, activation='relu'))
    model.add(Dense(10 ** 4, activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Then I am training the model
model = get_model()
# Fit the model
model.fit(train_data, train_target, validation_data=(test_data, test_target), epochs=10, batch_size=200)
# Final evaluation of the model
scores = model.evaluate(test_data, test_target, verbose=0)
print("Large CNN Error: %.2f%%" % (100 - scores[1] * 100))
I don't know which part I am doing wrong, but my accuracy cannot even reach 1%.

You have 10,000(!) classes. How long do you train, and how much training data do you have per class?
Your approach is almost certainly the problem. While you can solve problems "brute force" like this, it is a very bad way to do so. You should first segment the captcha into single digits and then classify each digit with a 10-class classifier.
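As a rough illustration of that per-digit idea (an assumed sketch, not the answerer's code): with fixed-length captchas, zero-padded to 4 digits, and a 60x150 single-channel input, one convolutional trunk can feed four 10-way softmax heads, one per digit position. The layer sizes below are arbitrary.

from tensorflow.keras import layers, Model

def build_per_digit_model(num_digits=4):
    inp = layers.Input(shape=(60, 150, 1))
    x = layers.Conv2D(32, (3, 3), activation='relu')(inp)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Conv2D(64, (3, 3), activation='relu')(x)
    x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Flatten()(x)
    x = layers.Dense(256, activation='relu')(x)
    # one 10-class classifier per digit position instead of a single 10,000-way softmax
    outputs = [layers.Dense(10, activation='softmax', name='digit_%d' % i)(x)
               for i in range(num_digits)]
    model = Model(inp, outputs)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

The targets then become a list of four one-hot arrays (one per digit position) instead of a single index in [0, 9999].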

Related

Used the same data for validation and prediction, but got lower accuracy

I have a CNN model, and I split one dataset into a training set and a validation set.
After training the CNN for 50 epochs, the validation accuracy was 0.99.
But after training, when I used the same validation set with the trained model to predict, I got a much lower accuracy of only 0.49.
I don't know whether my code is wrong or not.
model_zero = models.Sequential()
model_zero.add(layers.experimental.preprocessing.Rescaling(1./255))
model_zero.add(layers.Conv2D(32, (3, 3),padding = 'same', input_shape = (64, 64, 3)))
model_zero.add(layers.Activation('relu'))
model_zero.add(layers.MaxPooling2D((2, 2)))
model_zero.add(layers.Conv2D(32, (3, 3),padding = 'same'))
model_zero.add(layers.BatchNormalization())
model_zero.add(layers.Activation('relu'))
model_zero.add(layers.MaxPooling2D((2, 2)))
model_zero.add(layers.Conv2D(64, (3, 3),padding = 'same'))
model_zero.add(layers.BatchNormalization())
model_zero.add(layers.Activation('relu'))
model_zero.add(layers.MaxPooling2D((2, 2)))
model_zero.add(layers.Conv2D(64, (3, 3),padding = 'same'))
model_zero.add(layers.BatchNormalization())
model_zero.add(layers.Activation('relu'))
model_zero.add(layers.MaxPooling2D((2, 2)))
model_zero.add(layers.Flatten())
model_zero.add(layers.Dense(128, activation='relu'))
model_zero.add(layers.Dense(num_classes, activation='softmax'))
model_zero.build(input_shape = (None, 64, 64, 3))
model_zero.compile(optimizer='adagrad',
                   loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                   metrics=['accuracy'])
training_zero_data = training_zero_data.shuffle(20)
kfold_Xtrain = training_zero_data.take(125)
kfold_Ytrain = training_zero_data.skip(125).take(63)
epochs = 30
history = model_zero.fit(kfold_Xtrain, epochs=epochs,
                         validation_data=kfold_Ytrain, verbose=1)
predict_zero = model_zero.predict(kfold_Ytrain)
predict_zero = np.argmax(predict_zero, axis=1)
labels_zero = np.array([])
for x, y in kfold_Ytrain:
    labels_zero = np.concatenate([labels_zero, y.numpy()])
cnn_zero_kfold_cm.append(confusion_matrix(labels_zero, predict_zero))
cnn_zero_kfold_accuracy.append(accuracy_score(labels_zero, predict_zero))
cnn_zero_kfold_recall.append(recall_score(labels_zero, predict_zero, average=None))
cnn_zero_kfold_precision.append(precision_score(labels_zero, predict_zero, average=None))
if acc1 < accuracy_score(labels_zero, predict_zero):
    c1 = model_zero
    acc1 = accuracy_score(labels_zero, predict_zero)

High accuracy in model.fit but low precision and recall

I've been training a CNN with Keras: a binary classifier that says whether a depth image contains a manhole or not. I've manually labeled the datasets with 0 (no manhole) and 1 (manhole). I have 2 datasets: one with 45k images to train the CNN and one with 26k images to test it.
Both datasets are unbalanced, with about twice as many negative images as positive ones.
This is the code:
# dimensions of our images.
img_width, img_height = 80, 60
n_positives_img, n_negatives_img = 17874, 26308
n_total_img = 44182
#Labeled arrays for datasets
arrayceros = np.zeros(n_negatives_img)
arrayunos = np.ones(n_positives_img)
#Reshaping the datasets to separate them into negatives and positives
arraynegativos = ds_negatives.reshape((n_negatives_img, img_height, img_width, 1))
arraypositivos = ds_positives.reshape((n_positives_img, img_height, img_width, 1))
#Labeling datasets with the arrays
ds_negatives_target = tf.data.Dataset.from_tensor_slices((arraynegativos, arrayceros))
ds_positives_target = tf.data.Dataset.from_tensor_slices((arraypositivos, arrayunos))
#Concatenate 2 datasets and shuffle them
ds_concatenate = ds_negatives_target.concatenate(ds_positives_target)
datasetfinal = ds_concatenate.shuffle(n_total_img)
Then I do the same for the second dataset, which is used for testing.
#Adding batch dimension to datasets 4dim
valid_ds = datasetfinal2.batch(12)
train_ds = datasetfinal.batch(12)
#Defining model
model = Sequential()
model.add(Conv2D(5, kernel_size=(5, 5),activation='relu',input_shape=(60,80,1),padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D((5, 5),padding='same'))
model.add(Dropout(0.3))
model.add(Conv2D(5, (5, 5), activation='relu',padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2),padding='same'))
model.add(Dropout(0.3))
model.add(Conv2D(5, (5, 5), activation='relu',padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2),padding='same'))
model.add(Dropout(0.3))
model.add(Conv2D(5, (5, 5), activation='relu',padding='same'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
#Compiling model
model.summary()
initial_learning_rate = 0.001
lr_schedule = keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate, decay_steps=100000, decay_rate=0.96, staircase=True
)
model.compile(
    loss="binary_crossentropy",
    optimizer=keras.optimizers.Adam(learning_rate=lr_schedule),
    metrics=["acc"],
)
# Define callbacks.
checkpoint_cb = keras.callbacks.ModelCheckpoint(
    "2d_image_classification.h5", save_best_only=True
)
early_stopping_cb = keras.callbacks.EarlyStopping(monitor="val_acc", patience=15)
#Fitting the model
history= model.fit(train_ds, validation_data=valid_ds, batch_size=100, epochs=5,callbacks=[checkpoint_cb, early_stopping_cb])
This gives me 99% accuracy on the training dataset and 95% on the test dataset.
But when I do the following, it gives me 60% precision for negative images and 45% for positive ones:
#Get the real labels of valid dataset
valid_labels = list(valid_ds.flat_map(lambda x, y: tf.data.Dataset.from_tensor_slices((x, y))).as_numpy_iterator())
valid_labels = [y for x, y in valid_labels]
y_pred = model.predict(valid_ds)
y_pred = (y_pred > 0.5).astype(float)
from sklearn.metrics import classification_report
print(classification_report(valid_labels, y_pred))
Why is this? I have printed both the predicted labels and the true labels, and it looks random. It makes no sense.
https://colab.research.google.com/drive/1bhrntDItqoeT0KLb-aKp0W8cV6LOQOtP?usp=sharing
If you need more information, just ask.
Thanks!

Audio processing with Conv1D in Keras

I am learning Keras through audio classification. I am implementing the code, with modifications, from https://github.com/deepsound-project/genre-recognition/blob/master/train_model.py.
The shape of the dataset is
X_train shape = (800, 32, 1)
y_train shape = (800, 10)
X_test shape = (200, 32, 1)
y_test shape = (200, 10)
The model
model = Sequential()
model.add(Conv1D(filters=256, kernel_size=5, input_shape=(32,1), activation="relu"))
model.add(BatchNormalization(momentum=0.9))
model.add(MaxPooling1D(2))
model.add(Dropout(0.5))
model.add(Conv1D(filters=256, kernel_size=5, activation="relu"))
model.add(BatchNormalization(momentum=0.9))
model.add(MaxPooling1D(2))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128, activation="relu", ))
model.add(Dense(10, activation='softmax'))
model.compile(
    loss='categorical_crossentropy',
    optimizer=Adam(lr=0.001),
    metrics=['accuracy'],
)
model.summary()
red_lr= ReduceLROnPlateau(monitor='val_loss',patience=2,verbose=2,factor=0.5,min_delta=0.01)
check=ModelCheckpoint(filepath=r'/content/drive/My Drive/Colab Notebooks/gen/cnn.hdf5', verbose=1, save_best_only = True)
History = model.fit(X_train,
                    y_train,
                    epochs=100,
                    #batch_size=512,
                    validation_data=(X_test, y_test),
                    verbose=2,
                    callbacks=[check, red_lr],
                    shuffle=True)
The accuracy graph
Loss graph
I do not understand why the val_acc stays in the range of 70%. I tried modifying the model architecture, including the optimizer, but there was no improvement.
Also, is it acceptable to have a large difference between loss and val_loss?
How can I improve the accuracy above 80%? Any help is appreciated.
Thank you.
I found it: I used the concatenate function from Keras to concatenate all the convolution layers, and it gives the best performance.
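For reference, a minimal sketch of what that could look like (a reconstruction under assumptions, not the poster's actual code): parallel Conv1D branches with different kernel sizes whose outputs are concatenated before the dense layers. The filter counts and kernel sizes here are illustrative.

from tensorflow.keras.layers import Input, Conv1D, MaxPooling1D, Flatten, Dense, Concatenate
from tensorflow.keras.models import Model

inp = Input(shape=(32, 1))
branches = []
for k in (3, 5, 7):  # a different kernel size per branch
    b = Conv1D(filters=128, kernel_size=k, padding='same', activation='relu')(inp)
    b = MaxPooling1D(2)(b)
    b = Flatten()(b)
    branches.append(b)
merged = Concatenate()(branches)  # concatenate all convolution branches
out = Dense(128, activation='relu')(merged)
out = Dense(10, activation='softmax')(out)
model = Model(inp, out)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])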

Different model performance in MLP and CNN

I'm experimenting with geometric shape classification. My dataset consists of 100x100 px thresholded black-and-white images of squares, circles and triangles: 3000 in total, 1000 per shape.
I got them as a CSV file, where each row is a one-dimensional representation of an image and the last column is the label.
I used MLPClassifier from sklearn to build a classifier. It performed well: almost 99% accuracy.
df = pd.read_csv("img_data.csv", sep=";")
df = df.sample(frac=1) # shuffling the whole dataset
X = df.drop('label', axis=1) # Because 'label' is the column of label
y = df['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
clf = MLPClassifier(solver='adam', activation="relu", alpha=1e-5, hidden_layer_sizes=(1000,), random_state=1, verbose=True)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print('accuracy',accuracy_score(y_test, y_pred))
Then I wanted to try a CNN. For that I used Keras with the TensorFlow backend. But the accuracy here couldn't get above 92%, even after 20 epochs. Here's my code:
df = pd.read_csv("img_data.csv", sep=";")
df = df.sample(frac=1) # shuffling the whole dataset
X = df.drop('label', axis=1) # Because 'label' is the column of label
y = df['label']
X=X.as_matrix()
X = np.reshape(X, (-1, 100, 100, 1)) #made 1d to 2d
a = list(y)
label_binarizer = sklearn.preprocessing.LabelBinarizer()
label_binarizer.fit(range(max(a)))
y = label_binarizer.transform(a) # encoding one hot for labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
model = Sequential()
model.add(Conv2D(32, 3, activation='relu', input_shape=[100, 100, 1]))
model.add(MaxPool2D())
model.add(BatchNormalization())
model.add(Conv2D(64, 3, activation='relu'))
model.add(MaxPool2D())
model.add(BatchNormalization())
model.add(Conv2D(128, 3, activation='relu'))
model.add(MaxPool2D())
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 20
model.fit(X_train, y_train,
          validation_data=(X_test, y_test),
          epochs=epochs, batch_size=64, verbose=1)
This seems to be a very simple problem. There is very little structure inside the data, so I think you could try to reduce the depth of the neural network by removing the last two convolution and max pooling layers. Instead increase the number of nodes in the fully-connected layer, like this:
model = Sequential()
model.add(Conv2D(32, 3, activation='relu', input_shape=[100, 100, 1]))
model.add(MaxPool2D())
model.add(BatchNormalization())
model.add(Conv2D(64, 3, activation='relu'))
model.add(MaxPool2D())
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(3, activation='softmax'))
You could also try some image augmentation techniques, like shifting and rotating, to enlarge your dataset (see the sketch below). Then I would expect the convnet to outperform the standard MLP.
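A minimal sketch of that augmentation idea, assuming the data is already in arrays X_train/y_train as in the question and a recent tf.keras where model.fit accepts a generator (older versions use fit_generator); the parameter values are arbitrary:

from tensorflow.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(
    rotation_range=20,       # random rotations up to 20 degrees
    width_shift_range=0.1,   # horizontal shifts up to 10% of the image width
    height_shift_range=0.1,  # vertical shifts up to 10% of the image height
)

model.fit(datagen.flow(X_train, y_train, batch_size=64),
          validation_data=(X_test, y_test),
          epochs=epochs)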
Best

Convolutional neural net testing accuracy stays constant after each epoch

I see the training accuracy improving after each iteration, but the testing accuracy stays fixed at exactly 0.7545 after every epoch. I understand hitting a ceiling on accuracy at some point, but I don't understand why I don't at least see slight variations in accuracy (up or down). I'm training on about 800 images in total.
Things I've tried:
- Switching to the SGD optimizer.
- Starting with a learning rate of 0.01 and reducing it down to 0.00000001.
- Removing the regularization layers.
#PARAMS
dropout_prob = 0.2
activation_function = 'relu'
loss_function = 'categorical_crossentropy'
verbose_level = 1
convolutional_batches = 32
convolutional_epochs = 10
inp_shape = X_train.shape[1:]
num_classes = 3
opt = SGD(lr=0.00001)
opt2 = 'adam'
def train_convolutional_neural():
    y_train_cat = np_utils.to_categorical(y_train, 3)
    y_test_cat = np_utils.to_categorical(y_test, 3)
    model = Sequential()
    model.add(Conv2D(filters=16, kernel_size=(3, 3), input_shape=inp_shape))
    model.add(Conv2D(filters=32, kernel_size=(3, 3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(rate=dropout_prob))
    model.add(Flatten())
    #model.add(Dense(64, activation=activation_function))
    model.add(Dropout(rate=dropout_prob))
    model.add(Dense(32, activation=activation_function))
    model.add(Dense(num_classes, activation='softmax'))
    model.summary()
    model.compile(loss=loss_function, optimizer=opt, metrics=['accuracy'])
    history = model.fit(X_train, y_train_cat, batch_size=convolutional_batches, epochs=convolutional_epochs, verbose=verbose_level, validation_data=(X_test, y_test_cat))
    model.save('./models/convolutional_model.h5')
