keras autoencoder cnn not training - python

I'm working on a neural network and one of the pieces I want to use is an autoencoder. However for some reason the training losses remain stuck at around 600.000 which is roughly equivalent to a loss of about 0.5 per pixel which seems to heavily indicate that no learning is taking place and that the results are somehow completly random/always the same. Anybody notice what I'm doing wrong?
Full code:
import tensorflow
import keras
import pandas as pd
import numpy as np
import random
import os
import cv2
import matplotlib.pyplot as plt
from keras.datasets import mnist
from keras.models import Model, Sequential
from keras.layers import Dense, Conv2D, Dropout, BatchNormalization, Input, Reshape, Flatten, Conv2DTranspose, MaxPooling2D, UpSampling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam
TrainingDirectory="trainingdata"
modelPath = "models/autoencoder1"
def main():
model = make_model()
model.fit_generator(mygenerator(),steps_per_epoch=2000,epochs=5)
model.save(modelPath)
#ENCODER
def make_model():
input_layer = Input(shape=(None,None, 3))
# encoder
h = Conv2D(64, (3, 3), activation='relu', padding='same')(input_layer)
h = MaxPooling2D((2, 2), padding='same')(h)
# decoder
h = Conv2D(64, (3, 3), activation='relu', padding='same')(h)
h = UpSampling2D((2, 2))(h)
output_layer = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(h)
#assemble network
ae = Model(input_layer, output_layer)
ae.summary()
ae.compile(optimizer="adam", loss="mse")
return ae
def Preprocess(image):
resized_image = scale_down(image)
return resized_image
def scale_down(image):
max_size= max(image.shape)
scale=1;
if(max_size>750):
scale=750/max_size
width = int(image.shape[1] * scale)
height = int(image.shape[0] * scale)
if width%72!=0:
width=width+(72-width%72)
if height%72!=0:
height=height+(72-height%72)
dsize = (width, height)
image=cv2.resize(image, dsize)
return image
def mygenerator():
batch_features=None
batch_labels=None
while True:
target=random.choice(os.listdir(TrainingDirectory))
print(target)
batch_features=Preprocess(cv2.imread(TrainingDirectory+'/'+target+"/input.jpg", cv2.IMREAD_COLOR))
batch_labels=Preprocess(cv2.imread(TrainingDirectory+'/'+target+"/labelWalls.png", cv2.IMREAD_COLOR))
print(batch_features.shape)
yield np.array([batch_features]), np.array([batch_features])
main()

Related

Distiungish between Composite fraction and Subtraction symbol in machine learning

I am working in a project name "Handwritten Math Evaluation"
SO what basically happen in this is that there are 11 classes of (0 - 9) and (+,-) each containing 50 clean handwritten digits in them. Then I trained a CNN model for it with 80 % of data used in training and 20 % using in testing of model which lead in a accuracy of 98.83 %. Here is the code for the architecture of CNN model :-
import pandas as pd
import numpy as np
import pickle
np.random.seed(1212)
import keras
from keras.models import Model
from keras.layers import *
from keras import optimizers
from keras.layers import Input, Dense
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
from keras.utils.np_utils import to_categorical
from keras.models import model_from_json
import matplotlib.pyplot as plt
model = Sequential()
model.add(Conv2D(30, (5, 5), input_shape =(28,28,1), activation ='relu'))
model.add(MaxPooling2D(pool_size =(2, 2)))
model.add(Conv2D(15, (3, 3), activation ='relu'))
model.add(MaxPooling2D(pool_size =(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation ='relu'))
model.add(Dense(50, activation ='relu'))
model.add(Dense(12, activation ='softmax'))
# Compile model
model.compile(loss ='categorical_crossentropy',
optimizer ='adam', metrics =['accuracy'])
model.fit(X_train, y_train, epochs=1000)
Now each image in dataset is preprocesed as follows:-
import cv2
im = cv2.imread(path)
im_gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
ret, im_th = cv2.threshold(im_gray, 90, 255, cv2.THRESH_BINARY_INV)
ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
rects = [cv2.boundingRect(ctr) for ctr in ctrs]
rect = rects[0]
im_crop =im_th[rect[1]:rect[1]+rect[3],rect[0]:rect[0]+rect[2]]
im_resize = cv2.resize(im_crop,(28,28))
im_resize = np.array(im_resize)
im_resize=im_resize.reshape(28,28)
I have made an evaluation function which solves simple expression like 7+8 :-
def evaluate(im):
s = ''
data = []
im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, im_th = cv2.threshold(im_gray, 90, 255, cv2.THRESH_BINARY_INV)
ctrs, hier = cv2.findContours(im_th.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])
boundingBoxes = [cv2.boundingRect(c) for c in ctrs]
look_up = ['0','1','2','3','4','5','6','7','8','9','+','-']
i=0
for c in ctrs:
rect = boundingBoxes[i]
im_crop = im_th[rect[1]:rect[1]+rect[3], rect[0]:rect[0]+rect[2]]
im_resize = cv2.resize(im_crop,(28,28))
im_resize = np.array(im_resize)
im_resize = im_resize.reshape(28,28,1)
data.append(im_resize)
i+=1
data = np.array(data)
predictions = model.predict(data)
i=0
while i<len(boundingBoxes):
rect = boundingBoxes[i]
print(rect[2],rect[3])
print(predictions[i])
s += look_up[predictions[i].argmax()]
i+=1
return s
I need help extending this thought for Compund fractions but the problem is that they are identical to subtraction sign when resized to (28 , 28) so I need help in distuingish between them.
This is my first Question so please tell if any detail is left.

A target array with shape (11203, 25) was passed for an output of shape (None, 3) while using as loss `categorical_crossentropy`

I am a beginner in text processing techniques and I am trying to execute the below code.
from keras.layers import Dense, Input, GlobalMaxPooling1D
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model
from keras.layers import Input, Dense, Embedding, Conv2D, MaxPooling2D, Dropout,concatenate
from keras.layers.core import Reshape, Flatten
from keras.callbacks import EarlyStopping
from keras.optimizers import Adam
from keras.models import Model
from keras import regularizers
sequence_length = trn_abs.shape[1]
filter_sizes = [3,4,5]
num_filters = 100
drop = 0.5
inputs = Input(shape=(sequence_length,))
embedding = embedding_layer(inputs)
reshape = Reshape((sequence_length,embedding_dim,1))(embedding)
conv_0 = Conv2D(num_filters, (filter_sizes[0], embedding_dim),activation='relu',kernel_regularizer=regularizers.l2(0.01))(reshape)
conv_1 = Conv2D(num_filters, (filter_sizes[1], embedding_dim),activation='relu',kernel_regularizer=regularizers.l2(0.01))(reshape)
conv_2 = Conv2D(num_filters, (filter_sizes[2], embedding_dim),activation='relu',kernel_regularizer=regularizers.l2(0.01))(reshape)
maxpool_0 = MaxPooling2D((sequence_length - filter_sizes[0] + 1, 1), strides=(1,1))(conv_0)
maxpool_1 = MaxPooling2D((sequence_length - filter_sizes[1] + 1, 1), strides=(1,1))(conv_1)
maxpool_2 = MaxPooling2D((sequence_length - filter_sizes[2] + 1, 1), strides=(1,1))(conv_2)
merged_tensor = concatenate([maxpool_0, maxpool_1, maxpool_2], axis=1)
flatten = Flatten()(merged_tensor)
reshape = Reshape((3*num_filters,))(flatten)
dropout = Dropout(drop)(flatten)
output = Dense(units=3, activation='softmax',kernel_regularizer=regularizers.l2(0.01))(dropout)
# this creates a model that includes
model = Model(inputs, output)
adam = Adam(lr=1e-3)
model.compile(loss='categorical_crossentropy',
optimizer=adam,
metrics=['acc'])
callbacks = [EarlyStopping(monitor='val_loss')]
model.fit(X_trn, trn[target_cols], epochs=100)
and I am getting the following error:
ValueError: A target array with shape (11203, 25) was passed for output of shape (None, 3) while using as loss `categorical_crossentropy`. This loss expects targets to have the same shape as the output.
Could anyone help me with this, I am new to stackoverflow too,so please accept my apologies for ill-formating of question.
It's really important that the number of neurons at the end of your neural network is the number of categories you have. So try this:
output = Dense(units=25, activation='softmax'...

How to implement CAM without visualize_cam in this code?

I want to make Class activation map, so I have write the code
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, GlobalAveragePooling2D
from keras.models import Model, Input
from keras.utils import to_categorical
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train_resized = x_train.reshape((60000, 28, 28, 1))
x_test_resized = x_test.reshape((10000, 28, 28, 1))
y_train_hot_encoded = to_categorical(y_train)
y_test_hot_encoded = to_categorical(y_test)
inputs = Input(shape=(28,28, 1))
x = Conv2D(64, (3,3), activation='relu')(inputs)
x = Conv2D(64, (3,3), activation='relu')(x)
x = GlobalAveragePooling2D()(x)
predictions = Dense(10, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train_resized, y_train_hot_encoded, epochs=30, batch_size=256, shuffle=True, validation_split=0.3)
works fine, so I have imported visualize_cam module
from vis.visualization import visualize_cam
import matplotlib.pyplot as plt
import numpy as np
for i in range(10):
ind = np.where(y_test == i)[0][0]
plt.subplot(141)
plt.imshow(x_test_resized[ind].reshape((28,28)))
for j,modifier in enumerate([None, 'guided', 'relu']):
heat_map = visualize_cam(model, 4, y_test[ind], x_test_resized[ind], backprop_modifier=modifier)
plt.subplot(1,4,j+2)
plt.imshow(heat_map)
plt.show()
but the visualize_cam didn`t work well
I tried many times to fix the module but it doesn`t go well
(it depends on scipy which version is below 1.3. but )
so I have to implement cam without that module
Is there any solution to replace visualize_cam into other option to implement CAM?
Here is a scipy library independent implementation.
from keras.datasets import mnist
from keras.layers import Conv2D, Dense, GlobalAveragePooling2D
from keras.models import Model, Input
from keras.utils import to_categorical
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train_resized = x_train.reshape((60000, 28, 28, 1))
x_test_resized = x_test.reshape((10000, 28, 28, 1))
y_train_hot_encoded = to_categorical(y_train)
y_test_hot_encoded = to_categorical(y_test)
inputs = Input(shape=(28,28, 1))
x = Conv2D(64, (3,3), activation='relu')(inputs)
x = Conv2D(64, (3,3), activation='relu', name='final_conv')(x)
x = GlobalAveragePooling2D()(x)
predictions = Dense(10, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train_resized, y_train_hot_encoded, epochs=1, batch_size=256, shuffle=True, validation_split=0.3)
import numpy as np
import cv2
import io
import requests
from PIL import Image
import matplotlib.pyplot as plt
# Using Keras implementation from tensorflow
from tensorflow.python.keras import applications
from tensorflow.python.keras.preprocessing.image import load_img, img_to_array
from tensorflow.python.keras import backend as K
# Get the layer of the last conv layer
fianlconv = model.get_layer('final_conv')
# Get the weights matrix of the last layer
weight_softmax = model.layers[-1].get_weights()[0]
# Function to generate Class Activation Mapping
HEIGHT = 28
WIDTH = 28
def returnCAM(feature_conv, weight_softmax, class_idx):
size_upsample = (WIDTH, HEIGHT)
# Keras default is channels last, hence nc is in last
bz, h, w, nc = feature_conv.shape
output_cam = []
for idx in class_idx:
cam = np.dot(weight_softmax[:, idx], np.transpose(feature_conv.reshape(h*w, nc)))
cam = cam.reshape(h, w)
cam = cam - np.min(cam)
cam_img = cam / np.max(cam)
cam_img = np.uint8(255 * cam_img)
output_cam.append(cv2.resize(cam_img, size_upsample))
return output_cam
x = x_test_resized[0,:,:,0]
plt.imshow(x)
plt.show()
classes = {1:'1', 2: '2', 3: '3', 4:'4', 5:'5', 6:'6', 7:'7', 8:'8', 9:'9', 0:'0'}
probs_extractor = K.function([model.input], [model.output])
features_conv_extractor = K.function([model.input], [fianlconv.output])
probs = probs_extractor([np.expand_dims(x, 0).reshape(1,28,28,1)])[0]
features_blob = features_conv_extractor([np.expand_dims(x, 0).reshape(1,28,28,1)])[0]
features_blobs = []
features_blobs.append(features_blob)
idx = np.argsort(probs)
probs = np.sort(probs)
for i in range(-1, -6, -1):
print('{:.3f} -> {}'.format(probs[0, i], classes[idx[0, i]]))
CAMs = returnCAM(features_blobs[0], weight_softmax, [idx[0, -1]])
heatmap = cv2.applyColorMap(cv2.resize(CAMs[0], (28, 28)), cv2.COLORMAP_JET)
result = heatmap[:,:,0] * 0.3 + x * 0.5
print(result.shape)
plt.imshow(result)
plt.show()
N.B: I'm plotting normalized images so the result isn't great, I also trained only for 1 epoch. For better results, you may try training more, change to appropriate color space.

Error using Colab GPU, while none with CPU

I'm trying some code in Google Colab.
Using CPU it works fine, but when I switch to GPU it shows errors.
Self-contained code:
import numpy as np
import tensorflow as tf
import keras
from keras.layers import Input, BatchNormalization, Activation
from keras.layers import ZeroPadding2D, MaxPooling2D, Dense
from keras.layers import Reshape, Add, Dropout
from keras.layers import Conv2D
from keras.layers import Conv3DTranspose, Conv2DTranspose
from keras.initializers import VarianceScaling
from keras.models import Model
from keras.regularizers import l2
from keras.optimizers import SGD
import sys
# hyperparameters
BATCH_NORM_MOMENTUM = 0.1
BATCH_NORM_EPS = 1e-5
KERNEL_REGULARIZER = 0.0001
batchSize = 4
sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
def step1(input_shape = (3, 256, 256)):
step = 'step1_'
X_input = Input(input_shape, name = step + 'input')
X = Conv2D(64, (7, 7), strides = (2, 2), padding='same', data_format = 'channels_first', kernel_initializer="he_normal",kernel_regularizer=l2(KERNEL_REGULARIZER), name = step+'b1_conv_a',)(X_input)
X = BatchNormalization(axis = 1, momentum=BATCH_NORM_MOMENTUM, epsilon = BATCH_NORM_EPS, name = step+'b1_bn_a')(X)
X = Activation('relu', name = step+'b1_act_a')(X)
X = MaxPooling2D((3, 3), strides=(2, 2), data_format='channels_first', padding='same', name = step + 'b1_maxpool2d_a')(X)
print(X.shape)
model = Model(inputs = X_input, outputs = X, name='step1')
return model
step1Model = step1((3,256,256))
Error:
ValueError: Shape must be rank 1 but is rank 0 for 'step1_b1_bn_a/cond/Reshape_4' (op: 'Reshape') with input shapes: [1,64,1,1], [].
Why is there this difference between using CPU and GPU ?
This probably has to do with tensorflow and tensorflow-gpu packages on CPU and GPU kernels respectively.
You can bypass it but removing axis=1 from BatchNormalization layer
change:
X = BatchNormalization(axis = 1, momentum=BATCH_NORM_MOMENTUM, epsilon = BATCH_NORM_EPS, name = step+'b1_bn_a')(X)
to:
X = BatchNormalization(momentum=BATCH_NORM_MOMENTUM, epsilon = BATCH_NORM_EPS, name = step+'b1_bn_a')(X)

Importing weights from csv file:Layer weight shape (672, 7) not compatible with provided weight shape (1, 1, 672, 7)

I'm writing Deep learning network in Keras, previously tested in Matlab. To avoid doing all the learning, I exported weights and biases of the final layer in Matlab as the .csv file, and want to use them in my network - so my network would just test score based on given weights, instead of whole learning.
Code:
import os
os.environ['KERAS_BACKEND'] = 'tensorflow'
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, Input
from keras.layers import Convolution2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
#from keras import backend as K
from keras.preprocessing import image
print("\n")
print("\n")
trained_weights = []
trained_bias = []
trained_weights = np.loadtxt('Weights.csv', delimiter=';')
trained_bias = np.loadtxt('Bias.csv', delimiter=';')
network_weights = np.array([[trained_weights]])
network_bias = np.array([[trained_bias]])
network_outputs = np.array([['a','c','d','e','f','g','h']])
# Load test image
img = load_img('note_a.png')
note = image.img_to_array(img)
#note = np.expand_dims(note, axis=0)
note = np.reshape(note, [1, 50, 30, 3])
# Model architecture
model = Sequential()
# Layer 1
model.add(Convolution2D(12, (6, 6), batch_size=1, input_shape=(50, 30, 3), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
# Layer 2
model.add(Convolution2D(24, (6, 6), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=2))
# Layer 3
model.add(Convolution2D(48, (6, 6), activation='relu'))
model.add(Flatten())
layer2=Dense(7, weights=[network_weights, network_bias], activation='softmax')
model.add(layer2)
model.summary()
print("\n")
print("\n")
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics = ['accuracy'])
#model.fit((note,network_outputs), batch_size=32, nb_epoch=10, verbose=1)
#score = model.evaluate(note, network_outputs)
I was trying to use
model.set_weights([network_outputs])
but it seems like it assigns a weights only to my first layer of network, so I just assigned weights to final layer. Result is the following error:
ValueError: Layer weight shape (672, 7) not compatible with provided weight shape (1, 1, 672, 7)
And this is quite confusing for me. How, by doing weights=[network_weights, network_bias] I get 4 dimensions? Is it because network_weights has dimensions [1,672], and network_bias=[1,7], which makes [1,1,672,7]?
And how I can properly resize this weights parameter?
np.array([[trained_weights]]) creates an array out of your data surrounded by 2 empty dimensions, so your final shape is (1, 1, x, y). Same for your trained_bias. Applying [network_weights, network_bias] does again surround your 2 arrays with a dimension, which does obviously not match.
I think you just need to clean all the unnecessary surroundings by using something like:
weights=[trained_weights, trained_bias]

Categories