Why do I keep having class "1" as the predicted class? - python

I have the following convolutional neural network (CNN) in Keras, but I keep getting class "1" as the prediction for every test image, even though the training data is balanced. Any ideas on how I can solve this issue? Thanks.
from keras import layers
from keras import models
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
import cv2
import numpy as np
import os
train_directory = '/train'
validation_directory = '/valid'
test_directory = '/test'
results_directory = '/results'
correct_classification = 0
number_of_test_images = 0
labels = []
prediction_probabilities = []
model = models.Sequential()
model.add(layers.Conv2D(32,(3,3),activation='relu',input_shape=(512,512,3)))
model.add(layers.MaxPooling2D(2,2))
model.add(layers.Conv2D(64,(3,3),activation='relu'))
model.add(layers.MaxPooling2D(2,2))
model.add(layers.Conv2D(128,(3,3),activation='relu'))
model.add(layers.MaxPooling2D(2,2))
model.add(layers.Conv2D(256,(3,3),activation='relu'))
model.add(layers.MaxPooling2D(2,2))
model.add(layers.Conv2D(512,(3,3),activation='relu'))
model.add(layers.MaxPooling2D(2,2))
model.add(layers.Flatten())
model.add(layers.Dense(1024,activation='relu'))
model.add(layers.Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='rmsprop',metrics=['acc'])
train_data = ImageDataGenerator(rescale=1.0/255)
validation_data = ImageDataGenerator(rescale=1.0/255)
train_generator = train_data.flow_from_directory(train_directory,target_size=(512,512),batch_size=20,class_mode='binary')
validation_generator = validation_data.flow_from_directory(validation_directory,target_size=(512,512),batch_size=20,class_mode='binary')
history = model.fit_generator(train_generator,
                              steps_per_epoch=10,
                              epochs=10,
                              validation_data=validation_generator,
                              validation_steps=5)
model.save('my_model.h5')
for root, dirs, files in os.walk(test_directory):
    for file in files:
        img = cv2.imread(root + '/' + file)
        img = cv2.resize(img,(512,512),interpolation=cv2.INTER_AREA)
        img = np.expand_dims(img, axis=0)
        img = img/255.0
        if os.path.basename(root) == 'nevus':
            label = 1
        elif os.path.basename(root) == 'melanoma':
            label = 0
        labels.append(label)
        img_class = model.predict_classes(img)
        img_class_probability = model.predict(img)
        prediction_probability = img_class_probability[0]
        prediction_probabilities.append(prediction_probability)
        prediction = img_class[0]
        if prediction == label:
            correct_classification = correct_classification + 1

The output layer of your network is the cause of the constant "1" prediction. You need two output units in your final layer. A similar question is asked here, and I have quoted the explanation from Matias below for convenience.
Softmax normalizes by the sum of exponential of each output. Since there is one output, the only possible output is 1.0.
For a binary classifier you can either use a sigmoid activation with the "binary_crossentropy" loss, or put two output units at the last layer, keep using softmax and change the loss to categorical_crossentropy.
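For illustration, here is a minimal sketch of the two valid configurations; the hidden layer size and input shape are placeholders, not taken from the question:
from keras import layers, models

# Option 1: one output unit with sigmoid and binary_crossentropy.
model_a = models.Sequential()
model_a.add(layers.Dense(16, activation='relu', input_shape=(8,)))  # placeholder sizes
model_a.add(layers.Dense(1, activation='sigmoid'))  # outputs P(class 1)
model_a.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['acc'])

# Option 2: two output units with softmax and categorical_crossentropy (one-hot labels).
model_b = models.Sequential()
model_b.add(layers.Dense(16, activation='relu', input_shape=(8,)))
model_b.add(layers.Dense(2, activation='softmax'))  # outputs [P(class 0), P(class 1)]
model_b.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])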

Related

How will I get a prediction from this ensemble model?

# Load the hdf5 files
import numpy as np
import keras
from keras.models import load_model
resnet50 = keras.models.load_model('/content/drive/MyDrive/HDF5 Files/RestNet 50 best_model.hdf5')
resnet152 = keras.models.load_model('/content/drive/MyDrive/HDF5 Files/best_model_4.hdf5')
# Get the predictions from each model
predictions1 = resnet50.predict(images)
predictions3 = resnet152.predict(images)
# Combine the predictions using a majority vote
predictions = np.array([predictions1, predictions3])
predictions = np.mean(predictions, axis=0)
print(predictions)
The output I'm getting is [[9.9993783e-01 1.3912816e-06 6.0800008e-05 2.9077312e-09]]. What does this mean?
It's not clear from your question how many images you are passing and how many categories your classification problem has, so I will try to answer in a generic sense.
Say you pass a batch of 4 images to each of the models resnet50 and resnet152, and both are trained on 5 categories; then each model will give predictions with shape (4,5).
>> predictions1.shape
(4,5)
>> predictions3.shape
(4,5)
To combine them for majority vote, you should code as follows:
>> predictions = np.dstack((predictions1, predictions3)) # stack along the third axis.
>> predictions.shape
(4,5,2)
>> mean_predictions = np.mean(predictions, axis=2) # find the mean probabilities of both predictions in each category.
>> mean_predictions.shape
(4,5)
>> class_index = np.argmax(mean_predictions,axis=1) # Find the index having highest probability.
>> class_index.shape
(4,)
The final class_index gives the index of the class or category to which each of the 4 images belongs. I hope this helps.
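Note that averaging probabilities is technically soft voting. If you want a literal (hard) majority vote, take each model's argmax first and then the per-image mode. A small sketch under the same (4,5) assumption:
import numpy as np
from scipy import stats

votes1 = np.argmax(predictions1, axis=1)  # per-model class decisions, shape (4,)
votes3 = np.argmax(predictions3, axis=1)
votes = np.vstack([votes1, votes3])  # shape (n_models, n_images) = (2, 4)
class_index = np.asarray(stats.mode(votes, axis=0).mode).ravel()  # most frequent class per image
With only two models, ties are frequent (scipy's mode breaks them toward the smaller class index), so averaging the probabilities is usually the more sensible combination here.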
BETTER WAY
You can create an ensemble model in TensorFlow in a much better way, as follows:
import numpy as np
import tensorflow as tf
import keras
from tensorflow.keras.applications.resnet50 import preprocess_input as process_resnet50
from tensorflow.keras.applications.resnet_v2 import preprocess_input as process_resnetv2
from tensorflow.keras.layers import Lambda,Dense,GlobalMaxPool2D,Concatenate
from tensorflow.keras import Input, Model
from keras.models import load_model
resnet50 = keras.models.load_model('/content/drive/MyDrive/HDF5 Files/RestNet 50 best_model.hdf5')
resnet152 = keras.models.load_model('/content/drive/MyDrive/HDF5 Files/best_model_4.hdf5')
category_count = 5 # you can give your own category count.
input_shape = (224, 224, 3) # set this to the input size your models expect.
inp = Input(input_shape)
resnet_prep50 = tf.keras.layers.Lambda(process_resnet50)(inp)
res_net50 = resnet50(resnet_prep50)
x_resnet50 = GlobalMaxPool2D()(res_net50)
x_resnet50 = Dense(128, activation='relu')(x_resnet50)
resnet_prep152 = tf.keras.layers.Lambda(process_resnetv2)(inp)
res_net152 = resnet152(resnet_prep152)
x_resnet152 = GlobalMaxPool2D()(res_net152)
x_resnet152 = Dense(128, activation='relu')(x_resnet152)
x = Concatenate()([x_resnet50, x_resnet152])
out = Dense(category_count, activation='softmax')(x)
ensemble_model = Model(inputs=inp, outputs = out)
predictions = ensemble_model.predict(images)
predictions = np.argmax(predictions, axis=1)
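One caveat: the Dense and Concatenate layers added on top of the loaded models start out randomly initialized, so the ensemble should be compiled and briefly trained before its predictions are meaningful. A minimal sketch, where train_images and train_labels are placeholders for your own data:
# The new head layers are untrained; fit them on labeled data first.
ensemble_model.compile(optimizer='adam',
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])
ensemble_model.fit(train_images, train_labels, epochs=5, batch_size=32)  # placeholder data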

Making predictions on new images using a CNN in pytorch

I'm new to PyTorch, and I have been stuck on this problem for a while. I have trained a CNN for classifying X-ray images. The images can be found on this Kaggle page: https://www.kaggle.com/prashant268/chest-xray-covid19-pneumonia/ .
I managed to get good accuracy on both the training and test data, but when I try to make predictions on new images I get the same (wrong) class as output for every image. Here's my model in detail.
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import glob
import torch.nn.functional as F
import torch.nn as nn
from torchvision.transforms import transforms
from torch.utils.data import DataLoader
from torch.optim import Adam
from torch.autograd import Variable
import torchvision
import pathlib
from google.colab import drive
drive.mount('/content/drive')
epochs = 20
batch_size = 128
learning_rate = 0.001
#Data Transformation
transformer = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])
])
#Load data with DataLoader
train_path = '/content/drive/MyDrive/Chest X-ray (Covid-19 & Pneumonia)/Data/train'
test_path = '/content/drive/MyDrive/Chest X-ray (Covid-19 & Pneumonia)/Data/test'
train_loader = DataLoader(torchvision.datasets.ImageFolder(train_path,transform = transformer), batch_size= batch_size, shuffle= True)
test_loader = DataLoader(torchvision.datasets.ImageFolder(test_path,transform = transformer), batch_size= batch_size, shuffle= False)
root = pathlib.Path(train_path)
classes = sorted([j.name.split('/')[-1] for j in root.iterdir()])
print(classes)
train_count = len(glob.glob(train_path+'/**/*.jpg')) + len(glob.glob(train_path+'/**/*.png')) + len(glob.glob(train_path+'/**/*.jpeg'))
test_count = len(glob.glob(test_path+'/**/*.jpg')) + len(glob.glob(test_path+'/**/*.png')) + len(glob.glob(test_path+'/**/*.jpeg'))
print(train_count,test_count)
#Create the CNN
class CNN(nn.Module):
    def __init__(self):
        super(CNN,self).__init__()
        '''nout = [(width + 2*padding - kernel_size) / stride] + 1 '''
        # [128,3,224,224]
        self.conv1 = nn.Conv2d(in_channels = 3, out_channels = 12, kernel_size = 5)
        # [4,12,220,220]
        self.pool1 = nn.MaxPool2d(2,2) #reduces the images by a factor of 2
        # [4,12,110,110]
        self.conv2 = nn.Conv2d(in_channels = 12, out_channels = 24, kernel_size = 5)
        # [4,24,106,106]
        self.pool2 = nn.MaxPool2d(2,2)
        # [4,24,53,53] which becomes the input of the fully connected layer
        self.fc1 = nn.Linear(in_features = (24 * 53 * 53), out_features = 120)
        self.fc2 = nn.Linear(in_features = 120, out_features = 84)
        self.fc3 = nn.Linear(in_features = 84, out_features = len(classes)) #final layer, output will be the number of classes

    def forward(self, x):
        x = self.pool1(F.relu(self.conv1(x)))
        x = self.pool2(F.relu(self.conv2(x)))
        x = x.view(-1, 24 * 53 * 53)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
# Training the model
model = CNN()
loss_function = nn.CrossEntropyLoss() #includes the softmax activation function
optimizer = torch.optim.Adam(model.parameters(), lr = learning_rate)
n_total_steps = len(train_loader)
for epoch in range(epochs):
    n_correct = 0
    n_samples = 0
    for i, (images, labels) in enumerate(train_loader):
        # Forward pass
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)
        n_samples += labels.size(0)
        n_correct += (predicted == labels).sum().item()
        loss = loss_function(outputs, labels)
        # Backpropagation and optimization
        optimizer.zero_grad() #empty gradients
        loss.backward()
        optimizer.step()
    acc = 100.0 * n_correct / n_samples
    print(f'Epoch [{epoch+1}/{epochs}], Step [{i+1}/{n_total_steps}], Accuracy: {round(acc,2)} %, Loss: {loss.item():.4f}')
print('Done!!')
# Testing the model
with torch.no_grad():
    n_correct = 0
    n_samples = 0
    n_class_correct = [0 for i in range(3)]
    n_class_samples = [0 for i in range(3)]
    for images, labels in test_loader:
        outputs = model(images)
        # max returns (value ,index)
        _, predicted = torch.max(outputs, 1)
        n_samples += labels.size(0)
        n_correct += (predicted == labels).sum().item()
    acc = 100.0 * n_correct / n_samples
    print(f'Accuracy of the network: {acc} %')
torch.save(model.state_dict(),'/content/drive/MyDrive/Chest X-ray (Covid-19 & Pneumonia)/model.model')
For loading the model and trying to make predictions on new images, the code is as follows:
checkpoint = torch.load('/content/drive/MyDrive/Chest X-ray (Covid-19 & Pneumonia)/model.model')
model = CNN()
model.load_state_dict(checkpoint)
model.eval()
#Data Transformation
transformer = transforms.Compose([
    transforms.Resize((224,224)),
    transforms.ToTensor(),
    transforms.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])
])
#Making predictions on new data
from PIL import Image
def prediction(img_path,transformer):
    image = Image.open(img_path).convert('RGB')
    image_tensor = transformer(image)
    image_tensor = image_tensor.unsqueeze_(0) #add a batch dimension of 1
    input_img = Variable(image_tensor)
    output = model(input_img)
    #print(output)
    index = output.data.numpy().argmax()
    pred = classes[index]
    return pred
pred_path = '/content/drive/MyDrive/Chest X-ray (Covid-19 & Pneumonia)/Test_images/Data/'
test_imgs = glob.glob(pred_path+'/*')
for i in test_imgs:
    print(prediction(i,transformer))
I'm guessing the problem must be in the way I am preprocessing the data, although I cannot find my mistake. Any help will be deeply appreciated, since I have been stuck on this for a while now.
P.S. I can share my notebook as well, if it is of any help.
Regarding your problem, I have a debugging process that will narrow down where the problem most likely is, which should make it easy to fix.
My debugging process relies on the fact that your CNN performs well on the test set. First, temporarily set your test loader batch size to 1. Then, in your test loop where you count correct predictions, run the following code:
#Your code
outputs = model(images) # Really only one image and 1 output.
#Altered Code:
correct = (predicted == labels).sum().item() # This will be either 1 or 0 since you have only one image per batch
# My new code:
if correct:
    # if value is 1 instead of 0 then turn value into a single image with no batch size
    single_correct_image = images.squeeze(0)
    # Then convert tensor image into PIL image
    pil_image = transforms.ToPILImage()(single_correct_image)
    # Save the PIL image to a file path of your choice (this file name is a placeholder).
    pil_image.save("/content/correct_image.png")
    #Terminate testing process. Ignore the ValueError saying "terminating process"
    raise ValueError("terminating process")
Now you have an image saved to disk that you know is classified correctly on the test set. The next step is to open that image and run it through your predict function. A couple of things can happen, each of which tells you something about your situation:
If your model returns the wrong answer, then something differs between your prediction code and your testing code. One uses torch.sum and torch.max, the other uses np.argmax. You can then use print statements to debug what is going on there; perhaps there is a conversion error, or your expectation of the output's format is different.
If your code returns the right answer, then your model is simply failing to predict on new images. I suggest running more trial cases with the above process, as sketched below.
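For example, here is a minimal sketch of a predict helper (the name predict_like_test_loop is mine; model, transformer, and classes are assumed from the question) that mirrors the test loop's decision rule, staying in torch instead of switching to numpy:
import torch
from PIL import Image

def predict_like_test_loop(img_path, transformer):
    model.eval()
    image = Image.open(img_path).convert('RGB')
    image_tensor = transformer(image).unsqueeze(0)  # batch of one image
    with torch.no_grad():
        output = model(image_tensor)
    # Same decision rule as the test loop: torch.max over the class dimension.
    _, predicted = torch.max(output, 1)
    return classes[predicted.item()]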
For additional reference, if you still get stuck to the point where you feel you can't solve it, I suggest using this notebook as a guide and for suggestions on what code to at least inspect:
https://www.kaggle.com/salvation23/xray-cnn-pytorch
Sarthak Jain

Testing a Random Image against a Python Keras/Tensorflow CNN

I've created a CNN and I am trying to figure out how to test a random image against it. I am utilizing Keras and TensorFlow. Let's assume I want to test the image found here: https://i.ytimg.com/vi/7I8OeQs7cQA/maxresdefault.jpg.
How would I save the model, load it, then test this image against it? Any help is much appreciated, thanks! Here is my code:
import os
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import display, Image
from keras.models import Sequential, load_model
from keras.layers import Conv2D, Flatten, MaxPooling2D, Input
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import models, layers
X = []
y = []
from sklearn.model_selection import train_test_split
labels = os.listdir(r'C:/Users/zF1bo/Desktop/natural_images')
labels
for label in labels:
    path = r'C:/Users/zF1bo/Desktop/natural_images/{}/'.format(label)
    img_data = os.listdir(path)
    for image in img_data:
        a = cv2.imread( path + image)
        a = cv2.resize(a, (64, 64))
        X.append(np.array(a.astype('float32')) / 255)
        y.append(label)
buckets = []
for i in y:
    if i == 'airplane':
        buckets.append(0)
    elif i == 'car':
        buckets.append(1)
    elif i == 'cat':
        buckets.append(2)
    elif i == 'dog':
        buckets.append(3)
    elif i == 'flower':
        buckets.append(4)
    elif i == 'fruit':
        buckets.append(5)
    elif i == 'motorbike':
        buckets.append(6)
    elif i == 'person':
        buckets.append(7)
y = buckets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, \
                                                    random_state = 0)
model = models.Sequential()
model.add(layers.Conv2D(filters=32, kernel_size=(5,5), activation='relu', input_shape=(64,64,3)))
model.add(layers.MaxPool2D(pool_size=(2, 2)))
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(layers.MaxPool2D(pool_size=(2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(8, activation='softmax'))
model.compile(optimizer='adam', loss = 'sparse_categorical_crossentropy',metrics=['accuracy'])
y_train = np.array(y_train)
model.fit(X_train, y_train, batch_size=(256), epochs=25)
pred = model.predict(X_test)
diff = []
for i in pred:
    diff.append(np.argmax(i))
from sklearn.metrics import accuracy_score
accuracy_score(diff,y_test)
Step 1: Save the model
model.save('model.h5')
Step 2: Load the model
loaded_model = tf.keras.models.load_model('model.h5') # uses the import tensorflow as tf from your code
Step 3: Download the image via the urllib library (answer taken from: Downloading a picture via urllib and python):
import urllib.request
urllib.request.urlretrieve(url, filename)
Then apply the same preprocessing steps that you applied to your training images. Do not forget to expand_dims().
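Putting the three steps together, a minimal sketch (the URL is the one from the question, the preprocessing mirrors the training code, and the downloaded file name is a placeholder):
import urllib.request
import cv2
import numpy as np
import tensorflow as tf

loaded_model = tf.keras.models.load_model('model.h5') # Step 2

url = 'https://i.ytimg.com/vi/7I8OeQs7cQA/maxresdefault.jpg'
urllib.request.urlretrieve(url, 'test_image.jpg') # Step 3: download the image

img = cv2.imread('test_image.jpg')
img = cv2.resize(img, (64, 64)) # same size as the training images
img = img.astype('float32') / 255 # same rescaling as the training images
img = np.expand_dims(img, axis=0) # shape (1, 64, 64, 3)

pred = loaded_model.predict(img)
print(np.argmax(pred)) # index into the buckets mapping from the question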
You can save your model using model.save('path to save location').
To input an image you should read it, perform any pre-processing that you did to the training images, then use model.predict as shown in the code below.
import cv2
import numpy as np
import tensorflow as tf
from tensorflow import keras
model = keras.models.load_model('path/to/location') # loads the saved model
pred_img = r'path to the img'
img = cv2.imread(pred_img)
img = img/255 # rescale the image
print(img.shape)
img = cv2.resize(img, (64,64)) # resize to same size used in training
print(img.shape)
img = np.expand_dims(img, axis=0)
print(img.shape)
pred = model.predict(img)
print(pred) # will be a list of 8 elements; select the element with the highest probability
index = np.argmax(pred) # this will be the index of the class predicted
class_name = buckets[index] # this will be the name of the class predicted
print(class_name)
NOTE: if you read in the training images with cv2, remember the channel order is BGR, not RGB. The model was trained on BGR images, so any image you read in to predict must be BGR as well.
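If you ever need to mix cv2-loaded images with RGB sources such as PIL, one option is to convert the channel order explicitly; a small sketch (the file name is a placeholder):
import cv2
img = cv2.imread('some_image.jpg') # cv2 loads images in BGR order
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # use COLOR_RGB2BGR for the reverse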

4-dimensional data becomes 2-dimensional after going through a convolutional neural network

I am training a neural network with two types of input: an image and BR (blue over red, a non-image feature like height or weight). To do that, I use the fit function in Keras and convert the images to a list for input. But I don't know why the image list, which has a 4-dimensional shape, becomes 2-dimensional when going into fit, and I get the error below:
Error when checking input: expected dense_1_input to have 3 dimensions, but got array with shape (1630, 1)
When I converted the image list to an array, I checked the shape of image_array and it has exactly 4 dimensions (specifically, its shape is (1630, 60, 60, 3)). Even right before the fit function it still has the same shape. So I really don't know why the shape became (1630, 1). Could anyone explain this to me?
Here is my code:
from keras.utils.np_utils import to_categorical
import pandas as pd
import numpy as np
import os
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Input, Activation, Dropout, Flatten, Dense,Concatenate, concatenate,Reshape, BatchNormalization, Merge
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.optimizers import Adagrad
from sklearn import preprocessing
from scipy.misc import imread
import time
from PIL import Image
import cv2
img_width, img_height = 60, 60
img_list = []
BR_list = []
label_list = []
data_num = 1630
folder1 = "cut2/train/sugi/"
folder2 = "cut2/train/hinoki/"
def imgConvert(file_path):
    img = imread(file_path,flatten = True)
    img = np.arange(1*3*60*60).reshape((60,60,3))
    img = np.array(img).reshape(60,60,3)
    img = img.astype("float32")
    return img

def B_and_R(img_path):
    img = cv2.imread(img_path)
    B = 0
    R = 0
    for i in range(25,35):
        #print(i)
        for j in range(25,35):
            B = B+img[i,j,0]
            R = R+img[i,j,2]
            #(j)
            #(img[i,j])
    ave_B = B/100
    ave_R = R/100
    BR = ave_B/ave_R
    return BR

def getData(path,pollen):
    for the_file in os.listdir(path):
        #print(the_file)
        file_path = os.path.join(path, the_file)
        B_over_R = B_and_R(file_path)
        img_arr = imgConvert(file_path)
        #writer.writerow([img_arr,B_over_R,"sugi"])
        img_list.append(img_arr)
        BR_list.append(B_over_R)
        lb = np.zeros(2)
        if pollen == "sugi":
            lb[0] +=1
        else:
            lb[1] +=1
        label_list.append(lb)
if __name__ == '__main__':
    getData(folder1,"sugi")
    getData(folder2,"hinoki")
    img_arr = np.array(img_list)
    print(img_arr.shape)
    #.reshape(img_list[0],1,img_width,img_height)
    img_arr.astype("float32")
    img_arr /= 255
    print(img_arr.shape)
    img_array = np.expand_dims(img_arr, axis = 0)
    img_array = img_array[0,:,:,:,:]
    print(img_array.shape)
    """
    datagen = ImageDataGenerator(
        featurewise_center=True,
        featurewise_std_normalization=True,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True)
    datagen.fit(img_array)
    """
    #img_array = img_array.reshape(img_array[0],1,img_width,img_height)
    print(img_array.shape)
    label_arr = np.array(label_list)
    print(label_arr.shape)
    #label_array = np.expand_dims(label_arr, axis = 0)
    #label_array = label_array[0,:,:,:,:]
    BR_arr = np.array(BR_list)
    print(BR_arr.shape)
    #BR_array = np.expand_dims(BR_arr, axis = 0)
    #BR_array = BR_array[0,:,:,:,:]
    #print(len([img_arr,BR_arr]))
    input_tensor = Input(shape=(img_width, img_height,3))
    vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)
    # Build the FC (fully connected) layers
    top_model = Sequential()
    top_model.add(Flatten(input_shape=vgg16.output_shape[1:]))
    #print(top_model.summary())
    # Join VGG16 and the FC layers to create the model
    branch1 = Model(input=vgg16.input, output=top_model(vgg16.output))
    #model.summary()
    print(branch1.summary())
    branch2 = Sequential()
    branch2.add(Dense(1, input_shape=(data_num,1), activation='sigmoid'))
    #branch1.add(Reshape(BR.shape, input_shape = BR.shape))
    branch2.add(BatchNormalization())
    branch2.add(Flatten())
    print(branch2.summary())
    merged = Merge([branch1, branch2], mode = "concat")
    model = Sequential()
    model.add(merged)
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    #last_model = Model(input = [branch1.input,branch2.input],output=model())
    print(model.summary())
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-3, momentum=0.9),
                  metrics=['accuracy'])
    print(img_array.shape)
    model.fit([img_array,BR_arr], label_arr,
              epochs=5, batch_size=100, verbose=1)
Ok, then the problem is the input shape.
While your data for branch 2 is 2D (batch, 1), your model should also have a 2D input: input_shape = (1,). (Batch sizes are ignored in input_shape)
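For instance, a minimal sketch of branch2 with the corrected input shape (the layer types follow the question's code):
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization

# Each BR sample is a single scalar, so the per-sample shape is (1,);
# the batch dimension (data_num) must not appear in input_shape.
branch2 = Sequential()
branch2.add(Dense(1, input_shape=(1,), activation='sigmoid'))
branch2.add(BatchNormalization())
With a (1,) input the Flatten layer from the original code is no longer needed.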

AttributeError: 'Sequential' no attribute 'get_shape' when merging models

I am trying to create two sequential models (each trained on different sets of data - different images). Then I would like to take the average of their outputs and add a softmax layer, giving me a single classification output based on the two sequential models. My code is below, but I get an AttributeError that says 'Sequential' object has no attribute 'get_shape'.
The full error is:
Traceback (most recent call last):
  File "Mergedmodels.py", line 135, in <module>
    merged = average([modelo, modelN1])
  File "G:\Anaconda\lib\site-packages\keras\layers\merge.py", line 481, in average
    return Average(**kwargs)(inputs)
  File "G:\Anaconda\lib\site-packages\keras\engine\topology.py", line 542, in __call__
    input_shapes.append(K.int_shape(x_elem))
  File "G:\Anaconda\lib\site-packages\keras\backend\tensorflow_backend.py", line 411, in int_shape
    shape = x.get_shape()
AttributeError: 'Sequential' object has no attribute 'get_shape'
Any idea on how to fix it?
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import merge
from keras.layers import average
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.datasets import mnist
import pandas as pd
from numpy import array
from PIL import Image
import matplotlib.pyplot as plt
from keras import backend as K
import glob
import os
K.set_image_dim_ordering('th')
np.random.seed(123) #set for reproducibility
size = 48, 48
#IMPORTING TRAINING IMAGES FOR FIRST MODEL (ORIGINAL)
folder = 'images'
read = lambda imname: np.asarray(Image.open(imname).convert("RGB"))
ims = [read(os.path.join(folder, filename)) for filename in os.listdir(folder)]
X_train = np.array([read(os.path.join(folder, filename)) for filename in os.listdir(folder)], dtype='uint8')
#CHECK print (X_train.shape)
X_train = X_train.reshape(X_train.shape[0],3,48,48)
#X_test = X_test.reshape(X_test.shape[0],1,28,28)
X_train = X_train.astype ('float32')
#X_test = X_test.astype ('float32')
X_train /= 255
#X_test /= 255
#IMPORTING TRAINING IMAGES FOR SECOND MODEL (NORMALIZED)
folder = 'images2'
read = lambda imname: np.asarray(Image.open(imname).convert("RGB"))
ims = [read(os.path.join(folder, filename)) for filename in os.listdir(folder)]
X_training = np.array([read(os.path.join(folder, filename)) for filename in os.listdir(folder)], dtype='uint8')
#CHECK print (X_train.shape)
X_training = X_training.reshape(X_train.shape[0],3,48,48)
#X_test = X_test.reshape(X_test.shape[0],1,28,28)
X_training = X_training.astype ('float32')
#X_test = X_test.astype ('float32')
X_training /= 255
#X_test /= 255
#IMPORTING LABELS FOR 10K TRAINING IMAGES
saved_column = pd.read_csv('labels4.csv')
y_labels = array(saved_column)
Y_train = np_utils.to_categorical(y_labels,501)
#y_train = np.array ([0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1])
#(X_train, y_train),(X_test, y_test) = mnist.load_data()
#COPYING LABELS FOR SECOND MODEL TRAINING IMAGES
#Y_training = Y_train
#IMPORTING TEST IMAGES
folder2 = 'test'
read = lambda imname: np.asarray(Image.open(imname).convert("RGB"))
ims = [read(os.path.join(folder2, filename)) for filename in os.listdir(folder2)]
X_test = np.array([read(os.path.join(folder2, filename)) for filename in os.listdir(folder2)], dtype='uint8')
X_test = X_test.reshape(X_test.shape[0],3,48,48)
X_test = X_test.astype ('float32')
X_test /= 255
#IMPORTING LABELS FOR TEST IMAGES
another_column = pd.read_csv('labelstest4.csv')
test_labels = array(another_column)
Y_test = np_utils.to_categorical(test_labels,501)
#train_labels = np.array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
#Y_train = np_utils.to_categorical(y_train, 2)
#Y_test = np_utils.to_categorical(y_test,10)
#BUILDING FIRST NN FOR ORIGINAL IMAGES
modelo = Sequential()
modelo.add(Convolution2D(32,3,3, activation='relu', input_shape=(3,48,48), dim_ordering='th'))
modelo.add(Convolution2D(32,3,3, activation = 'relu'))
modelo.add(MaxPooling2D(pool_size=(2,2)))
modelo.add(Dropout(0.25))
modelo.add(Flatten())
modelo.add(Dense(128,activation='relu'))
modelo.add(Dropout(0.5))
modelo.add(Dense(501, activation = 'sigmoid'))
modelo.compile(loss='categorical_crossentropy',
               optimizer = 'adam',
               metrics = ['accuracy'])
modelo.fit(X_train, Y_train,
           batch_size = 5, nb_epoch= 5, verbose = 1)
score = modelo.evaluate(X_test, Y_test, verbose=0)
#BUILDING SECOND NN FOR NORMALIZED IMAGES
modelN1 = Sequential()
modelN1.add(Convolution2D(32,3,3, activation='relu', input_shape=(3,48,48), dim_ordering='th'))
modelN1.add(Convolution2D(32,3,3, activation = 'relu'))
modelN1.add(MaxPooling2D(pool_size=(2,2)))
modelN1.add(Dropout(0.25))
modelN1.add(Flatten())
modelN1.add(Dense(128,activation='relu'))
modelN1.add(Dropout(0.5))
modelN1.add(Dense(501, activation = 'sigmoid'))
modelN1.compile(loss='categorical_crossentropy',
                optimizer = 'adam',
                metrics = ['accuracy'])
modelN1.fit(X_training, Y_train,
            batch_size = 5, nb_epoch= 1, verbose = 1)
score = modelN1.evaluate(X_test, Y_test, verbose=0)
#MERGING MODELS
merged = average([modelo, modelN1])
finalmodel = Sequential ()
finalmodel.add(merged)
finalmodel.add(Dense(501, activation = 'softmax'))
finalmodel.compile(loss='categorical_crossentropy',
                   optimizer = 'adam',
                   metrics = ['accuracy'])
Y_madeuplabels = np.array ([0, 1, 52, 20])
Y_training = np_utils.to_categorical(Y_madeuplabels, 501)
finalmodel.fit([X_train], Y_training,
               batch_size = 5, nb_epoch= 1, verbose = 1)
score = finalmodel.evaluate(X_test, Y_test, verbose=0)
print ("the code ran")
This way of combining Sequential models doesn't work in Keras 2.0, since average operates on tensors, not layers or models. That is why the error message says the Sequential model has no get_shape() method: get_shape() exists only on tensors.
Here is an example that replicates the error:
from keras.models import Sequential
from keras.layers import Dense, average

mod1 = Sequential()
mod1.add(Dense(1, input_shape=(10,)))
mod2 = Sequential()
mod2.add(Dense(1, input_shape=(10,)))
avg = average([mod1, mod2]) # throws AttributeError
A hacky way to get around this is to use the functional API to combine the outputs of the two models and then add the softmax layer on top. As an example:
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, average

X1 = np.random.rand(10, 10)
X2 = np.random.rand(10, 10)
Y = np.random.choice(2, 10)

mod1 = Sequential()
mod1.add(Dense(16, input_shape=(10,)))
mod2 = Sequential()
mod2.add(Dense(16, input_shape=(10,)))

# use the outputs of the models to do the average over;
# this way we average over tensors, __not__ models.
avg = average([mod1.output, mod2.output])
dense = Dense(1, activation="sigmoid")(avg)

# the two inputs are the inputs to the sequential models
# and the output is the dense layer
mod3 = Model(inputs=[mod1.input, mod2.input], outputs=[dense])
mod3.compile(loss='binary_crossentropy', optimizer='sgd')
mod3.fit([X1, X2], Y)
