How to find the filenames of misclassified images - Python

I am building a model to detect solar panel defects. I am training on 255x255 images and I want to use the confusion matrix to improve my model.
The matrix tells me how many images are misclassified, but I need the exact filenames of the false positive and false negative images.
How can I achieve this?
I have provided my code below:
import numpy as np
import os
import time
import keras
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing import image
from keras.layers import GlobalAveragePooling2D, Dense, Dropout
#from keras.layers import GlobalAveragePooling2D, Dense, Dropout,Activation,Flatten
#from keras.layers import Input
from keras.models import Model
from keras.utils import np_utils
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.layers import Input
from keras import models
from keras import layers
from keras import optimizers
from keras_applications.resnet import ResNet101
from keras.optimizers import SGD, Adagrad, Adadelta, RMSprop, Adam
from keras.callbacks import LearningRateScheduler
from keras.models import load_model
from keras import regularizers
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
###########################################################################################################################
## Model Initials
IMAGE_SIZE = (255, 255)
BATCH_SIZE = 24
NUM_EPOCHS = 1
WEIGHTS_FINAL = 'defectdetection.hdf5'
MODEL_FINAL = 'defectdetection.h5'
BEST_WEIGHT ='1defectdetection.hdf5'
##############################################################################################
## Loading dataset for the training process
## Define data path
# Loading the training data
img_path = 'C:/Users/TeamSoloMid/SolarCellsImages/dataset/Sample0001.jpg'
img = image.load_img(img_path, target_size=(255, 255))
x = image.img_to_array(img)
print (x.shape)
x = np.expand_dims(x, axis=0)
print (x.shape)
x = preprocess_input(x)
print('Input image shape:', x.shape)
PATH = os.getcwd()
data_path = 'C:/Users/TeamSoloMid/SolarCellsImages'
data_dir_list = os.listdir(data_path)
img_data_list=[]
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    print('Loaded the images of dataset-{}\n'.format(dataset))
    for img in img_list:
        img_path = data_path + '/' + dataset + '/' + img
        img = image.load_img(img_path, target_size=(255, 255))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        #print('Input image shape:', x.shape)
        img_data_list.append(x)
img_data = np.array(img_data_list)
print (img_data.shape)
img_data=np.rollaxis(img_data,1,0)
print (img_data.shape)
img_data=img_data[0]
print (img_data.shape)
#t=time.time()
# Define the number of classes
num_classes = 2
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,),dtype='int64')
labels[0:1603]=0
labels[1604:3225]=1
names = ['Defect', 'Almost']
Y = np_utils.to_categorical(labels, num_classes)
#Shuffle the dataset
x,y = shuffle(img_data,Y, random_state=2)
# Split the dataset
TestPcnt = 0.2
X_train, X_test, y_train, y_test = train_test_split(x, y,
                                                    test_size=TestPcnt,
                                                    random_state=2)
epoch=NUM_EPOCHS
###############################################################################################
# Fine tune the resnet 101
image_input = Input(shape=(255, 255, 3))
model = ResNet101(include_top=False,
                  input_tensor=image_input,
                  weights='imagenet',
                  backend=keras.backend,
                  layers=keras.layers,
                  models=keras.models,
                  utils=keras.utils)
# Freeze all the layers
for layer in model.layers[:-3]:
    layer.trainable = False
#model.summary()
last_layer = model.output
# add a global spatial average pooling layer
x = GlobalAveragePooling2D()(last_layer)
x = Dense(256, activation='relu', name='fc-1')(x)
x = Dropout(0.5)(x)
out = Dense(num_classes, activation='softmax', name='output_layer')(x)
# this is the model we will train
net_model = Model(inputs=model.input, outputs=out)
net_model.summary()
for layer in net_model.layers[:-5]:
    layer.trainable = False
net_model.summary()
for layer in net_model.layers:
    print(layer, layer.trainable)
#my_opti= optimizers.Adam(lr=0.00002)
#my_opti= optimizers.Adam(lr=0.00001)
################################################################################################
#Define learning Rate
learning_rate = 0.00002
decay_rate = learning_rate / epoch
momentum = 0.9
sgd = SGD(lr=learning_rate, momentum=momentum,
          decay=decay_rate,
          nesterov=False)
##############################################################################
## we will keep the weights of the epoch that scores highest in terms of accuracy on the test set.
from keras.callbacks import ModelCheckpoint
checkpointer = ModelCheckpoint(filepath=BEST_WEIGHT,
                               monitor='val_acc',
                               verbose=1,
                               save_best_only=True,
                               mode='max')
###################################################################
callback_list = [checkpointer]
net_model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
t = time.time()
hist = net_model.fit(X_train, y_train, batch_size=BATCH_SIZE,
                     epochs=NUM_EPOCHS, verbose=1,
                     callbacks=[checkpointer],
                     validation_data=(X_test, y_test))
print('Training time: %s' % (time.time() - t))
(loss, accuracy) = net_model.evaluate(X_test, y_test,
                                      batch_size=BATCH_SIZE,
                                      verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
############################################################################################
## Saving The weights of the model after training
net_model.save_weights(WEIGHTS_FINAL)
print('1. Weights Saved')
net_model.save_weights(BEST_WEIGHT)
print('2. Best Weights Saved')
##############################################################################
## Saving The Complete model after training
net_model.save(MODEL_FINAL)
print('3. Model Saved')
############################################################################################
import matplotlib.pyplot as plt
# visualizing losses and accuracy
train_loss=hist.history['loss']
val_loss=hist.history['val_loss']
train_acc=hist.history['acc']
val_acc=hist.history['val_acc']
xc=range(NUM_EPOCHS)
plt.figure(1,figsize=(7,5))
plt.plot(xc,train_loss)
plt.plot(xc,val_loss)
plt.xlabel('num of Epochs')
plt.ylabel('loss')
plt.title('train_loss vs val_loss')
plt.grid(True)
plt.legend(['train','val'])
#print plt.style.available # use bmh, classic,ggplot for big pictures
plt.style.use(['classic'])
plt.figure(2,figsize=(7,5))
plt.plot(xc,train_acc)
plt.plot(xc,val_acc)
plt.xlabel('num of Epochs')
plt.ylabel('accuracy')
plt.title('train_acc vs val_acc')
plt.grid(True)
plt.legend(['train','val'],loc=4)
#print plt.style.available # use bmh, classic,ggplot for big pictures
plt.style.use(['classic'])
############################################################################
from sklearn.metrics import confusion_matrix, classification_report
import itertools
from sklearn.utils.multiclass import unique_labels
from sklearn import metrics
import seaborn as sns
import pandas as pd
from sklearn.datasets import load_files
from sklearn.svm import LinearSVC
from sklearn import svm
LABELS= ['Defect', 'Almost']
# Print confusion matrix for training data
y_pred_train = net_model.predict(X_train)
def show_confusion_matrix(validations, predictions):
    matrix = metrics.confusion_matrix(validations, predictions)
    plt.figure(figsize=(10, 10))
    sns.heatmap(matrix,
                cmap='coolwarm',
                linecolor='white',
                linewidths=1,
                xticklabels=LABELS,
                yticklabels=LABELS,
                annot=True,
                fmt='d')
    plt.title('Confusion Matrix')
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.show()
y_pred_test = net_model.predict(X_test)
# Take the class with the highest probability from the test predictions
max_y_pred_test = np.argmax(y_pred_test, axis=1)
max_y_test = np.argmax(y_test, axis=1)
show_confusion_matrix(max_y_test, max_y_pred_test)
print(classification_report(max_y_test, max_y_pred_test))

I would calculate a checksum (e.g. MD5) and use it as a dictionary key that maps to the image path as its value, i.e.
import hashlib
...
image_paths = {}
...
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    print('Loaded the images of dataset-{}\n'.format(dataset))
    for img in img_list:
        img_path = data_path + '/' + dataset + '/' + img
        img = image.load_img(img_path, target_size=(255, 255))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        #print('Input image shape:', x.shape)
        img_data_list.append(x)
        # In this example I am only hashing the first 5 pixels of the image.
        # You would probably want to use all of the pixels.
        image_hash = hashlib.md5(str(x[0]) + str(x[1]) + str(x[2]) + str(x[3]) + str(x[4])).hexdigest()
        image_paths[image_hash] = img_path
...
When you want to recover the image path, you simply calculate the hash again and look the path up in the dictionary:
image_hash = hashlib.md5(str(x[0]) + str(x[1]) + str(x[2]) + str(x[3]) + str(x[4])).hexdigest()
image_path = image_paths[image_hash]
While that is not the most flexible approach, I believe it will still help you achieve your goal.
One note: hashing can be fairly expensive if you have a lot of images, but if your images do not change, you only need to hash them once and save the mapping somewhere. On subsequent runs you would only need to load it instead of hashing everything again.
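A minimal sketch of that caching idea, assuming the image_paths dictionary built above (the cache filename is arbitrary):
import os
import pickle

CACHE_FILE = 'image_paths.pkl'  # hypothetical cache location

if os.path.exists(CACHE_FILE):
    # Reuse the hash -> path mapping computed on a previous run
    with open(CACHE_FILE, 'rb') as f:
        image_paths = pickle.load(f)
else:
    image_paths = {}
    # ... build image_paths while loading the images, as shown above ...
    with open(CACHE_FILE, 'wb') as f:
        pickle.dump(image_paths, f)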

Thanks for your answer. I have tried your solution, and I get an IndexError:
File "C:/Users/TeamSoloMid/Solar cells Defect Detection.py", line 102, in <module>
image_hash = hashlib.md5(str(x[0])+str(x[1])+str(x[2])+str(x[3])+str(x[4])).hexdigest()
IndexError: index 1 is out of bounds for axis 0 with size 1
Below is the code I added your solution in
import numpy as np
import os
import time
import keras
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing import image
from keras.layers import GlobalAveragePooling2D, Dense, Dropout
#from keras.layers import GlobalAveragePooling2D, Dense, Dropout,Activation,Flatten
#from keras.layers import Input
from keras.models import Model
from keras.utils import np_utils
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.layers import Input
from keras import models
from keras import layers
from keras import optimizers
from keras_applications.resnet import ResNet101
from keras.optimizers import SGD, Adagrad, Adadelta, RMSprop, Adam
from keras.callbacks import LearningRateScheduler
from keras.models import load_model
from keras import regularizers
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import hashlib
########################################################################################
## Model Initials
IMAGE_SIZE = (255, 255)
BATCH_SIZE = 24
NUM_EPOCHS = 1
WEIGHTS_FINAL = 'defectdetection.hdf5'
MODEL_FINAL = 'defectdetection.h5'
BEST_WEIGHT ='defectdetection.hdf5'
#########################################################################################
## Loading dataset for the training process
## Define data path
# Loading the training data
img_path = 'C:/Users/TeamSoloMid/SolarCellsImages/dataset/Sample0001.jpg'
img = image.load_img(img_path, target_size=(255, 255))
x = image.img_to_array(img)
print (x.shape)
x = np.expand_dims(x, axis=0)
print (x.shape)
x = preprocess_input(x)
print('Input image shape:', x.shape)
PATH = os.getcwd()
data_path = 'C:/Users/TeamSoloMid/SolarCellsImages'
data_dir_list = os.listdir(data_path)
image_paths = {}
img_data_list=[]
for dataset in data_dir_list:
    img_list = os.listdir(data_path + '/' + dataset)
    print('Loaded the images of dataset-{}\n'.format(dataset))
    for img in img_list:
        img_path = data_path + '/' + dataset + '/' + img
        img = image.load_img(img_path, target_size=(255, 255))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        #print('Input image shape:', x.shape)
        img_data_list.append(x)
        image_hash = hashlib.md5(str(x[0])+str(x[1])+str(x[2])+str(x[3])+str(x[4])).hexdigest()
        image_paths[image_hash] = img_path
img_data = np.array(img_data_list)
print (img_data.shape)
img_data=np.rollaxis(img_data,1,0)
print (img_data.shape)
img_data=img_data[0]
print (img_data.shape)
#t=time.time()
# Define the number of classes
num_classes = 2
num_of_samples = img_data.shape[0]
labels = np.ones((num_of_samples,),dtype='int64')
labels[0:1603]=0
labels[1604:3225]=1
#labels[3226:4847]=2
names = ['Defect', 'Almost']
# convert class labels to on-hot encoding
Y = np_utils.to_categorical(labels, num_classes)
#Shuffle the dataset
x,y = shuffle(img_data,Y, random_state=2)
# Split the dataset
TestPcnt = 0.2
X_train, X_test, y_train, y_test = train_test_split(x, y,
                                                    test_size=TestPcnt,
                                                    random_state=2)
epoch=NUM_EPOCHS
########################################################################################
# Fine tune the resnet 101
image_input = Input(shape=(255, 255, 3))
model = ResNet101(include_top=False,
                  input_tensor=image_input,
                  weights='imagenet',
                  backend=keras.backend,
                  layers=keras.layers,
                  models=keras.models,
                  utils=keras.utils)
# Freeze all the layers
for layer in model.layers[:-3]:
    layer.trainable = False
#model.summary()
last_layer = model.output
# add a global spatial average pooling layer
x = GlobalAveragePooling2D()(last_layer)
x = Dense(256, activation='relu', name='fc-1')(x)
x = Dropout(0.5)(x)
out = Dense(num_classes, activation='softmax', name='output_layer')(x)
# this is the model we will train
net_model = Model(inputs=model.input, outputs=out)
net_model.summary()
for layer in net_model.layers[:-5]:
    layer.trainable = False
net_model.summary()
for layer in net_model.layers:
    print(layer, layer.trainable)
#my_opti= optimizers.Adam(lr=0.00002)
#my_opti= optimizers.Adam(lr=0.00001)
#########################################################################################
# Define learning rate
learning_rate = 0.00002
decay_rate = learning_rate / epoch
momentum = 0.9
sgd = SGD(lr=learning_rate, momentum=momentum,
          decay=decay_rate,
          nesterov=False)
##############################################################################
## we will keep the weights of the epoch that scores highest in terms of accuracy on the test set.
from keras.callbacks import ModelCheckpoint
checkpointer = ModelCheckpoint(filepath=BEST_WEIGHT,
                               monitor='val_acc',
                               verbose=1,
                               save_best_only=True,
                               mode='max')
###################################################################
callback_list = [checkpointer]
net_model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
t = time.time()
hist = net_model.fit(X_train, y_train, batch_size=BATCH_SIZE,
                     epochs=NUM_EPOCHS, verbose=1,
                     callbacks=[checkpointer],
                     validation_data=(X_test, y_test))
print('Training time: %s' % (time.time() - t))
(loss, accuracy) = net_model.evaluate(X_test, y_test,
                                      batch_size=BATCH_SIZE,
                                      verbose=1)
print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss,accuracy * 100))
############################################################################################
## Saving The weights of the model after training
net_model.save_weights(WEIGHTS_FINAL)
print('1. Weights Saved')
net_model.save_weights(BEST_WEIGHT)
print('2. Best Weights Saved')
##############################################################################
## Saving The Complete model after training
net_model.save(MODEL_FINAL)
print('3. Model Saved')
############################################################################################
import matplotlib.pyplot as plt
# visualizing losses and accuracy
train_loss=hist.history['loss']
val_loss=hist.history['val_loss']
train_acc=hist.history['acc']
val_acc=hist.history['val_acc']
xc=range(NUM_EPOCHS)
plt.figure(1,figsize=(7,5))
plt.plot(xc,train_loss)
plt.plot(xc,val_loss)
plt.xlabel('num of Epochs')
plt.ylabel('loss')
plt.title('train_loss vs val_loss')
plt.grid(True)
plt.legend(['train','val'])
#print plt.style.available # use bmh, classic,ggplot for big pictures
plt.style.use(['classic'])
plt.figure(2,figsize=(7,5))
plt.plot(xc,train_acc)
plt.plot(xc,val_acc)
plt.xlabel('num of Epochs')
plt.ylabel('accuracy')
plt.title('train_acc vs val_acc')
plt.grid(True)
plt.legend(['train','val'],loc=4)
#print plt.style.available # use bmh, classic,ggplot for big pictures
plt.style.use(['classic'])
############################################################################
from sklearn.metrics import confusion_matrix, classification_report
import itertools
from sklearn.utils.multiclass import unique_labels
from sklearn import metrics
import seaborn as sns
import pandas as pd
from sklearn.datasets import load_files
from sklearn.svm import LinearSVC
from sklearn import svm
LABELS= ['Defect', 'Almost']
# Print confusion matrix for training data
y_pred_train = net_model.predict(X_train)
def show_confusion_matrix(validations, predictions):
    matrix = metrics.confusion_matrix(validations, predictions)
    plt.figure(figsize=(10, 10))
    sns.heatmap(matrix,
                cmap='coolwarm',
                linecolor='white',
                linewidths=1,
                xticklabels=LABELS,
                yticklabels=LABELS,
                annot=True,
                fmt='d')
    plt.title('Confusion Matrix')
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.show()
y_pred_test = net_model.predict(X_test)
# Take the class with the highest probability from the test predictions
max_y_pred_test = np.argmax(y_pred_test, axis=1)
max_y_test = np.argmax(y_test, axis=1)
show_confusion_matrix(max_y_test, max_y_pred_test)
print(classification_report(max_y_test, max_y_pred_test))
image_hash = hashlib.md5(str(x[0]) + str(x[1]) + str(x[2]) + str(x[3]) + str(x[4])).hexdigest()
image_path = image_paths[image_hash]
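For the record, the traceback points at the shape of x: after np.expand_dims, x has shape (1, 255, 255, 3), so only x[0] exists and x[1] is out of bounds. In Python 3, hashlib.md5 also expects bytes rather than str. A minimal sketch of a fix is to hash the raw bytes of the whole preprocessed array (everything else stays as above; this assumes dtypes are unchanged between loading and splitting):
# x has shape (1, 255, 255, 3); hash its raw bytes instead of indexing pixels
image_hash = hashlib.md5(x.tobytes()).hexdigest()
image_paths[image_hash] = img_path
With that in place, the misclassified test filenames can be recovered after the confusion matrix step:
# Indices where the predicted class disagrees with the true class
misclassified = np.where(max_y_pred_test != max_y_test)[0]
for idx in misclassified:
    sample_hash = hashlib.md5(X_test[idx].tobytes()).hexdigest()
    print(image_paths.get(sample_hash, 'path not found'),
          'true:', names[max_y_test[idx]],
          'predicted:', names[max_y_pred_test[idx]])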

Related

Bad predictions when there is noise in the data: LSTM time-series regression

I want to predict force plate output from a smart insole using an LSTM model for time-series prediction. The force plate data has positive and negative values (I think the positive values are noise). If I ignore the positive values, the predictions on the test data are bad, but if I change the positive values to 0, the predictions are good. What should I do if I want to keep the positive values unchanged but still get good predictions?
Force plate shape: (2050, 1). Smart insole shape: (2050, 89).
Below is my code:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import math
from tensorflow.keras.layers import Dense,RepeatVector, LSTM, Dropout
from tensorflow.keras.layers import Flatten, Conv1D, MaxPooling1D
from tensorflow.keras.layers import Bidirectional, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import plot_model
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from keras.callbacks import ModelCheckpoint, EarlyStopping
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import MinMaxScaler
%matplotlib inline
## Load Data
Insole = pd.read_csv('1113_Rwalk40s1_list.txt', header=None, low_memory=False)
SIData = np.asarray(Insole)
df = pd.read_csv('1113_Rwalk40s1.csv', low_memory=False)
columns = ['Fx']
selected_df = df[columns]
FCDatas = selected_df[:2050]
## End Load Data
## Concatenate Data
SmartInsole = np.array(SIData[:2050])
FCData = np.array(FCDatas)
# FCData = np.where(FCData>0, 0, FCData) #making positive value to 0
Dataset = np.concatenate((SmartInsole, FCData), axis=1)
## End Concatenate Data
## Normalization Data
scaler_in = MinMaxScaler(feature_range=(0, 1))
scaler_out = MinMaxScaler(feature_range=(0, 1))
data_scaled_in = scaler_in.fit_transform(Dataset[:,0:89])
data_scaled_out = scaler_out.fit_transform(Dataset[:,89:90])
## End Normalization Data
steps= 50
inp = []
out = []
for i in range(len(data_scaled_out) - steps):
    inp.append(data_scaled_in[i:i+steps])
    out.append(data_scaled_out[i+steps])
inp = np.asanyarray(inp)
out = np.asanyarray(out)
x_train, x_test, y_train, y_test = train_test_split(inp, out, test_size=0.25,random_state=2)
## Model Building
model = Sequential()
model.add(LSTM(64, activation='relu', return_sequences= False, input_shape= (50,89)))
model.add(Dense(32,activation='relu'))
model.add(Dense(16,activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss = 'mse', optimizer=Adam(learning_rate=0.002), metrics=['mse'])
model.summary()
## End Model Building
## Model fit
history = model.fit(x_train,y_train, epochs=50, verbose=2, batch_size=64, validation_data=(x_test, y_test))
## End Model fit
## Model Loss Plot
import matplotlib.pyplot as plt
plt.figure(figsize=(10,6))
plt.plot(history.history['loss'], label='Train Loss')
plt.plot(history.history['val_loss'], label='Test Loss')
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epochs')
plt.legend(loc='upper right')
plt.show()
## End Model Loss Plot
## Prediction and Model Evaluation
model.evaluate(inp, out)
predictions=model.predict(inp)
print('MSE: ',mean_squared_error(out, predictions))
print('RMSE: ',math.sqrt(mean_squared_error(out, predictions)))
print('Coefficient of determination (r2 Score): ', r2_score(out, predictions))
#invert normalize
predictions = scaler_out.inverse_transform(predictions)
out = scaler_out.inverse_transform(out)
x=[]
colors=['red','green','brown','teal','gray','black','maroon','orange','purple']
colors2=['green','red','orange','black','maroon','teal','blue','gray','brown']
x = np.arange(0,2000)*40/2000
for i in range(0, 1):
    plt.figure(figsize=(15, 6))
    plt.plot(x, out[0:2000, i], color=colors[i])
    plt.plot(x, predictions[0:2000, i], markerfacecolor='none', color=colors2[i])
    plt.title('LSTM Regression (Training Data)')
    plt.ylabel('Force/Fx (N)')
    plt.xlabel('Time(s)')
    plt.legend(['Real value', 'Predicted Value'], loc='lower left')
    # 'Regression Result.png'[i] indexes the string; use a formatted filename instead
    plt.savefig('Regression Result {}.png'.format(i))
    plt.show()
## End Prediction and Model Evaluation
## Model Validation
Test_Insole = pd.read_csv('1113_Rwalk40s2_list.txt', header=None, low_memory=False)
TestSIData = np.asarray(Test_Insole)
Test_df = pd.read_csv('1113_Rwalk40s2.csv', low_memory=False)
Test_columns = ['Fx']
Test_selected_df = Test_df[Test_columns]
Test_FCDatas = Test_selected_df[:2050]
test_SmartInsole = np.array(TestSIData[:2050])
test_FCData = np.array(Test_FCDatas)
# test_FCData = np.where(test_FCData>0, 0, test_FCData) #making positive value to 0
test_Dataset = np.concatenate((test_SmartInsole, test_FCData), axis=1)
# NOTE: fitting fresh scalers on the validation file gives it a different
# scale than the training data; reusing scaler_in/scaler_out would be more consistent
test_scaler_in = MinMaxScaler(feature_range=(0, 1))
test_scaler_out = MinMaxScaler(feature_range=(0, 1))
test_data_scaled_in = test_scaler_in.fit_transform(test_Dataset[:,0:89])
test_data_scaled_out = test_scaler_out.fit_transform(test_Dataset[:,89:90])
test_steps= 50
test_inp = []
test_out = []
for i in range(len(test_data_scaled_out) - test_steps):
    test_inp.append(test_data_scaled_in[i:i+test_steps])
    test_out.append(test_data_scaled_out[i+test_steps])
test_inp = np.asanyarray(test_inp)
test_out = np.asanyarray(test_out)
model.evaluate(test_inp, test_out)
test_predictions=model.predict(test_inp)
test_predictions = test_scaler_out.inverse_transform(test_predictions)
test_out = test_scaler_out.inverse_transform(test_out)
x=[]
colors=['red','green','brown','teal','gray','black','maroon','orange','purple']
colors2=['green','red','orange','black','maroon','teal','blue','gray','brown']
x = np.arange(0,2000)*40/2000
for i in range(0, 1):
    plt.figure(figsize=(15, 6))
    plt.plot(x, test_out[0:2000, i], color=colors[i])
    plt.plot(x, test_predictions[0:2000, i], markerfacecolor='none', color=colors2[i])
    plt.title('LSTM Regression (Testing Data)')
    plt.ylabel('Force/Fx (N)')
    plt.xlabel('Time(s)')
    plt.legend(['Real value', 'Predicted Value'], loc='lower left')
    # Same string-indexing bug as above; use a formatted filename
    plt.savefig('Regression Result Test {}.png'.format(i))
    plt.show()
## End Model validation
[Figure: the result without changing the positive values]
[Figure: the result after changing the positive values to 0]
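Independent of the noise question, one concrete inconsistency in the validation block above (see the NOTE comment there) is that new MinMaxScalers are fitted on the validation file, so its values map to a different range than the training data. A minimal sketch of the more consistent variant, reusing the scalers fitted on the training set:
# Reuse the scalers fitted on the training data instead of fitting new ones
test_data_scaled_in = scaler_in.transform(test_Dataset[:, 0:89])
test_data_scaled_out = scaler_out.transform(test_Dataset[:, 89:90])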

Singleton array array('y_pred', dtype='<U6') cannot be considered a valid collection - finding the F1 score on the Depresjon dataset

I have built a model to detect depression from activity data in the Depresjon dataset, where I have labelled depressed as 1 and non-depressed as 0. I am now trying to compute the F1 score and ROC curve for this model and am running into problems.
import os
import numpy
import pandas
from tensorflow import keras
from tensorflow.keras.backend import expand_dims
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.metrics import f1_score, make_scorer, confusion_matrix, accuracy_score, precision_score, recall_score, precision_recall_curve
condition_folder = list(os.walk("data/condition"))[0]
control_folder = list(os.walk("data/control"))[0]
condidtion_files = ["%s/%s"%(condition_folder[0],f) for f in condition_folder[2]]
control_files = ["%s/%s"%(control_folder[0],f) for f in control_folder[2]]
x = []
y = []
for f in condidtion_files:
    df = pandas.read_csv(f)
    x1 = numpy.array(df['activity'].tolist())
    x.append(x1)
    y.append(1)
for f in control_files:
    df = pandas.read_csv(f)
    x1 = numpy.array(df['activity'].tolist())
    x.append(x1)
    y.append(0)
x = numpy.array(x)
y = numpy.array(y)
seq_len_max = max([len(x1) for x1 in x])
x = pad_sequences(
    x, maxlen=seq_len_max,
    dtype='int32', padding='pre', truncating='pre',
    value=0.0
)
x = expand_dims(x, axis=-1)
y = to_categorical(y)
num_classes = y.shape[1]
numpy.save('x.npy', x)
numpy.save('y.npy', y)
x = numpy.load('x.npy')
y = numpy.load('y.npy')
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2,random_state=0)
numpy.save('x_train.npy', x_train)
numpy.save('y_train.npy', y_train)
numpy.save('x_test.npy', x_test)
numpy.save('y_test.npy', y_test)
x_train = numpy.load('x_train.npy')
y_train = numpy.load('y_train.npy')
x_test = numpy.load('x_test.npy')
y_test = numpy.load('y_test.npy')
'''
(65407, 1)
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x[0])
plt.show()
plt.close()
'''
def make_model(input_shape):
    input_layer = keras.layers.Input(input_shape)
    conv1 = keras.layers.Conv1D(filters=300, kernel_size=30, strides=10, padding="valid")(input_layer)
    conv2 = keras.layers.Conv1D(filters=300, kernel_size=30, strides=10, padding="valid")(conv1)
    conv2 = keras.layers.MaxPooling1D()(conv2)
    conv3 = keras.layers.Conv1D(filters=300, kernel_size=30, strides=10, padding="valid")(conv2)
    gap = keras.layers.GlobalMaxPooling1D()(conv3)
    output_layer = keras.layers.Dense(num_classes, activation="softmax")(gap)
    return keras.models.Model(inputs=input_layer, outputs=output_layer)
model = make_model(input_shape=x.shape[1:])
model.compile(
    optimizer="adam",
    loss="categorical_crossentropy",
    metrics=["acc"],
)
epochs = 10
batch_size = 60
acc = 0
while acc <= 0.95:
    history = model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
    )
    acc = history.history['acc'][-1]
y_pred = model.predict(x)
numpy.save('y_pred.npy', y_pred)
numpy.argmax(y_pred, 1)
model.save("depression.h5")
model = keras.models.load_model("depression.h5")
test_loss, test_acc = model.evaluate(x, y)
import os
import numpy
import pandas
from tensorflow import keras
from tensorflow.keras.backend import expand_dims
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.sequence import pad_sequences
model = keras.models.load_model("depression.h5")
def deep_depression_detector(activity_data_csv_file):
    df = pandas.read_csv(activity_data_csv_file)
    x = numpy.array([df['activity'].tolist()])
    x = pad_sequences(
        x, maxlen=65407,
        dtype='int32', padding='pre', truncating='pre',
        value=0.0
    )
    x = expand_dims(x, axis=-1)
    y_pred = model.predict(x)[0]
    y_score = numpy.max(y_pred)
    y_label = numpy.argmax(y_pred)
    if y_label == 1:
        return {'prediction': 'depressed', 'confidence': y_score}
    else:
        return {'prediction': 'nondepressed', 'confidence': y_score}
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
y_true = np.array(y_test)
y_pred = np.array(y_pred)
precision_recall_fscore_support(y_true, y_pred, average='weighted')
TypeError: Singleton array array('y_pred', dtype='<U6') cannot be considered a valid collection.
I get the above error when I try to compute the F1 score, despite following everything correctly. How should I also obtain the ROC curve, since that seems to raise an error as well?
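The dtype='<U6' in the message indicates that the literal string 'y_pred' was passed to the metric rather than the prediction array, and the arrays themselves are still one-hot/probability encoded. A minimal sketch of computing the F1 score and ROC curve on the held-out test set, assuming the model and arrays defined above:
from sklearn.metrics import f1_score, roc_curve, auc

# Predict on the test set, then collapse one-hot / softmax vectors to class labels
y_prob = model.predict(x_test)                 # shape (n, 2)
y_pred_labels = numpy.argmax(y_prob, axis=1)
y_true_labels = numpy.argmax(y_test, axis=1)

print('F1:', f1_score(y_true_labels, y_pred_labels))

# The ROC curve uses the predicted probability of the positive class (depressed = 1)
fpr, tpr, thresholds = roc_curve(y_true_labels, y_prob[:, 1])
print('AUC:', auc(fpr, tpr))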

Cannot transform list into tensor: ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type list)

I really need your help; I have been trying to figure this out but still cannot solve the problem. I can read the dataset with pandas (as in the picture), but I cannot turn it into a tensor. I have tried lots of different ways to convert the list to a tensor, array, or matrix, but none worked.
Do you have any idea how I can change my CSV reading so the data fits into TensorFlow?
import sys
import os
import numpy as np
from numpy import array
import pandas as pd               # needed below for pd.read_csv, missing in the original
import tensorflow as tf           # needed below for tf.convert_to_tensor, missing in the original
import matplotlib.pyplot as plt   # needed below for the plots, missing in the original
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from scipy.sparse import csr_matrix
import re
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from pandas import Series
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, LSTM, Input, Embedding
from sklearn import datasets
df = pd.read_csv('set1.csv')
# Convert list to tensor
def my_func(arg):
    arg = tf.convert_to_tensor(arg, dtype=tf.float32)
    return arg
# Each STR list is converted back into a Python list
s1 = df["sensor1"].apply(eval)
s2 = df["sensor2"].apply(eval)
s3 = df["sensor3"].apply(eval)
X = (s1, s2, s3)
X = np.matrix(X)
X = X.transpose()
Y = pd.get_dummies(data=df.categoria).values
#print("shape Y:", Y.shape, "type Y:", type(Y), "len Y:", len(Y))
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.1, random_state = 42)
print("Train Shape:", X_train.shape,Y_train.shape)
print("Test Shape:", X_test.shape,Y_test.shape)
def model():
    if pre_trained_wv is True:
        print("USE PRE TRAINED")
        #num_words = min(max_features, len(word_index) + 1)
        weights_embedding_matrix = load_pre_trained_wv(word_index, num_words, word_embedding_dim)
        input_shape = (max_sequence_length,)
        model_input = Input(shape=input_shape, name="input", dtype='float64')
        embedding = Embedding(
            num_words,
            word_embedding_dim,
            input_length=max_sequence_length,
            name="embedding",
            weights=[weights_embedding_matrix],
            trainable=False)(model_input)
        #if bilstm is True:
        #    lstm = Bidirectional(LSTM(word_embedding_dim, dropout=0.2, recurrent_dropout=0.2, name="lstm"))(embedding)
        #else:
        lstm = LSTM(word_embedding_dim, dropout=0.2, recurrent_dropout=0.2, name="lstm")(embedding)
    else:
        input_shape = (max_sequence_length,)
        #model_input = Input(shape=input_shape, name="input", dtype='int32')
        model_input = Input(shape=input_shape, name="input")
        embedding = Embedding(max_features, embed_dim, input_length=max_sequence_length, name="embedding")(model_input)
        #if bilstm is True:
        #    lstm = Bidirectional(LSTM(embed_dim, dropout=0.2, recurrent_dropout=0.2, name="lstm"))(embedding)
        #else:
        lstm = LSTM(embed_dim, dropout=0.2, recurrent_dropout=0.2, name="lstm")(embedding)
    model_output = Dense(2, activation='softmax', name="softmax")(lstm)
    model = Model(inputs=model_input, outputs=model_output)
    return model
model = model()
model.compile(loss = 'binary_crossentropy', optimizer='adam', metrics = ['accuracy'])
print(model.summary())
if not os.path.exists('./{}'.format(filename)):
    hist = model.fit(
        X_train,
        Y_train,
        validation_data=(X_test, Y_test),
        epochs=epochs,
        batch_size=batch_size,
        shuffle=True,
        verbose=1)
    model.save_weights(filename)
    # Plot
    plt.figure()
    plt.plot(hist.history['loss'], lw=2.0, color='b', label='train')
    plt.plot(hist.history['val_loss'], lw=2.0, color='r', label='val')
    plt.title('Classificador de Gestos')
    plt.xlabel('Epochs')
    plt.ylabel('Cross-Entropy')
    plt.legend(loc='upper right')
    plt.show()
    plt.figure()
    plt.plot(hist.history['acc'], lw=2.0, color='b', label='train')
    plt.plot(hist.history['val_acc'], lw=2.0, color='r', label='val')
    plt.title('Classificador de Gestos')
    plt.xlabel('Epochs')
    plt.ylabel('Acurácia')
    plt.legend(loc='upper left')
    plt.show()
else:
    model.load_weights('./{}'.format(filename))
scores = model.evaluate(X_test, Y_test, verbose=0, batch_size=batch_size)
print("Acc: %.2f%%" % (scores[1] * 100))
while True:
    sentence = input("input> ")
    if sentence == "exit":
        break
    # NOTE: in the code as posted, `sentence` is never tokenized into `new_text`
    # before this call, so `new_text` is undefined here
    new_text = pad_sequences(new_text, maxlen=max_sequence_length, dtype='float64', value=0)
    sentiment = model.predict(new_text, batch_size=1, verbose=2)[0]
    if np.argmax(sentiment) == 0:
        pred_proba = "%.2f%%" % (sentiment[0] * 100)
        print("movement to the right => ", pred_proba)
    elif np.argmax(sentiment) == 1:
        pred_proba = "%.2f%%" % (sentiment[1] * 100)
        print("movement to the left => ", pred_proba)
This is the CSV file: CSV
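A likely cause: each sensor column holds string-encoded Python lists, so np.matrix over the parsed Series produces an object array that TensorFlow cannot convert. A minimal sketch of building a proper numeric tensor, assuming the sensor1..sensor3 columns above and that every list has the same length:
import numpy as np
import pandas as pd
import tensorflow as tf
from ast import literal_eval

df = pd.read_csv('set1.csv')

# Parse the string-encoded lists (literal_eval is a safer eval)
cols = ['sensor1', 'sensor2', 'sensor3']
parsed = [df[c].apply(literal_eval) for c in cols]

# Stack into a (n_samples, seq_len, n_sensors) float32 array;
# np.stack fails loudly if the per-row lists differ in length
X = np.stack([np.stack(col.to_list()) for col in parsed], axis=-1).astype('float32')

X_tensor = tf.convert_to_tensor(X)  # now a dense numeric tensor
print(X_tensor.shape)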

Predictions with ANN are wrong most of the time

My problem is that my ANN predicts the right digit only about 2 times out of 10, even though it reports about 98% accuracy during fitting. I'm quite a beginner with ANNs and I don't know if I'm missing something obvious or why it behaves like this.
For testing I use a table with 81 digits (always in rows from 1 to 9).
I would be grateful if someone could help me :)
Training the ANN
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras.models import save_model
# See available datasets
print(tfds.list_builders())
# Construct a tf.data.Dataset
dataset = tfds.load(name="mnist", split=tfds.Split.TRAIN)
# Build your input pipeline
dataset = dataset.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)
for features in dataset.take(1):
    image, label = features["image"], features["label"]
# tfds works in both Eager and Graph modes
mnist = tf.keras.datasets.mnist
# Split into training and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
print(x_test[0])
# TF image recognition model
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
# Cross-entropy for the 10 digit classes
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# Model fitting and evaluation
model.fit(x_train, y_train, epochs=50)
model.evaluate(x_test, y_test)
filepath = './saved_model2'
save_model(model, filepath)
For testing with my table:
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.models import load_model
import cv2
import PIL
# Model configuration
img_width, img_height = 28, 28
batch_size = 250
no_epochs = 25
no_classes = 10
validation_split = 0.2
verbosity = 1
filepath = './saved_model2'
model = load_model(filepath, compile=True)
img = PIL.Image.open("Dataset/raster.jpg")
bilder = []
i= 0
for x in range(0, 224, 28):
    img1 = img.crop([0, x, 28, x+28])
    img2 = img.crop([28, x, 56, x+28])
    img3 = img.crop([56, x, 84, x+28])
    img4 = img.crop([84, x, 112, x+28])
    img5 = img.crop([112, x, 140, x+28])
    img6 = img.crop([140, x, 168, x+28])
    img7 = img.crop([168, x, 196, x+28])
    img8 = img.crop([196, x, 224, x+28])
    img9 = img.crop([224, x, 252, x+28])
    img1.save("Dataset/eigeneBilder/" + str(i+1) + ".jpg")
    img2.save("Dataset/eigeneBilder/" + str(i+2) + ".jpg")
    img3.save("Dataset/eigeneBilder/" + str(i+3) + ".jpg")
    img4.save("Dataset/eigeneBilder/" + str(i+4) + ".jpg")
    img5.save("Dataset/eigeneBilder/" + str(i+5) + ".jpg")
    img6.save("Dataset/eigeneBilder/" + str(i+6) + ".jpg")
    img7.save("Dataset/eigeneBilder/" + str(i+7) + ".jpg")
    img8.save("Dataset/eigeneBilder/" + str(i+8) + ".jpg")
    img9.save("Dataset/eigeneBilder/" + str(i+9) + ".jpg")
    i += 9
print(bilder)
samples_to_predict = []
for i in range(1, 81, 1):
    # Generate a plot
    sample = cv2.imread("Dataset/eigeneBilder/" + str(i) + ".jpg")
    img = cv2.bitwise_not(sample)
    sample = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    reshaped_image = sample.reshape((img_width, img_height, 1))
    plt.imshow(reshaped_image)
    # NOTE: this line discards its result, so the images are never scaled to
    # [0, 1] like the training data; it should be reshaped_image = reshaped_image / 255
    reshaped_image/255
    # Add sample to array for prediction
    samples_to_predict.append(reshaped_image)
samples_to_predict = np.array(samples_to_predict)
print(samples_to_predict[0])
predictions = model.predict(samples_to_predict)
classes = np.argmax(predictions, axis=1)
print(samples_to_predict)
print(classes)
One thing you can do is add validation_data to model.fit, like this:
model.fit(x_train, y_train, epochs=50, validation_data=(x_test, y_test))
You can also add some Conv2D layers with pooling before the Flatten layer, or add more neurons.
Let me know if that helps.
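As flagged in the comment in the test script, the most likely culprit is that reshaped_image/255 discards its result, so the test images are never normalized like the training data. Beyond that, the convolution suggestion could look like this; a minimal sketch, not a drop-in replacement for the exact saved model:
model = tf.keras.models.Sequential([
    # Conv/pool layers learn local stroke features before flattening
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
Note that Conv2D expects a channel dimension, so the MNIST arrays would need reshaping, e.g. x_train[..., np.newaxis].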

Image generator missing positional argument for unet keras

I keep getting the following error for the code below when I try to train the model: TypeError: fit_generator() missing 1 required positional argument: 'generator'. For the life of me I cannot figure out what is causing it. x_train contains RGB images of shape (400, 256, 256, 3), and y_train has 10 output classes, making its shape (400, 256, 256, 10). What is going wrong here?
If necessary the data can be downloaded with the following link:
https://www49.zippyshare.com/v/5pR3GPv3/file.html
import skimage
from skimage.io import imread, imshow, imread_collection, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model
from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D, Reshape, core, Dropout
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K
from sklearn.metrics import jaccard_similarity_score
from shapely.geometry import MultiPolygon, Polygon
import shapely.wkt
import shapely.affinity
from collections import defaultdict
from keras.preprocessing.image import ImageDataGenerator
from keras.utils.np_utils import to_categorical
from keras import utils as np_utils
import os
from keras.preprocessing.image import ImageDataGenerator
gen = ImageDataGenerator()
#Importing image and labels
labels = skimage.io.imread("ede_subset_293_wegen.tif")
images = skimage.io.imread("ede_subset_293_20180502_planetscope.tif")[...,:-1]
#scaling image
img_scaled = images / images.max()
#Make non-roads 0
labels[labels == 15] = 0
#Resizing image and mask and labels
img_scaled_resized = img_scaled[:6400, :6400 ]
print(img_scaled_resized.shape)
labels_resized = labels[:6400, :6400]
print(labels_resized.shape)
#splitting images
split_img = [
    np.split(array, 25, axis=0)
    for array in np.split(img_scaled_resized, 25, axis=1)
]
split_img[-1][-1].shape
#splitting labels
split_labels = [
    np.split(array, 25, axis=0)
    for array in np.split(labels_resized, 25, axis=1)
]
#Convert to np.array
split_labels = np.array(split_labels)
split_img = np.array(split_img)
train_images = np.reshape(split_img, (625, 256, 256, 3))
train_labels = np.reshape(split_labels, (625, 256, 256, 10))
train_labels = np_utils.to_categorical(train_labels, 10)
#Create train test and val
x_train = train_images[:400,:,:,:]
x_val = train_images[400:500,:,:,:]
x_test = train_images[500:625,:,:,:]
y_train = train_labels[:400,:,:]
y_val = train_labels[400:500,:,:]
y_test = train_labels[500:625,:,:]
# Create image generator (credit to Ioannis Nasios)
data_gen_args = dict(rotation_range=5,
                     width_shift_range=0.1,
                     height_shift_range=0.1,
                     validation_split=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
seed = 1
batch_size = 100
def XYaugmentGenerator(X1, y, seed, batch_size):
    genX1 = gen.flow(X1, y, batch_size=batch_size, seed=seed)
    genX2 = gen.flow(y, X1, batch_size=batch_size, seed=seed)
    while True:
        X1i = genX1.next()
        X2i = genX2.next()
        yield X1i[0], X2i[0]
# Train model
Model.fit_generator(XYaugmentGenerator(x_train, y_train, seed, batch_size),
                    steps_per_epoch=np.ceil(float(len(x_train)) / float(batch_size)),
                    validation_data=XYaugmentGenerator(x_val, y_val, seed, batch_size),
                    validation_steps=np.ceil(float(len(x_val)) / float(batch_size)),
                    shuffle=True, epochs=20)
You have a few mistakes in your code, but considering your error:
TypeError: fit_generator() missing 1 required positional argument: 'generator'
this happens because fit_generator is called on the Model class itself rather than on a model instance; your generator is therefore consumed as self, leaving the generator argument missing. You need to build and compile an actual model (your U-Net) and call fit_generator on that instance.
A second problem is that the augmented image_datagen is never used: inside XYaugmentGenerator you call gen.flow(...), and gen is the plain ImageDataGenerator() created at the top, so no augmentation is applied. Either rename image_datagen to gen:
gen = ImageDataGenerator(**data_gen_args)
or replace gen with image_datagen:
genX1 = image_datagen.flow(X1, y, batch_size=batch_size, seed=seed)
genX2 = image_datagen.flow(y, X1, batch_size=batch_size, seed=seed)
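A minimal sketch of the instance fix, assuming a hypothetical build_unet() helper (the posted code never constructs a model):
# Build and compile an actual model instance first (build_unet is hypothetical)
model = build_unet(input_shape=(256, 256, 3), num_classes=10)
model.compile(optimizer=Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy'])

# Call fit_generator on the instance, not on the Model class
model.fit_generator(XYaugmentGenerator(x_train, y_train, seed, batch_size),
                    steps_per_epoch=np.ceil(len(x_train) / batch_size),
                    validation_data=XYaugmentGenerator(x_val, y_val, seed, batch_size),
                    validation_steps=np.ceil(len(x_val) / batch_size),
                    epochs=20)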
