How to view incorrectly recognized text - python

There is a neural network that classifies the sentiment of reviews. Its accuracy is not 100%, so there are texts that the network classifies incorrectly. How can I see them? I tried writing my own function, but it raises an error.
data = pd.concat([positive_train_data,negative_train_data,positive_test_data,negative_test_data],ignore_index = True)
data.reset_index(drop=True,inplace=True)
x = data.Text
y = data.Sentiment
x_train, x_test, y_train1, y_test = train_test_split(x, y, test_size = 0.50, random_state = 2000)
print( "Train set has total {0} entries with {1:.2f}% negative, {2:.2f}% positive".format(len(x_train),
(len(x_train[y_train1 == 0]) / (len(x_train)*1.))*100,
(len(x_train[y_train1 == 1]) / (len(x_train)*1.))*100))
print ("Test set has total {0} entries with {1:.2f}% negative, {2:.2f}% positive".format(len(x_test),
(len(x_test[y_test == 0]) / (len(x_test)*1.))*100,
(len(x_test[y_test == 1]) / (len(x_test)*1.))*100))
tvec1 = TfidfVectorizer(max_features=10000, ngram_range=(1, 2), min_df=3,
                        use_idf=True, smooth_idf=True, sublinear_tf=True,
                        stop_words='english')
tvec1.fit(x_train)
x_train_tfidf = tvec1.transform(x_train)
print(x_test.shape)
x_test_tfidf = tvec1.transform(x_test).toarray()
model = Sequential()
model.add(Dense(100, activation='relu', input_dim=10000))
model.add(Dropout(0.25))
model.add(Dense(50,activation = 'relu'))
model.add(Dense(1, activation='sigmoid'))
optimiz = optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(loss = 'binary_crossentropy',optimizer = optimiz ,metrics = ['accuracy'])
hist = model.fit(x_train_tfidf,y_train1,validation_data = (x_test_tfidf,y_test ),epochs = 5,batch_size = 64)
And here is my function:
y_pred_vect = model.predict(x_test_tfidf)
# boolean mask of misclassified samples
mask = (y_pred_vect != y_test).any(axis=1)
print(mask)
print(len(mask))
num_words = 5000  # only use the top 5,000 words
INDEX_FROM = 3  # word index offset
# this step is needed to get `test_x` in its original form (before tokenization):
(train_x, _), (test_x, _) = imdb.load_data(num_words=num_words, index_from=INDEX_FROM)
x_wrong = test_x[mask]
word_to_id = imdb.get_word_index()
word_to_id = {k:(v+INDEX_FROM) for k,v in word_to_id.items()}
word_to_id["<PAD>"] = 0
word_to_id["<START>"] = 1
word_to_id["<UNK>"] = 2
id_to_word = {value:key for key,value in word_to_id.items()}
all_wrong_sents = [' '.join(id_to_word[id] for id in sent) for sent in x_wrong]
print(all_wrong_sents[:10])
The error is raised on this line:
mask = (y_pred_vect != y_test).any(axis=1)
with the message: Data must be 1-dimensional

Try this...
import numpy as np
mask = np.squeeze(y_pred_vect) != y_test
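Note also that model.predict returns sigmoid probabilities of shape (n, 1), not hard labels, so even the squeezed comparison will flag nearly every row; and indexing the freshly loaded imdb test set with a mask computed on your own train_test_split mixes two different row orders. A minimal sketch that stays entirely within your own split, assuming x_test and y_test are still the pandas Series produced by train_test_split above:
import numpy as np
# round the sigmoid outputs to hard 0/1 labels before comparing
y_pred_labels = (np.squeeze(y_pred_vect) > 0.5).astype(int)
mask = y_pred_labels != y_test.values  # .values avoids index-alignment surprises
# x_test still holds the raw texts, so the mask selects the misclassified reviews
wrong_texts = x_test[mask]
print(wrong_texts.head(10))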

Related

Keras text classification from scratch - error implementing

I'm implementing Keras text classification from scratch on an airline dataset of reviews, but I'm getting an error: logits and labels must have the same shape ((None, 1) vs ()).
The code below is a bit long, but it includes the tensor preprocessing, and the model uses the functional API:
max_features = 200
sequence_length = None
embedding_dim = 128
from tensorflow.keras.layers import TextVectorization
dtrain_lab = data_train[['airline_sentiment','negativereason']].to_numpy()
display(dtrain_lab)
tlist_txt = data_train['negativereason'].tolist()
tlist_sent = data_train['airline_sentiment'].tolist()
rac = 0
for k in tlist_txt:
    rap = tlist_txt[rac]
    if pd.isnull(rap) == True:
        tlist_txt[rac] = 'empty'
    rac += 1
#p-prueba
p_list = []
for i in tlist_sent:
    if i == 'positive':
        p_list.append(1)
    if i == 'negative' or i == 'neutral':
        p_list.append(0)
train_sent = np.array(p_list)
val_txt = data_val['negativereason'].tolist()
val_sent = data_val['airline_sentiment'].tolist()
l_val = []
for j in val_sent:
    if j == 'positive':
        l_val.append(1)
    if j == 'negative' or j == 'neutral':
        l_val.append(0)
sent_val_na = np.array(l_val)
dac = 0
for k in val_txt:
    hap = val_txt[dac]
    if pd.isnull(hap) == True:
        val_txt[dac] = 'empty'
    dac += 1
tftrain_db = tf.data.Dataset.from_tensor_slices((tlist_txt, train_sent))
tfval_db = tf.data.Dataset.from_tensor_slices((val_txt, sent_val_na))
vectorize_layer = TextVectorization(
    standardize='lower_and_strip_punctuation',
    split="whitespace",
    max_tokens=200,
    output_mode="int",
    output_sequence_length=30,
)
def vectorize_text(text, label):
    text = tf.expand_dims(text, -1)
    return vectorize_layer(text), label
text_ds = tftrain_db.map(lambda x, y: x)
vectorize_layer.adapt(text_ds)
v_dbtrain = tftrain_db.map(vectorize_text)
v_dbval = tfval_db.map(vectorize_text)
from tensorflow.keras import layers
inputs = tf.keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(max_features, embedding_dim)(inputs)
x = layers.Dropout(0.5)(x)
x = layers.Conv1D(128, 7, padding="valid", activation="relu", strides=3)(x)
x = layers.Conv1D(128, 7, padding="valid", activation="relu", strides=3)(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dense(128, activation="relu", input_shape=(None,))(x)
x = layers.Dropout(0.5)(x)
predictions = layers.Dense(1, activation="sigmoid", name="predictions")(x)
model = tf.keras.Model(inputs, predictions)
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
epochs = 3
model.fit(v_dbtrain, validation_data= tfval_db, epochs=epochs)
Error is:
ValueError: `logits` and `labels` must have the same shape, received ((None, 1) vs ()).
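Not an official fix from the thread, but two details in the code above line up with that error: the datasets built with from_tensor_slices are never batched, so each label arrives as a scalar of shape (), and model.fit validates on the raw tfval_db instead of the vectorized v_dbval. A sketch of the likely repair, assuming a batch size of 32:
batch_size = 32
# batch first, then vectorize, and validate on the vectorized dataset
v_dbtrain = tftrain_db.batch(batch_size).map(vectorize_text)
v_dbval = tfval_db.batch(batch_size).map(vectorize_text)
model.fit(v_dbtrain, validation_data=v_dbval, epochs=epochs)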

Spoken Language Identification

```
import numpy as np
import glob
import os
from keras.models import Model
from keras.layers import Input, Dense, GRU, CuDNNGRU, CuDNNLSTM
from keras import optimizers
import h5py
from sklearn.model_selection import train_test_split
from keras.models import load_model
def language_name(index):
    if index == 0:
        return "English"
    elif index == 1:
        return "Hindi"
    elif index == 2:
        return "Mandarin"
# ---------------------------BLOCK 1------------------------------------
# COMMENT/UNCOMMENT BELOW CODE BLOCK -
# Below code extracts mfcc features from the files provided into a dataset
codePath = './train/'
num_mfcc_features = 64
english_mfcc = np.array([]).reshape(0, num_mfcc_features)
for file in glob.glob(codePath + 'english/*.npy'):
    current_data = np.load(file).T
    english_mfcc = np.vstack((english_mfcc, current_data))
hindi_mfcc = np.array([]).reshape(0, num_mfcc_features)
for file in glob.glob(codePath + 'hindi/*.npy'):
    current_data = np.load(file).T
    hindi_mfcc = np.vstack((hindi_mfcc, current_data))
mandarin_mfcc = np.array([]).reshape(0, num_mfcc_features)
for file in glob.glob(codePath + 'mandarin/*.npy'):
    current_data = np.load(file).T
    mandarin_mfcc = np.vstack((mandarin_mfcc, current_data))
# Sequence length is 10 seconds
sequence_length = 1000
list_english_mfcc = []
num_english_sequence = int(np.floor(len(english_mfcc)/sequence_length))
for i in range(num_english_sequence):
    list_english_mfcc.append(english_mfcc[sequence_length*i:sequence_length*(i+1)])
list_english_mfcc = np.array(list_english_mfcc)
english_labels = np.full((num_english_sequence, 1000, 3), np.array([1, 0, 0]))
list_hindi_mfcc = []
num_hindi_sequence = int(np.floor(len(hindi_mfcc)/sequence_length))
for i in range(num_hindi_sequence):
    list_hindi_mfcc.append(hindi_mfcc[sequence_length*i:sequence_length*(i+1)])
list_hindi_mfcc = np.array(list_hindi_mfcc)
hindi_labels = np.full((num_hindi_sequence, 1000, 3), np.array([0, 1, 0]))
list_mandarin_mfcc = []
num_mandarin_sequence = int(np.floor(len(mandarin_mfcc)/sequence_length))
for i in range(num_mandarin_sequence):
    list_mandarin_mfcc.append(mandarin_mfcc[sequence_length*i:sequence_length*(i+1)])
list_mandarin_mfcc = np.array(list_mandarin_mfcc)
mandarin_labels = np.full((num_mandarin_sequence, 1000, 3), np.array([0, 0, 1]))
del english_mfcc
del hindi_mfcc
del mandarin_mfcc
total_sequence_length = num_english_sequence + num_hindi_sequence + num_mandarin_sequence
Y_train = np.vstack((english_labels, hindi_labels))
Y_train = np.vstack((Y_train, mandarin_labels))
X_train = np.vstack((list_english_mfcc, list_hindi_mfcc))
X_train = np.vstack((X_train, list_mandarin_mfcc))
del list_english_mfcc
del list_hindi_mfcc
del list_mandarin_mfcc
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.2)
with h5py.File("mfcc_dataset.hdf5", 'w') as hf:
    hf.create_dataset('X_train', data=X_train)
    hf.create_dataset('Y_train', data=Y_train)
    hf.create_dataset('X_val', data=X_val)
    hf.create_dataset('Y_val', data=Y_val)
# ---------------------------------------------------------------
# --------------------------BLOCK 2-------------------------------------
# Load MFCC Dataset created by the code in the previous steps
with h5py.File("mfcc_dataset.hdf5", 'r') as hf:
    X_train = hf['X_train'][:]
    Y_train = hf['Y_train'][:]
    X_val = hf['X_val'][:]
    Y_val = hf['Y_val'][:]
# ---------------------------------------------------------------
# ---------------------------BLOCK 3------------------------------------
# Setting up the model for training
DROPOUT = 0.3
RECURRENT_DROP_OUT = 0.2
optimizer = optimizers.Adam(decay=1e-4)
main_input = Input(shape=(sequence_length, 64), name='main_input')
# ### main_input = Input(shape=(None, 64), name='main_input')
# ### pred_gru = GRU(4, return_sequences=True, name='pred_gru')(main_input)
# ### rnn_output = Dense(3, activation='softmax', name='rnn_output')(pred_gru)
layer1 = CuDNNLSTM(64, return_sequences=True, name='layer1')(main_input)
layer2 = CuDNNLSTM(32, return_sequences=True, name='layer2')(layer1)
layer3 = Dense(100, activation='tanh', name='layer3')(layer2)
rnn_output = Dense(3, activation='softmax', name='rnn_output')(layer3)
model = Model(inputs=main_input, outputs=rnn_output)
print('\nCompiling model...')
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['acc'])
model.summary()
history = model.fit(X_train, Y_train, batch_size=32, epochs=75, validation_data=(X_val, Y_val), shuffle=True, verbose=1)
model.save('sld.hdf5')
# ---------------------------------------------------------------
# --------------------------BLOCK 4-------------------------------------
# Inference Mode Setup
streaming_input = Input(name='streaming_input', batch_shape=(1, 1, 64))
pred_layer1 = CuDNNLSTM(64, return_sequences=True, name='layer1', stateful=True)(streaming_input)
pred_layer2 = CuDNNLSTM(32, return_sequences=True, name='layer2')(pred_layer1)
pred_layer3 = Dense(100, activation='tanh', name='layer3')(pred_layer2)
pred_output = Dense(3, activation='softmax', name='rnn_output')(pred_layer3)
streaming_model = Model(inputs=streaming_input, outputs=pred_output)
streaming_model.load_weights('sld.hdf5')
# streaming_model.summary()
# ---------------------------------------------------------------
# ---------------------------BLOCK 5------------------------------------
# Language Prediction for a random sequence from the validation data set
random_val_sample = np.random.randint(0, X_val.shape[0])
random_sequence_num = np.random.randint(0, len(X_val[random_val_sample]))
test_single = X_val[random_val_sample][random_sequence_num].reshape(1, 1, 64)
val_label = Y_val[random_val_sample][random_sequence_num]
true_label = language_name(np.argmax(val_label))
print("***********************")
print("True label is ", true_label)
single_test_pred_prob = streaming_model.predict(test_single)
pred_label = language_name(np.argmax(single_test_pred_prob))
print("Predicted label is ", pred_label)
print("***********************")
# ---------------------------------------------------------------
# ---------------------------BLOCK 6------------------------------------
## COMMENT/UNCOMMENT BELOW
# Prediction for all sequences in the validation set - Takes very long to run
print("Predicting labels for all sequences - (Will take a lot of time)")
list_pred_labels = []
for i in range(X_val.shape[0]):
    for j in range(X_val.shape[1]):
        test = X_val[i][j].reshape(1, 1, 64)
        seq_predictions_prob = streaming_model.predict(test)
        predicted_language_index = np.argmax(seq_predictions_prob)
        list_pred_labels.append(predicted_language_index)
pred_english = list_pred_labels.count(0)
pred_hindi = list_pred_labels.count(1)
pred_mandarin = list_pred_labels.count(2)
print("Number of English labels = ", pred_english)
print("Number of Hindi labels = ", pred_hindi)
print("Number of Mandarin labels = ", pred_mandarin)
# ---------------------------------------------------------------
```
```
Traceback (most recent call last):
  File "C:\Users\SKYLAND-2\Documents\nipunmanral SLR\language_identification.py", line 79, in <module>
    X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.2)
  File "C:\Users\SKYLAND-2\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\model_selection\_split.py", line 2417, in train_test_split
    arrays = indexable(*arrays)
  File "C:\Users\SKYLAND-2\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\utils\validation.py", line 378, in indexable
    check_consistent_length(*result)
  File "C:\Users\SKYLAND-2\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\utils\validation.py", line 332, in check_consistent_length
    raise ValueError(
ValueError: Found input variables with inconsistent numbers of samples: [3, 0]
```
Hi, I am trying to run code from nipunmanral's spoken language identification project and I received this error. This is my first time learning machine learning; I am trying to do spoken language identification, which classifies which language an audio clip is in. I hope someone can share a tutorial or help fix the error.
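One likely cause, inferred from the shapes rather than from the original thread: if glob.glob finds no .npy files (wrong codePath, or every file shorter than sequence_length frames), each list_*_mfcc becomes an empty array of shape (0,), and np.vstack of three such arrays yields an X_train of shape (3, 0) while the stacked labels are empty, which matches the reported [3, 0]. A quick sanity check before the split, as a sketch:
```
# check that feature files were actually found and are long enough
print("english sequences:", num_english_sequence)
print("hindi sequences:", num_hindi_sequence)
print("mandarin sequences:", num_mandarin_sequence)
assert min(num_english_sequence, num_hindi_sequence, num_mandarin_sequence) > 0, (
    "No sequences extracted - check that ./train/<language>/*.npy exists "
    "and each file has at least sequence_length (1000) frames")
```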

Transformation to multilabel classification

I'm trying to implement a neural network in Python (Keras) that will predict the probability of multiple outcomes. At the moment I have the following code; for simplicity, I have reduced the problem to 3 inputs and 3 outputs:
import keras as k
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data_frame = pd.read_csv("123.csv")
input_names = ["Sex", "Age", "IQ"]
output_names = ["OUTPUT1", "OUTPUT2", "OUTPUT3"]
raw_input_data = data_frame[input_names]
raw_output_data = data_frame[output_names]
max_age = 100
encoders = {"Age": lambda age: [age/max_age],
"Sex": lambda gen: {"male": [0], "female": [1]}.get(gen),
"IQ": lambda iq_value: [iq_value],
"OUTPUT1": lambda output1_value: [output1_value],
"OUTPUT2": lambda output2_value: [output2_value],
"OUTPUT3": lambda output3_value: [output3_value]}
def dataframe_to_dict(df):
    result = dict()
    for column in df.columns:
        values = df[column].values  # use the df argument, not the global data_frame
        result[column] = values
    return result
def make_supervised(df):
    raw_input_data = df[input_names]
    raw_output_data = df[output_names]
    return {"inputs": dataframe_to_dict(raw_input_data),
            "outputs": dataframe_to_dict(raw_output_data)}
def encode(data):
    vectors = []
    for data_name, data_values in data.items():
        encoded = list(map(encoders[data_name], data_values))
        vectors.append(encoded)
    formatted = []
    for vector_raw in list(zip(*vectors)):
        vector = []
        for element in vector_raw:
            for e in element:
                vector.append(e)
        formatted.append(vector)
    return formatted
supervised = make_supervised(data_frame)
encoded_inputs = np.array(encode(supervised["inputs"]))
encoded_outputs = np.array(encode(supervised["outputs"]))
train_x = encoded_inputs[:300]
train_y = encoded_outputs[:300]
test_x = encoded_inputs[300:]
test_y = encoded_outputs[300:]
model = k.Sequential()
model.add(k.layers.Dense(units=5, activation="relu"))
model.add(k.layers.Dense(units=1, activation="sigmoid"))
model.compile(loss="mse", optimizer="sgd", metrics=["accuracy"])
fit_results = model.fit(x=train_x, y=train_y, epochs=100, validation_split=0.2)
plt.title("Losses train/validation")
plt.plot(fit_results.history["loss"], label="Train")
plt.plot(fit_results.history["val_loss"], label="Validation")
plt.legend()
plt.show()
plt.title("Accuracies train/validation")
plt.plot(fit_results.history["accuracy"], label="Train")
plt.plot(fit_results.history["val_accuracy"], label="Validation")
plt.legend()
plt.show()
predicted_test = model.predict(test_x)
real_data = data_frame.iloc[300:][input_names+output_names]
real_data["POUTPUT1", "POUTPUT2", "POUTPUT3"] = predicted_test
print(real_data)
real_data.to_csv('C:/***/133.csv')
I need help implementing the output of probabilities for all 3 outcomes [POUTPUT1, POUTPUT2, POUTPUT3] (currently it outputs only one) and saving them in a table like this one:
You need to adapt the input and output of your model, and change the sigmoid output activation to one that supports categories (softmax, for example). Try something like this:
INPUT_DIM = 3
OUTPUT_DIM = 3
# first define your model
model = k.models.Sequential()
model.add(k.layers.Dense(8, activation='relu', input_dim = INPUT_DIM ))
model.add(k.layers.Dense(8, activation='relu'))
## you can add more layer if you want, to customize your model
model.add(k.layers.Dense(OUTPUT_DIM, activation='softmax'))
# then compile
model.compile(loss="mse", optimizer="sgd", metrics=["accuracy"])
# then fit
fit_results = model.fit(train_x, train_y, epochs=100, validation_split=0.2)
So, I tested your code with the changes I suggested, and the network seems to work. Try this:
import keras as k
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data_frame = pd.read_csv("123.csv")
input_names = ["Sex", "Age", "IQ"]
output_names = ["OUTPUT1", "OUTPUT2", "OUTPUT3"]
raw_input_data = data_frame[input_names]
raw_output_data = data_frame[output_names]
max_age = 100
encoders = {"Age": lambda age: [age/max_age],
"Sex": lambda gen: {"male": [0], "female": [1]}.get(gen),
"IQ": lambda iq_value: [iq_value],
"OUTPUT1": lambda output1_value: [output1_value],
"OUTPUT2": lambda output2_value: [output2_value],
"OUTPUT3": lambda output3_value: [output3_value]}
def dataframe_to_dict(df):
    result = dict()
    for column in df.columns:
        values = df[column].values  # use the df argument, not the global data_frame
        result[column] = values
    return result
def make_supervised(df):
    raw_input_data = df[input_names]
    raw_output_data = df[output_names]
    return {"inputs": dataframe_to_dict(raw_input_data),
            "outputs": dataframe_to_dict(raw_output_data)}
def encode(data):
    vectors = []
    for data_name, data_values in data.items():
        encoded = list(map(encoders[data_name], data_values))
        vectors.append(encoded)
    formatted = []
    for vector_raw in list(zip(*vectors)):
        vector = []
        for element in vector_raw:
            for e in element:
                vector.append(e)
        formatted.append(vector)
    return formatted
supervised = make_supervised(data_frame)
encoded_inputs = np.array(encode(supervised["inputs"]))
encoded_outputs = np.array(encode(supervised["outputs"]))
print(encoded_inputs)
print(encoded_outputs)
train_x = encoded_inputs[:-10]
train_y = encoded_outputs[:-10]
test_x = encoded_inputs[-10:] # I changed this to fit my fake data
test_y = encoded_outputs[-10:] # but you can keep your code.
INPUT_DIM = 3
OUTPUT_DIM = 3
# first define your model
model = k.models.Sequential()
model.add(k.layers.Dense(8, activation='relu', input_dim = INPUT_DIM ))
model.add(k.layers.Dense(8, activation='relu'))
model.add(k.layers.Dense(OUTPUT_DIM, activation='softmax'))
# then compile
model.compile(loss="mse", optimizer="sgd", metrics=["accuracy"])
# then fit
fit_results = model.fit(train_x, train_y, epochs=100, validation_split=0.2)
# plt.title("Losses train/validation")
# plt.plot(fit_results.history["loss"], label="Train")
# plt.plot(fit_results.history["val_loss"], label="Validation")
# plt.legend()
# plt.show()
# plt.title("Accuracies train/validation")
# plt.plot(fit_results.history["accuracy"], label="Train")
# plt.plot(fit_results.history["val_accuracy"], label="Validation")
# plt.legend()
# plt.show()
predicted_test = model.predict(test_x)
print(predicted_test[0])
Then, when I print predicted_test[0], it gives me the outputs:
[[0.9967424 0.00114053 0.00211706]]
After that, I don't know exactly what you want to do with the dataframe, but I would try something like:
real_data = data_frame.iloc[-2:][input_names+output_names]
real_data.reset_index(inplace=True)
real_data["POUTPUT1"] = predicted_test[:,0]
real_data["POUTPUT2"] = predicted_test[:,1]
real_data["POUTPUT3"] = predicted_test[:,2]
print(real_data)
# then save it
real_data.to_csv(...)
Third edit to solve your problem; I think it's OK now, the initial question is solved.
You should close this topic and open a new one if you encounter a new issue.
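One caveat to the softmax suggestion above: softmax forces the three outputs to sum to 1, treating OUTPUT1-3 as mutually exclusive classes. If the three outcomes are independent and can co-occur (true multilabel), the usual choice is a sigmoid on each output with binary cross-entropy instead; a minimal sketch:
model = k.models.Sequential()
model.add(k.layers.Dense(8, activation='relu', input_dim=INPUT_DIM))
model.add(k.layers.Dense(8, activation='relu'))
# one independent probability per outcome instead of a distribution over them
model.add(k.layers.Dense(OUTPUT_DIM, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])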

Explaining my deep learning model with LIME text explainer in python on Twitter sentiment analysis

I have a dataset of Tweets labelled with sentiments. I have pre-processed the data and done part-of-speech tagging (all via NLTK in Python). After preprocessing, the data looks like this:
Pre-processed tweets
After preprocessing, the training data is prepared with the following code:
full_text = list(train['content'].values) + list(test['content'].values)
tokenizer = Tokenizer(num_words=20000,lower = True, filters = '')
tokenizer.fit_on_texts(full_text)
train_tokenized = tokenizer.texts_to_sequences(train['content'])
test_tokenized = tokenizer.texts_to_sequences(test['content'])
max_len = 50
X_train = pad_sequences(train_tokenized, maxlen = max_len)
X_test = pad_sequences(test_tokenized, maxlen = max_len)
embed_size = 300
max_features = 20000
def get_coefs(word, *arr):
    return word, np.asarray(arr, dtype='float32')
def get_embed_mat(embedding_path):
    embedding_index = dict(get_coefs(*o.strip().split(" ")) for o in open(embedding_path, encoding="utf8"))
    word_index = tokenizer.word_index
    nb_words = min(max_features, len(word_index))
    print(nb_words)
    embedding_matrix = np.zeros((nb_words + 1, embed_size))
    for word, i in word_index.items():
        if i >= max_features:
            continue
        embedding_vector = embedding_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
    return embedding_matrix
A deep learning model is built with word embeddings as a layer. The code for building the model is given below:
def build_model1(lr=0.0, lr_d=0.0, units=0, dr=0.0):
    inp = Input(shape=(max_len,))
    x = Embedding(20001, embed_size, weights=[embedding_matrix], trainable=False)(inp)
    x1 = SpatialDropout1D(dr)(x)
    x_lstm = Bidirectional(LSTM(units, return_sequences=True))(x1)
    x1 = Conv1D(32, kernel_size=2, padding='valid', kernel_initializer='he_uniform')(x_lstm)
    avg_pool1_lstm1 = GlobalAveragePooling1D()(x1)
    max_pool1_lstm1 = GlobalMaxPooling1D()(x1)
    x_lstm = Bidirectional(LSTM(units, return_sequences=True))(x1)
    x1 = Conv1D(32, kernel_size=2, padding='valid', kernel_initializer='he_uniform')(x_lstm)
    avg_pool1_lstm = GlobalAveragePooling1D()(x1)
    max_pool1_lstm = GlobalMaxPooling1D()(x1)
    x = concatenate([avg_pool1_lstm1, max_pool1_lstm1,
                     avg_pool1_lstm, max_pool1_lstm])
    # x = BatchNormalization()(x)
    x = Dropout(0.1)(Dense(128, activation='relu')(x))
    x = BatchNormalization()(x)
    x = Dropout(0.1)(Dense(64, activation='relu')(x))
    x = Dense(8, activation="sigmoid")(x)
    model = Model(inputs=inp, outputs=x)
    model.compile(loss="binary_crossentropy", optimizer=Adam(lr=lr, decay=lr_d), metrics=["accuracy"])
    history = model.fit(X_train, y_one_hot, batch_size=128, epochs=20, validation_split=0.1,
                        verbose=1, callbacks=[check_point, early_stop])
    model = load_model(file_path)
    return model
I want to use LIME to explain the predictions of this model (as shown in the image below), but it is not working.
Lime Text explanation of Model
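A common reason LIME fails with a setup like this: LimeTextExplainer.explain_instance expects a classifier function that maps a list of raw strings to an array of class probabilities, while the model above consumes padded integer sequences. A minimal sketch of such a wrapper, assuming the tokenizer, max_len, and trained model from the code above, and a hypothetical raw tweet string sample_tweet:
from lime.lime_text import LimeTextExplainer
def predict_proba(texts):
    # LIME passes perturbed raw strings; tokenize and pad them exactly
    # as the training data was prepared
    seqs = tokenizer.texts_to_sequences(texts)
    return model.predict(pad_sequences(seqs, maxlen=max_len))
explainer = LimeTextExplainer(class_names=["class_{}".format(i) for i in range(8)])
exp = explainer.explain_instance(sample_tweet, predict_proba, num_features=10, labels=(0,))
print(exp.as_list(label=0))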

ValueError: Output of generator should be a tuple `(x, y, sample_weight)` or `(x, y)`. Found: <keras...>

When training a vgg_face model on Keras, I used a data generator but got this issue: ValueError: Output of generator should be a tuple (x, y, sample_weight) or (x, y). Found: <keras_preprocessing.image.NumpyArrayIterator object at 0x7f03e83a75f8>
I have tried the previous methods for a similar issue: ValueError: Output of generator should be a tuple (x, y, sample_weight) or (x, y). Found: None, but they did not work.
def process_line(line):
    path = '/home/apptech/pixeleye_test/apps/arup/dataset/AFAD-Full'
    label_ages = np.asarray([line[1:3]])
    label_genders = np.array([line[4:7]])
    data = Image.open(path + line)
    arr = np.asarray(data, dtype="float32")
    arr = cv2.resize(arr, (224, 224))
    # return (arr,label_ages)
    if label_ages and label_genders != None:
        return (arr, label_ages)
def generate_arrays_from_file(data, batch_size, datagen):
    # np_utils.to_categorical onehot
    while True:
        f = data
        cnt = 0
        X_Y = []
        X = []
        Y_age = []
        Y_gender = []
        for line in f:
            # x,y_age,y_gender=process_line(line.strip('\n'))
            x, y_age = process_line(line.strip('\n'))[0], process_line(line.strip('\n'))[1]
            X.append(x)
            # X_Y.append(x_y)
            Y_age.append(y_age)
            # if int(y_gender)==111:
            #     label=np.array([1,0])
            #     Y_gender.append(label)
            # if int(y_gender)==112:
            #     label = np.array([0, 1])
            #     Y_gender.append(label)
            cnt += 1
            if cnt == batch_size:
                cnt = 0
                datagen.fit(X)
                print(np.asarray(X).shape, np.asarray(Y_age).shape)
                yield datagen.flow(np.array(X), np.array(Y_age), batch_size=batch_size)
                # yield np.asarray(X), np.asarray(Y_age)
                X = []
                X_Y = []
                Y_age = []
                Y_gender = []
    # f.close()
def model(epochs, lr, batch_size):
    content = open('/home/apptech/pixeleye_test/apps/arup/dataset/AFAD-Full/AFAD-Full.txt').readlines()
    random.shuffle(content)
    num = int(len(content) * 0.8)
    train_data = content[:num]
    test_data = content[num:]
    # Convolution Features
    vgg_model = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3),
                        pooling='max')  # pooling: None, avg or max
    # custom parameters
    last_layer = vgg_model.get_layer('avg_pool').output
    x = Flatten(name='flatten')(last_layer)
    out_age = Dense(units=1000, activation='relu', name='regression', kernel_regularizer=regularizers.l2(0.01))(x)
    out_age1 = Dense(units=500, activation='relu', name='regression1', kernel_regularizer=regularizers.l2(0.01))(out_age)
    out_age2 = Dense(units=100, name='regression2', kernel_regularizer=regularizers.l2(0.01))(out_age1)
    out_age3 = Dense(units=1, name='regression3', kernel_regularizer=regularizers.l2(0.01))(out_age2)
    out_gender = Dense(units=2, activation='softmax', name='classifier1')(x)
    # custom_vgg_model = Model(vgg_model.input, outputs=[out_age3, out_gender])
    custom_vgg_model = Model(vgg_model.input, outputs=out_age3)
    # Create the model
    model = custom_vgg_model
    sgd = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    # model.compile(optimizer=sgd, loss=["mean_squared_error", "categorical_crossentropy"],
    #               metrics=['accuracy'])
    model.compile(optimizer=sgd, loss=["mean_squared_error"],
                  metrics=['accuracy'])
    logging.debug("Model summary...")
    model.count_params()
    model.summary()
    class Schedule:
        def __init__(self, nb_epochs, initial_lr):
            self.epochs = nb_epochs
            self.initial_lr = initial_lr
        def __call__(self, epoch_idx):
            if epoch_idx < self.epochs * 0.25:
                return self.initial_lr
            elif epoch_idx < self.epochs * 0.50:
                return self.initial_lr * 0.2
            elif epoch_idx < self.epochs * 0.75:
                return self.initial_lr * 0.04
            return self.initial_lr * 0.008
    callbacks = [LearningRateScheduler(schedule=Schedule(epochs, lr)),
                 ModelCheckpoint("/home/apptech/pixeleye_test/apps/arup/result/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                                 monitor="val_loss",
                                 verbose=1,
                                 save_best_only=True,
                                 mode="auto")
                 ]
    logging.debug("Running training...")
    datagen = ImageDataGenerator(
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        preprocessing_function=get_random_eraser(v_l=0, v_h=255))
    # training_generator = MixupGenerator(X_train, [y_train_a, y_train_g], batch_size=32, alpha=0.2,
    #                                     datagen=datagen)()
    hist = model.fit_generator(generator=generate_arrays_from_file(train_data, batch_size, datagen),
                               steps_per_epoch=len(train_data) // batch_size,
                               validation_data=generate_arrays_from_file(test_data, batch_size, datagen),
                               validation_steps=len(test_data) // batch_size,
                               epochs=epochs, verbose=1,
                               callbacks=callbacks)
Fixed. datagen.flow() returns a generator, not a batch tuple. To obtain the batch, use:
X_batch, y_batch = datagen.flow(X_train, y_train, batch_size=9).next()
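Applied to generate_arrays_from_file above, that means yielding the batch pulled out of the iterator rather than the iterator itself; a sketch of the relevant lines, unchanged otherwise:
if cnt == batch_size:
    cnt = 0
    datagen.fit(X)
    # pull one augmented batch out of the iterator and yield an (x, y) tuple
    X_batch, y_batch = datagen.flow(np.array(X), np.array(Y_age), batch_size=batch_size).next()
    yield X_batch, y_batch
    X = []
    Y_age = []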
