Tensorflow model.fit() using a Dataset generator - python

I am using the Dataset API to generate training data and sort it into batches for a NN.
Here is a minimum working example of my code:
import tensorflow as tf
import numpy as np
import random

def my_generator():
    while True:
        x = np.random.rand(4, 20)
        y = random.randint(0, 11)
        label = tf.one_hot(y, depth=12)
        yield x.reshape(4, 20, 1), label

def my_input_fn():
    dataset = tf.data.Dataset.from_generator(lambda: my_generator(),
                                             output_types=(tf.float64, tf.int32))
    dataset = dataset.batch(32)
    iterator = dataset.make_one_shot_iterator()
    batch_features, batch_labels = iterator.get_next()
    return batch_features, batch_labels

if __name__ == "__main__":
    tf.enable_eager_execution()
    model = tf.keras.Sequential([tf.keras.layers.Flatten(input_shape=(4, 20, 1)),
                                 tf.keras.layers.Dense(128, activation=tf.nn.relu),
                                 tf.keras.layers.Dense(12, activation=tf.nn.softmax)])
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    data_generator = my_input_fn()
    model.fit(data_generator)
The code fails using TensorFlow 1.13.1 at the model.fit() call with the following error:
Traceback (most recent call last):
File "scripts/min_working_example.py", line 37, in <module>
model.fit(data_generator)
File "~/.local/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 880, in fit
validation_steps=validation_steps)
File "~/.local/lib/python3.6/site-packages/tensorflow/python/keras/engine/training_arrays.py", line 310, in model_iteration
ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
File "~/.local/lib/python3.6/site-packages/tensorflow/python/keras/utils/generic_utils.py", line 526, in slice_arrays
return [None if x is None else x[start] for x in arrays]
File "~/.local/lib/python3.6/site-packages/tensorflow/python/keras/utils/generic_utils.py", line 526, in <listcomp>
return [None if x is None else x[start] for x in arrays]
File "~/.local/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py", line 654, in _slice_helper
name=name)
File "~/.local/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py", line 820, in strided_slice
shrink_axis_mask=shrink_axis_mask)
File "~/.local/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 9334, in strided_slice
_six.raise_from(_core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: Attr shrink_axis_mask has value 4294967295 out of range for an int32 [Op:StridedSlice] name: strided_slice/
I tried running the same code on a different machine using TensorFlow 2.0 (after removing the line tf.enable_eager_execution() because it runs eagerly by default) and I got the following error:
Traceback (most recent call last):
File "scripts/min_working_example.py", line 37, in <module>
model.fit(data_generator)
File "~/.local/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py", line 873, in fit
steps_name='steps_per_epoch')
File "~/.local/lib/python3.7/site-packages/tensorflow/python/keras/engine/training_arrays.py", line 352, in model_iteration
batch_outs = f(ins_batch)
File "~/.local/lib/python3.7/site-packages/tensorflow/python/keras/backend.py", line 3217, in __call__
outputs = self._graph_fn(*converted_inputs)
File "~/.local/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 558, in __call__
return self._call_flat(args)
File "~/.local/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 627, in _call_flat
outputs = self._inference_function.call(ctx, args)
File "~/.local/lib/python3.7/site-packages/tensorflow/python/eager/function.py", line 397, in call
(len(args), len(list(self.signature.input_arg))))
ValueError: Arguments and signature arguments do not match: 21 23
I tried changing model.fit() to model.fit_generator() but this fails on both TensorFlow versions too. On TF 1.13.1 I get the following error:
Traceback (most recent call last):
File "scripts/min_working_example.py", line 37, in <module>
model.fit_generator(data_generator)
File "~/.local/lib/python3.6/site-packages/tensorflow/python/keras/engine/training.py", line 1426, in fit_generator
initial_epoch=initial_epoch)
File "~/.local/lib/python3.6/site-packages/tensorflow/python/keras/engine/training_generator.py", line 115, in model_iteration
shuffle=shuffle)
File "~/.local/lib/python3.6/site-packages/tensorflow/python/keras/engine/training_generator.py", line 377, in convert_to_generator_like
num_samples = int(nest.flatten(data)[0].shape[0])
TypeError: __int__ returned non-int (type NoneType)
and on TF 2.0 I get the following error:
Traceback (most recent call last):
File "scripts/min_working_example.py", line 37, in <module>
model.fit_generator(data_generator)
File "~/.local/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py", line 1515, in fit_generator
steps_name='steps_per_epoch')
File "~/.local/lib/python3.7/site-packages/tensorflow/python/keras/engine/training_generator.py", line 140, in model_iteration
shuffle=shuffle)
File "~/.local/lib/python3.7/site-packages/tensorflow/python/keras/engine/training_generator.py", line 477, in convert_to_generator_like
raise ValueError('You must specify `batch_size`')
ValueError: You must specify `batch_size`
yet batch_size is not a recognized keyword for fit_generator().
I am puzzled by these error messages and would appreciate it if anyone could shed some light on them, or point out what I am doing wrong.

While the origin of the errors is still nebulous, I have found a solution that makes the code work. I'll post it here in case it is useful to anyone in a similar situation.
Basically, I turned my_input_fn() into a generator and used model.fit_generator() as follows:
import tensorflow as tf
import numpy as np
import random

def my_generator(total_items):
    i = 0
    while i < total_items:
        x = np.random.rand(4, 20)
        y = random.randint(0, 11)
        label = tf.one_hot(y, depth=12)
        yield x.reshape(4, 20, 1), label
        i += 1

def my_input_fn(total_items, epochs):
    dataset = tf.data.Dataset.from_generator(lambda: my_generator(total_items),
                                             output_types=(tf.float64, tf.int64))
    dataset = dataset.repeat(epochs)
    dataset = dataset.batch(32)
    iterator = dataset.make_one_shot_iterator()
    while True:
        batch_features, batch_labels = iterator.get_next()
        yield batch_features, batch_labels

if __name__ == "__main__":
    tf.enable_eager_execution()
    model = tf.keras.Sequential([tf.keras.layers.Flatten(input_shape=(4, 20, 1)),
                                 tf.keras.layers.Dense(64, activation=tf.nn.relu),
                                 tf.keras.layers.Dense(12, activation=tf.nn.softmax)])
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    total_items = 200
    batch_size = 32
    epochs = 10
    num_batches = int(total_items / batch_size)
    train_data_generator = my_input_fn(total_items, epochs)
    model.fit_generator(generator=train_data_generator, steps_per_epoch=num_batches, epochs=epochs, verbose=1)
EDIT
As implied by giser_yugang in a comment, it is also possible to have my_input_fn() return the dataset itself instead of yielding individual batches.
def my_input_fn(total_items, epochs):
    dataset = tf.data.Dataset.from_generator(lambda: my_generator(total_items),
                                             output_types=(tf.float64, tf.int64))
    dataset = dataset.repeat(epochs)
    dataset = dataset.batch(32)
    return dataset

if __name__ == "__main__":
    tf.enable_eager_execution()
    model = tf.keras.Sequential([tf.keras.layers.Flatten(input_shape=(4, 20, 1)),
                                 tf.keras.layers.Dense(64, activation=tf.nn.relu),
                                 tf.keras.layers.Dense(12, activation=tf.nn.softmax)])
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    total_items = 100
    batch_size = 32
    epochs = 10
    num_batches = int(total_items / batch_size)
    dataset = my_input_fn(total_items, epochs)
    model.fit_generator(dataset, epochs=epochs, steps_per_epoch=num_batches)
There does not appear to be any average performance difference between the approaches.
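For reference, on TF 2.x it should also be possible to skip fit_generator() entirely and hand the tf.data.Dataset straight to model.fit(). A minimal sketch, assuming the same my_generator(), model, and counter variables as above:

def my_input_fn(total_items, epochs):
    dataset = tf.data.Dataset.from_generator(lambda: my_generator(total_items),
                                             output_types=(tf.float64, tf.int64))
    # repeat/batch as before; Keras iterates the dataset itself
    return dataset.repeat(epochs).batch(32)

dataset = my_input_fn(total_items, epochs)
model.fit(dataset, epochs=epochs, steps_per_epoch=num_batches)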

Related

Problem completing BERT model for sentiment classification

I am trying to figure out sentiment classification on movie reviews using BERT, transformers and tensorflow. This is the code I currently have:
def read_dataset(filename, model_name="bert-base-uncased"):
    """Reads a dataset from the specified path and returns sentences and labels"""
    tokenizer = BertTokenizer.from_pretrained(model_name)
    with open(filename, "r", encoding="utf-8") as f:
        lines = f.readlines()
    # preallocate memory for the data
    sents, labels = list(), np.empty((len(lines), 1), dtype=int)
    for i, line in enumerate(lines):
        text, str_label, _ = line.split("\t")
        labels[i] = int(str_label.split("=")[1] == "POS")
        sents.append(text)
    return dict(tokenizer(sents, padding=True, truncation=True, return_tensors="tf")), labels

class BertMLP(tf.keras.Model):
    def __init__(self, embed_batch_size=100, model_name="bert-base-cased"):
        super(BertMLP, self).__init__()
        self.bs = embed_batch_size
        self.model = TFBertModel.from_pretrained(model_name)
        self.classification_head = tf.keras.models.Sequential(
            layers=[
                tf.keras.Input(shape=(self.model.config.hidden_size,)),
                tf.keras.layers.Dense(350, activation="tanh"),
                tf.keras.layers.Dense(200, activation="tanh"),
                tf.keras.layers.Dense(50, activation="tanh"),
                tf.keras.layers.Dense(1, activation="sigmoid", use_bias=False)
            ]
        )

    def call(self, inputs):
        outputs = self.model(inputs)
        return outputs

def evaluate(model, inputs, labels, loss_func):
    mean_loss = tf.keras.metrics.Mean(name="train_loss")
    accuracy = tf.keras.metrics.BinaryAccuracy(name="train_accuracy")
    predictions = model(inputs)
    mean_loss(loss_func(labels, predictions))
    accuracy(labels, predictions)
    return mean_loss.result(), accuracy.result() * 100

if __name__ == "__main__":
    train = read_dataset("datasets/rt-polarity.train.vecs")
    dev = read_dataset("datasets/rt-polarity.dev.vecs")
    test = read_dataset("datasets/rt-polarity.test.vecs")
    mlp = BertMLP()
    mlp.compile(tf.keras.optimizers.SGD(learning_rate=0.01), loss='mse')
    dev_loss, dev_acc = evaluate(mlp, *dev, tf.keras.losses.MeanSquaredError())
    print("Before training:", f"Dev Loss: {dev_loss}, Dev Acc: {dev_acc}")
    mlp.fit(*train, epochs=10, batch_size=10)
    dev_loss, dev_acc = evaluate(mlp, *dev, tf.keras.losses.MeanSquaredError())
    print("After training:", f"Dev Loss: {dev_loss}, Dev Acc: {dev_acc}")
However, when I run this code, I get an error:
Traceback (most recent call last):
File "C:\Users\home\anaconda3\lib\site-packages\spyder_kernels\py3compat.py", line 356, in compat_exec
exec(code, globals, locals)
File "c:\users\home\downloads\mlp.py", line 60, in <module>
dev_loss, dev_acc = evaluate(mlp, *dev, tf.keras.losses.MeanSquaredError())
File "c:\users\home\downloads\mlp.py", line 46, in evaluate
predictions = model(inputs)
File "C:\Users\home\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
raise e.with_traceback(filtered_tb) from None
File "c:\users\home\downloads\mlp.py", line 39, in call
outputs = self.model(inputs)
File "C:\Users\home\anaconda3\lib\site-packages\transformers\modeling_tf_utils.py", line 409, in run_call_with_unpacked_inputs
return func(self, **unpacked_inputs)
File "C:\Users\home\anaconda3\lib\site-packages\transformers\models\bert\modeling_tf_bert.py", line 1108, in call
outputs = self.bert(
File "C:\Users\home\anaconda3\lib\site-packages\transformers\modeling_tf_utils.py", line 409, in run_call_with_unpacked_inputs
return func(self, **unpacked_inputs)
File "C:\Users\home\anaconda3\lib\site-packages\transformers\models\bert\modeling_tf_bert.py", line 781, in call
embedding_output = self.embeddings(
File "C:\Users\home\anaconda3\lib\site-packages\transformers\models\bert\modeling_tf_bert.py", line 203, in call
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
InvalidArgumentError: Exception encountered when calling layer "embeddings" (type TFBertEmbeddings).
indices[1174,8] = 29550 is not in [0, 28996) [Op:ResourceGather]
Call arguments received:
• input_ids=tf.Tensor(shape=(1599, 73), dtype=int32)
• position_ids=None
• token_type_ids=tf.Tensor(shape=(1599, 73), dtype=int32)
• inputs_embeds=None
• past_key_values_length=0
• training=False
I googled for a while, and I can't find anything conclusive. I am pretty sure it has something to do with this part:
def call(self, inputs):
    outputs = self.model(inputs)
    return outputs
But again, I have tried a lot of different things, including limiting dataset size and installing different versions of transformers and tensorflow, but to no avail. Please let me know what I'm doing wrong. Thank you!
OP was using bert-base-cased for their model, and bert-base-uncased for their tokenizer, causing issues during training when the vocab size of the model and the tokenized data differed.
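A minimal sketch of the fix (the MODEL_NAME variable is just for illustration; the rest of the code stays as above): pass the same checkpoint name to both the tokenizer and the model so their vocabularies line up.

MODEL_NAME = "bert-base-uncased"  # one checkpoint for both tokenizer and model

train = read_dataset("datasets/rt-polarity.train.vecs", model_name=MODEL_NAME)
dev = read_dataset("datasets/rt-polarity.dev.vecs", model_name=MODEL_NAME)
test = read_dataset("datasets/rt-polarity.test.vecs", model_name=MODEL_NAME)
mlp = BertMLP(model_name=MODEL_NAME)  # BertMLP above defaults to "bert-base-cased"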

Trying to load a saved Tensorflow ELMO model but get "TypeError: 'str' object is not callable" when loading

I'm trying to load a saved Tensorflow ELMO model in a different function from the one where I trained it, because I want to do multiple predictions with the model without having to train it every time. My (simplified) code is as follows:
(builder.py)
from word_classifier import train_word_classifier, predict_labels

def builder(lines):
    train_word_classifier()
    for lst in lines:
        print('PRED_LABELS: ', predict_labels(lst))
(word_classifier.py)
import pandas as pd
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.python.keras import backend as K
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model, Sequential, model_from_json
from tensorflow.keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, add, Input

def train_word_classifier():
    """
    Input data preparation excluded for readability
    """
    sess = tf.compat.v1.Session()
    K.set_session(sess)
    elmo_model = hub.Module("https://tfhub.dev/google/elmo/3", trainable=True)
    init = tf.compat.v1.global_variables_initializer()
    sess.run(init)

    input_text = Input(shape=(MAX_LEN,), dtype=tf.string)

    def elmo_embedding(inData):
        return \
            elmo_model(inputs={"tokens": tf.squeeze(tf.cast(inData, tf.string)),
                               "sequence_len": tf.constant(BATCH_SIZE * [MAX_LEN])},
                       signature="tokens", as_dict=True)["elmo"]

    embedding = Lambda(lambda text, : elmo_embedding(text), output_shape=(MAX_LEN, 1024))(input_text, )
    x = Bidirectional(LSTM(units=LSTM_UNITS, return_sequences=LSTM_RETURN_SEQ,
                           recurrent_dropout=LSTM_RO_DROPOUT, dropout=LSTM_DROPOUT))(embedding)
    x_rnn = Bidirectional(LSTM(units=LSTM_UNITS, return_sequences=LSTM_RETURN_SEQ,
                               recurrent_dropout=LSTM_RO_DROPOUT, dropout=LSTM_DROPOUT))(x)
    x = add([x, x_rnn])  # residual connection to the first biLSTM
    out = TimeDistributed(Dense(n_tags, activation="softmax"))(x)

    model = Model(input_text, out)
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])

    line_count_training_data = count_lines_in_file(CLASSIFIER_SENTENCE_FILE, 10)
    size_train, size_test = get_count_for_batch_train_test_data(line_count_training_data)
    print(size_train)
    mode_dict = {
        "train": size_train,
        "test": size_test,
    }
    x_tr, x_val = x_tr[:mode_dict["train"] * BATCH_SIZE], x_tr[-mode_dict["test"] * BATCH_SIZE:]
    y_tr, y_val = y_tr[:mode_dict["train"] * BATCH_SIZE], y_tr[-mode_dict["test"] * BATCH_SIZE:]
    y_tr = y_tr.reshape(y_tr.shape[0], y_tr.shape[1], 1)
    y_val = y_val.reshape(y_val.shape[0], y_val.shape[1], 1)

    history = model.fit(np.array(x_tr),
                        y_tr,
                        validation_data=(np.array(x_val), y_val),
                        batch_size=BATCH_SIZE,
                        epochs=NUM_EPOCHS,
                        verbose=VERBOSE_VALUE)

    model_json = model.to_json()
    with open("resources/SavedModel/word_classifier/model.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("resources/SavedModel/word_classifier/model.h5")

def predict_labels(input_data_list):
    with open('resources/SavedModel/word_classifier/model.json', 'r') as json_file:
        loaded_model_json = json_file.read()

    def elmo_embedding(inData):
        return \
            elmo_model(inputs={"tokens": tf.squeeze(tf.cast(inData, tf.string)),
                               "sequence_len": tf.constant(BATCH_SIZE * [MAX_LEN])},
                       signature="tokens", as_dict=True)["elmo"]

    loaded_model = tf.keras.models.model_from_json(loaded_model_json, custom_objects={'elmo_embedding': elmo_embedding})
    # load weights into new model
    loaded_model.load_weights("resources/SavedModel/word_classifier/model.h5")
    print("Loaded model from disk")
In the end, after training the model, this gives me the error "TypeError: 'str' object is not callable" with the following traceback:
Traceback (most recent call last):
File "usc_coordinator.py", line 62, in <module>
run_usc_coordinator(fIn, fOut, mode)
File "usc_coordinator.py", line 32, in run_usc_coordinator
user_story_builder(fast_mode, file_in)
File "/home/ubuntu/PA/PA_AI4US/PythonVersion/src/builder.py", line 45, in builder
print('PRED_LABELS: ', predict_labels(lst))
File "/home/ubuntu/PA/PA_AI4US/PythonVersion/src/word_classifier.py", line 161, in predict_labels
loaded_model = tf.keras.models.model_from_json(loaded_model_json, custom_objects={'elmo_embedding': elmo_embedding})
File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/saving/model_config.py", line 122, in model_from_json
return deserialize(config, custom_objects=custom_objects)
File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/layers/serialization.py", line 171, in deserialize
return generic_utils.deserialize_keras_object(
File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/utils/generic_utils.py", line 354, in deserialize_keras_object
return cls.from_config(
File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/functional.py", line 616, in from_config
input_tensors, output_tensors, created_layers = reconstruct_from_config(
File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/functional.py", line 1214, in reconstruct_from_config
process_node(layer, node_data)
File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/functional.py", line 1162, in process_node
output_tensors = layer(input_tensors, **kwargs)
File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer_v1.py", line 776, in __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/layers/core.py", line 903, in call
result = self.function(inputs, **kwargs)
File "/home/ubuntu/PA/PA_AI4US/PythonVersion/src/word_classifier.py", line 101, in <lambda>
embedding = Lambda(lambda text, : elmo_embedding(text), output_shape=(MAX_LEN, 1024))(input_text, )
TypeError: 'str' object is not callable
My versions are:
Python 3.8.10
Keras 2.3.0
Tensorflow 2.3.1
Tensorflow-hub 0.10.0
My guess is that the error is caused by the variable input_text that is set as dtype tf.string. However, I don't know what to do about that without breaking the training sequence.
I hope that somebody can help!
It is a bug in tensorflow v2.3.1:
Loading a model with a Lambda layer causes a 'str' object is not callable exception #46659
https://github.com/tensorflow/tensorflow/issues/46659
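One possible workaround, sketched below and not tested against this exact setup, is to avoid serializing a Lambda at all and wrap the ELMo call in a small custom layer; the layer is then restored by class name through custom_objects instead of by deserializing a Python lambda. It assumes elmo_model, BATCH_SIZE and MAX_LEN are available as in the question.

class ElmoEmbedding(tf.keras.layers.Layer):
    # Same computation as elmo_embedding(), but as a named layer so that
    # model.to_json()/model_from_json() never has to serialize a lambda.
    def call(self, inputs):
        return elmo_model(inputs={"tokens": tf.squeeze(tf.cast(inputs, tf.string)),
                                  "sequence_len": tf.constant(BATCH_SIZE * [MAX_LEN])},
                          signature="tokens", as_dict=True)["elmo"]

# In train_word_classifier():
#     embedding = ElmoEmbedding()(input_text)
# In predict_labels():
#     loaded_model = tf.keras.models.model_from_json(
#         loaded_model_json, custom_objects={"ElmoEmbedding": ElmoEmbedding})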
You should convert the string to the type you actually need. For example, if I want to calculate 2 + 2, this would be the wrong code:

x = "2"
print(x + 2)

This shows:

Traceback (most recent call last):
  File "main.py", line 2, in <module>
    print(x + 2)
TypeError: can only concatenate str (not "int") to str

So what do we do? Convert it to something else:

x = "2"
y = int(x)
print(y + 2)

Output: 4

(int is short for integer, float for floating-point.)

Edit: you cannot convert some things, for example:

x = "This is an example"
y = int(x)

This would show:

Traceback (most recent call last):
  File "main.py", line 2, in <module>
    y = int(x)
ValueError: invalid literal for int() with base 10: 'This is an example'

To split a string into a list:

x = "1-2"
y = x.split("-")
print(y)

y would be ["1", "2"]

In your traceback, at line 32, 'str' is not callable: fast_mode and file_in would be strings, so something that is a string is being called like a function. The same applies to the other lines.

Keras CNN Classifier

I have a question regarding the CNN in Keras; if you would like to help me, I would really appreciate it.
Disclaimer: I'm a noob in CNN and Keras, I'm just learning them right now.
My Data:
2 Classes (dogs and cats)
Training: 30 pics each category
Test: 14 pics each category
Valid: 30 pics each category
My code:
data_path = Path("../data")
train_path = data_path / "train"
test_path = data_path / "test"
valid_path = data_path / "valid"

train_batch = ImageDataGenerator().flow_from_directory(directory=train_path,
                                                        target_size=(200, 200),
                                                        classes=animals,
                                                        batch_size=10)
valid_batch = ImageDataGenerator().flow_from_directory(directory=valid_path,
                                                        target_size=(200, 200),
                                                        classes=animals,
                                                        batch_size=10)
test_path = ImageDataGenerator().flow_from_directory(directory=test_path,
                                                      target_size=(200, 200),
                                                      classes=animals,
                                                      batch_size=4)
imgs, labels = next(train_batch)

model = Sequential(
    [Conv2D(32, (3, 3), activation="relu", input_shape=(200, 200, 3)), Flatten(),
     Dense(len(animals), activation='softmax')])
model.compile(Adam(lr=.0001), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(train_path, steps_per_epoch=4, validation_data=valid_batch, validation_steps=3, epochs=5, verbose=2)
Here is my error message (I've replaced the paths with ""):
Traceback (most recent call last):
File "", line 191, in <module>
model.fit_generator(train_path, steps_per_epoch=4, validation_data=valid_batch, validation_steps=3, epochs=5, verbose=2)
File "y", line 91, in wrapper
return func(*args, **kwargs)
File "", line 1732, in fit_generator
initial_epoch=initial_epoch)
File "", line 185, in fit_generator
generator_output = next(output_generator)
File "", line 742, in get
six.reraise(*sys.exc_info())
File "", line 693, in reraise
raise value
File "", line 711, in get
inputs = future.get(timeout=30)
File "", line 657, in get
raise self._value
File "", line 121, in worker
result = (True, func(*args, **kwds))
File "", line 650, in next_sample
return six.next(_SHARED_SEQUENCES[uid])
TypeError: 'PosixPath' object is not an iterator
Could anyone explain to me what I'm doing wrong please? Also if this is an off-topic question just let me know where I can ask it.
The issue you are having is that you are NOT passing the generator for training, but the path to the files (you are using train_path instead of train_batch).
You need to pass a generator object when using .fit_generator():
model.fit_generator(train_batch, steps_per_epoch=4, validation_data=valid_batch, validation_steps=3, epochs=5, verbose=2)
This line isn't necessary
imgs, labels = next(train_batch)
From the docs, the first argument to fit_generator is a generator object, not a path as you have supplied. Like this:
model.fit_generator(train_batch, steps_per_epoch=4, validation_data=valid_batch, validation_steps=3, epochs=5, verbose=2)

How to fix "AttributeError: 'str' object has no attribute '__array_interface__'" while doing Image classification using python

I am doing image classification using Keras, and while training the model I get an error that says "AttributeError: 'str' object has no attribute '__array_interface__'". It is raised from PIL's Image.py file, and because of it my model never starts training.
I have already tried making some changes in Image.py and in my own code file, but the error still exists. The code file was working earlier, but now it throws this AttributeError.
img_width, img_height = 150, 150
train_dir = r'D:\DataSets\Cats_Dogs\train'
validation_dir = r'D:\DataSets\Cats_Dogs\test'
nb_train_samples = 5000
nb_validation_samples = 1000
epochs = 50
batch_size = 20

train_datagen = ImageDataGenerator(rescale=1./255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True,
                                   vertical_flip=True, rotation_range=0.2)
validation_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(train_dir, target_size=(img_width, img_height),
                                                    batch_size=batch_size, class_mode='binary')
validation_generator = validation_datagen.flow_from_directory(validation_dir, target_size=(img_width, img_height),
                                                              batch_size=batch_size, class_mode='binary')

model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=input_shape, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.summary()

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
model.fit_generator(train_generator, steps_per_epoch=nb_train_samples // batch_size, epochs=epochs,
                    validation_data=validation_generator, validation_steps=nb_validation_samples // batch_size)
Use tf.cast instead.
Epoch 1/50
Traceback (most recent call last):
File "C:/Users/user/PycharmProjects/ImageClassification/BinaryClass.py", line 56, in <module>
validation_data=validation_generator, validation_steps= nb_validation_samples // batch_size)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\keras\legacy\interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\keras\engine\training.py", line 1658, in fit_generator
initial_epoch=initial_epoch)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\keras\engine\training_generator.py", line 181, in fit_generator
generator_output = next(output_generator)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\keras\utils\data_utils.py", line 616, in get
six.reraise(*sys.exc_info())
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\six.py", line 693, in reraise
raise value
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\keras\utils\data_utils.py", line 603, in get
inputs = future.get(timeout=30)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\multiprocessing\pool.py", line 657, in get
raise self._value
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\multiprocessing\pool.py", line 121, in worker
result = (True, func(*args, **kwds))
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\keras\utils\data_utils.py", line 406, in get_index
return _SHARED_SEQUENCES[uid][i]
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\keras_preprocessing\image\iterator.py", line 65, in __getitem__
return self._get_batches_of_transformed_samples(index_array)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\keras_preprocessing\image\iterator.py", line 230, in _get_batches_of_transformed_samples
interpolation=self.interpolation)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\keras_preprocessing\image\utils.py", line 110, in load_img
img = pil_image.fromarray(path)
File "C:\Users\user\AppData\Local\Continuum\anaconda3\lib\site-packages\PIL\Image.py", line 2643, in fromarray
arr = obj.__array_interface__
AttributeError: 'str' object has no attribute '__array_interface__'
Process finished with exit code 1
In the code, I am giving the paths to my training and testing datasets, and the error occurs when fitting the model on the training and validation data.

Error when using the class_weight parameter in the fit function in Keras

I wanted to test my network on a toy data set - a few examples with two imbalanced classes (0 and 1). Unfortunately, there are problems when using the class_weight parameter to improve the balance. It looks like I am forgetting something.
import tensorflow as tf
from tensorflow.python.keras.layers import Dense, Dropout
from tensorflow.python.keras.applications.xception import Xception, preprocess_input
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.optimizers import Adam

# parsing images from TFRecords
def parse_function(proto):
    example = {'image_raw': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.int64)}
    parsed_example = tf.parse_single_example(proto, example)
    image = tf.decode_raw(parsed_example['image_raw'], tf.uint8)
    image = tf.reshape(image, [HEIGHT, WIDTH, DEPTH])
    image = preprocess_input(tf.cast(image, tf.float32))
    return image, parsed_example['label']

def get_data(filepath, schuffle_size=32, batch_size=8, prefetch=1, repeat=None, num_parallel_calls=1):
    dataset = tf.data.TFRecordDataset(filepath)
    if schuffle_size != 0:
        dataset = dataset.shuffle(schuffle_size)
    dataset = dataset.repeat(repeat)
    dataset = dataset.map(parse_function, num_parallel_calls=num_parallel_calls)
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(prefetch)
    iterator = dataset.make_one_shot_iterator()
    return iterator

def build_model(number_of_neurons_in_dense_layer, dropout, learning_rate):
    base_model = Xception(weights='imagenet', include_top=False, pooling='avg', input_shape=(HEIGHT, WIDTH, 3))
    for layer in base_model.layers:
        layer.trainable = True
    x = base_model.output
    x = Dropout(dropout)(x)
    x = Dense(number_of_neurons_in_dense_layer, activation='relu')(x)
    x = Dropout(dropout)(x)
    logits = Dense(NUMBER_OF_CLASSES, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=logits)
    model.compile(optimizer=Adam(lr=learning_rate), loss='sparse_categorical_crossentropy', metrics=['categorical_accuracy'])
    return model

global NUMBER_OF_CLASSES, HEIGHT, WIDTH, DEPTH
NUMBER_OF_CLASSES = 2
...
CLASS_WEIGHTS = {
    0: 1,
    1: 7
}
model = build_model(64, 0.4, 0.001)
train = get_data(..., 8, 2, num_parallel_calls=8)
val = get_data(...., 0, 4, num_parallel_calls=8)
model.fit(train, validation_data=val, epochs=3, steps_per_epoch=8//2,
          validation_steps=8//4, shuffle=False,
          class_weight=CLASS_WEIGHTS)
I am getting the following errors
Original exception was:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py", line 51, in _wrapfunc
return getattr(obj, method)(*args, **kwds)
AttributeError: 'Tensor' object has no attribute 'reshape'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/usr/model.py", line 147, in main
class_weight=CLASS_WEIGHTS)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py", line 776, in fit
shuffle=shuffle)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py", line 2432, in _standardize_user_data
feed_sample_weight_modes)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py", line 2431, in <listcomp>
for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training_utils.py", line 758, in standardize_weights
y_classes = np.reshape(y, y.shape[0])
File "/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py", line 279, in reshape
return _wrapfunc(a, 'reshape', newshape, order=order)
File "/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py", line 61, in _wrapfunc
return _wrapit(obj, method, *args, **kwds)
File "/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py", line 41, in _wrapit
result = getattr(asarray(obj), method)(*args, **kwds)
TypeError: __index__ returned non-int (type NoneType)
Without the class_weight parameter, the fit function works correctly.
Just for future reference:
I ran into this error too and was able to resolve it by passing an array instead of a dictionary, e.g.

CLASS_WEIGHTS = np.array([1, 7])

instead of:

CLASS_WEIGHTS = {
    0: 1,
    1: 7
}
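For completeness, a sketch of how that plugs into the fit call from the question (assuming numpy is imported as np; with an array, the weight at position i is taken as the weight for class i):

import numpy as np

CLASS_WEIGHTS = np.array([1, 7])  # weight for class 0, weight for class 1

model.fit(train, validation_data=val, epochs=3, steps_per_epoch=8 // 2,
          validation_steps=8 // 4, shuffle=False,
          class_weight=CLASS_WEIGHTS)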
