Training a multi-output Keras model (Python)

I have 10,000 images, each labeled with 20 tags; each tag is either true or false. I'm trying to train a multi-output model that performs all 20 binary classifications with one network.
The network is a residual network. After the flatten layer, it branches out into 20 branches. Each branch has 2 fully connected layers, each followed by a dropout layer, and finally a dense layer with one node and sigmoid activation.
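(For reference, a minimal Keras sketch of such a branched head; the convolutional stand-in and branch widths are assumptions, not the actual residual network:)
import tensorflow as tf
from tensorflow.keras import layers

inp = layers.Input(shape=(224, 224, 3))
x = layers.Conv2D(32, 3, activation="relu")(inp)  # stand-in for the residual base
x = layers.Flatten()(x)

outputs = []
for i in range(20):
    b = layers.Dense(256, activation="relu")(x)   # branch widths are assumptions
    b = layers.Dropout(0.5)(b)
    b = layers.Dense(128, activation="relu")(b)
    b = layers.Dropout(0.5)(b)
    outputs.append(layers.Dense(1, activation="sigmoid", name="tag_%d" % i)(b))

model = tf.keras.Model(inputs=inp, outputs=outputs)
model.compile(optimizer="adam", loss="binary_crossentropy")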
The labels for each image and the image name are stored in a text file, for both train and validation set. Like this:
1.jpg 1 -1 1 -1 -1 1 -1.........
I wrote my own generator, but I can't get it to work. I keep getting this error:
Error when checking model target: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 20 array(s), but instead got the following list of 1 arrays.
Function explanations: get_input reads an image and resizes it. get_output prepares the labels for each image; the labels are stored in a list and returned at the end. preprocess_input performs preprocessing and converts images into arrays. train_generator and validation_generator generate batches of size 32 to be fed to the model.
Here's my code:
def get_input(img_name):
    path = os.path.join("images", img_name)
    img = image.load_img(path, target_size=(224, 224))
    return img

def get_output(img_name, file_path):
    data = pd.read_csv(file_path, delim_whitespace=True, header=None)
    img_id = img_name.split(".")[0]
    img_id = img_id.lstrip("0")
    img_id = int(img_id)
    labels = data.loc[img_id - 1].values
    labels = labels[1:]
    labels = list(labels)
    label_arrays = []
    for i in range(20):
        val = np.zeros((1))
        val[0] = labels[i]
        label_arrays.append(val)
    return label_arrays

def preprocess_input(img_name):
    img = get_input(img_name)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    return x

def train_generator(batch_size):
    file_path = "train.txt"
    data = pd.read_csv(file_path, delim_whitespace=True, header=None)
    while True:
        for i in range(math.floor(8000 / batch_size)):
            x_batch = np.zeros(shape=(32, 224, 224, 3))
            y_batch = np.zeros(shape=(32, 20))
            for j in range(batch_size):
                img_name = data.loc[i * batch_size + j].values
                img_name = img_name[0]
                x = preprocess_input(img_name)
                y = get_output(img_name, file_path)
                x_batch[j, :, :, :] = x
                y_batch[j] = y
            yield (x_batch, y_batch)

def val_generator(batch_size):
    file_path = "val.txt"
    data = pd.read_csv(file_path, delim_whitespace=True, header=None)
    while True:
        for i in range(math.floor(2000 / batch_size)):
            x_batch = np.zeros(shape=(32, 224, 224, 3))
            y_batch = np.zeros(shape=(32, 20))
            for j in range(batch_size):
                img_name = data.loc[i * batch_size + j].values
                img_name = img_name[0]
                x = preprocess_input(img_name)
                y = get_output(img_name, file_path)
                x_batch[j, :, :, :] = x
                y_batch[j] = y
            yield (x_batch, y_batch)
Edit:
One quick question. What's the difference between this loop and the one in your answer:
ys = []
for i in range(batch_size):
    ys.append(y_batch[i, :])
yield (x_batch, ys)

If your model has 20 outputs, then you must provide a list of 20 arrays as the target. One way of doing this is to modify the generator (for both training and validation):
ys = []
for i in range(20):
    ys.append(y_batch[:, i])
yield (x_batch, ys)
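To make the difference from the loop in the edit above concrete: slicing rows gives batch_size arrays of shape (20,), one per sample, while slicing columns gives 20 arrays of shape (batch_size,), one per output, and Keras matches target arrays to model outputs, not to samples. A quick shape check:
import numpy as np

y_batch = np.zeros((32, 20))

rows = [y_batch[i, :] for i in range(32)]  # 32 arrays of shape (20,): one per sample
cols = [y_batch[:, i] for i in range(20)]  # 20 arrays of shape (32,): one per output

print(len(rows), rows[0].shape)  # 32 (20,)
print(len(cols), cols[0].shape)  # 20 (32,)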
As a side note, you mentioned that you have 20 tags per sample, so why have you specified 40 in the target shape?
y_batch = np.zeros(shape=(32, 40))
Further, I don't know about the specific problem you are working on, but alternatively you could have a single output of size 20 instead of 20 outputs of size one.
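A minimal sketch of that alternative (the convolutional stand-in and layer sizes are assumptions): a single head with one sigmoid unit per tag, trained with binary cross-entropy:
import tensorflow as tf
from tensorflow.keras import layers

inp = layers.Input(shape=(224, 224, 3))
x = layers.Conv2D(32, 3, activation="relu")(inp)  # stand-in for the residual base
x = layers.Flatten()(x)
out = layers.Dense(20, activation="sigmoid")(x)   # one sigmoid unit per tag

model = tf.keras.Model(inputs=inp, outputs=out)
model.compile(optimizer="adam", loss="binary_crossentropy")
# With this head, a y_batch of shape (batch_size, 20) is a valid target as-is.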

You can test the generator's output dimensions by initializing it and calling next(). For example, with train_generator:
train_gen = train_generator(batch_size)
x_batch, y_batch = next(train_gen)
Then check the dimensions and datatypes of x_batch and y_batch.
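A quick check along those lines (the expected shape follows from the code above):
print(x_batch.shape, x_batch.dtype)  # expected: (32, 224, 224, 3)
print(type(y_batch))                 # a list of 20 arrays vs. one (32, 20) array matters here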
I would make the generator in this way:
def train_generator(batch_size):
    file_path = "train.txt"
    data = pd.read_csv(file_path, delim_whitespace=True, header=None)
    while True:
        for i in range(math.floor(8000 / batch_size)):
            # Initialize empty lists for every batch, otherwise they grow without bound
            x_batch = []
            y_batch = []
            for j in range(batch_size):
                img_name = data.loc[i * batch_size + j].values
                img_name = img_name[0]
                x = preprocess_input(img_name)
                y = get_output(img_name, file_path)
                x_batch.append(x)
                y_batch.append(y)
            yield (np.array(x_batch), np.array(y_batch))
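Wired into training, this would look roughly as follows (fit_generator is the Keras 2 API in use here; the epoch count is a placeholder):
model.fit_generator(
    train_generator(32),
    steps_per_epoch=8000 // 32,
    validation_data=val_generator(32),
    validation_steps=2000 // 32,
    epochs=10)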


"Invalid argument: indices[0,0,0,0] = 30 is not in [0, 30)"

Error:
InvalidArgumentError: indices[0,0,0,0] = 30 is not in [0, 30)
[[{{node GatherV2}}]] [Op:IteratorGetNext]
History:
I have a custom data loader for a tf.keras-based U-Net for semantic segmentation, based on this example. It is written as follows:
def parse_image(img_path: str) -> dict:
    # read image
    image = tf.io.read_file(img_path)
    #image = tfio.experimental.image.decode_tiff(image)
    if xf == "png":
        image = tf.image.decode_png(image, channels = 3)
    else:
        image = tf.image.decode_jpeg(image, channels = 3)
    image = tf.image.convert_image_dtype(image, tf.uint8)
    #image = image[:, :, :-1]

    # read mask
    mask_path = tf.strings.regex_replace(img_path, "X", "y")
    mask_path = tf.strings.regex_replace(mask_path, "X." + xf, "y." + yf)
    mask = tf.io.read_file(mask_path)
    #mask = tfio.experimental.image.decode_tiff(mask)
    mask = tf.image.decode_png(mask, channels = 1)
    #mask = mask[:, :, :-1]
    mask = tf.where(mask == 255, np.dtype("uint8").type(NoDataValue), mask)

    return {"image": image, "segmentation_mask": mask}

train_dataset = tf.data.Dataset.list_files(
    dir_tls(myear = year, dset = "X") + "/*." + xf, seed = zeed)
train_dataset = train_dataset.map(parse_image)

val_dataset = tf.data.Dataset.list_files(
    dir_tls(myear = year, dset = "X_val") + "/*." + xf, seed = zeed)
val_dataset = val_dataset.map(parse_image)
## data transformations--------------------------------------------------------

#tf.function
def normalise(input_image: tf.Tensor, input_mask: tf.Tensor) -> tuple:
    input_image = tf.cast(input_image, tf.float32) / 255.0
    return input_image, input_mask

#tf.function
def load_image_train(datapoint: dict) -> tuple:
    input_image = tf.image.resize(datapoint["image"], (imgr, imgc))
    input_mask = tf.image.resize(datapoint["segmentation_mask"], (imgr, imgc))
    if tf.random.uniform(()) > 0.5:
        input_image = tf.image.flip_left_right(input_image)
        input_mask = tf.image.flip_left_right(input_mask)
    input_image, input_mask = normalise(input_image, input_mask)
    return input_image, input_mask

#tf.function
def load_image_test(datapoint: dict) -> tuple:
    input_image = tf.image.resize(datapoint["image"], (imgr, imgc))
    input_mask = tf.image.resize(datapoint["segmentation_mask"], (imgr, imgc))
    input_image, input_mask = normalise(input_image, input_mask)
    return input_image, input_mask

## create datasets-------------------------------------------------------------
buff_size = 1000
dataset = {"train": train_dataset, "val": val_dataset}

# -- Train Dataset --#
dataset["train"] = dataset["train"]\
    .map(load_image_train, num_parallel_calls = tf.data.experimental.AUTOTUNE)
dataset["train"] = dataset["train"].shuffle(buffer_size = buff_size, seed = zeed)
dataset["train"] = dataset["train"].repeat()
dataset["train"] = dataset["train"].batch(bs)
dataset["train"] = dataset["train"].prefetch(buffer_size = AUTOTUNE)

#-- Validation Dataset --#
dataset["val"] = dataset["val"].map(load_image_test)
dataset["val"] = dataset["val"].repeat()
dataset["val"] = dataset["val"].batch(bs)
dataset["val"] = dataset["val"].prefetch(buffer_size = AUTOTUNE)

print(dataset["train"])
print(dataset["val"])
Now I wanted to use a weighted version of tf.keras.losses.SparseCategoricalCrossentropy for my model and I found this tutorial, which is rather similar to the example above.
However, they also offered a weighted version of the loss, using:
def add_sample_weights(image, label):
    # The weights for each class, with the constraint that:
    #     sum(class_weights) == 1.0
    class_weights = tf.constant([2.0, 2.0, 1.0])
    class_weights = class_weights / tf.reduce_sum(class_weights)

    # Create an image of `sample_weights` by using the label at each pixel as an
    # index into the `class_weights`.
    sample_weights = tf.gather(class_weights, indices=tf.cast(label, tf.int32))

    return image, label, sample_weights
and
weighted_model.fit(
    train_dataset.map(add_sample_weights),
    epochs=1,
    steps_per_epoch=10)
I combined those approaches, since the latter tutorial uses previously loaded data while I want to draw the images from disk (not enough RAM to load them all at once).
This results in the code from the first example (the long code block above), followed by
def add_sample_weights(image, segmentation_mask):
    class_weights = tf.constant(inv_weights, dtype = tf.float32)
    class_weights = class_weights / tf.reduce_sum(class_weights)
    sample_weights = tf.gather(class_weights,
                               indices = tf.cast(segmentation_mask, tf.int32))
    return image, segmentation_mask, sample_weights
(inv_weights are my weights, an array of 30 float64 values) and
model.fit(dataset["train"].map(add_sample_weights),
          epochs = 45, steps_per_epoch = np.ceil(N_img/bs),
          validation_data = dataset["val"],
          validation_steps = np.ceil(N_val/bs),
          callbacks = cllbs)
When I run
dataset["train"].map(add_sample_weights).element_spec
as in the second example, I get an output that looks reasonable to me (similar to the one in the example):
Out[58]:
(TensorSpec(shape=(None, 512, 512, 3), dtype=tf.float32, name=None),
TensorSpec(shape=(None, 512, 512, 1), dtype=tf.float32, name=None),
TensorSpec(shape=(None, 512, 512, 1), dtype=tf.float32, name=None))
However, when I try to fit the model or run something like
a, b, c = dataset["train"].map(add_sample_weights).take(1)
I will receive the error mentioned above.
So far, I have found quite a few questions regarding this error (e.g., a, b, c, d); however, they all talk of "embedding layers" and things I am not aware of using.
Where does this error come from and how can I solve it?
Picture tf.gather as a fancy way of doing indexing. The error you get is akin to the following example in plain Python:
>>> my_list = [1,2,3]
>>> my_list[3]
IndexError: list index out of range
If you want to use tf.gather, then the range of values of your indices must not exceed the dimension size of the tensor you are indexing.
In your case, in the call tf.gather(class_weights, indices = tf.cast(segmentation_mask, tf.int32)), with class_weights being a tensor of shape (30,), the values of segmentation_mask should lie between 0 and 29. As far as I can tell from your data pipeline, segmentation_mask has values ranging from 0 to 255. The fix will be problem dependent.
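As a sketch of how one might diagnose this (names follow the pipeline above; the remapping line is only an assumption, since the right fix depends on what the out-of-range values mean):
import tensorflow as tf

# Inspect the actual range of the mask values produced by the pipeline:
for image, mask in dataset["train"].take(3):
    print(tf.reduce_min(mask).numpy(), tf.reduce_max(mask).numpy())

# If stray values survive (e.g. 255), map them into [0, 30) before gathering,
# for instance onto an assumed NoData class 0:
# mask = tf.where(mask >= 30, tf.zeros_like(mask), mask)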

How to split the data into training and testing data

Hi, right now I have the data-loading code below and I'm not sure how to split it into training and testing data. Can anyone give me a suggestion on how to do it? This is my data-loading code:
def __init__(self, root, specific_folder, img_extension, preprocessing_method=None, crop_size=(96, 112), train=True):
    """
    Dataloader of the LFW dataset.

    root: path to the dataset to be used.
    specific_folder: specific folder inside the same dataset.
    img_extension: extension of the dataset images.
    preprocessing_method: string with the name of the preprocessing method.
    crop_size: retrieval network specific crop size.
    """
    self.preprocessing_method = preprocessing_method
    self.crop_size = crop_size
    self.imgl_list = []
    self.classes = []
    self.people = []
    self.model_align = None
    self.arr = []

    # read the file with the names and the number of images of each person in the dataset
    with open(os.path.join(root, 'people.txt')) as f:
        people = f.read().splitlines()[1:]

    # keep only the people that have at least 20 images
    for p in people:
        p = p.split('\t')
        if len(p) > 1:
            if int(p[1]) >= 20:
                for num_img in range(1, int(p[1]) + 1):
                    self.imgl_list.append(os.path.join(root, specific_folder, p[0], p[0] + '_' +
                                                       '{:04}'.format(num_img) + '.' + img_extension))
                    self.classes.append(p[0])
                    self.people.append(p[0])

    le = preprocessing.LabelEncoder()
    self.classes = le.fit_transform(self.classes)

    print(len(self.imgl_list), len(self.classes), len(self.people))
def __getitem__(self, index):
    imgl = imageio.imread(self.imgl_list[index])
    cl = self.classes[index]

    # if image is grayscale, transform into rgb by repeating the image 3 times
    if len(imgl.shape) == 2:
        imgl = np.stack([imgl] * 3, 2)

    imgl, bb = preprocess(imgl, self.preprocessing_method, crop_size=self.crop_size,
                          is_processing_dataset=True, return_only_largest_bb=True, execute_default=True)

    # append image with its reverse
    imglist = [imgl, imgl[:, ::-1, :]]

    # normalization
    for i in range(len(imglist)):
        imglist[i] = (imglist[i] - 127.5) / 128.0
        imglist[i] = imglist[i].transpose(2, 0, 1)

    imgs = [torch.from_numpy(i).float() for i in imglist]

    return imgs, cl, imgl, bb, self.imgl_list[index], self.people[index]

def __len__(self):
    return len(self.imgl_list)
I need to split the data into 80% training and 20% testing so I can test my model. It's been almost a week now and I still have no idea how to do it; I would appreciate it so much if anyone could help.
In general using PyTorch:
import torch
import numpy as np
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler

dataset = yourdatahere

batch_size = 16  # change to whatever you'd like it to be
test_split = .2
shuffle_dataset = True
random_seed = 42

# Creating data indices for training and validation splits:
dataset_size = len(dataset)
indices = list(range(dataset_size))
split = int(np.floor(test_split * dataset_size))
if shuffle_dataset:
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, test_indices = indices[split:], indices[:split]

# Creating PT data samplers and loaders:
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)

train_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                           sampler=train_sampler)
test_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                          sampler=test_sampler)

# Usage example:
num_epochs = 10
for epoch in range(num_epochs):
    # Train:
    for batch_index, (faces, labels) in enumerate(train_loader):
        # ...
Please note that you should also split your training data into training + validation data. You may use the same logic from above to do so.
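As an alternative sketch, newer PyTorch versions also provide torch.utils.data.random_split, which achieves the same split in fewer lines (the proportions and seed here are just examples):
import torch
from torch.utils.data import DataLoader, random_split

n = len(dataset)
n_test = int(0.2 * n)   # 20% test
n_val = int(0.1 * n)    # 10% validation
n_train = n - n_test - n_val

train_set, val_set, test_set = random_split(
    dataset, [n_train, n_val, n_test],
    generator=torch.Generator().manual_seed(42))

train_loader = DataLoader(train_set, batch_size=16, shuffle=True)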

Custom Datagenerator keras model expected 2 arrays but receives 1

So I am trying to get this custom generator working but seem to have issues with it. The generator itself appears to work; I tried calling next() on it and it produces what I want. However, it may not be producing batches in the shape I think it should.
# Image processing
def preprocess_image(image_path):
    img = image.load_img(image_path, target_size=(224, 224))
    img = image.img_to_array(img)
    img = preprocess_input(img)
    return img

def image_generator(data, batch_size):
    datagen_args = dict(horizontal_flip=True)
    datagen = ImageDataGenerator(**datagen_args)
    while True:
        for i in range(0, len(data) // batch_size):
            # get the label and the image path
            imgpath, label = data[i]
            # process the image
            img = preprocess_image(imgpath)
            img = datagen.random_transform(img)
            #img = np.expand_dims(img, axis=0)
            # add a 0 for a dummy variable
            dummy_label = np.zeros(len(label))
            x_data = np.array([img, label])
            yield x_data, dummy_label
# Prepare data; each entry is [image_path, label]
X = []  # hold the data before processing
Y = []
IMAGE_DIR = 'dataset/gt_bbox'
for file in os.listdir(IMAGE_DIR):
    file_path = os.path.join(IMAGE_DIR, file)
    label = int(file.split('_')[0])
    X.append(file_path)
    Y.append(label)

# Convert to categorical
Y = to_categorical(Y)

image_dataset = []
for i in range(0, len(X)):
    image_dataset.append([X[i], Y[i]])

# Split into train and test data
train, val = train_test_split(image_dataset)

BATCHSIZE = 32
imggen = image_generator(train, BATCHSIZE)
valgen = image_generator(val, BATCHSIZE)

model.fit_generator(imggen,
                    steps_per_epoch=1000,
                    epochs=10,
                    validation_data=valgen,
                    validation_steps=300,
                    verbose=1)
My model is set up like this:
input_images = Input(shape=(224, 224, 3), name='input_image') # input layer for images
input_labels = Input(shape=(1,), name='input_label') # input layer for labels
embeddings = base_network([input_images]) # output of network -> embeddings
labels_plus_embeddings = Concatenate(axis=-1)([input_labels, embeddings]) # concatenating the labels + embeddings
model = Model(inputs=[input_images, input_labels], outputs=labels_plus_embeddings)
I may be wrong in how I am building the model but it seems right to me.
Error Message
ValueError: Error when checking model input: the list of Numpy arrays that you are passing to your model is not the size the model expected. Expected to see 2 array(s), but instead got the following list of 1 arrays: [array([[array([[[-0.56078434, -0.52156866, -0.4980392 ],
[-0.56078434, -0.52156866, -0.4980392 ],
[-0.56078434, -0.52156866, -0.4980392 ],
...,
[-0.5764706 , -0.545098...

How to use flow_from_directory in Keras for multi-class semantic segmentation?

Let's say I have 100 grayscale training images and 100 RGB training masks, each of size 512x512. I was able to one-hot encode the masks using to_categorical in Keras as below:
numclasses=3
masks_one_hot=to_categorical(maskArr,numclasses)
where maskArr is 100x512x512x1 and masks_one_hot is 100x512x512x3.
However, to use ImageDataGenerator and flow_from_directory with trainGenerator from https://github.com/zhixuhao/unet/blob/master/data.py, I tried to save the one-hot encoded training images and then read them using trainGenerator. I noticed that after writing them with imwrite and reading them back with imread, they changed from one-hot encoded 512x512x3 to 512x512x3 RGB images. That is, instead of each channel having a value of 0 or 1, the values now range from 0 to 255.
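(A hedged aside: if the saved channels are exactly 0 or 255, the one-hot structure could be recovered by thresholding right after reading; the file name below is hypothetical:)
import numpy as np
from skimage import io

saved = io.imread("mask_0001.png")       # 512x512x3 uint8, channels 0 or 255 (assumed)
one_hot = (saved > 127).astype("uint8")  # back to 0/1 per channel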
As a result, if I do:
myGenerator = trainGeneratorOneHot(20, 'data/membrane/train', 'image', 'label', data_gen_args,
                                   save_to_dir = "data/membrane/train/aug", flag_multi_class = True,
                                   num_class = 3, target_size = (512, 512, 3))
num_batch = 3
for i, batch in enumerate(myGenerator):
    if i >= num_batch:
        break
where trainGeneratorOneHot is below:
def trainGeneratorOneHot(batch_size, ..., class_mode=None, image_class_mode=None):
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    image_generator = image_datagen.flow_from_directory(train_path, classes = [image_folder],
                                                        class_mode = image_class_mode,
                                                        color_mode = image_color_mode,
                                                        target_size = target_size, ...)
    mask_generator = mask_datagen.flow_from_directory(train_path, classes = [mask_folder],
                                                      class_mode = class_mode,
                                                      target_size = target_size, ...)
    train_generator = zip(image_generator, mask_generator)
    for (img, mask) in train_generator:
        img, mask = adjustDataOneHot(img, mask)
        yield (img, mask)

def adjustDataOneHot(img, mask):
    return (img, mask)
Then I get `ValueError: could not broadcast input array from shape (512,512,1) into shape (512,512,3,1)`.
How can I fix this?
I was dealing with the same issue a few days ago. I found it essential to make my own data generator class that takes in data from a dataframe, augments it, and then one-hot encodes it before passing it to my model. I was never able to get the Keras ImageDataGenerator to work for semantic segmentation problems with multiple classes.
Below is a data generator class in case it might help you out:
def one_hot_encoder(mask, num_classes = 8):
    # Builds a per-channel binary mask; assumes `mask` already has one channel per class
    hot_mask = np.zeros(shape = mask.shape, dtype = 'uint8')
    for c in range(num_classes):
        temp = np.zeros(shape = mask.shape[0:2], dtype = 'uint8')
        temp[mask[:, :, c] != 0] = 1
        hot_mask[:, :, c] = temp
    return hot_mask

# Image data generator class
class DataGenerator(keras.utils.Sequence):
    def __init__(self, dataframe, batch_size, n_classes = 8, augment = False):
        self.dataframe = dataframe
        self.batch_size = batch_size
        self.n_classes = n_classes
        self.augment = augment

    # Steps per epoch
    def __len__(self):
        return len(self.dataframe) // self.batch_size

    # Shuffles and resets the index at the end of a training epoch
    def on_epoch_end(self):
        self.dataframe = self.dataframe.sample(frac = 1).reset_index(drop = True)

    # Generates one batch of data and feeds it to training
    def __getitem__(self, index):
        processed_images = []
        processed_masks = []
        for k in range(self.batch_size):
            row = index * self.batch_size + k  # advance through the dataframe within the batch
            the_image = io.imread(self.dataframe['Images'][row])
            the_mask = io.imread(self.dataframe['Masks'][row]).astype('uint8')
            one_hot_mask = one_hot_encoder(the_mask, self.n_classes)
            # augs_for_images, augs_for_masks and resize are augmentation/resizing
            # pipelines defined elsewhere by the author
            if self.augment:
                # Resizing followed by some augmentations
                processed_image = augs_for_images(image = the_image) / 255.0
                processed_mask = augs_for_masks(image = one_hot_mask)
            else:
                # Still resizing but no augmentations
                processed_image = resize(image = the_image) / 255.0
                processed_mask = resize(image = one_hot_mask)
            processed_images.append(processed_image)
            processed_masks.append(processed_mask)
        batch_x = np.array(processed_images)
        batch_y = np.array(processed_masks)
        return (batch_x, batch_y)
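Hypothetical usage, assuming train_df and val_df are dataframes with 'Images' and 'Masks' columns holding file paths:
train_gen = DataGenerator(train_df, batch_size = 8, n_classes = 8, augment = True)
val_gen = DataGenerator(val_df, batch_size = 8, n_classes = 8)
model.fit(train_gen, validation_data = val_gen, epochs = 20)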
Also, here's a link to a repo with some semantic segmentation models that might be of interest to you. The notebook itself shows how the author dealt with multi-class semantic segmentation.

Tensorflow How to iterate on or convert the output of tf.train.batch?

I'm trying to make a Nearest Neighbor algorithm to classify images into 2 classes. I'm working with examples and code from https://github.com/aymericdamien/TensorFlow-Examples
The following snippet of code results in Xtr and Ytr being two arrays that I can loop over with a statement like for i in range(len(Xte)): and index like Xte[i, :].
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
Xtr, Ytr = mnist.train.next_batch(5000) #5000 for training
Xte, Yte = mnist.test.next_batch(200) #200 for testing
This code can be found here https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/2_BasicModels/nearest_neighbor.ipynb
What I'm trying to do now is do the same with my own dataset of images and I follow the example from: https://github.com/aymericdamien/TensorFlow-Examples/blob/master/notebooks/5_DataManagement/build_an_image_dataset.ipynb to build my own dataset
The following statement is used after decoding and resizing the images; it generates a batch of tensors representing the images with their labels. However, I can't use them and loop over them as I did above:
X, Y = tf.train.batch([image, label], batch_size=batch_size, capacity=batch_size*8, num_threads=4)
My question is: how do I use those tensors and iterate over them, or convert them to arrays, so that I can use them the same way as above?
Here is the full definition of my function read_images that I use to generate the batch:
N_CLASSES = 2
IMG_HEIGHT = 28
IMG_WIDTH = 28
CHANNELS = 1

def read_images(dataset_path, batch_size):
    imagepaths, labels = list(), list()
    label = 0
    try:
        classes = sorted(os.walk(dataset_path).next()[1])
    except:
        classes = sorted(os.walk(dataset_path).__next__()[1])
    for c in classes:
        c_dir = os.path.join(dataset_path, c)
        try:
            walk = os.walk(c_dir).next()
        except:
            walk = os.walk(c_dir).__next__()
        for sample in walk[2]:
            if sample.endswith('.jpg') or sample.endswith('.jpeg'):
                imagepaths.append(os.path.join(c_dir, sample))
                labels.append(label)
    imagepaths = tf.convert_to_tensor(imagepaths, dtype=tf.string)
    labels = tf.convert_to_tensor(labels, dtype=tf.int32)
    image, label = tf.train.slice_input_producer([imagepaths, labels], shuffle=True)
    image = tf.read_file(image)
    image = tf.image.decode_jpeg(image, channels=CHANNELS)
    image = tf.image.resize_images(image, [IMG_HEIGHT, IMG_WIDTH])
    image = image * 1.0/127.5 - 1.0
    X, Y = tf.train.batch([image, label], batch_size=batch_size, capacity=batch_size*8, num_threads=4)
    return X, Y
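For what it's worth, in the TF1 queue-runner pattern such batch tensors are usually materialised as NumPy arrays by running them inside a session after starting the queue runners; a minimal sketch (the batch size is just an example):
import tensorflow as tf

X, Y = read_images("dataset/", batch_size=5000)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    Xtr, Ytr = sess.run([X, Y])   # plain NumPy arrays now, like mnist.train.next_batch
    print(Xtr.shape, Ytr.shape)

    coord.request_stop()
    coord.join(threads)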
