NotImplementedError when using a custom generator - Python

I use a custom generator to feed my training data. It should inherit from keras.utils.Sequence and define these methods: __init__, __len__, on_epoch_end, __getitem__.
When I fit my model, a "NotImplementedError" occurs. I know it is caused by one of these overridden methods, but I don't know how to handle it.
class DataGenerator(tf.keras.utils.Sequence):
    def __init__(self, root_dir=r'../data/val_test', image_folder='img/', mask_folder='masks/',
                 batch_size=4, image_size=288, nb_y_features=1,
                 augmentation=None,
                 suffle=True):
        # self.image_filenames = listdir_fullpath(os.path.join(root_dir, image_folder))
        self.image_filenames = np.sort([os.path.join(os.path.join(root_dir, image_folder), f)
                                        for f in os.listdir(os.path.join(root_dir, image_folder))])
        # self.mask_names = listdir_fullpath(os.path.join(root_dir, mask_folder))
        self.mask_names = np.sort([os.path.join(os.path.join(root_dir, mask_folder), f)
                                   for f in os.listdir(os.path.join(root_dir, mask_folder))])
        self.batch_size = batch_size
        self.augmentation = augmentation
        self.image_size = image_size
        self.nb_y_features = nb_y_features
        self.suffle = suffle

    # def listdir_fullpath(d):
    #     return np.sort([os.path.join(d, f) for f in os.listdir(d)])

    def __getitem__(self, index):
        data_index_min = int(index*self.batch_size)
        data_index_max = int(min((index+1)*self.batch_size, len(self.image_filenames)))
        indexes = self.image_filenames[data_index_min:data_index_max]
        this_batch_size = len(indexes)  # The last batch can be smaller than the others
        X = np.empty((this_batch_size, self.image_size, self.image_size, 3), dtype=np.float32)
        y = np.empty((this_batch_size, self.image_size, self.image_size, self.nb_y_features), dtype=np.uint8)
        for i, sample_index in enumerate(indexes):
            X_sample, y_sample = self.read_image_mask(self.image_filenames[index * self.batch_size + i],
                                                      self.mask_names[index * self.batch_size + i])
            # if augmentation is defined, we assume it's a train set
            if self.augmentation is not None:
                # Augmentation code
                augmented = self.augmentation(self.image_size)(image=X_sample, mask=y_sample)
                image_augm = augmented['image']
                mask_augm = augmented['mask'].reshape(self.image_size, self.image_size, self.nb_y_features)
                # divide by 255 to normalize images from 0 to 1
                X[i, ...] = image_augm/255
                y[i, ...] = mask_augm/255
            else:
                ...
        return X, y
history = model.fit(train_generator,
                    epochs=EPOCHS,
                    steps_per_epoch=spe_train,
                    callbacks=callbacks,
                    validation_data=validation_generator,
                    validation_steps=spe_val)
This is the error:
NotImplementedError Traceback (most recent call last)
<ipython-input-36-fa9c887c02c7> in <module>
17 callbacks=callbacks,
18 validation_data = validation_generator,
---> 19 validation_steps=spe_val)
1 frames
/usr/local/lib/python3.7/dist-packages/keras/utils/data_utils.py in __len__(self)
489 The number of batches in the Sequence.
490 """
--> 491 raise NotImplementedError
492
493 def on_epoch_end(self):
NotImplementedError:
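The traceback points straight at the cause: the base keras.utils.Sequence.__len__ is being hit, which means the subclass above never overrides __len__ (on_epoch_end is also missing). A minimal sketch of the two missing methods, assuming only the attributes defined in __init__ above; the reshuffle logic is illustrative, not from the question:

    def __len__(self):
        # number of batches per epoch; the base Sequence.__len__ raises
        # NotImplementedError unless the subclass overrides it
        return int(np.ceil(len(self.image_filenames) / float(self.batch_size)))

    def on_epoch_end(self):
        # optional: reshuffle images and masks together between epochs
        if self.suffle:
            perm = np.random.permutation(len(self.image_filenames))
            self.image_filenames = self.image_filenames[perm]
            self.mask_names = self.mask_names[perm]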

Related

Expected object of scalar type Long but got scalar type Int for argument #2 in loss function

I have encountered the following error:
RuntimeError Traceback (most recent call last)
<ipython-input-42-276f5444b449> in <module>
----> 1 train_epocs(model, optimizer, train_dl, valid_dl, epochs=15)
<ipython-input-39-6f4616cc5f25> in train_epocs(model, optimizer, train_dl, val_dl, epochs, C)
11 y_bb = y_bb.cuda().float()
12 out_class, out_bb = model(x)
---> 13 loss_class = F.cross_entropy(out_class, y_class, reduction="sum")
14 loss_bb = F.l1_loss(out_bb, y_bb, reduction="none").sum(1)
15 loss_bb = loss_bb.sum()
~\anaconda3\lib\site-packages\torch\nn\functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction)
2822 if size_average is not None or reduce is not None:
2823 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2824 return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
2825
2826
RuntimeError: Expected object of scalar type Long but got scalar type Int for argument #2 'target' in call to _thnn_nll_loss_forward
I have been training the network with the following set-up. We have 26 classes. The code is adapted from https://jovian.ai/ranerajesh/road-signs-bounding-box-prediction/v/10 I have my own custom dataset that has been structured in the way required for this code to run. However, I have encountered a RuntimeError.
def normalize(im):
    """Normalizes images with Imagenet stats."""
    imagenet_stats = np.array([[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]])
    return (im - imagenet_stats[0])/imagenet_stats[1]

class RoadDataset(Dataset):
    def __init__(self, paths, bb, y, transforms=False):
        self.transforms = transforms
        self.paths = paths.values
        self.bb = bb.values
        self.y = y.values

    def __len__(self):
        return len(self.paths)

    def __getitem__(self, idx):
        path = self.paths[idx]
        print(path)
        y_class = self.y[idx]
        x, y_bb = transformsXY(path, self.bb[idx], self.transforms)
        x = normalize(x)
        x = np.rollaxis(x, 2)
        return x, y_class, y_bb

train_ds = RoadDataset(X_train['new_path'], X_train['new_bb'], y_train, transforms=True)
valid_ds = RoadDataset(X_val['new_path'], X_val['new_bb'], y_val)

batch_size = 2
train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size=batch_size)

class BB_model(nn.Module):
    def __init__(self):
        super(BB_model, self).__init__()
        resnet = models.resnet34(pretrained=True)
        layers = list(resnet.children())[:8]
        self.features1 = nn.Sequential(*layers[:6])
        self.features2 = nn.Sequential(*layers[6:])
        self.classifier = nn.Sequential(nn.BatchNorm1d(512), nn.Linear(512, 26))
        self.bb = nn.Sequential(nn.BatchNorm1d(512), nn.Linear(512, 26))

    def forward(self, x):
        x = self.features1(x)
        x = self.features2(x)
        x = F.relu(x)
        x = nn.AdaptiveAvgPool2d((1, 1))(x)
        x = x.view(x.shape[0], -1)
        return self.classifier(x), self.bb(x)

def update_optimizer(optimizer, lr):
    for i, param_group in enumerate(optimizer.param_groups):
        param_group["lr"] = lr

def train_epocs(model, optimizer, train_dl, val_dl, epochs=10, C=1000):
    idx = 0
    for i in range(epochs):
        model.train()
        total = 0
        sum_loss = 0
        for x, y_class, y_bb in train_dl:
            batch = y_class.shape[0]
            x = x.cuda().float()
            y_class = y_class.cuda()
            y_bb = y_bb.cuda().float()
            out_class, out_bb = model(x)
            loss_class = F.cross_entropy(out_class, y_class, reduction="sum")
            loss_bb = F.l1_loss(out_bb, y_bb, reduction="none").sum(1)
            loss_bb = loss_bb.sum()
            loss = loss_class + loss_bb/C
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            idx += 1
            total += batch
            sum_loss += loss.item()
        train_loss = sum_loss/total
        val_loss, val_acc = val_metrics(model, valid_dl, C)
        print("train_loss %.3f val_loss %.3f val_acc %.3f" % (train_loss, val_loss, val_acc))
    return sum_loss/total

def val_metrics(model, valid_dl, C=1000):
    model.eval()
    total = 0
    sum_loss = 0
    correct = 0
    for x, y_class, y_bb in valid_dl:
        batch = y_class.shape[0]
        x = x.cuda().float()
        y_class = y_class.cuda()
        y_bb = y_bb.cuda().float()
        out_class, out_bb = model(x)
        loss_class = F.cross_entropy(out_class, y_class, reduction="sum")
        loss_bb = F.l1_loss(out_bb, y_bb, reduction="none").sum(1)
        loss_bb = loss_bb.sum()
        loss = loss_class + loss_bb/C
        _, pred = torch.max(out_class, 1)
        correct += pred.eq(y_class).sum().item()
        sum_loss += loss.item()
        total += batch
    return sum_loss/total, correct/total

model = BB_model().cuda()
parameters = filter(lambda p: p.requires_grad, model.parameters())
optimizer = torch.optim.Adam(parameters, lr=0.006)
train_epocs(model, optimizer, train_dl, valid_dl, epochs=15)
loss_class = F.cross_entropy(out_class, y_class, reduction="sum")
In the line above, y_class is the target for out_class (the model's predictions). F.cross_entropy requires its target to be of type Long, but as the traceback shows, y_class is an Int tensor. So you need to change y_class's type to Long:
y_class = y_class.long()
loss_class = F.cross_entropy(out_class, y_class, reduction="sum")
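In the posted training code, the cast fits naturally where the target is moved to the GPU; the same one-line change applies in both train_epocs and val_metrics:

            y_class = y_class.cuda().long()  # cross_entropy expects a LongTensor target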

Having a problem with creating a dataloader class in PyTorch

I am building a neural network for Bengali numerical digit classification using PyTorch. I am having difficulty building the dataset class to load my dataset using a data loader. I have a folder with all the images (numerical digits from 0-9) and a CSV file with 2 columns: the first column contains the name of the image, and the second contains the label (0-9). This is my data loader class, which probably isn't causing the error.
class BDRWDataset(Dataset):
    """BDRW dataset."""

    def __init__(self, csv_file, imgs_dir, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file with labels.
            imgs_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform to be applied
                on a sample.
        """
        self.labels = pd.read_csv(csv_file).iloc[:, 1].to_numpy().reshape(-1, 1)
        self.imgs_dir = imgs_dir
        self.transform = transform

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        img_path = '/content/BDRW_train/digit_' + str(idx) + '.jpg'
        image = io.imread(img_name, plugin='matplotlib')
        image = Image.fromarray(np.uint8(image))
        label = self.labels[idx]
        label = float(label)
        if self.transform:
            image = self.transform(image)
        return (image, label)
I create an instance of this class.
transformed_dataset = BDRWDataset(csv_file='/content/labels.csv', imgs_dir='/content/BDRW_train',
                                  transform=transforms.Compose([
                                      Rescale((28, 28)),
                                      transforms.Normalize((0.5,), (0.5,)),
                                      ToTensor()
                                  ]))
I have defined Rescale and ToTensor as follows:
class Rescale(object):
    """Rescale the image in a sample to a given size.

    Args:
        output_size (tuple or int): Desired output size. If tuple, output is
            matched to output_size. If int, smaller of image edges is matched
            to output_size keeping aspect ratio the same.
    """
    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        image, label = sample
        h, w = image.shape[:2]
        if isinstance(self.output_size, int):
            if h > w:
                new_h, new_w = self.output_size * h / w, self.output_size
            else:
                new_h, new_w = self.output_size, self.output_size * w / h
        else:
            new_h, new_w = self.output_size
        new_h, new_w = int(new_h), int(new_w)
        img = transform.resize(image, (new_h, new_w))
        return img, label

class ToTensor(object):
    """Convert ndarrays in sample to Tensors."""
    def __call__(self, sample):
        image, label = sample['image'], sample['label']
        # swap color axis because
        # numpy image: H x W x C
        # torch image: C x H x W
        image = image.transpose((2, 0, 1))
        return (torch.from_numpy(image), torch.from_numpy(label))
I split the dataset into train and test sets and created a train loader and a validation loader using torch.utils.data.DataLoader.
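The split code itself isn't shown; a minimal sketch of one common way to do it with random_split (the 80/20 ratio and batch size are illustrative assumptions):

from torch.utils.data import DataLoader, random_split

n_val = int(0.2 * len(transformed_dataset))
n_train = len(transformed_dataset) - n_val
train_dataset, val_dataset = random_split(transformed_dataset, [n_train, n_val])

trainloader = DataLoader(train_dataset, batch_size=4, shuffle=True)
valloader = DataLoader(val_dataset, batch_size=4)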
The neural network is
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=5, padding=2),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=5, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.fc = nn.Linear(7*7*32, 10)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out
I create an instance of this class and start training
cnn = CNN()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)

losses = []
for epoch in range(num_epochs):
    for i, (image, label) in enumerate(valloader):
        image = Variable(image.float())
        label = Variable(label)
        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = cnn(image)
        loss = criterion(output, label)
        loss.backward()
        optimizer.step()
        losses.append(loss.data[0])
        if (i+1) % 100 == 0:
            print('Epoch : %d/%d, Iter : %d/%d, Loss: %.4f'
                  % (epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.data[0]))
This is where I get the error
AttributeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/PIL/Image.py in open(fp, mode)
2812 try:
-> 2813 fp.seek(0)
2814 except (AttributeError, io.UnsupportedOperation):
AttributeError: 'str' object has no attribute 'seek'
During handling of the above exception, another exception occurred:
AttributeError Traceback (most recent call last)
9 frames
/usr/local/lib/python3.6/dist-packages/PIL/Image.py in open(fp, mode)
2813 fp.seek(0)
2814 except (AttributeError, io.UnsupportedOperation):
-> 2815 fp = io.BytesIO(fp.read())
2816 exclusive_fp = True
2817
AttributeError: 'str' object has no attribute 'read'
It refers to the PIL Image call that I use in the data loader, so it seems to me that's where I am doing something wrong.
https://colab.research.google.com/drive/17XdP7gUoMNLxPCJ6PHEi3B09UQItzKyf?usp=sharing
This is the notebook I am working on. Please help me debug the errors in the code.
https://drive.google.com/open?id=1DznuHV9Fi5jVEbGdP-tg3ckmp5CNDOj1
This is the dataset I am working on.

How to write a custom DataGenerator for a Keras model

My current approach looks like this: first, I load the images into memory this way:
def load_data_from_directory(root_dir, image_height, image_format='jpg', mask_format='png'):
    """
    Loads train images and corresponding masks with specified image sizes.
    Masks should have same name as image.
    Output files divided by 255 to be between 0-1.
    Folder locations:
        > images (jpg format)
        > segmentation
    Example of usage:
        from common_blocks.data_loaders import load_data_from_directory
        data_dir = './data_objects'
        x_train, y_train = load_data_from_directory(data_dir, image_height=256)
    """
    data = []
    for stage in ['train']:  # a 'test' stage can be added
        directory = os.path.join(root_dir, 'images')
        file_names = [filename.replace(image_format, mask_format) for filename in os.listdir(directory)]
        fps = [os.path.join(directory, filename) for filename in os.listdir(directory)]
        for content in ['images', 'segmentation']:
            # construct path to each image
            directory = os.path.join(root_dir, content)
            if content != 'images':
                fps = [os.path.join(directory, filename) for filename in file_names]
            # read images
            images = [imread(filepath)/255 for filepath in fps]
            # if images have different sizes you have to resize them before:
            image = [resize(image, (image_height, image_height)) for image in images]
            # stack to one np.array
            np_images = np.stack(image, axis=0)
            data.append(np_images)
        del image, file_names
        gc.collect()
    return data
x_train, y_train = load_data_from_directory('./train', image_height, 'jpg', 'png')
Then I feed those images to a DataGenerator:
class DataGenerator(Sequence):
    '''
    Sample usage:
        test_generator = DataGenerator(x_train, y_train, 1,
                                       image_sizes, image_sizes, 1, True)
        Xtest, ytest = test_generator.__getitem__(1)
        plt.imshow(Xtest[0])
        plt.show()
        plt.imshow(ytest[0, :, :, 0])
        plt.show()
    '''
    def __init__(self, X, y, batch_size, height, width, nb_y_features, augmentation=True):
        'Initialization'
        self.batch_size = batch_size
        self.X = X
        self.y = y
        self.indexes = None
        self.currentIndex = 0
        self.augmentation = augmentation
        self.on_epoch_end()
        self.height = height
        self.width = width
        self.nb_y_features = nb_y_features

    def __len__(self):
        'Denotes the number of batches per epoch'
        return int(np.ceil(len(self.X) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        data_index_min = int(index*self.batch_size)
        data_index_max = int(min((index+1)*self.batch_size, len(self.indexes)))
        indexes = self.indexes[data_index_min:data_index_max]
        this_batch_size = len(indexes)  # The last batch can be smaller than the others
        X = np.empty((this_batch_size, self.width, self.height, 3))  # , dtype=int)
        y = np.empty((this_batch_size, self.width, self.height, self.nb_y_features), dtype=int)
        for i, sample_index in enumerate(indexes):
            data_index = self.indexes[index * self.batch_size + i]
            X_sample, y_sample = self.X[data_index].copy(), self.y[data_index].copy()
            if self.augmentation:
                augmented = aug()(image=X_sample, mask=y_sample)
                image_augm = augmented['image']
                mask_augm = augmented['mask']  # .reshape(self.width, self.height, self.nb_y_features)
                X[i, ...] = image_augm
                y[i, ...] = mask_augm
            else:
                X[i, ...] = X_sample
                y[i, ...] = y_sample
        return X, y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = list(range(len(self.X)))
        np.random.shuffle(self.indexes)
Then this generator is used to train the model:
training_generator = DataGenerator(x_train, y_train, batch_size,
                                   height=image_width, width=image_height, nb_y_features=1, augmentation=True)

model = Unet(BACKBONE, encoder_weights='imagenet', encoder_freeze=False)
model.compile(optimizer=Adam(),
              loss=bce_jaccard_loss, metrics=[iou_score])

history = model.fit_generator(training_generator, shuffle=True,
                              epochs=10)
The problem is the size of the data. If it is small enough to fit in memory, everything is OK; once it gets bigger, training fails with an out-of-memory error. How can I read files directly and randomly from a folder?
Something like this should work:
class DataGeneratorFolder(Sequence):
    '''
    Sample usage:
        if to_debug:
            test_generator = DataGeneratorFolder(image_names,
                                                 masks_names,
                                                 batch_size=2,
                                                 image_size=256,
                                                 nb_y_features=1, augmentation=True)
            Xtest, ytest = test_generator.__getitem__(0)
            plt.imshow(Xtest[0])
            plt.show()
            plt.imshow(ytest[0, :, :, 0])
            plt.show()
    '''
    def __init__(self, image_filenames, mask_names, batch_size,
                 image_size=768, nb_y_features=1, augmentation=True,
                 center_crop_prop=0.5):
        self.image_filenames, self.mask_names = image_filenames, mask_names
        self.batch_size = batch_size
        self.currentIndex = 0
        self.augmentation = augmentation
        self.on_epoch_end()
        self.image_size = image_size
        self.nb_y_features = nb_y_features
        self.indexes = None
        self.center_crop_prop = center_crop_prop

    def __len__(self):
        # Keras expects an integer number of batches, so cast the ceiling
        return int(np.ceil(len(self.image_filenames) / float(self.batch_size)))

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.image_filenames, self.mask_names = shuffle(self.image_filenames, self.mask_names)

    def read_image_mask(self, image_name, mask_name):
        print(image_name, mask_name)
        return (imread(image_name)/255).astype(np.float32), \
               (imread(mask_name, as_gray=True) > 0).astype(np.int8)

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        data_index_min = int(index*self.batch_size)
        data_index_max = int(min((index+1)*self.batch_size, len(self.image_filenames)))
        indexes = self.image_filenames[data_index_min:data_index_max]
        this_batch_size = len(indexes)  # The last batch can be smaller than the others
        X = np.empty((this_batch_size, self.image_size, self.image_size, 3), dtype=np.float32)
        y = np.empty((this_batch_size, self.image_size, self.image_size, self.nb_y_features), dtype=np.uint8)
        for i, sample_index in enumerate(indexes):
            X_sample, y_sample = self.read_image_mask(self.image_filenames[index * self.batch_size + i],
                                                      self.mask_names[index * self.batch_size + i])
            random_crop_prop = 1
            if self.augmentation:
                if np.sum(y_sample) > 0:  # mask is not null
                    if np.random.choice(['crop_with_object', 'crop_random'], 1,
                                        p=[self.center_crop_prop, 1-self.center_crop_prop]) == ['crop_with_object']:
                        X_sample, y_sample = random_crop_box_center(X_sample, y_sample,
                                                                    self.image_size, self.image_size)
                        random_crop_prop = 0
                augmented = aug_with_crop(self.image_size, random_crop_prop)(image=X_sample, mask=y_sample)
                image_augm = augmented['image']
                mask_augm = augmented['mask'].reshape(self.image_size, self.image_size, self.nb_y_features)
                X[i, ...] = image_augm
                y[i, ...] = mask_augm
            else:
                X[i, ...] = X_sample
                y[i, ...] = y_sample
        return X, y
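To wire the folder generator into training, build sorted lists of image and mask paths and pass the generator to fit_generator; a minimal sketch, assuming the images/segmentation folder layout from the question (the root path and batch size are illustrative):

import os

root = './train'
image_names = sorted(os.path.join(root, 'images', f) for f in os.listdir(os.path.join(root, 'images')))
masks_names = sorted(os.path.join(root, 'segmentation', f) for f in os.listdir(os.path.join(root, 'segmentation')))

train_generator = DataGeneratorFolder(image_names, masks_names, batch_size=4,
                                      image_size=256, nb_y_features=1, augmentation=True)
history = model.fit_generator(train_generator, epochs=10)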

ValueError: Output of generator should be a tuple `(x, y, sample_weight)` or `(x, y)`. Found: <keras...>

When I was training a vgg_face model on Keras, I used a data generator, but got this issue: ValueError: Output of generator should be a tuple (x, y, sample_weight) or (x, y). Found: <keras_preprocessing.image.NumpyArrayIterator object at 0x7f03e83a75f8>
I have tried the previous methods for a similar issue (ValueError: Output of generator should be a tuple (x, y, sample_weight) or (x, y). Found: None), but they did not work.
def process_line(line):
    path = '/home/apptech/pixeleye_test/apps/arup/dataset/AFAD-Full'
    label_ages = np.asarray([line[1:3]])
    label_genders = np.array([line[4:7]])
    data = Image.open(path + line)
    arr = np.asarray(data, dtype="float32")
    arr = cv2.resize(arr, (224, 224))
    # return (arr, label_ages)
    if label_ages and label_genders != None:
        return (arr, label_ages)

def generate_arrays_from_file(data, batch_size, datagen):
    # np_utils.to_categorical onehot
    while True:
        f = data
        cnt = 0
        X_Y = []
        X = []
        Y_age = []
        Y_gender = []
        for line in f:
            # x, y_age, y_gender = process_line(line.strip('\n'))
            x, y_age = process_line(line.strip('\n'))[0], process_line(line.strip('\n'))[1]
            X.append(x)
            # X_Y.append(x_y)
            Y_age.append(y_age)
            # if int(y_gender) == 111:
            #     label = np.array([1, 0])
            #     Y_gender.append(label)
            # if int(y_gender) == 112:
            #     label = np.array([0, 1])
            #     Y_gender.append(label)
            cnt += 1
            if cnt == batch_size:
                cnt = 0
                datagen.fit(X)
                print(np.asarray(X).shape, np.asarray(Y_age).shape)
                yield datagen.flow(np.array(X), np.array(Y_age), batch_size=batch_size)
                # yield np.asarray(X), np.asarray(Y_age)
                X = []
                X_Y = []
                Y_age = []
                Y_gender = []
        # f.close()
def model(epochs, lr, batch_size):
    content = open('/home/apptech/pixeleye_test/apps/arup/dataset/AFAD-Full/AFAD-Full.txt').readlines()
    random.shuffle(content)
    num = int(len(content) * 0.8)
    train_data = content[:num]
    test_data = content[num:]

    # Convolution Features
    vgg_model = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3),
                        pooling='max')  # pooling: None, avg or max
    # custom parameters
    last_layer = vgg_model.get_layer('avg_pool').output
    x = Flatten(name='flatten')(last_layer)
    out_age = Dense(units=1000, activation='relu', name='regression', kernel_regularizer=regularizers.l2(0.01))(x)
    out_age1 = Dense(units=500, activation='relu', name='regression1', kernel_regularizer=regularizers.l2(0.01))(out_age)
    out_age2 = Dense(units=100, name='regression2', kernel_regularizer=regularizers.l2(0.01))(out_age1)
    out_age3 = Dense(units=1, name='regression3', kernel_regularizer=regularizers.l2(0.01))(out_age2)
    out_gender = Dense(units=2, activation='softmax', name='classifier1')(x)
    # custom_vgg_model = Model(vgg_model.input, outputs=[out_age3, out_gender])
    custom_vgg_model = Model(vgg_model.input, outputs=out_age3)

    # Create the model
    model = custom_vgg_model
    sgd = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    # model.compile(optimizer=sgd, loss=["mean_squared_error", "categorical_crossentropy"],
    #               metrics=['accuracy'])
    model.compile(optimizer=sgd, loss=["mean_squared_error"],
                  metrics=['accuracy'])

    logging.debug("Model summary...")
    model.count_params()
    model.summary()

    class Schedule:
        def __init__(self, nb_epochs, initial_lr):
            self.epochs = nb_epochs
            self.initial_lr = initial_lr

        def __call__(self, epoch_idx):
            if epoch_idx < self.epochs * 0.25:
                return self.initial_lr
            elif epoch_idx < self.epochs * 0.50:
                return self.initial_lr * 0.2
            elif epoch_idx < self.epochs * 0.75:
                return self.initial_lr * 0.04
            return self.initial_lr * 0.008

    callbacks = [LearningRateScheduler(schedule=Schedule(epochs, lr)),
                 ModelCheckpoint("/home/apptech/pixeleye_test/apps/arup/result/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
                                 monitor="val_loss",
                                 verbose=1,
                                 save_best_only=True,
                                 mode="auto")
                 ]

    logging.debug("Running training...")

    datagen = ImageDataGenerator(
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        preprocessing_function=get_random_eraser(v_l=0, v_h=255))

    # training_generator = MixupGenerator(X_train, [y_train_a, y_train_g], batch_size=32, alpha=0.2,
    #                                     datagen=datagen)()
    hist = model.fit_generator(generator=generate_arrays_from_file(train_data, batch_size, datagen),
                               steps_per_epoch=len(train_data) // batch_size,
                               validation_data=generate_arrays_from_file(test_data, batch_size, datagen),
                               validation_steps=len(test_data) // batch_size,
                               epochs=epochs, verbose=1,
                               callbacks=callbacks)
Fixed: datagen.flow() returns a generator (a NumpyArrayIterator), not a batch. To obtain the batch, use:
X_batch, y_batch = datagen.flow(X_train, y_train, batch_size=9).next()
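Applied to the generator in the question, that means pulling one augmented batch out of the iterator and yielding the arrays, so Keras receives an (x, y) tuple rather than the iterator object; a sketch of just the changed lines inside the for loop:

            if cnt == batch_size:
                cnt = 0
                datagen.fit(np.asarray(X))
                # take one batch of arrays out of the iterator and yield those
                X_batch, y_batch = datagen.flow(np.array(X), np.array(Y_age), batch_size=batch_size).next()
                yield X_batch, y_batch
                X, Y_age = [], []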

Keras model.fit_generator stops/freezes in first epoch

I'm setting up a simple U-Net implementation with Gipl files converted to PIL images. Unfortunately, after adding a DataGenerator to distribute the GPU work more efficiently, the network stops after initializing the first epoch, without any further output.
When I disable use_multiprocessing on the model, the network generates the images for the first epoch, but soon runs out of memory. After enabling the option again, no images are generated and there is no output at all. At least the preparation of the images should start.
The U-Net model:
model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[mean_iou])
model.summary()

params_train = {'dim': (512, 512, 1),
                'batch_size': 16,
                'n_classes': 2,
                'n_channels': 1,
                'shuffle': True}

X, Xv, Y, Yv = getSubsets(X_train_path, Y_train_path)

training_generator = DataGenerator(X, Y, **params_train)
validation_generator = DataGenerator(Xv, Yv, **params_train)

earlystopper = EarlyStopping(patience=5, verbose=1)
checkpointer = ModelCheckpoint('model-2019-1.h5', verbose=1, save_best_only=True)  # path of model

print('last output, no output of datagenerator')
model_checkpoint = ModelCheckpoint('unet_fmr.hdf5', monitor='loss', verbose=1, save_best_only=True)

results = model.fit_generator(generator=training_generator,
                              validation_data=validation_generator,
                              verbose=1,
                              use_multiprocessing=True,
                              epochs=30,
                              callbacks=[model_checkpoint])
EDIT: And the DataGenerator:
class DataGenerator(keras.utils.Sequence):
    def __init__(self, list_IDs, labels, batch_size, dim, n_channels=1,
                 n_classes=2, shuffle=True):
        # ...initialize variables
        self.on_epoch_end()

    def __getitem__(self, index):
        'Generate one batch of data'
        indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        X, Y = self.__data_generation(indexes)
        return X, Y

    def __len__(self):
        return int(np.floor(len(self.list_IDs) / self.batch_size))

    def on_epoch_end(self):
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)

    def getImage(self, path):
        x_train_array = getGiplAsArray(path)
        x_train_i = getResizedGiplImageByArray(x_train_array)
        return x_train_i

    def make3D(self, img):
        img = np.reshape(img, (img.shape[0], img.shape[1], 1))
        return img

    def __data_generation(self, indexes):
        print('__data_generation()')
        list_IDs_temp = [self.list_IDs[k] for k in indexes]
        list_labels_temp = [self.labels[k] for k in indexes]
        X = np.empty((self.batch_size, *self.dim))
        Y = np.empty((self.batch_size, *self.dim))
        for i, ID in enumerate(list_IDs_temp):
            temp = self.getImage(ID)
            X[i,] = self.make3D(temp)
        for i, ID in enumerate(list_labels_temp):
            temp = self.getImage(ID)
            Y[i,] = self.make3D(temp)
        return X, Y
The output, after which it hangs for hours:
Total params: 1,940,817
Trainable params: 1,940,817
Non-trainable params: 0
initialization of DataGen
on_epoch_end()
Epoch 1/30
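No accepted fix is shown in the thread, but one configuration detail is worth ruling out (an assumption, not confirmed here): fit_generator defaults to workers=1, so use_multiprocessing=True parallelizes nothing unless the worker count is raised, and a generator that cannot be pickled can make worker processes hang silently. A hedged variant of the fit call:

results = model.fit_generator(generator=training_generator,
                              validation_data=validation_generator,
                              verbose=1,
                              use_multiprocessing=True,
                              workers=4,  # actually spawn several loader processes
                              epochs=30,
                              callbacks=[model_checkpoint])

If that still hangs, use_multiprocessing=False with workers=4 falls back to thread-based loading, which sidesteps pickling entirely.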
