How to set probability for tf.keras.layers.RandomFlip? - python

Is it possible to set a probability when doing random flip operations with tf.keras.layers.RandomFlip?
For example:
def augmentation():
    data_augmentation = keras.Sequential([
        keras.layers.RandomFlip("horizontal", p=0.5),
        keras.layers.RandomRotation(0.2, p=0.5)
    ])
    return data_augmentation

Try creating a simple Lambda layer and defining your probability in a separate function:
import random
import tensorflow as tf
from tensorflow import keras

def random_flip_on_probability(image, probability=0.5):
    if random.random() < probability:
        return tf.image.random_flip_left_right(image)
    return image

def augmentation():
    data_augmentation = keras.Sequential([
        keras.layers.Lambda(random_flip_on_probability),
        keras.layers.RandomRotation(0.2)
    ])
    return data_augmentation
If you need to use data augmentation during training or inference, you will have to define your own custom layer. Try something like this:
import tensorflow as tf
import pathlib
import matplotlib.pyplot as plt

class RandomFlipOnProbability(tf.keras.layers.Layer):
    def __init__(self, probability):
        super(RandomFlipOnProbability, self).__init__()
        self.probability = probability

    def call(self, images):
        return tf.cond(tf.random.uniform(()) < self.probability,
                       lambda: tf.image.flip_left_right(images),
                       lambda: images)

dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)

batch_size = 32
train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(180, 180),
    batch_size=batch_size)

random_layer = RandomFlipOnProbability(probability=0.9)
normalization_layer = tf.keras.layers.Rescaling(1./255)

images, _ = next(iter(train_ds.take(1)))
images = normalization_layer(random_layer(images))
image = images[0]
plt.imshow(image.numpy())
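The tf.cond version above flips the whole batch or nothing. If you would rather flip each image independently with the given probability, a sketch of a per-image variant (my own addition, not from the original answer) could draw one uniform sample per image and select with tf.where:
class PerImageRandomFlip(tf.keras.layers.Layer):
    def __init__(self, probability=0.5, **kwargs):
        super().__init__(**kwargs)
        self.probability = probability

    def call(self, images):
        batch = tf.shape(images)[0]
        # one coin toss per image, broadcast over height, width and channels
        coin = tf.random.uniform((batch, 1, 1, 1)) < self.probability
        return tf.where(coin, tf.image.flip_left_right(images), images)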

Related

Nested Cross Validation in PyTorch

I am working on a multiclass image classification problem that has 4k labeled images. Currently, I am using cross-validation. However, I want to try nested cross-validation for this problem in PyTorch, but I couldn't find an existing implementation.
Is it possible to use nested cross-validation in PyTorch? If so, how?
I built a simple pipeline below. Is it correct to implement nested cross-validation like this?
import torch
from torch.utils.data import DataLoader, SubsetRandomSampler
from sklearn.model_selection import KFold
from torchvision import datasets

input_size = (256, 3, 224, 244)
target_size = (256,)

class Dataset(datasets.VisionDataset):
    def __init__(self):
        super().__init__(self)
        self.images = torch.rand(input_size).float()
        self.targets = torch.randint(0, 3, target_size)

    def __getitem__(self, index: int) -> any:
        return self.images[index], self.targets[index]

    def __len__(self) -> int:
        return len(self.images)

class BasicModel(torch.nn.Module):
    def __init__(self) -> None:
        super(BasicModel, self).__init__()
        self.conv = torch.nn.Conv2d(3, 16, kernel_size=(5, 5))
        self.adp = torch.nn.AdaptiveAvgPool2d(1)
        self.linear = torch.nn.Linear(16, 3)

    def forward(self, x):
        x = self.conv(x)
        x = self.adp(x)
        x = x.view(x.size(0), -1)
        x = self.linear(x)
        return x

data_ids = [*range(input_size[0])]
data = Dataset()
model = BasicModel()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)

k_fold = 5
kfold_test = KFold(n_splits=k_fold, shuffle=True)
num_epochs = 2

for test_fold, (remain_ids, test_ids) in enumerate(kfold_test.split(data_ids)):
    test_sampler = SubsetRandomSampler(test_ids)
    testLoader = DataLoader(data, sampler=test_sampler, batch_size=2)
    kfold_val = KFold(n_splits=k_fold - 1, shuffle=True)
    for epoch in range(num_epochs):
        for val_fold, (train_ids, val_ids) in enumerate(kfold_val.split(remain_ids)):
            train_sampler = SubsetRandomSampler(train_ids)
            trainLoader = DataLoader(data, sampler=train_sampler, batch_size=2)
            val_sampler = SubsetRandomSampler(val_ids)
            valLoader = DataLoader(data, sampler=val_sampler, batch_size=2)

            model.train()
            for image, target in trainLoader:
                with torch.cuda.amp.autocast():
                    output = model(image)
                    loss = criterion(output, target)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            print(f"train loss:{loss.item()}")

            model.eval()
            with torch.no_grad():
                for image, target in valLoader:
                    output = model(image)
                    loss = criterion(output, target)
                print(f"val loss:{loss.item()}")

    model.eval()
    with torch.no_grad():
        for image, target in testLoader:
            output = model(image)
            loss = criterion(output, target)
        print(f"test loss:{loss.item()}")

Pytorch CNN script training, but not getting results

I’m just getting started with pytorch. I am trying to do a simple binary classification project with the cats and dogs dataset. After much fumbling around, I was able to get the model to train, but I’m not getting the expected results.
First, the loss starts out way too low. To me, that seems to indicate I’m not measuring loss correctly.
Second, the model just predicts everything as 0.
I’m sure there are many mistakes here, but I would appreciate it if someone could take a look and let me know what I’m doing wrong. Thank you!
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
from torchvision.io import read_image
from torch.utils.data import Dataset, DataLoader
from torchvision.utils import make_grid
from torchvision.utils import save_image
from sklearn.model_selection import train_test_split
import os
import numpy as np
from sklearn import preprocessing
import glob
import cv2

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)

IMAGE_SIZE = 64
DATA_DIR = "C:\\Users\\user\\source\\repos\\pytorch-youtube\\data\\catsdogs\\PetImages\\"
LABELS = ('cat', 'dog')

# custom dataset class
# expects the root folder to have sub folders with class names
# and pictures of classes inside folder
class CustomImageDataset(Dataset):
    def __init__(self):
        self.imgs_path = DATA_DIR
        file_list = glob.glob(self.imgs_path + "*")
        self.data = []
        for class_path in file_list:
            class_name = class_path.split("\\")[-1]
            for img_path in glob.glob(class_path + "\\*.jpg"):
                self.data.append([img_path, class_name])
        self.class_map = {"Dog": 0, "Cat": 1}
        self.img_dim = (IMAGE_SIZE, IMAGE_SIZE)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        img_path, class_name = self.data[idx]
        # this is to handle corrupt images in the dataset
        # could probably be handled better
        try:
            img = cv2.imread(img_path)
            img = cv2.resize(img, self.img_dim)
        except:
            img_path, class_name = self.data[idx + 1]
            img = cv2.imread(img_path)
            img = cv2.resize(img, self.img_dim)
        class_id = self.class_map[class_name]
        img_tensor = torch.from_numpy(img)
        img_tensor = img_tensor.permute(2, 0, 1)  # not exactly sure what/why for this line
        class_id = torch.tensor([class_id])
        return img_tensor, class_id

# as is, we aren't using these
transform = transforms.Compose(
    [transforms.Resize((64, 64)),
     transforms.ConvertImageDtype(torch.float32),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
     ])

dataset = CustomImageDataset()
dataloader = DataLoader(dataset, batch_size=4, shuffle=True)
dataiter = iter(dataloader)
train_features, train_labels = dataiter.next()

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(2704, 128)  # only way I got input size was by running code
        self.fc2 = nn.Linear(128, 64)
        self.fc3 = nn.Linear(64, 2)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

net = Net()
# net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.001)

for epoch in range(2):
    running_loss = 0.0
    for i, data in enumerate(dataloader, 0):
        inputs, labels = data
        # this is the fix for "expected scalar type Byte but found Float"
        # this seems to completely destroy the features in the image to just white
        inputs = inputs.float()
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, torch.max(labels, 1)[1])
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.10f}')
            running_loss = 0.0

print("finished")

# save the model
PATH = './custom_trained_model_dogs_cats.pth'
torch.save(net.state_dict(), PATH)
It seems I was passing in the wrong thing to my loss function. I changed this line
loss = criterion(outputs, torch.max(labels,1)[1])
to this
loss = criterion(outputs, torch.max(labels,1)[0])
and everything seems to be working. I'm able to correctly classify the cats and dogs.
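As a possible simplification (my own suggestion, not part of the original fix): if __getitem__ returns the class id as a plain int instead of a one-element tensor, the default collate function stacks the labels into a 1-D LongTensor of shape (batch,), which is exactly what CrossEntropyLoss expects, so the torch.max call can be dropped entirely:
def __getitem__(self, idx):
    img_path, class_name = self.data[idx]
    img = cv2.resize(cv2.imread(img_path), self.img_dim)
    img_tensor = torch.from_numpy(img).permute(2, 0, 1).float()
    class_id = self.class_map[class_name]   # plain int, e.g. 0 or 1
    return img_tensor, class_id

# in the training loop:
# loss = criterion(outputs, labels)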

Error: index 255 is out of bounds for axis 1 with size 2 in preprocessing

I am applying semantic segmentation and encounter the error "index 255 is out of bounds for axis 1 with size 2" in preprocessing. I have two classes, a building class and background.
While running "x, y = train_img_gen.next()" it throws me this error.
Can anybody help me? I have tried a few things but nothing is working.
"""
Define Generator for images and masks so we can read them directly from the drive.
seed=24
batch_size= 16
n_classes=2
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
from tensorflow.keras.utils import to_categorical
#Use this to preprocess input for transfer learning
BACKBONE = 'resnet34'
preprocess_input = sm.get_preprocessing(BACKBONE)
def preprocess_data(img, mask, num_class):
img = scaler.fit_transform(img.reshape(-1, img.shape[-1])).reshape(img.shape)
img = preprocess_input(img)
mask = to_categorical(mask, num_class)
return (img,mask)
#Define the generator.
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def trainGenerator(train_img_path, train_mask_path, num_class):
img_data_gen_args = dict(horizontal_flip=True,
vertical_flip=True,
fill_mode='reflect')
image_datagen = ImageDataGenerator(**img_data_gen_args)
mask_datagen = ImageDataGenerator(**img_data_gen_args)
image_generator = image_datagen.flow_from_directory(
train_img_path,
class_mode = None,
batch_size = batch_size,
seed = seed)
mask_generator = mask_datagen.flow_from_directory(
train_mask_path,
class_mode = None,
color_mode = 'grayscale',
batch_size = batch_size,
seed = seed)
train_generator = zip(image_generator, mask_generator)
for (img, mask) in train_generator:
img, mask = preprocess_data(img, mask, num_class)
yield (img, mask)
train_img_path = "/content/AerialImageDataset/train/data_for_training_and_testing/train_images/"
train_mask_path = "/content/AerialImageDataset/train/data_for_training_and_testing/train_masks/"
train_img_gen = trainGenerator(train_img_path, train_mask_path, num_class=2)
val_img_path = "/content/AerialImageDataset/train/data_for_training_and_testing/val_images/"
val_mask_path = "/content/AerialImageDataset/train/data_for_training_and_testing/val_masks/"
val_img_gen = trainGenerator(val_img_path, val_mask_path, num_class=2)
x, y = train_img_gen.__next__()
"""

Evaluate U-Net by layer

I am coming from a medical background and am a newbie in this machine learning field. I am trying to train my U-Net model using Keras and TensorFlow for image segmentation. However, my loss value is all NaN and the prediction is all black.
I would like to check the U-Net layer by layer, but I don't know how to feed the data or where to start. What I mean by checking each layer is that I want to feed my images to the first layer, for example, see its output, then move on to the second layer, and so on until the last layer. I just want to see how the output is produced at each layer and to find where the NaN values first appear. I really appreciate your help.
These are my codes.
import os
import matplotlib.pyplot as plt
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
from tensorflow import keras

# Constants
SEED = 42
BATCH_SIZE_TRAIN = 16
BATCH_SIZE_TEST = 16
IMAGE_HEIGHT = 512
IMAGE_WIDTH = 512
IMG_SIZE = (IMAGE_HEIGHT, IMAGE_WIDTH)

data_dir = 'data'
data_dir_train = os.path.join(data_dir, 'training')
data_dir_train_image = os.path.join(data_dir_train, 'img')
data_dir_train_mask = os.path.join(data_dir_train, 'mask')
data_dir_test = os.path.join(data_dir, 'test')
data_dir_test_image = os.path.join(data_dir_test, 'img')
data_dir_test_mask = os.path.join(data_dir_test, 'mask')

NUM_TRAIN = 1413
NUM_TEST = 210
NUM_OF_EPOCHS = 10

def create_segmentation_generator_train(img_path, mask_path, BATCH_SIZE):
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)
    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)

def create_segmentation_generator_test(img_path, mask_path, BATCH_SIZE):
    data_gen_args = dict(rescale=1./255)
    img_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)
    img_generator = img_datagen.flow_from_directory(img_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    mask_generator = mask_datagen.flow_from_directory(mask_path, target_size=IMG_SIZE, class_mode=None, color_mode='grayscale', batch_size=BATCH_SIZE, seed=SEED)
    return zip(img_generator, mask_generator)

def display(display_list):
    plt.figure(figsize=(15, 15))
    title = ['Input Image', 'True Mask', 'Predicted Mask']
    for i in range(len(display_list)):
        plt.subplot(1, len(display_list), i+1)
        plt.title(title[i])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]), cmap='gray')
    plt.show()

def show_dataset(datagen, num=1):
    for i in range(0, num):
        image, mask = next(datagen)
        display([image[0], mask[0]])

def unet(n_levels, initial_features=32, n_blocks=2, kernel_size=3, pooling_size=2, in_channels=1, out_channels=1):
    # n_blocks = how many conv in each level
    inputs = keras.layers.Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, in_channels))
    x = inputs
    convpars = dict(kernel_size=kernel_size, activation='relu', padding='same')
    # downstream
    skips = {}
    for level in range(n_levels):
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
        if level < n_levels - 1:
            skips[level] = x
            x = keras.layers.MaxPool2D(pooling_size)(x)
    # upstream
    for level in reversed(range(n_levels-1)):
        x = keras.layers.Conv2DTranspose(initial_features * 2 ** level, strides=pooling_size, **convpars)(x)
        x = keras.layers.Concatenate()([x, skips[level]])
        for _ in range(n_blocks):
            x = keras.layers.Conv2D(initial_features * 2 ** level, **convpars)(x)
    # output
    activation = 'sigmoid' if out_channels == 1 else 'softmax'
    x = keras.layers.Conv2D(out_channels, kernel_size=1, activation=activation, padding='same')(x)
    return keras.Model(inputs=[inputs], outputs=[x], name=f'UNET-L{n_levels}-F{initial_features}')

EPOCH_STEP_TRAIN = NUM_TRAIN // BATCH_SIZE_TRAIN
EPOCH_STEP_TEST = NUM_TEST // BATCH_SIZE_TEST

train_generator = create_segmentation_generator_train(data_dir_train_image, data_dir_train_mask, BATCH_SIZE_TRAIN)
test_generator = create_segmentation_generator_test(data_dir_test_image, data_dir_test_mask, BATCH_SIZE_TEST)

model = unet(4)
model.compile(optimizer="adam", loss='binary_crossentropy', metrics=['accuracy'])
model.fit_generator(generator=train_generator, steps_per_epoch=EPOCH_STEP_TRAIN, validation_data=test_generator, validation_steps=EPOCH_STEP_TEST, epochs=NUM_OF_EPOCHS)

def show_prediction(datagen, num=1):
    for i in range(0, num):
        image, mask = next(datagen)
        pred_mask = model.predict(image)[0] > 0.5
        display([image[0], mask[0], pred_mask])

show_prediction(test_generator, 2)
To investigate your model layer by layer, please see this example of how to show a summary of the model and also how to save it:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# create the input
inputs = keras.Input(shape=(1,))
# create a layer
dense = layers.Dense(64, activation="relu")
x = dense(inputs)
x = layers.Dense(64, activation="relu")(x)
outputs = layers.Dense(10)(x)
# compose the model
model = keras.Model(inputs=inputs, outputs=outputs, name="Spesiaali")
# inspect it
model.summary()
# save it
model.save(".\model_to_be_investigated_by_someone_else_to_help_you")
...this makes it possible for you to see the whole model structure for "debugging your AI". If you do not find the solution yourself, add the last line of the example to your own code, put the resulting folder e.g. on GitHub, and ask someone else to look at the structure of your model to help you solve the problem.
The blue drawing illustrates the output of the model.summary() command and the red line illustrates the output shape of the first dense layer.
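To actually feed images through and inspect the activations layer by layer, as the question asks, here is a hedged sketch (my own addition, assuming the functional U-Net model and the train_generator defined above): wrap the model in a probe keras.Model that exposes every layer's output and report the first layer that produces NaN values:
import numpy as np
from tensorflow import keras

def first_nan_layer(model, images):
    # probe model: same input, but returns the output of every layer
    probe = keras.Model(inputs=model.inputs,
                        outputs=[layer.output for layer in model.layers])
    for layer, activation in zip(model.layers, probe.predict(images)):
        if np.isnan(activation).any():
            return layer.name   # first layer whose output already contains NaN
    return None

# usage (assumed names): images, masks = next(train_generator); print(first_nan_layer(model, images))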

How to make a customised dataset in PyTorch for images and their masks?

I have two dataset folders of tif images: one is a folder called BMMCdata, and the other is the masks of the BMMCdata images, called BMMCmasks (the image names correspond). I am trying to make a customised dataset and also split the data randomly into train and test. At the moment I am getting an error:
self.filenames.append(fn)
AttributeError: 'CustomDataset' object has no attribute 'filenames'
Any comment will be appreciated a lot.
import torch
from torch.utils.data.dataset import Dataset  # For custom data-sets
from torchvision import transforms
from PIL import Image
import os.path as osp
import glob

folder_data = "/Users/parto/PycharmProjects/U-net/BMMCdata/data"

class CustomDataset(Dataset):
    def __init__(self, root):
        self.filename = folder_data
        self.root = root
        self.to_tensor = transforms.ToTensor()
        filenames = glob.glob(osp.join(folder_data, '*.tif'))
        for fn in filenames:
            self.filenames.append(fn)
        self.len = len(self.filenames)
        print(fn)

    def __getitem__(self, index):
        image = Image.open(self.filenames[index])
        return self.transform(image)

    def __len__(self):
        return self.len

custom_img = CustomDataset(folder_data)
# total images in set
print(custom_img.len)

train_len = int(0.6 * custom_img.len)
test_len = custom_img.len - train_len
train_set, test_set = CustomDataset.random_split(custom_img, lengths=[train_len, test_len])
# check lens of subset
len(train_set), len(test_set)

train_set = CustomDataset(folder_data)
train_set = torch.utils.data.TensorDataset(train_set, train=True, batch_size=4)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=4, shuffle=True, num_workers=1)
print(train_set)
print(train_loader)

test_set = torch.utils.data.DataLoader(Dataset, batch_size=4, sampler=train_sampler)
test_loader = torch.utils.data.DataLoader(Dataset, batch_size=4)
Answer given by @ptrblck in the PyTorch community. Thank you.
# get all the image and mask paths and the number of images
folder_data = glob.glob("D:\\Neda\\Pytorch\\U-net\\BMMCdata\\data\\*.tif")
folder_mask = glob.glob("D:\\Neda\\Pytorch\\U-net\\BMMCmasks\\masks\\*.tif")

# split these paths using a certain percentage
len_data = len(folder_data)
print(len_data)
train_size = 0.6

train_image_paths = folder_data[:int(len_data*train_size)]
test_image_paths = folder_data[int(len_data*train_size):]

train_mask_paths = folder_mask[:int(len_data*train_size)]
test_mask_paths = folder_mask[int(len_data*train_size):]

class CustomDataset(Dataset):
    def __init__(self, image_paths, target_paths, train=True):  # initial logic happens like transform
        self.image_paths = image_paths
        self.target_paths = target_paths
        self.transforms = transforms.ToTensor()

    def __getitem__(self, index):
        image = Image.open(self.image_paths[index])
        mask = Image.open(self.target_paths[index])
        t_image = self.transforms(image)
        return t_image, mask

    def __len__(self):  # return count of samples we have
        return len(self.image_paths)

train_dataset = CustomDataset(train_image_paths, train_mask_paths, train=True)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=1)

test_dataset = CustomDataset(test_image_paths, test_mask_paths, train=False)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=4, shuffle=False, num_workers=1)
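Since the original question also asked for a random train/test split (the slicing above splits the path lists deterministically), a short sketch using torch.utils.data.random_split on a single dataset built from all paths (my own addition, reusing the CustomDataset defined above):
from torch.utils.data import random_split

full_dataset = CustomDataset(folder_data, folder_mask)   # all image and mask paths
train_len = int(0.6 * len(full_dataset))
test_len = len(full_dataset) - train_len
train_set, test_set = random_split(full_dataset, [train_len, test_len])

train_loader = torch.utils.data.DataLoader(train_set, batch_size=4, shuffle=True, num_workers=1)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=4, shuffle=False, num_workers=1)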
