I am working on a violence detection service and am developing software based on the code in this repo. My dataset consists of videos residing in two directories, "Violence" and "Non-Violence".
I used this code to generate npy files from the RGB channels and optical flow features. The output of this step is two folders containing npy arrays (np.float32 dtype) in which each frame has shape 244x244x5, so I have the video frames' RGB data in the first 3 channels (npy[..., :3]) and the optical flow features in the next two channels (npy[..., 3:]).
Now I am trying to convert them to tfrecords and use tf.data.TFRecordDataset to speed up the training process. Since my model input has to be a cube tensor, each training element has to consist of 64 frames of a video, i.e. each data point has shape 64x244x244x5.
So I used this code to convert the npy files to tfrecords:
from pathlib import Path
from os.path import join
import tensorflow as tf
import numpy as np
import cv2
from tqdm import tqdm
def normalize(data):
    # zero-mean / unit-variance normalization over the whole array
    mean = np.mean(data)
    std = np.std(data)
    return (data - mean) / std
def random_flip(video, prob):
    # horizontally flip the whole clip with probability `prob`
    s = np.random.rand()
    if s < prob:
        video = np.flip(m=video, axis=2)
    return video
def color_jitter(video):
# range of s-component: 0-1
# range of v component: 0-255
s_jitter = np.random.uniform(-0.2, 0.2)
v_jitter = np.random.uniform(-30, 30)
for i in range(len(video)):
hsv = cv2.cvtColor(video[i], cv2.COLOR_RGB2HSV)
s = hsv[..., 1] + s_jitter
v = hsv[..., 2] + v_jitter
s[s < 0] = 0
s[s > 1] = 1
v[v < 0] = 0
v[v > 255] = 255
hsv[..., 1] = s
hsv[..., 2] = v
video[i] = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
return video
def uniform_sample(video: np.ndarray, target_frames: int = 64) -> np.ndarray:
    """
    Uniformly samples target_frames frames from a video, padding with the
    last frames when the video is too short.
    Args:
        video: array of frames with shape (n_frames, H, W, C)
        target_frames: number of frames to sample
    Returns:
        np.ndarray of shape (target_frames, H, W, C)
    """
    len_frames = len(video)
    interval = int(np.ceil(len_frames / target_frames))
    # init empty list for the sampled video
    sampled_video = []
    for i in range(0, len_frames, interval):
        sampled_video.append(video[i])
    # calculate number of padded frames and fix it
    num_pad = target_frames - len(sampled_video)
    if num_pad > 0:
        padding = [video[i] for i in range(-num_pad, 0)]
        sampled_video += padding
    return np.array(sampled_video, dtype=np.float32)
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
if __name__ == '__main__':
path = Path('transformed/')
npy_files = list(path.rglob('*.npy'))[:100]
aug = True
# one_hots = to_categorical(range(2), dtype=np.int8)
path_to_save = 'data_tfrecords'
tfrecord_path = join(path_to_save, 'all_data.tfrecord')
with tf.io.TFRecordWriter(tfrecord_path) as writer:
for file in tqdm(npy_files, desc='files converted'):
# load npy files
npy = np.load(file.as_posix(), mmap_mode='r')
data = np.float32(npy)
del npy
# Uniform sampling
data = uniform_sample(data, target_frames=64)
# Add augmentation
if aug:
data[..., :3] = color_jitter(data[..., :3])
data = random_flip(data, prob=0.5)
# Normalization
data[..., :3] = normalize(data[..., :3])
data[..., 3:] = normalize(data[..., 3:])
            # Label: parent directory "Violence" -> 1, "Non-Violence" -> 0
            label = 1 if file.parent.stem.startswith('V') else 0
# label = one_hots[label]
feature = {'image': _bytes_feature(tf.compat.as_bytes(data.tobytes())),
'label': _int64_feature(int(label))}
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
The code works fine, but the real problem is that it consumes too much disk space: my whole dataset of 2000 videos takes 12 GB, the npy files take around 80 GB, and the tfrecords now take over 120 GB. How can I convert them more efficiently to reduce the space required to store them?
The answer might be too late, but I see you are still saving the raw video frames in your tfrecords file, which is why the file is taking so much space.
Try removing the "image" feature from your features list and saving each frame with its height, width, channels, and so forth instead:
feature = {'label': _int64_feature(int(label))}
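If the frames do need to stay inside the tfrecords, two options worth trying are GZIP compression on the writer and storing the RGB channels as uint8 instead of float32. A minimal sketch, reusing the question's data, label, and feature helpers, and assuming the RGB values are still in the 0-255 range at write time (normalization then moves into the input pipeline):
import tensorflow as tf
import numpy as np

options = tf.io.TFRecordOptions(compression_type='GZIP')
with tf.io.TFRecordWriter('all_data_gzip.tfrecord', options=options) as writer:
    # uint8 RGB is 4x smaller than float32; float16 flow halves the rest
    rgb_u8 = data[..., :3].astype(np.uint8)
    flow_f16 = data[..., 3:].astype(np.float16)
    feature = {'rgb': _bytes_feature(rgb_u8.tobytes()),
               'flow': _bytes_feature(flow_f16.tobytes()),
               'label': _int64_feature(int(label))}
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    writer.write(example.SerializeToString())
# read back with: tf.data.TFRecordDataset(path, compression_type='GZIP')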
I am trying to work on an image colorizer using autoencoders. The 'input' is a grayscale image and the 'labels' are their corresponding color images. I am trying to get it to work on google colab on a subset of the dataset. The problem is that the session crashes when I try to convert the list of images to a numpy array.
Here's what I tried:
import os
import numpy as np
from tqdm import tqdm
from skimage import io, transform, color

X = []
y = []
errors = 0
count = 0
image_path = 'drive/My Drive/datasets/imagenette'
for file in tqdm(os.listdir(image_path)):
try:
# Load, transform image
color_image = io.imread(os.path.join(image_path,file))
color_image = transform.resize(color_image,(224,224))
lab_image = color.rgb2lab(color_image)
L = lab_image[:,:,0]/100.
ab = lab_image[:,:,1:]/128.
# Append to list
gray_scale_image = np.stack((L,L,L), axis=2)
X.append(gray_scale_image)
y.append(ab)
count += 1
if count == 5000:
break
    except Exception:
errors += 1
print(f'Errors encountered: {errors}')
X = np.array(X)
y = np.array(y)
print(X.shape)
print(y.shape)
Is there a way to load only a few batches, feed them to the model and, while it is training on them, load the next set of batches?
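What is described here is exactly what a Python generator or tf.keras.utils.Sequence provides: only one batch is loaded and transformed at a time while the model trains. A minimal sketch under the question's setup (same resize and Lab transform as above; the class name is illustrative):
import os
import numpy as np
import tensorflow as tf
from skimage import io, transform, color

class ColorizeBatches(tf.keras.utils.Sequence):
    # loads and transforms only one batch of images at a time
    def __init__(self, image_dir, batch_size=32):
        self.files = [os.path.join(image_dir, f) for f in os.listdir(image_dir)]
        self.batch_size = batch_size
    def __len__(self):
        return int(np.ceil(len(self.files) / self.batch_size))
    def __getitem__(self, idx):
        batch = self.files[idx * self.batch_size:(idx + 1) * self.batch_size]
        X, y = [], []
        for path in batch:
            lab = color.rgb2lab(transform.resize(io.imread(path), (224, 224)))
            L = lab[:, :, 0] / 100.
            X.append(np.stack((L, L, L), axis=2))
            y.append(lab[:, :, 1:] / 128.)
        return np.array(X), np.array(y)

# model.fit(ColorizeBatches('drive/My Drive/datasets/imagenette'), epochs=10)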
I want to use SimpleITK or medpy to convert the 3D images into 2D images.
Alternatively, I want to get a three-dimensional matrix and then divide it into some two-dimensional matrices.
import SimpleITK as ITK
import numpy as np
#from medpy.io import load
url = r'G:\path\to\my.mha'
image = ITK.ReadImage(url)
image_array = ITK.GetArrayFromImage(image)  # convert the image to a numpy array
frame_num, width, height = image_array.shape
print(frame_num, width, height)
This only prints: 155 240 240
but I want the pixel values themselves, like [[1,5,2,3,1...],[54,1,3,5...],[5,8,9,6....]]
Just to add to Dave Chen's answer, as it is unclear whether you want a set of 2D SimpleITK images or numpy arrays. The following code covers all three available options:
import SimpleITK as sitk
import numpy as np
url = "my_file.mha"
image = sitk.ReadImage(url)
max_index = image.GetDepth() # or image.GetWidth() or image.GetHeight() depending on the axis along which you want to extract
# As list of 2D SimpleITK images
list_of_2D_images = [image[:,:,i] for i in range(max_index)]
# As list of 2D numpy arrays which cannot be modified (no data copied)
list_of_2D_images_np_view = [sitk.GetArrayViewFromImage(image[:,:,i]) for i in range(max_index)]
# As list of 2D numpy arrays (data copied to numpy array)
list_of_2D_images_np = [sitk.GetArrayFromImage(image[:,:,i]) for i in range(max_index)]
Also, if you really want to work with URLs and not local files I would suggest looking at the remote download approach used in the SimpleITK notebooks repository, the relevant file is downloaddata.py.
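As a follow-up usage example: if the end goal is one PNG per slice, each extracted 2D image can also be rescaled, cast to an 8-bit pixel type, and written out by SimpleITK directly. A sketch using the image from above:
import SimpleITK as sitk

image = sitk.ReadImage("my_file.mha")
for i in range(image.GetDepth()):
    slice_2d = image[:, :, i]
    # PNG needs an integer pixel type: rescale to 0-255, then cast to uint8
    slice_u8 = sitk.Cast(sitk.RescaleIntensity(slice_2d, 0, 255), sitk.sitkUInt8)
    sitk.WriteImage(slice_u8, 'slice_{}.png'.format(i))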
That's not a big deal.
CT images originally store all values as int16, so you don't need to handle float numbers. Because CT images contain some negative pixel values, we can convert from int16 to uint16 simply by shifting the negative values away. Note that OpenCV really needs a uint16 or uint8 type to handle the data; since a CT image covers a wide range of values, uint16 is the better choice so that we don't lose too much precision.
Ok, now you just need to do as follows:
import SimpleITK as sitk
import numpy as np
import cv2
mha = sitk.ReadImage('/mha/directory') #Importing mha file
array = sitk.GetArrayFromImage(mha) #Converting to array int16 (default)
#Translating each slice to the positive side
for m in range(array.shape[0]):
array[m] = array[m] + abs(np.min(array[m]))
array = np.around(array, decimals=0)  # round away any float values (probably unnecessary)
array = np.asarray(array, dtype='uint16') #From int16 to uint16
After these steps the array is ready to be saved as PNG images using cv2.imwrite. Note that each slice needs its own filename, otherwise every iteration overwrites the same file:
for i, image in enumerate(array):
    cv2.imwrite('/dir/to/save/image_{}.png'.format(i), image)
Note that by default SimpleITK handles .mha files in the axial view. I don't know how to change that since I've never needed to, but with some searching you should find something.
I'm not sure exactly what you want to get, but it's easy to extract a 2D slice from a 3D image in SimpleITK. Keep in mind that SimpleITK indexes images as [x, y, z].
To get a Z slice where Z=100 you can do this:
zslice = image[:, :, 100]
To get a Y slice for Y=100:
yslice = image[:, 100]
And an X slice for X=100:
xslice = image[100]
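Since the question asks for the raw 2D matrix, converting an extracted slice with GetArrayFromImage gives the nested array of pixel values; for example:
import SimpleITK as sitk

image = sitk.ReadImage("my_file.mha")
zslice = image[:, :, 100]
matrix_2d = sitk.GetArrayFromImage(zslice)  # plain 2D numpy array of pixel values
print(matrix_2d)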
@zivy @Dave Chen
I've solved my problem. In fact, running this code gives you 155 240x240 PNG pictures, which is what I wanted:
# -*- coding:utf-8 -*-
import numpy as np
import subprocess
import random
import progressbar
from glob import glob
from skimage import io
np.random.seed(5) # for reproducibility
progress = progressbar.ProgressBar(widgets=[progressbar.Bar('*', '[', ']'), progressbar.Percentage(), ' '])
class BrainPipeline(object):
'''
A class for processing brain scans for one patient
INPUT: (1) filepath 'path': path to directory of one patient. Contains following mha files:
flair, t1, t1c, t2, ground truth (gt)
(2) bool 'n4itk': True to use n4itk normed t1 scans (defaults to True)
(3) bool 'n4itk_apply': True to apply and save n4itk filter to t1 and t1c scans for given patient. This will only work if the
'''
def __init__(self, path, n4itk = True, n4itk_apply = False):
self.path = path
self.n4itk = n4itk
self.n4itk_apply = n4itk_apply
self.modes = ['flair', 't1', 't1c', 't2', 'gt']
# slices=[[flair x 155], [t1], [t1c], [t2], [gt]], 155 per modality
self.slices_by_mode, n = self.read_scans()
# [ [slice1 x 5], [slice2 x 5], ..., [slice155 x 5]]
self.slices_by_slice = n
self.normed_slices = self.norm_slices()
def read_scans(self):
'''
goes into each modality in patient directory and loads individual scans.
transforms scans of same slice into strip of 5 images
'''
print('Loading scans...')
slices_by_mode = np.zeros((5, 155, 240, 240))
slices_by_slice = np.zeros((155, 5, 240, 240))
flair = glob(self.path + '/*Flair*/*.mha')
t2 = glob(self.path + '/*_T2*/*.mha')
gt = glob(self.path + '/*more*/*.mha')
t1s = glob(self.path + '/**/*T1*.mha')
t1_n4 = glob(self.path + '/*T1*/*_n.mha')
t1 = [scan for scan in t1s if scan not in t1_n4]
scans = [flair[0], t1[0], t1[1], t2[0], gt[0]] # directories to each image (5 total)
if self.n4itk_apply:
            print('-> Applying bias correction...')
for t1_path in t1:
self.n4itk_norm(t1_path) # normalize files
scans = [flair[0], t1_n4[0], t1_n4[1], t2[0], gt[0]]
elif self.n4itk:
scans = [flair[0], t1_n4[0], t1_n4[1], t2[0], gt[0]]
        for scan_idx in range(5):
            # read each image directory, save to self.slices
            slices_by_mode[scan_idx] = io.imread(scans[scan_idx], plugin='simpleitk').astype(float)
        for mode_ix in range(slices_by_mode.shape[0]):  # modes 1 thru 5
            for slice_ix in range(slices_by_mode.shape[1]):  # slices 1 thru 155
                slices_by_slice[slice_ix][mode_ix] = slices_by_mode[mode_ix][slice_ix]  # reshape by slice
return slices_by_mode, slices_by_slice
def norm_slices(self):
'''
normalizes each slice in self.slices_by_slice, excluding gt
subtracts mean and div by std dev for each slice
clips top and bottom one percent of pixel intensities
if n4itk == True, will apply n4itk bias correction to T1 and T1c images
'''
print('Normalizing slices...')
normed_slices = np.zeros((155, 5, 240, 240))
        for slice_ix in range(155):
normed_slices[slice_ix][-1] = self.slices_by_slice[slice_ix][-1]
for mode_ix in xrange(4):
normed_slices[slice_ix][mode_ix] = self._normalize(self.slices_by_slice[slice_ix][mode_ix])
print('Done.')
return normed_slices
def _normalize(self, slice):
'''
INPUT: (1) a single slice of any given modality (excluding gt)
(2) index of modality assoc with slice (0=flair, 1=t1, 2=t1c, 3=t2)
OUTPUT: normalized slice
'''
b, t = np.percentile(slice, (0.5,99.5))
slice = np.clip(slice, b, t)
if np.std(slice) == 0:
return slice
else:
return (slice - np.mean(slice)) / np.std(slice)
def save_patient(self, reg_norm_n4, patient_num):
'''
INPUT: (1) int 'patient_num': unique identifier for each patient
(2) string 'reg_norm_n4': 'reg' for original images, 'norm' normalized images, 'n4' for n4 normalized images
OUTPUT: saves png in Norm_PNG directory for normed, Training_PNG for reg
'''
print('Saving scans for patient {}...'.format(patient_num))
progress.currval = 0
if reg_norm_n4 == 'norm': #saved normed slices
            for slice_ix in progress(range(155)):  # reshape to strip
strip = self.normed_slices[slice_ix].reshape(1200, 240)
if np.max(strip) != 0: # set values < 1
strip /= np.max(strip)
if np.min(strip) <= -1: # set values > -1
strip /= abs(np.min(strip))
# save as patient_slice.png
io.imsave('Norm_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)
elif reg_norm_n4 == 'reg':
            for slice_ix in progress(range(155)):
strip = self.slices_by_slice[slice_ix].reshape(1200, 240)
if np.max(strip) != 0:
strip /= np.max(strip)
io.imsave('Training_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)
else:
            for slice_ix in progress(range(155)):  # reshape to strip
strip = self.normed_slices[slice_ix].reshape(1200, 240)
if np.max(strip) != 0: # set values < 1
strip /= np.max(strip)
if np.min(strip) <= -1: # set values > -1
strip /= abs(np.min(strip))
# save as patient_slice.png
io.imsave('n4_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)
def n4itk_norm(self, path, n_dims=3, n_iters='[20,20,10,5]'):
'''
INPUT: (1) filepath 'path': path to mha T1 or T1c file
(2) directory 'parent_dir': parent directory to mha file
OUTPUT: writes n4itk normalized image to parent_dir under orig_filename_n.mha
'''
output_fn = path[:-4] + '_n.mha'
# run n4_bias_correction.py path n_dim n_iters output_fn
subprocess.call('python n4_bias_correction.py ' + path + ' ' + str(n_dims) + ' ' + n_iters + ' ' + output_fn, shell = True)
def save_patient_slices(patients, type):
'''
INPUT (1) list 'patients': paths to any directories of patients to save. for example- glob("Training/HGG/**")
(2) string 'type': options = reg (non-normalized), norm (normalized, but no bias correction), n4 (bias corrected and normalized)
    saves strips of patient slices to appropriate directory (Training_PNG/, Norm_PNG/ or n4_PNG/) as patient-num_slice-num
'''
for patient_num, path in enumerate(patients):
a = BrainPipeline(path)
a.save_patient(type, patient_num)
def s3_dump(directory, bucket):
'''
dump files from a given directory to an s3 bucket
INPUT (1) string 'directory': directory containing files to save
    (2) string 'bucket': name of s3 bucket to dump files
'''
    subprocess.call('aws s3 cp' + ' ' + directory + ' ' + 's3://' + bucket + ' ' + '--recursive', shell=True)
def save_labels(fns):
'''
INPUT list 'fns': filepaths to all labels
'''
    progress.currval = 0
    for label_idx in progress(range(len(fns))):
        slices = io.imread(fns[label_idx], plugin='simpleitk')
        for slice_idx in range(len(slices)):
            io.imsave(r'{}_{}L.png'.format(label_idx, slice_idx), slices[slice_idx])
if __name__ == '__main__':
url = r'G:\work\deeplearning\BRATS2015_Training\HGG\brats_2013_pat0005_1\VSD.Brain.XX.O.MR_T1.54537\VSD.Brain.XX.O.MR_T1.54537.mha'
labels = glob(url)
save_labels(labels)
# patients = glob('Training/HGG/**')
# save_patient_slices(patients, 'reg')
# save_patient_slices(patients, 'norm')
# save_patient_slices(patients, 'n4')
# s3_dump('Graveyard/Training_PNG/', 'orig-training-png')
I couldn't find the solution. My image shape is 128*128*3, so it has three channels, but it still causes this error:
File "E:/ML/keras_test/vgg.py", line 30, in load_data
    data[i,:,:,:] = arr
ValueError: could not broadcast input array from shape (128,128) into shape (128,128,3)
My code is below:
import os
import numpy as np
from PIL import Image

def load_data(path):
    data = np.empty((12755,128,128,3), dtype="uint8")
    label = np.empty((12755,), dtype="uint8")
    imgs = []
    imgs_name = []
    for each_person in os.listdir(path):
        temp = os.path.join(path, each_person)
        for each_image in os.listdir(temp):
            imgs.append(temp + "\\" + each_image)
            imgs_name.append(each_image)
    num = len(imgs)
    for i in range(num):
        img = Image.open(imgs[i])
        arr = np.asarray(img, dtype="uint8")
        print(arr.shape)
        data[i,:,:,:] = arr
        label[i] = int(imgs_name[i].split('.')[0])
    print('load_data is ok!' + str(data.shape[0]))
    return data, label
You are trying to put the original data into a smaller package, which is impossible. I think what you want is to turn an image that has RGB channels into a grayscale image with one channel. Try
datum = (imgs.sum(axis=2) / 3).reshape((12755, -1))
The resulting datum is a 12755 x 16384 array.
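Another possibility: if the goal is to keep three channels, this error usually means a few files in the folder are grayscale, so PIL returns them as (128, 128). Forcing RGB when opening sidesteps that; a sketch against the question's loop variables:
from PIL import Image
import numpy as np

img = Image.open(imgs[i]).convert('RGB')  # grayscale files become 3-channel
arr = np.asarray(img, dtype="uint8")      # now always (128, 128, 3)
data[i,:,:,:] = arr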
I'm trying to build a numpy array of arrays of arrays with the code below, which gives me a
ValueError: setting an array element with a sequence.
My guess is that in numpy I need to declare the arrays as multi-dimensional from the beginning, but I'm not sure.
How can I fix the code below so that I can build an array of arrays of arrays?
from PIL import Image
import pickle
import os
import numpy
indir1 = 'PositiveResize'
trainimage = numpy.empty(2)
trainpixels = numpy.empty(80000)
trainlabels = numpy.empty(80000)
validimage = numpy.empty(2)
validpixels = numpy.empty(10000)
validlabels = numpy.empty(10000)
testimage = numpy.empty(2)
testpixels = numpy.empty(10408)
testlabels = numpy.empty(10408)
i=0
tr=0
va=0
te=0
for (root, dirs, filenames) in os.walk(indir1):
    print('hello')
for f in filenames:
try:
im = Image.open(os.path.join(root,f))
Imv=im.load()
x,y=im.size
pixelv = numpy.empty(6400)
ind=0
for i in range(x):
for j in range(y):
temp=float(Imv[j,i])
temp=float(temp/255.0)
pixelv[ind]=temp
ind+=1
if i<40000:
trainpixels[tr]=pixelv
tr+=1
elif i<45000:
validpixels[va]=pixelv
va+=1
else:
testpixels[te]=pixelv
te+=1
            print(str(i) + '\t' + str(f))
i+=1
except IOError:
continue
trainimage[0]=trainpixels
trainimage[1]=trainlabels
validimage[0]=validpixels
validimage[1]=validlabels
testimage[0]=testpixels
testimage[1]=testlabels
Don't try to smash your entire object into a numpy array. If you have distinct things, use a numpy array for each one then use an appropriate data structure to hold them together.
For instance, if you want to do computations across images then you probably want to just store the pixels and labels in separate arrays.
trainpixels = np.empty([10000, 80, 80])
trainlabels = np.empty(10000)
for i in range(10000):
trainpixels[i] = ...
trainlabels[i] = ...
To access an individual image's data:
imagepixels = trainpixels[253]
imagelabel = trainlabels[253]
And you can easily do stuff like compute summary statistics over the images.
meanimage = np.mean(trainpixels, axis=0)
meanlabel = np.mean(trainlabels)
If you really want all the data to be in the same object, you should probably use a struct array as Eelco Hoogendoorn suggests. Some example usage:
# Construction and assignment
trainimages = np.empty(10000, dtype=[('label', int), ('pixel', int, (80,80))])
for i in range(10000):
trainimages['label'][i] = ...
trainimages['pixel'][i] = ...
# Summary statistics
meanimage = np.mean(trainimages['pixel'], axis=0)
meanlabel = np.mean(trainimages['label'])
# Accessing a single image
image = trainimages[253]
imagepixels, imagelabel = trainimages[['pixel', 'label']][253]
Alternatively, if you want to process each one separately, you could store each image's data in separate arrays and bind them together in a tuple or dictionary, then store all of that in a list.
trainimages = []
for i in range(10000):
pixels = ...
label = ...
image = (pixels, label)
trainimages.append(image)
Now to access a single image's data:
imagepixels, imagelabel = trainimages[253]
This makes it more intuitive to access a single image, but because all the data is not in one big numpy array you don't get easy access to functions that work across images.
Refer to the examples in numpy.empty:
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #random
Give your array the full shape, with all N dimensions, up front:
testpixels = numpy.empty([10408, 80, 80])
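For example, with the question's test set of 10408 images of 80x80 = 6400 pixels, each flattened pixel vector can then be assigned into a pre-shaped slot:
import numpy

testpixels = numpy.empty([10408, 80, 80])
pixelv = numpy.random.rand(6400)          # stands in for one flattened image
testpixels[0] = pixelv.reshape(80, 80)    # assign into a pre-shaped slot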