My code below is intended to get a batch of images and convert them to RGB. But I keep getting an error which says to convert to type uint8. I have seen other questions regarding the conversion to uint8, but none directly from an array to uint8. Any advice on how to make that happen is welcome, thank you!
from skimage import io
import numpy as np
import glob, os
from tkinter import Tk
from tkinter.filedialog import askdirectory
import cv2
# wavelength in microns
MWIR = 4.5
R = .692
G = .582
B = .140
rgb_sum = R + G + B
NRed = R/rgb_sum
NGreen = G/rgb_sum
NBlue = B/rgb_sum
path = askdirectory(title='Select PNG Folder') # shows dialog box and return the path
outpath = askdirectory(title='Select SAVE Folder')
for file in os.listdir(path):
    if file.endswith(".png"):
        imIn = io.imread(os.path.join(path, file))
        imOut = np.zeros(imIn.shape)
        for i in range(imIn.shape[0]): # Assuming Rayleigh-Jeans law
            for j in range(imIn.shape[1]):
                imOut[i,j,0] = imIn[i,j,0]/((NRed/MWIR)**4)
                imOut[i,j,1] = imIn[i,j,0]/((NGreen/MWIR)**4)
                imOut[i,j,2] = imIn[i,j,0]/((NBlue/MWIR)**4)
        io.imsave(os.path.join(outpath, file) + '_RGB.png', imOut)
The code I am trying to integrate into my own (found in another thread, used to convert the type to uint8) is:
info = np.iinfo(data.dtype) # Get the information of the incoming image type
data = data.astype(np.float64) / info.max # normalize the data to 0 - 1
data = 255 * data # Now scale by 255
img = data.astype(np.uint8)
cv2.imshow("Window", img)
thank you!
Normally imIn is of type uint8; after your normalisation, imOut is a float64 array (np.zeros defaults to float64, and the division also produces floats). You must convert back to uint8 before saving to a PNG file:
io.imsave(os.path.join(outpath, file) + '_RGB.png', imOut.astype(np.uint8))
Note that the two loops are not necessary, you can use numpy vector operations instead:
MWIR = 4.5
R = .692
G = .582
B = .140
vector = np.array([R, G, B])
vector = vector / vector.sum()
vector = vector / MWIR
vector = np.power(vector, 4)
for file in os.listdir(path):
    if file.endswith(".png"):
        imgIn = ...
        # same operation as the nested loops: channel 0 divided by each per-channel factor
        imgOut = imgIn[:, :, 0:1] / vector
        io.imsave(
            os.path.join(outpath, file) + '_RGB.png',
            imgOut.astype(np.uint8))
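One caveat worth adding here (my note, not part of the original answer): astype(np.uint8) wraps values that fall outside 0-255, so if the scaled result can leave that range it is safer to clip first. A minimal sketch, assuming imgOut is the float array computed above:
import numpy as np
# Clip to the valid 8-bit range before casting, so out-of-range values saturate instead of wrapping.
imgOut_u8 = np.clip(imgOut, 0, 255).astype(np.uint8)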
I'm wondering if it is possible to vectorise the nested for loop in my code to speed up the code. I am basically trying to split (or trim) an image into smaller bits.
import numpy as np
import os
from PIL import Image
image_path = 'sample.png'
number_of_rows = 4
input_np_arr_image = np.asarray(Image.open(image_path))
height, width = input_np_arr_image.shape[0:2]
height_trimmed = (height // number_of_rows) * number_of_rows
width_trimmed = (width // number_of_rows) * number_of_rows
trimmed_image = input_np_arr_image[:height_trimmed, :width_trimmed]
image_pieces = [np.hsplit(segment, 4)
                for segment in np.vsplit(trimmed_image, 4)]
I then try to save the result using this nested for loop
for row in range(len(image_pieces)):
    for col in range(len(image_pieces[row])):
        # create output image name
        segment_name = f"{image_path[:len(image_path)-4]}_{row}_{col}{image_path[-4:]}"
        # convert array to image
        label_image = Image.fromarray(image_pieces[row][col])
        # make output directory for saving
        output_directory = "split_images"
        os.makedirs(output_directory, exist_ok=True)
        label_image.save(output_directory + "/" + segment_name)
I'm curious if it's possible to do this without using the nested for loops. Thanks
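The saving itself still has to happen once per piece, but the explicit nesting can be flattened. A minimal sketch (my own suggestion, not from the original thread) using itertools.product over the row/column index pairs, assuming image_pieces and image_path from above:
import os
from itertools import product
from PIL import Image

output_directory = "split_images"
os.makedirs(output_directory, exist_ok=True)
stem, ext = image_path[:-4], image_path[-4:]
# one loop over all (row, col) pairs instead of two nested loops
for row, col in product(range(len(image_pieces)), range(len(image_pieces[0]))):
    Image.fromarray(image_pieces[row][col]).save(
        os.path.join(output_directory, f"{stem}_{row}_{col}{ext}"))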
I have code for finding the entropy of an image by resizing the image and dividing it into its RGB channels.
import os
from PIL import Image
import numpy as np
from scipy.misc import imread
import cv2
import imageio
# Read the RGB image
def openRGB(image_path):
    f = open(image_path, "rb")
    data = f.read()
    f.close()
    data = [int(x) for x in data]
    data = np.array(data).reshape((256*256, 3)).astype(np.uint8)
    return data

def entropy(X):
    n = len(X)
    counts = np.bincount(X)
    probs = counts[np.nonzero(counts)] / n
    en = 0
    for i in range(len(probs)):
        en = en - probs[i] * np.log(probs[i])/np.log(2)
    return en

def getEntropy(image_path):
    data = openRGB(image_path)
    data_B = data[:, 0]
    data_G = data[:, 1]
    data_R = data[:, 2]
    B = entropy(data_B)
    G = entropy(data_G)
    R = entropy(data_R)
    return (R+B+G)/2
However, whenever I run the getEntropy() function on a given image, it keeps giving back this error:
ValueError: cannot reshape array of size 37048 into shape (65536,3)
Any idea how I can reformat the image to fit that array shape?
There is a simple explanation: the file at image_path is only 37048 bytes long.
When using np.array(data).reshape((256*256, 3)), the length of data must be 256*256*3 = 196608 bytes.
You are getting an exception because the lengths do not match.
It's simple to reproduce the problem.
Create an input sample file of 196608 bytes, and there is no exception.
Create an input sample file of 37048 bytes, and you get the exception.
Here is a code sample that reproduces the problem:
import os
#from PIL import Image
import numpy as np
#from scipy.misc import imread
#import cv2
#import imageio
def openRGB(image_path):
    f = open(image_path, "rb")
    data = f.read()
    f.close()
    data = [int(x) for x in data]
    data = np.array(data).reshape((256*256, 3)).astype(np.uint8)
    return data

def entropy(X):
    n = len(X)
    counts = np.bincount(X)
    probs = counts[np.nonzero(counts)] / n
    en = 0
    for i in range(len(probs)):
        en = en - probs[i] * np.log(probs[i])/np.log(2)
    return en

def getEntropy(image_path):
    data = openRGB(image_path)
    data_B = data[:, 0]
    data_G = data[:, 1]
    data_R = data[:, 2]
    B = entropy(data_B)
    G = entropy(data_G)
    R = entropy(data_R)
    return (R+B+G)/2
# Create a binary file with random bytes
image_path = 'tmp.bin'
# When n_bytes=196608, there is no exception.
################################################################################
n_bytes = 256*256*3
tmp = np.random.randint(0, 255, n_bytes, np.uint8) # Build random array of n_bytes bytes
with open(image_path, 'wb') as f:
    f.write(tmp)  # Write tmp to a binary file
file_size_in_bytes = os.path.getsize(image_path)
print('file_size_in_bytes = ' + str(file_size_in_bytes))
res = getEntropy(image_path)
print(res)
################################################################################
# When n_bytes=37048, an exception is raised: ValueError: cannot reshape array of size 37048 into shape (65536,3)
################################################################################
n_bytes = 37048
tmp = np.random.randint(0, 255, n_bytes, np.uint8) # Build random array of n_bytes bytes
with open(image_path, 'wb') as f:
    f.write(tmp)  # Write tmp to a binary file
file_size_in_bytes = os.path.getsize(image_path)
print('file_size_in_bytes = ' + str(file_size_in_bytes))
res = getEntropy(image_path)
print(res)
################################################################################
Why are you reading 37048 bytes instead of 196608 bytes?
image_path is a JPEG image file, and you are reading the raw file using f = open(image_path, "rb") and data = f.read(). Because JPEG is a compressed format, the number of bytes you read is the compressed file size, not width*height*3.
You may read and reshape the image as follows:
import cv2

def openRGB(image_path):
    # For example: image_path = 'img.jpg'
    img = cv2.imread(image_path)  # Read image in BGR color format.
    data = np.array(img).reshape(img.shape[0]*img.shape[1], 3)  # Reshape to (rows*cols, 3): blue in data[:, 0], green in data[:, 1], red in data[:, 2].
    return data
In the above example I used img.shape[0]*img.shape[1]; the image resolution is:
height = img.shape[0]
width = img.shape[1]
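As a side note (my addition, not part of the original answer), the Python loop inside entropy can also be replaced by vectorised NumPy operations; a minimal sketch, assuming a 1-D uint8 channel array X as in the code above:
def entropy_vectorised(X):
    # Shannon entropy in bits, without an explicit Python loop
    counts = np.bincount(X)
    probs = counts[counts > 0] / len(X)
    return -np.sum(probs * np.log2(probs))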
I'm trying to sort an image by luminosity using NumPy, which I'm new to. I've managed to create a random image and sort it.
def create_image(output, width, height, arr):
    array = np.zeros([height, width, 3], dtype=np.uint8)
    numOfSwatches = len(arr)
    swatchWidth = int(width / numOfSwatches)
    for i in range(0, numOfSwatches):
        m = i * swatchWidth
        r = (i+1) * swatchWidth
        array[:, m:r] = arr[i]
    img = Image.fromarray(array)
    img.save(output)
    return img
Which creates this image:
So far so good. Only now I want to switch from creating random images to loading them and then sorting them.
#!/usr/bin/python3
import math
import numpy as np
from PIL import Image
# --------------------------------------------------------------
def load_image(infilename):
    img = Image.open(infilename)
    img.load()
    data = np.asarray(img, dtype="int32")
    return data
# --------------------------------------------------------------
def lum(r, g, b):
    return math.sqrt(.241 * r + .691 * g + .068 * b)

myImageFile = "random_colours.png"
imageNP = load_image(myImageFile)
imageNP.sort(key=lambda rgb: lum(*rgb))
The image should look like this:
The error I get is TypeError: 'key' is an invalid keyword argument for this function. I may have created the NumPy array incorrectly, as this worked when it was a random NumPy array.
I have never used PIL, but the following approach hopefully works (I'm not sure, as I can't reproduce your exact examples), and of course there might be more efficient ways to do so.
I'm using your functions, having changed math.sqrt to np.sqrt in the lum function, as it is better for vector calculations. By the way, I believe this won't work with an int32 array (as in your load_image function).
The key part is NumPy's argsort function (last line), which gives the indices that would sort the given array; this is applied to a row of the luminosity array (exploiting symmetry) and later used as an indexer of img_array.
# Create random image
np.random.seed(4)
img = create_image('test.png', 75, 75, np.random.random((25,3))*255)
# Convert to Numpy array and calculate luminosity
img_array = np.array(img, dtype = np.uint8)
luminosity = lum(img_array[...,0], img_array[...,1], img_array[...,2])
# Sort by luminosity and convert to image again
img_sorted = Image.fromarray(img_array[:,luminosity[0].argsort()])
The original picture:
And the luminosity-sorted one:
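For completeness (my own addition, not part of the answer), the same argsort idea can order every pixel by luminosity instead of whole columns; a minimal sketch, reusing img_array and the np.sqrt version of lum from above:
flat = img_array.reshape(-1, 3)
# indices that sort all pixels by luminosity
order = lum(flat[:, 0], flat[:, 1], flat[:, 2]).argsort()
img_all_sorted = Image.fromarray(flat[order].reshape(img_array.shape))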
I am new to Python and machine learning. I have a huge image dataset of cars with more than 27000 images and labels. I am trying to create a dataset so I can use it in my training classifier, but of course handling this amount of data is a real pain for memory, and that's where I am stuck. At first I was trying to do something like this.
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpg
import cv2
import gc
import numpy as np
from sklearn.preprocessing import normalize
import resource
import h5py
bbox = "/run/media/fdai5182/LAMAMADAN/Morethan4000samples/data/labels"
imagepath = "/run/media/fdai5182/LAMAMADAN/Morethan4000samples/data/image"
training_data = []
training_labels = []
count = 0
for root, _, files in os.walk(bbox):
    cdp = os.path.abspath(root)
    for rootImage, _, fileImage in os.walk(imagepath):
        cdpimg = os.path.abspath(rootImage)
        for f in files:
            ct = 0
            name, ext = os.path.splitext(f)
            for fI in fileImage:
                n, e = os.path.splitext(fI)
                if name == n and ext == ".txt" and e == ".jpg":
                    cip = os.path.join(cdp, f)
                    cipimg = os.path.join(cdpimg, fI)
                    txt = open(cip, "r")
                    for q in txt:
                        ct = ct + 1
                        if ct == 3:
                            x1 = int(q.rsplit(' ')[0])
                            y1 = int(q.rsplit(' ')[1])
                            x2 = int(q.rsplit(' ')[2])
                            y2 = int(q.rsplit(' ')[3])
                            try:
                                read_img = mpg.imread(cipimg)
                                read_img = read_img.astype('float32')
                                read_img_bbox = read_img[y1:y2, x1:x2, :]
                                resize_img = cv2.cv2.resize(read_img_bbox, (300, 300))
                                resize_img /= 255.0
                                training_labels.append(int(cipimg.split('\\')[4]))
                                training_data.append(resize_img)
                                print("len Of Training_data", len(training_data))
                                training_labels.append(int(cipimg.split('/')[8]))
                                del resize_img
                                print("len Of Training Labels", len(training_labels))
                                gc.collect()
                            except Exception as e:
                                print("Error", str(e), cip)
                                count = count + 1
                                print(count)
                    txt.flush()
                    txt.close()
np.save('/run/media/fdai5182/LAMA MADAN/Training_Data_4000Samples', training_data)
np.save('/run/media/fdai5182/LAMA MADAN/Training_Labels_4000Samples', training_labels)
print("DONE")
But it always gives me a huge memory error after reading the images, even with 32 GB of RAM.
So instead I want to take some other steps that may use less memory and get this working.
The steps I want to do are as follows (see the sketch after this list):
1. Allocate a NumPy array X of shape (N, 150, 150, 3) or (N, 300, 300, 3) and type float32 (not via astype).
2. Iterate through the images and fill each row of array X with the 150x150x3 image pixels.
3. Normalize in place: X /= 255.
4. Write to file (.npy format).
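A minimal sketch of those four steps (my own illustration, not from the post; the image_paths list and the output filename are placeholders for your own os.walk and cropping logic):
import numpy as np
import cv2

image_paths = [...]  # hypothetical: list of .jpg files collected by your own walk/crop logic
N = len(image_paths)
X = np.zeros((N, 150, 150, 3), dtype=np.float32)  # step 1: preallocate once
for i, p in enumerate(image_paths):               # step 2: fill row by row
    X[i] = cv2.resize(cv2.imread(p), (150, 150))
X /= 255.0                                        # step 3: normalize in place
np.save('training_images.npy', X)                 # step 4: write to .npy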
What I have done till now is:
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpg
import numpy as np
import os
bbox = "/run/media/fdai5182/LAMAMADAN/Morethan4000samples/data/labels"
imagepath = "/run/media/fdai5182/LAMAMADAN/Morethan4000samples/data/image"
for root, _, files in os.walk(bbox):
    cdp = os.path.abspath(root)
    for rootImage, _, fileImage in os.walk(imagepath):
        cdpimg = os.path.abspath(rootImage)
        for f in files:
            ct = 0
            name, ext = os.path.splitext(f)
            for fI in fileImage:
                n, e = os.path.splitext(fI)
                if name == n and ext == ".txt" and e == ".jpg":
                    nparrayX = np.zeros((150, 150, 3)).view('float32')
                    cip = os.path.join(cdp, f)
                    cipImg = os.path.join(cdpimg, fI)
                    read_image = mpg.imread(cipImg)
                    resize_image = cv2.cv2.resize(read_image, (150, 150))
Am I on the right path?
Also, how can I fill each row of the image array with 150x150x3 image pixels? I don't want to use lists anymore, as they take more memory and are time consuming.
Please help me through this.
Also, as a new member, if the question does not follow the rules and regulations of Stack Overflow, please tell me and I will edit it.
Thank you,
Both TensorFlow/Keras and PyTorch provide dataset/generator classes, which you can use to construct memory-efficient data loaders.
For TensorFlow/Keras there is an excellent tutorial created by Stanford's Shervine Amidi.
For PyTorch you can find a good tutorial on the project's main page.
I would strongly suggest making use of these frameworks for your implementation, since they allow you to avoid writing boilerplate code and make your training scalable.
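To illustrate the Keras route (a minimal sketch of my own, not from the original answer; the image_paths and labels lists, batch size, and target size are placeholder assumptions), a tf.keras.utils.Sequence subclass loads only one batch into memory at a time:
import numpy as np
import cv2
import tensorflow as tf

class CarDataset(tf.keras.utils.Sequence):
    # Loads and preprocesses one batch of images at a time.
    def __init__(self, image_paths, labels, batch_size=32, size=(150, 150)):
        self.image_paths, self.labels = image_paths, labels
        self.batch_size, self.size = batch_size, size
    def __len__(self):
        # number of batches per epoch
        return int(np.ceil(len(self.image_paths) / self.batch_size))
    def __getitem__(self, idx):
        lo = idx * self.batch_size
        hi = lo + self.batch_size
        batch = [cv2.resize(cv2.imread(p), self.size) for p in self.image_paths[lo:hi]]
        X = np.asarray(batch, dtype=np.float32) / 255.0
        y = np.asarray(self.labels[lo:hi])
        return X, y

# usage with the hypothetical lists: model.fit(CarDataset(image_paths, labels), epochs=10)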
Thank you for your help. But I wanted to do it manually, to see how we can do it without using other generators. Below is my code.
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpg
import numpy as np
import os

N = 0
training_labels = []
bbox = "D:/Morethan4000samples/data/labels"
imagepath = "D:/Morethan4000samples/data/image/"

for root, _, files in os.walk(imagepath):
    cdp = os.path.abspath(root)
    for f in files:
        name, ext = os.path.splitext(f)
        if ext == ".jpg":
            cip = os.path.join(cdp, f)
            N += 1
print(N)

imageX = np.zeros((N, 227, 227, 3), dtype='float32')

i = 0
for root, _, files in os.walk(imagepath):
    cdp = os.path.abspath(root)
    print(cdp)
    for f in files:
        ct = 0
        name, ext = os.path.splitext(f)
        if ext == ".jpg":
            cip = os.path.join(cdp, f)
            read = mpg.imread(cip)
            cipLabel = cip.replace('image', 'labels')
            cipLabel = cipLabel.replace('.jpg', '.txt')
            nameL, extL = os.path.splitext(cipLabel)
            if extL == '.txt':
                boxes = open(cipLabel, 'r')
                for q in boxes:
                    ct = ct + 1
                    if ct == 3:
                        x1 = int(q.rsplit(' ')[0])
                        y1 = int(q.rsplit(' ')[1])
                        x2 = int(q.rsplit(' ')[2])
                        y2 = int(q.rsplit(' ')[3])
                        readimage = read[y1:y2, x1:x2]
                        resize = cv2.cv2.resize(readimage, (227, 227))
                        resize = cv2.cv2.GaussianBlur(resize, (5, 5), 0)
                        imageX[i] = resize
                        #training_labels.append(int(cip.split('\\')[4]))
                        training_labels.append(int(cip.split('/')[8]))
                        print(len(training_labels), len(imageX))
                        i += 1
            print(i)

imageX /= 255.0
plt.imshow(imageX[10])
plt.show()
print(imageX.shape)
print(len(training_labels))
np.save("/run/media/fdai5182/LAMA MADAN/Morethan4000samples/227227/training_images", imageX)
np.save("/run/media/fdai5182/LAMA MADAN/Morethan4000samples/227227/trainin_labels", training_labels)
Saving each of your images as one row of a preallocated matrix of fixed dimensions is the most memory-efficient way to do this.
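If even the preallocated array does not fit in RAM, one further option (my own note, not from the thread) is to write rows directly into a memory-mapped .npy file; a minimal sketch, assuming N and a hypothetical image_paths list as above:
import numpy as np
import cv2

# Creates the .npy file on disk and maps it lazily, so only the rows being written need to sit in RAM.
X = np.lib.format.open_memmap('training_images.npy', mode='w+',
                              dtype=np.float32, shape=(N, 227, 227, 3))
for i, p in enumerate(image_paths):            # hypothetical list of .jpg paths
    img = cv2.resize(cv2.imread(p), (227, 227))
    X[i] = img.astype(np.float32) / 255.0      # normalize each row as it is written
X.flush()  # make sure everything is flushed to disk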
I want to use SimpleITK or medpy to convert the 3D images into 2D images.
Or I want to get a three-dimensional matrix, and then divide the three-dimensional matrix into some two-dimensional matrices.
import SimpleITK as ITK
import numpy as np
#from medpy.io import load
url = r'G:\path\to\my.mha'
image = ITK.ReadImage(url)
image_array = ITK.GetArrayFromImage(image)
frame_num, width, height = image_array.shape
print(frame_num, width, height)
That only prints 155 240 240,
but I want to get [[1,5,2,3,1...],[54,1,3,5...],[5,8,9,6....]]
Just to add to Dave Chen's answer, as it is unclear if you want to get a set of 2D SimpleITK images or numpy arrays. The following code covers all three available options:
import SimpleITK as sitk
import numpy as np
url = "my_file.mha"
image = sitk.ReadImage(url)
max_index = image.GetDepth() # or image.GetWidth() or image.GetHeight() depending on the axis along which you want to extract
# As list of 2D SimpleITK images
list_of_2D_images = [image[:,:,i] for i in range(max_index)]
# As list of 2D numpy arrays which cannot be modified (no data copied)
list_of_2D_images_np_view = [sitk.GetArrayViewFromImage(image[:,:,i]) for i in range(max_index)]
# As list of 2D numpy arrays (data copied to numpy array)
list_of_2D_images_np = [sitk.GetArrayFromImage(image[:,:,i]) for i in range(max_index)]
Also, if you really want to work with URLs and not local files I would suggest looking at the remote download approach used in the SimpleITK notebooks repository, the relevant file is downloaddata.py.
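If the end goal is PNG files, as in the asker's final solution further down, one way (a sketch of my own, not part of this answer) is to rescale each 2D slice to 8 bits and write it with SimpleITK directly:
import SimpleITK as sitk

image = sitk.ReadImage("my_file.mha")
for i in range(image.GetDepth()):
    slice_2d = image[:, :, i]
    # rescale intensities to 0-255 and cast to unsigned 8-bit so PNG can store it
    slice_u8 = sitk.Cast(sitk.RescaleIntensity(slice_2d, 0, 255), sitk.sitkUInt8)
    sitk.WriteImage(slice_u8, "slice_{:03d}.png".format(i))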
That's not a big deal.
CT images originally have all their values in int16, so you don't need to handle float numbers. In this case we can easily change from int16 to uint16 just by removing the negative values in the image (CT images have some negative pixel values). Note that we really need a uint16 or uint8 type so that OpenCV can handle it; since the CT image array covers a large range of values, the best choice is uint16, so that we don't lose too much precision.
Ok, now you just need to do as follows:
import SimpleITK as sitk
import numpy as np
import cv2
mha = sitk.ReadImage('/mha/directory')  # Importing mha file
array = sitk.GetArrayFromImage(mha)  # Converting to array, int16 (default)

# Translating each slice to the positive side
for m in range(array.shape[0]):
    array[m] = array[m] + abs(np.min(array[m]))

array = np.around(array, decimals=0)  # remove any float numbers if they exist.. probably not
array = np.asarray(array, dtype='uint16')  # From int16 to uint16
After these steps the array is ready to be saved as PNG images using cv2.imwrite:
for image in array:
    cv2.imwrite('/dir/to/save/' + 'name_image.png', image)
Note that by default SimpleITK reads .mha files in the axial view. I don't really know how to change that, because I've never needed to, but in this case with some searching you can find something.
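As a small variation (my own note, not from this answer), if 8-bit PNGs are enough, OpenCV can do the shift-and-scale in one step per slice:
import numpy as np
import cv2

for i, slice_16 in enumerate(array):
    # map the slice's min..max range onto 0-255 and cast to uint8
    slice_u8 = cv2.normalize(slice_16.astype(np.float32), None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
    cv2.imwrite('slice_{:03d}.png'.format(i), slice_u8)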
I'm not sure exactly what you want to get. But it's easy to extract a 2d slice from a 3d image in SimpleITK.
To get a Z slice where Z=100 you can do this:
zslice = image[:, :, 100]
To get a Y slice for Y=100:
yslice = image[:, 100]
And an X slice for X=100:
xslice = image[100]
@zivy @Dave Chen
I've solved my problem. In fact, running this code will give you 155 240x240 PNG pictures, which is what I wanted to get.
# -*- coding:utf-8 -*-
import numpy as np
import subprocess
import random
import progressbar
from glob import glob
from skimage import io

np.random.seed(5)  # for reproducibility
progress = progressbar.ProgressBar(widgets=[progressbar.Bar('*', '[', ']'), progressbar.Percentage(), ' '])

class BrainPipeline(object):
    '''
    A class for processing brain scans for one patient
    INPUT: (1) filepath 'path': path to directory of one patient. Contains following mha files:
               flair, t1, t1c, t2, ground truth (gt)
           (2) bool 'n4itk': True to use n4itk normed t1 scans (defaults to True)
           (3) bool 'n4itk_apply': True to apply and save n4itk filter to t1 and t1c scans for given patient. This will only work if the
    '''
    def __init__(self, path, n4itk=True, n4itk_apply=False):
        self.path = path
        self.n4itk = n4itk
        self.n4itk_apply = n4itk_apply
        self.modes = ['flair', 't1', 't1c', 't2', 'gt']
        # slices=[[flair x 155], [t1], [t1c], [t2], [gt]], 155 per modality
        self.slices_by_mode, n = self.read_scans()
        # [ [slice1 x 5], [slice2 x 5], ..., [slice155 x 5]]
        self.slices_by_slice = n
        self.normed_slices = self.norm_slices()

    def read_scans(self):
        '''
        goes into each modality in patient directory and loads individual scans.
        transforms scans of same slice into strip of 5 images
        '''
        print('Loading scans...')
        slices_by_mode = np.zeros((5, 155, 240, 240))
        slices_by_slice = np.zeros((155, 5, 240, 240))
        flair = glob(self.path + '/*Flair*/*.mha')
        t2 = glob(self.path + '/*_T2*/*.mha')
        gt = glob(self.path + '/*more*/*.mha')
        t1s = glob(self.path + '/**/*T1*.mha')
        t1_n4 = glob(self.path + '/*T1*/*_n.mha')
        t1 = [scan for scan in t1s if scan not in t1_n4]
        scans = [flair[0], t1[0], t1[1], t2[0], gt[0]]  # directories to each image (5 total)
        if self.n4itk_apply:
            print('-> Applying bias correction...')
            for t1_path in t1:
                self.n4itk_norm(t1_path)  # normalize files
            scans = [flair[0], t1_n4[0], t1_n4[1], t2[0], gt[0]]
        elif self.n4itk:
            scans = [flair[0], t1_n4[0], t1_n4[1], t2[0], gt[0]]
        for scan_idx in xrange(5):
            # read each image directory, save to self.slices
            slices_by_mode[scan_idx] = io.imread(scans[scan_idx], plugin='simpleitk').astype(float)
        for mode_ix in xrange(slices_by_mode.shape[0]):  # modes 1 thru 5
            for slice_ix in xrange(slices_by_mode.shape[1]):  # slices 1 thru 155
                slices_by_slice[slice_ix][mode_ix] = slices_by_mode[mode_ix][slice_ix]  # reshape by slice
        return slices_by_mode, slices_by_slice

    def norm_slices(self):
        '''
        normalizes each slice in self.slices_by_slice, excluding gt
        subtracts mean and div by std dev for each slice
        clips top and bottom one percent of pixel intensities
        if n4itk == True, will apply n4itk bias correction to T1 and T1c images
        '''
        print('Normalizing slices...')
        normed_slices = np.zeros((155, 5, 240, 240))
        for slice_ix in xrange(155):
            normed_slices[slice_ix][-1] = self.slices_by_slice[slice_ix][-1]
            for mode_ix in xrange(4):
                normed_slices[slice_ix][mode_ix] = self._normalize(self.slices_by_slice[slice_ix][mode_ix])
        print('Done.')
        return normed_slices

    def _normalize(self, slice):
        '''
        INPUT: (1) a single slice of any given modality (excluding gt)
               (2) index of modality assoc with slice (0=flair, 1=t1, 2=t1c, 3=t2)
        OUTPUT: normalized slice
        '''
        b, t = np.percentile(slice, (0.5, 99.5))
        slice = np.clip(slice, b, t)
        if np.std(slice) == 0:
            return slice
        else:
            return (slice - np.mean(slice)) / np.std(slice)

    def save_patient(self, reg_norm_n4, patient_num):
        '''
        INPUT: (1) int 'patient_num': unique identifier for each patient
               (2) string 'reg_norm_n4': 'reg' for original images, 'norm' normalized images, 'n4' for n4 normalized images
        OUTPUT: saves png in Norm_PNG directory for normed, Training_PNG for reg
        '''
        print('Saving scans for patient {}...'.format(patient_num))
        progress.currval = 0
        if reg_norm_n4 == 'norm':  # save normed slices
            for slice_ix in progress(xrange(155)):  # reshape to strip
                strip = self.normed_slices[slice_ix].reshape(1200, 240)
                if np.max(strip) != 0:  # set values < 1
                    strip /= np.max(strip)
                if np.min(strip) <= -1:  # set values > -1
                    strip /= abs(np.min(strip))
                # save as patient_slice.png
                io.imsave('Norm_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)
        elif reg_norm_n4 == 'reg':
            for slice_ix in progress(xrange(155)):
                strip = self.slices_by_slice[slice_ix].reshape(1200, 240)
                if np.max(strip) != 0:
                    strip /= np.max(strip)
                io.imsave('Training_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)
        else:
            for slice_ix in progress(xrange(155)):  # reshape to strip
                strip = self.normed_slices[slice_ix].reshape(1200, 240)
                if np.max(strip) != 0:  # set values < 1
                    strip /= np.max(strip)
                if np.min(strip) <= -1:  # set values > -1
                    strip /= abs(np.min(strip))
                # save as patient_slice.png
                io.imsave('n4_PNG/{}_{}.png'.format(patient_num, slice_ix), strip)

    def n4itk_norm(self, path, n_dims=3, n_iters='[20,20,10,5]'):
        '''
        INPUT: (1) filepath 'path': path to mha T1 or T1c file
               (2) directory 'parent_dir': parent directory to mha file
        OUTPUT: writes n4itk normalized image to parent_dir under orig_filename_n.mha
        '''
        output_fn = path[:-4] + '_n.mha'
        # run n4_bias_correction.py path n_dim n_iters output_fn
        subprocess.call('python n4_bias_correction.py ' + path + ' ' + str(n_dims) + ' ' + n_iters + ' ' + output_fn, shell=True)

def save_patient_slices(patients, type):
    '''
    INPUT: (1) list 'patients': paths to any directories of patients to save. for example- glob("Training/HGG/**")
           (2) string 'type': options = reg (non-normalized), norm (normalized, but no bias correction), n4 (bias corrected and normalized)
    saves strips of patient slices to appropriate directory (Training_PNG/, Norm_PNG/ or n4_PNG/) as patient-num_slice-num
    '''
    for patient_num, path in enumerate(patients):
        a = BrainPipeline(path)
        a.save_patient(type, patient_num)

def s3_dump(directory, bucket):
    '''
    dump files from a given directory to an s3 bucket
    INPUT: (1) string 'directory': directory containing files to save
           (2) string 'bucket': name of s3 bucket to dump files
    '''
    subprocess.call('aws s3 cp' + ' ' + directory + ' ' + 's3://' + bucket + ' ' + '--recursive', shell=True)

def save_labels(fns):
    '''
    INPUT list 'fns': filepaths to all labels
    '''
    progress.currval = 0
    for label_idx in progress(range(len(labels))):
        slices = io.imread(labels[label_idx], plugin='simpleitk')
        for slice_idx in range(len(slices)):
            io.imsave(r'{}_{}L.png'.format(label_idx, slice_idx), slices[slice_idx])

if __name__ == '__main__':
    url = r'G:\work\deeplearning\BRATS2015_Training\HGG\brats_2013_pat0005_1\VSD.Brain.XX.O.MR_T1.54537\VSD.Brain.XX.O.MR_T1.54537.mha'
    labels = glob(url)
    save_labels(labels)
    # patients = glob('Training/HGG/**')
    # save_patient_slices(patients, 'reg')
    # save_patient_slices(patients, 'norm')
    # save_patient_slices(patients, 'n4')
    # s3_dump('Graveyard/Training_PNG/', 'orig-training-png')