I am working with CT scan medical images in raw format. Each file is essentially a 3D matrix of voxels (512 x 512 x number of slices). I'd like to extract each slice of the file into a separate file.
import numpy as np
import matplotlib.pyplot as plt
# reading the raw image into a string. The image files can be found at:
# https://grand-challenge.org/site/anode09/details/
f = open('test01.raw', 'rb')
img_str = f.read()
# converting to a uint16 numpy array
img_arr = np.fromstring(img_str, np.uint16)
# get the first image and plot it
im1 = img_arr[0:512*512]
im1 = np.reshape(im1, (512, 512))
plt.imshow(im1, cmap=plt.cm.gray_r)
plt.show()
The result definitely looks like a chest ct scan, but the texture of the image is strange, as if the pixels were misplaced.
Some relevant info might be located in the associated .mhd info file, but I'm not sure where to look:
ObjectType = Image
NDims = 3
BinaryData = True
BinaryDataByteOrderMSB = False
CompressedData = False
TransformMatrix = 1 0 0 0 1 0 0 0 1
Offset = 0 0 0
CenterOfRotation = 0 0 0
AnatomicalOrientation = RPI
ElementSpacing = 0.697266 0.697266 0.7
DimSize = 512 512 459
ElementType = MET_SHORT
ElementDataFile = test01.raw
Try it this way:
Dim_size = np.array((512, 512, 459), dtype=int)  # or read DimSize from your .mhd info file
f = open(FileName, 'rb')  # open the file for reading in binary mode
# ElementType = MET_SHORT in the .mhd header, i.e. signed 16-bit integers
img_arr = np.fromfile(f, dtype=np.int16)
# the raw file stores x fastest, then y, then slice, so in C order the slice index comes first
img_arr = img_arr.reshape(Dim_size[2], Dim_size[1], Dim_size[0])
If you are memory-limited, read the file slice by slice:
f = open(FileName, 'rb')  # open the file for reading in binary mode
for i in range(Dim_size[2]):
    img_arr = np.fromfile(f, dtype=np.int16, count=Dim_size[0] * Dim_size[1])
    img = img_arr.reshape(Dim_size[1], Dim_size[0])
    # do something with the slice
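Since the question asks for one file per slice, a minimal continuation of that loop (my addition; it assumes .npy output is acceptable and reuses the dimensions and dtype from the .mhd header) could be:
import numpy as np

Dim_size = (512, 512, 459)  # DimSize from the .mhd header
with open('test01.raw', 'rb') as f:
    for i in range(Dim_size[2]):
        img = np.fromfile(f, dtype=np.int16, count=Dim_size[0] * Dim_size[1])
        img = img.reshape(Dim_size[1], Dim_size[0])
        np.save('slice_{:03d}.npy'.format(i), img)  # one file per slice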
A good way to check what is actually in the raw file is to open it in ImageJ. For reading such ITK-compatible files there is even a plugin available, but direct raw import should also work.
https://imagej.net/Welcome
http://ij-plugins.sourceforge.net/plugins/3d-io/
I am working on a violence detection service. I am trying to develop software based on the code in this repo. My dataset consists of videos residing in two directories, "Violence" and "Non-Violence".
I used this code to generate .npy files from the RGB channels and optical flow features. The output of this step is two folders containing .npy arrays of shape 244x244x5 (np.float32 dtype): the RGB video frames occupy the first 3 channels (npy[..., :3]) and the optical flow features the next two channels (npy[..., 3:]).
Now I am trying to convert them to TFRecords and use tf.data.TFRecordDataset to speed up the training process. Since my model input has to be a cube tensor, each training element has to be 64 frames of a video, i.e. each data point has shape 64x244x244x5.
So I used the following code to convert the .npy files to TFRecords.
from pathlib import Path
from os.path import join
import tensorflow as tf
import numpy as np
import cv2
from tqdm import tqdm
def normalize(data):
    mean = np.mean(data)
    std = np.std(data)
    return (data - mean) / std


def random_flip(video, prob):
    s = np.random.rand()
    if s < prob:
        video = np.flip(m=video, axis=2)
    return video


def color_jitter(video):
    # range of s-component: 0-1
    # range of v component: 0-255
    s_jitter = np.random.uniform(-0.2, 0.2)
    v_jitter = np.random.uniform(-30, 30)
    for i in range(len(video)):
        hsv = cv2.cvtColor(video[i], cv2.COLOR_RGB2HSV)
        s = hsv[..., 1] + s_jitter
        v = hsv[..., 2] + v_jitter
        s[s < 0] = 0
        s[s > 1] = 1
        v[v < 0] = 0
        v[v > 255] = 255
        hsv[..., 1] = s
        hsv[..., 2] = v
        video[i] = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    return video
def uniform_sample(video: np.ndarray, target_frames: int = 64) -> np.ndarray:
    """
    Uniformly samples target_frames frames from the video, padding with the
    last frames if too few are sampled.

    Args:
        video: array of video frames.
        target_frames: number of frames to return.

    Returns:
        float32 array of shape (target_frames, height, width, channels).
    """
    len_frames = len(video)
    interval = int(np.ceil(len_frames / target_frames))
    # init empty list for the sampled video
    sampled_video = []
    for i in range(0, len_frames, interval):
        sampled_video.append(video[i])
    # calculate number of padded frames and fix it
    num_pad = target_frames - len(sampled_video)
    if num_pad > 0:
        padding = [video[i] for i in range(-num_pad, 0)]
        sampled_video += padding
    return np.array(sampled_video, dtype=np.float32)
def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


if __name__ == '__main__':
    path = Path('transformed/')
    npy_files = list(path.rglob('*.npy'))[:100]
    aug = True
    # one_hots = to_categorical(range(2), dtype=np.int8)
    path_to_save = 'data_tfrecords'
    tfrecord_path = join(path_to_save, 'all_data.tfrecord')
    with tf.io.TFRecordWriter(tfrecord_path) as writer:
        for file in tqdm(npy_files, desc='files converted'):
            # load npy files
            npy = np.load(file.as_posix(), mmap_mode='r')
            data = np.float32(npy)
            del npy
            # Uniform sampling
            data = uniform_sample(data, target_frames=64)
            # Add augmentation
            if aug:
                data[..., :3] = color_jitter(data[..., :3])
                data = random_flip(data, prob=0.5)
            # Normalization
            data[..., :3] = normalize(data[..., :3])
            data[..., 3:] = normalize(data[..., 3:])
            # Label one hot encoding
            label = 1 if file.parent.stem.startswith('F') else 0
            # label = one_hots[label]
            feature = {'image': _bytes_feature(tf.compat.as_bytes(data.tobytes())),
                       'label': _int64_feature(int(label))}
            example = tf.train.Example(features=tf.train.Features(feature=feature))
            writer.write(example.SerializeToString())
The code works fine, but the real problem is that it consumes too much disk space. My whole dataset of 2000 videos takes 12 GB; converted to .npy files it became around 80 GB, and as TFRecords it is now over 120 GB. How can I convert the data more efficiently so that it needs less storage?
The answer might be too late, but I see you are still saving the raw video frames in your TFRecord file, which is why it takes so much space. Try removing the "image" feature from your feature dict and storing each frame in a more compact form (along with its height, width, and channels), keeping for example only:
feature = {'label': _int64_feature(int(label))}
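Not part of the original answer, but another way to cut the size without changing what you store is to write the TFRecord with compression enabled (the float32 tensors compress reasonably well); the reader then has to request the same compression type:
import tensorflow as tf

# write with GZIP compression
options = tf.io.TFRecordOptions(compression_type='GZIP')
with tf.io.TFRecordWriter('all_data.tfrecord', options=options) as writer:
    ...  # serialize and write the examples exactly as before

# read back with the matching compression type
dataset = tf.data.TFRecordDataset('all_data.tfrecord', compression_type='GZIP')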
I am trying to convert netCDF files to EPSG:3857 for use with Mapbox by using GDAL. This would be a .nc to .nc conversion, not a conversion to a raster. I am open to using GDAL or other methods to do this. The data must be reprojected before it goes to a console app, and this process has taken weeks to find a solution for; I figured it would be simple.
I am working on colorizing satellite data. There are 3 .nc files (blue, red, and infrared) that, when combined and processed, create a color image. After the 3 files are downloaded (from Amazon AWS), a Python console app does the processing and dumps a .jpg to the same folder. The source code for that application is located here so you may validate the data. (It is slow, as the files are very high resolution.)
The code I have tried is:
gdalwarp -t_srs EPSG:3857 test.nc test-projected.nc
However, I have tried several other variations and nothing works.
I am not a professional with this, but should I even be using gdalwarp for this? I only want to change the projection, nothing else, so that the Python app can still work with the data. It must still be able to create the .jpg using the reprojected files.
The following links are samples of the data that needs to be converted :
.nc file on AWS > Color Channel 1 (Blue 1km resolution)
.nc file on AWS > Color Channel 2 (Red, Higher 0.5km resolution & larger file size)
.nc file on AWS > Color Channel 3 (Infrared - serves as green)
Additionally, someone else online has accomplished this using a similar projection via the pyproj module at https://github.com/blaylockbk/pyBKB_v2/tree/master/BB_GOES16 (mine must be EPSG:3857 for use with Mapbox). If the Python code were modified to do this all in one go, that would be great too. I am opening a bounty as a final hope.
I do not know Python, so I have mostly been attempting this with GDAL; however, working Python code added to my source code that achieves the expected result (or a working GDAL script) will earn the bounty.
Here is my solution:
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 17:39:45 2019
#author: Guy Serbin
"""
import os, sys, glob, argparse
from osgeo import gdal, osr
from scipy.misc import imresize  # note: scipy.misc.imresize was removed in SciPy 1.3, so this script needs an older SciPy
parser = argparse.ArgumentParser(description = 'Script to create CONUS true color image from GOES 16 L1b data.')
parser.add_argument('-i', '--indir', type = str, default = r'C:\Data\Freelancer\DavidHolcomb', help = 'Input directory name.')
parser.add_argument('-o', '--outdir', type = str, default = None, help = 'Output directory name.')
parser.add_argument('-p', '--proj', type = int, default = 3857, help = 'Output projection, must be EPSG number.')
args = parser.parse_args()
if not args.indir:
    print('ERROR: --indir not set. exiting.')
    sys.exit()
elif not os.path.isdir(args.indir):
    print('ERROR: --indir not set to a valid directory path. exiting.')
    sys.exit()

if not args.outdir:
    print('WARNING: --outdir not set. Output will be written to --indir.')
    args.outdir = args.indir
o_srs = osr.SpatialReference()
o_srs.ImportFromEPSG(args.proj)
# based upon code ripped from https://riptutorial.com/gdal/example/25859/read-a-netcdf-file---nc--with-python-gdal
# Path of netCDF file
netcdf_red = glob.glob(os.path.join(args.indir, 'OR_ABI-L1b-RadC-M3C02_G16_s*.nc'))[0]
netcdf_green = glob.glob(os.path.join(args.indir, 'OR_ABI-L1b-RadC-M3C03_G16_s*.nc'))[0]
netcdf_blue = glob.glob(os.path.join(args.indir, 'OR_ABI-L1b-RadC-M3C01_G16_s*.nc'))[0]
baselist = os.path.basename(netcdf_blue).split('_')
outputfilename = os.path.join(args.outdir, 'OR_ABI-L1b-RadC-M3TrueColor_1_G16_{}.tif'.format(baselist[3]))
print('Output file will be: {}'.format(outputfilename))
tempfile = os.path.join(args.outdir, 'temp.tif')
# Specify the layer name to read
layer_name = "Rad"
# Open netcdf file.nc with gdal
print('Opening red band file: {}'.format(netcdf_red))
dsR = gdal.Open("NETCDF:{0}:{1}".format(netcdf_red, layer_name))
print('Opening green band file: {}'.format(netcdf_green))
dsG = gdal.Open("NETCDF:{0}:{1}".format(netcdf_green, layer_name))
print('Opening blue band file: {}'.format(netcdf_blue))
dsB = gdal.Open("NETCDF:{0}:{1}".format(netcdf_blue, layer_name))
red_srs = osr.SpatialReference()
red_srs.ImportFromWkt(dsR.GetProjectionRef())
i_srs = osr.SpatialReference()
i_srs.ImportFromWkt(dsG.GetProjectionRef())
GeoT = dsG.GetGeoTransform()
print(i_srs.ExportToWkt())
red_transform = osr.CoordinateTransformation(red_srs, o_srs)
transform = osr.CoordinateTransformation(i_srs, o_srs)
# Read full data from netcdf
print('Reading red band into memory.')
red = dsR.ReadAsArray(0, 0, dsR.RasterXSize, dsR.RasterYSize)
print('Resizing red band to match green and blue bands.')
red = imresize(red, 50, interp = 'bicubic')
print('Reading green band into memory.')
green = dsG.ReadAsArray(0, 0, dsG.RasterXSize, dsG.RasterYSize)
print('Reading blue band into memory.')
blue = dsB.ReadAsArray(0, 0, dsB.RasterXSize, dsB.RasterYSize)
red[red < 0] = 0
green[green < 0] = 0
blue[blue < 0] = 0
# Stack data and output
print('Stacking data.')
driver = gdal.GetDriverByName('GTiff')
stack = driver.Create('/vsimem/stack.tif', dsB.RasterXSize, dsB.RasterYSize, 3, gdal.GDT_Int16)
stack.SetProjection(i_srs.ExportToWkt())
stack.SetGeoTransform(GeoT)
stack.GetRasterBand(1).WriteArray(red)
stack.GetRasterBand(2).WriteArray(green)
stack.GetRasterBand(3).WriteArray(blue)
print('Warping data to new projection.')
warped = gdal.Warp('/vsimem/warped.tif', stack, dstSRS = o_srs, outputType = gdal.GDT_Int16)
print('Writing output to disk.')
outRaster = gdal.Translate(outputfilename, '/vsimem/warped.tif')
outRaster = None
red = None
green = None
blue = None
tmp_ds = None
dsR = None
dsG = None
dsB = None
print('Processing complete.')
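Assuming the script above is saved as, say, goes_truecolor.py (the filename is my own choice), it would be run like this:
python goes_truecolor.py -i /path/to/goes16/files -o /path/to/output -p 3857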
You can use rioxarray to do this. An example of doing so is here: https://corteva.github.io/rioxarray/html/examples/reproject.html
Here is an example targeted for your use case:
import rioxarray
xds = rioxarray.open_rasterio("OR_ABI-L1b-RadC-M3C01_G16_s20190621802131_e20190621804504_c20190621804546.nc")
<xarray.Dataset>
Dimensions: (band: 1, x: 5000, y: 3000)
Coordinates:
* y (y) float64 1.584e+06 1.585e+06 ... 4.588e+06 4.589e+06
* x (x) float64 -3.627e+06 -3.626e+06 ... 1.381e+06 1.382e+06
* band (band) int64 1
spatial_ref int64 0
Data variables:
Rad (band, y, x) int16 ...
DQF (band, y, x) int8 ...
xds.rio.crs
CRS.from_wkt('PROJCS["unnamed",GEOGCS["unknown",DATUM["unnamed",SPHEROID["Spheroid",6378137,298.2572221]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]]],PROJECTION["Geostationary_Satellite"],PARAMETER["central_meridian",-75],PARAMETER["satellite_height",35786023],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],EXTENSION["PROJ4","+proj=geos +lon_0=-75 +h=35786023 +x_0=0 +y_0=0 +ellps=GRS80 +units=m +no_defs +sweep=x"]]')
Then, reproject:
xds_3857 = xds.rio.reproject("epsg:3857")
<xarray.Dataset>
Dimensions: (band: 1, x: 7693, y: 4242)
Coordinates:
* x (x) float64 -1.691e+07 -1.691e+07 ... -5.892e+06 -5.891e+06
* y (y) float64 7.714e+06 7.712e+06 ... 1.641e+06 1.64e+06
* band (band) int64 1
spatial_ref int64 0
Data variables:
Rad (band, y, x) int16 1023 1023 1023 1023 ... 1023 1023 1023 1023
DQF (band, y, x) int8 0 0 0 0 0 0 0 0 0 0 0 ... 0 0 0 0 0 0 0 0 0 0
Attributes:
creation_date: 2019-09-25 01:02:54.590053
xds_3857.rio.crs
CRS.from_epsg(3857)
Write to netcdf:
xds_3857.to_netcdf("epsg3857.nc")
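If you also need a raster that Mapbox can tile directly (my addition, not part of the original answer), the reprojected band can be written out as a GeoTIFF with rioxarray as well:
# write the reprojected Rad band to a GeoTIFF (the filename is arbitrary)
xds_3857["Rad"].rio.to_raster("Rad_epsg3857.tif")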
I want to read .pfm images in Python. I tried imageio.imread, but it throws an error. Can I have any suggestions, please?
img = imageio.imread('image.pfm')
The following Python 3 implementation will decode .pfm files.
Download the example memorial.pfm from Paul Debevec's page.
from pathlib import Path
import numpy as np
import struct
def read_pfm(filename):
    with Path(filename).open('rb') as pfm_file:
        line1, line2, line3 = (pfm_file.readline().decode('latin-1').strip() for _ in range(3))
        assert line1 in ('PF', 'Pf')

        channels = 3 if "PF" in line1 else 1
        width, height = (int(s) for s in line2.split())
        scale_endianess = float(line3)
        bigendian = scale_endianess > 0
        scale = abs(scale_endianess)

        buffer = pfm_file.read()
        samples = width * height * channels
        assert len(buffer) == samples * 4

        fmt = f'{"<>"[bigendian]}{samples}f'
        decoded = struct.unpack(fmt, buffer)
        shape = (height, width, 3) if channels == 3 else (height, width)
        return np.flipud(np.reshape(decoded, shape)) * scale


import matplotlib.pyplot as plt

image = read_pfm('memorial.pfm')
plt.imshow(image)
plt.show()
I am not at all familiar with Python, but here are a few suggestions on reading a PFM (Portable Float Map) file.
Option 1
The ImageIO documentation here suggests there is a FreeImage reader you can download and use.
Option 2
I pieced together a simple reader myself below that seems to work fine on a few sample images I found around the 'net and generated with ImageMagick. It may contain inefficiencies or bad practices because I do not speak Python.
#!/usr/local/bin/python3
import sys
import re
from struct import *
# Enable/disable debug output
debug = True
with open("image.pfm", "rb") as f:
    # Line 1: PF=>RGB (3 channels), Pf=>Greyscale (1 channel)
    type = f.readline().decode('latin-1')
    if "PF" in type:
        channels = 3
    elif "Pf" in type:
        channels = 1
    else:
        print("ERROR: Not a valid PFM file", file=sys.stderr)
        sys.exit(1)
    if debug:
        print("DEBUG: channels={0}".format(channels))

    # Line 2: width height
    line = f.readline().decode('latin-1')
    width, height = re.findall(r'\d+', line)
    width = int(width)
    height = int(height)
    if debug:
        print("DEBUG: width={0}, height={1}".format(width, height))

    # Line 3: +ve number means big endian, negative means little endian
    line = f.readline().decode('latin-1')
    BigEndian = True
    if "-" in line:
        BigEndian = False
    if debug:
        print("DEBUG: BigEndian={0}".format(BigEndian))

    # Slurp all binary data
    samples = width * height * channels
    buffer = f.read(samples * 4)

    # Unpack floats with appropriate endianness
    if BigEndian:
        fmt = ">"
    else:
        fmt = "<"
    fmt = fmt + str(samples) + "f"
    img = unpack(fmt, buffer)
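A possible continuation (my addition, not part of the original answer) that turns the unpacked floats into a displayable NumPy array; PFM stores rows bottom-to-top, hence the vertical flip:
import numpy as np
import matplotlib.pyplot as plt

# width, height, channels and img come from the reader above
shape = (height, width, channels) if channels == 3 else (height, width)
image = np.flipud(np.reshape(np.array(img, dtype=np.float32), shape))
plt.imshow(image)
plt.show()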
Option 3
If you cannot read your PFM files in Python, you could convert them at the command line using ImageMagick to another format, such as TIFF, that can store floating point samples. ImageMagick is installed on most Linux distros and is available for macOS and Windows:
magick input.pfm output.tif
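After that conversion, the TIFF can be read back in Python, for example with imageio (a minimal sketch; the filename matches the command above):
import imageio

# read the floating-point TIFF produced by ImageMagick
img = imageio.imread('output.tif')
print(img.dtype, img.shape)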
I'm trying to extract DICOM PixelData from a Siemens dose report, but it contains only zeros. With a GE dose report I can read the data without trouble using pydicom or SimpleITK. Any ideas why the Siemens report contains only zeros?
Thanks!
I got this far, but the characters are a mess and I don't know what else to do.
import dicom  # classic pydicom < 1.0; in newer versions use "import pydicom"
import numpy as np
import matplotlib.pyplot as plt

f = "patient-protocol.dcm"
ds = dicom.read_file(f)  # with pydicom >= 1.0 this is pydicom.dcmread(f)

overlay_px_data = ds[0x6000, 0x3000].value
rows = ds[0x6000, 0x0010].value
cols = ds[0x6000, 0x0011].value

arr = np.frombuffer(overlay_px_data, dtype="uint8")
arr = np.unpackbits(arr)
arr = np.reshape(arr, (rows, cols))

plt.imshow(arr, cmap="gray")
plt.show()
[dose-image]
i_overlay = 1
n_bits = 8

# Overlays are stored at (60xx,3000):
# the first is (6000,3000), the second (6002,3000), the third (6004,3000),
# and so on.
dicom_tag1 = 0x6000 + 0 * i_overlay
# "data" is the pydicom dataset read from the file (ds in the code above)
overlay_raw = data[0x6000, 0x3000].value

# Overlay size is stored at (60xx,0010) and (60xx,0011)
rows = data[0x6000, 0x0010].value  # rows = 512
cols = data[0x6000, 0x0011].value  # cols = 512

decoded_linear = np.zeros(len(overlay_raw) * n_bits)

# Decode the data: each bit is stored as an array element, least significant bit first
for i in range(1, len(overlay_raw)):
    for k in range(0, n_bits):
        byte_as_int = overlay_raw[i]
        decoded_linear[i * n_bits + k] = (byte_as_int >> k) & 0b1

overlay = np.reshape(decoded_linear, [rows, cols])
plt.imshow(overlay)
plt.show()
I found a solution for now. I edited the code a bit (removed ord()); a pure NumPy decoding would be nicer.
code source link
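For reference, a vectorized version of that bit decoding (my addition; it assumes NumPy >= 1.17 for the bitorder argument) could look like:
import numpy as np

arr = np.frombuffer(overlay_raw, dtype=np.uint8)
# DICOM overlay bits are packed least-significant-bit first
bits = np.unpackbits(arr, bitorder='little')
overlay = bits[:rows * cols].reshape(rows, cols)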
I am trying to read a binary file from a readout board that will be converted to an image. In Matlab all the bytes are read correctly and the image is completely populated, but in Python (2.7, Anaconda) there is a line of zeros every 127 columns.
The Matlab code is:
fid = fopen(filename);
Rawdata = fread(fid,'uint8');
Data1d = Rawdata(2:2:end).* 256+ Rawdata(1:2:end) ;
% converts Data1 to a 2D vector, adding a row of zeros to make the reshape
% possible to 3D
Data2d = [reshape(Data1d,4127,1792); zeros(1,1792)];
% reshapes again, but adding a new dimension
Data3d = reshape(Data2d(:),129,32,1792);
% selects the first 128 values in the first dimension
Data3d = Data3d(1:128,:,:);
Data2d = reshape(Data3d(:),4096,1792);
Data2d = Data2d';
CMVimage = Data2d;
fclose(fid); %VGM 2017-01-14 the file should be closed.
In Python I tried np.fromfile() and reading the file directly with f.read(), with the same result.
import numpy as np
import matplotlib.pyplot as plt
"""
Reads the input .dat file and converts it to an image.
Problem: a line of zeros every 127 columns (in columns 127, 257, 368, ...);
curiously, the columns are in the position of the new byte.
In Matlab it works very well.
"""


def readDatFile(filename):
    """Reads the binary file in plain Python (not numpy);
    the data is byte type and is converted to integers.
    """
    import binascii
    f = open(filename, 'rb')
    data = f.read()
    # dataByte = bytearray(data)
    f.close()
    data_out = []
    for num in data:
        aux = int(binascii.hexlify(num), 16)
        data_out.append(aux)
        # print aux
    myarray = np.asarray(data_out)
    return myarray
def rawConversionNew(filename):
    # reads data from a binary file of type uint
    # f = open(filename, 'rb')
    # Rawdata = np.fromfile(f, dtype=np.uint8)
    # f.close()
    Rawdata = readDatFile(filename)

    # gets the image
    Data1d = 256 * Rawdata[1::2] + Rawdata[0::2]
    Data2d = Data1d.reshape(1792, 4127)
    Data2d = Data2d.T
    Data2d = np.vstack([Data2d, np.zeros((1, 1792), dtype=np.uint16)])
    Data3d = Data2d.reshape(129, 32, 1792)
    Data3d = Data3d[0:128, :, :]
    # plt.figure()
    # plt.plot(np.arange(Data3d.shape[0]), Data3d[:, 1, 1])
    # print(Data3d[:, 0, 0])
    CMVimage = Data3d.reshape(4096, 1792).T
    return CMVimage
There were in fact two errors: not opening the file as binary ("rb"), and the reshape, which is done differently in Matlab (column-major) and NumPy (row-major by default).
If the reshape is done using reshape(dim1, dim2, order='F'), the results are the same. Check: Matlab vs Python: Reshape
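For illustration, here is a minimal NumPy version of the Matlab pipeline using Fortran-order reshapes (my sketch, assuming the same 4127x1792 layout and little-endian uint16 samples):
import numpy as np

def raw_to_image(filename):
    # read raw bytes and combine byte pairs into little-endian uint16 values
    raw = np.fromfile(filename, dtype=np.uint8)
    data1d = raw[1::2].astype(np.uint16) * 256 + raw[0::2]
    # replicate the Matlab column-major reshapes with order='F'
    data2d = np.vstack([data1d.reshape(4127, 1792, order='F'),
                        np.zeros((1, 1792), dtype=np.uint16)])
    data3d = data2d.reshape(129, 32, 1792, order='F')[:128, :, :]
    return data3d.reshape(4096, 1792, order='F').T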