Python, 2D array of conditions to be applied on 3D array

I have a 3D array of shape (8000, 12000, 2): the dimensions correspond to 8000 frames, 12000 particles, and the (x, y) position of each particle.
I want to overlay a 2D grid of boxes on each frame so that I can count the number of particles in each box. I have working code, but I feel it can be sped up.
The code below works fine on one frame, but I have to run it for every frame and for several files, which makes things slow.
# Lx, Ly are my box dimensions; all particle positions are between
# -Lx/2, Lx/2 and -Ly/2, Ly/2
# Ly = Lx*3
ratio = 6  # controls the number of boxes
lx = Lx/ratio
ly = Ly/ratio/3
boxes = np.zeros((int(Ly/ly), int(Lx/lx), 2))
for i in range(boxes.shape[0]):
    for j in range(boxes.shape[1]):
        x_l = -Lx/2 + lx*j
        y_l = -Ly/2 + ly*i
        x_h = x_l + lx
        y_h = y_l + ly
        temp_cord = coord[(x_l <= coord[:,0]) & (coord[:,0] <= x_h) & (y_l <= coord[:,1]) & (coord[:,1] <= y_h)]
        if ((temp_cord[:,2]).sum() - temp_cord[:,2].shape[0]) / temp_cord[:,2].shape[0] < 0.1:
            boxes[i,j,0] = 1  # set box type
        boxes[i,j,1] = temp_cord.shape[0]
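One way to speed this up is to let np.histogram2d do the binning for all boxes in a single pass instead of masking the whole coordinate array once per box. This is a sketch rather than a drop-in replacement: it assumes coord is the per-frame (N, 3) array used in the loop above, with x, y in the first two columns and the type flag tested on line 20 in the third.

import numpy as np

def count_boxes(coord, Lx, Ly, ratio=6):
    # Same grid as above: ratio boxes along x, 3*ratio along y
    nx, ny = ratio, ratio * 3
    x_edges = np.linspace(-Lx / 2, Lx / 2, nx + 1)
    y_edges = np.linspace(-Ly / 2, Ly / 2, ny + 1)
    # Particle count per box, all boxes at once (y first so rows index y)
    counts, _, _ = np.histogram2d(coord[:, 1], coord[:, 0], bins=[y_edges, x_edges])
    # Per-box sum of the third column, for the box-type test
    type_sum, _, _ = np.histogram2d(coord[:, 1], coord[:, 0], bins=[y_edges, x_edges],
                                    weights=coord[:, 2])
    boxes = np.zeros((ny, nx, 2))
    nonzero = counts > 0
    frac = np.zeros_like(counts)
    frac[nonzero] = (type_sum[nonzero] - counts[nonzero]) / counts[nonzero]
    boxes[..., 0] = nonzero & (frac < 0.1)  # empty boxes keep type 0
    boxes[..., 1] = counts
    return boxes

# Apply per frame; the loop over 8000 frames remains, but each frame is one pass:
# all_boxes = np.stack([count_boxes(frame, Lx, Ly) for frame in data])

This replaces the O(boxes × particles) masking with a single O(particles) binning per frame, which is usually the dominant saving here.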

Related

TensorFlow Object Detection model doesn't detect the object in a video

I developed a TensorFlow model with one class (it had a loss of 0.03 and was trained on 680 labelled images), and I am trying to use it to detect the object on every video frame. However, whenever I run my code, it detects something in the top left of the screen, in the black border surrounding the video. I tried changing the model from one trained with MobileNet to one trained with EfficientDet D3, and the same issue persisted. I then tried changing my code to require a minimum box size and a higher score. I have also tried letting it make multiple detections and using the one with the highest score. With all of these conditions, it didn't detect anything that fulfilled the requirements. My code is as follows:
import os
import time
import tensorflow as tf
import cv2
import scipy
import math
import pandas as pd
import numpy as np
from PIL import Image
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
from base64 import b64encode
#os.chdir("C:\\Users\\Ibrahim\\desktop")
PATH_TO_SAVED_MODEL = "C:/Users/Ibrahim/Desktop/fine_tuned_model/content/fine_tuned_model/saved_model"
# Load label map and obtain class names and ids
#label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
category_index=label_map_util.create_category_index_from_labelmap("C:\\Users\\Ibrahim\\Desktop\\customTF2-20221225T123609Z-001\\customTF2\\data\\label_map.pbtxt",use_display_name=True)
file = "CL_1_S0003.mp4"
video = cv2.VideoCapture(file)
ret,frame=video.read()
#getting the walls bbox
wall_bbox = cv2.selectROI(frame)
(x_wall,y_wall,x2_wall,y2_wall) = wall_bbox
print(wall_bbox)
num_cont_frames=0
#video = cv.VideoCapture(path)
ball_size = 0.22 #diameter of a regulation ball in meters
fps_cam = 10000 # Change this to the required fps of the video
fps_vid =video.get(cv2.CAP_PROP_FPS)
fps_time= fps_vid / fps_cam
#print(fps_time)
model = tf.saved_model.load(PATH_TO_SAVED_MODEL)
signature = list(model.signatures.values())[0]
# Initialize variable to track state of ball (in contact with wall or not)
in_contact = False
# Initialize variable to track whether in_contact has ever been True
in_contact_ever = False
# Initialize lists for inbound and outbound velocities
inbound_velocities = []
outbound_velocities = []
# Calculate time interval between frames in seconds
time_interval = 1 / fps_cam
scale = []
x_list = []
y_list = []
x_def=[]
inbound_x = []
inbound_y = []
outbound_x = []
outbound_y = []
w1=[]
score_thresh = 0.8 # Minimum threshold for object detection
max_detections = 20
while True:
    # Read frame from video
    ret, frame = video.read()
    if not ret:
        break
    # Add a batch dimension to the frame tensor
    frame_tensor = tf.expand_dims(frame, axis=0)
    # Get detections for image
    detections = signature(frame_tensor)  # Replace this with a call to your TensorFlow model's predict method
    scores = detections['detection_scores'][0, :max_detections].numpy()
    bboxes = detections['detection_boxes'][0, :max_detections].numpy()
    labels = detections['detection_classes'][0, :max_detections].numpy().astype(np.int64)
    labels = [category_index[n]['name'] for n in labels]
    # Track the maximum score and corresponding bounding box
    max_score = 0
    selected_bbox = None
    # Loop through all bounding boxes
    for bbox, score in zip(bboxes, scores):
        # Check if the score is greater than the current maximum score
        if score > max_score:
            max_score = score
            selected_bbox = bbox
    # Check if a bounding box was selected
    if selected_bbox is not None:
        # Extract bounding box coordinates
        (x, y, w, h) = selected_bbox
        # Filter out bounding boxes that are too small (smaller than a minimum size)
        if w >= 10 and h >= 10:
            # Draw bounding box on frame
            cv2.rectangle(frame, (int(x), int(y)), (int(x + w), int(y + h)), (0, 255, 0), 20, 1)
            cv2.imshow('Frame', frame)
            cv2.waitKey(1)
            x2 = x + w
            y2 = y + h
            # Calculate center point of bounding box
            x_center = (x + x2) / 2
            y_center = (y + y2) / 2
            # Append x and y center points to lists
            x_list.append(x_center)
            y_list.append(y_center)
            w1.append(w)
            # Calculate other variables and metrics using bbox
            # meters per pixel: real ball diameter / diameter in pixels gives a scale factor
            scale.append(ball_size / h)
            #x_list.append(x2)  # list of x positions of right edge
            #y_list.append(y2)
            if (x_center - w) < max(x2_wall, x_wall):  # sometimes the bbox is the wrong way around
                in_contact = True
                in_contact_ever = True
                # Increment counter
                num_cont_frames = num_cont_frames + 1
                x_defe = x2 - x2_wall
                x_def.append(x_defe)
            else:
                in_contact = False
            if in_contact == False and in_contact_ever == False:
                inbound_x.append(x_center)  # list of x positions at center of ball
                inbound_y.append(y_center)  # list of y positions at center of ball
            if in_contact == False and in_contact_ever == True:
                outbound_x.append(x_center)
                outbound_y.append(y_center)
                print(outbound_x)
    else:
        cv2.putText(frame, 'Error', (100, 0), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.imshow('Tracking', frame)
    if cv2.waitKey(1) & 0xFF == 27:
        break
cv2.destroyAllWindows()
scale_ave = scipy.stats.trim_mean(scale, 0.2)  # trim 20% either way to remove some extraneous results
x_diff = []
y_diff = []
x_len = len(x_list) - 1  # minus 1 so the i+1 lookahead below doesn't overflow
for i in range(x_len):
    x_diff.append(x_list[i] - x_list[i+1])  # find x distance per frame
for i in range(x_len):
    y_diff.append(y_list[i] - y_list[i+1])  # find y distance per frame
pyth_dist = []
pyth_sub = []
x2_len = len(x_diff) - 1
x_speed = []
y_speed = []
for i in range(x2_len):
    x_speeds = x_diff[i] * scale_ave * fps_cam
    x_speed.append(x_speeds)
    y_speeds = y_diff[i] * scale_ave * fps_cam
    y_speed.append(y_speeds)
    pyth_sub = math.hypot(x_diff[i], y_diff[i])
    pyth_dist.append(pyth_sub)  # Pythagoras to find pixel distance per frame
realdist = []
speed = []
for i in range(x2_len):
    realdistcalc = pyth_dist[i] * scale_ave
    realdist.append(realdistcalc)  # change from pixels to meters
for item in realdist:
    if item > 1:
        realdist.remove(item)
distlen = len(realdist) - 1
for i in range(distlen):
    speedcalc = realdist[i] * fps_cam
    speed.append(speedcalc)
contact_time = num_cont_frames / fps_cam
print(contact_time)
if x_def:
    realxdef = min(x_def) * scale_ave
else:
    realxdef = 0
print(realxdef)
# Calculate inbound velocities
inbound_x_diff = []
inbound_y_diff = []
# Calculate inbound x- and y-velocities
inbound_x_velocities = []
inbound_y_velocities = []
inbound_len = len(inbound_x) - 1
# Calculate differences between consecutive x and y coordinates
for i in range(inbound_len):
    inbound_x_diff.append(inbound_x[i] - inbound_x[i + 1])
    inbound_y_diff.append(inbound_y[i] - inbound_y[i + 1])
# Calculate inbound velocities in meters per second
inbound_velocities = []
for i in range(inbound_len):
    inbound_x_velocity = inbound_x_diff[i] * scale_ave * fps_cam
    inbound_x_velocities.append(inbound_x_velocity)
    inbound_y_velocity = inbound_y_diff[i] * scale_ave * fps_cam
    inbound_y_velocities.append(inbound_y_velocity)
    inbound_velocity = math.hypot(inbound_x_diff[i], inbound_y_diff[i]) * scale_ave * fps_cam
    inbound_velocities.append(inbound_velocity)
# Calculate outbound velocities
outbound_x_diff = []
outbound_y_diff = []
outbound_len = len(outbound_x) - 1
# Calculate differences between consecutive x and y coordinates
for i in range(outbound_len):
    outbound_x_diff.append(outbound_x[i] - outbound_x[i + 1])
    outbound_y_diff.append(outbound_y[i] - outbound_y[i + 1])
# Calculate outbound velocities in meters per second
outbound_velocities = []
outbound_x_velocities = []
outbound_y_velocities = []
for i in range(outbound_len):
    outbound_x_velocity = outbound_x_diff[i] * scale_ave * fps_cam
    outbound_x_velocities.append(outbound_x_velocity)
    outbound_y_velocity = outbound_y_diff[i] * scale_ave * fps_cam
    outbound_y_velocities.append(outbound_y_velocity)
    outbound_velocity = math.hypot(outbound_x_diff[i], outbound_y_diff[i]) * scale_ave * fps_cam
    outbound_velocities.append(outbound_velocity)
I expected a bounding box around the ball. I tried changing the model, increasing the maximum number of detections, adding a minimum size for detections, and increasing the required score for detection.
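One likely culprit worth checking (this is an inference from the TF Object Detection API's documented output format, not something verifiable from the question alone): detection_boxes come back as normalized [ymin, xmin, ymax, xmax] values in [0, 1], so unpacking one as a pixel-space (x, y, w, h) tuple yields tiny coordinates near the top-left corner, which matches the symptom. A sketch of the conversion:

# detection_boxes are normalized [ymin, xmin, ymax, xmax], not pixel (x, y, w, h)
frame_h, frame_w = frame.shape[:2]
ymin, xmin, ymax, xmax = selected_bbox
x, y = xmin * frame_w, ymin * frame_h
w, h = (xmax - xmin) * frame_w, (ymax - ymin) * frame_h
cv2.rectangle(frame, (int(x), int(y)), (int(x + w), int(y + h)), (0, 255, 0), 2)

With this conversion, the size filter (w >= 10 and h >= 10) also operates on pixels as intended rather than on normalized fractions, which always fail a >= 10 test.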

Missing coordinates when dividing image into grid

I am trying to partition a set of provided coordinates into several buckets in Python 3 with numpy. I have a grid of buckets. See below:
def partition(image, num_tiles):
    """Divide an image into a (num_tiles x num_tiles) grid and return the
    partitioned input."""
    # The object to return. Ignore - I am just trying to test 'draw' works currently.
    partitioned_image = np.empty((num_tiles, num_tiles), dtype=object)
    draw = []
    # The input array contains coordinates of the form [xMin, xMax, yMin, yMax].
    # This is because these are coordinates for bounding boxes around biological cells.
    # When I say 'point(s)', I refer to a [xMn, xMx, yMn, yMx] array(s).
    xMin = image[:,0]
    xMax = image[:,1]
    yMin = image[:,2]
    yMax = image[:,3]
    # The base to start searching from (not 0,0).
    x_base = min(xMin)
    y_base = min(yMin)
    # max(?Max) - min(?Min) defines the entire range for the variable. Divide this
    # range by the number of tiles, which is the number of ticks of the grid.
    # E.g. range is 100, want a 10x10 grid, so we step along in steps of 10.
    x_step = (max(xMax) - min(xMin)) // num_tiles
    y_step = (max(yMax) - min(yMin)) // num_tiles
    for i in range(num_tiles):
        for j in range(num_tiles):
            # Define the bottom-left point of the region of interest (a tile)
            x_left = x_base + x_step * i
            y_low = y_base + y_step * j
            # Define the upper-right point of the region of interest
            x_right = x_base + x_step * (i + 1)
            y_high = y_base + y_step * (j + 1)
            # Every point in image that is within the region gets added to the
            # draw list. Remember, each point is of the form [xMn, xMx, yMn, yMx]
            result = ((yMin >= y_low) & (yMax < y_high) &
                      (xMin >= x_left) & (xMax < x_right)).nonzero()[0]
            for coordinates in image[result]:
                draw.append(coordinates)
            # I would want to add the actual points to my partitioned_input array
            # here, in the corresponding tile. The above code for draw is *JUST TESTING*.
    # Convert draw list to numpy array and check to see we got all the points.
    draw = np.asarray(draw)
    print(draw.shape == image.shape)  # We do not. This is annoying.
    # Below is the code for plotting. I just take the average of
    # the xMin/yMin and xMax/yMax values for this.
    draw_xAvg = np.mean(np.array([draw[:,0], draw[:,1]]), axis=0)
    draw_yAvg = np.mean(np.array([draw[:,2], draw[:,3]]), axis=0)
    image_xAvg = np.mean(np.array([image[:,0], image[:,1]]), axis=0)
    image_yAvg = np.mean(np.array([image[:,2], image[:,3]]), axis=0)
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(30, 10))
    ax1.set_title('Test', fontsize=30)
    ax1.scatter(draw_xAvg, draw_yAvg, s=0.1, c='b')
    ax2.set_title('Image', fontsize=30)
    ax2.scatter(image_xAvg, image_yAvg, s=0.1, c='r')
    ax3.set_title('Overlay (Image)', fontsize=30)
    ax3.scatter(image_xAvg, image_yAvg, s=0.1, c='r')
    ax3.scatter(draw_xAvg, draw_yAvg, s=0.1, c='b')
    # Would return this once I partitioned the input correctly.
    # The idea is to have a list per tile of all the points found in that tile.
    # All I am doing is checking that I get the right number of points in total.
    return partitioned_image
Calling code:
partitioned_cells = partition(cells, 20)
As you can see, I step along the input in steps proportionate to the size of the input. This should be fine, and I do get the vast majority of points, so the logic is clearly mostly right. However, I expect perfect overlap in the third figure below:
If you look closely at the right-hand figure you can see distinct grid-like red showing through where the blue does not overlap it, particularly on the right of that figure. The sizes of the resulting numpy arrays (12948 vs 13804) also confirm there is a mismatch, with the red outnumbering the blue: I am missing some coordinates in my partitioning.
I have no idea why this is - even when my boundaries are inclusive (>= or <=) they still do not capture all of the points. Can someone explain, or have a guess, as to why this might be?
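Two things in the code above can drop points. First, the integer-division step (// num_tiles) truncates, so num_tiles * x_step can be smaller than the full range and anything past the last tick lands in no tile. Second, requiring the whole box inside a tile (xMin >= x_left and xMax < x_right) silently discards any box that straddles a grid line. A sketch of both fixes, reusing the variable names from the question: build edges with np.linspace so the last edge lands exactly on the maximum, and bin each box by its centre so every box falls in exactly one tile.

import numpy as np

x_edges = np.linspace(xMin.min(), xMax.max(), num_tiles + 1)  # last edge hits the true max
y_edges = np.linspace(yMin.min(), yMax.max(), num_tiles + 1)

# Bin each box by its centre so every box lands in exactly one tile
x_centre = (xMin + xMax) / 2
y_centre = (yMin + yMax) / 2
ix = np.clip(np.digitize(x_centre, x_edges) - 1, 0, num_tiles - 1)  # tile column per box
iy = np.clip(np.digitize(y_centre, y_edges) - 1, 0, num_tiles - 1)  # tile row per box

The np.clip handles the one centre that sits exactly on the final edge, which np.digitize would otherwise place one bucket past the end. With this assignment, the total across tiles equals len(image) by construction.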

Trilinear Interpolation on Voxels at specific angle

I'm currently attempting to implement this algorithm for volume rendering in Python, and am conceptually confused about their method of generating the LH histogram (see section 3.1, page 4).
I have a 3D stack of DICOM images, and calculated its gradient magnitude and the 2 corresponding azimuth and elevation angles with it (which I found out about here), as well as finding the second derivative.
Now, the algorithm is asking me to iterate through a set of voxels, and "track a path by integrating the gradient field in both directions...using the second order Runge-Kutta method with an integration step of one voxel".
What I don't understand is how to use the 2 angles I calculated to integrate the gradient field in said direction. I understand that you can use trilinear interpolation to get intermediate voxel values, but I don't understand how to get the voxel coordinates I want using the angles I have.
In other words, I start at a given voxel position, and want to take a 1 voxel step in the direction of the 2 angles calculated for that voxel (one in the x-y direction, the other in the z-direction). How would I take this step at these 2 angles and retrieve the new (x, y, z) voxel coordinates?
Apologies in advance, as I have a very basic background in Calc II/III, so vector fields/visualization of 3D spaces is still a little rough for me.
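For the step itself, the two angles define a unit direction vector via the usual spherical-coordinate conversion. A sketch, under the assumption that the azimuth comes from arctan2(grady, gradx) in the x-y plane and the elevation is measured out of that plane:

import numpy as np

def step_along_gradient(pos, azimuth_deg, elevation_deg, step=1.0):
    """Take a `step`-voxel step from `pos` in the direction given by the two angles.

    A sketch: azimuth is the angle in the x-y plane, elevation the angle
    out of that plane, both in degrees as computed by the code below.
    """
    az = np.radians(azimuth_deg)
    el = np.radians(elevation_deg)
    direction = np.array([np.cos(el) * np.cos(az),   # x component
                          np.cos(el) * np.sin(az),   # y component
                          np.sin(el)])               # z component
    return np.asarray(pos, dtype=float) + step * direction

The resulting position is generally fractional, which is exactly where trilinear interpolation comes in: sample the volume (or gradient) at the new (x, y, z) rather than rounding it.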
Creating 3D stack of DICOM images:
def collect_data(data_path):
    print "collecting data"
    files = []  # create an empty list
    for dirName, subdirList, fileList in os.walk(data_path):
        for filename in fileList:
            if ".dcm" in filename:
                files.append(os.path.join(dirName, filename))
    # Get reference file
    ref = dicom.read_file(files[0])
    # Load dimensions based on the number of rows, columns, and slices (along the Z axis)
    pixel_dims = (int(ref.Rows), int(ref.Columns), len(files))
    # Load spacing values (in mm)
    pixel_spacings = (float(ref.PixelSpacing[0]), float(ref.PixelSpacing[1]), float(ref.SliceThickness))
    x = np.arange(0.0, (pixel_dims[0]+1)*pixel_spacings[0], pixel_spacings[0])
    y = np.arange(0.0, (pixel_dims[1]+1)*pixel_spacings[1], pixel_spacings[1])
    z = np.arange(0.0, (pixel_dims[2]+1)*pixel_spacings[2], pixel_spacings[2])
    # Row and column directional cosines
    orientation = ref.ImageOrientationPatient
    # This will become the intensity values
    dcm = np.zeros(pixel_dims, dtype=ref.pixel_array.dtype)
    origins = []
    # loop through all the DICOM files
    for filename in files:
        # read the file
        ds = dicom.read_file(filename)
        # get pixel spacing and origin information
        origins.append(ds.ImagePositionPatient)  # [0,0,0] coordinates in real 3D space (in mm)
        # store the raw image data
        dcm[:, :, files.index(filename)] = ds.pixel_array
    return dcm, origins, pixel_spacings, orientation
Calculating gradient magnitude:
def calculate_gradient_magnitude(dcm):
    print "calculating gradient magnitude"
    gradient_magnitude = []
    gradient_direction = []
    gradx = np.zeros(dcm.shape)
    sobel(dcm, 0, gradx)
    grady = np.zeros(dcm.shape)
    sobel(dcm, 1, grady)
    gradz = np.zeros(dcm.shape)
    sobel(dcm, 2, gradz)
    gradient = np.sqrt(gradx**2 + grady**2 + gradz**2)
    azimuthal = np.arctan2(grady, gradx)
    # the two-argument form requires arctan2 (np.arctan's second argument is an output array)
    elevation = np.arctan2(gradz, gradient)
    azimuthal = np.degrees(azimuthal)
    elevation = np.degrees(elevation)
    return gradient, azimuthal, elevation
Converting to patient coordinate system to get actual voxel position:
def get_patient_position(dcm, origins, pixel_spacing, orientation):
    """
    Image Space --> Anatomical (Patient) Space is an affine transformation
    using the Image Orientation (Patient), Image Position (Patient), and
    Pixel Spacing properties from the DICOM header
    """
    print "getting patient coordinates"
    world_coordinates = np.empty((dcm.shape[0], dcm.shape[1], dcm.shape[2], 3))
    affine_matrix = np.zeros((4,4), dtype=np.float32)
    rows = dcm.shape[0]
    cols = dcm.shape[1]
    num_slices = dcm.shape[2]
    image_orientation_x = np.array([orientation[0], orientation[1], orientation[2]]).reshape(3,1)
    image_orientation_y = np.array([orientation[3], orientation[4], orientation[5]]).reshape(3,1)
    pixel_spacing_x = pixel_spacing[0]
    # Construct affine matrix
    # Method from:
    # http://nipy.org/nibabel/dicom/dicom_orientation.html
    T_1 = origins[0]
    T_n = origins[num_slices-1]
    affine_matrix[0,0] = image_orientation_y[0] * pixel_spacing[0]
    affine_matrix[0,1] = image_orientation_x[0] * pixel_spacing[1]
    affine_matrix[0,3] = T_1[0]
    affine_matrix[1,0] = image_orientation_y[1] * pixel_spacing[0]
    affine_matrix[1,1] = image_orientation_x[1] * pixel_spacing[1]
    affine_matrix[1,3] = T_1[1]
    affine_matrix[2,0] = image_orientation_y[2] * pixel_spacing[0]
    affine_matrix[2,1] = image_orientation_x[2] * pixel_spacing[1]
    affine_matrix[2,3] = T_1[2]
    affine_matrix[3,3] = 1
    k1 = (T_1[0] - T_n[0]) / (1 - num_slices)
    k2 = (T_1[1] - T_n[1]) / (1 - num_slices)
    k3 = (T_1[2] - T_n[2]) / (1 - num_slices)
    affine_matrix[:3, 2] = np.array([k1, k2, k3])
    for z in range(num_slices):
        for r in range(rows):
            for c in range(cols):
                vector = np.array([r, c, 0, 1]).reshape((4,1))
                result = np.matmul(affine_matrix, vector)
                result = np.delete(result, 3, axis=0)
                result = np.transpose(result)
                world_coordinates[r,c,z] = result
        # print "Finished slice ", str(z)
    # np.save('./data/saved/world_coordinates_3d.npy', str(world_coordinates))
    return world_coordinates
Now I'm at the point where I want to write this function:
def create_lh_histogram(patient_positions, dcm, magnitude, azimuthal, elevation):
    print "constructing LH histogram"
    # Get 2nd derivative
    second_derivative = gaussian_filter(magnitude, sigma=1, order=1)
    # Determine if voxels lie on boundary or not (thresholding)
    # Still have to code out: let's say the thresholded voxels are in
    # a numpy array called voxels
    # Iterate through all thresholded voxels and integrate gradient field in
    # both directions using 2nd-order Runge-Kutta
    vox_it = np.nditer(voxels, flags=['multi_index'])  # nditer is a numpy function, not an array method
    while not vox_it.finished:
        # ???
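The body of that loop reduces to a midpoint (second-order Runge-Kutta) step through the gradient field, with trilinear interpolation supplying gradient values at fractional positions. A sketch, assuming the gradx/grady/gradz arrays from the sobel code above are available (scipy's map_coordinates with order=1 is trilinear interpolation):

from scipy.ndimage import map_coordinates
import numpy as np

def sample_gradient(pos, gradx, grady, gradz):
    # Trilinearly interpolate each gradient component at a fractional position,
    # then normalize so each integration step has unit (one-voxel) length
    coords = np.asarray(pos, dtype=float).reshape(3, 1)
    g = np.array([map_coordinates(gradx, coords, order=1)[0],
                  map_coordinates(grady, coords, order=1)[0],
                  map_coordinates(gradz, coords, order=1)[0]])
    norm = np.linalg.norm(g)
    return g / norm if norm > 0 else g

def rk2_step(pos, gradx, grady, gradz, h=1.0):
    """One midpoint (2nd-order Runge-Kutta) step along the gradient field."""
    k1 = sample_gradient(pos, gradx, grady, gradz)
    mid = np.asarray(pos, dtype=float) + 0.5 * h * k1
    k2 = sample_gradient(mid, gradx, grady, gradz)
    return np.asarray(pos, dtype=float) + h * k2

Integrating "in both directions" then just means tracking the path with h=+1.0 and, from the same seed voxel, with h=-1.0, stopping when the gradient magnitude falls below a threshold; the intensities at the two endpoints are the (L, H) pair for the histogram. Note that with this formulation the angles are not needed explicitly: the interpolated gradient vector already encodes the direction they describe.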

How to solve...ValueError: cannot convert float NaN to integer

I'm running quite a complex piece of code, so I won't bother with details; I've had it working before, but now I'm getting this error.
Particle is a 3D array filled with 0 or 255. I am using the scipy center-of-mass function and then trying to turn each value into its closest integer (as I'm dealing with arrays). The error occurs on the last line. Can anyone explain why this might be?
The 2nd line fills Particle.
The 3rd line deletes any surrounding particles with a different label (this is in a for loop over all labels).
Particle = []
Particle = big_labelled_stack[x_start+20:x_stop+20, y_start+20:y_stop+20, z_start+20:z_stop+20]
Particle = np.where(Particle == i, 255, 0)
CoM = scipy.ndimage.measurements.center_of_mass(Particle)
CoM = [int(round(x)) for x in CoM]
Thanks in advance. If you need more code just ask, but I don't think it will help you, and it's very messy.
################## MORE CODE
border = 30
[labelled_stack, no_of_label] = label(labelled, structure_array, output_type)
# RE-LABEL particles now no. of seeds has been reduced! LAST LABELLING
# Increase the size of the stack by padding the borders with zeros, so we can cut
# particles out into cube shapes that might otherwise lie outside the border
h, w, l = labelled.shape
big_labelled_stack = np.zeros(shape=(h+60, w+60, l+60), dtype=np.uint32)
# Creates an empty border around labelled_stack full of zeros of size border
if (no_of_label > 0):  # A small sample may return no particles, so this stage is not always necessary
    info = np.zeros(shape=(no_of_label, 19))  # Creates array to store coordinates of particles
    for i in np.arange(1, no_of_label, 1):
        coordinates = find_objects(labelled_stack == i)[0]  # Find coordinates of label i.
        x_start = int(coordinates[0].start)
        x_stop = int(coordinates[0].stop)
        y_start = int(coordinates[1].start)
        y_stop = int(coordinates[1].stop)
        z_start = int(coordinates[2].start)
        z_stop = int(coordinates[2].stop)
        dx = (x_stop - x_start)
        dy = (y_stop - y_start)
        dz = (z_stop - z_start)
        Particle = np.zeros(shape=(dy, dx, dz), dtype=np.uint16)
        Particle = big_labelled_stack[x_start+30:x_start+dx+30, y_start+30:y_start+dy+30, z_start+30:z_start+dz+30]
        Particle = np.where(Particle == i, 255, 0)
        big_labelled_stack[border:h+border, border:w+border, border:l+border] = labelled_stack
        big_labelled_stack = np.where(big_labelled_stack == i, 255, 0)
        CoM_big_stack = scipy.ndimage.measurements.center_of_mass(big_labelled_stack)
        C = np.asarray(CoM_big_stack) - border
        if dx > dy:
            b = dx
        else:  # Finds the largest of delta_x,y,z and saves it as b, so that we create 'Cubic_Particle' of size 2bx2bx2b (cubic box)
            b = dy
        if dz > b:
            b = dz
        CoM = scipy.ndimage.measurements.center_of_mass(Particle)
        CoM = [int(round(x)) for x in CoM]
        Cubic_Particle = np.zeros(shape=(2*b, 2*b, 2*b))
        Cubic_Particle[(b-CoM[0]):(b+dx-CoM[0]), (b-CoM[1]):(b+dy-CoM[1]), (b-CoM[2]):(b+dz-CoM[2])] = Particle
        volume = Cubic_Particle.size  # Gives volume of the box in voxels
        info[i-1,:] = [C[0],C[1],C[2],i,C[0]-b,C[1]-b,C[2]-b,C[0]+b,C[1]+b,C[2]+b,volume,0,0,0,0,0,0,0,0]  # Fills an array with label No., size of box, and co-ords
else:
    print('No particles found, try increasing the sample size')
    info = []
OK, so I have a stack full of labelled particles, and there are two things I am trying to do. First, find the centre of mass of each particle with respect to the labelled_stack, which is what CoM_big_stack (and C) does, storing the coordinates in an array called info. Second, I am trying to create a cubic box around each particle, with its centre of mass as the centre (this is the CoM variable). So first I use the find_objects function in scipy to find a particle, then use these coordinates to create a non-cubic box around the particle and find its centre of mass. I then find the longest dimension of the box, call it b, create a cubic box of size 2b, and place the particle in the right position inside it.
Sorry this code is a mess, I am very new to Python.
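For what it's worth, center_of_mass returns (nan, nan, nan) whenever its input sums to zero - here, whenever the cropped box contains no voxels equal to label i - and int(round(nan)) is exactly what raises "cannot convert float NaN to integer". A minimal guard, sketched on the assumption that it sits inside the for loop over labels above:

import numpy as np
import scipy.ndimage

mask = (Particle == i)
if mask.any():
    CoM = scipy.ndimage.measurements.center_of_mass(mask)
    CoM = [int(round(c)) for c in CoM]
else:
    # Empty crop: no voxels carry label i here, so the centre of mass is
    # undefined (NaN). Skip this label rather than indexing with NaN.
    continue

An empty crop would also point at an upstream problem: in the loop above, Particle is cut out of big_labelled_stack before labelled_stack is copied into it, so on the first iteration the crop is all zeros.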

How to extract arbitrary 2D slice from 3D volume using Scipy?

I'm using SciPy to render planes from 3D data (a 200x200x200 volume).
I can specify the wanted plane by two vectors, or by a vector and an angle.
I want to extract such an arbitrary slice from this 3D volume.
I found how to do it in Matlab:
http://www.mathworks.com/help/techdoc/ref/slice.html
How do I do it in Scipy?
You can use scipy.ndimage.interpolation.rotate to rotate your 3D array to whatever angle you want (it uses spline interpolation), then you can take a slice out of it.
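A minimal sketch of that idea (the same function is now also exposed as scipy.ndimage.rotate; the angle, axes, and slice index below are placeholders, not values from the question):

import numpy as np
from scipy.ndimage import rotate

# Rotate the volume so the wanted plane becomes axis-aligned,
# then index out a single slice like any ordinary plane.
volume = np.random.rand(200, 200, 200)          # stand-in data
tilted = rotate(volume, angle=30, axes=(0, 2),  # spline interpolation, order=3
                reshape=False, order=3)
slice_2d = tilted[:, :, 100]                    # now an axis-aligned slice

This rotates the whole volume just to read one plane, so for repeated extractions the transform-based approach in the next answer (or scipy.ndimage.map_coordinates on just the plane's sample points) is cheaper.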
def extract_slice(data, triplet):
    """
    Algorithm:
    1. find intersections of the plane with the data box edges
    2. for these pts, find axis-oriented b-box
    3. find the "back" trans (A,T) from R2 to R3, like X' = AX + T
       use (0,0), (0,h), (w,0) which are easy to calculate
    4. use the trans (with trilinear-interpolation) for every value in the 2D (w,h) image
    """
I will release the code properly in a few months I believe as a part of this project.
I was addressing a similar task to the OP's, so I came up with this code, based on numpy (not scipy), to extract any given slice from a volume, given a position vector of any point of the plane and three orthogonal orientation vectors.
I apologise for the length of my answer, but given the complexity of the problem at hand I thought it would be better to give this amount of detail.
For my particular problem these vectors were defined in mm instead of pixels, so the spacing (i.e. the distance between two consecutive volume pixels in each direction) was also used as input. I have used a nearest-neighbour approach to interpolate the subpixel points of the slice.
reslice_volume (volume, spacing, o1, o2, n, pos)
The main steps behind this algorithm are as follows. Note that I use plane and slice interchangeably:
1. Get intersection lines between the desired plane and the bounds of the volume.
def PlaneBoundsIntersectionsLines(n, pos):
    """Outputs points and vectors defining the lines that the view creates by intersecting the volume's bounds.
    Input:
        Normal vector of the given plane and the coords of a point belonging to the plane.
    Output:
        normals_line, points_line
    """
    def intersectionPlanePlane(n1, p1, n2, p2):
        # Get direction of line
        nout = np.cross(n1.reshape((1,3)), n2.reshape((1,3))).reshape(3,1)
        nout = normalizeLength(nout)
        M = np.concatenate((n1.reshape(1,3), n2.reshape(1,3)), axis=0)
        b = np.zeros((2,1))
        # print(n1.shape, p1.shape)
        b[0,0] = np.dot(n1, p1)
        b[1,0] = np.dot(n2, p2)
        pout, resid, rank, s = np.linalg.lstsq(M, b, rcond=None)
        return pout, nout
    # ... For each face
    normalFaces = np.concatenate((np.eye(3,3), np.eye(3,3)), axis=1)
    pointsFaces = np.array([[0,0,0], [0,0,0], [0,0,0],
                            [379.9872, 379.9872, 169.5],
                            [379.9872, 379.9872, 169.5],
                            [379.9872, 379.9872, 169.5]]).transpose()
    points_line = np.zeros((3,6))
    normals_line = np.zeros((3,6))
    for face in range(6):
        n1 = normalFaces[:,face].reshape(3,)
        p1 = pointsFaces[:,face].reshape(3,)
        pout, nout = intersectionPlanePlane(n1, p1, n, pos)
        points_line[:,face] = pout.reshape((3,))
        normals_line[:,face] = nout.reshape((3,))
    return normals_line, points_line
2. Get intersection points between these lines that are close enough to the borders of the volume to be considered corners of the intersection between the plane and the volume.
def FindPlaneCorners(normals_line, points_line):
    """Outputs the points defined by the intersection of the input lines that
    are close enough to the borders of the volume to be considered corners of the view plane.
    Input:
        Points and vectors defining lines
    Output:
        p_intersection, intersecting_lines
    """
    def intersectionLineLine(Up, P0, Uq, Q0):
        # Computes the closest point between two lines
        # Must be column points
        b = np.zeros((2,1))
        b[0,0] = -np.dot((P0-Q0), Up)
        b[1,0] = -np.dot((P0-Q0), Uq)
        A = np.zeros((2,2))
        A[0,0] = np.dot(Up, Up)
        A[0,1] = np.dot(-Uq, Up)
        A[1,0] = np.dot(Up, Uq)
        A[1,1] = np.dot(-Uq, Uq)
        # tolerance written as 1e-10: in Python ^ is bitwise XOR, not a power
        if np.abs(np.linalg.det(A)) < 1e-10:
            point = np.array([np.nan, np.nan, np.nan]).reshape(3,1)
        else:
            lbd, resid, rank, s = np.linalg.lstsq(A, b, rcond=None)
            # print(lbd)
            P1 = P0 + lbd[0]*Up
            Q1 = Q0 + lbd[1]*Uq
            point = (P1+Q1)/2
        return point
    # ... ... Get the closest point for every possible pair of lines and select only the ones inside the box
    npts = 0
    p_intersection = []
    intersecting_lines = []
    # ... Get all possible pairs of lines
    possible_pairs = np.array(list(itertools.combinations(np.linspace(0,5,6), 2)))
    for pair in possible_pairs:
        k = int(pair[0])
        j = int(pair[1])
        Up = normals_line[:,k]
        P0 = points_line[:,k]
        Uq = normals_line[:,j]
        Q0 = points_line[:,j]
        closest_point = intersectionLineLine(Up, P0, Uq, Q0)
        epsilon = 2.2204e-10
        # ... ... Is the point inside the volume?
        if closest_point[0] <= 379.9872 + epsilon and closest_point[0] >= 0 - epsilon and \
           closest_point[1] <= 379.9872 + epsilon and closest_point[1] >= 0 - epsilon and \
           closest_point[2] <= 169.5 + epsilon and closest_point[2] >= 0 - epsilon:
            # ... ... Is it close to the border? 25 mm?
            th = 25
            if 379.9872 - closest_point[0] <= th or closest_point[0] - 0 <= th or \
               379.9872 - closest_point[1] <= th or closest_point[1] - 0 <= th or \
               169.5 - closest_point[2] <= th or closest_point[2] - 0 <= th:
                # print('It is close to the border')
                npts += 1
                p_intersection.append(closest_point)
                intersecting_lines.append([k,j])
    p_intersection = np.array(p_intersection).transpose()
    return p_intersection, intersecting_lines
3. Transform the points found into the slice's reference frame (sRF) (we can center the RF arbitrarily within the slice plane).
dim = volume.shape
# ... Get intersection lines between plane and volume bounds
normals_line, points_line = PlaneBoundsIntersectionsLines(n, pos)
# ... Get intersections between generated lines to get corners of view plane
p_intersection, intersecting_lines = FindPlaneCorners(normals_line, points_line)
# ... Calculate parameters of the 2D slice
# ... ... Get corners of slice from volume RF (vRF) to slice RF (sRF) - in this case centered in the middle of the slice
# ... ... ... Define T_vrf2srf
Pose_slice_vrf = M_creater(o1, o2, n, pos)
# ... ... ... Apply transform
p_intersection_slicerf = np.zeros(p_intersection.shape)
for corner in range(p_intersection.shape[1]):
    pt_arr = np.concatenate((p_intersection[:,corner], np.ones((1,))), axis=0).reshape((4,1))
    p_intersection_slicerf[:,corner] = np.matmul(np.linalg.inv(Pose_slice_vrf), pt_arr)[:-1].reshape((3,))
4. Get the minimum x and y coordinates across these points and define a corner point that will be used as the origin of the plane/slice. Transform this origin point back to the volume's RF (vRF) and define a new transform matrix that shifts the RF from the vRF to an sRF now centered on said origin point.
5. From these in-slice point coordinates we can determine the size of the slice and then use it to generate all possible in-slice indexes of the target slice.
# ... ... Get slice size based on corners and spacing
spacing_slice = [1,1,8]
min_bounds_slice_xy = np.min(p_intersection_slicerf,axis=1)
max_bounds_slice_xy = np.max(p_intersection_slicerf,axis=1)
size_slice_x = int(np.ceil((max_bounds_slice_xy[0] - min_bounds_slice_xy[0] - 1e-6) / spacing_slice[0]))
size_slice_y = int(np.ceil((max_bounds_slice_xy[1] - min_bounds_slice_xy[1] - 1e-6) / spacing_slice[1]))
slice_size = [size_slice_x, size_slice_y, 1]
print('slice_size')
print(slice_size)
# ... ... Get corner in slice coords and redefine transform mat - make corner origin of the slice
origin_corner_slice = np.array([min_bounds_slice_xy[0],min_bounds_slice_xy[1],0])
pt_arr = np.concatenate((origin_corner_slice,np.ones((1,))) ,axis = 0).reshape((4,1))
origin_corner_slice_vrf = np.matmul(Pose_slice_vrf, pt_arr)[:-1].reshape((3,))
Pose_slice_origin_corner_vrf = M_creater(o1,o2,n,origin_corner_slice_vrf)
# ... ... Get every possible inslice coordinates
xvalues = np.linspace(0,size_slice_x-1,size_slice_x)
yvalues = np.linspace(0,size_slice_y-1,size_slice_y)
zvalues = np.linspace(0,0,1)
xx, yy = np.meshgrid(xvalues, yvalues)
xx = xx.transpose()
yy = yy.transpose()
zz = np.zeros(xx.shape)
inslice_coords = np.concatenate((xx.reshape(-1,1), yy.reshape(-1,1), zz.reshape(-1,1)), axis = 1)
6. Next step is to use the newly defined transform matrix (step 4) to map every possible inslice index to the volume's reference frame.
# ... ... Map every point of slice into volume's RF
inslice_coords_vrf = np.zeros(inslice_coords.shape)
for coord_set in range(inslice_coords.shape[0]):
    pt_arr = np.concatenate((inslice_coords[coord_set,:], np.ones((1,))), axis=0).reshape((4,1))
    inslice_coords_vrf[coord_set,:] = np.matmul(Pose_slice_origin_corner_vrf, pt_arr)[:-1].reshape((3,))
7. We now have all the vRF coordinates that the slice encompasses, which should be converted into pixel values by dividing them by the respective spacing. At this step we end up with non-integer pixel values, as the slice passes through subpixel locations of the volume. We round each pixel value to its nearest integer - nearest-neighbour interpolation.
# ... ... ... Convert to pixel coord - here we used the resampled spacing
inslice_coords_vrf_px = inslice_coords_vrf.copy()
inslice_coords_vrf_px[:,0] = inslice_coords_vrf[:,0] / spacing[0]
inslice_coords_vrf_px[:,1] = inslice_coords_vrf[:,1] / spacing[1]
inslice_coords_vrf_px[:,2] = inslice_coords_vrf[:,2] / spacing[2]
# ... ... Interpolate pixel value at each mapped point - nearest neighbour int
# ... ... ... Convert pixel value to its closest existing value in the volume
inslice_coords_vrf_px = np.round(inslice_coords_vrf_px, 0).astype(int)
8. Next, we determine which pixels of the slice are actually within the bounds of the volume and get their values. Pixels outside volume are padded to 0.
# ... ... Find slice voxels within volume bounds
in_mask = np.zeros((inslice_coords_vrf_px.shape[0], 1))
idx_in = []
for vox in range(in_mask.shape[0]):
    if not np.any(inslice_coords_vrf_px[vox,:] < 0) and \
       inslice_coords_vrf_px[vox,0] < dim[0] and \
       inslice_coords_vrf_px[vox,1] < dim[1] and \
       inslice_coords_vrf_px[vox,2] < dim[2]:
        in_mask[vox] = 1
        idx_in.append(vox)
idx_in = np.array(idx_in)
# ... ... Get pixel value from volume based on interpolated pixel indexes
extracted_slice = np.zeros((inslice_coords_vrf_px.shape[0], 1))
for point in range(inslice_coords_vrf_px.shape[0]):
    if point in idx_in:
        vol_idx = inslice_coords_vrf_px[point,:]
        extracted_slice[point] = volume[vol_idx[0], vol_idx[1], vol_idx[2]]
# ... ... Reshape to slice shape
extracted_slice = extracted_slice.reshape((slice_size[0], slice_size[1]))
I added a plot for extra clarity. Here the volume is defined by the bounding box in black; line intersections of the slice with the planes defined by the faces of the box/volume are in dotted orange; the intersection points between the previous lines are in blue. Points in pink belong to the slice, and the orange ones belong to the slice and are within the volume.
In my case I was dealing with MRI volumes, so as an example I added my resulting slice from the volume.
