3D world to 2D image transformation with equidistant camera model - python

I'm currently implementing a simulation in which a 3D point needs to be transformed to a fisheye image, equivalent to Blender's 'PANO' camera with the equidistant camera model. Does anyone know where I can find the sources (scientific papers, or even technical reports) of Blender's implementation of the 3D→2D transformation?
In detail, I'm looking for:
Transformation from world to camera coordinate system
Transformation from the camera coordinate system (Cartesian) to the camera coordinate system (spherical). This is necessary to project the point onto the unit sphere before projecting it onto the image plane
Transformation from the spherical camera coordinate system to the sensor plane
"Normalization", i.e. shifting the origin from the principal point to the top-left corner of the sensor
Why this question? While the transformation for a perspective camera is natively available in the bpy_extras module, the transformation for the equidistant camera model is not.
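For context, my current understanding of the equidistant model is that the point is projected onto the unit sphere, and the angle θ to the optical axis then maps linearly to the radial distance on the sensor (r = f·θ, or r = θ/FOV in normalized coordinates). Below is a minimal sketch of that mapping, assuming the forward axis is +X and the normalized sensor spans [0, 1]²; this follows what I believe Cycles' direction_to_fisheye helper does, but I have not been able to confirm it against the sources, which is why I'm asking:

import numpy as np

def equidistant_project(X_cam, fov, res_w, res_h):
    # Sketch only: equidistant ('ideal fisheye') projection of a camera-space
    # point, assuming the camera looks along +X; not taken from Blender's code.
    x, y, z = X_cam
    theta = np.arctan2(np.sqrt(y * y + z * z), x)  # angle to the optical axis
    phi = np.arctan2(z, y)                         # azimuth around the axis
    r = theta / fov                                # equidistant: r proportional to theta
    u = r * np.cos(phi) + 0.5                      # normalized image coordinates in [0, 1]
    v = r * np.sin(phi) + 0.5
    return u * res_w, v * res_h                    # pixel coordinates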
The code I've written is below. Currently the resulting image differs from the result in Blender; any suggestions why?
main.py:
#!/usr/bin/env python3
import numpy as np
np.set_printoptions(precision=4, suppress=True)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import plotting
def compute_R(alpha_deg, beta_deg, gamma_deg):
    ## Rotation matrix R (code from CV2 exercise)
    # convert into radians
    alpha = alpha_deg * np.pi / 180
    beta = beta_deg * np.pi / 180
    gamma = gamma_deg * np.pi / 180
    # prepare sine and cosine values
    ca = np.cos(alpha)
    sa = np.sin(alpha)
    cb = np.cos(beta)
    sb = np.sin(beta)
    cg = np.cos(gamma)
    sg = np.sin(gamma)
    # rotation about x axis
    R_x = np.array([
        [1,  0,   0],
        [0, ca, -sa],
        [0, sa,  ca],
    ], dtype=float)
    # rotation about y axis
    R_y = np.array([
        [ cb, 0, sb],
        [  0, 1,  0],
        [-sb, 0, cb]
    ], dtype=float)
    # rotation about z axis
    R_z = np.array([
        [cg, -sg, 0],
        [sg,  cg, 0],
        [ 0,   0, 1]
    ], dtype=float)
    # compose R = R_z * R_y * R_x
    R = R_z.dot(R_y.dot(R_x))
    return R
def cart2spherical_wiki(X_cam):
    ## from https://en.wikipedia.org/wiki/Spherical_coordinate_system
    r = np.sqrt(X_cam[0]**2 + X_cam[1]**2 + X_cam[2]**2)
    print(f'manual_{r=}')
    #r = np.linalg.norm(X_cam, axis=0)
    #print(f'np.linalg_{r=}')
    theta = np.arccos(X_cam[2] / r)
    phi = -np.arctan2(X_cam[1], X_cam[0])
    return theta, phi
def cart2spherical_blender(X_cam, fov):
    # can be ignored for the moment
    r = np.arctan2(np.sqrt(X_cam[1] * X_cam[1] + X_cam[2] * X_cam[2]), X_cam[0]) / fov
    phi = np.arctan2(X_cam[2], X_cam[1])
    return r, phi
def spherical2normimg(f_x, f_y, theta, phi):
    # from the PhD thesis by Michel
    x_img_norm = f_x * theta * np.cos(phi)
    y_img_norm = f_y * theta * np.sin(phi)
    #print(x_img_norm)
    #print(y_img_norm)
    return x_img_norm, y_img_norm
def spherical2normimg_blender(r, phi):
    # can be ignored for the moment
    x_img_norm = r * np.cos(phi) + 0.5
    y_img_norm = r * np.sin(phi) + 0.5
    return x_img_norm, y_img_norm
def points_to_image(x, y, resolution):
    # The sensor point (0, 0) lies at position (w/2, h/2) in the image,
    # therefore add half the image resolution.
    # (x, y)         - sensor points
    # (x_new, y_new) - image points
    x_new = x + resolution[0]//2
    y_new = y + resolution[1]//2
    return x_new, y_new
def main():
    # plot input/output?
    plot = 'True'
    ## World points
    X_wrld = np.array([
        [1.5, 5, 1],  # right foot
        [2.5, 5, 1],  # left foot
        [2,   5, 3],  # hip
        [1,   5, 5],  # right hand
        [3,   5, 5]   # left hand
    ])
    ## Camera
    C = np.array([2, 1, 4]).transpose()  # camera projection center in world coordinates
    ## Plot world with 3D points and camera
    fig = plt.figure()
    if plot == 'True':
        title = 'World coordinate system: 3D Points and Camera'
        plt1 = plotting.plot_world(X_wrld, C, elev=0, azim=-90, roll=0, title=title, fig=fig)
    # transpose X_wrld
    X_wrld = X_wrld.T
    # add ones for homogeneous representation
    row_ones = np.ones([1, X_wrld.shape[1]])
    X_wrld_hom = np.concatenate([X_wrld, row_ones], axis=0)
    #print(f'{X_wrld_hom=}')
    ## Use rotation matrix R and camera center C to build the 4x4 extrinsic matrix H
    # orientation / rotation
    alpha_deg = 0    # first: rotate about alpha degrees around the camera's x-axis
    beta_deg = 270   # second: rotate about beta degrees around the camera's y-axis
    gamma_deg = 0    # third: rotate about gamma degrees around the camera's z-axis
    # compute 3x3 rotation matrix
    R = compute_R(alpha_deg, beta_deg, gamma_deg)
    #print(f'{R=}')
    # build the 4x4 matrix H = [R | -RC; 0 0 0 1]; RC is 3x1
    RC = R.dot(C)
    H = np.column_stack((R, -RC))
    H = np.row_stack((H, [0, 0, 0, 1]))
    #print(f'{H=}')
    # transformation from world to camera coordinates
    X_cam = H.dot(X_wrld_hom)
    print(f'{X_cam=}')
    if plot == 'True':
        title = f'camera coordinate system: camera rotation of {alpha_deg=}, {beta_deg=}, {gamma_deg=}'
        #plt2 = plotting.plot_camera(X_cam=X_cam.T, C=[0, 0, 0], elev=180, azim=90, roll=90, title=title, fig=fig)
        plt2 = plotting.plot_camera(X_cam=X_cam.T, C=[0, 0, 0], elev=alpha_deg, azim=beta_deg, roll=gamma_deg, title=title, fig=fig)
    ### Intrinsics ###
    # focal length of a Canon FD 1:5.6 / 7.5 mm fisheye lens (see, e.g., https://de.wikipedia.org/wiki/Fischaugenobjektiv)
    f_mm = 0.0075  # 7.5 mm, given in metres
    # field of view
    fov = 180 * np.pi / 180
    # full-frame sensor, e.g. https://de.wikipedia.org/wiki/Sony_Alpha_1
    chip_w = 35.7 * 1e-3  # 35.7 mm
    chip_h = 23.8 * 1e-3  # 23.8 mm
    # resolution of the camera
    res_w = 8640  # px
    res_h = 5760  # px
    # pixel densities
    m_x = res_w / chip_w  # px/m
    m_y = res_h / chip_h  # px/m
    # focal length expressed in pixels
    f_x = f_mm * m_x  # f_x != f_y if the sensor pixels are not square
    f_y = f_mm * m_y
    ### Intrinsics end ###
    ## From camera coordinates (Cartesian) to spherical camera coordinates (a) Wiki
    theta, phi = cart2spherical_wiki(X_cam)
    ## From spherical camera coordinates to normalized image coordinates (a) Wiki
    x_img_norm, y_img_norm = spherical2normimg(f_x, f_y, theta, phi)
    # camera to spherical, from the Blender implementation
    #r, phi = cart2spherical_blender(X_cam, fov)
    # spherical to normalized image coordinates, from the Blender code
    #x_img_norm, y_img_norm = spherical2normimg_blender(r, phi)
    if plot == 'True':
        title = 'Sensor coordinate system: Camera points on sensor plane'
        plt3 = plotting.plot_sensor(x_img_norm=x_img_norm, y_img_norm=y_img_norm, title=title, fig=fig)
    # from normalized image coordinates to image coordinates by shifting the origin to the top-left corner of the sensor
    x_img, y_img = points_to_image(x_img_norm, y_img_norm, (res_w, res_h))
    if plot == 'True':
        title = 'Image coordinate system: points on image plane shifted to left top corner'
        plotting.plot_image(x_img, y_img, (0, res_w), (0, res_h), title, fig)
    plt.show()

if __name__ == "__main__":
    main()
plotting.py:
#!/usr/bin/env python3
import numpy as np
np.set_printoptions(precision=4, suppress=True)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
def plot_world(X_wrld, C, elev, azim, roll, title, fig):
    plt1 = fig.add_subplot(2, 2, 1, projection='3d')
    colors = ['r', 'b', 'g', 'm', 'y']
    labels = ['right foot', 'left foot', 'hip', 'right hand', 'left hand']
    for X, Y, Z, c, l in zip(X_wrld[:, 0], X_wrld[:, 1], X_wrld[:, 2], colors, labels):
        plt1.plot3D(X, Y, Z, c=c, marker=".", linestyle='None', label=l)
    plt1.plot3D(C[0], C[1], C[2], c="red", marker="v", linestyle='None', label="camera")
    plt1.set(xlabel='X', ylabel='Y', zlabel='Z')
    plt1.set_xlim(0, 4)
    plt1.set_ylim(0, 6)
    plt1.set_zlim(0, 6)
    plt1.legend()
    plt1.set_title(title)
    plt1.view_init(elev=elev, azim=azim, roll=roll, vertical_axis='z')
    return plt1

def plot_camera(X_cam, C, elev, azim, roll, title, fig):
    plt2 = fig.add_subplot(2, 2, 2, projection='3d')
    colors = ['r', 'b', 'g', 'm', 'y']
    labels = ['right foot', 'left foot', 'hip', 'right hand', 'left hand']
    for X, Y, Z, c, l in zip(X_cam[:, 0], X_cam[:, 1], X_cam[:, 2], colors, labels):
        plt2.plot3D(X, Y, Z, c=c, marker=".", linestyle='None', label=l)
    plt2.plot3D(C[0], C[1], C[2], c="red", marker="v", linestyle='None', label="camera")
    plt2.set(xlabel='X', ylabel='Y', zlabel='Z')
    plt2.legend()
    plt2.set_title(title)
    plt2.view_init(elev=elev, azim=azim, roll=roll, vertical_axis='z')
    return plt2

def plot_sensor(x_img_norm, y_img_norm, title, fig):
    plt3 = fig.add_subplot(2, 2, 3)
    colors = ['r', 'b', 'g', 'm', 'y']
    labels = ['right foot', 'left foot', 'hip', 'right hand', 'left hand']
    for x, y, c, l in zip(x_img_norm, y_img_norm, colors, labels):
        plt3.plot(x, y, c=c, marker=".", linestyle='None', label=l)
    plt3.set_title(title)
    return plt3

def plot_image(x_img_norm, y_img_norm, limit_x, limit_y, title, fig):
    plt4 = fig.add_subplot(2, 2, 4)
    colors = ['r', 'b', 'g', 'm', 'y']
    labels = ['right foot', 'left foot', 'hip', 'right hand', 'left hand']
    for x, y, c, l in zip(x_img_norm, y_img_norm, colors, labels):
        plt4.plot(x, y, marker='.', label=l, linestyle='None', c=c)
    plt4.set(xlim=limit_x, ylim=limit_y)
    plt4.set_title(title)
    plt4.legend()
    return plt4
The blend file, in which I have tried to simulate the same five points and render them with an equidistant camera, can be downloaded in the following post:
[1] https://blender.stackexchange.com/q/280271/158481
I've tried different combinations of rotations (R_x, R_y, R_z), see compute_R in the script above. I've compared a rendered output from Blender with the output of my simulation; the two differ by a reflection on the vertical axis in the image coordinate system.
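What I still plan to check (this is only my own sketch, not something verified against Blender): if the remaining difference really is just a mirror image, then flipping one image coordinate before the comparison should make the two outputs line up, e.g.:

# hypothetical check, not verified against Blender: if the rendered and simulated
# images differ only by a mirror, one of these two flips should reconcile them
x_img_flipped = res_w - x_img   # mirror about the vertical image axis
y_img_flipped = res_h - y_img   # mirror about the horizontal image axis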

Related

How to Create 3D Torus from Circle Revolved about x=2r, r is the radius of circle (Python or JULIA)

I need help creating a torus from a circle by revolving it about x = 2r, where r is the radius of the circle.
I am open to either Julia or Python code, whichever solves my problem most efficiently.
I have Julia code to plot the circle and the axis of revolution x = 2r.
using Plots, LaTeXStrings, Plots.PlotMeasures
gr()
θ = 0:0.1:2.1π
x = 0 .+ 2cos.(θ)
y = 0 .+ 2sin.(θ)
plot(x, y, label=L"x^{2} + y^{2} = a^{2}",
framestyle=:zerolines, legend=:outertop)
plot!([4], seriestype="vline", color=:green, label="x=2a")
I want to create a torus out of it but have been unable to; meanwhile, I have the following Python code for a solid of revolution:
# Calculate the surface area of y = sqrt(r^2 - x^2)
# revolved about the x-axis
import matplotlib.pyplot as plt
import numpy as np
import sympy as sy
x = sy.Symbol("x", nonnegative=True)
r = sy.Symbol("r", nonnegative=True)
def f(x):
    return sy.sqrt(r**2 - x**2)

def fd(x):
    return sy.simplify(sy.diff(f(x), x))

def f2(x):
    return sy.sqrt((1 + (fd(x)**2)))

def vx(x):
    return 2*sy.pi*(f(x)*sy.sqrt(1 + (fd(x) ** 2)))
vxi = sy.Integral(vx(x), (x, -r, r))
vxf = vxi.simplify().doit()
vxn = vxf.evalf()
n = 100
fig = plt.figure(figsize=(14, 7))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222, projection='3d')
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224, projection='3d')
# 1 is the starting point. The first 3 is the end point.
# The last 200 is the number of discretization points.
# help(np.linspace) to read its documentation.
x = np.linspace(1, 3, 200)
# Plot the circle
y = np.sqrt(2 ** 2 - x ** 2)
t = np.linspace(0, np.pi * 2, n)
xn = np.outer(x, np.cos(t))
yn = np.outer(x, np.sin(t))
zn = np.zeros_like(xn)
for i in range(len(x)):
    zn[i:i + 1, :] = np.full_like(zn[0, :], y[i])
ax1.plot(x, y)
ax1.set_title("$f(x)$")
ax2.plot_surface(xn, yn, zn)
ax2.set_title("$f(x)$: Revolution around $y$")
# find the inverse of the function
y_inverse = x
x_inverse = np.power(2 ** 2 - y_inverse ** 2, 1 / 2)
xn_inverse = np.outer(x_inverse, np.cos(t))
yn_inverse = np.outer(x_inverse, np.sin(t))
zn_inverse = np.zeros_like(xn_inverse)
for i in range(len(x_inverse)):
    zn_inverse[i:i + 1, :] = np.full_like(zn_inverse[0, :], y_inverse[i])
ax3.plot(x_inverse, y_inverse)
ax3.set_title("Inverse of $f(x)$")
ax4.plot_surface(xn_inverse, yn_inverse, zn_inverse)
ax4.set_title("$f(x)$: Revolution around $x$ \n Surface Area = {}".format(vxn))
plt.tight_layout()
plt.show()
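For reference, the torus surface can also be generated directly from its parametric equations x = (R + r cos v) cos u, y = (R + r cos v) sin u, z = r sin v. Below is a minimal matplotlib sketch (not from the original post); it assumes major radius R = 4 (the distance from the circle's centre to the axis x = 2a) and minor radius r = 2 (the circle's radius a):

import numpy as np
import matplotlib.pyplot as plt

# torus from its parametric equations (sketch)
R, r = 4.0, 2.0
u, v = np.meshgrid(np.linspace(0, 2 * np.pi, 100), np.linspace(0, 2 * np.pi, 100))
x = (R + r * np.cos(v)) * np.cos(u)
y = (R + r * np.cos(v)) * np.sin(u)
z = r * np.sin(v)

fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.plot_surface(x, y, z)
ax.set_box_aspect((1, 1, 0.4))  # keep the torus from looking squashed
plt.show()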
Here is a way that actually allows rotating any figure in the XY plane around the Y axis.
"""
Rotation of a figure in the XY plane about the Y axis:
ϕ = angle of rotation
z' = z * cos(ϕ) - x * sin(ϕ)
x' = z * sin(ϕ) + x * cos(ϕ)
y' = y
"""
using Plots
# OP definition of the circle, but we put center at x, y of 4, 0
# for the torus, otherwise we get a bit of a sphere
θ = 0:0.1:2.1π
x = 4 .+ 2cos.(θ) # center at (s, 0, 0)
y = 0 .+ 2sin.(θ)
# add the original z values as 0
z = zeros(length(x))
plot(x, y, z, color=:red)
# add the rotation axis
ϕ = 0:0.1:π/2 # for full torus use 2π at stop of range
xprime, yprime, zprime = Float64[], Float64[], Float64[]
for a in ϕ, i in eachindex(θ)
    push!(zprime, z[i] * cos(a) - x[i] * sin(a))
    push!(xprime, z[i] * sin(a) + x[i] * cos(a))
    push!(yprime, y[i])
end
plot!(xprime, yprime, zprime, alpha=0.3, color=:green)
Here is a way using the Meshes package for the construction of the mesh and the MeshViz package for the visualization. You'll just have to translate it to fulfill your desiderata.
using Meshes
using MeshViz
using LinearAlgebra
using GLMakie
# revolution of the polygon defined by (x,y) around the z-axis
# x and y have the same length
function revolution(x, y, n)
    u_ = LinRange(0, 2*pi, n+1)[1:n]
    j_ = 1:(length(x) - 1) # subtract 1 because of periodicity
    function f(u, j)
        return [x[j] * sin(u), x[j] * cos(u), y[j]]
    end
    points = [f(u, j) for u in u_ for j in j_]
    topo = GridTopology((length(j_), n), (true, true))
    return SimpleMesh(Meshes.Point.(points), topo)
end
# define the section to be rotated: a circle
R = 3 # major radius
r = 1 # minor radius
ntheta = 100
theta_ = LinRange(0, 2*pi, ntheta)
x = [R + r*cos(theta) for theta in theta_]
y = [r*sin(theta) for theta in theta_]
# make mesh
mesh = revolution(x, y, 100)
# visualize mesh
viz(mesh)
EDIT: animation
using Meshes
using MeshViz
using LinearAlgebra
using GLMakie
using Makie
using Printf
function revolutionTorus(R, r, alpha; n1=30, n2=90)
    theta_ = LinRange(0, 2, n1+1)[1:n1]
    x = [R + r*cospi(theta) for theta in theta_]
    y = [r*sinpi(theta) for theta in theta_]
    full = alpha == 2
    u_ = LinRange(0, alpha, n2 + full)[1:n2]
    function f(u, j)
        return [x[j] * sinpi(u), x[j] * cospi(u), y[j]]
    end
    points = [f(u, j) for u in u_ for j in 1:n1]
    topo = GridTopology((n1, n2 - !full), (true, full))
    return SimpleMesh(Meshes.Point.(points), topo)
end
# generates `nframes` meshes for alpha = 0 -> 2 (alpha is a multiple of pi)
R = 3
r = 1
nframes = 10
alpha_ = LinRange(0, 2, nframes+1)[2:(nframes+1)]
meshes = [revolutionTorus(R, r, alpha) for alpha in alpha_]
# draw and save the frames in a loop
for i in 1:nframes
    # make a bounding box so that all frames have the same aspect
    fig, ax, plt =
        viz(Meshes.Box(Meshes.Point(-4.5, -4.5, -2.5), Meshes.Point(4.5, 4.5, 2.5)); alpha = 0)
    ax.show_axis = false
    viz!(meshes[i])
    scale!(ax.scene, 1.8, 1.8, 1.8)
    png = @sprintf "revolutionTorus%02d.png" i
    Makie.save(png, fig)
end
# make GIF with ImageMagick
comm = @cmd "convert -delay 1x2 'revolutionTorus*.png' revolutionTorus.gif"
run(comm)

Drawing an arc tangent to two lines segments in Python

I'm trying to draw an arc of n number of steps between two points so that I can bevel a 2D shape. This image illustrates what I'm looking to create (the blue arc) and how I'm trying to go about it:
move by the radius away from the target point (red)
get the normals of those lines
get the intersections of the normals to find the center of the circle
Draw an arc between those points from the circle's center
This is what I have so far:
As you can see, the circle is not tangent to the line segments. I think my approach may be flawed in assuming that the two points used for the normal lines should be moved along the segments by the circle's radius. Can anyone tell me where I am going wrong and how I might find this arc of points? Here is my code:
import matplotlib.pyplot as plt
import numpy as np
#https://stackoverflow.com/questions/51223685/create-circle-tangent-to-two-lines-with-radius-r-geometry
def travel(dx, x1, y1, x2, y2):
    a = {"x": x2 - x1, "y": y2 - y1}
    mag = np.sqrt(a["x"]*a["x"] + a["y"]*a["y"])
    if mag == 0:
        a["x"] = a["y"] = 0
    else:
        a["x"] = a["x"]/mag*dx
        a["y"] = a["y"]/mag*dx
    return [x1 + a["x"], y1 + a["y"]]
def plot_line(line, color="go-", label=""):
    plt.plot([p[0] for p in line],
             [p[1] for p in line], color, label=label)
def line_intersection(line1, line2):
    xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])
    ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])

    def det(a, b):
        return a[0] * b[1] - a[1] * b[0]

    div = det(xdiff, ydiff)
    if div == 0:
        raise Exception('lines do not intersect')
    d = (det(*line1), det(*line2))
    x = det(d, xdiff) / div
    y = det(d, ydiff) / div
    return x, y
line_segment1 = [[1,1],[4,8]]
line_segment2 = [[4,8],[8,8]]
line = line_segment1 + line_segment2
plot_line(line,'k-')
radius = 2
l1_x1 = line_segment1[0][0]
l1_y1 = line_segment1[0][1]
l1_x2 = line_segment1[1][0]
l1_y2 = line_segment1[1][1]
new_point1 = travel(radius, l1_x2, l1_y2, l1_x1, l1_y1)
l2_x1 = line_segment2[0][0]
l2_y1 = line_segment2[0][1]
l2_x2 = line_segment2[1][0]
l2_y2 = line_segment2[1][1]
new_point2 = travel(radius, l2_x1, l2_y1, l2_x2, l2_y2)
plt.plot(line_segment1[1][0], line_segment1[1][1],'ro',label="Point 1")
plt.plot(new_point2[0], new_point2[1],'go',label="radius from Point 1")
plt.plot(new_point1[0], new_point1[1],'mo',label="radius from Point 1")
# normal 1
dx = l1_x2 - l1_x1
dy = l1_y2 - l1_y1
normal_line1 = [[new_point1[0] - dy, new_point1[1] + dx], [new_point1[0] + dy, new_point1[1] - dx]]
plot_line(normal_line1,'m',label="normal 1")
# normal 2
dx2 = l2_x2 - l2_x1
dy2 = l2_y2 - l2_y1
normal_line2 = [[new_point2[0] - dy2, new_point2[1] + dx2], [new_point2[0] + dy2, new_point2[1] - dx2]]
plot_line(normal_line2,'g',label="normal 2")
x, y = line_intersection(normal_line1,normal_line2)
plt.plot(x, y,'bo',label="intersection") #'blue'
theta = np.linspace(0, 2 * np.pi, 150)
a = x + radius * np.cos(theta)
b = y + radius * np.sin(theta)
plt.plot(a, b)
plt.legend()
plt.axis('square')
plt.show()
Thanks a lot!
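For reference, here is a sketch (my own, not taken from the post or the answers below) of the standard tangent-arc construction: for two segments meeting at a corner with opening angle θ, the tangent points lie at distance r/tan(θ/2) from the corner (not at distance r), and the circle centre lies at distance r/sin(θ/2) along the angle bisector.

import numpy as np

def fillet_arc(p_prev, corner, p_next, radius, steps=20):
    # Sketch of an arc of the given radius tangent to the segments
    # corner->p_prev and corner->p_next.
    p_prev, corner, p_next = (np.asarray(p, dtype=float) for p in (p_prev, corner, p_next))
    u1 = (p_prev - corner) / np.linalg.norm(p_prev - corner)  # unit vector along first segment
    u2 = (p_next - corner) / np.linalg.norm(p_next - corner)  # unit vector along second segment
    theta = np.arccos(np.clip(np.dot(u1, u2), -1.0, 1.0))     # opening angle at the corner
    t1 = corner + u1 * radius / np.tan(theta / 2)             # tangent point on first segment
    t2 = corner + u2 * radius / np.tan(theta / 2)             # tangent point on second segment
    bisector = (u1 + u2) / np.linalg.norm(u1 + u2)
    centre = corner + bisector * radius / np.sin(theta / 2)   # circle centre
    a1 = np.arctan2(t1[1] - centre[1], t1[0] - centre[0])     # start angle
    a2 = np.arctan2(t2[1] - centre[1], t2[0] - centre[0])     # end angle
    if a2 - a1 > np.pi:                                       # sweep the short way round
        a2 -= 2 * np.pi
    elif a1 - a2 > np.pi:
        a2 += 2 * np.pi
    angles = np.linspace(a1, a2, steps)
    return centre + radius * np.stack([np.cos(angles), np.sin(angles)], axis=1)

# hypothetical usage with the segments from the question:
# arc = fillet_arc([1, 1], [4, 8], [8, 8], radius=2)
# plt.plot(arc[:, 0], arc[:, 1])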
You could try making a Bezier curve, like in this example. A basic implementation might be:
import matplotlib.path as mpath
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
Path = mpath.Path
fig, ax = plt.subplots()
# roughly equivalent of your purple, red and green points
points = [(3, 6.146), (4, 8), (6, 8.25)]
pp1 = mpatches.PathPatch(
    Path(points, [Path.MOVETO, Path.CURVE3, Path.CURVE3]),
    fc="none",
    transform=ax.transData
)
ax.add_patch(pp1)
# lines between points
ax.plot([points[0][0], points[1][0]], [points[0][1], points[1][1]], 'b')
ax.plot([points[1][0], points[2][0]], [points[1][1], points[2][1]], 'b')
# plot points
for point in points:
    ax.plot(point[0], point[1], 'o')
ax.set_aspect("equal")
plt.show()
which gives:
To do this without using a Matplotlib PathPatch object, you can calculate the Bezier points as, for example, in this answer, which I'll use below to do the same as above (note: to avoid using scipy's comb function, as in that answer, I've used the comb function from here):
import numpy as np
from math import factorial
from matplotlib import pyplot as plt
def comb(n, k):
    """
    N choose k
    """
    return factorial(n) / factorial(k) / factorial(n - k)

def bernstein_poly(i, n, t):
    """
    The Bernstein polynomial of n, i as a function of t
    """
    return comb(n, i) * (t**(n-i)) * (1 - t)**i

def bezier_curve(points, n=1000):
    """
    Given a set of control points, return the
    bezier curve defined by the control points.

    points should be a list of lists, or list of tuples
    such as [[1, 1],
             [2, 3],
             [4, 5], ..., [Xn, Yn]]

    n is the number of points at which to return the curve; defaults to 1000.

    See http://processingjs.nihongoresources.com/bezierinfo/
    """
    nPoints = len(points)
    xPoints = np.array([p[0] for p in points])
    yPoints = np.array([p[1] for p in points])
    t = np.linspace(0.0, 1.0, n)
    polynomial_array = np.array(
        [bernstein_poly(i, nPoints-1, t) for i in range(0, nPoints)]
    )
    xvals = np.dot(xPoints, polynomial_array)
    yvals = np.dot(yPoints, polynomial_array)
    return xvals, yvals
# set control points (as in the first example)
points = [(3, 6.146), (4, 8), (6, 8.25)]
# get the Bezier curve points at 100 points
xvals, yvals = bezier_curve(points, n=100)
# make the plot
fig, ax = plt.subplots()
# lines between control points
ax.plot([points[0][0], points[1][0]], [points[0][1], points[1][1]], 'b')
ax.plot([points[1][0], points[2][0]], [points[1][1], points[2][1]], 'b')
# plot control points
for point in points:
    ax.plot(point[0], point[1], 'o')
# plot the Bezier curve
ax.plot(xvals, yvals, "k--")
ax.set_aspect("equal")
fig.show()
This gives:
If you are not just interested in the solution but in better understanding of this problem, you should read the article on Curved Paths that Amit Patel wrote in his 'Red Blob Games' blog.
https://www.redblobgames.com/articles/curved-paths/

Rotation in 3D coordinate system

I have some fixed points and axes in a 3D coordinate system.
I compute the angle between these axes and some specific points at t = 0, and I am trying to predict the points' coordinates at any t, knowing that they rotate about one particular axis by an angle alpha. How can I compute the predicted coordinates using rotation matrices?
I tried something like this :
def rotate(axis=(1., 0., 0.), angle=0.0, radians=None):
    """ 4x4 rotation matrix around 'axis' with 'angle' degrees or 'radians' """
    x, y, z = normalized(vec(axis))
    s, c = sincos(angle, radians)
    nc = 1 - c
    return np.array([[x*x*nc + c,   x*y*nc - z*s, x*z*nc + y*s, 0],
                     [y*x*nc + z*s, y*y*nc + c,   y*z*nc - x*s, 0],
                     [x*z*nc - y*s, y*z*nc + x*s, z*z*nc + c,   0],
                     [0,            0,            0,            1]], 'f')
.
.
.
.
x = item.x
y = item.y
shape = image.shape
relative_x = int(x * shape[1])
relative_y = int(y * shape[0])
theta = atan(relative_y/relative_x)
predicted = [np.array([200,200,0,0]),np.array([400,400,0,0])] # an example of pts
r = rotate(axis=(1., 0., 0.), radians=theta)
predicted = [r @ predicted[0], r @ predicted[1]]
but the result is wrong. Is the computation of the rotation matrix valid? Thank you.
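For what it's worth, the 3x3 part of the matrix above matches the standard axis-angle (Rodrigues) formula. A quick cross-check against SciPy's reference implementation (a sketch, assuming scipy is available; the theta value is a stand-in) could look like this:

import numpy as np
from scipy.spatial.transform import Rotation

axis = np.array([1.0, 0.0, 0.0])
theta = 0.3  # radians, stand-in for atan(relative_y / relative_x)
# reference rotation matrix for the same axis and angle
R_ref = Rotation.from_rotvec(theta * axis / np.linalg.norm(axis)).as_matrix()
print(R_ref)  # should match rotate(axis, radians=theta)[:3, :3]

If the matrices agree, the problem is more likely in the choice of axis and angle (or in the unshown normalized/sincos helpers) than in the formula itself.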

System blows up when trying to optimise vehicle movement using control package

I am attempting to optimise the movement of a vehicle from position A to position B. I have tried copying the code from the documentation, but it does not work when I paste it.
This is my code so far:
import numpy as np
import control as ct
import control.optimal as opt
import matplotlib.pyplot as plt
def vehicle_update(t, x, u, params):
    # Get the parameters for the model
    l = params.get('wheelbase', 3.)       # vehicle wheelbase (m)
    phimax = params.get('maxsteer', 0.5)  # max steering angle (rad)
    # Saturate the steering input
    phi = np.clip(u[1], -phimax, phimax)
    # Return the derivative of the state
    return np.array([
        np.cos(x[2]) * u[0],      # xdot = cos(theta) v
        np.sin(x[2]) * u[0],      # ydot = sin(theta) v
        (u[0] / l) * np.tan(phi)  # thdot = v/l tan(phi)
    ])
def vehicle_output(t, x, u, params):
    return x  # return x, y, theta (full state)
# Define the vehicle steering dynamics as an input/output system
vehicle = ct.NonlinearIOSystem(
vehicle_update, vehicle_output, states=3, name='vehicle',
inputs=('v', 'phi'), outputs=('x', 'y', 'theta'))
# We consider an optimal control problem that consists of "changing lanes" by moving from the point x = 0 m, y = -2 m, theta = 0 to the point x = 100 m, y = 2 m, theta = 0 over a period of 10 seconds, with a starting and ending velocity of 10 m/s:
x0 = [0, -2, 0]; u0 = [10, 0]
xf = [100, 2, 0]; uf = [10, 0]
Tf = 10
#To set up the optimal control problem we design a cost function that penalizes the state and input using quadratic cost functions:
Q = np.diag([0.1, 10, 0.1]) # keep lateral error low
R = np.eye(2) * 0.1
cost = opt.quadratic_cost(vehicle, Q, R, x0=xf, u0=uf)
# We also constrain the maximum turning rate to 0.1 radians (about 6 degrees) and constrain the velocity to be in the range of 9 m/s to 11 m/s:
constraints = [ opt.input_range_constraint(vehicle, [8, -0.1], [11, 0.1]) ]
#Finally, we solve for the optimal inputs:
horizon = np.linspace(0, Tf, 20, endpoint=True)
bend_left = [10, 0.01] # slight left veer
result = opt.solve_ocp(
vehicle, horizon, x0, cost, constraints, initial_guess=bend_left,
options={'eps': 0.01}) # set step size for gradient calculation
# Extract the results
u = result.inputs
t, y = ct.input_output_response(vehicle, horizon, u, x0)
#Plotting the results:
Norows = 3
Nocols = 1
width = 10
height = 10
fig, axes = plt.subplots(nrows=Norows, ncols=Nocols, figsize=(width, height)) #Graph Layout
# Plot the results
plt.subplot(Norows, Nocols, 1)
plt.plot(y[0], y[1])
plt.plot(x0[0], x0[1], 'ro', xf[0], xf[1], 'ro')
plt.xlabel("x [m]")
plt.ylabel("y [m]")
plt.subplot(Norows, Nocols, 2)
plt.plot(t, u[0])
plt.axis([0, 10, 8.5, 11.5])
plt.plot([0, 10], [9, 9], 'k--', [0, 10], [11, 11], 'k--')
plt.xlabel("t [sec]")
plt.ylabel("u1 [m/s]")
plt.subplot(Norows, Nocols, 3)
plt.plot(t, u[1])
plt.axis([0, 10, -0.15, 0.15])
plt.plot([0, 10], [-0.1, -0.1], 'k--', [0, 10], [0.1, 0.1], 'k--')
plt.xlabel("t [sec]")
plt.ylabel("u2 [rad/s]")
plt.suptitle("Lane change manoeuvre")
plt.tight_layout()
plt.show()
Despite this effort, the system appears to blow up when running the code, as shown in the first graph.
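For comparison, the lane-change example in the python-control documentation (as far as I recall it) also passes a terminal cost to solve_ocp, which pulls the state at the end of the horizon towards xf. A hedged sketch of that variant, reusing the objects defined in the script above, and assuming opt.solve_ocp accepts a terminal_cost keyword and opt.quadratic_cost accepts None for R, as in that example:

# assumption: the terminal_cost keyword and R=None are supported, as in the
# steering example of the python-control optimal-control documentation
term_cost = opt.quadratic_cost(vehicle, np.diag([1, 10, 1]), None, x0=xf)
result = opt.solve_ocp(
    vehicle, horizon, x0, cost, constraints,
    terminal_cost=term_cost, initial_guess=bend_left)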

Numpy mask from cylinder coordinates

I generated the coordinates of a cylinder. Its two faces connect two arbitrary points already given.
Is it possible to build a 3D numpy mask of the filled cylinder from the coordinates with standard Python libraries? Creating a 2D mask seems simple enough, but I'm encountering some difficulties with 3D.
Here is the code for generating the cylinder, taken from here and here:
import scipy
import scipy.linalg
import numpy as np
import nibabel as nib
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# defining mask
shape = (100, 100, 100)
image = np.zeros(shape=shape)
# set radius and centres values
r = 3
start = [30, 45, 60]
end = [40, 58, 70]
p1 = np.array(start)
p2 = np.array(end)
# # calculate p2-p1 distance
# dx = p2[0] - p1[0]
# dy = p2[1] - p1[1]
# dz = p2[2] - p1[2]
# dist = math.sqrt(dx**2 + dy**2 + dz**2)
# vector in direction of axis
v = p2 - p1
# find magnitude of vector
mag = scipy.linalg.norm(v)
# unit vector in direction of axis
v = v / mag
# make some vector not in the same direction as v
not_v = np.array([1, 0, 0])
if (v == not_v).all():
    not_v = np.array([0, 1, 0])
# make vector perpendicular to v
n1 = np.cross(v, not_v)
# normalize n1
n1 /= scipy.linalg.norm(n1)
# make unit vector perpendicular to v and n1
n2 = np.cross(v, n1)
#surface ranges over t from 0 to length of axis and 0 to 2*pi
t = np.linspace(0, mag, 100)
theta = np.linspace(0, 2 * np.pi, 100)
rsample = np.linspace(0, r, 2)
#use meshgrid to make 2d arrays
t, theta2 = np.meshgrid(t, theta)
rsample, theta = np.meshgrid(rsample, theta)
# generate coordinates for surface
# "Tube"
X, Y, Z = [p1[i] + v[i] * t + r * np.sin(theta2) * n1[i] + r * np.cos(theta2) * n2[i] for i in [0, 1, 2]]
# "Bottom"
X2, Y2, Z2 = [p1[i] + rsample[i] * np.sin(theta) * n1[i] + rsample[i] * np.cos(theta) * n2[i] for i in [0, 1, 2]]
# "Top"
X3, Y3, Z3 = [p1[i] + v[i] * mag + rsample[i] * np.sin(theta) * n1[i] + rsample[i] * np.cos(theta) * n2[i] for i in [0, 1, 2]]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z)
ax.plot_surface(X2, Y2, Z2)
ax.plot_surface(X3, Y3, Z3)
plt.show()
I need the 3D numpy mask to select all the values inside the cylinder of a 3D image. The shape of mask and image is the same.
In the end I looped through the coordinates of tube and faces.
I got the coordinates following this link: 3D points from Numpy meshgrid coordinates
tube = np.stack((X.ravel(), Y.ravel(), Z.ravel()), axis=1)
face1 = np.stack((X2.ravel(), Y2.ravel(), Z2.ravel()), axis=1)
face2 = np.stack((X3.ravel(), Y3.ravel(), Z3.ravel()), axis=1)
# filling numpy mask
for i in range(len(tube)):
    image[int(tube[i][0]), int(tube[i][1]), int(tube[i][2])] = 255
for j in range(len(face1)):
    image[int(face1[j][0]), int(face1[j][1]), int(face1[j][2])] = 255
for k in range(len(face2)):
    image[int(face2[k][0]), int(face2[k][1]), int(face2[k][2])] = 255
mask_new = nib.Nifti1Image(image.astype(np.float32), ctsurg_file.affine)
nib.save(mask_new, os.path.join(currdir, 'mask_cyl.nii.gz'))
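Note that looping over the generated surface coordinates only marks voxels on the tube and the two faces, not the interior. For a filled cylinder, each voxel can instead be tested directly against the axis. A sketch of that idea (mine, not from the original post), using only numpy and the same p1, p2, r and shape as above:

import numpy as np

shape = (100, 100, 100)
p1 = np.array([30, 45, 60], dtype=float)
p2 = np.array([40, 58, 70], dtype=float)
r = 3.0

axis = p2 - p1
length = np.linalg.norm(axis)
axis = axis / length

# voxel centre coordinates, indexed as image[x, y, z] like the script above
coords = np.stack(np.indices(shape), axis=-1).astype(float)  # shape (100, 100, 100, 3)

# projection of each voxel onto the axis and its radial distance from the axis
rel = coords - p1
t = rel @ axis                                   # signed distance along the axis
radial = np.linalg.norm(rel - t[..., None] * axis, axis=-1)
mask = (t >= 0) & (t <= length) & (radial <= r)  # boolean mask of the filled cylinder

image = np.where(mask, 255.0, 0.0).astype(np.float32)

The boolean mask can then be used directly to select values from a 3D image of the same shape (e.g. data[mask]), or saved with nibabel as in the snippet above.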
