I'm trying to project 3D body keypoints to 2D keypoints.
My 3D points are:
points = np.array([[-7.55801499e-02, -3.69511306e-01, -2.63576955e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[ 3.08661222e-01, -2.93346141e-02, 3.72593999e-02],
[ 5.96781611e-01, -2.82074720e-01, 4.71359938e-01],
[ 5.38534284e-01, -8.05779934e-01, 4.68694866e-01],
[-3.67936224e-01, -1.09069087e-01, 9.90774706e-02],
[-5.24732828e-01, -2.87176669e-01, 6.09635711e-01],
[-4.37022656e-01, -7.87327409e-01, 4.43706572e-01],
[ 1.33009470e-09, -5.10657072e-09, 1.00000000e+00],
[ 1.13241628e-01, 3.25177647e-02, 1.24026799e+00],
[ 3.43442023e-01, -2.51034945e-01, 1.90472209e+00],
[ 2.57550180e-01, -2.86886752e-01, 2.75528717e+00],
[-1.37361348e-01, -2.60521360e-02, 1.19951272e+00],
[-3.26779515e-01, -5.59706092e-01, 1.75905156e+00],
[-4.65996087e-01, -7.69565761e-01, 2.56634569e+00],
[-1.89841837e-02, -3.19088846e-01, -3.69913191e-01],
[-1.61812544e-01, -3.10732543e-01, -3.47061515e-01],
[ 7.68100023e-02, -1.19293019e-01, -3.72248143e-01],
[-2.24317372e-01, -1.02143347e-01, -3.32051814e-01],
[-3.77829641e-01, -1.19915462e+00, 2.56900430e+00],
[-5.45104921e-01, -1.13393784e+00, 2.57149625e+00],
[-5.66698492e-01, -6.89325571e-01, 2.67840290e+00],
[ 4.65222150e-01, -6.44857705e-01, 2.83186650e+00],
[ 5.27995050e-01, -4.69421804e-01, 2.87518311e+00],
[ 1.77749291e-01, -1.74753308e-01, 2.88810611e+00]])
I plotted them using:
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlim3d(1, -1)
ax.set_ylim3d(1, -1)
ax.set_zlim3d(1, -1)
ax.scatter3D(points[:, 0], points[:, 1],
points[:, 2], cmap='Greens')
The result is:
I want an array of 2D points from the same camera view, so my desired result is a 2D array:
What I have tried so far:
import cv2
ans = []
for k in range(25):
    tmp = np.array(points[k, :]).reshape(1, 3)
    rvec = np.array([0, 0, 0], dtype=float)  # rotation vector
    tvec = np.array([0, 0, 0], dtype=float)  # translation vector
    fx = fy = 1.0
    cx = cy = 0.0
    cameraMatrix = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
    result = cv2.projectPoints(tmp, rvec, tvec, cameraMatrix, None)
    ans.append(result[0])
ans = np.array(ans).squeeze()
But the result I'm getting is:
plt.scatter(ans[:,0], ans[:, 1])
I can't figure out why the spatial information is lost during the projection; kindly help me with this. Also, it's not necessary for me to use OpenCV, so you can suggest other methods, such as plain NumPy, too.
Thanks
Here's a way to do this from "scratch". I have the following import statements:
import numpy as np
import matplotlib.pyplot as plt
from numpy import sin,cos,pi
from scipy.linalg import norm
After your 3d plotting code, I added the following:
azim = ax.azim*pi/180
elev = ax.elev*pi/180
elev *= 1.2 # this seems to improve the outcome
a_vec = np.array([cos(azim),sin(azim),0])
normal = cos(elev)*a_vec + np.array([0,0,sin(elev)])
z_vec = np.array([0,0,1])
y_comp = z_vec - (z_vec @ normal)*normal
y_comp = y_comp/norm(y_comp)
x_comp = np.cross(y_comp,normal)
proj_mat = np.vstack([x_comp,y_comp]) # build projection matrix
proj_mat = -proj_mat # account for flipped axes
points_2D = points @ proj_mat.T # apply projection
plt.figure()
plt.scatter(*points_2D.T)
plt.gca().set_aspect('equal', adjustable='box')
plt.axis('off')
plt.show()
The resulting points:
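If you would rather stay with OpenCV: a likely reason the original cv2.projectPoints attempt looks degenerate is that the virtual camera sits at the origin, in the middle of the skeleton, with unit focal length and zero translation, so every point is divided by its own small (and sometimes negative) z value. A minimal sketch that pushes the scene in front of the camera before projecting; it assumes the points array from the question, and the focal length, principal point and translation below are arbitrary illustrative values, not taken from the original post:
import cv2
import numpy as np
import matplotlib.pyplot as plt

rvec = np.zeros(3)                          # no rotation
tvec = np.array([0., 0., 5.])               # push the scene 5 units in front of the camera
f = 500.0                                   # arbitrary focal length in pixels
cx = cy = 250.0                             # arbitrary principal point
cameraMatrix = np.array([[f, 0, cx],
                         [0, f, cy],
                         [0, 0, 1]], dtype=float)

projected, _ = cv2.projectPoints(points.astype(np.float64), rvec, tvec, cameraMatrix, None)
projected = projected.squeeze()             # shape (N, 2)

plt.scatter(projected[:, 0], projected[:, 1])
plt.gca().invert_yaxis()                    # image y grows downward
plt.show()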
I have an irregularly shaped image and I want to get equally spaced grid points inside that.
The image that I have for example is Image I have
I am thinking of using OpenCV to get the corner coordinates, and that is easy. But I do not know how to pass all the corner coordinates, or how to divide my shape into identifiable geometric shapes and do this.
Right now, I have hard coded the coordinates and created a function to pass the coordinates.
import numpy as np
import matplotlib.pyplot as plt
import functools
def gridFunc(arr):
    center = np.mean(arr, axis=0)
    x = np.arange(min(arr[:, 0]), max(arr[:, 0]) + 0.04, 0.4)
    y = np.arange(min(arr[:, 1]), max(arr[:, 1]) + 0.04, 0.4)
    a, b = np.meshgrid(x, y)
    points = np.stack([a.reshape(-1), b.reshape(-1)]).T

    def normal(a, b):
        v = b - a
        n = np.array([v[1], -v[0]])
        # normal needs to point out
        if (center - a) @ n > 0:
            n *= -1
        return n

    mask = functools.reduce(np.logical_and, [((points - a) @ normal(a, b)) < 0 for a, b in zip(arr[:-1], arr[1:])])
    #plt.plot(arr[:, 0], arr[:, 1])
    #plt.gca().set_aspect('equal')
    #plt.scatter(points[mask][:, 0], points[mask][:, 1])
    #plt.show()
    return points[mask]
arr1 = np.array([[0, 7],[3, 10],[3, 4],[0, 7]])
arr2 = np.array([[3, 0], [3, 14], [12, 14], [12, 0], [3,0]])
arr3 = np.array([[12, 4], [12, 10], [20, 10], [20, 4], [12, 4]])
arr_1 = gridFunc(arr1)
arr_2 = gridFunc(arr2)
arr_3 = gridFunc(arr3)
res = np.append(arr_1, arr_2)
res = np.reshape(res, (-1, 2))
res = np.append(res, arr_3)
res = np.reshape(res, (-1, 2))
plt.scatter(res[:,0], res[:,1])
plt.show()
The image that I get is shown below, but I am doing this manually, and I want to extend this to other shapes as well.
Image I get
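A more general approach (only a sketch, not tied to the example image) is to describe the whole outline as a single polygon, e.g. obtained once from OpenCV contour detection, and let matplotlib.path.Path.contains_points pick the grid points that fall inside it:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path

def grid_in_polygon(vertices, step=0.4):
    # Equally spaced grid points that fall inside a closed polygon
    poly = Path(vertices)
    xmin, ymin = vertices.min(axis=0)
    xmax, ymax = vertices.max(axis=0)
    x = np.arange(xmin, xmax + step, step)
    y = np.arange(ymin, ymax + step, step)
    xx, yy = np.meshgrid(x, y)
    pts = np.column_stack([xx.ravel(), yy.ravel()])
    return pts[poly.contains_points(pts)]

# Hypothetical outline: the three hard-coded pieces above merged into one polygon
outline = np.array([[0, 7], [3, 10], [3, 14], [12, 14], [12, 10], [20, 10],
                    [20, 4], [12, 4], [12, 0], [3, 0], [3, 4], [0, 7]])
pts = grid_in_polygon(outline)
plt.scatter(pts[:, 0], pts[:, 1])
plt.gca().set_aspect('equal')
plt.show()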
I know that, for those who know Python well, this is a piece-of-cake question.
I have an excel file and it looks like this:
1 7 5 8 2 4 6 3
1 7 4 6 8 2 5 3
6 1 5 2 8 3 7 4
My purpose is to draw a cube in Python and draw a line according to the order of these numbers.
Note: there is no number greater than 8 in the arrays.
I can explain better with a pictures.
First Step:
Second Step:
Last Step:
I need to plot the final version of the 3D cube for each row in the Excel file.
My attempt at a solution:
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
import matplotlib.pyplot as plt
df = pd.read_csv("uniquesolutions.csv",header=None,sep='\t')
myArray = df.values
points = solutionsarray
def connectpoints(x,y,p1,p2):
    x1, x2 = x[p1], x[p2]
    y1, y2 = y[p1], y[p2]
    plt.plot([x1,x2],[y1,y2],'k-')
# cube[0][0][0] = 1
# cube[0][0][1] = 2
# cube[0][1][0] = 3
# cube[0][1][1] = 4
# cube[1][0][0] = 5
# cube[1][0][1] = 6
# cube[1][1][0] = 7
# cube[1][1][1] = 8
for i in range():
    connectpoints(cube[i][i][i],cube[],points[i],points[i+1]) # Confused!
ax = fig.add_subplot(111, projection='3d')
# plot sides
ax.add_collection3d(Poly3DCollection(verts,
facecolors='cyan', linewidths=1, edgecolors='r', alpha=.25))
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
In the question here, they managed to draw something with the points given inside the cube.
I tried to use this 2D connection function.
Last question: can I plot the resulting red lines in 3D? How can I do this in Python?
First, it looks like you are using pandas (pd.read_csv) without importing it. Since you are not reading headers and just want a list of values, it is probably sufficient to use a numpy read function instead.
Since I don't have access to your csv, I will define the vertex lists as variables below.
vertices = np.zeros([3,8],dtype=int)
vertices[0,:] = [1, 7, 5, 8, 2, 4, 6, 3]
vertices[1,:] = [1, 7, 4, 6, 8, 2, 5, 3]
vertices[2,:] = [6, 1, 5, 2, 8, 3, 7, 4]
vertices = vertices - 1 #(adjust the vertex numbers by one since python starts with zero indexing)
Here I used a 2D numpy array to define the vertices. The first dimension, with length 3, indexes the vertex lists, and the second dimension, with length 8, holds each vertex list.
I subtract 1 from the vertices list because we will use this list to index another array and python indexing starts at 0, not 1.
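If you do want to read the vertex lists from the file rather than hard-coding them, something like this should work (a sketch, assuming a tab-separated file with one solution per row, as in the pd.read_csv call of the question):
# Read every row as integers and shift to zero-based indices in one step
vertices = np.loadtxt("uniquesolutions.csv", dtype=int, delimiter='\t') - 1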
Then, define the cube coordinates.
# Initialize an array with dimensions 8 by 3
# 8 for each vertex
# -> indices will be vertex1=0, v2=1, v3=2 ...
# 3 for each coordinate
# -> indices will be x=0, y=1, z=2
cube = np.zeros([8,3])
# Define x values
cube[:,0] = [0, 0, 0, 0, 1, 1, 1, 1]
# Define y values
cube[:,1] = [0, 1, 0, 1, 0, 1, 0, 1]
# Define z values
cube[:,2] = [0, 0, 1, 1, 0, 0, 1, 1]
Then initialize the plot.
# First initialize the fig variable to a figure
fig = plt.figure()
# Add a 3d axis to the figure
ax = fig.add_subplot(111, projection='3d')
Then add the red lines for vertex list 1. You can repeat this for the other vertex lists by increasing the first index into vertices.
# Plot first vertex list
ax.plot(cube[vertices[0,:],0], cube[vertices[0,:],1], cube[vertices[0,:],2], color='r')
# Plot second vertex list
ax.plot(cube[vertices[1,:],0], cube[vertices[1,:],1], cube[vertices[1,:],2], color='r')
The faces can be added by defining the edges of each face. There is a numpy array for each face. In each array there are 5 vertices, where the edges are defined by the lines between successive vertices, so the 5 vertices create 4 edges.
# Initialize a list of vertex coordinates for each face
# faces = [np.zeros([5,3])]*3
faces = []
faces.append(np.zeros([5,3]))
faces.append(np.zeros([5,3]))
faces.append(np.zeros([5,3]))
faces.append(np.zeros([5,3]))
faces.append(np.zeros([5,3]))
faces.append(np.zeros([5,3]))
# Bottom face
faces[0][:,0] = [0,0,1,1,0]
faces[0][:,1] = [0,1,1,0,0]
faces[0][:,2] = [0,0,0,0,0]
# Top face
faces[1][:,0] = [0,0,1,1,0]
faces[1][:,1] = [0,1,1,0,0]
faces[1][:,2] = [1,1,1,1,1]
# Left Face
faces[2][:,0] = [0,0,0,0,0]
faces[2][:,1] = [0,1,1,0,0]
faces[2][:,2] = [0,0,1,1,0]
# Right Face
faces[3][:,0] = [1,1,1,1,1]
faces[3][:,1] = [0,1,1,0,0]
faces[3][:,2] = [0,0,1,1,0]
# front face
faces[4][:,0] = [0,1,1,0,0]
faces[4][:,1] = [0,0,0,0,0]
faces[4][:,2] = [0,0,1,1,0]
# back face
faces[5][:,0] = [0,1,1,0,0]
faces[5][:,1] = [1,1,1,1,1]
faces[5][:,2] = [0,0,1,1,0]
ax.add_collection3d(Poly3DCollection(faces, facecolors='cyan', linewidths=1, edgecolors='k', alpha=.25))
All together it looks like this.
import numpy as np
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import matplotlib.pyplot as plt
vertices = np.zeros([3,8],dtype=int)
vertices[0,:] = [1, 7, 5, 8, 2, 4, 6, 3]
vertices[1,:] = [1, 7, 4, 6, 8, 2, 5, 3]
vertices[2,:] = [6, 1, 5, 2, 8, 3, 7, 4]
vertices = vertices - 1 #(adjust the indices by one since python starts with zero indexing)
# Define an array with dimensions 8 by 3
# 8 for each vertex
# -> indices will be vertex1=0, v2=1, v3=2 ...
# 3 for each coordinate
# -> indices will be x=0, y=1, z=2
cube = np.zeros([8,3])
# Define x values
cube[:,0] = [0, 0, 0, 0, 1, 1, 1, 1]
# Define y values
cube[:,1] = [0, 1, 0, 1, 0, 1, 0, 1]
# Define z values
cube[:,2] = [0, 0, 1, 1, 0, 0, 1, 1]
# First initialize the fig variable to a figure
fig = plt.figure()
# Add a 3d axis to the figure
ax = fig.add_subplot(111, projection='3d')
# plotting cube
# Initialize a list of vertex coordinates for each face
# faces = [np.zeros([5,3])]*3
faces = []
faces.append(np.zeros([5,3]))
faces.append(np.zeros([5,3]))
faces.append(np.zeros([5,3]))
faces.append(np.zeros([5,3]))
faces.append(np.zeros([5,3]))
faces.append(np.zeros([5,3]))
# Bottom face
faces[0][:,0] = [0,0,1,1,0]
faces[0][:,1] = [0,1,1,0,0]
faces[0][:,2] = [0,0,0,0,0]
# Top face
faces[1][:,0] = [0,0,1,1,0]
faces[1][:,1] = [0,1,1,0,0]
faces[1][:,2] = [1,1,1,1,1]
# Left Face
faces[2][:,0] = [0,0,0,0,0]
faces[2][:,1] = [0,1,1,0,0]
faces[2][:,2] = [0,0,1,1,0]
# Right Face
faces[3][:,0] = [1,1,1,1,1]
faces[3][:,1] = [0,1,1,0,0]
faces[3][:,2] = [0,0,1,1,0]
# front face
faces[4][:,0] = [0,1,1,0,0]
faces[4][:,1] = [0,0,0,0,0]
faces[4][:,2] = [0,0,1,1,0]
# back face
faces[5][:,0] = [0,1,1,0,0]
faces[5][:,1] = [1,1,1,1,1]
faces[5][:,2] = [0,0,1,1,0]
ax.add_collection3d(Poly3DCollection(faces, facecolors='cyan', linewidths=1, edgecolors='k', alpha=.25))
# plotting lines
ax.plot(cube[vertices[0,:],0],cube[vertices[0,:],1],cube[vertices[0,:],2],color='r')
ax.plot(cube[vertices[1,:],0],cube[vertices[1,:],1],cube[vertices[1,:],2],color='r')
ax.plot(cube[vertices[2,:],0],cube[vertices[2,:],1],cube[vertices[2,:],2],color='r')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
Alternatively, if you want each set of lines to have its own color, replace
ax.plot(cube[vertices[0,:],0],cube[vertices[0,:],1],cube[vertices[0,:],2],color='r')
ax.plot(cube[vertices[1,:],0],cube[vertices[1,:],1],cube[vertices[1,:],2],color='r')
ax.plot(cube[vertices[2,:],0],cube[vertices[2,:],1],cube[vertices[2,:],2],color='r')
with
colors = ['r','g','b']
for i in range(3):
    ax.plot(cube[vertices[i,:],0],cube[vertices[i,:],1],cube[vertices[i,:],2],color=colors[i])
The code below starts fine in PyCharm.
But when started from the command line with:
"python field_basket_design_uwr.py"
it gives this error:
Traceback (most recent call last):
File "field_basket_design_uwr.py", line 677, in <module>
mp.set_start_method('spawn')
AttributeError: 'module' object has no attribute 'set_start_method'
Does somebody have an idea how to make the script start without this error?
#!/usr/bin/python3.5
import math
import sys
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk as gtk, Gdk as gdk, GLib, GObject as gobject
import string
import os
import subprocess
import glob
from datetime import datetime, timedelta
import time
import numpy as np
import matplotlib; matplotlib.use('Gtk3Agg')
import matplotlib.animation as animation
from mpl_toolkits.mplot3d.proj3d import proj_transform
from matplotlib.text import Annotation
from matplotlib.backends.backend_gtk3cairo import FigureCanvasGTK3Cairo as FigureCanvas
import matplotlib.pyplot as plt
import multiprocessing as mp
class Annotation3D(Annotation):
'''Annotate the point xyz with text s'''
def __init__(self, s, xyz, *args, **kwargs):
Annotation.__init__(self,s, xy=(0,0), *args, **kwargs)
self._verts3d = xyz
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.xy=(xs,ys)
Annotation.draw(self, renderer)
#
def annotate3D(ax, s, *args, **kwargs):
'''add annotation text s to Axes3D ax'''
tag = Annotation3D(s, *args, **kwargs)
ax.add_artist(tag)
#
def draw_basket(ax1, x, y, z, h, color='black'):
'''add basket to the ax1 figure'''
t = np.linspace(0, np.pi * 2, 16)
ax1.plot(x+0.24*np.cos(t), y+0.24*np.sin(t), z, linewidth=1, color=color)
ax1.plot(x+0.16*np.cos(t), y+0.16*np.sin(t), z, linewidth=1, color=color)
ax1.plot(x+0.24*np.cos(t), y+0.24*np.sin(t), z+h, linewidth=1, color=color)
A=0
while A < 16:
xBar = [x+ 0.16 * math.sin(A*22.5*np.pi/180),x+ 0.24 * math.sin(A*22.5*np.pi/180)]
yBar = [y+ 0.16 * math.cos(A*22.5*np.pi/180),y+ 0.24 * math.cos(A*22.5*np.pi/180)]
zBar = [0,h]
ax1.plot(xBar, yBar, zBar, color=color)
A = A+1
def draw_halfsphere (ax1, x, y, z, sph_radius, color=(0,0,1,1)):
''' add free distance surface to Axes3d ax1 '''
u, v = np.mgrid[0:2 * np.pi:20j, 0:np.pi/2:10j]
xP1 = x + sph_radius * np.cos(u) * np.sin(v)
yP1 = y + sph_radius * np.sin(u) * np.sin(v)
zP1 = z - sph_radius * np.cos(v)
halffreesphere = ax1.plot_wireframe(xP1, yP1, zP1, color=color, alpha=0.3)
return halffreesphere
def OnClick(event):
global selected_coord
global clicked_coord
clicked_coord [0, 0] = clicked_coord [1, 0]
clicked_coord [0, 1] = clicked_coord [1, 1]
clicked_coord [0, 2] = clicked_coord [1, 2]
clicked_coord [1, 0] = selected_coord[0]
clicked_coord [1, 1] = selected_coord[1]
clicked_coord [1, 2] = selected_coord[2]
print ("selected position X: %5.2f Y: %5.2f Z: %5.2f" % (selected_coord[0], selected_coord[1],selected_coord[2]))
print ("distance between selected points: %5.2f", np.sqrt ((clicked_coord [0, 0] - clicked_coord [1, 0])**2
+ (clicked_coord [0, 1]- clicked_coord [1, 1])**2
+ (clicked_coord [0, 2] - clicked_coord [1, 2])**2))
def distance(point, event):
"""Return distance between mouse position and given data point
Args:
point (np.array): np.array of shape (3,), with x,y,z in data coords
event (MouseEvent): mouse event (which contains mouse position in .x and .xdata)
Returns:
distance (np.float64): distance (in screen coords) between mouse pos and data point
"""
x2, y2, _ = proj_transform(point[0], point[1], point[2], plt.gca().get_proj())
x3, y3 = ax1.transData.transform((x2, y2))
return np.sqrt ((x3 - event.x)**2 + (y3 - event.y)**2)
def calcClosestDatapoint(X, event):
""""Calculate which data point is closest to the mouse position.
Args:
X (np.array) - array of points, of shape (numPoints, 3)
event (MouseEvent) - mouse event (containing mouse position)
returns:
smallestIndex (int) - the index (into the array of points X) of the element closest to the mouse position
"""
distances = [distance (X[i, 0:3], event) for i in range(X.shape[0])]
return np.argmin(distances),np.amin(distances)
def annotatePlot(X, index):
global selected_coord
"""Create popover label in 3d chart
Args:
X (np.array) - array of points, of shape (numPoints, 3)
index (int) - index (into points array X) of item which should be printed
Returns:
None
"""
# If we have previously displayed another label, remove it first
if hasattr(annotatePlot, 'label'):
annotatePlot.label.remove()
# Get data point from array of points X, at position index
x2, y2, _ = proj_transform(X[index, 0], X[index, 1], X[index, 2], ax1.get_proj())
annotatePlot.label = plt.annotate( "Select %d" % (index+1),
xy = (x2, y2), xytext = (-20, 20), textcoords = 'offset points', ha = 'right', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
# make coord from label available global for other function like distance measurement between points
selected_coord[0]=X[index, 0]
selected_coord[1]=X[index, 1]
selected_coord[2]=X[index, 2]
#
fig.canvas.draw()
def onMouseMotion(event):
global pos_pb_now, pos_pw_now
"""Event that is triggered when mouse is moved. Shows text annotation over data point closest to mouse."""
closestIndexW,LowestDistanceW = calcClosestDatapoint(pos_pw_now, event)
closestIndexB,LowestDistanceB = calcClosestDatapoint(pos_pb_now, event)
if LowestDistanceW < LowestDistanceB:
annotatePlot (pos_pw_now, closestIndexW)
else:
annotatePlot (pos_pb_now, closestIndexB)
#
def OneWindow(s_w_shared,s_d_shared,s_l_shared,el_w_shared,elevation_shared, azimut_shared, pb,
pw, ball):
import numpy as np
import matplotlib.pyplot as plt
''' Sub-processed Plot viewer of the main windows; copy/paste in one; it helps for PC with 2 monitors
The main windows remain the control window of the trainer. This window is the view windows of the trained player'''
#
def animate_one(i):
p_b_one._offsets3d = pos_pb_now_one[:, 0], pos_pb_now_one[:, 1], pos_pb_now_one[:, 2]
p_w_one._offsets3d = pos_pw_now_one[:, 0], pos_pw_now_one[:, 1], pos_pw_now_one[:, 2]
p_ball_one._offsets3d = pos_ball_now_one[:, 0], pos_ball_now_one[:, 1], pos_ball_now_one[:, 2]
ax1_one.view_init(elev=elevation_shared.value, azim=azimut_shared.value)
fig_one = plt.figure()
ax1_one = fig_one.add_subplot(111,projection='3d')
#
arrpb = np.frombuffer(pb.get_obj(), dtype='f')
pos_pb_now_one = np.reshape(arrpb, (6, 3))
#
arrpw = np.frombuffer(pw.get_obj(), dtype='f')
pos_pw_now_one = np.reshape(arrpw, (6, 3))
#
arrball = np.frombuffer(ball.get_obj(), dtype='f')
pos_ball_now_one = np.reshape(arrball, (1, 3))
xG = [0,s_w_shared.value,s_w_shared.value,0,0, 0,s_w_shared.value,s_w_shared.value,s_w_shared.value,
s_w_shared.value,s_w_shared.value, 0, 0,0, 0,s_w_shared.value]
yG = [0, 0, 0,0,0,s_l_shared.value,s_l_shared.value, 0, 0,s_l_shared.value,s_l_shared.value,s_l_shared.value,
s_l_shared.value,0,s_l_shared.value,s_l_shared.value]
zG = [0, 0, s_d_shared.value,s_d_shared.value,0, 0, 0, 0, s_d_shared.value, s_d_shared.value, 0, 0,
s_d_shared.value,s_d_shared.value, s_d_shared.value, s_d_shared.value]
ax1_one.plot_wireframe (xG,yG,zG,colors= (0,0,1,1)) # blue line game area
xW = [s_w_shared.value,s_w_shared.value+el_w_shared.value,s_w_shared.value+el_w_shared.value,s_w_shared.value,
s_w_shared.value,s_w_shared.value,s_w_shared.value+el_w_shared.value,s_w_shared.value+el_w_shared.value,
s_w_shared.value+el_w_shared.value,s_w_shared.value+el_w_shared.value,s_w_shared.value+el_w_shared.value,
s_w_shared.value,s_w_shared.value,s_w_shared.value,s_w_shared.value,s_w_shared.value+el_w_shared.value]
yW = [0, 0, 0, 0, 0,s_l_shared.value,s_l_shared.value, 0, 0,s_l_shared.value,s_l_shared.value,s_l_shared.value,
s_l_shared.value, 0,s_l_shared.value,s_l_shared.value]
zW = [0, 0, s_d_shared.value, s_d_shared.value, 0, 0, 0, 0, s_d_shared.value, s_d_shared.value, 0, 0,
s_d_shared.value, s_d_shared.value, s_d_shared.value, s_d_shared.value]
ax1_one.plot_wireframe (xW,yW,zW,colors= (0,1,1,1)) # light blue line exchange area
#
ax1_one.set_xlabel('Wide')
ax1_one.set_ylabel('Length')
ax1_one.set_zlabel('Water')
#
# draw the 2 lines which show the depth
xG1 = [0, s_w_shared.value]
yG1 = [s_d_shared.value, s_d_shared.value]
zG1 = [0, 0]
ax1_one.plot_wireframe(xG1, yG1, zG1, colors=(0, 0, 1, 1),linestyle=':') # blue line
xG2 = [0, s_w_shared.value]
yG2 = [s_l_shared.value-s_d_shared.value, s_l_shared.value-s_d_shared.value]
zG2 = [0, 0]
ax1_one.plot_wireframe(xG2, yG2, zG2, colors=(0, 0, 1, 1),linestyle=':') # blue line
#
# put the axis fix
ax1_one.set_xlim3d(0, s_w_shared.value+el_w_shared.value)
ax1_one.set_ylim3d(0, s_l_shared.value)
ax1_one.set_zlim3d(0, s_d_shared.value)
ax1_one.set_aspect(aspect=0.222)
draw_basket(ax1_one, s_w_shared.value / 2, 0.24, 0., 0.45)
draw_basket(ax1_one, s_w_shared.value / 2, s_l_shared.value - 0.24, 0., 0.45)
#
p_b_one = ax1_one.scatter(pos_pb_now_one[:, 0], pos_pb_now_one[:, 1], pos_pb_now_one[:, 2],
s=400, alpha = 0.5, c=(0, 0, 1, 1))
p_w_one = ax1_one.scatter(pos_pw_now_one[:, 0], pos_pw_now_one[:, 1],
pos_pw_now_one[:, 2], s=400, alpha = 0.5, c="darkgrey")
p_ball_one = ax1_one.scatter(pos_ball_now_one[:,0], pos_ball_now_one[:,1],
pos_ball_now_one[:,2], s=100, alpha = 0.5, c="red")
for j, xyz_ in enumerate(pos_pb_now_one):
annotate3D(ax1_one, s=str(j+1), xyz=xyz_, fontsize=10, xytext=(-3,3),
textcoords='offset points', ha='right',va='bottom')
for j, xyz_ in enumerate(pos_pw_now_one):
annotate3D(ax1_one, s=str(j+1), xyz=xyz_, fontsize=10, xytext=(-3,3),
textcoords='offset points', ha='right', va='bottom')
Frame = 10
ani1_one = animation.FuncAnimation(fig_one, animate_one, frames=Frame, interval=600, blit=False, repeat=True,
repeat_delay=500)
#
plt.pause(0.001)
plt.show()
def animate(i):
global pos_pb_now, pos_pb_now_shared, pos_pb_target, p_b, pos_pb_deltamove
global pos_pw_now, pos_pw_now_shared, pos_pw_target, p_w, pos_pw_deltamove
global pos_ball_now, pos_ball_now_shared, pos_ball_target, p_ball, pos_ball_deltamove
global Frame
global count_iter
global video_page_iter
global azimut_shared
global elevation_shared
global video_file_name
# global EmitPosOneWin
# global EmitPosFourWin
global ax1
global free_sphere
#
azimut, elevation = ax1.azim, ax1.elev
# print ("azimut from main",azimut)
azimut_shared.value = azimut
# print ("azimut_shared value from main",azimut_shared.value)
elevation_shared.value = elevation
pos_ball_now[0,0] += (1. / Frame) * pos_ball_deltamove[0,0]
pos_ball_now[0,1] += (1. / Frame) * pos_ball_deltamove[0,1]
pos_ball_now[0,2] += (1. / Frame) * pos_ball_deltamove[0,2]
#
# EmitPosOneWin.put(['bp', 0, pos_ball_now[0,0], pos_ball_now[0,1], pos_ball_now[0,2]])
# EmitPosFourWin.put(['bp', 0, pos_ball_now[0,0], pos_ball_now[0,1], pos_ball_now[0,2]])
pos_ball_now_shared[0] = pos_ball_now[0, 0]
pos_ball_now_shared[1] = pos_ball_now[0, 1]
pos_ball_now_shared[2] = pos_ball_now[0, 2]
for j in range(6):
pos_pb_now[j, 0] += (1. / Frame) * pos_pb_deltamove[j, 0]
pos_pb_now[j, 1] += (1. / Frame) * pos_pb_deltamove[j, 1]
pos_pb_now[j, 2] += (1. / Frame) * pos_pb_deltamove[j, 2]
pos_pw_now[j, 0] += (1. / Frame) * pos_pw_deltamove[j, 0]
pos_pw_now[j, 1] += (1. / Frame) * pos_pw_deltamove[j, 1]
pos_pw_now[j, 2] += (1. / Frame) * pos_pw_deltamove[j, 2]
#
# feed the queue; queue because that animation could be paused
# EmitPosOneWin.put(['pb', j, pos_pb_now[j, 0], pos_pb_now[j, 1], pos_pb_now[j, 2]])
# EmitPosOneWin.put(['pw', j, pos_pw_now[j, 0], pos_pw_now[j, 1], pos_pw_now[j, 2]])
# EmitPosFourWin.put(['pb', j, pos_pb_now[j, 0], pos_pb_now[j, 1], pos_pb_now[j, 2]])
# EmitPosFourWin.put(['pw', j, pos_pw_now[j, 0], pos_pw_now[j, 1], pos_pw_now[j, 2]])
pos_pb_now_shared[j*3] = pos_pb_now[j,0]
pos_pb_now_shared[j*3+1] = pos_pb_now[j,1]
pos_pb_now_shared[j*3+2] = pos_pb_now[j,2]
pos_pw_now_shared[j*3] = pos_pw_now[j,0]
pos_pw_now_shared[j*3+1] = pos_pw_now[j,1]
pos_pw_now_shared[j*3+2] = pos_pw_now[j,2]
#
p_b._offsets3d = pos_pb_now[:, 0], pos_pb_now[:, 1], pos_pb_now[:, 2]
p_w._offsets3d = pos_pw_now[:, 0], pos_pw_now[:, 1], pos_pw_now[:, 2]
p_ball._offsets3d = pos_ball_now[:,0],pos_ball_now[:,1],pos_ball_now[:,2]
#
video_page_iter = video_page_iter+1 # if video is on
plt.savefig("/home/family/Bilder" + "/file%03d.png" % video_page_iter) # if video is on
#
if video_page_iter==100: # or if command store video
os.chdir("/home/family/Bilder")
subprocess.call([
'ffmpeg', '-framerate', '8', '-i', 'file%03d.png', '-r', '30', '-pix_fmt', 'yuv420p',
# 'video_name.mp4'
video_file_name
]) # add -y to overwrite test this
for file_name in glob.glob("*.png"):
os.remove(file_name)
video_page_iter = 0
# simulate the deletion of the free domain. Will be activated later by a GUI
free_sphere.remove()
# fig.canvas.draw()
if i == (Frame - 1):
# reset the deltamove to a clean zero for last position in case of rounding elements
# or set to next step of dynamic move
count_iter = count_iter+1
m, s = divmod(count_iter, 2)
if s == 1:
free_sphere.remove()
fig.canvas.draw()
pos_ball_deltamove[0,0] = -1.
pos_ball_deltamove[0,1] = -1.
pos_ball_deltamove[0,2] = -1.
for k in range(6):
pos_pb_deltamove[k, 0] = -1.
pos_pb_deltamove[k, 1] = -1.
pos_pb_deltamove[k, 2] = -1.
pos_pw_deltamove[k, 0] = -1.
pos_pw_deltamove[k, 1] = -1.
pos_pw_deltamove[k, 2] = -1.
else:
free_sphere = draw_halfsphere(ax1, 5., 9., 4., 2.)
pos_ball_deltamove[0,0] = 1.
pos_ball_deltamove[0,1] = 1.
pos_ball_deltamove[0,2] = 1.
for k in range(6):
pos_pb_deltamove[k, 0] = 1.
pos_pb_deltamove[k, 1] = 1.
pos_pb_deltamove[k, 2] = 1.
pos_pw_deltamove[k, 0] = 1.
pos_pw_deltamove[k, 1] = 1.
pos_pw_deltamove[k, 2] = 1.
pos_ball_now[0,0] = pos_ball_target[0,0]
pos_ball_now[0,1] = pos_ball_target[0,1]
pos_ball_now[0,2] = pos_ball_target[0,2]
pos_ball_now_shared[0] = pos_ball_now[0, 0]
pos_ball_now_shared[1] = pos_ball_now[0, 1]
pos_ball_now_shared[2] = pos_ball_now[0, 2]
for k in range(6):
pos_pb_now[k, 0] = pos_pb_target[k, 0]
pos_pb_now[k, 1] = pos_pb_target[k, 1]
pos_pb_now[k, 2] = pos_pb_target[k, 2]
pos_pw_now[k, 0] = pos_pw_target[k, 0]
pos_pw_now[k, 1] = pos_pw_target[k, 1]
pos_pw_now[k, 2] = pos_pw_target[k, 2]
pos_pb_now_shared[k * 3] = pos_pb_now[k, 0]
pos_pb_now_shared[k * 3 + 1] = pos_pb_now[k, 1]
pos_pb_now_shared[k * 3 + 2] = pos_pb_now[k, 2]
pos_pw_now_shared[k * 3] = pos_pw_now[k, 0]
pos_pw_now_shared[k * 3 + 1] = pos_pw_now[k, 1]
pos_pw_now_shared[k * 3 + 2] = pos_pw_now[k, 2]
#
if __name__=="__main__":
#
######## define the queues for the 2 detached plot processes
mp.set_start_method('spawn')
#
s_w = 10.0
# s_w_shared = Value('d', 10.0)
s_w_shared = mp.Value('f', 10.0)
#
s_d = 4.0
s_d_shared = mp.Value('f', 4.0)
#
s_l = 18.0
s_l_shared = mp.Value('f', 18.0)
# exchange lane width
el_w = 1.0 # normally 3
el_w_shared = mp.Value('f', 1.0) # just 1m in order to show the side
# ball radius
# b_r = 0.53 / (2 * math.pi)
# b_r_shared = Value('d', 0.53 / (2 * math.pi))
#
elevation_shared = mp.Value('f', 10.)
azimut_shared = mp.Value('f', 30.)
#
# define/initiate teams blue and white; array
pos_pb_now = []
pos_pb_now_shared = mp.Array('f',3*6)
pos_pb_target = []
pos_pw_now = []
pos_pw_now_shared = mp.Array('f',3*6)
pos_pw_target = []
pos_pb_deltamove = []
pos_pw_deltamove = []
#
pos_ball_now = []
pos_ball_now_shared = mp.Array('f',3)
pos_ball_target = []
pos_ball_deltamove = []
#
clicked_coord = [] # matrix 2x3 for storing coord of clicked points for distance calculation
clicked_coord.append([0., 0., 0.])
clicked_coord.append([0., 0., 0.])
#
selected_coord = [0., 0., 0.]
#
numb_seq = 0
video_page_iter = 0
video_file_name = "test_video_name.mp4"
#
pos_ball_now.append([5.,9.,0.2]) # ball in the middle
pos_ball_target.append([5.,9.,0.2])
pos_ball_deltamove.append([0., 0., 0.])
#
for i in range(6):
# distribute the players at the side with the same distance
# at game start
pos_pb_now.append([((s_w/6)/2)+i*(s_w/6),1.0, s_d])
pos_pb_target.append([((s_w/6)/2)+i*(s_w/6),1.0, s_d])
pos_pw_now.append([s_w - ((s_w / 6) / 2) - i * (s_w / 6), s_l - 1.0, s_d])
pos_pw_target.append([s_w - ((s_w / 6) / 2) - i * (s_w / 6), s_l - 1.0, s_d])
pos_pb_deltamove.append([0., 0., 0.])
pos_pw_deltamove.append([0., 0., 0.])
#
# Define numpy array which is faster to work with
pos_pb_now = np.array(pos_pb_now, dtype='f')
pos_pb_target = np.array(pos_pb_target, dtype='f')
pos_pw_now = np.array(pos_pw_now, dtype='f')
pos_pw_target = np.array(pos_pw_target, dtype='f')
pos_pb_deltamove = np.array(pos_pb_deltamove, dtype='f')
pos_pw_deltamove = np.array(pos_pw_deltamove, dtype='f')
#
pos_ball_now = np.array(pos_ball_now, dtype='f')
pos_ball_target = np.array(pos_ball_target, dtype='f')
pos_ball_deltamove = np.array(pos_ball_deltamove, dtype='f')
#
clicked_coord = np.array(clicked_coord, dtype='f')
selected_coord = np.array(selected_coord, dtype='f')
#
fig = plt.figure()
ax1 = fig.add_subplot(111,projection='3d')
# field
xG = [0,s_w,s_w,0,0, 0,s_w,s_w,s_w,s_w,s_w, 0, 0,0, 0,s_w]
yG = [0, 0, 0,0,0,s_l,s_l, 0, 0,s_l,s_l,s_l,s_l,0,s_l,s_l]
zG = [0, 0, s_d,s_d,0, 0, 0, 0, s_d, s_d, 0, 0, s_d,s_d, s_d, s_d]
ax1.plot_wireframe (xG,yG,zG,colors= (0,0,1,1)) # blue line game area
# exchange area
xW = [s_w,s_w+el_w,s_w+el_w,s_w,s_w,s_w,s_w+el_w,s_w+el_w,s_w+el_w,s_w+el_w,s_w+el_w,s_w,s_w,s_w,s_w,s_w+el_w]
yW = [0, 0, 0, 0, 0,s_l,s_l, 0, 0,s_l,s_l,s_l,s_l, 0,s_l,s_l]
zW = [0, 0, s_d, s_d, 0, 0, 0, 0, s_d, s_d, 0, 0, s_d, s_d, s_d, s_d]
ax1.plot_wireframe (xW,yW,zW,colors= (0,1,1,1)) # light blue line exchange area
#
ax1.set_xlabel('Wide')
ax1.set_ylabel('Length')
ax1.set_zlabel('Water')
#
# draw the 2 lines which show the depth
xG1 = [0, s_w]
yG1 = [s_d, s_d]
zG1 = [0, 0]
ax1.plot_wireframe(xG1, yG1, zG1, colors=(0, 0, 1, 1),linestyle=':') # blue line
xG2 = [0, s_w]
yG2 = [s_l-s_d, s_l-s_d]
zG2 = [0, 0]
ax1.plot_wireframe(xG2, yG2, zG2, colors=(0, 0, 1, 1),linestyle=':') # blue line
#
# put the axis fix
ax1.set_xlim3d(0, s_w+el_w)
ax1.set_ylim3d(0, s_l)
ax1.set_zlim3d(0, s_d)
ax1.set_aspect(aspect=0.15) # the best
draw_basket(ax1, s_w / 2, 0.24, 0., 0.45)
draw_basket(ax1, s_w / 2, s_l - 0.24, 0., 0.45)
free_sphere = draw_halfsphere(ax1, 5., 9., 4., 2.)
p_b = ax1.scatter(pos_pb_now[:, 0], pos_pb_now[:, 1], pos_pb_now[:, 2],
s=400, alpha = 0.5, c=(0, 0, 1, 1))
p_w = ax1.scatter(pos_pw_now[:, 0], pos_pw_now[:, 1],
pos_pw_now[:, 2], s=400, alpha = 0.5, c="darkgrey")
p_ball = ax1.scatter(pos_ball_now[:,0], pos_ball_now[:,1],
pos_ball_now[:,2], s=100, alpha = 0.5, c="red")
for j, xyz_ in enumerate(pos_pb_now):
annotate3D(ax1, s=str(j+1), xyz=xyz_, fontsize=10, xytext=(-3,3),
textcoords='offset points', ha='right',va='bottom')
for j, xyz_ in enumerate(pos_pw_now):
annotate3D(ax1, s=str(j+1), xyz=xyz_, fontsize=10, xytext=(-3,3),
textcoords='offset points', ha='right', va='bottom')
Frame = 5
for j in range(6):
pos_pb_deltamove[j, 0] = 1.
pos_pb_deltamove[j, 1] = 1.
pos_pb_deltamove[j, 2] = 1.
pos_pw_deltamove[j, 0] = 1.
pos_pw_deltamove[j, 1] = 1.
pos_pw_deltamove[j, 2] = 1.
pos_ball_deltamove[0,0] = 1.
pos_ball_deltamove[0,1] = 1.
pos_ball_deltamove[0,2] = 1.
count_iter = 0
ani1 = animation.FuncAnimation(fig, animate, frames=Frame, interval=1000, blit=False, repeat=True, repeat_delay=1000)
plt.pause(0.001)
p1 = mp.Process(target=OneWindow, args=(s_w_shared, s_d_shared, s_l_shared, el_w_shared,elevation_shared,
azimut_shared, pos_pb_now_shared, pos_pw_now_shared, pos_ball_now_shared))
p1.start()
fig.canvas.mpl_connect('motion_notify_event', onMouseMotion)
fig.canvas.mpl_connect('button_press_event', OnClick)
plt.show()
EDIT1:
"python3 field_basket_design_uwr.py" works.
An error is still coming; it is perhaps the subject of a new thread (not disturbing for the moment); anyway, any comment on how to get rid of it is welcome. Thanks.
/usr/lib/python3/dist-packages/matplotlib/backend_bases.py:2445: MatplotlibDeprecationWarning: Using default event loop until function specific to this GUI is implemented
warnings.warn(str, mplDeprecation)
/usr/lib/python3/dist-packages/cairocffi/surfaces.py:651: UserWarning: implicit cast from 'char *' to a different pointer type: will be forbidden in the future (check that the types are as you expect; use an explicit ffi.cast() if they are correct)
ffi.cast('char*', address), format, width, height, stride)
set_start_method in multiprocessing was introduced in Python 3.4.
The error you are facing is due to the fact that you are running the script with an older Python version; running it with Python 3.4 or above (for example via the python3 command) will fix the error.
For more information, refer to -
https://docs.python.org/3/library/multiprocessing.html#multiprocessing.set_start_method
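If the script also has to start on interpreters that predate 3.4, or you just want a clearer message, you can guard the call; a small sketch:
import sys
import multiprocessing as mp

if __name__ == "__main__":
    if sys.version_info >= (3, 4):
        mp.set_start_method('spawn')
    else:
        sys.exit("This script needs Python 3.4+ for multiprocessing.set_start_method; "
                 "start it with 'python3 field_basket_design_uwr.py'.")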
I had the same issue, but it was not a version problem.
The problem was that one of my own files was named multiprocessing.py.
When I imported multiprocessing, it was importing the wrong file (my own file), so I just changed the file name. I know it is a bit silly, but it may help others...
Edit: here is an example. If you have a file named multiprocessing.py, and the output of cat multiprocessing.py is:
import multiprocessing

if __name__ == '__main__':
    multiprocessing.set_start_method('fork')
you get this error. This is because Python imports your own file instead of the real multiprocessing library. The solution is simply to rename your file.
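A quick way to check which file actually got imported is to print its location; it should point into the standard library, not into your project:
import multiprocessing
print(multiprocessing.__file__)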
My goal is to transform an image in such a way that three source points are mapped to three target points in an empty array. I have solved the finding of the correct affine matrix, however I cannot apply an affine transformation on a color image.
More specifically, I am struggling with the correct use of the scipy.ndimage.interpolation.affine_transform method. As this question and its answers point out, the affine_transform method can be somewhat unintuitive (especially regarding the offset calculation); however, user timday shows how to apply a rotation and a shearing to an image and position it in another array, while user geodata gives more background information.
My problem is to generalize the approach shown there (1) to color images and (2) to an arbitrary transformation which I calculated myself.
This is my code (which should run as is on your computer):
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
def calcAffineMatrix(sourcePoints, targetPoints):
    # For three source- and three target points, find the affine transformation
    # Function works correctly, not part of the question
    A = []
    b = []
    for sp, trg in zip(sourcePoints, targetPoints):
        A.append([sp[0], 0, sp[1], 0, 1, 0])
        A.append([0, sp[0], 0, sp[1], 0, 1])
        b.append(trg[0])
        b.append(trg[1])
    result, resids, rank, s = np.linalg.lstsq(np.array(A), np.array(b))
    a0, a1, a2, a3, a4, a5 = result
    # Ignoring offset here, later use timday's suggested offset calculation
    affineTrafo = np.array([[a0, a1, 0], [a2, a3, 0], [0, 0, 1]], 'd')

    # Testing the correctness of transformation matrix
    for i, _ in enumerate(sourcePoints):
        src = sourcePoints[i]
        src.append(1.)
        trg = targetPoints[i]
        trg.append(1.)
        at = affineTrafo.copy()
        at[2, 0:2] = [a4, a5]
        assert(np.array_equal(np.round(np.array(src).dot(at)), np.array(trg)))
    return affineTrafo
# Prepare source image
sourcePoints = [[162., 112.], [130., 112.], [162., 240.]]
targetPoints = [[180., 102.], [101., 101.], [190., 200.]]
image = np.empty((300, 300, 3), dtype='uint8')
image[:] = 255
# Mark border for better visibility
image[0:2, :] = 0
image[-3:-1, :] = 0
image[:, 0:2] = 0
image[:, -3:-1] = 0
# Mark source points in red
for sp in sourcePoints:
sp = [int(u) for u in sp]
image[sp[1] - 5:sp[1] + 5, sp[0] - 5:sp[0] + 5, :] = np.array([255, 0, 0])
# Show image
plt.subplot(3, 1, 1)
plt.imshow(image)
# Prepare array in which the image is placed
array = np.empty((400, 300, 3), dtype='uint8')
array[:] = 255
a2 = array.copy()
# Mark target points in blue
for tp in targetPoints:
tp = [int(u) for u in tp]
a2[tp[1] - 2:tp[1] + 2, tp[0] - 2:tp[0] + 2] = [0, 0, 255]
# Show array
plt.subplot(3, 1, 2)
plt.imshow(a2)
# Next 5 program lines are actually relevant for question:
# Calculate affine matrix
affineTrafo = calcAffineMatrix(sourcePoints, targetPoints)
# This follows the c_in-c_out method proposed in linked stackoverflow issue
# extended for color channel (no translation here)
c_in = np.array([sourcePoints[0][0], sourcePoints[0][1], 0])
c_out = np.array([targetPoints[0][0], targetPoints[0][1], 0])
offset = (c_in - np.dot(c_out, affineTrafo))
# Affine transform!
ndimage.interpolation.affine_transform(image, affineTrafo, order=2, offset=offset,
output=array, output_shape=array.shape,
cval=255)
# Mark blue target points in array, expected to be above red source points
for tp in targetPoints:
tp = [int(u) for u in tp]
array[tp[1] - 2:tp[1] + 2, tp[0] - 2:tp[0] + 2] = [0, 0, 255]
plt.subplot(3, 1, 3)
plt.imshow(array)
plt.show()
Other approaches I tried include working with the inverse, transpose or both of affineTrafo:
affineTrafo = np.linalg.inv(affineTrafo)
affineTrafo = affineTrafo.T
affineTrafo = np.linalg.inv(affineTrafo.T)
affineTrafo = np.linalg.inv(affineTrafo).T
In his answer, geodata shows how to calculate the matrix that affine_transform needs to do a scaling and rotation:
If one wants a scaling S first and then a rotation R it holds that T=R*S and therefore T.inv=S.inv*R.inv (note the reversed order).
I tried to copy this using matrix decomposition via SVD (decomposing my affine transformation into a rotation, a scaling and another rotation):
u, s, v = np.linalg.svd(affineTrafo[:2,:2])
uInv = np.linalg.inv(u)
sInv = np.linalg.inv(np.diag((s)))
vInv = np.linalg.inv(v)
affineTrafo[:2, :2] = uInv.dot(sInv).dot(vInv)
Again, without success.
For all of my results, it's not (only) an offset problem. It is clearly visible from the pictures that the relative positions of source and target points do not correspond.
I searched the web and stackoverflow and did not find an answer for my problem. Please help me! :)
I finally got it working thanks to AlexanderReynolds' hint to use another library. This is of course a workaround; I could not get it working using scipy's affine_transform, so I used OpenCV's cv2.warpAffine instead. In case this is helpful to anyone else, this is my code:
import numpy as np
import matplotlib.pyplot as plt
import cv2
# Prepare source image
sourcePoints = [[162., 112.], [130., 112.], [162., 240.]]
targetPoints = [[180., 102.], [101., 101.], [190., 200.]]
image = np.empty((300, 300, 3), dtype='uint8')
image[:] = 255
# Mark border for better visibility
image[0:2, :] = 0
image[-3:-1, :] = 0
image[:, 0:2] = 0
image[:, -3:-1] = 0
# Mark source points in red
for sp in sourcePoints:
sp = [int(u) for u in sp]
image[sp[1] - 5:sp[1] + 5, sp[0] - 5:sp[0] + 5, :] = np.array([255, 0, 0])
# Show image
plt.subplot(3, 1, 1)
plt.imshow(image)
# Prepare array in which the image is placed
array = np.empty((400, 300, 3), dtype='uint8')
array[:] = 255
a2 = array.copy()
# Mark target points in blue
for tp in targetPoints:
tp = [int(u) for u in tp]
a2[tp[1] - 2:tp[1] + 2, tp[0] - 2:tp[0] + 2] = [0, 0, 255]
# Show array
plt.subplot(3, 1, 2)
plt.imshow(a2)
# Calculate affine matrix and transform image
M = cv2.getAffineTransform(np.float32(sourcePoints), np.float32(targetPoints))
array = cv2.warpAffine(image, M, (array.shape[1], array.shape[0]), borderValue=[255, 255, 255])  # dsize is (width, height)
# Mark blue target points in array, expected to be above red source points
for tp in targetPoints:
tp = [int(u) for u in tp]
array[tp[1] - 2:tp[1] + 2, tp[0] - 2:tp[0] + 2] = [0, 0, 255]
plt.subplot(3, 1, 3)
plt.imshow(array)
plt.show()
Comments:
Interesting how it worked almost immediately after changing the library. After having spent more than a day trying to get it to work with scipy, this is a lesson for me to change libraries faster.
In case someone wants to find a (least squares) approximation of an affine transformation based on more than three point pairs, this is how you get a matrix that works with cv2.warpAffine:
Code:
def calcAffineMatrix(sourcePoints, targetPoints):
    # For three or more source and target points, find the affine transformation
    A = []
    b = []
    for sp, trg in zip(sourcePoints, targetPoints):
        A.append([sp[0], 0, sp[1], 0, 1, 0])
        A.append([0, sp[0], 0, sp[1], 0, 1])
        b.append(trg[0])
        b.append(trg[1])
    result, resids, rank, s = np.linalg.lstsq(np.array(A), np.array(b))
    a0, a1, a2, a3, a4, a5 = result
    affineTrafo = np.float32([[a0, a2, a4], [a1, a3, a5]])
    return affineTrafo
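The returned 2x3 matrix can then be used exactly like the one from cv2.getAffineTransform (a sketch reusing the variables from the answer above; remember that the dsize argument is (width, height)):
M = calcAffineMatrix(sourcePoints, targetPoints)
array = cv2.warpAffine(image, M, (array.shape[1], array.shape[0]), borderValue=[255, 255, 255])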