Not sure if that is the correct terminology. Basically I am trying to take a black-and-white image and first transform it so that all the white pixels that border black pixels remain white, and everything else turns black. That part of the program works fine and is done in find_edges. Next I need to calculate the distance from each element in the image to the closest white pixel. Right now I am doing it with a for-loop that is insanely slow. Is there a way to write the find_nearest_edge function solely with numpy, without a for-loop calling it on each element? Thanks.
####
from PIL import Image
import numpy as np
from scipy.ndimage import binary_erosion
####
def find_nearest_edge(arr, point):
    w, h = arr.shape
    x, y = point
    xcoords, ycoords = np.meshgrid(np.arange(w), np.arange(h))
    target = np.sqrt((xcoords - x)**2 + (ycoords - y)**2)
    target[arr == 0] = np.inf
    shortest_distance = np.min(target[target > 0.0])
    return shortest_distance
def find_edges(img):
    img = img.convert('L')
    img_np = np.array(img)
    kernel = np.ones((3,3))
    edges = img_np - binary_erosion(img_np, kernel)*255
    return edges
a = Image.open('a.png')
x, y = a.size
edges = find_edges(a)
out = Image.fromarray(edges.astype('uint8'), 'L')
out.save('b.png')
dists = []
for _x in range(x):
    for _y in range(y):
        dist = find_nearest_edge(edges, (_x, _y))
        dists.append(dist)
print(dists)
You can use KDTree to compute distances fast.
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import binary_erosion
from scipy.spatial import KDTree
def find_edges(img):
    img_np = np.array(img)
    kernel = np.ones((3,3))
    edges = img_np - binary_erosion(img_np, kernel)*255
    return edges

def find_closest_distance(img):
    # NOTE: assuming input is binary image and white is any non-zero value!
    white_pixel_points = np.array(np.where(img))
    tree = KDTree(white_pixel_points.T)
    img_meshgrid = np.array(np.meshgrid(np.arange(img.shape[0]),
                                        np.arange(img.shape[1]))).T
    distances, _ = tree.query(img_meshgrid)
    return distances
test_image = np.zeros((200, 200))
rectangle = np.ones((30, 80))
test_image[20:50, 60:140] = rectangle
test_image[150:180, 60:140] = rectangle
test_image[60:140, 20:50] = rectangle.T
test_image[60:140, 150:180] = rectangle.T
test_image = test_image * 255
edge_image = find_edges(test_image)
distance_image = find_closest_distance(edge_image)
fig, axes = plt.subplots(1, 3, figsize=(12, 5))
axes[0].imshow(test_image, cmap='Greys_r')
axes[1].imshow(edge_image, cmap='Greys_r')
axes[2].imshow(distance_image, cmap='Greys_r')
plt.show()
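As an aside, the same per-pixel distance map can be computed in a single call with scipy's Euclidean distance transform; a minimal sketch (my addition, not part of the original answer):

from scipy.ndimage import distance_transform_edt

# distance_transform_edt gives each non-zero element its distance to the
# nearest zero, so invert the mask: edge (white) pixels become the zeros.
distance_image = distance_transform_edt(edge_image == 0)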
You can make your code 25X faster by just changing find_nearest_edge as follows. Many other optimizations are possible, but this is the biggest bottleneck in your code.
from numba import njit

@njit
def find_nearest_edge(arr, point):
    x, y = point
    shortest_distance = np.inf
    for i in range(arr.shape[0]):
        for j in range(arr.shape[1]):
            if arr[i, j] == 0:
                continue
            shortest_distance = min(shortest_distance, (i - x)**2 + (j - y)**2)
    return np.sqrt(shortest_distance)
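A minimal usage sketch (my own, not from the original answer); the first call pays the JIT-compilation cost, so time a second call to see the speedup:

import time

arr = np.zeros((500, 500))
arr[250, 250] = 255                     # a single white pixel

find_nearest_edge(arr, (0, 0))          # warm-up call triggers compilation
t0 = time.perf_counter()
find_nearest_edge(arr, (0, 0))
print('one call took', time.perf_counter() - t0, 'seconds')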
So I am supposed to calculate the convolution between the Fourier-transformed image and the mask.
from scipy import fftpack
import numpy as np
import imageio
from PIL import Image, ImageDraw
import cv2
import matplotlib.pyplot as plt
import math
from scipy.ndimage.filters import convolve
input_image = Image.open('....image....')
input_image=np.array(input_image)
M,N = input_image.shape[0],input_image.shape[1]
FT_img = fftpack.fftshift(fftpack.fft2(input_image))
n = 2; # order value can change this value accordingly
D0 = 60; # cut-off frequency can change this value accordingly
# Designing filter
u = np.arange(0, M)
idx = u > M/2
u[idx] = u[idx] - M
v = np.arange(0, N)
idy = v > N/2
v[idy] = v[idy] - N
V,U = np.meshgrid(v,u)
# Calculating Euclidean Distance
D=np.linalg.norm(V-U)
# determining the filtering mask
H = 1/(1 + (D0/D)**(2*n));
# Convolution between the Fourier Transformed image and the mask
G = convolve(H, FT_img)
And I get a "Runtime error: filter weights array has incorrect shape." error at the last line when I run this code snippet. What I understand is that H is a float and FT_img is an array, so I cannot perform convolution on these. But I don't know how to solve that.
How can I solve this problem?
Calculate the distance D and the filter H for each (u, v); this yields an array the same size as the input image. Multiplying that array (H, the filter) with the image in the Fourier domain is equivalent to convolution in the time domain, and the result is as follows:
import numpy as np
import cv2
import matplotlib.pyplot as plt
# Read Image as Grayscale
img = cv2.imread('input.png', 0)
# Designing filter
#------------------------------------------------------
def butterworth_filter(shape, n=2, D0=60):
    '''
    n = 2   # order value, can change this value accordingly
    D0 = 60 # cut-off frequency, can change this value accordingly
    '''
    M, N = shape
    # Initialize filter with zeros
    H = np.zeros((M, N))
    # Traverse through filter
    for u in range(0, M):
        for v in range(0, N):
            # Get euclidean distance from point D(u,v) to the center
            D_uv = np.sqrt((u - M / 2) ** 2 + (v - N / 2) ** 2)
            # determining the filtering mask
            H[u, v] = 1 / (1 + (D0 / D_uv) ** (2 * n))
    return H
#-----------------------------------------------------
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
phase_spectrumR = np.angle(fshift)
magnitude_spectrum = 20*np.log(np.abs(fshift))
# Generate Butterworth Filter
H = butterworth_filter(img.shape)
# Convolution between the Fourier Transformed image and the mask
G = H * fshift
# Obtain the Result
result = np.abs(np.fft.ifft2(np.fft.ifftshift((G))))
plt.subplot(222)
plt.imshow(img, cmap='gray')
plt.title('Original')
plt.axis('off')
plt.subplot(221)
plt.imshow(magnitude_spectrum, cmap='gray')
plt.title('magnitude spectrum')
plt.axis('off')
plt.subplot(223)
plt.imshow(H, "gray")
plt.title("Butterworth Filter")
plt.axis('off')
plt.subplot(224)
plt.imshow(result, "gray")
plt.title("Result")
plt.axis('off')
plt.show()
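For reference, the double loop in butterworth_filter can also be vectorized with a meshgrid; a sketch of my own (the name butterworth_filter_vec is hypothetical), which should produce the same H while handling the division by zero at the exact center explicitly:

def butterworth_filter_vec(shape, n=2, D0=60):
    M, N = shape
    # Distance of every (u, v) from the center, computed in one shot
    U, V = np.meshgrid(np.arange(M) - M / 2, np.arange(N) - N / 2, indexing='ij')
    D_uv = np.sqrt(U ** 2 + V ** 2)
    with np.errstate(divide='ignore'):
        H = 1 / (1 + (D0 / D_uv) ** (2 * n))
    H[D_uv == 0] = 0  # same value the loop version produces at the center
    return H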
I have the following peak detection that I did using a Hilbert transform, which gets the envelope of a signal; I then detect the peaks from it.
I would like to cut out those peaks: from peak 1 to peak 2, from peak 2 to peak 3, from peak 3 to peak 4, and so on.
(sample image and the resulting peaks plot omitted)
And that's my code:
import cv2
import numpy as np
from PIL import Image
from scipy import signal
from math import factorial
from matplotlib import pyplot as plt
from scipy.signal import savgol_filter
import scipy.signal.signaltools as sigtool
from sklearn.preprocessing import normalize
from scipy.signal import find_peaks, peak_widths, find_peaks_cwt
from scipy.signal import argrelextrema
# ---------------------------------------Functions---------------------------------------------------#
def normalize(v):
    norm = np.linalg.norm(v)
    if norm == 0:
        return v
    return v / norm

'''
reduces the photo to a vector representing its pixel frequency at each column
'''
def image_reduce(img):
    col_counts = cv2.reduce(img, 0, cv2.REDUCE_SUM, dtype=cv2.CV_32SC1)
    column = col_counts.flatten().tolist()
    # print("Column counts:\n\n", column)
    return column

def histogram_plot(convluted_word, peaks, fit):
    plt.plot(convluted_word)
    plt.plot(fit, 'r')
    plt.plot(peaks, fit[peaks], "x")
    plt.show()
# ---------------------------------------Main Code Flow---------------------------------------------------#
# ---------------------------an example on how to use this package----------------------------------------#
def slice_digits(image_name):
    img = cv2.imread(image_name, 0)
    column_frequency = image_reduce(cv2.bitwise_not(img))
    column_frequency = normalize(column_frequency)
    env = np.abs(sigtool.hilbert(column_frequency))
    square_sig = (env > 0.1)
    square_sig = square_sig.astype(float)
    square_sig = np.divide(square_sig, 15.0)
    square_sig = np.where((column_frequency > 0), 0.1, 0)
    peaks, _ = find_peaks(env > 0.1)
    plt.plot(env)
    plt.scatter(peaks, env[peaks], s=50, c='r')
    edges = np.nonzero(np.diff(square_sig))[0]
    plt.scatter(edges, env[edges], c='g')
    plt.show()
    all_slices = []
    for i in range(len(peaks) - 1):
        x0, x1 = peaks[i:i + 2]
        image_slice = img[x0:x1]
        # Now do something with the slice, e.g.
        cv2.imshow("slice", image_slice)
        all_slices.append(image_slice)
    # used for debugging
    # histogram_plot(column_frequency, peaks, square_sig)
    # segments the picture
    # listt, image_final = char_slicer(edges, img)
    plt.show()
    # display result
    # return image_final

if __name__ == '__main__':
    image = r"c:\ahmed\doc.png"
    res_image = slice_digits(image)
What I want is something like that (desired result image omitted):
To slice an image from coordinates in a list, which you have in peaks, you can use:
all_slices = []
for i in range(len(peaks) - 1):
    x0, x1 = peaks[i:i + 2]
    image_slice = img[:, x0:x1]
    # Now do something with the slice, e.g.
    all_slices.append(image_slice)
For your specific case, the complete listing is:
import cv2
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal.signaltools as sigtool
from scipy.signal import find_peaks, peak_widths, find_peaks_cwt
def image_reduce(img):
    col_counts = cv2.reduce(img, 0, cv2.REDUCE_SUM, dtype=cv2.CV_32SC1)
    column = col_counts.flatten().tolist()
    # print("Column counts:\n\n", column)
    return column

def normalize(v):
    norm = np.linalg.norm(v)
    if norm == 0:
        return v
    return v / norm

def slice_digits(image_name):
    img = cv2.imread(image_name, 0)
    column_frequency = image_reduce(cv2.bitwise_not(img))
    column_frequency = normalize(column_frequency)
    env = np.abs(sigtool.hilbert(column_frequency))
    square_sig = (env > 0.1)
    square_sig = square_sig.astype(float)
    square_sig = np.divide(square_sig, 15.0)
    square_sig = np.where((column_frequency > 0), 0.1, 0)
    peaks, _ = find_peaks(env > 0.1)
    plt.plot(env)
    plt.scatter(peaks, env[peaks], s=50, c='r')
    edges = np.nonzero(np.diff(square_sig))[0]
    plt.scatter(edges, env[edges], c='g')
    all_slices = []
    for i in range(len(peaks) - 1):
        x0, x1 = peaks[i:i + 2]
        image_slice = img[:, x0:x1]
        print("coords:", x0, x1)
        # Now do something with the slice, e.g.
        all_slices.append(image_slice)
        plt.figure("Slice %d" % i)
        plt.imshow(image_slice)
    plt.show()

if __name__ == '__main__':
    image = r"c:\ahmed\doc.png"
    res_image = slice_digits(image)
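If you would rather cut at the green edge markers than from peak to peak, here is a small sketch of my own (assuming square_sig starts low, so the edges alternate rising/falling and each pair bounds one glyph):

# Pair up consecutive edge indices: (rise, fall) bounds one glyph
for k in range(0, len(edges) - 1, 2):
    x0, x1 = edges[k], edges[k + 1]
    glyph = img[:, x0:x1]
    cv2.imwrite("glyph_%d.png" % (k // 2), glyph)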
I'd like to generate Voronoi regions, based on a list of centers and an image size.
I tried the following code, based on https://rosettacode.org/wiki/Voronoi_diagram
import math
from random import randint
from PIL import Image

def generate_voronoi_diagram(width, height, centers_x, centers_y):
    image = Image.new("RGB", (width, height))
    putpixel = image.putpixel
    imgx, imgy = image.size
    num_cells = len(centers_x)
    nx = centers_x
    ny = centers_y
    nr, ng, nb = [], [], []
    for i in range(num_cells):
        nr.append(randint(0, 255))
        ng.append(randint(0, 255))
        nb.append(randint(0, 255))
    for y in range(imgy):
        for x in range(imgx):
            dmin = math.hypot(imgx - 1, imgy - 1)
            j = -1
            for i in range(num_cells):
                d = math.hypot(nx[i] - x, ny[i] - y)
                if d < dmin:
                    dmin = d
                    j = i
            putpixel((x, y), (nr[j], ng[j], nb[j]))
    image.save("VoronoiDiagram.png", "PNG")
    image.show()
I get the desired output (image omitted), but it takes too long to generate.
I also tried https://stackoverflow.com/a/20678647
It is fast, but I couldn't find a way to translate it to a numpy array of img_width x img_height, mostly because I don't know how to pass an image-size parameter to the scipy Voronoi class.
Is there any faster way to get this output? No centers or polygon edges are needed.
Thanks in advance
Edited 2018-12-11:
Using @tel's "Fast Solution", the code execution is faster, but it seems the centers have been transformed; this method probably adds a margin to the image.
Fast solution
Here's how you can convert the output of the fast solution based on scipy.spatial.Voronoi that you linked to into a Numpy array of arbitrary width and height. Given the set of regions, vertices that you get as output from the voronoi_finite_polygons_2d function in the linked code, here's a helper function that will convert that output to an array:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
def vorarr(regions, vertices, width, height, dpi=100):
    fig = plt.Figure(figsize=(width/dpi, height/dpi), dpi=dpi)
    canvas = FigureCanvas(fig)
    ax = fig.add_axes([0, 0, 1, 1])
    # colorize
    for region in regions:
        polygon = vertices[region]
        ax.fill(*zip(*polygon), alpha=0.4)
    ax.plot(points[:, 0], points[:, 1], 'ko')
    ax.set_xlim(vor.min_bound[0] - 0.1, vor.max_bound[0] + 0.1)
    ax.set_ylim(vor.min_bound[1] - 0.1, vor.max_bound[1] + 0.1)
    canvas.draw()
    return np.frombuffer(canvas.tostring_rgb(), dtype='uint8').reshape(height, width, 3)
Testing it out
Here's a complete example of vorarr in action:
from scipy.spatial import Voronoi
# get random points
np.random.seed(1234)
points = np.random.rand(15, 2)
# compute Voronoi tessellation
vor = Voronoi(points)
# voronoi_finite_polygons_2d function from https://stackoverflow.com/a/20678647/425458
regions, vertices = voronoi_finite_polygons_2d(vor)
# convert plotting data to numpy array
arr = vorarr(regions, vertices, width=1000, height=1000)
# plot the numpy array
plt.imshow(arr)
Output:
As you can see, the resulting Numpy array does indeed have a shape of (1000, 1000, 3), matching the width and height specified in the call to vorarr.
If you want to fix up your existing code
Here's how you could alter your current code to work with/return a Numpy array:
import math
import matplotlib.pyplot as plt
import numpy as np
def generate_voronoi_diagram(width, height, centers_x, centers_y):
    arr = np.zeros((width, height, 3), dtype=int)
    imgx, imgy = width, height
    num_cells = len(centers_x)
    nx = centers_x
    ny = centers_y
    randcolors = np.random.randint(0, 255, size=(num_cells, 3))
    for y in range(imgy):
        for x in range(imgx):
            dmin = math.hypot(imgx - 1, imgy - 1)
            j = -1
            for i in range(num_cells):
                d = math.hypot(nx[i] - x, ny[i] - y)
                if d < dmin:
                    dmin = d
                    j = i
            arr[x, y, :] = randcolors[j]
    plt.imshow(arr.transpose(1, 0, 2))
    plt.scatter(cx, cy, c='w', edgecolors='k')
    plt.show()
    return arr
Example usage:
np.random.seed(1234)
width = 500
cx = np.random.rand(15)*width
height = 300
cy = np.random.rand(15)*height
arr = generate_voronoi_diagram(width, height, cx, cy)
Example output:
A fast solution without using matplotlib is also possible. Your solution is slow because you're iterating over all pixels, which incurs a lot of overhead in Python. A simple solution to this is to compute all distances in a single numpy operation and assigning all colors in another single operation.
def generate_voronoi_diagram_fast(width, height, centers_x, centers_y):
    # Create grid containing all pixel locations in image
    x, y = np.meshgrid(np.arange(width), np.arange(height))
    # Find squared distance of each pixel location from each center: the (i, j, k)th
    # entry in this array is the squared distance from pixel (i, j) to the kth center.
    squared_dist = (x[:, :, np.newaxis] - centers_x[np.newaxis, np.newaxis, :]) ** 2 + \
                   (y[:, :, np.newaxis] - centers_y[np.newaxis, np.newaxis, :]) ** 2
    # Find closest center to each pixel location
    indices = np.argmin(squared_dist, axis=2)  # Array containing index of closest center
    # Convert the previous 2D array to a 3D array where the extra dimension is a one-hot
    # encoding of the index
    one_hot_indices = indices[:, :, np.newaxis, np.newaxis] == np.arange(centers_x.size)[np.newaxis, np.newaxis, :, np.newaxis]
    # Create a random color for each center
    colors = np.random.randint(0, 255, (centers_x.size, 3))
    # Return an image where each pixel has a color chosen from `colors` by its
    # closest center
    return (one_hot_indices * colors[np.newaxis, np.newaxis, :, :]).sum(axis=2)
Running this function on my machine obtains a ~10x speedup relative to the original iterative solution (not taking plotting and saving the result to disk into account). I'm sure there are still a lot of other tweaks which could further accelerate my solution.
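One such tweak (my suggestion, not part of the original answer): replace the one-hot encoding with fancy indexing, which avoids materializing the (height, width, num_centers, 3) intermediate array entirely:

def generate_voronoi_diagram_faster(width, height, centers_x, centers_y):
    # Same squared-distance computation as above
    x, y = np.meshgrid(np.arange(width), np.arange(height))
    squared_dist = (x[:, :, np.newaxis] - centers_x[np.newaxis, np.newaxis, :]) ** 2 + \
                   (y[:, :, np.newaxis] - centers_y[np.newaxis, np.newaxis, :]) ** 2
    indices = np.argmin(squared_dist, axis=2)
    colors = np.random.randint(0, 255, (centers_x.size, 3))
    # Look up each pixel's color directly by its closest-center index
    return colors[indices]  # shape (height, width, 3)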
I am new to Python and am trying to divide an image into n different polygons. My target is to convert an image into n random polygon-shaped images. I tried a Voronoi algorithm, but it's kind of messy. I would really appreciate any help, e.g. any other segmentation method.
My previous code:
import random
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import Voronoi, voronoi_plot_2d

img = plt.imread("abc.jpg")
fig, ax = plt.subplots()
ax.imshow(img)

def points(radius, rangeX, rangeY, qty):
    deltas = set()
    for x in range(-radius, radius + 1):
        for y in range(-radius, radius + 1):
            if x*x + y*y <= radius*radius:
                deltas.add((x, y))
    randPoints = []
    excluded = set()
    i = 0
    while i < qty:
        x = random.randrange(*rangeX)
        y = random.randrange(*rangeY)
        if (x, y) in excluded:
            continue
        randPoints.append((x, y))
        i += 1
        excluded.update((x + dx, y + dy) for (dx, dy) in deltas)
    return randPoints

def plot1(randPoints, fig):
    points = np.array(randPoints)
    vor = Voronoi(points)
    print(vor.vertices)
    voronoi_plot_2d(vor, ax=fig.gca())
    # plt.savefig('abc.png')
    plt.show()

radius = 20
rangeX = (0, 960)
rangeY = (0, 480)
qty = 9
points = points(radius, rangeX, rangeY, qty)
plot1(points, fig)
My input and output images are omitted here; the output is for n = 9. I would appreciate any help I can get.
I have an image that I try to rotate around the x, y and z axes (rectification). This works fine, but I lose a lot of data. This is the script I use:
# import libraries
import numpy as np
# import dateutil
# import pyparsing
import matplotlib.pyplot as plt
import cv2
import sys
from scipy import *
import Image
import matrotation as rmat
import math
from scipy.interpolate import griddata

# set variable with location of files
working_dir = r'C:\Users\Yorian\Desktop\TU\Stage Shore\python_files\Rectification'
sys.path.append(working_dir)

# C is a 3x1 matrix with (Xc, Yc, Zc).transpose()
# for now: C is the zero vector
C = np.zeros((3,1), dtype='float32')

# 3x3 identity matrix
I = np.identity(3)

# K matrix 3x3, loads the center pixel automatically as the point to rotate around
K = np.array([[1.49661077e+04, -4.57744650e-13, 0.0],
              [0.0, -1.49661077e+04, 0.0],
              [0.0, 0.0, 1.0]])

# rotation matrix 1 (3x3) around the x, y and z axes
R1 = rmat.getR(25.0, 45.0, 0.0)

# [I|-C] (see Sierd's paper)
I_extended = np.hstack((I, C))

# P = K*R*I
P1 = K.dot(R1).dot(I_extended)

# rotation matrix 2
R2 = rmat.getR(0.0, 0.0, 0.0)
P2 = K.dot(R2).dot(I_extended)

# Homography matrix = H = P_rect * pinv(P) => P2 * pinv(P1)
H = P2.dot(np.linalg.pinv(P1))

# do image transform: x_uv_new = H * x_uv_original
# load image and convert it to grayscale (L)
img = Image.open('c5.jpg').convert('L')
# img.show()
img_array = np.array(img)
height = img_array.shape[0]
width = img_array.shape[1]
U, V = np.meshgrid(range(img_array.shape[1]),
                   range(img_array.shape[0]))
UV = np.vstack((U.flatten(),
                V.flatten())).T
UV_warped = cv2.perspectiveTransform(np.array([UV]).astype(np.float32), H)
UV_warped = UV_warped[0]
UV_warped = UV_warped.astype(np.int)

x_translation = min(UV_warped[:,0])
y_translation = min(UV_warped[:,1])

new_width = np.amax(UV_warped[:,0]) - np.amin(UV_warped[:,0])
new_height = np.amax(UV_warped[:,1]) - np.amin(UV_warped[:,1])
# new_img_2 = cv2.warpPerspective(img_array, H, (new_height+1, new_width+1))

UV_warped[:,0] = UV_warped[:,0] - int(x_translation)
UV_warped[:,1] = UV_warped[:,1] - int(y_translation)

# create box for image
new_img = np.zeros((new_height+1, new_width+1))  # 0 = black, 255 = white background

for uv_pix, UV_warped_pix in zip(UV, UV_warped):
    x_orig = uv_pix[0]  # x in the original image
    y_orig = uv_pix[1]  # y in the original image
    color = img_array[y_orig, x_orig]
    x_new = UV_warped_pix[0]  # new x
    y_new = UV_warped_pix[1]  # new y
    new_img[y_new, x_new] = np.array(color)

img = Image.fromarray(np.uint8(new_img))
img.save("testje.jpg")
This works, but I miss a lot of information: the larger the rotations, the more information I lose. To get more information back I want to interpolate the missing points. I tried to do this using griddata(), but it returns an array that looks like this:
[nan]
The code for this:
# (setup identical to the script above, up to and including the UV_warped translation)
# create box for image
data = np.zeros((len(UV_warped), 1))

for i, uv_pix in enumerate(UV):
    data[i, 0] = img_array[uv_pix[1], uv_pix[0]]

grid = griddata(UV_warped, data, (new_width+1, new_height+1), method='linear')
Can anybody help me get an interpolated image from this?
BTW: I used the function warpPerspective as someone told me to, but it stretches the image rather than "rotating" it.
I also looked at cv2.inpaint() but can't get that to work either. I found this: http://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_photo/py_inpainting/py_inpainting.html but that only plots the result; I want to make an image of it.
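For what it's worth, cv2.inpaint returns an array that can be saved directly instead of plotted; a rough sketch under the assumption that the unfilled pixels in new_img are still 0:

mask = np.uint8(new_img == 0) * 255   # assumed: the holes are the remaining zeros
filled = cv2.inpaint(np.uint8(new_img), mask, 3, cv2.INPAINT_TELEA)
cv2.imwrite('filled.jpg', filled)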
EDIT:
The code I used to do this with warpPerspective:
# Importing modules
import json
import urllib2
import numpy as np
import cv2
from scipy import *
import Image

# data is now a dictionary containing lists with dictionaries with the x, y, z, U, V
# example:
# data[cameraID][listnumber] = {'x': x, 'y': y, 'z': z, 'U': U, 'V': V}

T = {}  # T is a dict of translation matrices, one for each camera
for cam in data:
    if len(cam) > 4:
        xyz_ar = np.array([[data[cam][0]['x'], data[cam][0]['y']],
                           [data[cam][1]['x'], data[cam][1]['y']],
                           [data[cam][2]['x'], data[cam][2]['y']],
                           [data[cam][3]['x'], data[cam][3]['y']]], np.float32)
        UV_ar = np.array([[data[cam][0]['U'], data[cam][0]['V']],
                          [data[cam][1]['U'], data[cam][1]['V']],
                          [data[cam][2]['U'], data[cam][2]['V']],
                          [data[cam][3]['U'], data[cam][3]['V']]], np.float32)
        T[cam] = cv2.getPerspectiveTransform(UV_ar, xyz_ar)
    else:
        print('not enough measurement points for this camera')

# load image
img = cv2.imread('c5.jpg')
rows, cols, channels = img.shape

# warp for camera 5
dst = cv2.warpPerspective(img, T[u'KDXX05C'], (rows, cols))
new_img = Image.fromarray(np.uint8(dst))
new_img.save('testje.jpg')
I am still convinced that warpPerspective does exactly what you want (jedi mind trick). Seriously, it should do in one line what you are trying to achieve with the meshgrid, vstack and griddata.
Can you try the following code? (I am not familiar with Python, so this might require some adjustments):
# load image and convert it to grayscale (L)
img = cv2.imread('c5.jpg')
rows, cols, channels = img.shape
# img.show()

# Homography matrix = H = P_rect * pinv(P) => P2 * pinv(P1)
H = P2.dot(np.linalg.pinv(P1))

# note: dsize is (width, height), i.e. (cols, rows)
dst = cv2.warpPerspective(img, H, (cols, rows), flags=cv2.INTER_LINEAR)
new_img = Image.fromarray(np.uint8(dst))
new_img.save('testje.jpg')
where H is the exact same matrix as you use in the first code sample you gave.
The third argument of griddata is an (M, D)-shaped array of locations that need interpolation (D=2 here). You are passing a tuple of (width, height), which is probably why you get a [nan] array.
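A sketch of what the corrected call might look like (my own, using the variable names from the question's second listing): build a meshgrid of all target pixel locations and pass those as the interpolation points:

# Every (x, y) location in the output image that needs a value
grid_x, grid_y = np.meshgrid(np.arange(new_width + 1), np.arange(new_height + 1))
interpolated = griddata(UV_warped, data.ravel(), (grid_x, grid_y), method='linear')
# interpolated has shape (new_height + 1, new_width + 1); save it with e.g.
# Image.fromarray(np.uint8(np.nan_to_num(interpolated))).save('interpolated.jpg')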