Problem with stereo rectification using OpenCV and Python - python

I am working on a stereo camera rig with 4 discrete cameras (of the same type), but at the moment only one pair (cam1 and cam2) is needed.
The aim is to calibrate the stereo pair and get 3D information about the scene. I am using Python 3.6 with OpenCV 3.4.3 in Visual Studio 2017.
I took 28 images of a chessboard calibration pattern and calibrated the cameras individually and as a stereo pair with the standard OpenCV procedure.
Since the calibration data seems good and the distortion correction works fine, the next step is the rectification of the images.
This is where things get weird. I spent the last 3 weeks working on this, read a lot, tried a lot, and always got poor results. I used cv2.stereoCalibrate (also with different flags, as suggested in various threads), cv2.stereoRectify (also with different alpha values), cv2.initUndistortRectifyMap and cv2.remap for the actual remapping of the images (method 1). But the results are never as expected.
I recently managed to get images that actually look rectified using cv2.stereoRectifyUncalibrated. For that I did not use matched feature points (since SURF and SIFT are non-free...) but a slightly different approach: the corners of the calibration pattern in my 28 calibration images are used as input points. This works reasonably well, but the rectified images still don't look perfect.
Here are my images (these are not calibration images) so you can see what I'm talking about:
original left and right images
undistorted images
rectified with method 1, alpha=1
rectified with method 1, alpha=0
rectified uncalibrated, best result I got by now
Can anybody give me a hint what's wrong with my usage of method 1? I've seen a lot of posts about similar problems, but I couldn't find the solution in their comments. Or is this a bug in OpenCV?
Or does anyone have an idea how to improve the uncalibrated rectification?
Here is a code snippet with the relevant calls:
# imports
import numpy as np
import cv2
import glob
import argparse
import sys
import os
# size calib array
numEdgeX = 10
numEdgeY = 7
# preface
exitCode = 0
# get directories
pathDir = str(os.path.dirname(os.path.realpath(__file__)))
pathDir = pathDir[:-17]
pathCalib = pathDir + "\\CalibData" + "\\chess"
try:
# define pair
p = 1
cal_path = pathCalib + "\\pair" + str(p)
images_right = glob.glob(cal_path + '\RIGHT/*.bmp')
images_left = glob.glob(cal_path + '\LEFT/*.bmp')
images_left.sort()
images_right.sort()
# termination criteria
criteria = (cv2.TermCriteria_EPS +
cv2.TermCriteria_MAX_ITER, 30, 0.001)
criteria_cal = (cv2.TermCriteria_EPS +
cv2.TermCriteria_MAX_ITER, 30, 1e-5)
# prepare object points, like (0,0,0); (1,0,0); ...; (6,5,0)
objp = np.zeros((numEdgeX*numEdgeY, 3), np.float32)
objp[:, :2] = np.mgrid[0:numEdgeX, 0:numEdgeY].T.reshape(-1, 2)
objpoints = [] # 3d points in real world space
imgpoints_l = [] # 2d points in image plane for calibration
imgpoints_r = [] # 2d points in image plane for calibration
for i, fname in enumerate(images_right):
print(str(i+1) + " out of " + str(len(images_right)))
img_l = cv2.imread(images_left[i])
img_r = cv2.imread(images_right[i])
# convert to cv2
img_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
img_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)
# find the chess board corners
ret_l, corners_l = cv2.findChessboardCorners(img_l, (numEdgeX, numEdgeY), None)
ret_r, corners_r = cv2.findChessboardCorners(img_r, (numEdgeX, numEdgeY), None)
objpoints.append(objp)
if ret_l is True:
print("image " + str(i+1) + "left - io")
rt = cv2.cornerSubPix(img_l, corners_l, (11, 11),
(-1, -1), criteria)
imgpoints_l.append(corners_l)
if ret_r is True:
print("image " + str(i+1) + "right - io")
rt = cv2.cornerSubPix(img_r, corners_r, (11, 11),
(-1, -1), criteria)
imgpoints_r.append(corners_r)
# get shape
img_shape = img_l.shape[::-1]
### CALIBRATION ###
# calibrate left camera
rt, M1, d1, r1, t1 = cv2.calibrateCamera(
objpoints, imgpoints_l, img_shape, None, None)
# calibrate right camera
rt, M2, d2, r2, t2 = cv2.calibrateCamera(
objpoints, imgpoints_r, img_shape, None, None)
# stereo calibration
flags = (cv2.CALIB_FIX_K5 + cv2.CALIB_FIX_K6)
stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER +
cv2.TERM_CRITERIA_EPS, 100, 1e-5)
#flags = 0
#flags = cv2.CALIB_USE_INTRINSIC_GUESS
#flags = cv2.CALIB_FIX_PRINCIPAL_POINT
#flags = cv2.CALIB_FIX_ASPECT_RATIO
#flags = cv2.CALIB_ZERO_TANGENT_DIST
#flags = cv2.CALIB_FIX_INTRINSIC
#flags = cv2.CALIB_FIX_FOCAL_LENGTH
#flags = cv2.CALIB_FIX_K1...6
#flags = cv2.CALIB_RATIONAL_MODEL
#flags = cv2.CALIB_THIN_PRISM_MODEL
#flags = cv2.CALIB_SAME_FOCAL_LENGTH
#flags = cv2.CALIB_FIX_S1_S2_S3_S4
flags = (cv2.CALIB_FIX_PRINCIPAL_POINT | cv2.CALIB_FIX_ASPECT_RATIO | cv2.CALIB_FIX_FOCAL_LENGTH |
cv2.CALIB_FIX_INTRINSIC | cv2.CALIB_FIX_K3 | cv2.CALIB_FIX_K4 | cv2.CALIB_FIX_K5 |
cv2.CALIB_FIX_K6)
T = np.zeros((3, 1), dtype=np.float64)
R = np.eye(3, dtype=np.float64)
ret, M1, d1, M2, d2, R, T, E, F = cv2.stereoCalibrate(
objpoints, imgpoints_l,
imgpoints_r, M1, d1, M2,
d2, img_shape,
criteria = stereocalib_criteria,
flags=flags)
# get new optimal camera matrix
newCamMtx1, roi1 = cv2.getOptimalNewCameraMatrix(M1, d1, img_shape, 0, img_shape)
newCamMtx2, roi2 = cv2.getOptimalNewCameraMatrix(M2, d2, img_shape, 0, img_shape)
# rectification and undistortion maps which can be used directly to correct the stereo pair
(rectification_l, rectification_r, projection_l,
projection_r, disparityToDepthMap, ROI_l, ROI_r) = cv2.stereoRectify(
M1, d1, M2, d2, img_shape, R, T,
None, None, None, None, None,
#cv2.CALIB_ZERO_DISPARITY, # principal points of each camera have the same pixel coordinates in rect views
alpha=0) # alpha=1 no pixels lost, alpha=0 pixels lost
leftMapX, leftMapY = cv2.initUndistortRectifyMap(
M1, d1, rectification_l, projection_l,
img_shape, cv2.CV_32FC1)
rightMapX, rightMapY = cv2.initUndistortRectifyMap(
M2, d2, rectification_r, projection_r,
img_shape, cv2.CV_32FC1)
### UNCALIBRATED RECTIFICATION ###
imgpoints_l_undis = []
imgpoints_r_undis = []
for i, fname in enumerate(images_right):
img_l = cv2.imread(images_left[i])
img_r = cv2.imread(images_right[i])
# convert to cv2
img_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
img_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)
# undistort
img_l_undis = cv2.undistort(img_l, M1, d1, None, newCamMtx1)
img_r_undis = cv2.undistort(img_r, M2, d2, None, newCamMtx2)
# find the chess board corners in undistorted image
ret_l_undis, corners_l_undis = cv2.findChessboardCorners(img_l_undis, (numEdgeX, numEdgeY), None)
ret_r_undis, corners_r_undis = cv2.findChessboardCorners(img_r_undis, (numEdgeX, numEdgeY), None)
if ret_l_undis is True:
rt = cv2.cornerSubPix(img_l_undis, corners_l_undis, (11, 11), (-1, -1), criteria)
for j in range(0, len(rt)):
x = rt[j][0,:]
imgpoints_l_undis.append(x)
if ret_r_undis is True:
rt = cv2.cornerSubPix(img_r_undis, corners_r_undis, (11, 11), (-1, -1), criteria)
for j in range(0, len(rt)):
x = rt[j][0,:]
imgpoints_r_undis.append(x)
# convert to np array
imgpoints_l_undis = np.array(imgpoints_l_undis)
imgpoints_r_undis = np.array(imgpoints_r_undis)
# compute rectification uncalibrated
ret, uncRectMtx1, uncRectMtx2 = cv2.stereoRectifyUncalibrated(imgpoints_l_undis, imgpoints_r_undis, F, img_shape)
### REMAPPING ###
# load images and convert to cv2 format
img_l = cv2.imread(images_left[0])
img_l = cv2.cvtColor(img_l, cv2.COLOR_BGR2GRAY)
img_l_undis = cv2.undistort(img_l, M1, d1, None, newCamMtx1)
img_r = cv2.imread(images_right[0])
img_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)
img_r_undis = cv2.undistort(img_r, M2, d2, None, newCamMtx2)
# remap
imglCalRect = cv2.remap(img_l, leftMapX, leftMapY, cv2.INTER_LINEAR)
imgrCalRect = cv2.remap(img_r, rightMapX, rightMapY, cv2.INTER_LINEAR)
numpyHorizontalCalibRect = np.hstack((imglCalRect, imgrCalRect))
# warp for uncalibrated rectification
imglUncalRect = cv2.warpPerspective(img_l_undis, uncRectMtx1, img_shape)
imgrUncalRect = cv2.warpPerspective(img_r_undis, uncRectMtx2, img_shape)
numpyHorizontalUncalibRect = np.hstack((imglUncalRect, imgrUncalRect))
### SHOW RESULTS ###
# calculate point arrays for epipolar lines
lineThickness = 5
lineColor = [0, 255, 0]
numLines = 20
interv = round(img_shape[0] / numLines)
x1 = np.zeros((numLines, 1))
y1 = np.zeros((numLines, 1))
x2 = np.full((numLines, 1), (3*img_shape[1]))
y2 = np.zeros((numLines, 1))
for jj in range(0, numLines):
y1[jj] = jj * interv
y2 = y1
for jj in range(0, numLines):
cv2.line(numpyHorizontalCalibRect, (x1[jj], y1[jj]), (x2[jj], y2[jj]),
lineColor, lineThickness)
cv2.line(numpyHorizontalUncalibRect, (x1[jj], y1[jj]), (x2[jj], y2[jj]),
lineColor, lineThickness)
cv2.namedWindow("calibRect", cv2.WINDOW_NORMAL)
cv2.namedWindow("uncalibRect", cv2.WINDOW_NORMAL)
cv2.imshow("calibRect", numpyHorizontalCalibRect)
cv2.imshow("uncalibRect", numpyHorizontalUncalibRect)
cv2.waitKey()
except (IOError, ValueError):
print("An I/O error or a ValueError occurred")
except:
print("An unexpected error occurred")
raise
Thanks!

Solved it!
The issue was that OpenCV interpreted my images as a vertical stereo system; I had just been looking at it as if it were horizontal.
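For anyone running into the same thing: whether stereoRectify has decided the pair is horizontal or vertical can be read off the second projection matrix it returns (projection_r in the snippet above). A minimal sketch of that check; stereo_orientation is just an illustrative helper, not an OpenCV function:
import numpy as np
def stereo_orientation(P2):
    # P2 is the 3x4 projection matrix of the second camera from cv2.stereoRectify.
    # For a horizontal pair the baseline ends up in P2[0, 3] (Tx * f),
    # for a vertical pair it ends up in P2[1, 3] (Ty * f).
    return "horizontal" if abs(P2[0, 3]) >= abs(P2[1, 3]) else "vertical"
# print(stereo_orientation(projection_r))
# For a vertical pair, corresponding points share the same column after rectification,
# so stack the rectified images with np.vstack and draw vertical lines when checking
# the epipolar geometry, instead of np.hstack plus horizontal lines.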

Related

OpenCV, why the calibration of my fisheye camera is so bad?

I'm trying to set up a fish-eye camera for object localisation with respect to a particular frame of reference.
I tried both the OpenCV fisheye module and the rational model from calibrateCamera() to calibrate. I obtained this result.
I collected 2 different datasets, one with calibration images taken mostly close to the camera (ds1) and a second one with images taken from afar (ds2). ds12 is a dataset obtained by merging the two.
nok4 indicates the fisheye model with k4 fixed to 0.
rm is the rational model from cv2.calibrateCamera()
The camera has 178° horizontal FOV and 101° vertical FOV, the distortion is corrected mostly in the center of the image with disappointing results in the outermost parts of the image.
Am I doing something wrong? What could I do to improve the results?
Edit
Here's the code I'm using for both the calibration processes:
import cv2 as cv
import os
import numpy as np
cwd = os.path.dirname(os.path.realpath(__file__))
os.chdir(cwd)
number = None
folder_name = "merged_images"
if number is not None:
folder_name += "_" + str(number)
points_path = os.path.join(folder_name, "dataset", "good_detections")
npz = np.load(os.path.join(points_path, "points.npz"))
square_size = 0.02435
imgpoints = npz["imgpoints"]
objpoints = npz["objpoints"] * square_size
file_names = npz["file_names"]
shuffle = True
if shuffle:
if "indices.npz" in os.listdir(points_path):
p = np.load(os.path.join(points_path, "indices.npz"))["indices"]
else:
print("Random indices assigned")
p = np.random.permutation(len(imgpoints))
imgpoints = imgpoints[p]
objpoints = objpoints[p]
file_names = file_names[p]
img = cv.imread(os.path.join(points_path, file_names[2].replace("detected_", "")))
flag_list = [
cv.CALIB_RATIONAL_MODEL,
# cv.CALIB_ZERO_TANGENT_DIST,
]
calibration_flags = 0
for flag in flag_list:
calibration_flags += flag
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
shape = gray.shape[::-1]
ret, mtx, dist, rvecs, tvecs = cv.calibrateCamera(objpoints,
imgpoints,
shape,
None,
None,
flags = calibration_flags
)
h, w = img.shape[:2]
newcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))
dst = cv.undistort(img, mtx, dist, None, newcameramtx)
# fisheye model
flag_list = [
cv.fisheye.CALIB_RECOMPUTE_EXTRINSIC,
cv.fisheye.CALIB_CHECK_COND,
cv.fisheye.CALIB_FIX_SKEW,
# cv.fisheye.CALIB_FIX_K4,
# cv.fisheye.CALIB_FIX_K3,
# cv.fisheye.CALIB_FIX_K2,
# cv.fisheye.CALIB_FIX_K1,
]
calibration_flags = 0
for flag in flag_list:
calibration_flags += flag
N_OK = len(objpoints)
K = np.zeros((3, 3))
D = np.zeros((4, 1))
rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(N_OK)]
n_objpoints = [np.expand_dims(objp, 0) for objp in objpoints]
all_true_points = list(n_objpoints)
all_image_points = list(imgpoints)
all_frames = list(file_names)
rejected = []
counter = 0
while True:
try:
rms, mtx, dist, rvecs, tvecs = \
cv.fisheye.calibrate(
all_true_points,
all_image_points,
gray.shape[::-1],
K,
D,
rvecs,
tvecs,
calibration_flags,
(cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 30, 1e-3)
)
print('Found a calibration based on {} well-conditioned images.'.format(len(all_true_points)))
break
except cv.error as err:
try:
idx = int(str(err).split('array ')[1][0]) # Parse index of invalid image from error message
all_true_points.pop(idx)
all_image_points.pop(idx)
rejected.append(all_frames.pop(idx))
print(f"{counter}. Removed ill-conditioned image {idx} from the data. Trying again...".format(idx))
counter += 1
except IndexError:
raise err
h,w = img.shape[:2]
DIM = (w, h)
dim1 = img.shape[:2][::-1] # dim1 is the dimension of input image to un-distort
dim2 = None
dim3 = None
balance = 1
assert dim1[0]/dim1[1] == DIM[0]/DIM[1], "Image to undistort needs to have same aspect ratio as the ones used in calibration"
if not dim2:
dim2 = dim1
if not dim3:
dim3 = dim1
scaled_K = K * dim1[0] / DIM[0]  # The values of K scale with the image dimension.
scaled_K[2][2] = 1.0  # Except that K[2][2] is always 1.0
# This is how scaled_K, dim2 and balance are used to determine the final K used to undistort the image. The OpenCV documentation fails to make this clear!
new_K = cv.fisheye.estimateNewCameraMatrixForUndistortRectify(scaled_K, D, dim2, np.eye(3), balance=balance)
map1, map2 = cv.fisheye.initUndistortRectifyMap(scaled_K, D, np.eye(3), new_K, dim3, cv.CV_16SC2)
undistorted_img = cv.remap(img, map1, map2, interpolation=cv.INTER_LINEAR, borderMode=cv.BORDER_CONSTANT)
new_img = cv.hconcat([undistorted_img, img])
Corner extraction is performed with the following code:
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
file_names = [] # files analyzed
for file in os.listdir(detections_path):
if file.startswith("hd_frame"):
frame = cv.imread(os.path.join(detections_path, file))
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
found, corners = cv.findChessboardCorners(gray, (9,6), None)
if found:
file_name = "detected_" + file
objpoints.append(objp)
imgpoints.append(corners)
# save file_name
file_names.append(file_name)
corners2 = cv.cornerSubPix(gray, corners, (11,11), (-1,-1), criteria)
# Draw and display the corners
cv.drawChessboardCorners(frame, (9,6), corners2, found)
# save image and points
cv.imwrite(os.path.join(detections_path, file_name), frame)
You can use a single image for calibration as described in detail here: https://discorpy.readthedocs.io/en/latest/usage/demo_06.html . The correction model used in this package may give you better results than the model used by OpenCV: https://discorpy.readthedocs.io/en/latest/tutorials/methods.html
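A separate sanity check that often helps with wide-FOV calibrations is to look at the per-view reprojection error rather than only the overall RMS, and to drop views whose error is much larger than the rest before recalibrating. A small sketch, assuming objpoints, imgpoints, file_names, mtx, dist, rvecs and tvecs are the variables from the calibrateCamera() call in the question:
import numpy as np
import cv2 as cv
# Per-view RMS reprojection error for the standard (non-fisheye) calibration.
for i in range(len(objpoints)):
    projected, _ = cv.projectPoints(objpoints[i], rvecs[i], tvecs[i], mtx, dist)
    diff = imgpoints[i].reshape(-1, 2) - projected.reshape(-1, 2)
    rms = np.sqrt((diff ** 2).sum(axis=1).mean())
    print(f"{file_names[i]}: {rms:.3f} px")
# cv.fisheye.projectPoints can be used analogously for the fisheye model.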

Getting a 3D map on Meshlab with my disparity map

I have a program that finds the disparity map from 2 images taken by two cameras that are not stereo-calibrated. The disparity map looks good, but when I add a piece of code to get a 3D map via MeshLab, I only get scattered points (see result_clou.png).
In other threads, I saw that I had to change the type and divide the disparity map by 16. I tried with a new map called disparity_SGBM2 as follows: disparity_SGBM2 = disparity_SGBM.astype(np.float32) / 16.0
I took a screenshot of the .ply with its error message (see result_disparity_SGBM2.png).
Does anyone have an idea how to get me unstuck?
I have also attached my Python program below (because I can't upload a Python file) and the images used with the program (clou-l.png and clou-r.png).
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
# Read both images and convert to grayscale
img1 = cv.imread('clou-l.png', cv.IMREAD_GRAYSCALE)
img2 = cv.imread('clou-r.png', cv.IMREAD_GRAYSCALE)
# ------------------------------------------------------------
# PREPROCESSING
# Compare unprocessed images
#fig, axes = plt.subplots(1, 2, figsize=(15, 10))
#axes[0].imshow(img1, cmap="gray")
#axes[1].imshow(img2, cmap="gray")
#axes[0].axhline(250)
#axes[1].axhline(250)
#axes[0].axhline(450)
#axes[1].axhline(450)
#plt.suptitle("Original images")
#plt.savefig("original_images.png")
#plt.show()
# 1. Detect keypoints and their descriptors
# Based on: https://docs.opencv.org/master/dc/dc3/tutorial_py_matcher.html
# Initiate SIFT detector
sift = cv.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
# Visualize keypoints
imgSift = cv.drawKeypoints(
img1, kp1, None, flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
#cv.imshow("SIFT Keypoints", imgSift)
#cv.imwrite("sift_keypoints.png", imgSift)
# Match keypoints in both images
# Based on: https://docs.opencv.org/master/dc/dc3/tutorial_py_matcher.html
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50) # or pass empty dictionary
flann = cv.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# Keep good matches: calculate distinctive image features
# Lowe, D.G. Distinctive Image Features from Scale-Invariant Keypoints. International Journal of Computer Vision 60, 91–110 (2004). https://doi.org/10.1023/B:VISI.0000029664.99615.94
# https://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf
matchesMask = [[0, 0] for i in range(len(matches))]
good = []
pts1 = []
pts2 = []
for i, (m, n) in enumerate(matches):
if m.distance < 0.7*n.distance:
# Keep this keypoint pair
matchesMask[i] = [1, 0]
good.append(m)
pts2.append(kp2[m.trainIdx].pt)
pts1.append(kp1[m.queryIdx].pt)
# Draw the keypoint matches between both pictures
# Still based on: https://docs.opencv.org/master/dc/dc3/tutorial_py_matcher.html
draw_params = dict(matchColor=(0, 255, 0),
singlePointColor=(255, 0, 0),
matchesMask=matchesMask[300:500],
flags=cv.DrawMatchesFlags_DEFAULT)
keypoint_matches = cv.drawMatchesKnn(
img1, kp1, img2, kp2, matches[300:500], None, **draw_params)
#cv.imshow("Keypoint matches", keypoint_matches)
#cv.imwrite("keypoint_matches.png", keypoint_matches)
# ------------------------------------------------------------
# STEREO RECTIFICATION
# Calculate the fundamental matrix for the cameras
# https://docs.opencv.org/master/da/de9/tutorial_py_epipolar_geometry.html
pts1 = np.int32(pts1)
pts2 = np.int32(pts2)
fundamental_matrix, inliers = cv.findFundamentalMat(pts1, pts2, cv.FM_RANSAC)
# We select only inlier points
pts1 = pts1[inliers.ravel() == 1]
pts2 = pts2[inliers.ravel() == 1]
# Visualize epilines
# Adapted from: https://docs.opencv.org/master/da/de9/tutorial_py_epipolar_geometry.html
def drawlines(img1src, img2src, lines, pts1src, pts2src):
''' img1 - image on which we draw the epilines for the points in img2
lines - corresponding epilines '''
r, c = img1src.shape
img1color = cv.cvtColor(img1src, cv.COLOR_GRAY2BGR)
img2color = cv.cvtColor(img2src, cv.COLOR_GRAY2BGR)
# Edit: use the same random seed so that two images are comparable!
np.random.seed(0)
for r, pt1, pt2 in zip(lines, pts1src, pts2src):
color = tuple(np.random.randint(0, 255, 3).tolist())
x0, y0 = map(int, [0, -r[2]/r[1]])
x1, y1 = map(int, [c, -(r[2]+r[0]*c)/r[1]])
img1color = cv.line(img1color, (x0, y0), (x1, y1), color, 1)
img1color = cv.circle(img1color, tuple(pt1), 5, color, -1)
img2color = cv.circle(img2color, tuple(pt2), 5, color, -1)
return img1color, img2color
# Find epilines corresponding to points in right image (second image) and
# drawing its lines on left image
lines1 = cv.computeCorrespondEpilines(
pts2.reshape(-1, 1, 2), 2, fundamental_matrix)
lines1 = lines1.reshape(-1, 3)
img5, img6 = drawlines(img1, img2, lines1, pts1, pts2)
# Find epilines corresponding to points in left image (first image) and
# drawing its lines on right image
lines2 = cv.computeCorrespondEpilines(
pts1.reshape(-1, 1, 2), 1, fundamental_matrix)
lines2 = lines2.reshape(-1, 3)
img3, img4 = drawlines(img2, img1, lines2, pts2, pts1)
#plt.subplot(121), plt.imshow(img5)
#plt.subplot(122), plt.imshow(img3)
#plt.suptitle("Epilines in both images")
#plt.savefig("epilines.png")
#plt.show()
# Stereo rectification (uncalibrated variant)
# Adapted from: https://stackoverflow.com/a/62607343
h1, w1 = img1.shape
h2, w2 = img2.shape
_, H1, H2 = cv.stereoRectifyUncalibrated(
np.float32(pts1), np.float32(pts2), fundamental_matrix, imgSize=(w1, h1)
)
# Rectify (undistort) the images and save them
# Adapted from: https://stackoverflow.com/a/62607343
img1_rectified = cv.warpPerspective(img1, H1, (w1, h1))
img2_rectified = cv.warpPerspective(img2, H2, (w2, h2))
cv.imwrite("rectified_1.png", img1_rectified)
cv.imwrite("rectified_2.png", img2_rectified)
# Draw the rectified images
#fig, axes = plt.subplots(1, 2, figsize=(15, 10))
#axes[0].imshow(img1_rectified, cmap="gray")
#axes[1].imshow(img2_rectified, cmap="gray")
#axes[0].axhline(250)
#axes[1].axhline(250)
#axes[0].axhline(450)
#axes[1].axhline(450)
#plt.suptitle("Rectified images")
#plt.savefig("rectified_images.png")
#plt.show()
# ------------------------------------------------------------
# CALCULATE DISPARITY (DEPTH MAP)
# Adapted from: https://github.com/opencv/opencv/blob/master/samples/python/stereo_match.py
# and: https://docs.opencv.org/master/dd/d53/tutorial_py_depthmap.html
# StereoSGBM Parameter explanations:
# https://docs.opencv.org/4.5.0/d2/d85/classcv_1_1StereoSGBM.html
# Matched block size. It must be an odd number >=1 . Normally, it should be somewhere in the 3..11 range.
block_size = 11
min_disp = -128
max_disp = 128
# Maximum disparity minus minimum disparity. The value is always greater than zero.
# In the current implementation, this parameter must be divisible by 16.
num_disp = max_disp - min_disp
# Margin in percentage by which the best (minimum) computed cost function value should "win" the second best value to consider the found match correct.
# Normally, a value within the 5-15 range is good enough
uniquenessRatio = 5
# Maximum size of smooth disparity regions to consider their noise speckles and invalidate.
# Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the 50-200 range.
speckleWindowSize = 200
# Maximum disparity variation within each connected component.
# If you do speckle filtering, set the parameter to a positive value, it will be implicitly multiplied by 16.
# Normally, 1 or 2 is good enough.
speckleRange = 2
disp12MaxDiff = 0
stereo = cv.StereoSGBM_create(
minDisparity=min_disp,
numDisparities=num_disp,
blockSize=block_size,
uniquenessRatio=uniquenessRatio,
speckleWindowSize=speckleWindowSize,
speckleRange=speckleRange,
disp12MaxDiff=disp12MaxDiff,
P1=8 * 1 * block_size * block_size,
P2=32 * 1 * block_size * block_size,
)
disparity_SGBM = stereo.compute(img1_rectified, img2_rectified)
#disparity_SGBM2 = disparity_SGBM.astype(np.float32) / 16.0
#plt.imshow(disparity_SGBM, cmap='plasma')
#plt.colorbar()
#plt.show()
#Normalize the values to a range from 0..255 for a grayscale image
disparity_SGBM = cv.normalize(disparity_SGBM, disparity_SGBM, alpha=255,
beta=0, norm_type=cv.NORM_MINMAX)
disparity_SGBM = np.uint8(disparity_SGBM)
#cv.imshow("Disparity", disparity_SGBM)
#cv.imwrite("disparity_SGBM_norm.png", disparity_SGBM)
#cv.waitKey()
#cv.destroyAllWindows()
# ---------------------------------------------------------------
"""That's the new part of the program for reconstructing the 3D map from the disparity map.
For seeing the 3D result, you need to open the clou.ply folder with Meshlab"""
def create_output(vertices, colors, filename):
colors = colors.reshape(-1, 3)
vertices = np.hstack([vertices.reshape(-1,3), colors])
ply_header = '''ply
format ascii 1.0
element vertex %(vert_num)d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header
'''
with open(filename, 'w') as f:
f.write(ply_header % dict(vert_num=len(vertices)))
np.savetxt(f, vertices, '%f %f %f %d %d %d')
print("\nGenerating the 3D map ...")
h, w = img1.shape[:2]
focal_length = 0.8*w
#Perspective transformation matrix
Q = np.float32([[1, 0, 0, -w/2.0],
[0,-1, 0, h/2.0],
[0, 0, 0, -focal_length],
[0, 0, 1, 0]])
output_file = 'clou' + '.ply'
points_3D = cv.reprojectImageTo3D(disparity_SGBM, Q, handleMissingValues=0)
colors = cv.cvtColor(img1, cv.COLOR_BGR2RGB)
mask_map = disparity_SGBM > disparity_SGBM.min()
output_points = points_3D[mask_map]
output_colors = colors[mask_map]
print("\nCreating the output file ...\n")
create_output(output_points, output_colors, output_file)
clou-l.png
clou-r.png
result_clou.png
result_disparity_SGBM2.png
I think the problem is that you're using very shiny objects, which are typically hard to match in stereo images and photogrammetry. You could try moving the lights, possibly to a more oblique angle, or fit polarizers over each lens and illuminate with polarized light. Another technique you can employ is to cover the subject in white powder to create a matte/diffuse surface, which can work better.
I've used DMAG (http://3dstereophoto.blogspot.com/2013/04/depth-map-automatic-generator-dmag.html) to produce depth maps (with varying degrees of success), and it can produce intermediate files that first show the features it can find, then, in a second step, which features match between the images. I ran your script to produce the rectified images with an epipolar projection, then ran those through DMAG. It shows very few matches (see the Features L, Features R and Matches images). With so few matches you're not going to produce much of a mesh.
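On the divide-by-16 point raised in the question: StereoSGBM returns fixed-point disparities with 4 fractional bits, and reprojectImageTo3D should be fed those true (float) disparities rather than the uint8 image normalized for display. A sketch of what that could look like, reusing the variable names from the question (untested on these particular images):
# Raw SGBM output is int16 with 4 fractional bits; convert before reprojecting.
disparity = stereo.compute(img1_rectified, img2_rectified).astype(np.float32) / 16.0
points_3D = cv.reprojectImageTo3D(disparity, Q, handleMissingValues=True)
# Keep only pixels with a valid disparity (invalid ones are set to minDisparity - 1).
mask_map = disparity > disparity.min()
output_points = points_3D[mask_map]
# Take colors from the rectified left image, which matches the disparity map's geometry.
output_colors = cv.cvtColor(img1_rectified, cv.COLOR_GRAY2RGB)[mask_map]
create_output(output_points, output_colors, 'clou_float.ply')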

OpenCV: undistort (for images) and undistortPoints are inconsistent

For testing I generate a grid image as a matrix and, separately, the grid points as a point array:
This represents a "distorted" camera image along with some feature points.
When I now undistort both the image and the grid points, I get the following result:
(Note that the fact that the "distorted" image is straight and the "undistorted" image is morphed is not the point, I'm just testing the undistortion functions with a straight test image.)
The grid image and the red grid points are totally misaligned now. I googled and found that some people forget to specify the "new camera matrix" parameter in undistortPoints, but I didn't. The documentation also mentions a normalization, but I still have the problem when I use the identity matrix as the camera matrix. Also, in the central region it fits perfectly.
Why is this not identical, do I use something in a wrong way?
I use cv2 (4.1.0) in Python. Here is the code for testing:
import numpy as np
import matplotlib.pyplot as plt
import cv2
w = 401
h = 301
# helpers
#--------
def plotImageAndPoints(im, pu, pv):
plt.imshow(im, cmap="gray")
plt.scatter(pu, pv, c="red", s=16)
plt.xlim(0, w)
plt.ylim(0, h)
plt.show()
def cv2_undistortPoints(uSrc, vSrc, cameraMatrix, distCoeffs):
uvSrc = np.array([np.matrix([uSrc, vSrc]).transpose()], dtype="float32")
uvDst = cv2.undistortPoints(uvSrc, cameraMatrix, distCoeffs, None, cameraMatrix)
uDst = [uv[0] for uv in uvDst[0]]
vDst = [uv[1] for uv in uvDst[0]]
return uDst, vDst
# test data
#----------
# generate grid image
img = np.ones((h, w), dtype = "float32")
img[0::20, :] = 0
img[:, 0::20] = 0
# generate grid points
uPoints, vPoints = np.meshgrid(range(0, w, 20), range(0, h, 20), indexing='xy')
uPoints = uPoints.flatten()
vPoints = vPoints.flatten()
# see if points align with the image
plotImageAndPoints(img, uPoints, vPoints) # perfect!
# undistort both image and points individually
#---------------------------------------------
# camera matrix parameters
fx = 1
fy = 1
cx = w/2
cy = h/2
# distortion parameters
k1 = 0.00003
k2 = 0
p1 = 0
p2 = 0
# convert for opencv
mtx = np.matrix([
[fx, 0, cx],
[ 0, fy, cy],
[ 0, 0, 1]
], dtype = "float32")
dist = np.array([k1, k2, p1, p2], dtype = "float32")
# undistort image
imgUndist = cv2.undistort(img, mtx, dist)
# undistort points
uPointsUndist, vPointsUndist = cv2_undistortPoints(uPoints, vPoints, mtx, dist)
# test if they still match
plotImageAndPoints(imgUndist, uPointsUndist, vPointsUndist) # awful!
Any help appreciated!
A bit late to the party, but to help others running into this issue:
The problem is that UndistortPoints is an iterative calculation which in some cases exits before a stable solution has been reached. This can be fixed by modifying the termination criteria for the calculation, which can be done by using UndistortPointsIter. You should replace:
uvDst = cv2.undistortPoints(uvSrc, cameraMatrix, distCoeffs, None, cameraMatrix)
with:
uvDst = cv2.undistortPointsIter(uvSrc, cameraMatrix, distCoeffs, None, cameraMatrix,(cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 40, 0.03))
Now, it tries 40 iterations to find a solution, rather than the default 5 iterations.
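As a quick check of how much the stricter criteria matter for a given camera model, the two variants can be compared directly on the same points; large residuals flag points for which the default five iterations had not converged:
uv_default = cv2.undistortPoints(uvSrc, cameraMatrix, distCoeffs, None, cameraMatrix)
uv_iter = cv2.undistortPointsIter(uvSrc, cameraMatrix, distCoeffs, None, cameraMatrix,
                                  (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS, 40, 0.03))
print(np.abs(uv_default - uv_iter).max())  # worst-case difference in pixels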

Image Rectification from single camera using two points

I'm trying to implement image rectification. I was using a piece of software that is not available anymore. To rectify the image, the software used the height of the camera (h), the distances of two points from the camera (d1, d2) and the corresponding image rows of those reference points (Line1, Line2).
So the variables are:
h (camera elevation);
Line1, Line2 (row pixel)
d1, d2 (Distance in meters from the camera)
Configuration:
I tried to implement this with OpenCV (Python), but the final result is not the same as the software's. I wrote one script to calibrate the camera and a second to undistort the images, and then I want to apply the rectification.
The problem is that I'm using a single camera (taking photos of a landscape) that is fixed, with a fixed focal length and focus which I can't change anymore.
Can someone tell me a good way to perform the rectification the same way the software did, or another valid solution?
My code for the calibration is
# Numbers of corners
n_w = 9
n_h = 6
patternSize = (n_w, n_h)
# SIZE OF THE WINDOW TO IMPROVE THE COORDINATES OF CORNERS
windowSize = (11, 11)
# TERMINATION CRITERIA
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
objp = np.zeros((n_h * n_w, 3), dtype=np.float32)
objp[:, :2] = np.mgrid[0:n_w, 0:n_h].T.reshape(-1, 2)
# LIST OF POINT
objpoints = []
imgpoints = []
# GET ALL IMAGES
images = glob.glob('*.jpg')
for fname in images:
img = cv2.imread(fname)
# IMAGE IN GRAYSCALE
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# FIND CORNERS
retval, corners = cv2.findChessboardCorners(gray_img, patternSize, None)
if retval == True:
print 'Looping through image %s' % fname
objpoints.append(objp)
cv2.cornerSubPix(gray_img, corners, windowSize, (-1, -1), criteria)
imgpoints.append(corners)
cv2.drawChessboardCorners(img, patternSize, corners, retval)
cv2.imshow('ChessBoard Image %s' % fname, img)
cv2.waitKey(500)
cv2.destroyAllWindows()
print "------START CALIBRATION....."
ret, cameraMatrix, distCoeffs, revcs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray_img.shape[::-1],
None, None)
print ret
print cameraMatrix
print distCoeffs
print '---SAVING CALIBRATION DATA'
np.savez('calibration_data', RMS=ret, distCoeffs=distCoeffs, cameraMatrix=cameraMatrix)
if ret <= 1.0:
print '''-----GOOD CALIBRATION'''
The code to remove the distortion is:
# LOAD CALIBRATION DATA
calibrationData = np.load('calibration_data.npz')
distCoeffs = calibrationData['distCoeffs']
cameraMatrix = calibrationData['cameraMatrix']
calibrationData.close()
# LOAD IMAGES
images = glob.glob('/*.jpg')
for i, fname in enumerate(images):
img = cv2.imread(fname)
# UNDISTORT
undistorted_img = cv2.undistort(img, cameraMatrix, distCoeffs, None)
# SAVE IMAGE
cv2.imwrite(os.path.join(dirname, 'Undistorted_%05d.jpg' % i), undistorted_img)
cv2.imshow('Undistorted Image %s' % fname, undistorted_img)
The first idea to rectify the image was to find the 4 corners of a trapezoid (an A4 sheet of paper) inside the real-world image and compute a transformation matrix given the 4 points of a rectangle (the real dimensions of an A4). But I think that is the wrong approach.
To do this I wrote this code:
#load image
img_Trap = cv2.imread('image.png', cv2.IMREAD_GRAYSCALE)
#points on the image (corners of an A4 paper)
ptsTrap = np.array(((1556, 1050), (1556, 1050), (2189, 1677), (1425, 1723)), dtype=np.float32)
img_Rect = cv2.imread('image2.png', cv2.IMREAD_GRAYSCALE)
# corner of a A4 (saving the aspect ratio)
ptsRect = np.array(((1980, 1381), (2189, 1381), (2189, 1677), (1980, 1677)), dtype=np.float32)
#transformation matrix
T = cv2.getPerspectiveTransform(ptsTrap, ptsRect)
print T
# warp perspective
warp = cv2.warpPerspective(img_Trap, T, img_Rect.shape[:2])
cv2.imwrite('warpimage.png', warp)
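Regarding the h / d1 / d2 / Line1 / Line2 parametrisation described at the top of the question: under a flat-ground, level-camera pinhole model the ground distance for an image row y behaves like d(y) = k / (y - y0), and the two reference pairs (Line1, d1) and (Line2, d2) are enough to solve for the horizon row y0 and for k (which absorbs the focal length and the camera height). A minimal sketch under those assumptions, with made-up example numbers:
def ground_distance_model(line1, d1, line2, d2):
    # Fit d(y) = k / (y - y0) through the two reference rows.
    # Assumes a level camera over flat ground and an already undistorted image.
    y0 = (d1 * line1 - d2 * line2) / (d1 - d2)  # horizon row
    k = d1 * (line1 - y0)                       # focal_length_px * camera_height
    return lambda y: k / (y - y0)
# Hypothetical reference values: row 900 is 5 m away, row 700 is 12 m away.
dist_of_row = ground_distance_model(900, 5.0, 700, 12.0)
print(dist_of_row(800))  # estimated ground distance (m) for row 800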

How can I transform an image so that projected image is same as original

Problem statement: An image A is projected through a projector, goes through a microscope, and the projected image is captured by a camera through the same microscope as image B. Due to the optical elements, B is rotated, sheared and distorted with respect to A. Now, I need to transform A into A' before projection such that B is as close to A as possible.
Initial approach: I took a checkerboard pattern, rotated it to various angles (36, 72, 108, ... 324 degrees) and projected it to get a series of A images and B images. I used OpenCV's CalibrateCamera2, InitUndistortMap and Remap functions to convert B into B'. But B' is nowhere near A and rather similar to B (in particular, there is a significant amount of rotation and shearing that is not getting corrected).
The code (in Python) is below. I am not sure if I am doing something stupid. Any ideas for the correct approach?
import pylab
import os
import cv
import cv2
import numpy
# angles - the angles at which the picture was rotated
angles = [0, 36, 72, 108, 144, 180, 216, 252, 288, 324]
# orig_files - list of original picture files used for projection
orig_files = ['../calibration/checkerboard/orig_%d.png' % (angle) for angle in angles]
# img_files - projected image captured by camera
img_files = ['../calibration/checkerboard/imag_%d.bmp' % (angle) for angle in angles]
# Load the images
images = [cv.LoadImage(filename) for filename in img_files]
orig_images = [cv.LoadImage(filename) for filename in orig_files]
# Convert to grayscale
gray_images = [cv.CreateImage((src.height, src.width), cv.IPL_DEPTH_8U, 1) for src in images]
for ii in range(len(images)):
cv.CvtColor(images[ii], gray_images[ii], cv.CV_RGB2GRAY)
gray_orig = [cv.CreateImage((src.height, src.width), cv.IPL_DEPTH_8U, 1) for src in orig_images]
for ii in range(len(orig_images)):
cv.CvtColor(orig_images[ii], gray_orig[ii], cv.CV_RGB2GRAY)
# The number of ranks and files in the chessboard. OpenCV considers
# the height and width of the chessboard to be one less than these,
# respectively.
rank_count = 11
file_count = 10
# Try to detect the corners of the chessboard. For each image,
# FindChessboardCorners returns (found, corner_points). found is True
# even if it managed to detect only a subset of the actual corners.
img_corners = [cv.FindChessboardCorners(img, (rank_count-1, file_count-1)) for img in gray_images]
orig_corners = [cv.FindChessboardCorners(img, (rank_count-1,file_count-1)) for img in gray_orig]
# The total number of corners will be (rank_count-1)x(file_count-1),
# but if some parts of the image are too blurred/distorted,
# FindChessboardCorners detects only a subset of the corners. In that
# case, DrawChessboardCorners will raise a TypeError.
orig_corner_success = []
ii = 0
for (found, corners) in orig_corners:
if found and (len(corners) == (rank_count - 1) * (file_count - 1)):
orig_corner_success.append(ii)
else:
print orig_files[ii], ': could not find correct corners: ', len(corners)
ii += 1
ii = 0
img_corner_success = []
for (found, corners) in img_corners:
if found and (len(corners) == (rank_count-1) * (file_count-1)) and (ii in orig_corner_success):
img_corner_success.append(ii)
else:
print img_files[ii], ': Number of corners detected is wrong:', len(corners)
ii += 1
# Here we compile all the corner coordinates into single arrays
image_points = []
obj_points = []
for ii in img_corner_success:
obj_points.extend(orig_corners[ii][1])
image_points.extend(img_corners[ii][1])
image_points = cv.fromarray(numpy.array(image_points, dtype='float32'))
obj_points = numpy.hstack((numpy.array(obj_points, dtype='float32'), numpy.zeros((len(obj_points), 1), dtype='float32')))
obj_points = cv.fromarray(numpy.array(obj_points, order='C'))
point_counts = numpy.ones((len(img_corner_success), 1), dtype='int32') * ((rank_count-1) * (file_count-1))
point_counts = cv.fromarray(point_counts)
# Create the output parameters
cam_mat = cv.CreateMat(3, 3, cv.CV_32FC1)
cv.Set2D(cam_mat, 0, 0, 1.0)
cv.Set2D(cam_mat, 1, 1, 1.0)
dist_mat = cv.CreateMat(5, 1, cv.CV_32FC1)
rot_vecs = cv.CreateMat(len(img_corner_success), 3, cv.CV_32FC1)
tran_vecs = cv.CreateMat(len(img_corner_success), 3, cv.CV_32FC1)
# Do the camera calibration
x = cv.CalibrateCamera2(obj_points, image_points, point_counts, cv.GetSize(gray_images[0]), cam_mat, dist_mat, rot_vecs, tran_vecs)
# Create the undistortion map
xmap = cv.CreateImage(cv.GetSize(images[0]), cv.IPL_DEPTH_32F, 1)
ymap = cv.CreateImage(cv.GetSize(images[0]), cv.IPL_DEPTH_32F, 1)
cv.InitUndistortMap(cam_mat, dist_mat, xmap, ymap)
# Now undistort all the images and same them
ii = 0
for tmp in images:
print img_files[ii]
image = cv.GetImage(tmp)
t = cv.CloneImage(image)
cv.Remap(t, image, xmap, ymap, cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS, cv.ScalarAll(0))
corrected_file = os.path.join(os.path.dirname(img_files[ii]), 'corrected_%s' % (os.path.basename(img_files[ii])))
cv.SaveImage(corrected_file, image)
print 'Saved corrected image to', corrected_file
ii += 1
Here are the images: A, B and B'. Actually I don't think the Remap is really doing anything!
I got it resolved finally. There were several issues:
The original images were not all of the same size, nor were the captured images. Hence, the affine transform from one pair was not applicable to the others. I resized them all to the same size.
The undistortion after camera calibration is not sufficient for rotation and shear. The appropriate thing to do is an affine transform, and it is better to take three corners of the chessboard as the points for computing the transformation matrix (less relative error), as in the sketch below.
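For reference, the same three-corner affine fit in the modern cv2 API looks roughly like this sketch; orig_corners, captured_corners, cols, original_img and captured_img are hypothetical stand-ins for the detected corner arrays and the two images:
import cv2
import numpy as np
# Three non-collinear chessboard corners: first corner, end of the first row, last corner.
src = np.float32([orig_corners[0], orig_corners[cols - 1], orig_corners[-1]])
dst = np.float32([captured_corners[0], captured_corners[cols - 1], captured_corners[-1]])
M = cv2.getAffineTransform(src, dst)  # 2x3 affine mapping original -> captured
h, w = captured_img.shape[:2]
warped = cv2.warpAffine(original_img, M, (w, h))
# cv2.invertAffineTransform(M) gives the map needed to pre-warp A into A'.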
Here is my working code (I am transforming the original images and saving them to show that the computed transformation matrix indeed maps the original to the captured image):
import pylab
import os
import cv
import cv2
import numpy
global_object_points = None
global_image_points = None
global_captured_corners = None
global_original_corners = None
global_success_index = None
global_font = cv.InitFont(cv.CV_FONT_HERSHEY_PLAIN, 1.0, 1.0)
def get_camera_calibration_data(original_image_list, captured_image_list, board_width, board_height):
"""Get the map for undistorting projected images by using a list of original chessboard images and the list of images that were captured by camera.
original_image_list - list containing the original images (loaded as OpenCV image).
captured_image_list - list containing the captured images.
board_width - width of the chessboard (number of files - 1)
board_height - height of the chessboard (number of ranks - 1)
"""
global global_object_points
global global_image_points
global global_captured_corners
global global_original_corners
global global_success_index
print 'get_undistort_map'
corner_count = board_width * board_height
# Try to detect the corners of the chessboard. For each image,
# FindChessboardCorners returns (found, corner_points). found is
# True even if it managed to detect only a subset of the actual
# corners. NOTE: according to
# http://opencv.willowgarage.com/wiki/documentation/cpp/calib3d/findChessboardCorners,
# no need for FindCornerSubPix after FindChessBoardCorners
captured_corners = [cv.FindChessboardCorners(img, (board_width, board_height)) for img in captured_image_list]
original_corners = [cv.FindChessboardCorners(img, (board_width, board_height)) for img in original_image_list]
success_captured = [index for index in range(len(captured_image_list))
if captured_corners[index][0] and len(captured_corners[index][1]) == corner_count]
success_original = [index for index in range(len(original_image_list))
if original_corners[index][0] and len(original_corners[index][1]) == corner_count]
success_index = [index for index in success_captured if (len(captured_corners[index][1]) == corner_count) and (index in success_original)]
global_success_index = success_index
print global_success_index
print 'Successfully found corners in image #s.', success_index
cv.NamedWindow('Image', cv.CV_WINDOW_AUTOSIZE)
for index in success_index:
copy = cv.CloneImage(original_image_list[index])
cv.DrawChessboardCorners(copy, (board_width, board_height), original_corners[index][1], corner_count)
cv.ShowImage('Image', copy)
a = cv.WaitKey(0)
copy = cv.CloneImage(captured_image_list[index])
cv.DrawChessboardCorners(copy, (board_width, board_height), captured_corners[index][1], corner_count)
cv.ShowImage('Image', copy)
a = cv.WaitKey(0)
cv.DestroyWindow('Image')
if not success_index:
return
global_captured_corners = [captured_corners[index][1] for index in success_index]
global_original_corners = [original_corners[index][1] for index in success_index]
object_points = cv.CreateMat(len(success_index) * (corner_count), 3, cv.CV_32FC1)
image_points = cv.CreateMat(len(success_index) * (corner_count), 2, cv.CV_32FC1)
global_object_points = object_points
global_image_points = image_points
point_counts = cv.CreateMat(len(success_index), 1, cv.CV_32SC1)
for ii in range(len(success_index)):
for jj in range(corner_count):
cv.Set2D(object_points, ii * corner_count + jj, 0, float(jj/board_width))
cv.Set2D(object_points, ii * corner_count + jj, 1, float(jj%board_width))
cv.Set2D(object_points, ii * corner_count + jj, 2, float(0.0))
cv.Set2D(image_points, ii * corner_count + jj, 0, captured_corners[success_index[ii]][1][jj][0])
cv.Set2D(image_points, ii * corner_count + jj, 1, captured_corners[success_index[ii]][1][jj][1])
cv.Set1D(point_counts, ii, corner_count)
# Create the output parameters
camera_intrinsic_mat = cv.CreateMat(3, 3, cv.CV_32FC1)
cv.Set2D(camera_intrinsic_mat, 0, 0, 1.0)
cv.Set2D(camera_intrinsic_mat, 1, 1, 1.0)
distortion_mat = cv.CreateMat(5, 1, cv.CV_32FC1)
rotation_vecs = cv.CreateMat(len(success_index), 3, cv.CV_32FC1)
translation_vecs = cv.CreateMat(len(success_index), 3, cv.CV_32FC1)
print 'Before camera calibration'
# Do the camera calibration
cv.CalibrateCamera2(object_points, image_points, point_counts, cv.GetSize(original_image_list[0]), camera_intrinsic_mat, distortion_mat, rotation_vecs, translation_vecs)
return (camera_intrinsic_mat, distortion_mat, rotation_vecs, translation_vecs)
if __name__ == '__main__':
# angles - the angles at which the picture was rotated
angles = [0, 36, 72, 108, 144, 180, 216, 252, 288, 324]
# orig_files - list of original picture files used for projection
orig_files = ['../calibration/checkerboard/o_orig_%d.png' % (angle) for angle in angles]
# img_files - projected image captured by camera
img_files = ['../calibration/checkerboard/captured_imag_%d.bmp' % (angle) for angle in angles]
# orig_files = ['o%d.png' % (angle) for angle in range(10, 40, 10)]
# img_files = ['d%d.png' % (angle) for angle in range(10, 40, 10)]
# Load the images
print 'Loading images'
captured_images = [cv.LoadImage(filename) for filename in img_files]
orig_images = [cv.LoadImage(filename) for filename in orig_files]
# Convert to grayscale
gray_images = [cv.CreateImage((src.height, src.width), cv.IPL_DEPTH_8U, 1) for src in captured_images]
for ii in range(len(captured_images)):
cv.CvtColor(captured_images[ii], gray_images[ii], cv.CV_RGB2GRAY)
cv.ShowImage('win', gray_images[ii])
cv.WaitKey(0)
cv.DestroyWindow('win')
gray_orig = [cv.CreateImage((src.height, src.width), cv.IPL_DEPTH_8U, 1) for src in orig_images]
for ii in range(len(orig_images)):
cv.CvtColor(orig_images[ii], gray_orig[ii], cv.CV_RGB2GRAY)
# The number of ranks and files in the chessboard. OpenCV considers
# the height and width of the chessboard to be one less than these,
# respectively.
rank_count = 10
file_count = 11
camera_intrinsic_mat, distortion_mat, rotation_vecs, translation_vecs, = get_camera_calibration_data(gray_orig, gray_images, file_count-1, rank_count-1)
xmap = cv.CreateImage(cv.GetSize(captured_images[0]), cv.IPL_DEPTH_32F, 1)
ymap = cv.CreateImage(cv.GetSize(captured_images[0]), cv.IPL_DEPTH_32F, 1)
cv.InitUndistortMap(camera_intrinsic_mat, distortion_mat, xmap, ymap)
# homography = cv.CreateMat(3, 3, cv.CV_32F)
map_matrix = cv.CreateMat(2, 3, cv.CV_32F)
source_points = (global_original_corners[0][0], global_original_corners[0][file_count-2], global_original_corners[0][(rank_count-1) * (file_count-1) -1])
image_points = (global_captured_corners[0][0], global_captured_corners[0][file_count-2], global_captured_corners[0][(rank_count-1) * (file_count-1) -1])
# cv.GetPerspectiveTransform(source, target, homography)
cv.GetAffineTransform(source_points, image_points, map_matrix)
ii = 0
cv.NamedWindow('OriginalImage', cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow('CapturedImage', cv.CV_WINDOW_AUTOSIZE)
cv.NamedWindow('FixedImage', cv.CV_WINDOW_AUTOSIZE)
for image in gray_images:
# The affine transform should be ideally calculated once
# outside this loop, but as the transform looks different for
# each image, I'll just calculate it independently to see the
# applicability
try:
# Try to find ii in the list of successful corner
# detection indices and if found, use the corners for
# computing the affine transformation matrix. This is only
# required when the optics changes between two
# projections, which should not happen.
jj = global_success_index.index(ii)
source_points = [global_original_corners[jj][0], global_original_corners[jj][rank_count-1], global_original_corners[jj][-1]]
image_points = [global_captured_corners[jj][0], global_captured_corners[jj][rank_count-1], global_captured_corners[jj][-1]]
cv.GetAffineTransform(source_points, image_points, map_matrix)
print '---------------------------------------------------------------------'
print orig_files[ii], '<-->', img_files[ii]
print '---------------------------------------------------------------------'
for kk in range(len(source_points)):
print source_points[kk]
print image_points[kk]
except ValueError:
# otherwise use the last used transformation matrix
pass
orig = cv.CloneImage(orig_images[ii])
cv.PutText(orig, '%s: original' % (os.path.basename(orig_files[ii])), (100, 100), global_font, 0.0)
cv.ShowImage('OriginalImage', orig)
target = cv.CloneImage(image)
target.origin = image.origin
cv.SetZero(target)
cv.Remap(image, target, xmap, ymap, cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS, cv.ScalarAll(0))
cv.PutText(target, '%s: remapped' % (os.path.basename(img_files[ii])), (100, 100), global_font, 0.0)
cv.ShowImage('CapturedImage', target)
target = cv.CloneImage(orig_images[ii])
cv.SetZero(target)
cv.WarpAffine(orig_images[ii], target, map_matrix, cv.CV_INTER_LINEAR | cv.CV_WARP_FILL_OUTLIERS)
corrected_file = os.path.join(os.path.dirname(img_files[ii]), 'corrected_%s' % (os.path.basename(img_files[ii])))
cv.SaveImage(corrected_file, target)
print 'Saved corrected image to', corrected_file
# cv.WarpPerspective(image, target, homography, cv.CV_INTER_LINEAR | cv.CV_WARP_INVERSE_MAP | cv.CV_WARP_FILL_OUTLIERS)
cv.PutText(target, '%s: perspective-transformed' % (os.path.basename(img_files[ii])), (100, 100), global_font, 0.0)
cv.ShowImage('FixedImage', target)
print '==================================================================='
cv.WaitKey(0)
ii += 1
cv.DestroyWindow('OriginalImage')
cv.DestroyWindow('CapturedImage')
cv.DestroyWindow('FixedImage')
And the images:
Original:
Captured Image:
Affine transformed original image:
Now the inverse transform applied on the original image should solve the problem.
