I am a Python beginner. We have a project where we need to detect and crop out a desired pattern from a photo:
This is the original picture:
Each picture has such a regular border, but the border is not closed, so that the orientation can be detected (similar to a QR code).
The image to be detected is taken with a mobile phone, so it may be deformed, rotated, and scaled; we need to extract the pattern in the middle of the box:
My code:
import cv2 as cv
import numpy as np
import math

def getdistance(p1, p2):
    # Euclidean distance between two 2D points
    v1 = np.array(p1)
    v2 = np.array(p2)
    v3 = v2 - v1
    return math.hypot(v3[0], v3[1])

img_object = cv.imread('small.png', cv.IMREAD_GRAYSCALE)
img_scene = cv.imread('img2.png', cv.IMREAD_GRAYSCALE)
if img_object is None or img_scene is None:
    print('Unable to read image!')
    exit(0)

minHessian = 400  # leftover from the SURF version of the tutorial; unused with SIFT
detector = cv.SIFT_create()
keypoints_obj, descriptors_obj = detector.detectAndCompute(img_object, None)
keypoints_scene, descriptors_scene = detector.detectAndCompute(img_scene, None)
img_2 = cv.drawKeypoints(img_scene, keypoints_scene, img_scene)

matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_FLANNBASED)
knn_matches = matcher.knnMatch(descriptors_obj, descriptors_scene, 2)

# Lowe's ratio test (0.95 is very permissive; ~0.7 is the usual recommendation)
ratio_thresh = 0.95
good_matches = []
for m, n in knn_matches:
    if m.distance < ratio_thresh * n.distance:
        good_matches.append(m)

img_matches = np.empty((max(img_object.shape[0], img_scene.shape[0]),
                        img_object.shape[1] + img_scene.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img_object, keypoints_obj, img_scene, keypoints_scene, good_matches,
               img_matches, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

obj = np.empty((len(good_matches), 2), dtype=np.float32)
scene = np.empty((len(good_matches), 2), dtype=np.float32)
for i in range(len(good_matches)):
    obj[i, 0] = keypoints_obj[good_matches[i].queryIdx].pt[0]
    obj[i, 1] = keypoints_obj[good_matches[i].queryIdx].pt[1]
    scene[i, 0] = keypoints_scene[good_matches[i].trainIdx].pt[0]
    scene[i, 1] = keypoints_scene[good_matches[i].trainIdx].pt[1]

H, _ = cv.findHomography(obj, scene, cv.RANSAC)

# Project the object corners into the scene
obj_corners = np.empty((4, 1, 2), dtype=np.float32)
obj_corners[0, 0, 0] = 0
obj_corners[0, 0, 1] = 0
obj_corners[1, 0, 0] = img_object.shape[1]
obj_corners[1, 0, 1] = 0
obj_corners[2, 0, 0] = img_object.shape[1]
obj_corners[2, 0, 1] = img_object.shape[0]
obj_corners[3, 0, 0] = 0
obj_corners[3, 0, 1] = img_object.shape[0]
scene_corners = cv.perspectiveTransform(obj_corners, H)

q1 = (int(scene_corners[0, 0, 0]), int(scene_corners[0, 0, 1]))
q2 = (int(scene_corners[1, 0, 0]), int(scene_corners[1, 0, 1]))
q3 = (int(scene_corners[2, 0, 0]), int(scene_corners[2, 0, 1]))
q4 = (int(scene_corners[3, 0, 0]), int(scene_corners[3, 0, 1]))
dist = getdistance(q1, q2)
disti = int(dist)
print('q1: %s, q2: %s, q3: %s, q4: %s, dist: %s' % (q1, q2, q3, q4, dist))

# Rectify the detected quadrilateral to a square of side disti
src_points = np.array([q3, q4, q1, q2], dtype="float32")
dst_points = np.array([[disti, disti], [0, disti], [0, 0], [disti, 0]], dtype="float32")
M = cv.getPerspectiveTransform(src_points, dst_points)
# interpolation must be passed as flags= (the fourth positional slot is dst)
perspective = cv.warpPerspective(img_scene, M, (disti, disti), flags=cv.INTER_LINEAR)
_, result = cv.threshold(perspective, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
cv.imshow('result', result)
cv.waitKey()
I have researched this for a long time, but the results are still not ideal. Are there better ideas for achieving what we need?
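One alternative worth trying, since the border itself is a strong, high-contrast quadrilateral: skip feature matching entirely, detect the border as a contour, and rectify it with a perspective warp. The sketch below is only a rough outline under assumed file names and thresholds (photo.png, the Canny and area parameters), not a drop-in solution; the gap in the border and the final orientation check (like a QR finder pattern) would still need handling on top of it.

import cv2 as cv
import numpy as np

img = cv.imread('photo.png')                      # assumed input file name
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
blur = cv.GaussianBlur(gray, (5, 5), 0)
edges = cv.Canny(blur, 50, 150)                   # thresholds are guesses; tune per image
# close the small gap in the border so it forms one contour
edges = cv.morphologyEx(edges, cv.MORPH_CLOSE, np.ones((7, 7), np.uint8))

contours, _ = cv.findContours(edges, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
best = None
for c in sorted(contours, key=cv.contourArea, reverse=True):
    approx = cv.approxPolyDP(c, 0.02 * cv.arcLength(c, True), True)
    if len(approx) == 4 and cv.contourArea(approx) > 1000:   # first large quadrilateral
        best = approx.reshape(4, 2).astype(np.float32)
        break

if best is not None:
    side = 400                                    # output size, arbitrary
    dst = np.float32([[0, 0], [side, 0], [side, side], [0, side]])
    M = cv.getPerspectiveTransform(best, dst)     # assumes corners come back in a consistent order
    warped = cv.warpPerspective(img, M, (side, side))
    cv.imshow('warped', warped)
    cv.waitKey()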
I am working with VSCode 1.68.1 on Ubuntu 20.04.
I am following this link:
https://courses.ece.cornell.edu/ece5990/ECE5725_Fall2020_Projects/Dec_21_Demo/Drawing%20Robot/eam348_mm2994_W/index.html
from unittest import result
import numpy as np
import time
import cv2
import sys
import cv2.aruco as aruco
import socket
import datetime
import glob
import math
import multiprocessing as mp
port = 30003
IP = '192.11.0.25'
robot_ID = 20185500976
robot = 'ur-20185500976'
marker_dimension =0.06
worldx = 390
worldy = 260
bottom_left = 31 #this is the origin - positivex: towards bottom right - positivey: towards top left
bottom_right = 32
top_left = 9
top_right = 20
#camera dist, matrix and newcameramatrix
dist=np.array(([[5.0164361897882787e-02, 6.6308284023737640e-01, 2.5493975084043882e-03, -6.0403656948007376e-03, -2.9652221208277720e+00]]))
mtx=np.array([[6.1618286891135097e+02, 0., 3.2106366551961219e+02],
[0 , 6.1595924417559945e+02, 2.4165645046034246e+02],
[0. , 0. , 1. ]])
found_dict_pixel_space = {}
found_dict_camera_space = {}
found_dict_world_space = {}
found_dict_homography_space = {}
final_string = ""
originRvec = np.array([0,0,1])
markerRvec= np.array([0,0,0])
def UDP(IP, port, message):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # IPv4 DNS server - UDP protocol
    sock.sendto(bytes(message, "utf-8"), (IP, port))  # self, data, address

def getMarkerCenter(corners):
    px = (corners[0][0] + corners[1][0] + corners[2][0] + corners[3][0]) * 0.25
    py = (corners[0][1] + corners[1][1] + corners[2][1] + corners[3][1]) * 0.25
    return [px, py]

def getMarkerRotation(corners):
    unit_x_axis = [1., 0.]
    center = getMarkerCenter(corners)
    right_edge_midpoint = (corners[1] + corners[2]) / 2.
    unit_vec = (right_edge_midpoint - center) / np.linalg.norm(right_edge_midpoint - center)
    angle = np.arccos(np.dot(unit_x_axis, unit_vec))
    return angle

def inversePerspective(rvec, tvec):
    R, _ = cv2.Rodrigues(rvec)
    R = np.array(R).T  # this was np.matrix but had error
    invTvec = np.dot(-R, np.array(tvec))
    invRvec, _ = cv2.Rodrigues(R)
    return invRvec, invTvec

def normalize(v):
    if np.linalg.norm(v) == 0:
        return v
    return v / np.linalg.norm(v)

def findWorldCoordinate(originCorners, point):
    zero = np.array(originCorners[3])  # bottom left as the origin - check the data structure
    print(zero)
    x = (np.array(originCorners[0]) - zero)  # bottom right - Green Axis -- throw out z
    y = (np.array(originCorners[1]) - zero)  # top left - Red Axis -- throw out z
    x = x[0][0:2]
    y = y[0][0:2]
    x = normalize(x)
    y = normalize(y)
    # print("x", x)
    vec = (point - zero)[0][0:2]
    # print("vec", vec)
    vecNormal = normalize(vec)
    cosX = np.dot(x, vecNormal)
    cosY = np.dot(y, vecNormal)
    xW = np.linalg.norm(vec) * cosX
    yW = np.linalg.norm(vec) * cosY
    return [xW, yW]
cap=cv2.VideoCapture(4)
font = cv2.FONT_HERSHEY_SIMPLEX #font for displaying text (below)
while True:
    t0 = time.time()
    ret, frame = cap.read()
    h, w = frame.shape[:2]
    # new image size to generate
    h1, w1 = h, w
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 0, (w1, h1))
    # print(newcameramtx)
    # mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w1, h1), 5)
    # dst1 = cv2.remap(frame, mapx, mapy, cv2.INTER_LINEAR)
    dst1 = cv2.undistort(frame, mtx, dist, None, newcameramtx)
    x, y, w1, h1 = roi
    dst1 = dst1[y:y + h1, x:x + w1]
    frame = dst1
    t1 = time.time() - t0
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
    arucoParameters = aruco.DetectorParameters_create()
    t2 = time.time() - t0
    data = aruco.detectMarkers(gray, aruco_dict, parameters=arucoParameters)
    t3 = time.time() - t0
    corners = data[0]
    ids = data[1]
    originIDglobal = 0
    # If you can't find it, type id
    if ids is not None:
        t4 = time.time() - t0
        result = aruco.estimatePoseSingleMarkers(corners, marker_dimension, newcameramtx, dist)
        rvecs = result[0]  # rotation vectors of markers
        tvecs = result[1]  # translation vector of markers
        # setting bottom_left as the origin
        if bottom_left in ids:
            originID = np.where(ids == bottom_left)[0][0]
            originIDglobal = originID
        else:
            originID = originIDglobal
        originCorners = corners[originID]  # corners of the tag set as the origin
        originCornersCamera = getCornerInCameraWorld(marker_dimension, rvecs[originID], tvecs[originID])[0]  # origin tag corners in camera space
        originRvec = rvecs[originID]  # rotation vec of origin tag
        originTvec = tvecs[originID]  # translation vec of origin tag
        display = aruco.drawDetectedMarkers(frame, corners, ids)  # Draw a square around the markers
        t5 = time.time() - t0
        for i in range(len(ids)):
            ID = ids[i]
            rvec = rvecs[i]
            tvec = tvecs[i]
            corners4 = corners[i]
            display = cv2.drawFrameAxes(frame, newcameramtx, dist, rvec, tvec, 0.03)  # Draw 3D Axis, 3cm(0.03)
            found_dict_pixel_space["" + str(ids[i][0])] = corners4  # put the corners of this tag in the dictionary
        # Homography
        zero = found_dict_pixel_space[str(bottom_left)][0][3]  # bottom left - 3
        x = found_dict_pixel_space[str(bottom_right)][0][2]  # bottom right - 27
        y = found_dict_pixel_space[str(top_left)][0][0]  # top left - 22
        xy = found_dict_pixel_space[str(top_right)][0][1]  # top right - 24
        workspace_world_corners = np.array([[0.0, 0.0], [worldx, 0.0], [0.0, worldy], [worldx, worldy]], np.float32)  # 4 corners in millimeters
        workspace_pixel_corners = np.array([zero, x, y, xy], np.float32)  # 4 corners in pixels
        # Homography Matrix
        h, status = cv2.findHomography(workspace_pixel_corners, workspace_world_corners)  # perspective matrix
        t6 = time.time() - t0
        im_out = cv2.warpPerspective(frame, h, (worldx, worldy))  # showing that it works
        t7 = time.time() - t0
        for i in range(len(ids)):
            j = ids[i][0]
            corners_pix = found_dict_pixel_space[str(j)]  # [0]
            corners_pix_transformed = cv2.perspectiveTransform(corners_pix, h)
            found_dict_homography_space[str(j)] = corners_pix_transformed
        print(found_dict_homography_space)
        robot = found_dict_homography_space[str(robot_ID)][0]
        print(getMarkerCenter(robot))
        cv2.imshow('Warped Source Image', im_out)
        t8 = time.time() - t0
        print("t1: %8.4f t2: %8.4f t3: %8.4f t4: %8.4f t5: %8.4f t6: %8.4f t7: %8.4f t8: %8.4f" % (t1, t2-t1, t3-t2, t4-t3, t5-t4, t6-t5, t7-t6, t8-t7))
    else:
        display = frame
        cv2.imshow('Display', display)
    # Display result frame
    cv2.imshow("frame", frame)
    key = cv2.waitKey(1)
    if key == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
I am getting the error "NameError: name 'getCornerInCameraWorld' is not defined".
I am unable to find anything related to getCornerInCameraWorld.
Please provide some help.
I got a reply from the author of the code. She confirmed that the code was missing a part and provided it.
The missing code was:
def getCornerInCameraWorld(size, rvec, tvec):
    half_size = size * 0.5
    rotMatrix, jacobian = cv2.Rodrigues(rvec)  # convert rot vector from marker space to camera space
    X = half_size * rotMatrix[:, 0]
    Y = half_size * rotMatrix[:, 1]
    c1 = np.add(np.add(-1 * X, Y), tvec)       # top left
    c2 = np.add(np.add(X, Y), tvec)            # top right
    c3 = np.add(np.add(X, -1 * Y), tvec)       # bottom right
    c4 = np.add(np.add(-1 * X, -1 * Y), tvec)  # bottom left
    cornersInCameraWorld = [c1, c2, c3, c4]
    cornersInCameraWorld = np.array(cornersInCameraWorld, dtype=np.float32)
    return cornersInCameraWorld, rotMatrix
I have checked it, and it works for me.
I am trying to implement this Wiener filter to reduce the impact of a camera being out of focus: https://docs.opencv.org/4.x/de/d3c/tutorial_out_of_focus_deblur_filter.html. The original code is in C++; I rewrote it in Python, but I do not get the same output as the example.
Here is my code:
import cv2 as cv
import numpy as np

def calcPSF(size, R):
    h = np.zeros(size, dtype=np.float32)
    cv.circle(h, (size[1] // 2, size[0] // 2), R, 1, -1)
    psf = h / np.sum(h)
    return psf

def calcWnrFilter(psf, SNR):
    h_psf = np.fft.fftshift(psf)
    h_planes = [np.float32(h_psf), np.zeros(h_psf.shape, np.float32)]
    h_complexI = cv.merge(h_planes)
    h_complexI = cv.dft(h_complexI)
    h_planes = cv.split(h_complexI)
    denom = np.power(np.abs(h_planes[0]), 2) + (1 / SNR)
    wiener = np.divide(h_planes[0], denom, dtype=np.float32)
    return wiener

def filter2DFreq(img, wiener):
    planes = [np.float32(img), np.zeros(img.shape, np.float32)]
    complexI = cv.merge(planes)
    complexI = np.divide(cv.dft(complexI), complexI.size, dtype=np.float32)
    planesH = [np.float32(wiener), np.zeros(wiener.shape, np.float32)]
    complexH = cv.merge(planesH)
    complexIH = cv.mulSpectrums(complexI, complexH, 0)
    complexIH = cv.idft(complexIH)
    planes = cv.split(complexIH)
    out = planes[0]
    return out

def deBlur(img, R, SNR):
    rows, cols = img.shape
    m = cv.getOptimalDFTSize(rows)
    n = cv.getOptimalDFTSize(cols)
    img = (cv.copyMakeBorder(img, 0, m - rows, 0, n - cols, cv.BORDER_CONSTANT, value=[0, 0, 0]) / 255).astype(np.float32)
    h = calcPSF((m, n), R)
    Hw = calcWnrFilter(h, SNR)
    out = filter2DFreq(img, Hw)
    return out

img = cv.imread("original_blur.jpg")[:, :, 0]
while True:
    v = deBlur(img, 53, 5200)
    cv.imshow("in", img)
    cv.imshow("out", v)
    key = cv.waitKey(1) & 0xFF
    if key == 27:
        break
cv.destroyAllWindows()
And here are my input and output:
This is what I am supposed to get:
I verified the dtype of all my variables (everything is np.float32) and spent hours looking for differences from the original code.
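Two places where this port appears to diverge from the C++ tutorial, offered as things to double-check rather than a confirmed fix: the tutorial's filter2DFreq calls dft with DFT_SCALE (dividing by rows*cols), whereas dividing by complexI.size here also divides by the extra channel factor of 2; and the tutorial normalizes the result to 0..255 and converts to 8-bit before saving, whereas imshow on a raw float image clips values outside [0, 1]. The tutorial also crops to even dimensions rather than zero-padding to the optimal DFT size. A sketch of the first two changes, assuming the rest of the pipeline above stays the same:

import cv2 as cv
import numpy as np

def filter2DFreq_scaled(img, wiener):
    # same as filter2DFreq above, but let OpenCV apply the 1/(rows*cols) scaling itself
    planes = [np.float32(img), np.zeros(img.shape, np.float32)]
    complexI = cv.merge(planes)
    complexI = cv.dft(complexI, flags=cv.DFT_SCALE)   # instead of dividing by complexI.size
    planesH = [np.float32(wiener), np.zeros(wiener.shape, np.float32)]
    complexH = cv.merge(planesH)
    complexIH = cv.mulSpectrums(complexI, complexH, 0)
    complexIH = cv.idft(complexIH)
    return cv.split(complexIH)[0]

# display/save like the tutorial: stretch to 0..255 and convert to 8-bit
# out = filter2DFreq_scaled(img_float, Hw)
# out8 = cv.normalize(out, None, 0, 255, cv.NORM_MINMAX).astype(np.uint8)
# cv.imshow("out", out8)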
I am using OpenCV in Python to tag an object in a video, write that new output, and save it as an .avi file. Everything is working fine except the video writer: I cannot get it to write and/or save the output. It looks the same as code I've written before, so this is driving me insane. Any ideas on what I might be doing wrong are greatly appreciated.
import numpy as np
import cv2

def draw_pyramid(pyramid, H):
    pyramid_new = []
    i = 0
    for p in pyramid:
        if i != 4:
            temp_p = p
            temp_p = np.append(temp_p, 1)
            M_ext = H[0:3, :]
            p = M_ext @ temp_p  # matrix-vector product
            pyramid_new.append((p[0], p[1], p[2]))
            i += 1
        else:
            pyramid_new.append((p[0], p[1], p[2]))
            i += 1
    return pyramid_new

cap = cv2.VideoCapture('hw4.avi')
got_image, img = cap.read()
videoWriter = cv2.VideoWriter("switch.avi",
                              fourcc=cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                              fps=30.0,
                              frameSize=(img.shape[1], img.shape[0]))
fx, fy, cx, cy = 675.0, 675.0, 320.0, 240.0
k = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])
while True:
    got_img, img = cap.read()
    if not got_img:
        break
    else:
        gray_image = cv2.cvtColor(src=img,
                                  code=cv2.COLOR_BGR2GRAY)  # convert BGR to grayscale
        output_thresh, binary_image = cv2.threshold(src=gray_image,
                                                    maxval=255,
                                                    type=cv2.THRESH_OTSU,  # determine threshold automatically from image
                                                    thresh=0)  # ignore this if using THRESH_OTSU
        arucoDict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100)
        corners, ids, _ = cv2.aruco.detectMarkers(image=img,
                                                  dictionary=arucoDict)
        if ids is not None and corners:
            cv2.aruco.drawDetectedMarkers(image=img,
                                          corners=corners,
                                          ids=ids,
                                          borderColor=(0, 255, 0))
            rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(corners=corners,
                                                                  markerLength=2.0,
                                                                  cameraMatrix=k,
                                                                  distCoeffs=None)
            cv2.aruco.drawAxis(image=img,
                               cameraMatrix=k,
                               distCoeffs=None,
                               rvec=rvecs,
                               tvec=tvecs,
                               length=2.0)
            if ids[0][0] == 1:
                pyr = np.array(((-3.5/3, -1.5/3, -1.5/3, -3.5/3, -2.5),
                                (-1, -1, -1/3, -1/3, -2),
                                (-4/3, -4/3, -4/3, -4/3, -5))).T
                R = cv2.Rodrigues(rvecs)[0]
                r = R[0]
                # rvec_m_c = rvecs[0]  # This is a 1x3 rotation vector
                tm_c = np.array(tvecs[0]).T  # This is a 1x3 translation vector
                t = np.array([[-2.5, -2, -5]]).T
                H = np.block([[R, t], [0, 0, 0, 1]])
            if ids[0][0] == 0:
                pyr = np.array(((1.5/3, 3.5/3, 3.5/3, 1.5/3, 2.5),
                                (-1, -1, -1/3, -1/3, -2),
                                (-4/3, -4/3, -4/3, -4/3, -1))).T
                R = cv2.Rodrigues(rvecs)[0]
                # r = R[0]
                tm_c = np.array(tvecs[0]).T  # This is a 1x3 translation vector
                t = np.array([[2.5, -2, -1]]).T
                H = np.block([[R, t], [0, 0, 0, 1]])
            pyramid_new = draw_pyramid(pyr, H)
            pyramid_new = np.asarray(pyramid_new)
            pyramid_new = pyramid_new.reshape(-1, 3)
            dist_coeff = np.zeros((4, 1))
            pyramid_points, jacobian = cv2.projectPoints(pyramid_new,
                                                         rvecs,
                                                         tvecs,
                                                         np.array(k),
                                                         distCoeffs=dist_coeff)
            pyramid_points = pyramid_points.astype(int)
            froml = [1, 2, 3, 0, 0, 1, 2, 3]
            fromr = [4, 4, 4, 4, 1, 2, 3, 0]
            color = (0, 0, 255)
            for i, j in zip(froml, fromr):
                cv2.line(img, tuple(pyramid_points[i][0]),
                         tuple(pyramid_points[j][0]), color, 1)
        videoWriter.write(img)
        cv2.imshow('switch', img)
        cv2.waitKey(30)
videoWriter.release()
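A few general things that commonly make cv2.VideoWriter produce an empty or unplayable file, offered as checks rather than a diagnosis of this exact script: every frame passed to write() must have exactly the frameSize given to the constructor and be 3-channel BGR (unless isColor=False), the codec must actually be available for the chosen container, and release() must run. A minimal, self-contained sketch of the pattern, assuming an input file named hw4.avi:

import cv2

cap = cv2.VideoCapture('hw4.avi')           # assumed input path
ok, frame = cap.read()
assert ok, "could not read the first frame"

h, w = frame.shape[:2]
writer = cv2.VideoWriter('out.avi',
                         cv2.VideoWriter_fourcc(*'MJPG'),  # MJPG pairs reliably with .avi
                         30.0,
                         (w, h))            # (width, height) must match every written frame
assert writer.isOpened(), "VideoWriter failed to open (codec/container mismatch?)"

while ok:
    # frames must stay (h, w, 3) BGR; a frame with a different size or channel
    # count is typically not written, with no error raised
    writer.write(frame)
    ok, frame = cap.read()

cap.release()
writer.release()                            # without release() the file may stay truncated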
I am trying to calculate the exact distance (a 3 cm error is acceptable) between an ArUco marker and the camera. I use Python, OpenCV, and ArUco. I can detect the markers (the marker side is 0.023 meters, i.e. 2.3 cm), but I can't interpret the distance, because for a 40 cm distance the norm of the translation vector gives 1 meter. I am very confused about this. Can anyone help? Full code (sorry, not documented well):
import numpy as np
import cv2
import cv2.aruco as aruco
import glob
import argparse
import math
# Marker id infos. Global to access everywhere. It is unnecessary to change it to local.
firstMarkerID = None
secondMarkerID = None
cap = cv2.VideoCapture(0)
image_width = 0
image_height = 0
#hyper parameters
distanceBetweenTwoMarkers = 0.0245 # in meters, 2.45 cm
oneSideOfTheMarker = 0.023 # in meters, 2.3 cm
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
class Namespace:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

def calibrate(dirpath):
    """ Apply camera calibration operation for images in the given directory path. """
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(8,6,0)
    objp = np.zeros((6*9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.
    images = glob.glob(dirpath + '/*.jpg')
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
        # If found, add object points, image points (after refining them)
        if ret:
            objpoints.append(objp)
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners2)
            # Draw and display the corners
            img = cv2.drawChessboardCorners(img, (9, 6), corners2, ret)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    return [ret, mtx, dist, rvecs, tvecs]
def saveCoefficients(mtx, dist, path):
    """ Save the camera matrix and the distortion coefficients to given path/file. """
    cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_WRITE)
    cv_file.write("camera_matrix", mtx)
    cv_file.write("dist_coeff", dist)
    # note you *release* you don't close() a FileStorage object
    cv_file.release()

def loadCoefficients(path):
    """ Loads camera matrix and distortion coefficients. """
    # FILE_STORAGE_READ
    cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)
    # note we also have to specify the type to retrieve otherwise we only get a
    # FileNode object back instead of a matrix
    camera_matrix = cv_file.getNode("camera_matrix").mat()
    dist_matrix = cv_file.getNode("dist_coeff").mat()
    # Debug: print the values
    # print("camera_matrix : ", camera_matrix.tolist())
    # print("dist_matrix : ", dist_matrix.tolist())
    cv_file.release()
    return [camera_matrix, dist_matrix]

def inversePerspective(rvec, tvec):
    """ Applies perspective transform for given rvec and tvec. """
    rvec, tvec = rvec.reshape((3, 1)), tvec.reshape((3, 1))
    R, _ = cv2.Rodrigues(rvec)
    R = np.matrix(R).T
    invTvec = np.dot(R, np.matrix(-tvec))
    invRvec, _ = cv2.Rodrigues(R)
    invTvec = invTvec.reshape((3, 1))
    invTvec = invTvec.reshape((3, 1))
    return invRvec, invTvec

def make_1080p():
    global image_width
    global image_height
    image_width = 1920
    image_height = 1080
    change_res(image_width, image_height)

def make_720p():
    global image_width
    global image_height
    image_width = 1280
    image_height = 720
    change_res(image_width, image_height)

def make_480p():
    global image_width
    global image_height
    image_width = 640
    image_height = 480
    change_res(image_width, image_height)

def change_res(width, height):
    cap.set(3, width)
    cap.set(4, height)

def relativePosition(rvec1, tvec1, rvec2, tvec2):
    """ Get relative position for rvec2 & tvec2. Compose the returned rvec & tvec to use composeRT with rvec2 & tvec2. """
    rvec1, tvec1 = rvec1.reshape((3, 1)), tvec1.reshape((3, 1))
    rvec2, tvec2 = rvec2.reshape((3, 1)), tvec2.reshape((3, 1))
    # Inverse the second marker, the right one in the image
    invRvec, invTvec = inversePerspective(rvec2, tvec2)
    info = cv2.composeRT(rvec1, tvec1, invRvec, invTvec)
    composedRvec, composedTvec = info[0], info[1]
    composedRvec = composedRvec.reshape((3, 1))
    composedTvec = composedTvec.reshape((3, 1))
    return composedRvec, composedTvec

def euclideanDistanceOfTvecs(tvec1, tvec2):
    return math.sqrt(math.pow(tvec1[0]-tvec2[0], 2) + math.pow(tvec1[1]-tvec2[1], 2) + math.pow(tvec1[2]-tvec2[2], 2))

def euclideanDistanceOfTvec(tvec):
    return euclideanDistanceOfTvecs(tvec, [0, 0, 0])

def draw(img, imgpts, color):
    """ draw a line between given two points. """
    imgpts = np.int32(imgpts).reshape(-1, 2)
    for pointf in range(len(imgpts)):
        for points in range(len(imgpts)):
            img = cv2.line(img, tuple(imgpts[pointf]), tuple(imgpts[points]), color, 3)
    return img
def track(matrix_coefficients, distortion_coefficients):
    global image_width
    global image_height
    """ Real time ArUco marker tracking. """
    needleComposeRvec, needleComposeTvec = None, None  # Composed for needle
    ultraSoundComposeRvec, ultraSoundComposeTvec = None, None  # Composed for ultrasound
    savedNeedleRvec, savedNeedleTvec = None, None  # Pure Composed
    savedUltraSoundRvec, savedUltraSoundTvec = None, None  # Pure Composed
    TcomposedRvecNeedle, TcomposedTvecNeedle = None, None
    TcomposedRvecUltrasound, TcomposedTvecUltrasound = None, None
    make_480p()
    while True:
        isFirstMarkerDetected = False
        isSecondMarkerDetected = False
        ret, frame = cap.read()
        # operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Change grayscale
        aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)  # Use 5x5 dictionary to find markers
        parameters = aruco.DetectorParameters_create()  # Marker detection parameters
        # lists of ids and the corners beloning to each id
        corners, ids, rejected_img_points = aruco.detectMarkers(gray, aruco_dict,
                                                                parameters=parameters,
                                                                cameraMatrix=matrix_coefficients,
                                                                distCoeff=distortion_coefficients)
        if np.all(ids is not None):  # If there are markers found by detector
            zipped = zip(ids, corners)
            ids, corners = zip(*(sorted(zipped)))
            # print(ids)
            for i in range(0, len(ids)):  # Iterate in markers
                # Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients
                rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners[i], oneSideOfTheMarker,
                                                                           matrix_coefficients,
                                                                           distortion_coefficients)
                if ids[i] == firstMarkerID:
                    firstRvec = rvec
                    firstTvec = tvec
                    isFirstMarkerDetected = True
                    firstMarkerCorners = corners[i]
                elif ids[i] == secondMarkerID:
                    secondRvec = rvec
                    secondTvec = tvec
                    isSecondMarkerDetected = True
                    secondMarkerCorners = corners[i]
                (rvec - tvec).any()  # get rid of that nasty numpy value array error
                # aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01)  # Draw Axis
                aruco.drawDetectedMarkers(frame, corners)  # Draw A square around the markers
            ''' First try
            if isFirstMarkerDetected and isSecondMarkerDetected:
                composedRvec, composedTvec = relativePosition(firstRvec, firstTvec, secondRvec, secondTvec)
                info = cv2.composeRT(composedRvec, composedTvec, secondRvec.T, secondTvec.T)
                composedRvec, composedTvec = info[0], info[1]
                composedRvec, composedTvec = composedRvec.T, composedTvec.T
                differenceRvec, differenceTvec = composedRvec-secondRvec, composedTvec-secondTvec
                # print infos
                print("composed Rvec: ", composedRvec)
                print("composed Tvec: ", composedTvec)
                print("Second marker Rvec: ", secondRvec)
                print("Second marker Tvec: ", secondTvec)
                print("differenceRvec: ", differenceRvec)
                print("differenceTvec: ", differenceTvec)
                print("real difference: ", euclideanDistanceOfTvecs(composedTvec[0], secondTvec[0][0]))
                # draw axis to estimated location
                aruco.drawAxis(frame, mtx, dist, composedRvec, composedTvec, 0.0115)
                realDistanceInTvec = euclideanDistanceOfTvec(secondTvec[0][0])
                difference = euclideanDistanceOfTvecs(composedTvec[0], secondTvec[0][0])
                calculatedDistance = realDistanceInTvec * (distanceBetweenTwoMarkers / difference)
                calculatedDistance = realDistanceInTvec * (distanceBetweenTwoMarkers / (secondTvec[0][0][2] - firstTvec[0][0][2]))
                print(calculatedDistance)
            '''
            if isFirstMarkerDetected and isSecondMarkerDetected:
                composedRvec, composedTvec = relativePosition(firstRvec, firstTvec, secondRvec, secondTvec)
                camerafirstRvec, cameraFirstTvec = inversePerspective(firstRvec, firstTvec)
                camerasecondRvec, camerasecondTvec = inversePerspective(secondRvec, secondTvec)
                differenceRvec, differenceTvec = camerafirstRvec - camerasecondRvec, cameraFirstTvec - camerasecondTvec
                # print infos
                print("first Rvec: ", camerafirstRvec)
                print("first Tvec: ", cameraFirstTvec)
                print("Second marker Rvec: ", camerasecondRvec)
                print("Second marker Tvec: ", camerasecondTvec)
                # print("differenceRvec: ", differenceRvec)
                # print("differenceTvec: ", differenceTvec)
                realDistanceInTvec = euclideanDistanceOfTvec(secondTvec[0][0])
                print(cv2.norm(secondTvec[0][0]))
                difference = euclideanDistanceOfTvecs(composedTvec.T[0], secondTvec[0][0])
                calculatedDistance = realDistanceInTvec * (distanceBetweenTwoMarkers / difference)
                # print(calculatedDistance)
        # Display the resulting frame
        cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('frame', image_width, image_height)
        cv2.imshow('frame', frame)
        # Wait 3 miliseconds for an interaction. Check the key and do the corresponding job.
        key = cv2.waitKey(3) & 0xFF
        if key == ord('q'):  # Quit
            break
        elif key == ord('p'):  # print necessary information here
            pass  # Insert necessary print here
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Aruco Marker Tracking')
    parser.add_argument('--coefficients', metavar='bool', required=True,
                        help='File name for matrix coefficients and distortion coefficients')
    parser.add_argument('--firstMarker', metavar='int', required=True,
                        help='first')
    parser.add_argument('--secondMarker', metavar='int', required=True,
                        help='second')
    # Parse the arguments and take action for that.
    args = parser.parse_args()
    firstMarkerID = int(args.firstMarker)
    secondMarkerID = int(args.secondMarker)
    if args.coefficients == '1':
        mtx, dist = loadCoefficients("test.yaml")
        ret = True
    else:
        ret, mtx, dist, rvecs, tvecs = calibrate("calib_images")
        saveCoefficients(mtx, dist, "calibrationCoefficients.yaml")
    print("Calibration is completed. Starting tracking sequence.")
    if ret:
        track(mtx, dist)
I got the answer. The problem is in the calibration. When calibrating with the chessboard, I gave the object points like this:
(0,0,0), (1,0,0) and so on.
The thing is that for pose estimation the camera must be calibrated properly. My chessboard square size was 1.5 centimeters, which is 0.015 meters, so I changed the object point matrix to:
(0,0,0), (0.015,0,0) and so on.
In other words, I told the program that the calibration is in meters. If you do the calibration with a different object-point matrix than the real one, the pose estimation fails. This is covered in the OpenCV documentation, but I missed it: the docs say something like "you can pass them like that," and I didn't realize it would make the pose estimation fail.
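Concretely, the only change in the calibrate() function above is to express the chessboard object points in the same real-world unit used everywhere else (meters here, with a 1.5 cm square); nothing else changes. A minimal sketch, assuming a 9x6 inner-corner board and a 0.015 m square size:

import numpy as np

square_size = 0.015  # chessboard square edge in meters (1.5 cm)

# object points become (0,0,0), (0.015,0,0), (0.030,0,0), ... instead of (0,0,0), (1,0,0), ...
objp = np.zeros((6 * 9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2) * square_size

# with these object points, calibrateCamera returns tvecs in meters, and so does
# aruco.estimatePoseSingleMarkers when the marker side length is also given in meters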
I am trying to do an image stitching project with OpenCV in Python, where I use point matches calculated by tracking points between frames of a video with the Lucas-Kanade algorithm to find homography matrices. After writing the program, when it came time to stitch the frames of a video together, I ran a test where I simply displayed the perspective-warped version of each image onto a black canvas to see how the homography matrix had warped it. When I did this, instead of moving over bit by bit between frames, the frames were translated further and further away, far more than the slight nudge expected between frames:
[----------------------------------------------------------------------------Empty Space------------------------------------]
[Frame0------------------------------------------------------------------------------------------------------------------------]
[------------Frame1----------------------------------------------------------------------------------------------------------- ]
[-------------------------------------------Frame 2----------------------------------------------------------------------------]
[------------------------------------------------------------------------------------------------------------Frame 3-----------]
Subsequent frames would be out of visual range. I am not quite sure why this is happening. I implemented a back-projection error check to make sure only points with accurate optical-flow calculations were passed on. I also set the back-projection threshold for findHomography to 10, 1, and then 0.5, all to no avail. Since I am stitching multiple images, I multiply the homography matrices between frames, and this seems to be compounding the error. Why is this happening, and how can I fix my homography matrices? Here is my code (ignore the commented-out tests; also, some of the indentation formatting might have been messed up while copying over to the forum):
import numpy as np
import sys
import cv2
import math

lastFeatures = None
currentFeatures = None
opticFlow = None
panRow = None
Rows = None
finalPanorama = None

def loadRow(dirPath, fType, numImages, column):
    imageRow = []
    for i in range(0, numImages):
        imageRow.append(cv2.imread("%s/%i_%i.%s" % (dirPath, column, i, fType), cv2.IMREAD_COLOR))
    return imageRow

def findNthFeatures(prevImg, prevPnts, nxtImg):
    back_threshold = 0.5
    nxtDescriptors = []
    prevGrey = None
    nxtGrey = None
    nxtPnts = prevPnts[:]
    prevGrey = cv2.cvtColor(prevImg, cv2.COLOR_BGR2GRAY)
    nxtGrey = cv2.cvtColor(nxtImg, cv2.COLOR_BGR2GRAY)
    lucasKanadeParams = dict(winSize=(19, 19), maxLevel=100,
                             criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    nxtPnts, status, err = cv2.calcOpticalFlowPyrLK(prevGrey, nxtGrey, prevPnts, None, **lucasKanadeParams)
    backProjections, status, err = cv2.calcOpticalFlowPyrLK(nxtGrey, prevGrey, nxtPnts, None, **lucasKanadeParams)
    d = abs(prevPnts - backProjections).reshape(-1, 2).max(-1)
    status = d < back_threshold
    goodNew = nxtPnts[status].copy()
    goodLast = prevPnts[status].copy()
    return goodLast, goodNew

def getHomographies(videoName):
    color = np.random.randint(0, 255, (100, 3))
    lastFrame = None
    currentFrame = None
    lastKeypoints = None
    currentKeypoints = None
    firstImage = True
    featureRefreshRate = 5
    feature_params = dict(maxCorners=100,
                          qualityLevel=0.1,
                          minDistance=8,
                          blockSize=15)
    frameCount = 0
    Homographies = []
    cv2.namedWindow('display', cv2.WINDOW_NORMAL)
    cap = cv2.VideoCapture(videoName)
    flags, frame = cap.read()
    while flags:
        if firstImage:
            firstImage = False
            lastFrame = frame[:, :].copy()
            lastGray = cv2.cvtColor(lastFrame, cv2.COLOR_BGR2GRAY)
            lastKeypoints = cv2.goodFeaturesToTrack(lastGray, mask=None, **feature_params)
            flags, frame = cap.read()
            frameCount += 1
        else:
            mask = np.zeros_like(lastFrame)
            currentFrame = frame[:, :].copy()
            frameCount += 1
            lastKeypoints, currentKeypoints = findNthFeatures(lastFrame, lastKeypoints, currentFrame)
            # for i, (new, old) in enumerate(zip(currentKeypoints, lastKeypoints)):
            #     a, b = new.ravel()
            #     c, d = old.ravel()
            #     mask = cv2.line(mask, (a, b), (c, d), color[i].tolist(), 2)
            #     frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
            # img = cv2.add(frame, mask)
            # cv2.imshow('display', img)
            # cv2.waitKey(0)
            homographyMatrix, homographyStatus = cv2.findHomography(currentKeypoints, lastKeypoints, cv2.RANSAC, 0.5)
            Homographies.append(homographyMatrix)
            lastFrame = currentFrame
            lastKeypoints = currentKeypoints
            if frameCount % featureRefreshRate == 0:
                grayBuf = cv2.cvtColor(lastFrame, cv2.COLOR_BGR2GRAY)
                lastKeypoints = cv2.goodFeaturesToTrack(grayBuf, mask=None, **feature_params)
            flags, frame = cap.read()
    return Homographies

def stitchRow(videoName):
    cv2.namedWindow('display', cv2.WINDOW_NORMAL)
    frameCount = 0
    cap = cv2.VideoCapture(videoName)
    ret, initialImage = cap.read()
    homographyMatrices = []
    homographyMatrices = getHomographies(videoName)
    warpHMat = homographyMatrices[frameCount]
    while ret:
        ret, nextImg = cap.read()
        frameCount += 1
        result = cv2.warpPerspective(nextImg, warpHMat, (initialImage.shape[1] + nextImg.shape[1], nextImg.shape[0]))
        # result[0:initialImage.shape[0], 0:initialImage.shape[1]] = initialImage
        cv2.imshow('display', result)
        cv2.waitKey(0)
        # cv2.imshow('display', initialImage)
        # cv2.waitKey(0)
        warpHMat = homographyMatrices[frameCount]
        for j in range(frameCount, 0, -1):
            warpHMat = warpHMat * homographyMatrices[j-1]
        # initialImage = result[:, :].copy()
stitchRow(sys.argv[1])
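One thing that stands out in stitchRow(), offered as a likely culprit rather than a verified fix: warpHMat and the entries of homographyMatrices are NumPy arrays, so warpHMat * homographyMatrices[j-1] multiplies them element-wise instead of composing the transforms. Chaining homographies needs real matrix multiplication (and it helps to renormalize so H[2, 2] stays 1). A small sketch of composing the frame-to-frame homographies into frame-to-first-frame homographies, assuming the list produced by getHomographies():

import numpy as np

def accumulate_homographies(homographies):
    """Compose per-frame homographies H_i (mapping frame i+1 onto frame i)
    into homographies that map each frame back onto frame 0."""
    accumulated = []
    H_total = np.eye(3)
    for H in homographies:
        H_total = H_total @ H              # matrix product, not element-wise '*'
        H_total = H_total / H_total[2, 2]  # keep the homography normalized
        accumulated.append(H_total.copy())
    return accumulated

# usage sketch:
# homographyMatrices = getHomographies(videoName)
# toFirstFrame = accumulate_homographies(homographyMatrices)
# result = cv2.warpPerspective(nextImg, toFirstFrame[frameCount], dsize)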