3D reconstruction and distance measurement - Python

Hi guys, I have been working on a small program in Python, using the OpenCV library and two webcams, so that I can measure the distance between the two cameras and the object right in front of them (using the disparity map). When I run the program, what I get at the end is not a single number (which is supposed to be the distance) but a matrix of many different numbers. Besides, I always get one number that doesn't change even if I change the view. Can anybody tell me why?
Here is the code:
import numpy as np
import cv2 as cv
import cv2.cv as cv1
from VideoCapture import Device
import os
def caliLeftCam():
    args, img_mask = getopt.getopt(sys.argv[1:], '', ['save=', 'debug=', 'square_size='])
    args = dict(args)
    try: img_mask = img_mask[0]
    except: img_mask = '../cpp/img*.jpg'
    img_names = glob(img_mask)
    debug_dir = args.get('--debug')
    square_size = float(args.get('--square_size', 1.0))

    pattern_size = (7, 5)
    pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
    pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= square_size

    obj_points = []
    img_pointsL = []
    h, w = 0, 0
    for fn in img_names:
        print "processing %s..." % fn,
        imgL = cv.imread(fn, 0)
        h, w = imgL.shape[:2]
        found, corners = cv.findChessboardCorners(imgL, pattern_size)
        if found:
            term = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1)
            cv.cornerSubPix(imgL, corners, (5, 5), (-1, -1), term)
        if debug_dir:
            vis = cv.cvtColor(imgL, cv.COLOR_GRAY2BGR)
            cv.drawChessboardCorners(vis, pattern_size, corners, found)
            path, name, ext = splitfn(fn)
            cv.imwrite('%s/%s_chess.bmp' % (debug_dir, name), vis)
        if not found:
            print "chessboard not found"
            continue
        img_pointsL.append(corners.reshape(-1, 2))
        obj_points.append(pattern_points)
        print 'ok'

    rmsL, cameraL_matrix, dist_coefsL, rvecsL, tvecsL = cv.calibrateCamera(obj_points, img_pointsL, (w, h))
    print "RMSL:", rmsL
    print "Left camera matrix:\n", cameraL_matrix
    print "distortion coefficients: ", dist_coefsL.ravel()

    newcameramtxL, roi = cv.getOptimalNewCameraMatrix(cameraL_matrix, dist_coefsL, (w, h), 1, (w, h))
    # undistort
    mapxL, mapyL = cv.initUndistortRectifyMap(cameraL_matrix, dist_coefsL, None, newcameramtxL, (w, h), 5)
    dstL = cv.remap(imgL, mapxL, mapyL, cv.INTER_LINEAR)
    return img_pointsL, cameraL_matrix, dist_coefsL
def caliRightCam():
    args, img_mask = getopt.getopt(sys.argv[1:], '', ['save=', 'debug=', 'square_size='])
    args = dict(args)
    try: img_mask = img_mask[0]
    except: img_mask = '../cpp/ph*.jpg'
    img_names = glob(img_mask)
    debug_dir = args.get('--debug')
    square_size = float(args.get('--square_size', 1.0))

    pattern_size = (7, 5)
    pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
    pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= square_size

    obj_points = []
    img_pointsR = []
    h, w = 0, 0
    for fn in img_names:
        print "processing %s..." % fn,
        imgR = cv.imread(fn, 0)
        h, w = imgR.shape[:2]
        found, corners = cv.findChessboardCorners(imgR, pattern_size)
        if found:
            term = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1)
            cv.cornerSubPix(imgR, corners, (5, 5), (-1, -1), term)
        if debug_dir:
            vis = cv.cvtColor(imgR, cv.COLOR_GRAY2BGR)
            cv.drawChessboardCorners(vis, pattern_size, corners, found)
            path, name, ext = splitfn(fn)
            cv.imwrite('%s/%s_chess.bmp' % (debug_dir, name), vis)
        if not found:
            print "chessboard not found"
            continue
        img_pointsR.append(corners.reshape(-1, 2))
        obj_points.append(pattern_points)
        print 'ok'

    rmsR, cameraR_matrix, dist_coefsR, rvecsR, tvecsR = cv.calibrateCamera(obj_points, img_pointsR, (w, h))
    print "RMSR:", rmsR
    print "Right camera matrix:\n", cameraR_matrix
    print "distortion coefficients: ", dist_coefsR.ravel()

    newcameramtxR, roi = cv.getOptimalNewCameraMatrix(cameraR_matrix, dist_coefsR, (w, h), 1, (w, h))
    # undistort
    mapxR, mapyR = cv.initUndistortRectifyMap(cameraR_matrix, dist_coefsR, None, newcameramtxR, (w, h), 5)
    dstR = cv.remap(imgR, mapxR, mapyR, cv.INTER_LINEAR)
    return img_pointsR, obj_points, cameraR_matrix, dist_coefsR
def Pics():
    vc = cv.VideoCapture(2)
    retVal, frame = vc.read()
    while True:
        if frame is not None:
            imgray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            ret, thresh = cv.threshold(imgray, 127, 255, 1)
            contours, hierarchy = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
            cv.namedWindow("threshold")
            cv.namedWindow("Camera")
            #cv2.drawContours(frame, contours, -1, (0,255,0), 2)
            cv.imshow("Camera", frame)
            cv.imshow("threshold", thresh)
        rval, frame = vc.read()
        if cv.waitKey(1) & 0xFF == 27:
            break
    cv1.DestroyAllWindows()
def LeftCap():
    cam = Device(2)
    cam.saveSnapshot('imageL.jpg')
    fn = r'C:\opencv2.4.8\sources\samples\python2\imageL.jpg'
    return fn

def RightCap():
    cam = Device(0)
    cam.saveSnapshot('imageR.jpg')
    fn = r'C:\opencv2.4.8\sources\samples\python2\imageR.jpg'
    return fn
def Calculate(Li, Ri, Mat):
    img_L = cv.pyrDown(cv.imread(Li))
    img_R = cv.pyrDown(cv.imread(Ri))

    window_size = 3
    min_disp = 16
    num_disp = 112 - min_disp
    stereo = cv.StereoSGBM(minDisparity = min_disp,
                           numDisparities = num_disp,
                           SADWindowSize = window_size,
                           uniquenessRatio = 10,
                           speckleWindowSize = 100,
                           speckleRange = 32,
                           disp12MaxDiff = 1,
                           P1 = 8*3*window_size**2,
                           P2 = 32*3*window_size**2,
                           fullDP = False)
    print "computing disparity..."
    disp = stereo.compute(img_L, img_R).astype(np.float32) / 16.0

    print "generating 3d point cloud..."
    h, w = img_L.shape[:2]
    f = 0.8 * w  # guess for focal length
    points = cv.reprojectImageTo3D(disp, Mat)
    colors = cv.cvtColor(img_L, cv.COLOR_BGR2RGB)
    mask = disp > disp.min()
    cv.imshow('left', img_L)
    cv.imshow('disparity', (disp - min_disp) / num_disp)

    b = 6.50
    D = b * f / disp  # element-wise: D is a full depth map with one value per pixel, not a single distance
    print "The Distance =", D
    cv.waitKey()
    cv1.DestroyAllWindows()
if __name__ == '__main__':
    import sys, getopt
    from glob import glob

    Img_pointsL, Cam_MatL, DisL = caliLeftCam()
    Img_pointsR, obj_points, Cam_MatR, DisR = caliRightCam()

    print "Running stereo calibration..."
    retval, Cam_MatL, DisL, Cam_MatR, DisR, R, T, E, F = cv.stereoCalibrate(obj_points, Img_pointsL, Img_pointsR, (384, 288))
    print "running rectification..."
    RL, Rr, PL, PR, Q, validRoiL, validRoiR = cv.stereoRectify(Cam_MatL, DisL, Cam_MatR, DisR, (384, 288), R, T)

    Pics()
    Li = LeftCap()
    Ri = RightCap()
    Calculate(Li, Ri, Q)

Not sure if that is the problem, but you call the cv.StereoRectify() function as cv2.stereoRectify(), not cv2.cv.StereoRectify(), and you provided the wrong arguments to it.
From the documentation:
cv.StereoRectify(cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2, imageSize, R, T, R1, R2, P1, P2, Q=None, flags=CV_CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0, 0))
You did it this (wrong) way:
cv.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2...)
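For reference, this is roughly how the call could look if you follow the legacy signature quoted above, using the cv2.cv module your code already imports as cv1. This is only a minimal, untested sketch: the fromarray() conversions and the output matrix allocations are my assumptions, and the variable names are taken from your main block.
R1 = cv1.CreateMat(3, 3, cv1.CV_64FC1)
R2 = cv1.CreateMat(3, 3, cv1.CV_64FC1)
P1 = cv1.CreateMat(3, 4, cv1.CV_64FC1)
P2 = cv1.CreateMat(3, 4, cv1.CV_64FC1)
Q  = cv1.CreateMat(4, 4, cv1.CV_64FC1)
# legacy argument order: both camera matrices first, then both distortion vectors
cv1.StereoRectify(cv1.fromarray(Cam_MatL), cv1.fromarray(Cam_MatR),
                  cv1.fromarray(DisL), cv1.fromarray(DisR),
                  (384, 288), cv1.fromarray(R), cv1.fromarray(T),
                  R1, R2, P1, P2, Q)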

Related

NameError: name 'getCornerInCameraWorld' is not defined

I am working with VSCode 1.68.1, Ubuntu 20.04.
I am following link:
https://courses.ece.cornell.edu/ece5990/ECE5725_Fall2020_Projects/Dec_21_Demo/Drawing%20Robot/eam348_mm2994_W/index.html
from unittest import result
import numpy as np
import time
import cv2
import sys
import cv2.aruco as aruco
import socket
import datetime
import glob
import math
import multiprocessing as mp

port = 30003
IP = '192.11.0.25'
robot_ID = 20185500976
robot = 'ur-20185500976'
marker_dimension = 0.06
worldx = 390
worldy = 260
bottom_left = 31  # this is the origin - positivex: towards bottom right - positivey: towards top left
bottom_right = 32
top_left = 9
top_right = 20

# camera dist, matrix and newcameramatrix
dist = np.array(([[5.0164361897882787e-02, 6.6308284023737640e-01, 2.5493975084043882e-03, -6.0403656948007376e-03, -2.9652221208277720e+00]]))
mtx = np.array([[6.1618286891135097e+02, 0., 3.2106366551961219e+02],
                [0., 6.1595924417559945e+02, 2.4165645046034246e+02],
                [0., 0., 1.]])

found_dict_pixel_space = {}
found_dict_camera_space = {}
found_dict_world_space = {}
found_dict_homography_space = {}
final_string = ""
originRvec = np.array([0, 0, 1])
markerRvec = np.array([0, 0, 0])

def UDP(IP, port, message):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # IPv4 DNS server - UDP protocol
    sock.sendto(bytes(message, "utf-8"), (IP, port))  # self, data, address

def getMarkerCenter(corners):
    px = (corners[0][0] + corners[1][0] + corners[2][0] + corners[3][0]) * 0.25
    py = (corners[0][1] + corners[1][1] + corners[2][1] + corners[3][1]) * 0.25
    return [px, py]

def getMarkerRotation(corners):
    unit_x_axis = [1., 0.]
    center = getMarkerCenter(corners)
    right_edge_midpoint = (corners[1] + corners[2]) / 2.
    unit_vec = (right_edge_midpoint - center) / np.linalg.norm(right_edge_midpoint - center)
    angle = np.arccos(np.dot(unit_x_axis, unit_vec))
    return angle

def inversePerspective(rvec, tvec):
    R, _ = cv2.Rodrigues(rvec)
    R = np.array(R).T  # this was np.matrix but had error
    invTvec = np.dot(-R, np.array(tvec))
    invRvec, _ = cv2.Rodrigues(R)
    return invRvec, invTvec

def normalize(v):
    if np.linalg.norm(v) == 0: return v
    return v / np.linalg.norm(v)

def findWorldCoordinate(originCorners, point):
    zero = np.array(originCorners[3])  # bottom left as the origin - check the data structure
    print(zero)
    x = (np.array(originCorners[0]) - zero)  # bottom right - Green Axis -- throw out z
    y = (np.array(originCorners[1]) - zero)  # top left - Red Axis -- throw out z
    x = x[0][0:2]
    y = y[0][0:2]
    x = normalize(x)
    y = normalize(y)
    # print("x", x)
    vec = (point - zero)[0][0:2]
    # print("vec", vec)
    vecNormal = normalize(vec)
    cosX = np.dot(x, vecNormal)
    cosY = np.dot(y, vecNormal)
    xW = np.linalg.norm(vec) * cosX
    yW = np.linalg.norm(vec) * cosY
    return [xW, yW]

cap = cv2.VideoCapture(4)
font = cv2.FONT_HERSHEY_SIMPLEX  # font for displaying text (below)

while True:
    t0 = time.time()
    ret, frame = cap.read()
    h, w = frame.shape[:2]
    # new image size to generate192.0.1.25
    h1, w1 = h, w
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 0, (w1, h1))
    # print(newcameramtx)
    # mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w1,h1), 5)
    # dst1 = cv2.remap(frame, mapx, mapy, cv2.INTER_LINEAR)
    dst1 = cv2.undistort(frame, mtx, dist, None, newcameramtx)
    x, y, w1, h1 = roi
    dst1 = dst1[y:y + h1, x:x + w1]
    frame = dst1
    t1 = time.time() - t0
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
    arucoParameters = aruco.DetectorParameters_create()
    t2 = time.time() - t0
    data = aruco.detectMarkers(gray, aruco_dict, parameters=arucoParameters)
    t3 = time.time() - t0
    corners = data[0]
    ids = data[1]
    originIDglobal = 0
    # If you can't find it, type id
    if ids is not None:
        t4 = time.time() - t0
        result = aruco.estimatePoseSingleMarkers(corners, marker_dimension, newcameramtx, dist)
        rvecs = result[0]  # rotation vectors of markers
        tvecs = result[1]  # translation vector of markers
        # setting bottom_left as the origin
        if bottom_left in ids:
            originID = np.where(ids == bottom_left)[0][0]
            originIDglobal = originID
        else:
            originID = originIDglobal
        originCorners = corners[originID]  # corners of the tag set as the origin
        originCornersCamera = getCornerInCameraWorld(marker_dimension, rvecs[originID], tvecs[originID])[0]  # origin tag corners in camera space
        originRvec = rvecs[originID]  # rotation vec of origin tag
        originTvec = tvecs[originID]  # translation vec of origin tag
        display = aruco.drawDetectedMarkers(frame, corners, ids)  # Draw a square around the markers
        t5 = time.time() - t0
        for i in range(len(ids)):
            ID = ids[i]
            rvec = rvecs[i]
            tvec = tvecs[i]
            corners4 = corners[i]
            display = cv2.drawFrameAxes(frame, newcameramtx, dist, rvec, tvec, 0.03)  # Draw 3D Axis, 3cm(0.03)
            found_dict_pixel_space["" + str(ids[i][0])] = corners4  # put the corners of this tag in the dictionary
        # Homography
        zero = found_dict_pixel_space[str(bottom_left)][0][3]   # bottom left - 3
        x = found_dict_pixel_space[str(bottom_right)][0][2]     # bottom right - 27
        y = found_dict_pixel_space[str(top_left)][0][0]         # top left - 22
        xy = found_dict_pixel_space[str(top_right)][0][1]       # top right - 24
        workspace_world_corners = np.array([[0.0, 0.0], [worldx, 0.0], [0.0, worldy], [worldx, worldy]], np.float32)  # 4 corners in millimeters
        workspace_pixel_corners = np.array([zero, x, y, xy], np.float32)  # 4 corners in pixels
        # Homography Matrix
        h, status = cv2.findHomography(workspace_pixel_corners, workspace_world_corners)  # perspective matrix
        t6 = time.time() - t0
        im_out = cv2.warpPerspective(frame, h, (worldx, worldy))  # showing that it works
        t7 = time.time() - t0
        for i in range(len(ids)):
            j = ids[i][0]
            corners_pix = found_dict_pixel_space[str(j)]  # [0]
            corners_pix_transformed = cv2.perspectiveTransform(corners_pix, h)
            found_dict_homography_space[str(j)] = corners_pix_transformed
        print(found_dict_homography_space)
        robot = found_dict_homography_space[str(robot_ID)][0]
        print(getMarkerCenter(robot))
        cv2.imshow('Warped Source Image', im_out)
        t8 = time.time() - t0
        print("t1: %8.4f t2: %8.4f t3: %8.4f t4: %8.4f t5: %8.4f t6: %8.4f t7: %8.4f t8: %8.4f" % (t1, t2-t1, t3-t2, t4-t3, t5-t4, t6-t5, t7-t6, t8-t7))
    else:
        display = frame
        cv2.imshow('Display', display)
    # Display result frame
    cv2.imshow("frame", frame)
    key = cv2.waitKey(1)
    if key == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
I am getting the error: "NameError: name 'getCornerInCameraWorld' is not defined"
I am unable to find anything related to getCornerInCameraWorld.
Please provide some help.
I got a reply from the author of the code. She confirmed that the code was missing a part and provided it.
The missing code was:
def getCornerInCameraWorld(size, rvec, tvec):
    half_size = size * 0.5
    rotMatrix, jacobian = cv2.Rodrigues(rvec)  # convert rot vector from marker space to camera space
    X = half_size * rotMatrix[:, 0]
    Y = half_size * rotMatrix[:, 1]
    c1 = np.add(np.add(-1 * X, Y), tvec)       # top left
    c2 = np.add(np.add(X, Y), tvec)            # top right
    c3 = np.add(np.add(X, -1 * Y), tvec)       # bottom right
    c4 = np.add(np.add(-1 * X, -1 * Y), tvec)  # bottom left
    cornersInCameraWorld = [c1, c2, c3, c4]
    cornersInCameraWorld = np.array(cornersInCameraWorld, dtype=np.float32)
    return cornersInCameraWorld, rotMatrix
I have checked, and it is working for me.
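As a quick sanity check (hypothetical values, not part of the original post), calling the recovered helper with a zero pose should place the four corners ±half the marker size along the camera X and Y axes:
import numpy as np
import cv2

# zero rotation and translation -> corners of a 6 cm tag expressed in camera coordinates
corners_cam, rot = getCornerInCameraWorld(0.06, np.zeros(3), np.zeros(3))
print(corners_cam)  # four 3-D points, each +/-0.03 m along the camera X/Y axes
print(rot)          # identity rotation matrix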

my videowriter in opencv is not saving any output

I am using OpenCV in Python to tag an object in a video, write that new output, and save it as an .avi file. Everything is working fine except for the video writer: I cannot get it to write and/or save the output. It looks the same as what I've done before, so this is driving me insane. Any ideas on what I might be doing wrong are greatly appreciated.
import numpy as np
import cv2

def draw_pyramid(pyramid, H):
    pyramid_new = []
    i = 0
    for p in pyramid:
        if i != 4:
            temp_p = p
            temp_p = np.append(temp_p, 1)
            M_ext = H[0:3, :]
            p = M_ext @ temp_p
            pyramid_new.append((p[0], p[1], p[2]))
            i += 1
        else:
            pyramid_new.append((p[0], p[1], p[2]))
            i += 1
    return pyramid_new

cap = cv2.VideoCapture('hw4.avi')
got_image, img = cap.read()
videoWriter = cv2.VideoWriter("switch.avi",
                              fourcc=cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                              fps=30.0,
                              frameSize=(img.shape[1], img.shape[0]))
fx, fy, cx, cy = 675.0, 675.0, 320.0, 240.0
k = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])

while True:
    got_img, img = cap.read()
    if not got_img:
        break
    else:
        gray_image = cv2.cvtColor(src=img,
                                  code=cv2.COLOR_BGR2GRAY)  # convert BGR to grayscale
        output_thresh, binary_image = cv2.threshold(src=gray_image,
                                                    maxval=255,
                                                    type=cv2.THRESH_OTSU,  # determine threshold automatically from image
                                                    thresh=0)  # ignore this if using THRESH_OTSU
        arucoDict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100)
        corners, ids, _ = cv2.aruco.detectMarkers(image=img,
                                                  dictionary=arucoDict)
        if ids is not None and corners:
            cv2.aruco.drawDetectedMarkers(image=img,
                                          corners=corners,
                                          ids=ids,
                                          borderColor=(0, 255, 0))
            rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(corners=corners,
                                                                  markerLength=2.0,
                                                                  cameraMatrix=k,
                                                                  distCoeffs=None)
            cv2.aruco.drawAxis(image=img,
                               cameraMatrix=k,
                               distCoeffs=None,
                               rvec=rvecs,
                               tvec=tvecs,
                               length=2.0)
            if ids[0][0] == 1:
                pyr = np.array(((-3.5/3, -1.5/3, -1.5/3, -3.5/3, -2.5),
                                (-1, -1, -1/3, -1/3, -2),
                                (-4/3, -4/3, -4/3, -4/3, -5))).T
                R = cv2.Rodrigues(rvecs)[0]
                r = R[0]
                # rvec_m_c = rvecs[0]  # This is a 1x3 rotation vector
                tm_c = np.array(tvecs[0]).T  # This is a 1x3 translation vector
                t = np.array([[-2.5, -2, -5]]).T
                H = np.block([[R, t], [0, 0, 0, 1]])
            if ids[0][0] == 0:
                pyr = np.array(((1.5/3, 3.5/3, 3.5/3, 1.5/3, 2.5),
                                (-1, -1, -1/3, -1/3, -2),
                                (-4/3, -4/3, -4/3, -4/3, -1))).T
                R = cv2.Rodrigues(rvecs)[0]
                # r = R[0]
                tm_c = np.array(tvecs[0]).T  # This is a 1x3 translation vector
                t = np.array([[2.5, -2, -1]]).T
                H = np.block([[R, t], [0, 0, 0, 1]])
            pyramid_new = draw_pyramid(pyr, H)
            pyramid_new = np.asarray(pyramid_new)
            pyramid_new = pyramid_new.reshape(-1, 3)
            dist_coeff = np.zeros((4, 1))
            pyramid_points, jacobian = cv2.projectPoints(pyramid_new,
                                                         rvecs,
                                                         tvecs,
                                                         np.array(k),
                                                         distCoeffs=dist_coeff)
            pyramid_points = pyramid_points.astype(int)
            froml = [1, 2, 3, 0, 0, 1, 2, 3]
            fromr = [4, 4, 4, 4, 1, 2, 3, 0]
            color = (0, 0, 255)
            for i, j in zip(froml, fromr):
                cv2.line(img, tuple(pyramid_points[i][0]),
                         tuple(pyramid_points[j][0]), color, 1)
        videoWriter.write(img)
        cv2.imshow('switch', img)
        cv2.waitKey(30)

videoWriter.release()

Creating trail from YOLO v2 + deep_sort object tracking with tensorflow

I'm trying to create trails and a map of the trails of every ID, like in this video: https://www.youtube.com/watch?v=tq0BgncuMhs
So far I haven't been able to. I'm currently using this repo from bendidi, https://github.com/bendidi/Tracking-with-darkflow, which I modified to also show the trails.
I did try using cv2.line with the coordinates extracted from track.to_tlbr(), but right now the result looks like this:
Here's the code that I modified to get the current result:
darkflow/darkflow/net/yolov2/predict.py
from collections import deque
import numpy as np
import math
import cv2
import os
import json
#from scipy.special import expit
#from utils.box import BoundBox, box_iou, prob_compare
#from utils.box import prob_compare2, box_intersection
from ...utils.box import BoundBox
from ...cython_utils.cy_yolo2_findboxes import box_constructor

ds = True
try:
    from deep_sort.application_util import preprocessing as prep
    from deep_sort.application_util import visualization
    from deep_sort.deep_sort.detection import Detection
except:
    ds = False

def expit(x):
    return 1. / (1. + np.exp(-x))

def _softmax(x):
    e_x = np.exp(x - np.max(x))
    out = e_x / e_x.sum()
    return out

def findboxes(self, net_out):
    # meta
    meta = self.meta
    boxes = list()
    boxes = box_constructor(meta, net_out)
    return boxes

def extract_boxes(self, new_im):
    cont = []
    new_im = new_im.astype(np.uint8)
    ret, thresh = cv2.threshold(new_im, 127, 255, 0)
    p, contours, hierarchy = cv2.findContours(
        thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for i in range(0, len(contours)):
        cnt = contours[i]
        x, y, w, h = cv2.boundingRect(cnt)
        if w*h > 30**2 and ((w < new_im.shape[0] and h <= new_im.shape[1]) or (w <= new_im.shape[0] and h < new_im.shape[1])):
            if self.FLAGS.tracker == "sort":
                cont.append([x, y, x+w, y+h])
            else:
                cont.append([x, y, w, h])
    return cont

def postprocess(self, net_out, im, frame_id=0, csv_file=None, csv=None, mask=None, encoder=None, tracker=None):
    """
    Takes net output, draw net_out, save to disk
    """
    boxes = self.findboxes(net_out)

    # meta
    meta = self.meta
    nms_max_overlap = 0.1
    threshold = meta['thresh']
    colors = meta['colors']
    labels = meta['labels']
    if type(im) is not np.ndarray:
        imgcv = cv2.imread(im)
    else: imgcv = im
    h, w, _ = imgcv.shape
    thick = int((h + w) // 300)
    resultsForJSON = []

    if not self.FLAGS.track:
        for b in boxes:
            boxResults = self.process_box(b, h, w, threshold)
            if boxResults is None:
                continue
            left, right, top, bot, mess, max_indx, confidence = boxResults
            if self.FLAGS.json:
                resultsForJSON.append({"label": mess, "confidence": float('%.2f' % confidence), "topleft": {"x": left, "y": top}, "bottomright": {"x": right, "y": bot}})
                continue
            if self.FLAGS.display or self.FLAGS.saveVideo:
                cv2.rectangle(imgcv,
                              (left, top), (right, bot),
                              colors[max_indx], thick)
                cv2.putText(imgcv, mess, (left, top - 12),
                            0, 1e-3 * h, colors[max_indx], thick//3)
    else:
        if not ds:
            print("ERROR : deep sort or sort submodules not found for tracking please run :")
            print("\tgit submodule update --init --recursive")
            print("ENDING")
            exit(1)
        detections = []
        scores = []
        lines = deque(maxlen=64)
        for b in boxes:
            boxResults = self.process_box(b, h, w, threshold)
            if boxResults is None:
                continue
            left, right, top, bot, mess, max_indx, confidence = boxResults
            if mess not in self.FLAGS.trackObj:
                continue
            if self.FLAGS.tracker == "deep_sort":
                detections.append(np.array([left, top, right-left, bot-top]).astype(np.float64))
                scores.append(confidence)
            elif self.FLAGS.tracker == "sort":
                detections.append(np.array([left, top, right, bot]).astype(np.float64))
        if len(detections) < 3 and self.FLAGS.BK_MOG:
            detections = detections + extract_boxes(self, mask)
        detections = np.array(detections)
        if detections.shape[0] == 0:
            return imgcv
        if self.FLAGS.tracker == "deep_sort":
            scores = np.array(scores)
            features = encoder(imgcv, detections.copy())
            detections = [
                Detection(bbox, score, feature) for bbox, score, feature in
                zip(detections, scores, features)]
            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = prep.non_max_suppression(boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]
            tracker.predict()
            tracker.update(detections)
            trackers = tracker.tracks
        elif self.FLAGS.tracker == "sort":
            trackers = tracker.update(detections)
        for track in trackers:
            if self.FLAGS.tracker == "deep_sort":
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                center = (int(bbox[0]) + ((int(bbox[2]) - int(bbox[0])) // 2)), (int(bbox[1]) + ((int(bbox[3]) - int(bbox[1])) // 2))  # X + Width / 2, Y + Height / 2
                lines.appendleft(center)
                id_num = str(track.track_id)
            elif self.FLAGS.tracker == "sort":
                bbox = [int(track[0]), int(track[1]), int(track[2]), int(track[3])]
                id_num = str(int(track[4]))
            if self.FLAGS.csv:
                csv.writerow([frame_id, id_num, int(bbox[0]), int(bbox[1]), int(bbox[2])-int(bbox[0]), int(bbox[3])-int(bbox[1])])
                csv_file.flush()
            if self.FLAGS.display or self.FLAGS.saveVideo:
                cv2.rectangle(imgcv, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),
                              (255, 255, 255), thick//3)
                cv2.putText(imgcv, id_num, (int(bbox[0]), int(bbox[1]) - 12), 0, 1e-3 * h, (255, 255, 255), thick//6)
                for i in range(1, len(lines)):
                    cv2.line(imgcv, lines[i - 1], lines[i], (255, 255, 255), thick//3)
    return imgcv
Or just the code that I added:
...
lines = deque(maxlen=64)
...
center = (int(bbox[0]) + ((int(bbox[2]) - int(bbox[0])) // 2)), (int(bbox[1]) + ((int(bbox[3]) - int(bbox[1])) // 2)) # X + Width / 2, Y + Height / 2
lines.appendleft(center)
...
for i in range(1, len(lines)):
    cv2.line(imgcv, lines[i - 1], lines[i], (255,255,255), thick//3)
Could someone help me out with this? Or should I do something with the data first instead of plugging it straight into cv2.line? If you have any suggestions for external software rather than Python, that's also welcome (I have frame_id, track_id, x, y, w, h data).

Calculating the distance and yaw between ArUco marker and camera?

I am trying to calculate the exact distance (a 3 cm error is acceptable) between an ArUco marker and the camera. I use Python, OpenCV and aruco. I can detect the markers (the marker side is 0.023 meters, which is 2.3 cm), but I can't interpret the distance, because for a 40 cm distance the norm of the translation vector gives 1 meter. I am so confused about this. Can anyone help? Full code (sorry, not documented well):
import numpy as np
import cv2
import cv2.aruco as aruco
import glob
import argparse
import math

# Marker id infos. Global to access everywhere. It is unnecessary to change it to local.
firstMarkerID = None
secondMarkerID = None

cap = cv2.VideoCapture(0)
image_width = 0
image_height = 0

# hyper parameters
distanceBetweenTwoMarkers = 0.0245  # in meters, 2.45 cm
oneSideOfTheMarker = 0.023  # in meters, 2.3 cm

# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

class Namespace:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

def calibrate(dirpath):
    """ Apply camera calibration operation for images in the given directory path. """
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(8,6,0)
    objp = np.zeros((6*9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)

    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d point in real world space
    imgpoints = []  # 2d points in image plane.

    images = glob.glob(dirpath + '/*.jpg')
    for fname in images:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chess board corners
        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)

        # If found, add object points, image points (after refining them)
        if ret:
            objpoints.append(objp)
            corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners2)
            # Draw and display the corners
            img = cv2.drawChessboardCorners(img, (9, 6), corners2, ret)

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    return [ret, mtx, dist, rvecs, tvecs]

def saveCoefficients(mtx, dist, path):
    """ Save the camera matrix and the distortion coefficients to given path/file. """
    cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_WRITE)
    cv_file.write("camera_matrix", mtx)
    cv_file.write("dist_coeff", dist)
    # note you *release* you don't close() a FileStorage object
    cv_file.release()

def loadCoefficients(path):
    """ Loads camera matrix and distortion coefficients. """
    # FILE_STORAGE_READ
    cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)

    # note we also have to specify the type to retrieve other wise we only get a
    # FileNode object back instead of a matrix
    camera_matrix = cv_file.getNode("camera_matrix").mat()
    dist_matrix = cv_file.getNode("dist_coeff").mat()

    # Debug: print the values
    # print("camera_matrix : ", camera_matrix.tolist())
    # print("dist_matrix : ", dist_matrix.tolist())

    cv_file.release()
    return [camera_matrix, dist_matrix]

def inversePerspective(rvec, tvec):
    """ Applies perspective transform for given rvec and tvec. """
    rvec, tvec = rvec.reshape((3, 1)), tvec.reshape((3, 1))
    R, _ = cv2.Rodrigues(rvec)
    R = np.matrix(R).T
    invTvec = np.dot(R, np.matrix(-tvec))
    invRvec, _ = cv2.Rodrigues(R)
    invTvec = invTvec.reshape((3, 1))
    invTvec = invTvec.reshape((3, 1))
    return invRvec, invTvec

def make_1080p():
    global image_width
    global image_height
    image_width = 1920
    image_height = 1080
    change_res(image_width, image_height)

def make_720p():
    global image_width
    global image_height
    image_width = 1280
    image_height = 720
    change_res(image_width, image_height)

def make_480p():
    global image_width
    global image_height
    image_width = 640
    image_height = 480
    change_res(image_width, image_height)

def change_res(width, height):
    cap.set(3, width)
    cap.set(4, height)

def relativePosition(rvec1, tvec1, rvec2, tvec2):
    """ Get relative position for rvec2 & tvec2. Compose the returned rvec & tvec to use composeRT with rvec2 & tvec2 """
    rvec1, tvec1 = rvec1.reshape((3, 1)), tvec1.reshape((3, 1))
    rvec2, tvec2 = rvec2.reshape((3, 1)), tvec2.reshape((3, 1))

    # Inverse the second marker, the right one in the image
    invRvec, invTvec = inversePerspective(rvec2, tvec2)

    info = cv2.composeRT(rvec1, tvec1, invRvec, invTvec)
    composedRvec, composedTvec = info[0], info[1]

    composedRvec = composedRvec.reshape((3, 1))
    composedTvec = composedTvec.reshape((3, 1))
    return composedRvec, composedTvec

def euclideanDistanceOfTvecs(tvec1, tvec2):
    return math.sqrt(math.pow(tvec1[0]-tvec2[0], 2) + math.pow(tvec1[1]-tvec2[1], 2) + math.pow(tvec1[2]-tvec2[2], 2))

def euclideanDistanceOfTvec(tvec):
    return euclideanDistanceOfTvecs(tvec, [0, 0, 0])

def draw(img, imgpts, color):
    """ draw a line between given two points. """
    imgpts = np.int32(imgpts).reshape(-1, 2)
    for pointf in range(len(imgpts)):
        for points in range(len(imgpts)):
            img = cv2.line(img, tuple(imgpts[pointf]), tuple(imgpts[points]), color, 3)
    return img

def track(matrix_coefficients, distortion_coefficients):
    global image_width
    global image_height
    """ Real time ArUco marker tracking. """
    needleComposeRvec, needleComposeTvec = None, None  # Composed for needle
    ultraSoundComposeRvec, ultraSoundComposeTvec = None, None  # Composed for ultrasound
    savedNeedleRvec, savedNeedleTvec = None, None  # Pure Composed
    savedUltraSoundRvec, savedUltraSoundTvec = None, None  # Pure Composed
    TcomposedRvecNeedle, TcomposedTvecNeedle = None, None
    TcomposedRvecUltrasound, TcomposedTvecUltrasound = None, None

    make_480p()
    while True:
        isFirstMarkerDetected = False
        isSecondMarkerDetected = False
        ret, frame = cap.read()

        # operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # Change grayscale
        aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)  # Use 5x5 dictionary to find markers
        parameters = aruco.DetectorParameters_create()  # Marker detection parameters

        # lists of ids and the corners beloning to each id
        corners, ids, rejected_img_points = aruco.detectMarkers(gray, aruco_dict,
                                                                parameters=parameters,
                                                                cameraMatrix=matrix_coefficients,
                                                                distCoeff=distortion_coefficients)

        if np.all(ids is not None):  # If there are markers found by detector
            zipped = zip(ids, corners)
            ids, corners = zip(*(sorted(zipped)))
            # print(ids)
            for i in range(0, len(ids)):  # Iterate in markers
                # Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients
                rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners[i], oneSideOfTheMarker, matrix_coefficients,
                                                                           distortion_coefficients)
                if ids[i] == firstMarkerID:
                    firstRvec = rvec
                    firstTvec = tvec
                    isFirstMarkerDetected = True
                    firstMarkerCorners = corners[i]
                elif ids[i] == secondMarkerID:
                    secondRvec = rvec
                    secondTvec = tvec
                    isSecondMarkerDetected = True
                    secondMarkerCorners = corners[i]

                (rvec - tvec).any()  # get rid of that nasty numpy value array error
                # aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01)  # Draw Axis
                aruco.drawDetectedMarkers(frame, corners)  # Draw A square around the markers

            ''' First try
            if isFirstMarkerDetected and isSecondMarkerDetected:
                composedRvec, composedTvec = relativePosition(firstRvec, firstTvec, secondRvec, secondTvec)

                info = cv2.composeRT(composedRvec, composedTvec, secondRvec.T, secondTvec.T)
                composedRvec, composedTvec = info[0], info[1]
                composedRvec, composedTvec = composedRvec.T, composedTvec.T

                differenceRvec, differenceTvec = composedRvec-secondRvec, composedTvec-secondTvec

                # print infos
                print("composed Rvec: ", composedRvec)
                print("composed Tvec: ", composedTvec)
                print("Second marker Rvec: ", secondRvec)
                print("Second marker Tvec: ", secondTvec)
                print("differenceRvec: ", differenceRvec)
                print("differenceTvec: ", differenceTvec)
                print("real difference: ", euclideanDistanceOfTvecs(composedTvec[0], secondTvec[0][0]))

                # draw axis to estimated location
                aruco.drawAxis(frame, mtx, dist, composedRvec, composedTvec, 0.0115)

                realDistanceInTvec = euclideanDistanceOfTvec(secondTvec[0][0])
                difference = euclideanDistanceOfTvecs(composedTvec[0], secondTvec[0][0])
                calculatedDistance = realDistanceInTvec * (distanceBetweenTwoMarkers / difference)
                calculatedDistance = realDistanceInTvec * (distanceBetweenTwoMarkers / (secondTvec[0][0][2] - firstTvec[0][0][2]))
                print(calculatedDistance)
            '''

            if isFirstMarkerDetected and isSecondMarkerDetected:
                composedRvec, composedTvec = relativePosition(firstRvec, firstTvec, secondRvec, secondTvec)

                camerafirstRvec, cameraFirstTvec = inversePerspective(firstRvec, firstTvec)
                camerasecondRvec, camerasecondTvec = inversePerspective(secondRvec, secondTvec)

                differenceRvec, differenceTvec = camerafirstRvec - camerasecondRvec, cameraFirstTvec - camerasecondTvec

                # print infos
                print("first Rvec: ", camerafirstRvec)
                print("first Tvec: ", cameraFirstTvec)
                print("Second marker Rvec: ", camerasecondRvec)
                print("Second marker Tvec: ", camerasecondTvec)
                # print("differenceRvec: ", differenceRvec)
                # print("differenceTvec: ", differenceTvec)

                realDistanceInTvec = euclideanDistanceOfTvec(secondTvec[0][0])
                print(cv2.norm(secondTvec[0][0]))

                difference = euclideanDistanceOfTvecs(composedTvec.T[0], secondTvec[0][0])
                calculatedDistance = realDistanceInTvec * (distanceBetweenTwoMarkers / difference)
                # print(calculatedDistance)

        # Display the resulting frame
        cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('frame', image_width, image_height)
        cv2.imshow('frame', frame)

        # Wait 3 milisecoonds for an interaction. Check the key and do the corresponding job.
        key = cv2.waitKey(3) & 0xFF
        if key == ord('q'):  # Quit
            break
        elif key == ord('p'):  # print necessary information here
            pass  # Insert necessary print here

    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Aruco Marker Tracking')
    parser.add_argument('--coefficients', metavar='bool', required=True,
                        help='File name for matrix coefficients and distortion coefficients')
    parser.add_argument('--firstMarker', metavar='int', required=True,
                        help='first')
    parser.add_argument('--secondMarker', metavar='int', required=True,
                        help='second')

    # Parse the arguments and take action for that.
    args = parser.parse_args()
    firstMarkerID = int(args.firstMarker)
    secondMarkerID = int(args.secondMarker)

    if args.coefficients == '1':
        mtx, dist = loadCoefficients("test.yaml")
        ret = True
    else:
        ret, mtx, dist, rvecs, tvecs = calibrate("calib_images")
        saveCoefficients(mtx, dist, "calibrationCoefficients.yaml")
    print("Calibration is completed. Starting tracking sequence.")
    if ret:
        track(mtx, dist)
I got the answer. The problem is in the calibration. When calibrating with the chessboard, I gave the object points like this:
(0,0,0), (1,0,0) and so on.
The thing is, for pose estimation the camera should be calibrated properly. My chessboard square size was 1.5 centimeters, which means 0.015 meters, so I changed the object point matrix to:
(0,0,0), (0.015,0,0) and so on.
In other words, I told the program that the calibration should be in meters. If you do the calibration with a different object point matrix than it should be, the pose estimation fails. That was mentioned in the OpenCV documentation, but I missed it; the docs just say "you can pass them like that," and I didn't realize it would make the pose estimation fail.
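For illustration, a minimal sketch of the corrected object-point construction described above (assuming the same 9x6 inner-corner board as in the question's calibrate() function and 1.5 cm squares):
import numpy as np

square_size = 0.015  # 1.5 cm squares, expressed in meters
objp = np.zeros((6*9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2) * square_size
# objp[0] -> (0, 0, 0), objp[1] -> (0.015, 0, 0), ... so calibrateCamera works in meters
# and the tvec returned later by estimatePoseSingleMarkers comes out in meters as well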

Camera Calibration (cv.calibrateCamera and cv.InitUndistortMap)

The cv.InitUndistortMap OpenCV function is not working.
I get the intrinsic and distortion matrices, but I cannot use these values to fix my image.
I get this error:
cv.InitUndistortMap(camera_matrix,distortion_coefs,mapx,mapy)
TypeError: Argument 'cameraMatrix' must be CvMat. Use fromarray() to convert numpy arrays to CvMat
import numpy as np
import cv2
import cv
import os
import sys, getopt
from glob import glob

def calibracion():
    args, img_mask = getopt.getopt(sys.argv[1:], '', ['save=', 'debug=', 'square_size='])
    args = dict(args)
    img_mask = 'left*.jpg'
    img_names = glob(img_mask)
    debug_dir = args.get('--debug')
    square_size = float(args.get('--square_size', 1.0))

    pattern_size = (9, 6)
    pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
    pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= square_size

    obj_points = []
    img_points = []
    h, w = 0, 0
    for fn in img_names:
        print 'processing %s...' % fn,
        img = cv2.imread(fn, 0)
        h, w = img.shape[:2]
        found, corners = cv2.findChessboardCorners(img, pattern_size)
        if found:
            term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
            cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
        if debug_dir:
            vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            cv2.drawChessboardCorners(vis, pattern_size, corners, found)
            path, name, ext = splitfn(fn)
            cv2.imwrite('%s/%s_chess.bmp' % (debug_dir, name), vis)
        if not found:
            print 'chessboard not found'
            continue
        img_points.append(corners.reshape(-1, 2))
        obj_points.append(pattern_points)
        print 'ok'

    rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h))
    cv2.destroyAllWindows()
    return camera_matrix, dist_coefs

if __name__ == '__main__':
    camera_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
    dist_coefs = cv.CreateMat(4, 1, cv.CV_32FC1)
    camera_matrix, dist_coefs = calibracion()
    print "camera matrix:\n", camera_matrix  # intrinsic
    print "distortion coefficients: ", dist_coefs  # distortion

    image = cv.LoadImage('dog.jpeg')
    mapx = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
    mapy = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
    cv.InitUndistortMap(camera_matrix, dist_coefs, mapx, mapy)
    t = cv.CloneImage(image)
    cv.Remap(t, image, mapx, mapy)
    cv.ShowImage('FOTO', t)
    cv.WaitKey(0)
I couldn't run the calibration function, but the error, I think, is that you're replacing the value of the variable camera_matrix here: camera_matrix, dist_coefs = calibracion().
camera_matrix needs to be a CvMat, as it is here: camera_matrix = cv.CreateMat(3, 3, cv.CV_32FC1). If you comment out this line of code: camera_matrix, dist_coefs = calibracion(), you won't have that error message displayed again.
In a nutshell: fix the calibration function.
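Alternatively, you could keep the numpy results from calibracion() and convert them to CvMat with fromarray(), which is what the error message itself suggests. A minimal, untested sketch of that approach (the 32-bit float map images are my assumption, since the legacy cvInitUndistortMap expects CV_32FC1 maps rather than the 8-bit images in the question):
camera_matrix_np, dist_coefs_np = calibracion()
camera_matrix = cv.fromarray(camera_matrix_np)  # 3x3 CvMat
dist_coefs = cv.fromarray(dist_coefs_np)        # 1x5 CvMat of distortion coefficients

image = cv.LoadImage('dog.jpeg')
mapx = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
mapy = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
cv.InitUndistortMap(camera_matrix, dist_coefs, mapx, mapy)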
