Camera Calibration (cv.calibrateCamera and cv.InitUndistortMap) - python

The cv.InitUndistortMap OpenCV function is not working.
I get the intrinsic and distortion matrices, but I cannot use these values to undistort my image.
I get this error:
cv.InitUndistortMap(camera_matrix,distortion_coefs,mapx,mapy)
TypeError: Argument 'cameraMatrix' must be CvMat. Use fromarray() to convert numpy arrays to CvMat
import numpy as np
import cv2
import cv
import os
import sys, getopt
from glob import glob

def calibracion():
    args, img_mask = getopt.getopt(sys.argv[1:], '', ['save=', 'debug=', 'square_size='])
    args = dict(args)
    img_mask = 'left*.jpg'
    img_names = glob(img_mask)
    debug_dir = args.get('--debug')
    square_size = float(args.get('--square_size', 1.0))

    pattern_size = (9, 6)
    pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
    pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= square_size

    obj_points = []
    img_points = []
    h, w = 0, 0
    for fn in img_names:
        print 'processing %s...' % fn,
        img = cv2.imread(fn, 0)
        h, w = img.shape[:2]
        found, corners = cv2.findChessboardCorners(img, pattern_size)
        if found:
            term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
            cv2.cornerSubPix(img, corners, (5, 5), (-1, -1), term)
        if debug_dir:
            vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
            cv2.drawChessboardCorners(vis, pattern_size, corners, found)
            path, name, ext = splitfn(fn)
            cv2.imwrite('%s/%s_chess.bmp' % (debug_dir, name), vis)
        if not found:
            print 'chessboard not found'
            continue
        img_points.append(corners.reshape(-1, 2))
        obj_points.append(pattern_points)
        print 'ok'

    rms, camera_matrix, dist_coefs, rvecs, tvecs = cv2.calibrateCamera(obj_points, img_points, (w, h))
    cv2.destroyAllWindows()
    return camera_matrix, dist_coefs

if __name__ == '__main__':
    camera_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
    dist_coefs = cv.CreateMat(4, 1, cv.CV_32FC1)
    camera_matrix, dist_coefs = calibracion()
    print "camera matrix:\n", camera_matrix  # intrinsic
    print "distortion coefficients: ", dist_coefs  # distortion
    image = cv.LoadImage('dog.jpeg')
    mapx = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
    mapy = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 1)
    cv.InitUndistortMap(camera_matrix, dist_coefs, mapx, mapy)
    t = cv.CloneImage(image)
    cv.Remap(t, image, mapx, mapy)
    cv.ShowImage('FOTO', t)
    cv.WaitKey(0)

I couldn't run the calibration function, but I think the error is that you're overwriting the value of the variable camera_matrix here: camera_matrix, dist_coefs = calibracion().
camera_matrix needs to be a CvMat, as created here: camera_matrix = cv.CreateMat(3, 3, cv.CV_32FC1). If you comment out the line camera_matrix, dist_coefs = calibracion(), that error message won't be displayed again.
In a nutshell: fix the calibration function.
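The error message itself names the missing step: cv2.calibrateCamera returns NumPy arrays, while the old cv.InitUndistortMap wants CvMat/IplImage objects, so the results have to be wrapped with cv.fromarray() first. A minimal sketch of that bridge (assuming calibracion() keeps returning the NumPy results, and creating the maps as the 32-bit float images that InitUndistortMap fills):

camera_matrix, dist_coefs = calibracion()   # NumPy arrays from cv2.calibrateCamera

# wrap the NumPy results as CvMat, as the TypeError suggests
cam_mat = cv.fromarray(np.ascontiguousarray(camera_matrix, dtype=np.float32))
dist = cv.fromarray(np.ascontiguousarray(dist_coefs.reshape(-1, 1), dtype=np.float32))

image = cv.LoadImage('dog.jpeg')
mapx = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
mapy = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_32F, 1)
cv.InitUndistortMap(cam_mat, dist, mapx, mapy)

undistorted = cv.CloneImage(image)
cv.Remap(image, undistorted, mapx, mapy)    # source first, destination second
cv.ShowImage('FOTO', undistorted)
cv.WaitKey(0)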

Related

my videowriter in opencv is not saving any output

Using OpenCV in Python to tag an object in a video, write that new output, and save it as an .avi file. Everything is working fine except for the video writer: I cannot get it to write and/or save the output. It looks the same as what I've done before, so this is driving me insane. Any ideas on what I might be doing wrong are greatly appreciated.
import numpy as np
import cv2
def draw_pyramid(pyramid, H):
pyramid_new = []
i=0
for p in pyramid:
if i !=4:
temp_p = p
temp_p = np.append(temp_p, 1)
M_ext = H[0:3, :]
p = M_ext @ temp_p
pyramid_new.append((p[0], p[1], p[2]))
i += 1
else:
pyramid_new.append((p[0], p[1], p[2]))
i +=1
return pyramid_new
cap = cv2.VideoCapture('hw4.avi')
got_image, img = cap.read()
videoWriter = cv2.VideoWriter("switch.avi",
fourcc=cv2.VideoWriter_fourcc('M','J','P','G'),
fps=30.0,
frameSize=(img.shape[1], img.shape[0]))
fx, fy, cx, cy = 675.0, 675.0, 320.0, 240.0
k = np.array([[fx,0.0,cx],[0.0,fy,cy],[0.0,0.0,1.0]])
while True:
got_img, img = cap.read()
if not got_img:
break
else:
gray_image = cv2.cvtColor(src=img,
code=cv2.COLOR_BGR2GRAY) # convert BGR to grayscale
output_thresh, binary_image = cv2.threshold(src=gray_image,
maxval=255,
type=cv2.THRESH_OTSU, # determine threshold automatically from image
thresh=0) # ignore this if using THRESH_OTSU
arucoDict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100)
corners, ids, _ = cv2.aruco.detectMarkers(image=img,
dictionary=arucoDict)
if ids is not None and corners:
cv2.aruco.drawDetectedMarkers(image=img,
corners=corners,
ids=ids,
borderColor=(0, 255, 0))
rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(corners=corners,
markerLength=2.0,
cameraMatrix=k,
distCoeffs=None)
cv2.aruco.drawAxis(image=img,
cameraMatrix=k,
distCoeffs=None,
rvec=rvecs,
tvec=tvecs,
length=2.0)
if ids[0][0]==1:
pyr = np.array(((-3.5/3,-1.5/3,-1.5/3,-3.5/3,-2.5),
(-1, -1, -1/3, -1/3, -2),
(-4/3, -4/3, -4/3, -4/3, -5))).T
R = cv2.Rodrigues(rvecs)[0]
r = R[0]
# rvec_m_c = rvecs[0] # This is a 1x3 rotation vector
tm_c = np.array(tvecs[0]).T # This is a 1x3 translation vector
t = np.array([[-2.5, -2, -5]]).T
H = np.block([[R, t], [0, 0, 0, 1]])
if ids[0][0] == 0:
pyr = np.array(((1.5/3, 3.5/3, 3.5/3, 1.5/3, 2.5),
(-1,-1,-1/3, -1/3, -2),
(-4/3, -4/3, -4/3, -4/3, -1))).T
R = cv2.Rodrigues(rvecs)[0]
# r = R[0]
tm_c = np.array(tvecs[0]).T # This is a 1x3 translation vector
t = np.array([[2.5, -2, -1]]).T
H = np.block([[R, t], [0,0,0,1]])
pyramid_new = draw_pyramid(pyr, H)
pyramid_new = np.asarray(pyramid_new)
pyramid_new = pyramid_new.reshape(-1, 3)
dist_coeff = np.zeros((4,1))
pyramid_points, jacobian = cv2.projectPoints(pyramid_new,
rvecs,
tvecs,
np.array(k),
distCoeffs=dist_coeff)
pyramid_points = pyramid_points.astype(int)
froml = [1,2,3,0,0,1,2,3]
fromr = [4,4,4,4,1,2,3,0]
color = (0,0,255)
for i, j in zip(froml, fromr):
cv2.line(img, tuple(pyramid_points[i][0]),
tuple(pyramid_points[j][0]), color, 1)
videoWriter.write(img)
cv2.imshow('switch', img)
cv2.waitKey(30)
videoWriter.release()
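For comparison, a minimal write loop that simply copies the input to a new MJPG-encoded .avi (file names are placeholders) looks roughly like this; the frame size passed to VideoWriter must match the frames that are actually written, and release() is what finalizes the file:

import cv2

cap = cv2.VideoCapture('hw4.avi')
ok, frame = cap.read()
assert ok, "could not read the first frame"

writer = cv2.VideoWriter('copy.avi',
                         cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                         30.0,
                         (frame.shape[1], frame.shape[0]))  # (width, height)

while ok:
    writer.write(frame)   # frames must be BGR and exactly the declared size
    ok, frame = cap.read()

writer.release()          # without this the .avi may end up empty or unplayable
cap.release()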

OpenCV Error: Expected Ptr<cv::UMat> for argument '%s'

I am new to OpenCV and am attempting to use OpenCV for Python 3.7 to stitch five images together into one single image. The source code I originally used only allowed two images to be stitched together, so I had to modify it to allow five webcam images to be stitched. However, I am presented with this error:
Traceback (most recent call last):
File "C:\Users\colby\OneDrive\Desktop\New folder\photomosaic.py", line 80, in <module>
img3 = cv2.drawMatches(kpsA,secondImg,kpsB,thirdImg, kpsC, forthImg, kpsD, fifthImg, kpsE, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
TypeError: Expected Ptr<cv::UMat> for argument '%s'
My Code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
import imageio
import imutils
import cameracapture
cv2.ocl.setUseOpenCL(False)
feature_extractor = 'orb' # one of 'sift', 'surf', 'brisk', 'orb'
feature_matching = 'bf'
firstImg = imageio.imread('cap1.jpg')
firstImg_gray = cv2.cvtColor(firstImg, cv2.COLOR_RGB2GRAY)
secondImg = imageio.imread('cap2.jpg')
secondImg_gray = cv2.cvtColor(secondImg, cv2.COLOR_RGB2GRAY)
thirdImg = imageio.imread('cap3.jpg')
thirdImg_gray = cv2.cvtColor(thirdImg, cv2.COLOR_RGB2GRAY)
forthImg = imageio.imread('cap4.jpg')
forthImg_gray = cv2.cvtColor(forthImg, cv2.COLOR_RGB2GRAY)
fifthImg = imageio.imread('cap5.jpg')
fifthImg_gray = cv2.cvtColor(fifthImg, cv2.COLOR_RGB2GRAY)
plt.show()
def detectAndDescribe(image, method=None):
assert method is not None, "You need to define a feature detection method. Values are: 'sift', 'surf'"
if method == 'sift':
descriptor = cv2.xfeatures2d.SIFT_create()
elif method == 'surf':
descriptor = cv2.xfeatures2d.SURF_create()
elif method == 'brisk':
descriptor = cv2.BRISK_create()
elif method == 'orb':
descriptor = cv2.ORB_create()
# get keypoints and descriptors
(kps, features) = descriptor.detectAndCompute(image, None)
return (kps, features)
kpsA, featuresA = detectAndDescribe(firstImg_gray, method=feature_extractor)
kpsB, featuresB = detectAndDescribe(secondImg_gray, method=feature_extractor)
kpsC, featuresC = detectAndDescribe(thirdImg_gray, method=feature_extractor)
kpsD, featuresD = detectAndDescribe(forthImg_gray, method=feature_extractor)
kpsE, featuresE = detectAndDescribe(fifthImg_gray, method=feature_extractor)
def createMatcher(method,crossCheck):
if method == 'sift' or method == 'surf':
bf = cv2.BFMatcher(cv2.NORM_L2, crossCheck=crossCheck)
elif method == 'orb' or method == 'brisk':
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=crossCheck)
return bf
def matchKeyPointsBF(featuresA, featuresB, featuresC, featuresD, featuresE, method):
bf = createMatcher(method, crossCheck=True)
# Match descriptors.
best_matches = bf.match(featuresA,featuresB)
best_matchs = bf.match(featuresA,featuresB + featuresC + featuresD + featuresE)
# Sort the features in order of distance.
# The points with small distance (more similarity) are ordered first in the vector
rawMatches = sorted(best_matches, key = lambda x:x.distance)
print("Raw matches (Brute force):", len(rawMatches))
return rawMatches
def matchKeyPointsKNN(featuresA, featuresB, featuresC, featuresD, featuresE, ratio, method):
bf = createMatcher(method, crossCheck=False)
# compute the raw matches and initialize the list of actual matches
rawMatches = bf.knnMatch(featuresA,featuresB, featuresC, featuresD, featuresE, 2)
print("Raw matches (knn):", len(rawMatches))
matches = []
# loop over the raw matches
for m,n in rawMatches:
# ensure the distance is within a certain ratio of each
# other (i.e. Lowe's ratio test)
if m.distance < n.distance * ratio:
matches.append(m)
return matches
print("Using: {} feature matcher".format(feature_matching))
fig = plt.figure(figsize=(20,8))
if feature_matching == 'bf':
matches = matchKeyPointsBF(featuresA,featuresB, featuresC, featuresD,
featuresE, method=feature_extractor)
img3 = cv2.drawMatches(kpsA,secondImg,kpsB,thirdImg, kpsC, forthImg,
kpsD, fifthImg, kpsE, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
elif feature_matching == 'knn':
matches = matchKeyPointsKNN(featuresA,featuresB, featuresC, featuresD,
featuresE, ratio=0.75, method=feature_extractor)
img3 = cv2.drawMatches(firstImg,kpsA,secondImg,kpsB,thirdImg, kpsC,
forthImg, kpsD, fifthImg, kpsE, np.random.choice(matches,100),
None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
plt.imshow(img3)
plt.show()
def getHomography(kpsA, kpsB, kpsC, kpsD, kpsE, featuresA, featuresB,
featuresC, featuresD, featuresE, matches, reprojThresh):
# convert the keypoints to numpy arrays
kpsA = np.float32([kp.pt for kp in kpsA])
kpsB = np.float32([kp.pt for kp in kpsB])
kpsC = np.float32([kp.pt for kp in kpsC])
kpsD = np.float32([kp.pt for kp in kpsD])
kpsE = np.float32([kp.pt for kp in kpsE])
if len(matches) > 4:
# construct the two sets of points
ptsA = np.float32([kpsA[m.firstIdx] for m in matches])
ptsB = np.float32([kpsB[m.secondIdx] for m in matches])
ptsC = np.float32([kpsC[m.thirdIdx] for m in matches])
ptsD = np.float32([kpsD[m.forthIdx] for m in matches])
ptsE = np.float32([kpsE[m.fifthIdx] for m in matches])
# estimate the homography between the sets of points
(H, status) = cv2.findHomography(ptsA,ptsB,ptsC,ptsD,ptsE, cv2.RANSAC, reprojThresh)
return (matches, H, status)
else:
return None
M = getHomography(kpsA, kpsB, kpsC, kpsD, kpsE, featuresA, featuresB,
featuresC, featuresD, featuresE, matches, reprojThresh=4)
if M is None:
print("Error!")
(matches, H, status) = M
print(H)
# Apply panorama correction
width = trainImg.shape[1] + queryImg.shape[1]
height = trainImg.shape[0] + queryImg.shape[0]
result = cv2.warpPerspective(trainImg, H, (width, height))
result[0:queryImg.shape[0], 0:queryImg.shape[1]] = queryImg
plt.figure(figsize=(20,10))
plt.imshow(result)
plt.axis('off')
plt.show()
# transform the panorama image to grayscale and threshold it
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
# Finds contours from the binary image
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
# get the maximum contour area
c = max(cnts, key=cv2.contourArea)
# get a bbox from the contour area
(x, y, w, h) = cv2.boundingRect(c)
# crop the image to the bbox coordinates
result = result[y:y + h, x:x + w]
# show the cropped image
plt.figure(figsize=(20,10))
plt.imshow(result)
The expected output:
I expect the program to be able to take five pictures with my webcam (which is imported through cameracapture), and then stitch them together to make one image. Any advice or fixes to this error would be greatly appreciated.
firstImg = imageio.imread('cap1.jpg')
print(type(firstImg))
If the output is
<class 'PIL.JpegImagePlugin.JpegImageFile'>
then convert it to an array using:
firstImg = np.asarray(firstImg)
Load this firstImg and convert it to a grayscale image.
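Put together, the loading step from this answer might look like the following sketch (same file name as in the question):

import numpy as np
import imageio
import cv2

firstImg = imageio.imread('cap1.jpg')
print(type(firstImg))              # e.g. <class 'PIL.JpegImagePlugin.JpegImageFile'>

firstImg = np.asarray(firstImg)    # force a plain NumPy array
firstImg_gray = cv2.cvtColor(firstImg, cv2.COLOR_RGB2GRAY)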

Calculating the distance and yaw between ArUco marker and camera?

I am trying to calculate the exact distance (a 3 cm error rate is acceptable) between an ArUco marker and the camera. I use Python, OpenCV and aruco. I can detect the markers (the marker side is 0.023 meters, which is 2.3 cm), but I can't interpret the distance, because for a 40 cm distance the norm of the translation vector gives 1 meter. I am so confused about this. Can anyone help? Full code (sorry, not documented well):
import numpy as np
import cv2
import cv2.aruco as aruco
import glob
import argparse
import math
# Marker id infos. Global to access everywhere. It is unnecessary to change it to local.
firstMarkerID = None
secondMarkerID = None
cap = cv2.VideoCapture(0)
image_width = 0
image_height = 0
#hyper parameters
distanceBetweenTwoMarkers = 0.0245 # in meters, 2.45 cm
oneSideOfTheMarker = 0.023 # in meters, 2.3 cm
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def calibrate(dirpath):
""" Apply camera calibration operation for images in the given directory path. """
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(8,6,0)
objp = np.zeros((6*9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob(dirpath+'/*.jpg')
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
# If found, add object points, image points (after refining them)
if ret:
objpoints.append(objp)
corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
imgpoints.append(corners2)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (9, 6), corners2, ret)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
return [ret, mtx, dist, rvecs, tvecs]
def saveCoefficients(mtx, dist, path):
""" Save the camera matrix and the distortion coefficients to given path/file. """
cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_WRITE)
cv_file.write("camera_matrix", mtx)
cv_file.write("dist_coeff", dist)
# note you *release* you don't close() a FileStorage object
cv_file.release()
def loadCoefficients(path):
""" Loads camera matrix and distortion coefficients. """
# FILE_STORAGE_READ
cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)
# note we also have to specify the type to retrieve other wise we only get a
# FileNode object back instead of a matrix
camera_matrix = cv_file.getNode("camera_matrix").mat()
dist_matrix = cv_file.getNode("dist_coeff").mat()
# Debug: print the values
# print("camera_matrix : ", camera_matrix.tolist())
# print("dist_matrix : ", dist_matrix.tolist())
cv_file.release()
return [camera_matrix, dist_matrix]
def inversePerspective(rvec, tvec):
""" Applies perspective transform for given rvec and tvec. """
rvec, tvec = rvec.reshape((3, 1)), tvec.reshape((3, 1))
R, _ = cv2.Rodrigues(rvec)
R = np.matrix(R).T
invTvec = np.dot(R, np.matrix(-tvec))
invRvec, _ = cv2.Rodrigues(R)
invTvec = invTvec.reshape((3, 1))
invTvec = invTvec.reshape((3, 1))
return invRvec, invTvec
def make_1080p():
global image_width
global image_height
image_width = 1920
image_height = 1080
change_res(image_width, image_height)
def make_720p():
global image_width
global image_height
image_width = 1280
image_height = 720
change_res(image_width, image_height)
def make_480p():
global image_width
global image_height
image_width = 640
image_height = 480
change_res(image_width, image_height)
def change_res(width, height):
cap.set(3, width)
cap.set(4, height)
def relativePosition(rvec1, tvec1, rvec2, tvec2):
""" Get relative position for rvec2 & tvec2. Compose the returned rvec & tvec to use composeRT with rvec2 & tvec2 """
rvec1, tvec1 = rvec1.reshape((3, 1)), tvec1.reshape((3, 1))
rvec2, tvec2 = rvec2.reshape((3, 1)), tvec2.reshape((3, 1))
# Inverse the second marker, the right one in the image
invRvec, invTvec = inversePerspective(rvec2, tvec2)
info = cv2.composeRT(rvec1, tvec1, invRvec, invTvec)
composedRvec, composedTvec = info[0], info[1]
composedRvec = composedRvec.reshape((3, 1))
composedTvec = composedTvec.reshape((3, 1))
return composedRvec, composedTvec
def euclideanDistanceOfTvecs(tvec1, tvec2):
return math.sqrt(math.pow(tvec1[0]-tvec2[0], 2) + math.pow(tvec1[1]-tvec2[1], 2) + math.pow(tvec1[2]-tvec2[2], 2))
def euclideanDistanceOfTvec(tvec):
return euclideanDistanceOfTvecs(tvec, [0, 0, 0])
def draw(img, imgpts, color):
""" draw a line between given two points. """
imgpts = np.int32(imgpts).reshape(-1, 2)
for pointf in range(len(imgpts)):
for points in range(len(imgpts)):
img = cv2.line(img, tuple(imgpts[pointf]), tuple(imgpts[points]), color, 3)
return img
def track(matrix_coefficients, distortion_coefficients):
global image_width
global image_height
""" Real time ArUco marker tracking. """
needleComposeRvec, needleComposeTvec = None, None # Composed for needle
ultraSoundComposeRvec, ultraSoundComposeTvec = None, None # Composed for ultrasound
savedNeedleRvec, savedNeedleTvec = None, None # Pure Composed
savedUltraSoundRvec, savedUltraSoundTvec = None, None # Pure Composed
TcomposedRvecNeedle, TcomposedTvecNeedle = None, None
TcomposedRvecUltrasound, TcomposedTvecUltrasound = None, None
make_480p()
while True:
isFirstMarkerDetected = False
isSecondMarkerDetected = False
ret, frame = cap.read()
# operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Change grayscale
aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250) # Use 5x5 dictionary to find markers
parameters = aruco.DetectorParameters_create() # Marker detection parameters
# lists of ids and the corners beloning to each id
corners, ids, rejected_img_points = aruco.detectMarkers(gray, aruco_dict,
parameters=parameters,
cameraMatrix=matrix_coefficients,
distCoeff=distortion_coefficients)
if np.all(ids is not None): # If there are markers found by detector
zipped = zip(ids, corners)
ids, corners = zip(*(sorted(zipped)))
# print(ids)
for i in range(0, len(ids)): # Iterate in markers
# Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients
rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners[i], oneSideOfTheMarker, matrix_coefficients,
distortion_coefficients)
if ids[i] == firstMarkerID:
firstRvec = rvec
firstTvec = tvec
isFirstMarkerDetected = True
firstMarkerCorners = corners[i]
elif ids[i] == secondMarkerID:
secondRvec = rvec
secondTvec = tvec
isSecondMarkerDetected = True
secondMarkerCorners = corners[i]
(rvec - tvec).any() # get rid of that nasty numpy value array error
# aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01) # Draw Axis
aruco.drawDetectedMarkers(frame, corners) # Draw A square around the markers
''' First try
if isFirstMarkerDetected and isSecondMarkerDetected:
composedRvec, composedTvec = relativePosition(firstRvec, firstTvec, secondRvec, secondTvec)
info = cv2.composeRT(composedRvec, composedTvec, secondRvec.T, secondTvec.T)
composedRvec, composedTvec = info[0], info[1]
composedRvec, composedTvec = composedRvec.T, composedTvec.T
differenceRvec, differenceTvec = composedRvec-secondRvec, composedTvec-secondTvec
# print infos
print("composed Rvec: ", composedRvec)
print("composed Tvec: ", composedTvec)
print("Second marker Rvec: ", secondRvec)
print("Second marker Tvec: ", secondTvec)
print("differenceRvec: ", differenceRvec)
print("differenceTvec: ", differenceTvec)
print("real difference: ", euclideanDistanceOfTvecs(composedTvec[0], secondTvec[0][0]))
# draw axis to estimated location
aruco.drawAxis(frame, mtx, dist, composedRvec, composedTvec, 0.0115)
realDistanceInTvec = euclideanDistanceOfTvec(secondTvec[0][0])
difference = euclideanDistanceOfTvecs(composedTvec[0], secondTvec[0][0])
calculatedDistance = realDistanceInTvec * (distanceBetweenTwoMarkers / difference)
calculatedDistance = realDistanceInTvec * (distanceBetweenTwoMarkers / (secondTvec[0][0][2] - firstTvec[0][0][2]))
print(calculatedDistance)
'''
if isFirstMarkerDetected and isSecondMarkerDetected:
composedRvec, composedTvec = relativePosition(firstRvec, firstTvec, secondRvec, secondTvec)
camerafirstRvec, cameraFirstTvec = inversePerspective(firstRvec, firstTvec)
camerasecondRvec, camerasecondTvec = inversePerspective(secondRvec, secondTvec)
differenceRvec, differenceTvec = camerafirstRvec - camerasecondRvec, cameraFirstTvec - camerasecondTvec
# print infos
print("first Rvec: ", camerafirstRvec)
print("first Tvec: ", cameraFirstTvec)
print("Second marker Rvec: ", camerasecondRvec)
print("Second marker Tvec: ", camerasecondTvec)
# print("differenceRvec: ", differenceRvec)
# print("differenceTvec: ", differenceTvec)
realDistanceInTvec = euclideanDistanceOfTvec(secondTvec[0][0])
print(cv2.norm(secondTvec[0][0]))
difference = euclideanDistanceOfTvecs(composedTvec.T[0], secondTvec[0][0])
calculatedDistance = realDistanceInTvec * (distanceBetweenTwoMarkers / difference)
# print(calculatedDistance)
# Display the resulting frame
cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
cv2.resizeWindow('frame', image_width, image_height)
cv2.imshow('frame', frame)
# Wait 3 milisecoonds for an interaction. Check the key and do the corresponding job.
key = cv2.waitKey(3) & 0xFF
if key == ord('q'): # Quit
break
elif key == ord('p'): # print necessary information here
pass # Insert necessary print here
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Aruco Marker Tracking')
parser.add_argument('--coefficients', metavar='bool', required=True,
help='File name for matrix coefficients and distortion coefficients')
parser.add_argument('--firstMarker', metavar='int', required=True,
help='first')
parser.add_argument('--secondMarker', metavar='int', required=True,
help='second')
# Parse the arguments and take action for that.
args = parser.parse_args()
firstMarkerID = int(args.firstMarker)
secondMarkerID = int(args.secondMarker)
if args.coefficients == '1':
mtx, dist = loadCoefficients("test.yaml")
ret = True
else:
ret, mtx, dist, rvecs, tvecs = calibrate("calib_images")
saveCoefficients(mtx, dist, "calibrationCoefficients.yaml")
print("Calibration is completed. Starting tracking sequence.")
if ret:
track(mtx, dist)
I got the answer. The problem is in the calibration. When calibrating with the chessboard, I gave the object points like this:
(0,0,0), (1,0,0) and so on.
The thing is that for pose estimation the camera has to be calibrated in the right units. My chessboard square size was 1.5 centimeters, which means 0.015 meters, so I changed the object point matrix to:
(0,0,0), (0.015,0,0) and so on.
That way I told the program that the calibration is in meters. If you do the calibration with a different object point matrix than the real one, the pose estimation fails. This was mentioned in the OpenCV documentation, but I missed it: the docs just say something like "you can pass them like that", and I didn't realize it would make the pose estimation fail.
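In code, the change amounts to scaling the chessboard object points by the real square size (0.015 m here) instead of using unit squares; a minimal sketch of the object-point setup in calibrate():

square_size = 0.015  # chessboard square edge in meters (1.5 cm)

# object points in meters: (0,0,0), (0.015,0,0), (0.030,0,0), ...
objp = np.zeros((6*9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2) * square_size

With the calibration done this way and markerLength also given in meters, the tvec returned by estimatePoseSingleMarkers comes out in meters, so np.linalg.norm(tvec) is the camera-to-marker distance directly.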

OpenCV: Error (-215) depth == CV_8U || depth == CV_16U || depth == CV_32F in function cv::cvtColor

While working on a motion capture program, I ran into this beautiful error. It appears that there is a problem with the depth; however, nowhere in the code should this be a problem.
in get_x_centroid
grayscale_image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
error: C:\projects\opencv-python\opencv\modules\imgproc\src\color.cpp:10600: error: (-215) depth == CV_8U || depth == CV_16U || depth == CV_32F in function cv::cvtColor
I researched this error, but no one had a solution for a live video feed. Please help, this is due soon. I will attach all of the code below.
import time
import cv2
import numpy as np
cap =cv2.VideoCapture(0)
def on_mouse(event, x, y, flags, params):
if event == cv2.EVENT_LBUTTONDOWN:
# print "Left mouse button clicked at: x =", x, ",y=", y
hsv = hsv_img[y,x]
#print "hue:", hsv_img[0],"saturation:", hsv[1],"value:",hsv[2]
cv2.namedWindow("Orginal")
cv2.namedWindow("Red")
cv2.namedWindow("Blue")
cv2.namedWindow("Green")
cv2.setMouseCallback("Original", on_mouse, param = None)
#Find the actual numbers,, captureimage3 has the clicky
lower_b = [163,180,183]
upper_b = [129,132,255]
lower_g = [109,93,100]
upper_g = [95,98,255]
lower_r = [97,10,24]
upper_r = [180,140,255]
lower_R = [0,70,195]
upper_R = [20,140,255]
lower_y = [122,113,116]
upper_y = [170,65, 255]
lower_b = np.array(lower_b,dtype = "uint8")
upper_b = np.array(upper_b,dtype = "uint8")
lower_g = np.array(lower_g,dtype = "uint8")
upper_g = np.array(upper_g,dtype = "uint8")
lower_r = np.array(lower_r,dtype = "uint8")
upper_r = np.array(upper_r,dtype = "uint8")
lower_R = np.array(lower_R,dtype = "uint8")
upper_R = np.array(upper_R,dtype = "uint8")
lower_y = np.array(lower_y,dtype = "uint8")
upper_y = np.array(upper_y,dtype = "uint8")
kernel = np.ones((0,0), np.uint8)
def show_filtered_image(window_name,lower,upper):
new_mask = cv2.inRange(hsv_img,lower,upper)
filtered_image = cv2.bitwise_or(frame, frame, mask = new_mask)
eroded_image = cv2.erode(filtered_image, kernel, iterations=1)
dilated_image = cv2.dilate(eroded_image, kernel, iterations=1)
cv2.imshow(window_name, dilated_image)
def get_x_centroid(image):
grayscale_image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
moment_values = cv2.moments(grayscale_image)
if moment_values["m00"] !=0:
x_centroid = moment_values ["m10"]/moment_values["m00"]
else:
x_centroid = 1
return x_centroid
def get_y_centroid(image):
grayscale_image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
moment_values = cv2.moments(grayscale_image)
if moment_values["m00"] !=0:
y_centroid = moment_values ["m01"]/moment_values["m00"]
else:
y_centroid = 1
return y_centroid
k = cv2.waitKey(5) & 0xFF
while (k != 27):
ret, frame = cap.read()
hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
cv2.imshow("Original", frame)
show_filtered_image("Blue", lower_b, upper_b)
show_filtered_image("Green", lower_g, upper_g)
show_filtered_image("Yellow", lower_y, upper_y)
blue_mask = cv2.inRange(hsv_img, lower_b, upper_b)
filtered_b = cv2.bitwise_or(frame, frame, mask = blue_mask)
green_mask = cv2.inRange(hsv_img, lower_g, upper_g)
filtered_g = cv2.bitwise_or(frame, frame, mask = green_mask)
yellow_mask = cv2.inRange(hsv_img, lower_y, upper_y)
filtered_y = cv2.bitwise_or(frame, frame, mask = yellow_mask)
#filtered_combo_red = cv2.bitwise_or(filtered_r, filtered_R)
#cv2.imshow("Red", filtered_combo_red)
#red_x = get_x_centroid(filtered_combo_red)
#red_y = get_y_centroid(filtered_combo_red)
blue_img = get_x_centroid(filtered_b)
blue_img = get_y_centroid(filtered_b)
green_img = get_x_centroid(filtered_g)
green_img = get_y_centroid(filtered_g)
yellow_img = get_x_centroid(filtered_y)
yellow_img = get_y_centroid(filtered_y)
#print "Red X=", red_x," Red Y=", red_y
#red_x = int(get_x_centroid(filtered_combo_red))
#red_y = int(get_y_centroid(filtered_combo_red))
blue_x = int(get_x_centroid(blue_img))
blue_y = int(get_y_centroid(blue_img))
green_x = int(get_x_centroid(green_img))
green_y = int(get_y_centroid(green_img))
yellow_x = int(get_x_centroid(yellow_img))
yellow_y = int(get_y_centroid(yellow_img))
cv2.line(frame,(red_x, red_y),(green_x, green_y), (255,255,255), 3)
cv2.line(frame,(blue_x, blue_y),(yellow_x, yellow_y), (255,255,255), 3)
key_pressed = cv2.waitKey(30)
if key_pressed == 27:
break
The error message implies that cv2.cvtColor expects an image with a (color) depth of 8 or 16 bit unsigned int (8U, 16U) or 32 bit float (32F).
The image needs to be cast to, or created with, a type that cv2.cvtColor expects, for example uint8 (CV_8U is an alias for the uint8 data type).
A conversion is needed. Try:
import numpy as np
image1copy = np.uint8(image)
grayscale_image = cv2.cvtColor(image1copy, cv2.COLOR_HSV2BGR)
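Applied to the helper from the question, the cast might look like this sketch (keeping the original COLOR_BGR2GRAY conversion):

def get_x_centroid(image):
    # cvtColor only accepts 8U/16U/32F input, so force the array to uint8 first
    image_8u = np.uint8(image)
    grayscale_image = cv2.cvtColor(image_8u, cv2.COLOR_BGR2GRAY)
    moment_values = cv2.moments(grayscale_image)
    if moment_values["m00"] != 0:
        return moment_values["m10"] / moment_values["m00"]
    return 1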
BTW:
OpenCV 2.4.13.6 documentation » OpenCV API Reference » core. The Core Functionality »
bitwise_or
Calculates the per-element bit-wise disjunction of two arrays or an array and a scalar.
C++: void bitwise_or(InputArray src1, InputArray src2, OutputArray dst, InputArray mask=noArray())
and
Python: cv2.bitwise_or(src1, src2[, dst[, mask]]) → dst

3D reconstruction and distance measurement

Hi guys, I have been working on a small program in Python using the OpenCV library and two webcams, so that I can measure the distance between these two cameras and the object right in front of them (using the disparity map). When I run the program, I get the result in a matrix at the end, but what I get is not a single number (which is supposed to be the distance) but many different numbers. Besides, I always get one number that doesn't change even if I change the view. Can anybody tell me why?
here is the code:
import numpy as np
import cv2 as cv
import cv2.cv as cv1
from VideoCapture import Device
import os
def caliLeftCam():
args, img_mask = getopt.getopt(sys.argv[1:], '', ['save=', 'debug=', 'square_size='])
args = dict(args)
try: img_mask = img_mask[0]
except: img_mask = '../cpp/img*.jpg'
img_names = glob(img_mask)
debug_dir = args.get('--debug')
square_size = float(args.get('--square_size', 1.0))
pattern_size = (7, 5)
pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= square_size
obj_points = []
img_pointsL = []
h, w = 0, 0
for fn in img_names:
print "processing %s..." % fn,
imgL = cv.imread(fn, 0)
h, w = imgL.shape[:2]
found, corners = cv.findChessboardCorners(imgL, pattern_size)
if found:
term = ( cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1 )
cv.cornerSubPix(imgL, corners, (5, 5), (-1, -1), term)
if debug_dir:
vis = cv.cvtColor(imgL, cv.COLOR_GRAY2BGR)
cv.drawChessboardCorners(vis, pattern_size, corners, found)
path, name, ext = splitfn(fn)
cv.imwrite('%s/%s_chess.bmp' % (debug_dir, name), vis)
if not found:
print "chessboard not found"
continue
img_pointsL.append(corners.reshape(-1, 2))
obj_points.append(pattern_points)
print 'ok'
rmsL, cameraL_matrix, dist_coefsL, rvecsL, tvecsL = cv.calibrateCamera(obj_points, img_pointsL, (w, h))
print "RMSL:", rmsL
print "Left camera matrix:\n", cameraL_matrix
print "distortion coefficients: ", dist_coefsL.ravel()
newcameramtxL, roi=cv.getOptimalNewCameraMatrix(cameraL_matrix,dist_coefsL,(w,h),1,(w,h))
#undistort
mapxL,mapyL = cv.initUndistortRectifyMap(cameraL_matrix,dist_coefsL,None,newcameramtxL,(w,h),5)
dstL = cv.remap(imgL,mapxL,mapyL,cv.INTER_LINEAR)
return img_pointsL, cameraL_matrix, dist_coefsL
def caliRightCam():
args, img_mask = getopt.getopt(sys.argv[1:], '', ['save=', 'debug=', 'square_size='])
args = dict(args)
try: img_mask = img_mask[0]
except: img_mask = '../cpp/ph*.jpg'
img_names = glob(img_mask)
debug_dir = args.get('--debug')
square_size = float(args.get('--square_size', 1.0))
pattern_size = (7, 5)
pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= square_size
obj_points = []
img_pointsR = []
h, w = 0, 0
for fn in img_names:
print "processing %s..." % fn,
imgR = cv.imread(fn, 0)
h, w = imgR.shape[:2]
found, corners = cv.findChessboardCorners(imgR, pattern_size)
if found:
term = ( cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1 )
cv.cornerSubPix(imgR, corners, (5, 5), (-1, -1), term)
if debug_dir:
vis = cv.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv.drawChessboardCorners(vis, pattern_size, corners, found)
path, name, ext = splitfn(fn)
cv.imwrite('%s/%s_chess.bmp' % (debug_dir, name), vis)
if not found:
print "chessboard not found"
continue
img_pointsR.append(corners.reshape(-1, 2))
obj_points.append(pattern_points)
print 'ok'
rmsR, cameraR_matrix, dist_coefsR, rvecsR, tvecsR = cv.calibrateCamera(obj_points, img_pointsR, (w, h))
print "RMSR:", rmsR
print "Right camera matrix:\n", cameraR_matrix
print "distortion coefficients: ", dist_coefsR.ravel()
newcameramtxR, roi=cv.getOptimalNewCameraMatrix(cameraR_matrix,dist_coefsR,(w,h),1,(w,h))
# undistort
mapxR,mapyR = cv.initUndistortRectifyMap(cameraR_matrix,dist_coefsR,None,newcameramtxR,(w,h),5)
dstR = cv.remap(imgR,mapxR,mapyR,cv.INTER_LINEAR)
return img_pointsR,obj_points, cameraR_matrix, dist_coefsR
def Pics():
vc = cv.VideoCapture(2)
retVal, frame = vc.read();
while True :
if frame is not None:
imgray = cv.cvtColor(frame,cv.COLOR_BGR2GRAY)
ret,thresh = cv.threshold(imgray,127,255,1)
contours, hierarchy = cv.findContours(thresh,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)
cv.namedWindow("threshold")
cv.namedWindow("Camera")
#cv2.drawContours(frame, contours, -1, (0,255,0), 2)
cv.imshow("Camera", frame)
cv.imshow("threshold", thresh)
rval, frame = vc.read()
if cv.waitKey(1) & 0xFF == 27:
break
cv1.DestroyAllWindows()
def LeftCap():
cam = Device(2)
cam.saveSnapshot('imageL.jpg')
fn = 'C:\opencv2.4.8\sources\samples\python2\imageL.jpg'
return fn
def RightCap():
cam = Device(0)
cam.saveSnapshot('imageR.jpg')
fn = 'C:\opencv2.4.8\sources\samples\python2\imageR.jpg'
return fn
def Calculate(Li, Ri, Mat):
img_L = cv.pyrDown( cv.imread(Li) )
img_R = cv.pyrDown( cv.imread(Ri) )
window_size = 3
min_disp = 16
num_disp = 112-min_disp
stereo = cv.StereoSGBM(minDisparity = min_disp,
numDisparities = num_disp,
SADWindowSize = window_size,
uniquenessRatio = 10,
speckleWindowSize = 100,
speckleRange = 32,
disp12MaxDiff = 1,
P1 = 8*3*window_size**2,
P2 = 32*3*window_size**2,
fullDP = False
)
print "computing disparity..."
disp = stereo.compute(img_L, img_R).astype(np.float32) / 16.0
print "generating 3d point cloud..."
h, w = img_L.shape[:2]
f = 0.8*w # guess for focal length
points = cv.reprojectImageTo3D(disp, Mat)
colors = cv.cvtColor(img_L, cv.COLOR_BGR2RGB)
mask = disp > disp.min()
cv.imshow('left', img_L)
cv.imshow('disparity', (disp-min_disp)/num_disp)
b=6.50
D=b*f/disp
print "The Distance =", D
cv.waitKey()
cv1.DestroyAllWindows()
if __name__ == '__main__':
import sys, getopt
from glob import glob
Img_pointsL, Cam_MatL, DisL = caliLeftCam()
Img_pointsR,obj_points, Cam_MatR, DisR = caliRightCam()
print "Running stereo calibration..."
retval, Cam_MatL, DisL, Cam_MatR, DisR, R, T, E, F= cv.stereoCalibrate(obj_points, Img_pointsL, Img_pointsR,(384,288))
print"running rectifation..."
RL, Rr, PL, PR, Q, validRoiL, validRoiR = cv.stereoRectify(Cam_MatL, DisL, Cam_MatR, DisR,(384,288), R, T)
Pics()
Li = LeftCap()
Ri = RightCap()
Calculate(Li, Ri, Q)
Not sure if that is the problem, but you call the cv.StereoRectify() function as cv2.stereoRectify(), not cv2.cv.StereoRectify(), and you provided the wrong arguments to it.
From the documentation:
cv.StereoRectify(cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2, imageSize, R, T, R1, R2, P1, P2, Q=None, flags=CV_CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0, 0))
You did it this (wrong) way:
cv.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, ...)
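Separately from the argument order, note that the distance formula in Calculate() is applied to the whole disparity image, so D = b*f/disp is a per-pixel depth map rather than a single value, which is why many different numbers are printed. Reducing it to one distance means picking the pixels that belong to the object; a rough sketch, reusing the names from the question:

disp = stereo.compute(img_L, img_R).astype(np.float32) / 16.0
valid = disp > disp.min()                  # ignore pixels with no disparity match
D = b * f / disp[valid]                    # per-pixel distances over the valid pixels
print "median distance =", np.median(D)    # one representative number for the scene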
