I am working with VSCode 1.68.1 on Ubuntu 20.04.
I am following this link:
https://courses.ece.cornell.edu/ece5990/ECE5725_Fall2020_Projects/Dec_21_Demo/Drawing%20Robot/eam348_mm2994_W/index.html
from unittest import result
import numpy as np
import time
import cv2
import sys
import cv2.aruco as aruco
import socket
import datetime
import glob
import math
import multiprocessing as mp
port = 30003
IP = '192.11.0.25'
robot_ID = 20185500976
robot = 'ur-20185500976'
marker_dimension =0.06
worldx = 390
worldy = 260
bottom_left = 31 #this is the origin - positivex: towards bottom right - positivey: towards top left
bottom_right = 32
top_left = 9
top_right = 20
#camera dist, matrix and newcameramatrix
dist=np.array(([[5.0164361897882787e-02, 6.6308284023737640e-01, 2.5493975084043882e-03, -6.0403656948007376e-03, -2.9652221208277720e+00]]))
mtx=np.array([[6.1618286891135097e+02, 0., 3.2106366551961219e+02],
[0 , 6.1595924417559945e+02, 2.4165645046034246e+02],
[0. , 0. , 1. ]])
found_dict_pixel_space = {}
found_dict_camera_space = {}
found_dict_world_space = {}
found_dict_homography_space = {}
final_string = ""
originRvec = np.array([0,0,1])
markerRvec= np.array([0,0,0])
def UDP(IP, port, message):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #IPv4 DNS server - UDP protocol
    sock.sendto(bytes(message, "utf-8"), (IP, port)) #self, data, address
def getMarkerCenter(corners):
    px = (corners[0][0] + corners[1][0] + corners[2][0] + corners[3][0]) * 0.25
    py = (corners[0][1] + corners[1][1] + corners[2][1] + corners[3][1]) * 0.25
    return [px, py]
def getMarkerRotation(corners):
    unit_x_axis = [1., 0.]
    center = getMarkerCenter(corners)
    right_edge_midpoint = (corners[1] + corners[2]) / 2.
    unit_vec = (right_edge_midpoint - center) / np.linalg.norm(right_edge_midpoint - center)
    angle = np.arccos(np.dot(unit_x_axis, unit_vec))
    return angle
def inversePerspective(rvec, tvec):
    R, _ = cv2.Rodrigues(rvec)
    R = np.array(R).T #this was np.matrix but had error
    invTvec = np.dot(-R, np.array(tvec))
    invRvec, _ = cv2.Rodrigues(R)
    return invRvec, invTvec
def normalize(v):
    if np.linalg.norm(v) == 0: return v
    return v / np.linalg.norm(v)
def findWorldCoordinate(originCorners, point):
    zero = np.array(originCorners[3]) #bottom left as the origin - check the data structure
    print(zero)
    x = (np.array(originCorners[0]) - zero) # bottom right - Green Axis -- throw out z
    y = (np.array(originCorners[1]) - zero) # top left - Red Axis -- throw out z
    x = x[0][0:2]
    y = y[0][0:2]
    x = normalize(x)
    y = normalize(y)
    #print("x", x)
    vec = (point - zero)[0][0:2]
    #print("vec", vec)
    vecNormal = normalize(vec)
    cosX = np.dot(x, vecNormal)
    cosY = np.dot(y, vecNormal)
    xW = np.linalg.norm(vec) * cosX
    yW = np.linalg.norm(vec) * cosY
    return [xW, yW]
cap=cv2.VideoCapture(4)
font = cv2.FONT_HERSHEY_SIMPLEX #font for displaying text (below)
while True:
    t0 = time.time()
    ret, frame = cap.read()
    h, w = frame.shape[:2]
    #new image size to generate
    h1, w1 = h, w
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 0, (w1, h1))
    #print(newcameramtx)
    #mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w1,h1), 5)
    #dst1 = cv2.remap(frame, mapx, mapy, cv2.INTER_LINEAR)
    dst1 = cv2.undistort(frame, mtx, dist, None, newcameramtx)
    x, y, w1, h1 = roi
    dst1 = dst1[y:y + h1, x:x + w1]
    frame = dst1
    t1 = time.time() - t0
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
    arucoParameters = aruco.DetectorParameters_create()
    t2 = time.time() - t0
    data = aruco.detectMarkers(gray, aruco_dict, parameters=arucoParameters)
    t3 = time.time() - t0
    corners = data[0]
    ids = data[1]
    originIDglobal = 0
    # If you can't find it, type id
    if ids is not None:
        t4 = time.time() - t0
        result = aruco.estimatePoseSingleMarkers(corners, marker_dimension, newcameramtx, dist)
        rvecs = result[0] # rotation vectors of markers
        tvecs = result[1] # translation vector of markers
        #setting bottom_left as the origin
        if bottom_left in ids:
            originID = np.where(ids == bottom_left)[0][0]
            originIDglobal = originID
        else:
            originID = originIDglobal
        originCorners = corners[originID] # corners of the tag set as the origin
        originCornersCamera = getCornerInCameraWorld(marker_dimension, rvecs[originID], tvecs[originID])[0] # origin tag corners in camera space
        originRvec = rvecs[originID] # rotation vec of origin tag
        originTvec = tvecs[originID] # translation vec of origin tag
        display = aruco.drawDetectedMarkers(frame, corners, ids) #Draw a square around the markers
        t5 = time.time() - t0
        for i in range(len(ids)):
            ID = ids[i]
            rvec = rvecs[i]
            tvec = tvecs[i]
            corners4 = corners[i]
            display = cv2.drawFrameAxes(frame, newcameramtx, dist, rvec, tvec, 0.03) #Draw 3D Axis, 3cm(0.03)
            found_dict_pixel_space["" + str(ids[i][0])] = corners4 # put the corners of this tag in the dictionary
        # Homography
        zero = found_dict_pixel_space[str(bottom_left)][0][3] #bottom left - 3
        x = found_dict_pixel_space[str(bottom_right)][0][2] #bottom right - 27
        y = found_dict_pixel_space[str(top_left)][0][0] #top left - 22
        xy = found_dict_pixel_space[str(top_right)][0][1] #top right - 24
        workspace_world_corners = np.array([[0.0, 0.0], [worldx, 0.0], [0.0, worldy], [worldx, worldy]], np.float32) # 4 corners in millimeters
        workspace_pixel_corners = np.array([zero, x, y, xy], np.float32) # 4 corners in pixels
        # Homography Matrix
        h, status = cv2.findHomography(workspace_pixel_corners, workspace_world_corners) #perspective matrix
        t6 = time.time() - t0
        im_out = cv2.warpPerspective(frame, h, (worldx, worldy)) #showing that it works
        t7 = time.time() - t0
        for i in range(len(ids)):
            j = ids[i][0]
            corners_pix = found_dict_pixel_space[str(j)] #[0]
            corners_pix_transformed = cv2.perspectiveTransform(corners_pix, h)
            found_dict_homography_space[str(j)] = corners_pix_transformed
        print(found_dict_homography_space)
        robot = found_dict_homography_space[str(robot_ID)][0]
        print(getMarkerCenter(robot))
        cv2.imshow('Warped Source Image', im_out)
        t8 = time.time() - t0
        print("t1: %8.4f t2: %8.4f t3: %8.4f t4: %8.4f t5: %8.4f t6: %8.4f t7: %8.4f t8: %8.4f" % (t1, t2-t1, t3-t2, t4-t3, t5-t4, t6-t5, t7-t6, t8-t7))
    else:
        display = frame
        cv2.imshow('Display', display)
    # Display result frame
    cv2.imshow("frame", frame)
    key = cv2.waitKey(1)
    if key == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
I am getting the error: "NameError: name 'getCornerInCameraWorld' is not defined"
I am unable to find anything related to getCornerInCameraWorld.
Please provide some help.
I got a reply from the author of the code. She confirmed that the code was missing a part and provided it.
The missing code was:
def getCornerInCameraWorld(size, rvec, tvec):
    half_size = size * 0.5
    rotMatrix, jacobian = cv2.Rodrigues(rvec) #convert rot vector from marker space to camera space
    X = half_size * rotMatrix[:, 0]
    Y = half_size * rotMatrix[:, 1]
    c1 = np.add(np.add(-1 * X, Y), tvec) #top left
    c2 = np.add(np.add(X, Y), tvec) #top right
    c3 = np.add(np.add(X, -1 * Y), tvec) # bottom right
    c4 = np.add(np.add(-1 * X, -1 * Y), tvec) # bottom left
    cornersInCameraWorld = [c1, c2, c3, c4]
    cornersInCameraWorld = np.array(cornersInCameraWorld, dtype=np.float32)
    return cornersInCameraWorld, rotMatrix
I have checked, and it is working for me.
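For anyone else following the same project, here is a minimal, hypothetical sketch of how the helper plugs into the pose-estimation output (mtx, dist and marker_dimension are the values defined at the top of the script; frame is assumed to be a captured image):
import cv2
import cv2.aruco as aruco
import numpy as np

gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
corners, ids, _ = aruco.detectMarkers(gray, aruco_dict, parameters=aruco.DetectorParameters_create())
if ids is not None:
    rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners, marker_dimension, mtx, dist)
    # corners of the first detected marker expressed in camera coordinates (meters)
    cornersCam, rotMatrix = getCornerInCameraWorld(marker_dimension, rvecs[0], tvecs[0])
    print(cornersCam)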
Related
I'm using OpenCV in Python to tag an object in a video, write that new output, and save it as an .avi file. Everything is working fine except for the video writer: I cannot get it to write and/or save the output. It looks the same as what I've done before, so this is driving me insane. Any ideas on what I might be doing wrong are greatly appreciated.
import numpy as np
import cv2
def draw_pyramid(pyramid, H):
    pyramid_new = []
    i = 0
    for p in pyramid:
        if i != 4:
            temp_p = p
            temp_p = np.append(temp_p, 1)
            M_ext = H[0:3, :]
            p = M_ext @ temp_p
            pyramid_new.append((p[0], p[1], p[2]))
            i += 1
        else:
            pyramid_new.append((p[0], p[1], p[2]))
            i += 1
    return pyramid_new
cap = cv2.VideoCapture('hw4.avi')
got_image, img = cap.read()
videoWriter = cv2.VideoWriter("switch.avi",
fourcc=cv2.VideoWriter_fourcc('M','J','P','G'),
fps=30.0,
frameSize=(img.shape[1], img.shape[0]))
fx, fy, cx, cy = 675.0, 675.0, 320.0, 240.0
k = np.array([[fx,0.0,cx],[0.0,fy,cy],[0.0,0.0,1.0]])
while True:
got_img, img = cap.read()
if not got_img:
break
else:
gray_image = cv2.cvtColor(src=img,
code=cv2.COLOR_BGR2GRAY) # convert BGR to grayscale
output_thresh, binary_image = cv2.threshold(src=gray_image,
maxval=255,
type=cv2.THRESH_OTSU, # determine threshold automatically from image
thresh=0) # ignore this if using THRESH_OTSU
arucoDict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_100)
corners, ids, _ = cv2.aruco.detectMarkers(image=img,
dictionary=arucoDict)
if ids is not None and corners:
cv2.aruco.drawDetectedMarkers(image=img,
corners=corners,
ids=ids,
borderColor=(0, 255, 0))
rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(corners=corners,
markerLength=2.0,
cameraMatrix=k,
distCoeffs=None)
cv2.aruco.drawAxis(image=img,
cameraMatrix=k,
distCoeffs=None,
rvec=rvecs,
tvec=tvecs,
length=2.0)
if ids[0][0]==1:
pyr = np.array(((-3.5/3,-1.5/3,-1.5/3,-3.5/3,-2.5),
(-1, -1, -1/3, -1/3, -2),
(-4/3, -4/3, -4/3, -4/3, -5))).T
R = cv2.Rodrigues(rvecs)[0]
r = R[0]
# rvec_m_c = rvecs[0] # This is a 1x3 rotation vector
tm_c = np.array(tvecs[0]).T # This is a 1x3 translation vector
t = np.array([[-2.5, -2, -5]]).T
H = np.block([[R, t], [0, 0, 0, 1]])
if ids[0][0] == 0:
pyr = np.array(((1.5/3, 3.5/3, 3.5/3, 1.5/3, 2.5),
(-1,-1,-1/3, -1/3, -2),
(-4/3, -4/3, -4/3, -4/3, -1))).T
R = cv2.Rodrigues(rvecs)[0]
# r = R[0]
tm_c = np.array(tvecs[0]).T # This is a 1x3 translation vector
t = np.array([[2.5, -2, -1]]).T
H = np.block([[R, t], [0,0,0,1]])
pyramid_new = draw_pyramid(pyr, H)
pyramid_new = np.asarray(pyramid_new)
pyramid_new = pyramid_new.reshape(-1, 3)
dist_coeff = np.zeros((4,1))
pyramid_points, jacobian = cv2.projectPoints(pyramid_new,
rvecs,
tvecs,
np.array(k),
distCoeffs=dist_coeff)
pyramid_points = pyramid_points.astype(int)
froml = [1,2,3,0,0,1,2,3]
fromr = [4,4,4,4,1,2,3,0]
color = (0,0,255)
for i, j in zip(froml, fromr):
cv2.line(img, tuple(pyramid_points[i][0]),
tuple(pyramid_points[j][0]), color, 1)
videoWriter.write(img)
cv2.imshow('switch', img)
cv2.waitKey(30)
videoWriter.release()
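For reference, here is a minimal, hypothetical sanity-check sketch for the writer setup (same file name and codec as above); the usual culprits when nothing gets written are a writer that never opened or frames whose size/type don't match what was passed to the constructor:
import cv2

cap = cv2.VideoCapture('hw4.avi')
ok, img = cap.read()
frame_size = (img.shape[1], img.shape[0])          # (width, height) expected by the writer

writer = cv2.VideoWriter("switch.avi",
                         cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                         30.0, frame_size)
print("writer opened:", writer.isOpened())         # False means the codec/container could not be set up

while ok:
    # frames must be 3-channel BGR and exactly frame_size, otherwise OpenCV drops them silently
    assert img.ndim == 3 and (img.shape[1], img.shape[0]) == frame_size
    writer.write(img)
    ok, img = cap.read()

writer.release()                                    # finalizes the file; without it the video may be unplayable
cap.release()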
How can I convert an object's position in PyBullet to pixel coordinates & draw a line onto the frame using PyBullet & OpenCV?
We would like to do this because PyBullet's native addUserDebugLine() function is not available in DIRECT mode.
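(For context, the call we are trying to replicate would look roughly like this sketch in GUI mode; the endpoints and color are placeholders.)
import pybullet as p

p.connect(p.GUI)
# red line from the world origin to [1, 0, 0]; placeholder endpoints
p.addUserDebugLine(lineFromXYZ=[0, 0, 0],
                   lineToXYZ=[1, 0, 0],
                   lineColorRGB=[1, 0, 0],
                   lineWidth=2,
                   lifeTime=0)  # 0 = keep the line until removed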
import pybullet as p
import numpy as np
import time
import pybullet_data
import cv2
VIDEO_RESOLUTION = (1280, 720)
MY_COLORS = [(255,0,0), (0,255,0), (0,0,255)]
def capture_frame(base_pos=[0,0,0], _cam_dist=3, _cam_yaw=45, _cam_pitch=-45):
_render_width, _render_height = VIDEO_RESOLUTION
view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=_cam_dist,
yaw=_cam_yaw,
pitch=_cam_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = p.computeProjectionMatrixFOV(
fov=90, aspect=float(_render_width) / _render_height,
nearVal=0.01, farVal=100.0)
(_, _, px, _, _) = p.getCameraImage(
width=_render_width, height=_render_height, viewMatrix=view_matrix,
projectionMatrix=proj_matrix, renderer=p.ER_TINY_RENDERER) # ER_BULLET_HARDWARE_OPENGL)
rgb_array = np.array(px, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (_render_height, _render_width, 4))
rgb_array = rgb_array[:, :, :3]
return rgb_array, view_matrix, proj_matrix
def render():
frame, vmat, pmat = capture_frame()
p1, cubeOrn = p.getBasePositionAndOrientation(1)
p2, cubeOrn = p.getBasePositionAndOrientation(2)
frame, view_matrix, proj_matrix = capture_frame()
frame = cv2.resize(frame, VIDEO_RESOLUTION)
points = {}
# reshape matrices
my_order = 'C'
pmat = np.array(proj_matrix).reshape((4,4), order=my_order)
vmat = np.array(view_matrix).reshape((4,4), order=my_order)
fmat = vmat.T @ pmat.T
# compute origin from origin point in simulation
origin = np.array([0,0,0,1])
frame_origin = (fmat @ origin)[:3]*np.array([1280, 640, 0]) + np.array([640, 360, 0])
# define unit vectors
unit_vectors = [ np.array([1,0,0,1]),
np.array([0,1,0,1]),
np.array([0,0,1,1]) ]
for col_id, unit_vector in enumerate(unit_vectors):
cur_point = (fmat @ unit_vector)[:3]*np.array([1280, 640, 0]) + np.array([640, 360, 0])
cv2.line(frame, (640,360), (int(cur_point[0]),int(cur_point[1])), color=MY_COLORS[col_id], thickness=2)
cv2.imwrite("my_rendering.jpg", frame)
print(p1,p2)
if __name__ == '__main__':
physicsClient = p.connect(p.DIRECT)#or p.DIRECT for non-graphical version
p.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally
p.setGravity(0,0,-10)
planeId = p.loadURDF("plane.urdf")
startPos = [1,0,0.2]
startOrientation = p.getQuaternionFromEuler([0,0,0])
boxId = p.loadURDF("r2d2.urdf",startPos, startOrientation)
startPos = [0,2,0.2]
boxId = p.loadURDF("r2d2.urdf",startPos, startOrientation)
#set the center of mass frame (loadURDF sets base link frame) startPos/Ornp.resetBasePositionAndOrientation(boxId, startPos, startOrientation)
for i in range (2400):
if i == 2399:
render()
p.stepSimulation()
p.disconnect()
The expected output would be the following frame, but with the origin coordinate frame drawn correctly, e.g. the X, Y, and Z axes colored red, blue, and green respectively.
Since the two R2D2 robots are positioned at [1,0,0] and [0,1,0] respectively, we can see that the coordinate frame is off. (See image below)
We tried the following:
transposing the matrices
not transposing the matrices
changing the order of how we compute fmat, e.g. pmat @ vmat instead of vmat @ pmat, etc.
Any help is appreciated.
After a lot of fiddling, I came to a solution.
Playing with it for a while, I got to a point where it looked almost OK, except for a rotation of the axes given by the yaw angle. So I made a second call to computeViewMatrixFromYawPitchRoll, but with the opposite yaw, in order to compute the transformation for the axes.
Unfortunately, I'm not sure why this works... but it works!
Note: base_pos, _cam_dist, _cam_yaw and _cam_pitch have been moved into render(). Note also that the up direction has been reversed (don't ask why... :-) ). A pretty messy explanation, I must admit...
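For reference, the textbook pipeline this code approximates is sketched below (world point -> view space -> clip space -> NDC -> pixel, with a perspective divide). The posted code folds the scaling and the opposite-yaw trick into one step, so treat this only as a rough reference; the column-major reshape is an assumption about how PyBullet returns its flat matrices.
import numpy as np

def world_to_pixel(point_xyz, view_matrix, proj_matrix, width=1280, height=720):
    # PyBullet returns flat 16-element matrices; assuming column-major (OpenGL) layout here
    V = np.array(view_matrix).reshape(4, 4, order='F')
    P = np.array(proj_matrix).reshape(4, 4, order='F')
    pt = np.array([point_xyz[0], point_xyz[1], point_xyz[2], 1.0])
    clip = P @ (V @ pt)                        # world -> view -> clip space
    ndc = clip[:3] / clip[3]                   # perspective divide, each component in [-1, 1]
    u = (ndc[0] * 0.5 + 0.5) * width           # NDC x -> pixel column
    v = (1.0 - (ndc[1] * 0.5 + 0.5)) * height  # NDC y points up, image y points down
    return int(u), int(v)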
import pybullet as p
import numpy as np
import time
import pybullet_data
import cv2
import os
VIDEO_RESOLUTION = (1280, 720)
MY_COLORS = [(255,0,0), (0,255,0), (0,0,255)]
K=np.array([[1280,0,0],[0,720,0],[0,0,1]])
def capture_frame(base_pos, _cam_dist, _cam_yaw, _cam_pitch):
_render_width, _render_height = VIDEO_RESOLUTION
view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=_cam_dist,
yaw=_cam_yaw,
pitch=_cam_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = p.computeProjectionMatrixFOV(
fov=90, aspect=float(_render_width) / _render_height,
nearVal=0.01, farVal=100.0)
(_, _, px, _, _) = p.getCameraImage(
width=_render_width, height=_render_height, viewMatrix=view_matrix,
projectionMatrix=proj_matrix, renderer=p.ER_TINY_RENDERER) # ER_BULLET_HARDWARE_OPENGL)
rgb_array = np.array(px, dtype=np.uint8)
rgb_array = np.reshape(rgb_array, (_render_height, _render_width, 4))
rgb_array = rgb_array[:, :, :3]
return rgb_array, view_matrix, proj_matrix
def render():
p1, cubeOrn = p.getBasePositionAndOrientation(1)
p2, cubeOrn = p.getBasePositionAndOrientation(2)
base_pos=[0,0,0]
_cam_dist=3
_cam_yaw=45
_cam_pitch=-30
frame, view_matrix, proj_matrix = capture_frame(base_pos, _cam_dist, _cam_yaw, _cam_pitch)
frame = cv2.resize(frame, VIDEO_RESOLUTION)
points = {}
# inverse transform
view_matrix = p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=_cam_dist,
yaw=-_cam_yaw,
pitch=_cam_pitch,
roll=0,
upAxisIndex=2)
my_order = 'C'
pmat = np.array(proj_matrix).reshape((4,4), order=my_order)
vmat = np.array(view_matrix).reshape((4,4), order=my_order)
fmat = pmat @ vmat.T
# compute origin from origin point in simulation
origin = np.array([0,0,0,1])
frame_origin = (fmat @ origin)[:3]*np.array([1280, 720, 0]) + np.array([640, 360, 0])
# define unit vectors
unit_vectors = [ np.array([1,0,0,1]),
np.array([0,1,0,1]),
np.array([0,0,-1,1]) ]
for col_id, unit_vector in enumerate(unit_vectors):
cur_point = (fmat @ unit_vector)[:3]*np.array([1280, 720, 0]) + np.array([640, 360, 0])
cv2.line(frame, (640,360), (int(cur_point[0]),int(cur_point[1])), color=MY_COLORS[col_id], thickness=2)
cv2.imwrite("my_rendering.jpg", frame)
print(p1,p2)
if __name__ == '__main__':
physicsClient = p.connect(p.DIRECT)#or p.DIRECT for non-graphical version
#physicsClient = p.connect(p.GUI)#or p.DIRECT for non-graphical version
p.setAdditionalSearchPath(pybullet_data.getDataPath()) #optionally
p.setGravity(0,0,-10)
planeId = p.loadURDF("plane.urdf")
#arrows = p.loadURDF("arrows.urdf")
startPos = [1,0,0.2]
startOrientation = p.getQuaternionFromEuler([0,0,0])
boxId = p.loadURDF("r2d2.urdf",startPos, startOrientation)
startPos = [0,2,0.2]
boxId = p.loadURDF("r2d2.urdf",startPos, startOrientation)
#set the center of mass frame (loadURDF sets base link frame) startPos/Ornp.resetBasePositionAndOrientation(boxId, startPos, startOrientation)
for i in range (2400):
if i == 2399:
render()
p.stepSimulation()
p.disconnect()
Here is the result:
Best regards.
I'm trying to detect the hand and fingers in an image using OpenCV in python.
This is the code I'm using:
import cv2, random, math
import numpy as np
import matplotlib.pyplot as plt
import time
def calculateAngle(far, start, end):
a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
angle = math.acos((b**2 + c**2 - a**2) / (2*b*c))
return angle
image = cv2.imread("5_P_hgr1_id09_2.png")
imageHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
Min = np.array([5,55,60],np.uint8)
Max = np.array([13,139,198],np.uint8)
mask = cv2.inRange(imageHSV, Min, Max)
kernel_square = np.ones(None,np.uint8)
kernel_ellipse= cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(11,11))
dilation = cv2.dilate(mask,kernel_ellipse,iterations = 1)
closing = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel_square)
erosion = cv2.erode(closing,kernel_square,iterations = 1)
contours, hierarchy = cv2.findContours(erosion,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
if len(contours)>0:
maxArea = 0
hull = []
fingerList = []
for i in range (len(contours)):
cnt = contours[i]
area = cv2.contourArea(cnt)
if area>maxArea :
maxArea = area
ci = i
cnts = contours[ci]
hull2 = cv2.convexHull(cnts)
hull = cv2.convexHull(cnts, returnPoints=False)
defects = cv2.convexityDefects(cnts, hull)
moments = cv2.moments(contours[ci])
#Central mass
if moments['m00']!=0: #m00 moments spatiaux
cx = int(moments['m10']/moments['m00']) # cx = M10/M00
cy = int(moments['m01']/moments['m00']) # cy = M01/M00
centerMass=(cx,cy)
cv2.circle(image,centerMass,7,[100,0,255],2)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image,'Center',tuple(centerMass),font,0.5,(255,255,255),1)
D = []
for i in range (len(cnts)):
x = np.array(cnts[i][0][0])
y = np.array(cnts[i][0][1])
xp = np.power(x-cx, 2)
yp = np.power(y-cy, 2)
dist = np.sqrt(xp+yp)
D.append(dist)
dist_min = np.min(D)
closest_d = np.where(D == dist_min)[0]
closest_p = tuple(cnts[closest_d[0]][0])
cnt = 0
farDefect=[]
Far =[]
if type(defects) != type(None):
for i in range (defects.shape[0]):
s,e,f,d = defects[i, 0]
start = tuple(cnts [s,0])
end = tuple(cnts[e,0])
far = tuple(cnts[f,0])
Far.append(far)
x = far[0]
y = far[0]
angle = calculateAngle (far, start, end)
if angle<= math.pi/1.6 and far != closest_p and d>8000 :
cnt+=1
farDefect.append(far)
for i in range (len(farDefect)):
xd = (farDefect[i][0])
yd = (farDefect[i][1])
listDistance = []
dist = 0
for j in range (defects.shape[0]):
s,e,f,d = defects[j,0]
point = cnts[f][0]
distance = np.sqrt(np.power(point[0]-centerMass[0],2)+np.power(point[1]-centerMass[1],2))
distance2 = np.sqrt(np.power(point[0]-xd,2)+np.power(point[1]-yd,2))
distance3 = np.sqrt(np.power(xd-centerMass[0],2)+np.power(yd-centerMass[1],2))
if dist<distance and distance2<distance and distance3<distance and distance3+distance2<=distance+50:
if i==0 :
dist = distance
pn= point
listDistance.append((point[0],point[1]))
if i==1 :
distance3 = np.sqrt(np.power(pn[0]-point[0],2)+np.power(pn[1]-point[1],2))
if distance3>100:
dist = distance
pn2= point
listDistance.append((point[0],point[1]))
if i==2 :
distance3 = np.sqrt(np.power(pn[0]-point[0],2)+np.power(pn[1]-point[1],2))
distance4 = np.sqrt(np.power(pn2[0]-point[0],2)+np.power(pn2[1]-point[1],2))
if distance4>100 and distance3>100:
dist = distance
pn3= point
listDistance.append((point[0],point[1]))
if i==3 :
distance3 = np.sqrt(np.power(pn[0]-point[0],2)+np.power(pn[1]-point[1],2))
distance4 = np.sqrt(np.power(pn2[0]-point[0],2)+np.power(pn2[1]-point[1],2))
distance5 = np.sqrt(np.power(pn3[0]-point[0],2)+np.power(pn3[1]-point[1],2))
if distance4>100 and distance3>100 and distance5>100:
dist = distance
listDistance.append((point[0],point[1]))
dist = 1000
for j in range (len(listDistance)):
point = listDistance[j]
distance = np.sqrt(np.power(point[0]-xd,2)+np.power(point[1]-yd,2))
if distance<dist and distance!=0:
finger = point
cv2.circle(image,(finger),7,[100,0,255],2)
fingerList.append(finger)
dist = 50000
for j in range (len(fingerList)):
point = fingerList[j]
distance = np.sqrt(np.power(point[0]-cx,2)+np.power(point[1]-cy,2))
if distance<dist:
dist = distance
finger = point
x,y,w,h = cv2.boundingRect(cnts)
image = cv2.rectangle(image,(x,y),(x+w,y+h),(0,0,255),2)
for i in range (len(farDefect)):
defaut= farDefect[i]
cv2.circle(image,defaut,7,[100,0,255],2)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(image,'D',tuple(defaut),font,0.5,(255,255,255),1)
for i in range (len(contours)):
color_con = (0,255,0) #green color for contours
color = (255,0,0) #blue color for convex hull
cv2.drawContours(image, contours, i, color_con, 1,8, hierarchy)
#cv2.drawContours(image,[hull2], i, color, 1,8)
cv2.imshow("image", image)
cv2.waitKey(0)
I'm getting the following results:
However, I cannot detect all the fingers (I have 5 fingers and I only get 4, shown with small red circles). I don't know where the problem is or how I should detect all the fingers.
I'm looking for results close to this:
Any help is appreciated.
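(Not a full answer, but for reference: a common simplification is the "count the deep convexity defects, then add one" heuristic sketched below. It assumes cnts is the largest contour found above and reuses the same angle and depth thresholds.)
import math
import cv2
import numpy as np

def count_fingers(contour):
    hull_idx = cv2.convexHull(contour, returnPoints=False)
    defects = cv2.convexityDefects(contour, hull_idx)
    if defects is None:
        return 0
    valleys = 0
    for s, e, f, d in defects[:, 0]:
        start, end, far = contour[s][0], contour[e][0], contour[f][0]
        a = np.linalg.norm(end - start)
        b = np.linalg.norm(far - start)
        c = np.linalg.norm(end - far)
        angle = math.acos((b**2 + c**2 - a**2) / (2 * b * c))  # angle at the valley between two fingers
        if angle <= math.pi / 2 and d > 8000:                  # deep, narrow valley
            valleys += 1
    return valleys + 1 if valleys > 0 else 0

# e.g. print(count_fingers(cnts))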
I'm trying to create trails and a map of trails of all IDs like in this video: https://www.youtube.com/watch?v=tq0BgncuMhs
So far I haven't been able to. I'm currently using this repo from bendidi https://github.com/bendidi/Tracking-with-darkflow which I modified to also show the trails.
I did try using cv2.line with the points extracted from track.to_tlbr(), but right now the result looks like this:
Here's the code that I modified to get the current result:
darkflow/darkflow/net/yolov2/predict.py
from collections import deque
import numpy as np
import math
import cv2
import os
import json
#from scipy.special import expit
#from utils.box import BoundBox, box_iou, prob_compare
#from utils.box import prob_compare2, box_intersection
from ...utils.box import BoundBox
from ...cython_utils.cy_yolo2_findboxes import box_constructor
ds = True
try :
from deep_sort.application_util import preprocessing as prep
from deep_sort.application_util import visualization
from deep_sort.deep_sort.detection import Detection
except :
ds = False
def expit(x):
return 1. / (1. + np.exp(-x))
def _softmax(x):
e_x = np.exp(x - np.max(x))
out = e_x / e_x.sum()
return out
def findboxes(self, net_out):
# meta
meta = self.meta
boxes = list()
boxes=box_constructor(meta,net_out)
return boxes
def extract_boxes(self,new_im):
cont = []
new_im=new_im.astype(np.uint8)
ret, thresh=cv2.threshold(new_im, 127, 255, 0)
p, contours, hierarchy=cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for i in range(0, len(contours)):
cnt=contours[i]
x, y, w, h=cv2.boundingRect(cnt)
if w*h > 30**2 and ((w < new_im.shape[0] and h <= new_im.shape[1]) or (w <= new_im.shape[0] and h < new_im.shape[1])):
if self.FLAGS.tracker == "sort":
cont.append([x, y, x+w, y+h])
else : cont.append([x, y, w, h])
return cont
def postprocess(self,net_out, im,frame_id = 0,csv_file=None,csv=None,mask = None,encoder=None,tracker=None):
"""
Takes net output, draw net_out, save to disk
"""
boxes = self.findboxes(net_out)
# meta
meta = self.meta
nms_max_overlap = 0.1
threshold = meta['thresh']
colors = meta['colors']
labels = meta['labels']
if type(im) is not np.ndarray:
imgcv = cv2.imread(im)
else: imgcv = im
h, w, _ = imgcv.shape
thick = int((h + w) // 300)
resultsForJSON = []
if not self.FLAGS.track :
for b in boxes:
boxResults = self.process_box(b, h, w, threshold)
if boxResults is None:
continue
left, right, top, bot, mess, max_indx, confidence = boxResults
if self.FLAGS.json:
resultsForJSON.append({"label": mess, "confidence": float('%.2f' % confidence), "topleft": {"x": left, "y": top}, "bottomright": {"x": right, "y": bot}})
continue
if self.FLAGS.display or self.FLAGS.saveVideo:
cv2.rectangle(imgcv,
(left, top), (right, bot),
colors[max_indx], thick)
cv2.putText(imgcv, mess, (left, top - 12),
0, 1e-3 * h, colors[max_indx],thick//3)
else :
if not ds :
print("ERROR : deep sort or sort submodules not found for tracking please run :")
print("\tgit submodule update --init --recursive")
print("ENDING")
exit(1)
detections = []
scores = []
lines = deque(maxlen=64)
for b in boxes:
boxResults = self.process_box(b, h, w, threshold)
if boxResults is None:
continue
left, right, top, bot, mess, max_indx, confidence = boxResults
if mess not in self.FLAGS.trackObj :
continue
if self.FLAGS.tracker == "deep_sort":
detections.append(np.array([left,top,right-left,bot-top]).astype(np.float64))
scores.append(confidence)
elif self.FLAGS.tracker == "sort":
detections.append(np.array([left,top,right,bot]).astype(np.float64))
if len(detections) < 3 and self.FLAGS.BK_MOG:
detections = detections + extract_boxes(self,mask)
detections = np.array(detections)
if detections.shape[0] == 0 :
return imgcv
if self.FLAGS.tracker == "deep_sort":
scores = np.array(scores)
features = encoder(imgcv, detections.copy())
detections = [
Detection(bbox, score, feature) for bbox,score, feature in
zip(detections,scores, features)]
# Run non-maxima suppression.
boxes = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
indices = prep.non_max_suppression(boxes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
tracker.predict()
tracker.update(detections)
trackers = tracker.tracks
elif self.FLAGS.tracker == "sort":
trackers = tracker.update(detections)
for track in trackers:
if self.FLAGS.tracker == "deep_sort":
if not track.is_confirmed() or track.time_since_update > 1:
continue
bbox = track.to_tlbr()
center = (int(bbox[0]) + ((int(bbox[2]) - int(bbox[0])) // 2)), (int(bbox[1]) + ((int(bbox[3]) - int(bbox[1])) // 2)) # X + Width / 2, Y + Height / 2
lines.appendleft(center)
id_num = str(track.track_id)
elif self.FLAGS.tracker == "sort":
bbox = [int(track[0]),int(track[1]),int(track[2]),int(track[3])]
id_num = str(int(track[4]))
if self.FLAGS.csv:
csv.writerow([frame_id,id_num,int(bbox[0]),int(bbox[1]),int(bbox[2])-int(bbox[0]),int(bbox[3])-int(bbox[1])])
csv_file.flush()
if self.FLAGS.display or self.FLAGS.saveVideo:
cv2.rectangle(imgcv, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),
(255,255,255), thick//3)
cv2.putText(imgcv, id_num,(int(bbox[0]), int(bbox[1]) - 12),0, 1e-3 * h, (255,255,255),thick//6)
for i in range(1, len(lines)):
cv2.line(imgcv, lines[i - 1], lines[i], (255,255,255), thick//3)
return imgcv
Or just the code that I added:
...
lines = deque(maxlen=64)
...
center = (int(bbox[0]) + ((int(bbox[2]) - int(bbox[0])) // 2)), (int(bbox[1]) + ((int(bbox[3]) - int(bbox[1])) // 2)) # X + Width / 2, Y + Height / 2
lines.appendleft(center)
...
for i in range(1, len(lines)):
cv2.line(imgcv, lines[i - 1], lines[i], (255,255,255), thick//3)
Could someone help me out with this? Or should I do something with the data first instead of plugging it straight into cv2.line? If you have any suggestions for using external software rather than Python, that's also welcome (I have frame_id, track_id, x, y, w, h data).
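One thing that stands out (a sketch of an alternative, not tested against the repo): lines is a single deque that is recreated on every call to postprocess, so it never accumulates a per-ID history across frames. Keeping one deque per track ID in a structure that outlives the call would look roughly like this:
from collections import defaultdict, deque
import cv2

# one trail per track ID, kept across frames (module level, not re-created inside postprocess)
trails = defaultdict(lambda: deque(maxlen=64))

def update_and_draw_trail(imgcv, id_num, bbox, thickness=2):
    # append the current box center to this ID's trail and draw it as a polyline
    center = (int(bbox[0] + (bbox[2] - bbox[0]) / 2),
              int(bbox[1] + (bbox[3] - bbox[1]) / 2))
    trails[id_num].appendleft(center)
    pts = trails[id_num]
    for i in range(1, len(pts)):
        cv2.line(imgcv, pts[i - 1], pts[i], (255, 255, 255), thickness)
    return imgcv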
I am trying to calculate the exact (3 cm error is acceptable) distance between an ArUco marker and the camera. I use Python, OpenCV and ArUco. I can detect the markers (the marker side is 0.023 meters, which is 2.3 cm), but I can't interpret the distance, because for a 40 cm distance the norm of the translation vector gives 1 meter. I am so confused about this. Can anyone help? Full code (sorry, not well documented):
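For reference, estimatePoseSingleMarkers returns tvec in the same unit as the marker side length passed to it, so with 0.023 (meters) the norm of tvec should already be the camera-to-marker distance in meters. A minimal sketch, assuming corners, mtx and dist as defined in the code below:
import numpy as np
import cv2.aruco as aruco

# marker side length is given in meters, so tvec comes back in meters as well
rvecs, tvecs, _ = aruco.estimatePoseSingleMarkers(corners, 0.023, mtx, dist)
distance_m = np.linalg.norm(tvecs[0][0])  # camera-to-marker distance
print(distance_m)                         # expected ~0.40 at 40 cm, not ~1.0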
import numpy as np
import cv2
import cv2.aruco as aruco
import glob
import argparse
import math
# Marker id infos. Global to access everywhere. It is unnecessary to change it to local.
firstMarkerID = None
secondMarkerID = None
cap = cv2.VideoCapture(0)
image_width = 0
image_height = 0
#hyper parameters
distanceBetweenTwoMarkers = 0.0245 # in meters, 2.45 cm
oneSideOfTheMarker = 0.023 # in meters, 2.3 cm
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def calibrate(dirpath):
""" Apply camera calibration operation for images in the given directory path. """
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(8,6,0)
objp = np.zeros((6*9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d point in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob(dirpath+'/*.jpg')
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
# If found, add object points, image points (after refining them)
if ret:
objpoints.append(objp)
corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
imgpoints.append(corners2)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (9, 6), corners2, ret)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
return [ret, mtx, dist, rvecs, tvecs]
def saveCoefficients(mtx, dist, path):
""" Save the camera matrix and the distortion coefficients to given path/file. """
cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_WRITE)
cv_file.write("camera_matrix", mtx)
cv_file.write("dist_coeff", dist)
# note you *release* you don't close() a FileStorage object
cv_file.release()
def loadCoefficients(path):
""" Loads camera matrix and distortion coefficients. """
# FILE_STORAGE_READ
cv_file = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)
# note we also have to specify the type to retrieve other wise we only get a
# FileNode object back instead of a matrix
camera_matrix = cv_file.getNode("camera_matrix").mat()
dist_matrix = cv_file.getNode("dist_coeff").mat()
# Debug: print the values
# print("camera_matrix : ", camera_matrix.tolist())
# print("dist_matrix : ", dist_matrix.tolist())
cv_file.release()
return [camera_matrix, dist_matrix]
def inversePerspective(rvec, tvec):
""" Applies perspective transform for given rvec and tvec. """
rvec, tvec = rvec.reshape((3, 1)), tvec.reshape((3, 1))
R, _ = cv2.Rodrigues(rvec)
R = np.matrix(R).T
invTvec = np.dot(R, np.matrix(-tvec))
invRvec, _ = cv2.Rodrigues(R)
invTvec = invTvec.reshape((3, 1))
invTvec = invTvec.reshape((3, 1))
return invRvec, invTvec
def make_1080p():
global image_width
global image_height
image_width = 1920
image_height = 1080
change_res(image_width, image_height)
def make_720p():
global image_width
global image_height
image_width = 1280
image_height = 720
change_res(image_width, image_height)
def make_480p():
global image_width
global image_height
image_width = 640
image_height = 480
change_res(image_width, image_height)
def change_res(width, height):
cap.set(3, width)
cap.set(4, height)
def relativePosition(rvec1, tvec1, rvec2, tvec2):
""" Get relative position for rvec2 & tvec2. Compose the returned rvec & tvec to use composeRT with rvec2 & tvec2 """
rvec1, tvec1 = rvec1.reshape((3, 1)), tvec1.reshape((3, 1))
rvec2, tvec2 = rvec2.reshape((3, 1)), tvec2.reshape((3, 1))
# Inverse the second marker, the right one in the image
invRvec, invTvec = inversePerspective(rvec2, tvec2)
info = cv2.composeRT(rvec1, tvec1, invRvec, invTvec)
composedRvec, composedTvec = info[0], info[1]
composedRvec = composedRvec.reshape((3, 1))
composedTvec = composedTvec.reshape((3, 1))
return composedRvec, composedTvec
def euclideanDistanceOfTvecs(tvec1, tvec2):
return math.sqrt(math.pow(tvec1[0]-tvec2[0], 2) + math.pow(tvec1[1]-tvec2[1], 2) + math.pow(tvec1[2]-tvec2[2], 2))
def euclideanDistanceOfTvec(tvec):
return euclideanDistanceOfTvecs(tvec, [0, 0, 0])
def draw(img, imgpts, color):
""" draw a line between given two points. """
imgpts = np.int32(imgpts).reshape(-1, 2)
for pointf in range(len(imgpts)):
for points in range(len(imgpts)):
img = cv2.line(img, tuple(imgpts[pointf]), tuple(imgpts[points]), color, 3)
return img
def track(matrix_coefficients, distortion_coefficients):
global image_width
global image_height
""" Real time ArUco marker tracking. """
needleComposeRvec, needleComposeTvec = None, None # Composed for needle
ultraSoundComposeRvec, ultraSoundComposeTvec = None, None # Composed for ultrasound
savedNeedleRvec, savedNeedleTvec = None, None # Pure Composed
savedUltraSoundRvec, savedUltraSoundTvec = None, None # Pure Composed
TcomposedRvecNeedle, TcomposedTvecNeedle = None, None
TcomposedRvecUltrasound, TcomposedTvecUltrasound = None, None
make_480p()
while True:
isFirstMarkerDetected = False
isSecondMarkerDetected = False
ret, frame = cap.read()
# operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Change grayscale
aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250) # Use 5x5 dictionary to find markers
parameters = aruco.DetectorParameters_create() # Marker detection parameters
# lists of ids and the corners belonging to each id
corners, ids, rejected_img_points = aruco.detectMarkers(gray, aruco_dict,
parameters=parameters,
cameraMatrix=matrix_coefficients,
distCoeff=distortion_coefficients)
if np.all(ids is not None): # If there are markers found by detector
zipped = zip(ids, corners)
ids, corners = zip(*(sorted(zipped)))
# print(ids)
for i in range(0, len(ids)): # Iterate in markers
# Estimate pose of each marker and return the values rvec and tvec---different from camera coefficients
rvec, tvec, markerPoints = aruco.estimatePoseSingleMarkers(corners[i], oneSideOfTheMarker, matrix_coefficients,
distortion_coefficients)
if ids[i] == firstMarkerID:
firstRvec = rvec
firstTvec = tvec
isFirstMarkerDetected = True
firstMarkerCorners = corners[i]
elif ids[i] == secondMarkerID:
secondRvec = rvec
secondTvec = tvec
isSecondMarkerDetected = True
secondMarkerCorners = corners[i]
(rvec - tvec).any() # get rid of that nasty numpy value array error
# aruco.drawAxis(frame, matrix_coefficients, distortion_coefficients, rvec, tvec, 0.01) # Draw Axis
aruco.drawDetectedMarkers(frame, corners) # Draw A square around the markers
''' First try
if isFirstMarkerDetected and isSecondMarkerDetected:
composedRvec, composedTvec = relativePosition(firstRvec, firstTvec, secondRvec, secondTvec)
info = cv2.composeRT(composedRvec, composedTvec, secondRvec.T, secondTvec.T)
composedRvec, composedTvec = info[0], info[1]
composedRvec, composedTvec = composedRvec.T, composedTvec.T
differenceRvec, differenceTvec = composedRvec-secondRvec, composedTvec-secondTvec
# print infos
print("composed Rvec: ", composedRvec)
print("composed Tvec: ", composedTvec)
print("Second marker Rvec: ", secondRvec)
print("Second marker Tvec: ", secondTvec)
print("differenceRvec: ", differenceRvec)
print("differenceTvec: ", differenceTvec)
print("real difference: ", euclideanDistanceOfTvecs(composedTvec[0], secondTvec[0][0]))
# draw axis to estimated location
aruco.drawAxis(frame, mtx, dist, composedRvec, composedTvec, 0.0115)
realDistanceInTvec = euclideanDistanceOfTvec(secondTvec[0][0])
difference = euclideanDistanceOfTvecs(composedTvec[0], secondTvec[0][0])
calculatedDistance = realDistanceInTvec * (distanceBetweenTwoMarkers / difference)
calculatedDistance = realDistanceInTvec * (distanceBetweenTwoMarkers / (secondTvec[0][0][2] - firstTvec[0][0][2]))
print(calculatedDistance)
'''
if isFirstMarkerDetected and isSecondMarkerDetected:
composedRvec, composedTvec = relativePosition(firstRvec, firstTvec, secondRvec, secondTvec)
camerafirstRvec, cameraFirstTvec = inversePerspective(firstRvec, firstTvec)
camerasecondRvec, camerasecondTvec = inversePerspective(secondRvec, secondTvec)
differenceRvec, differenceTvec = camerafirstRvec - camerasecondRvec, cameraFirstTvec - camerasecondTvec
# print infos
print("first Rvec: ", camerafirstRvec)
print("first Tvec: ", cameraFirstTvec)
print("Second marker Rvec: ", camerasecondRvec)
print("Second marker Tvec: ", camerasecondTvec)
# print("differenceRvec: ", differenceRvec)
# print("differenceTvec: ", differenceTvec)
realDistanceInTvec = euclideanDistanceOfTvec(secondTvec[0][0])
print(cv2.norm(secondTvec[0][0]))
difference = euclideanDistanceOfTvecs(composedTvec.T[0], secondTvec[0][0])
calculatedDistance = realDistanceInTvec * (distanceBetweenTwoMarkers / difference)
# print(calculatedDistance)
# Display the resulting frame
cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
cv2.resizeWindow('frame', image_width, image_height)
cv2.imshow('frame', frame)
# Wait 3 milliseconds for a key press. Check the key and do the corresponding job.
key = cv2.waitKey(3) & 0xFF
if key == ord('q'): # Quit
break
elif key == ord('p'): # print necessary information here
pass # Insert necessary print here
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Aruco Marker Tracking')
parser.add_argument('--coefficients', metavar='bool', required=True,
help='File name for matrix coefficients and distortion coefficients')
parser.add_argument('--firstMarker', metavar='int', required=True,
help='first')
parser.add_argument('--secondMarker', metavar='int', required=True,
help='second')
# Parse the arguments and take action for that.
args = parser.parse_args()
firstMarkerID = int(args.firstMarker)
secondMarkerID = int(args.secondMarker)
if args.coefficients == '1':
mtx, dist = loadCoefficients("test.yaml")
ret = True
else:
ret, mtx, dist, rvecs, tvecs = calibrate("calib_images")
saveCoefficients(mtx, dist, "calibrationCoefficients.yaml")
print("Calibration is completed. Starting tracking sequence.")
if ret:
track(mtx, dist)
I got the answer. The problem was in the calibration. When calibrating with the chessboard, I gave the object points like this:
(0,0,0), (1,0,0) and so on.
The thing is, for pose estimation the camera must be calibrated properly. My chessboard square size was 1.5 centimeters, which means 0.015 meters. I changed the object point matrix to:
(0,0,0), (0.015,0,0) and so on.
So I told the program that the calibration is in meters. If you do the calibration with a different object points matrix than it should be, the pose estimation fails. That was mentioned in the OpenCV documentation, but I missed it. The docs just say "you can pass them like that", and I didn't realize it would make the pose estimation fail.
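In code, the change described above amounts to scaling the chessboard object points by the real square size inside calibrate(); a minimal sketch:
import numpy as np

square_size = 0.015  # chessboard square edge in meters (1.5 cm)
# object points for the 9x6 inner-corner board, now expressed in meters:
# (0, 0, 0), (0.015, 0, 0), (0.030, 0, 0), ...
objp = np.zeros((6 * 9, 3), np.float32)
objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2) * square_size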