I am working with VSCode 1.68.1, Ubuntu 20.04.
I am following this link:
https://courses.ece.cornell.edu/ece5990/ECE5725_Fall2020_Projects/Dec_21_Demo/Drawing%20Robot/eam348_mm2994_W/index.html
from unittest import result
import numpy as np
import time
import cv2
import sys
import cv2.aruco as aruco
import socket
import datetime
import glob
import math
import multiprocessing as mp
port = 30003
IP = '192.11.0.25'
robot_ID = 20185500976
robot = 'ur-20185500976'
marker_dimension =0.06
worldx = 390
worldy = 260
bottom_left = 31 #this is the origin - positivex: towards bottom right - positivey: towards top left
bottom_right = 32
top_left = 9
top_right = 20
#camera dist, matrix and newcameramatrix
dist=np.array(([[5.0164361897882787e-02, 6.6308284023737640e-01, 2.5493975084043882e-03, -6.0403656948007376e-03, -2.9652221208277720e+00]]))
mtx=np.array([[6.1618286891135097e+02, 0., 3.2106366551961219e+02],
[0 , 6.1595924417559945e+02, 2.4165645046034246e+02],
[0. , 0. , 1. ]])
found_dict_pixel_space = {}
found_dict_camera_space = {}
found_dict_world_space = {}
found_dict_homography_space = {}
final_string = ""
originRvec = np.array([0,0,1])
markerRvec= np.array([0,0,0])
def UDP(IP,port,message):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) #IPv4 DNS server - UDP protocol
sock.sendto(bytes(message, "utf-8"), (IP,port)) #self, data, address
def getMarkerCenter(corners):
px = (corners[0][0] + corners[1][0] + corners[2][0]+ corners[3][0]) * 0.25
py = (corners[0][1] + corners[1][1] + corners[2][1]+ corners[3][1]) * 0.25
return [px,py]
def getMarkerRotation(corners):
unit_x_axis = [1.,0.]
center = getMarkerCenter(corners)
right_edge_midpoint = (corners[1]+corners[2])/2.
unit_vec = (right_edge_midpoint-center)/np.linalg.norm(right_edge_midpoint-center)
angle = np.arccos(np.dot(unit_x_axis,unit_vec))
return angle
def inversePerspective(rvec, tvec):
R, _ = cv2.Rodrigues(rvec)
R = np.array(R).T #this was np.matrix but had error
invTvec = np.dot(-R, np.array(tvec))
invRvec, _ = cv2.Rodrigues(R)
return invRvec, invTvec
def normalize(v):
if np.linalg.norm(v) == 0 : return v
return v / np.linalg.norm(v)
def findWorldCoordinate(originCorners, point):
zero = np.array(originCorners[3]) #bottom left as the origin - check the data structure
print(zero)
x = (np.array(originCorners[0]) - zero) # bottom right - Green Axis -- throw out z
y = (np.array(originCorners[1]) - zero) # top left - Red Axis -- throw out z
x = x[0][0:2]
y = y[0][0:2]
x = normalize(x)
y = normalize(y)
#print("x", x)
vec = (point - zero)[0][0:2]
#print("vec", vec)
vecNormal = normalize(vec)
cosX = np.dot(x,vecNormal)
cosY = np.dot(y,vecNormal)
xW = np.linalg.norm(vec) * cosX
yW = np.linalg.norm(vec) * cosY
return [xW, yW]
cap=cv2.VideoCapture(4)
font = cv2.FONT_HERSHEY_SIMPLEX #font for displaying text (below)
while True:
t0 = time.time()
ret, frame = cap.read()
h, w = frame.shape[:2]
#new image size to generate
h1, w1 = h, w
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w,h), 0, (w1,h1))
#print(newcameramtx)
#mapx, mapy = cv2.initUndistortRectifyMap(mtx, dist, None, newcameramtx, (w1,h1), 5)
#dst1 = cv2.remap(frame, mapx, mapy, cv2.INTER_LINEAR)
dst1 = cv2.undistort(frame, mtx, dist, None, newcameramtx)
x, y, w1, h1 = roi
dst1 = dst1[y:y + h1, x:x + w1]
frame=dst1
t1 = time.time()-t0
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
arucoParameters = aruco.DetectorParameters_create()
t2 = time.time()-t0
data = aruco.detectMarkers(gray, aruco_dict, parameters=arucoParameters)
t3 = time.time()-t0
corners = data[0]
ids = data[1]
originIDglobal = 0
# proceed only if at least one marker ID was detected
if ids is not None:
t4 = time.time()-t0
result = aruco.estimatePoseSingleMarkers(corners, marker_dimension, newcameramtx, dist)
rvecs = result[0] # rotation vectors of markers
tvecs = result[1] # translation vector of markers
#setting bottom_left as the origin
if bottom_left in ids:
originID = np.where(ids == bottom_left)[0][0]
originIDglobal = originID
else:
originID = originIDglobal
originCorners = corners[originID] # corners of the tag set as the origin
originCornersCamera = getCornerInCameraWorld(marker_dimension, rvecs[originID], tvecs[originID])[0] # origin tag corners in camera space
originRvec = rvecs[originID] # rotation vec of origin tag
originTvec = tvecs[originID] # translation vec of origin tag
display = aruco.drawDetectedMarkers(frame, corners,ids) #Draw a square around the markers
t5 = time.time()-t0
for i in range(len(ids)):
ID = ids[i]
rvec = rvecs[i]
tvec = tvecs[i]
corners4 = corners[i]
display = cv2.drawFrameAxes(frame, newcameramtx, dist, rvec, tvec,0.03)#Draw 3D Axis, 3cm(0.03)
found_dict_pixel_space[""+str(ids[i][0])] = corners4 # put the corners of this tag in the dictionary
# Homography
zero = found_dict_pixel_space[str(bottom_left)][0][3] #bottom left - 3
x = found_dict_pixel_space[str(bottom_right)][0][2] #bottom right - 27
y = found_dict_pixel_space[str(top_left)][0][0] #top left - 22
xy = found_dict_pixel_space[str(top_right)][0][1] #top right - 24
workspace_world_corners = np.array([[0.0, 0.0], [worldx, 0.0], [0.0, worldy], [worldx, worldy]], np.float32) # 4 corners in millimeters
workspace_pixel_corners = np.array([zero,x,y,xy], np.float32) # 4 corners in pixels
# Homography Matrix
h, status = cv2.findHomography(workspace_pixel_corners, workspace_world_corners) #perspective matrix
t6=time.time()-t0
im_out = cv2.warpPerspective(frame, h, (worldx,worldy)) #showing that it works
t7 = time.time()-t0
for i in range(len(ids)):
j = ids[i][0]
corners_pix = found_dict_pixel_space[str(j)]#[0]
corners_pix_transformed = cv2.perspectiveTransform(corners_pix,h)
found_dict_homography_space[str(j)] = corners_pix_transformed
print(found_dict_homography_space)
robot = found_dict_homography_space[str(robot_ID)][0]
print(getMarkerCenter(robot))
cv2.imshow('Warped Source Image', im_out)
t8=time.time()-t0
print("t1: %8.4f t2: %8.4f t3: %8.4f t4: %8.4f t5: %8.4f t6: %8.4f t7: %8.4f t8: %8.4f" %(t1,t2-t1,t3-t2,t4-t3,t5-t4,t6-t5,t7-t6,t8-t7))
else:
display = frame
cv2.imshow('Display', display)
# Display result frame
cv2.imshow("frame",frame)
key = cv2.waitKey(1)
if key == ord("q"):
break
cap.release()
cv2.destroyAllWindows()
I am getting the error: "NameError: name 'getCornerInCameraWorld' is not defined"
I am unable to find anything related to getCornerInCameraWorld.
Please provide some help.
I got a reply from the author of the code. She confirmed that the code was missing a part and provided it.
The missing code was:
def getCornerInCameraWorld(size, rvec, tvec):
half_size = size * 0.5
rotMatrix, jacobian = cv2.Rodrigues(rvec) #convert rot vector from marker space to camera space
X = half_size * rotMatrix[:,0]
Y = half_size * rotMatrix[:,1]
c1 = np.add(np.add(-1*X,Y), tvec) #top left
c2 = np.add(np.add(X, Y), tvec) #top right
c3 = np.add(np.add(X, -1*Y), tvec) # bottom right
c4 = np.add(np.add(-1*X, -1*Y), tvec) # bottom left
cornersInCameraWorld = [c1,c2,c3,c4]
cornersInCameraWorld = np.array(cornersInCameraWorld, dtype=np.float32)
return cornersInCameraWorld, rotMatrix
I have checked, and it is working for me.
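For completeness, here is how the provided function plugs into the detection loop above. This is only a minimal usage sketch of my own (the variable names are the ones already used in the main code), not something supplied by the author:
# reuse the pose estimates from estimatePoseSingleMarkers for a detected marker index i
result = aruco.estimatePoseSingleMarkers(corners, marker_dimension, newcameramtx, dist)
rvecs, tvecs = result[0], result[1]
# corner positions of marker i in camera space (metres) plus the marker's rotation matrix
corners_cam, rot_matrix = getCornerInCameraWorld(marker_dimension, rvecs[i], tvecs[i])
print(corners_cam)  # corner order: top left, top right, bottom right, bottom left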
I made a simple ray tracer Python program that takes an XML file as input and creates a PNG image file.
In the Python file, I used xml.etree.ElementTree and implemented a parser to read the XML file.
I'm trying to run the program in cmd like this:
c:\WINDOWS\system32>python3 rayTracer.py scenes/one-sphere.xml
However, it doesn't create an image.
Did I use the wrong command in cmd?
(The one-sphere.xml file is in the scenes folder.)
This is my main function:
def main():
#parser
tree = ET.parse(sys.argv[1])
root = tree.getroot()
viewPoint = np.array([0, 0, 0]).astype(np.float)
viewDir = np.array([0, 0, -1]).astype(np.float)
viewUp = np.array([0, 1, 0]).astype(np.float)
viewProjNormal = -1 * viewDir
viewWidth = 1.0
viewHeight = 1.0
projDistance = 1.0
intensity = np.array([1, 1, 1]).astype(np.float) # how bright the light is.
imgSize = np.array(root.findtext('image').split()).astype(np.int)
list = []
light = []
for c in root.findall('camera'):
viewPoint = np.array(c.findtext('viewPoint').split()).astype(np.float)
viewDir = np.array(c.findtext('viewDir').split()).astype(np.float)
if (c.findtext('projNormal')):
viewProjNormal = np.array(c.findtext('projNormal').split()).astype(np.float)
viewUp = np.array(c.findtext('viewUp').split()).astype(np.float)
if (c.findtext('projDistance')):
projDistance = np.array(c.findtext('projDistance').split()).astype(np.float)
viewWidth = np.array(c.findtext('viewWidth').split()).astype(np.float)
viewHeight = np.array(c.findtext('viewHeight').split()).astype(np.float)
view = View(viewPoint, viewDir, viewUp, viewProjNormal, viewWidth, viewHeight, projDistance, intensity)
for c in root.findall('surface'):
type_c = c.get('type')
if type_c == 'Sphere':
center_c = np.array(c.findtext('center').split()).astype(np.float)
radius_c = np.array(c.findtext('radius')).astype(np.float)
ref = ''
for child in c:
if child.tag == 'shader':
ref = child.get('ref')
for d in root.findall('shader'):
if d.get('name') == ref:
diffuse_d = np.array(d.findtext('diffuseColor').split()).astype(np.float)
type_d = d.get('type')
for c in root.findall('light'):
position_c = np.array(c.findtext('position').split()).astype(np.float)
intensity_c = np.array(c.findtext('intensity').split()).astype(np.float)
light.append(Light(position_c, intensity_c))
# Create an empty image
channels = 3
img = np.zeros((imgSize[1], imgSize[0], channels), dtype=np.uint8)
img[:, :] = 0
pixel_x = view.viewWidth / imgSize[0]
pixel_y = view.viewHeight / imgSize[1]
w = view.viewDir
u = np.cross(w, view.viewUp)
v = np.cross(w, u)
w_unit = w / np.sqrt(np.sum(w * w))
u_unit = u / np.sqrt(np.sum(u * u))
v_unit = v / np.sqrt(np.sum(v * v))
start = w_unit * view.projDistance - u_unit * pixel_x * ((imgSize[0]/2) + 1/2) - v_unit * pixel_y * ((imgSize[1]/2) + 1/2)
for x in np.arange(imgSize[0]):
for y in np.arange(imgSize[1]):
ray = start + u_unit * x * pixel_x + pixel_y * y * v_unit
tmp = raytrace(list, ray, view.viewPoint)
img[y][x] = shade(tmp[0], ray, view, list, tmp[1], light)
rawimg = Image.fromarray(img, 'RGB')
rawimg.save(sys.argv[1] + '.png')
if __name__ == "__main__":
main()
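Edit: one thing I suspect (not verified) is that main() saves the result with a path relative to the current working directory, so when the script is run from c:\WINDOWS\system32 the PNG gets resolved against that folder and may fail to write or end up somewhere I am not looking. A small sketch I plan to try, using only the variables already in main(), to see where the file actually lands:
import os
out_path = sys.argv[1] + '.png'                 # e.g. scenes/one-sphere.xml.png
print("Saving to:", os.path.abspath(out_path))  # resolved against the current working directory
rawimg.save(out_path)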
I recently tried the new fingerprint feature extractor library by Utkarsh-Deshmukh (https://github.com/Utkarsh-Deshmukh/Fingerprint-Feature-Extraction) and it works like a charm. The problem is that I need to extract the termination and bifurcation values from the library. I used this code, which is included in the GitHub link, to get the bifurcation and termination features:
import cv2
import fingerprint_feature_extractor
img = cv2.imread('image_path', 0)
FeaturesTerminations, FeaturesBifurcations = fingerprint_feature_extractor.extract_minutiae_features(img, showResult=True, spuriousMinutiaeThresh=10)
I tried using the print() command to see what's inside FeaturesBifurcations, and I can't understand what the output means.
(screenshot: print() output of FeaturesBifurcations)
What I need are values that look like this, where the first and second columns are the xy coordinates, the third column is the orientation, and the fourth column is the type:
(screenshot: minutiae bifurcations, marked with 1 in the last column, and terminations, marked with 0 in the last column, extracted from a fingerprint)
I tried reading the classes in the library, and I figure I could get those values (the feature locations, orientations, and types are explicitly stated in the library), but I do not know how to extract them. This is what is inside the fingerprint-feature-extractor library:
import cv2
import numpy as np
import skimage.morphology
from skimage.morphology import convex_hull_image, erosion
from skimage.morphology import square
import math
class MinutiaeFeature(object):
def __init__(self, locX, locY, Orientation, Type):
self.locX = locX;
self.locY = locY;
self.Orientation = Orientation;
self.Type = Type;
class FingerprintFeatureExtractor(object):
def __init__(self):
self._mask = []
self._skel = []
self.minutiaeTerm = []
self.minutiaeBif = []
def __skeletonize(self, img):
img = np.uint8(img > 128)
self._skel = skimage.morphology.skeletonize(img)
self._skel = np.uint8(self._skel) * 255
self._mask = img * 255
def __computeAngle(self, block, minutiaeType):
angle = []
(blkRows, blkCols) = np.shape(block);
CenterX, CenterY = (blkRows - 1) / 2, (blkCols - 1) / 2
if (minutiaeType.lower() == 'termination'):
sumVal = 0;
for i in range(blkRows):
for j in range(blkCols):
if ((i == 0 or i == blkRows - 1 or j == 0 or j == blkCols - 1) and block[i][j] != 0):
angle.append(-math.degrees(math.atan2(i - CenterY, j - CenterX)))
sumVal += 1
if (sumVal > 1):
angle.append(float('nan'))
return (angle)
elif (minutiaeType.lower() == 'bifurcation'):
(blkRows, blkCols) = np.shape(block);
CenterX, CenterY = (blkRows - 1) / 2, (blkCols - 1) / 2
angle = []
sumVal = 0;
for i in range(blkRows):
for j in range(blkCols):
if ((i == 0 or i == blkRows - 1 or j == 0 or j == blkCols - 1) and block[i][j] != 0):
angle.append(-math.degrees(math.atan2(i - CenterY, j - CenterX)))
sumVal += 1
if (sumVal != 3):
angle.append(float('nan'))
return (angle)
def __getTerminationBifurcation(self):
self._skel = self._skel == 255;
(rows, cols) = self._skel.shape;
self.minutiaeTerm = np.zeros(self._skel.shape);
self.minutiaeBif = np.zeros(self._skel.shape);
for i in range(1, rows - 1):
for j in range(1, cols - 1):
if (self._skel[i][j] == 1):
block = self._skel[i - 1:i + 2, j - 1:j + 2];
block_val = np.sum(block);
if (block_val == 2):
self.minutiaeTerm[i, j] = 1;
elif (block_val == 4):
self.minutiaeBif[i, j] = 1;
self._mask = convex_hull_image(self._mask > 0)
self._mask = erosion(self._mask, square(5)) # Structuing element for mask erosion = square(5)
self.minutiaeTerm = np.uint8(self._mask) * self.minutiaeTerm
def __removeSpuriousMinutiae(self, minutiaeList, img, thresh):
img = img * 0;
SpuriousMin = [];
numPoints = len(minutiaeList);
D = np.zeros((numPoints, numPoints))
for i in range(1,numPoints):
for j in range(0, i):
(X1,Y1) = minutiaeList[i]['centroid']
(X2,Y2) = minutiaeList[j]['centroid']
dist = np.sqrt((X2-X1)**2 + (Y2-Y1)**2);
D[i][j] = dist
if(dist < thresh):
SpuriousMin.append(i)
SpuriousMin.append(j)
SpuriousMin = np.unique(SpuriousMin)
for i in range(0,numPoints):
if(not i in SpuriousMin):
(X,Y) = np.int16(minutiaeList[i]['centroid']);
img[X,Y] = 1;
img = np.uint8(img);
return(img)
def __cleanMinutiae(self, img):
self.minutiaeTerm = skimage.measure.label(self.minutiaeTerm, connectivity=2);
RP = skimage.measure.regionprops(self.minutiaeTerm)
self.minutiaeTerm = self.__removeSpuriousMinutiae(RP, np.uint8(img), 10);
def __performFeatureExtraction(self):
FeaturesTerm = []
self.minutiaeTerm = skimage.measure.label(self.minutiaeTerm, connectivity=2);
RP = skimage.measure.regionprops(np.uint8(self.minutiaeTerm))
WindowSize = 2 # --> For Termination, the block size must can be 3x3, or 5x5. Hence the window selected is 1 or 2
FeaturesTerm = []
for num, i in enumerate(RP):
print(num)
(row, col) = np.int16(np.round(i['Centroid']))
block = self._skel[row - WindowSize:row + WindowSize + 1, col - WindowSize:col + WindowSize + 1]
angle = self.__computeAngle(block, 'Termination')
if(len(angle) == 1):
FeaturesTerm.append(MinutiaeFeature(row, col, angle, 'Termination'))
FeaturesBif = []
self.minutiaeBif = skimage.measure.label(self.minutiaeBif, connectivity=2);
RP = skimage.measure.regionprops(np.uint8(self.minutiaeBif))
WindowSize = 1 # --> For Bifurcation, the block size must be 3x3. Hence the window selected is 1
for i in RP:
(row, col) = np.int16(np.round(i['Centroid']))
block = self._skel[row - WindowSize:row + WindowSize + 1, col - WindowSize:col + WindowSize + 1]
angle = self.__computeAngle(block, 'Bifurcation')
if(len(angle) == 3):
FeaturesBif.append(MinutiaeFeature(row, col, angle, 'Bifurcation'))
return (FeaturesTerm, FeaturesBif)
def extractMinutiaeFeatures(self, img):
self.__skeletonize(img)
self.__getTerminationBifurcation()
self.__cleanMinutiae(img)
FeaturesTerm, FeaturesBif = self.__performFeatureExtraction()
return(FeaturesTerm, FeaturesBif)
def showResults(self):
BifLabel = skimage.measure.label(self.minutiaeBif, connectivity=2);
TermLabel = skimage.measure.label(self.minutiaeTerm, connectivity=2);
minutiaeBif = TermLabel * 0;
minutiaeTerm = BifLabel * 0;
(rows, cols) = self._skel.shape
DispImg = np.zeros((rows, cols, 3), np.uint8)
DispImg[:, :, 0] = 255*self._skel;
DispImg[:, :, 1] = 255*self._skel;
DispImg[:, :, 2] = 255*self._skel;
RP = skimage.measure.regionprops(BifLabel)
for idx, i in enumerate(RP):
(row, col) = np.int16(np.round(i['Centroid']))
minutiaeBif[row, col] = 1;
(rr, cc) = skimage.draw.circle_perimeter(row, col, 3);
skimage.draw.set_color(DispImg, (rr, cc), (255, 0, 0));
RP = skimage.measure.regionprops(TermLabel)
for idx, i in enumerate(RP):
(row, col) = np.int16(np.round(i['Centroid']))
minutiaeTerm[row, col] = 1;
(rr, cc) = skimage.draw.circle_perimeter(row, col, 3);
skimage.draw.set_color(DispImg, (rr, cc), (0, 0, 255));
cv2.imshow('a', DispImg);
cv2.waitKey(0)
def extract_minutiae_features(img, showResult=False):
feature_extractor = FingerprintFeatureExtractor()
FeaturesTerm, FeaturesBif = feature_extractor.extractMinutiaeFeatures(img)
if(showResult):
feature_extractor.showResults()
return(FeaturesTerm, FeaturesBif)
Is there a way to extract the location, orientation, and type values (as floats and/or integers) from this library? Or is there a way, or a library, to plot those lists in Cartesian or polar coordinates?
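Based on the MinutiaeFeature class shown above (it stores locX, locY, Orientation and Type), I imagine something like the following rough sketch could build the N x 4 array I described. The attribute names come straight from the library code; the 0/1 type encoding and the averaging of the three bifurcation angles are just my own assumptions:
import numpy as np

def features_to_array(terminations, bifurcations):
    # each row: [x, y, orientation, type] with type 0 = termination, 1 = bifurcation
    rows = []
    for f in terminations:
        # terminations carry a single angle in their Orientation list
        rows.append([f.locX, f.locY, f.Orientation[0], 0])
    for f in bifurcations:
        # bifurcations carry three angles; taking the mean is an arbitrary choice
        rows.append([f.locX, f.locY, np.mean(f.Orientation), 1])
    return np.array(rows, dtype=float)

minutiae = features_to_array(FeaturesTerminations, FeaturesBifurcations)
print(minutiae)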
I'm trying to create trails and a map of trails for every ID, like in this video: https://www.youtube.com/watch?v=tq0BgncuMhs
So far I haven't been able to. I'm currently using this repo from bendidi https://github.com/bendidi/Tracking-with-darkflow, which I modified to also show the trails.
I did try using cv2.line with the coordinates extracted from track.to_tlbr(), but right now the result looks like this:
Here's the code that I modified to get the current result:
darkflow/darkflow/net/yolov2/predict.py
from collections import deque
import numpy as np
import math
import cv2
import os
import json
#from scipy.special import expit
#from utils.box import BoundBox, box_iou, prob_compare
#from utils.box import prob_compare2, box_intersection
from ...utils.box import BoundBox
from ...cython_utils.cy_yolo2_findboxes import box_constructor
ds = True
try :
from deep_sort.application_util import preprocessing as prep
from deep_sort.application_util import visualization
from deep_sort.deep_sort.detection import Detection
except :
ds = False
def expit(x):
return 1. / (1. + np.exp(-x))
def _softmax(x):
e_x = np.exp(x - np.max(x))
out = e_x / e_x.sum()
return out
def findboxes(self, net_out):
# meta
meta = self.meta
boxes = list()
boxes=box_constructor(meta,net_out)
return boxes
def extract_boxes(self,new_im):
cont = []
new_im=new_im.astype(np.uint8)
ret, thresh=cv2.threshold(new_im, 127, 255, 0)
p, contours, hierarchy=cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for i in range(0, len(contours)):
cnt=contours[i]
x, y, w, h=cv2.boundingRect(cnt)
if w*h > 30**2 and ((w < new_im.shape[0] and h <= new_im.shape[1]) or (w <= new_im.shape[0] and h < new_im.shape[1])):
if self.FLAGS.tracker == "sort":
cont.append([x, y, x+w, y+h])
else : cont.append([x, y, w, h])
return cont
def postprocess(self,net_out, im,frame_id = 0,csv_file=None,csv=None,mask = None,encoder=None,tracker=None):
"""
Takes net output, draw net_out, save to disk
"""
boxes = self.findboxes(net_out)
# meta
meta = self.meta
nms_max_overlap = 0.1
threshold = meta['thresh']
colors = meta['colors']
labels = meta['labels']
if type(im) is not np.ndarray:
imgcv = cv2.imread(im)
else: imgcv = im
h, w, _ = imgcv.shape
thick = int((h + w) // 300)
resultsForJSON = []
if not self.FLAGS.track :
for b in boxes:
boxResults = self.process_box(b, h, w, threshold)
if boxResults is None:
continue
left, right, top, bot, mess, max_indx, confidence = boxResults
if self.FLAGS.json:
resultsForJSON.append({"label": mess, "confidence": float('%.2f' % confidence), "topleft": {"x": left, "y": top}, "bottomright": {"x": right, "y": bot}})
continue
if self.FLAGS.display or self.FLAGS.saveVideo:
cv2.rectangle(imgcv,
(left, top), (right, bot),
colors[max_indx], thick)
cv2.putText(imgcv, mess, (left, top - 12),
0, 1e-3 * h, colors[max_indx],thick//3)
else :
if not ds :
print("ERROR : deep sort or sort submodules not found for tracking please run :")
print("\tgit submodule update --init --recursive")
print("ENDING")
exit(1)
detections = []
scores = []
lines = deque(maxlen=64)
for b in boxes:
boxResults = self.process_box(b, h, w, threshold)
if boxResults is None:
continue
left, right, top, bot, mess, max_indx, confidence = boxResults
if mess not in self.FLAGS.trackObj :
continue
if self.FLAGS.tracker == "deep_sort":
detections.append(np.array([left,top,right-left,bot-top]).astype(np.float64))
scores.append(confidence)
elif self.FLAGS.tracker == "sort":
detections.append(np.array([left,top,right,bot]).astype(np.float64))
if len(detections) < 3 and self.FLAGS.BK_MOG:
detections = detections + extract_boxes(self,mask)
detections = np.array(detections)
if detections.shape[0] == 0 :
return imgcv
if self.FLAGS.tracker == "deep_sort":
scores = np.array(scores)
features = encoder(imgcv, detections.copy())
detections = [
Detection(bbox, score, feature) for bbox,score, feature in
zip(detections,scores, features)]
# Run non-maxima suppression.
boxes = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
indices = prep.non_max_suppression(boxes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
tracker.predict()
tracker.update(detections)
trackers = tracker.tracks
elif self.FLAGS.tracker == "sort":
trackers = tracker.update(detections)
for track in trackers:
if self.FLAGS.tracker == "deep_sort":
if not track.is_confirmed() or track.time_since_update > 1:
continue
bbox = track.to_tlbr()
center = (int(bbox[0]) + ((int(bbox[2]) - int(bbox[0])) // 2)), (int(bbox[1]) + ((int(bbox[3]) - int(bbox[1])) // 2)) # X + Width / 2, Y + Height / 2
lines.appendleft(center)
id_num = str(track.track_id)
elif self.FLAGS.tracker == "sort":
bbox = [int(track[0]),int(track[1]),int(track[2]),int(track[3])]
id_num = str(int(track[4]))
if self.FLAGS.csv:
csv.writerow([frame_id,id_num,int(bbox[0]),int(bbox[1]),int(bbox[2])-int(bbox[0]),int(bbox[3])-int(bbox[1])])
csv_file.flush()
if self.FLAGS.display or self.FLAGS.saveVideo:
cv2.rectangle(imgcv, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),
(255,255,255), thick//3)
cv2.putText(imgcv, id_num,(int(bbox[0]), int(bbox[1]) - 12),0, 1e-3 * h, (255,255,255),thick//6)
for i in range(1, len(lines)):
cv2.line(imgcv, lines[i - 1], lines[i], (255,255,255), thick//3)
return imgcv
Or just the code that I added:
...
lines = deque(maxlen=64)
...
center = (int(bbox[0]) + ((int(bbox[2]) - int(bbox[0])) // 2)), (int(bbox[1]) + ((int(bbox[3]) - int(bbox[1])) // 2)) # X + Width / 2, Y + Height / 2
lines.appendleft(center)
...
for i in range(1, len(lines)):
cv2.line(imgcv, lines[i - 1], lines[i], (255,255,255), thick//3)
Could someone help me out with this? Or should I do something with the data first instead of plugging it straight into cv2.line? If you have any suggestions for external software rather than Python, that's also welcome (I have frame_id, track_id, x, y, w, h data).
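One direction I am considering (only a sketch, not yet tested against the repo): keep one deque per track ID instead of the single shared lines deque, so every ID gets its own trail. Here center, id_num, imgcv and thick are the variables already defined in the loop above, and the trails dict would have to live outside postprocess() so that the points survive between frames:
from collections import defaultdict, deque

# module level: one trail (up to 64 points) per track ID
trails = defaultdict(lambda: deque(maxlen=64))

# inside the per-track loop, after center and id_num are computed:
trails[id_num].appendleft(center)
pts = trails[id_num]
for i in range(1, len(pts)):
    cv2.line(imgcv, pts[i - 1], pts[i], (255, 255, 255), thick // 3)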
Hi guys, I have been working on a small program in Python using the OpenCV library and two webcams, so that I can measure the distance between these two cameras and the object right in front of them (using the disparity map). When I run the program I get the result as a matrix, but instead of a single number (which is supposed to be the distance) I get many different numbers. Besides, I always get one number which doesn't change even if I change the view. Can anybody tell me why?
Here is the code:
import numpy as np
import cv2 as cv
import cv2.cv as cv1
from VideoCapture import Device
import os
def caliLeftCam():
args, img_mask = getopt.getopt(sys.argv[1:], '', ['save=', 'debug=', 'square_size='])
args = dict(args)
try: img_mask = img_mask[0]
except: img_mask = '../cpp/img*.jpg'
img_names = glob(img_mask)
debug_dir = args.get('--debug')
square_size = float(args.get('--square_size', 1.0))
pattern_size = (7, 5)
pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= square_size
obj_points = []
img_pointsL = []
h, w = 0, 0
for fn in img_names:
print "processing %s..." % fn,
imgL = cv.imread(fn, 0)
h, w = imgL.shape[:2]
found, corners = cv.findChessboardCorners(imgL, pattern_size)
if found:
term = ( cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1 )
cv.cornerSubPix(imgL, corners, (5, 5), (-1, -1), term)
if debug_dir:
vis = cv.cvtColor(imgL, cv.COLOR_GRAY2BGR)
cv.drawChessboardCorners(vis, pattern_size, corners, found)
path, name, ext = splitfn(fn)
cv.imwrite('%s/%s_chess.bmp' % (debug_dir, name), vis)
if not found:
print "chessboard not found"
continue
img_pointsL.append(corners.reshape(-1, 2))
obj_points.append(pattern_points)
print 'ok'
rmsL, cameraL_matrix, dist_coefsL, rvecsL, tvecsL = cv.calibrateCamera(obj_points, img_pointsL, (w, h))
print "RMSL:", rmsL
print "Left camera matrix:\n", cameraL_matrix
print "distortion coefficients: ", dist_coefsL.ravel()
newcameramtxL, roi=cv.getOptimalNewCameraMatrix(cameraL_matrix,dist_coefsL,(w,h),1,(w,h))
#undistort
mapxL,mapyL = cv.initUndistortRectifyMap(cameraL_matrix,dist_coefsL,None,newcameramtxL,(w,h),5)
dstL = cv.remap(imgL,mapxL,mapyL,cv.INTER_LINEAR)
return img_pointsL, cameraL_matrix, dist_coefsL
def caliRightCam():
args, img_mask = getopt.getopt(sys.argv[1:], '', ['save=', 'debug=', 'square_size='])
args = dict(args)
try: img_mask = img_mask[0]
except: img_mask = '../cpp/ph*.jpg'
img_names = glob(img_mask)
debug_dir = args.get('--debug')
square_size = float(args.get('--square_size', 1.0))
pattern_size = (7, 5)
pattern_points = np.zeros( (np.prod(pattern_size), 3), np.float32 )
pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
pattern_points *= square_size
obj_points = []
img_pointsR = []
h, w = 0, 0
for fn in img_names:
print "processing %s..." % fn,
imgR = cv.imread(fn, 0)
h, w = imgR.shape[:2]
found, corners = cv.findChessboardCorners(imgR, pattern_size)
if found:
term = ( cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_COUNT, 30, 0.1 )
cv.cornerSubPix(imgR, corners, (5, 5), (-1, -1), term)
if debug_dir:
vis = cv.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv.drawChessboardCorners(vis, pattern_size, corners, found)
path, name, ext = splitfn(fn)
cv.imwrite('%s/%s_chess.bmp' % (debug_dir, name), vis)
if not found:
print "chessboard not found"
continue
img_pointsR.append(corners.reshape(-1, 2))
obj_points.append(pattern_points)
print 'ok'
rmsR, cameraR_matrix, dist_coefsR, rvecsR, tvecsR = cv.calibrateCamera(obj_points, img_pointsR, (w, h))
print "RMSR:", rmsR
print "Right camera matrix:\n", cameraR_matrix
print "distortion coefficients: ", dist_coefsR.ravel()
newcameramtxR, roi=cv.getOptimalNewCameraMatrix(cameraR_matrix,dist_coefsR,(w,h),1,(w,h))
# undistort
mapxR,mapyR = cv.initUndistortRectifyMap(cameraR_matrix,dist_coefsR,None,newcameramtxR,(w,h),5)
dstR = cv.remap(imgR,mapxR,mapyR,cv.INTER_LINEAR)
return img_pointsR,obj_points, cameraR_matrix, dist_coefsR
def Pics():
vc = cv.VideoCapture(2)
retVal, frame = vc.read();
while True :
if frame is not None:
imgray = cv.cvtColor(frame,cv.COLOR_BGR2GRAY)
ret,thresh = cv.threshold(imgray,127,255,1)
contours, hierarchy = cv.findContours(thresh,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)
cv.namedWindow("threshold")
cv.namedWindow("Camera")
#cv2.drawContours(frame, contours, -1, (0,255,0), 2)
cv.imshow("Camera", frame)
cv.imshow("threshold", thresh)
rval, frame = vc.read()
if cv.waitKey(1) & 0xFF == 27:
break
cv1.DestroyAllWindows()
def LeftCap():
cam = Device(2)
cam.saveSnapshot('imageL.jpg')
fn = 'C:\opencv2.4.8\sources\samples\python2\imageL.jpg'
return fn
def RightCap():
cam = Device(0)
cam.saveSnapshot('imageR.jpg')
fn = 'C:\opencv2.4.8\sources\samples\python2\imageR.jpg'
return fn
def Calculate(Li, Ri, Mat):
img_L = cv.pyrDown( cv.imread(Li) )
img_R = cv.pyrDown( cv.imread(Ri) )
window_size = 3
min_disp = 16
num_disp = 112-min_disp
stereo = cv.StereoSGBM(minDisparity = min_disp,
numDisparities = num_disp,
SADWindowSize = window_size,
uniquenessRatio = 10,
speckleWindowSize = 100,
speckleRange = 32,
disp12MaxDiff = 1,
P1 = 8*3*window_size**2,
P2 = 32*3*window_size**2,
fullDP = False
)
print "computing disparity..."
disp = stereo.compute(img_L, img_R).astype(np.float32) / 16.0
print "generating 3d point cloud..."
h, w = img_L.shape[:2]
f = 0.8*w # guess for focal length
points = cv.reprojectImageTo3D(disp, Mat)
colors = cv.cvtColor(img_L, cv.COLOR_BGR2RGB)
mask = disp > disp.min()
cv.imshow('left', img_L)
cv.imshow('disparity', (disp-min_disp)/num_disp)
b=6.50
D=b*f/disp
print "The Distance =", D
cv.waitKey()
cv1.DestroyAllWindows()
if __name__ == '__main__':
import sys, getopt
from glob import glob
Img_pointsL, Cam_MatL, DisL = caliLeftCam()
Img_pointsR,obj_points, Cam_MatR, DisR = caliRightCam()
print "Running stereo calibration..."
retval, Cam_MatL, DisL, Cam_MatR, DisR, R, T, E, F= cv.stereoCalibrate(obj_points, Img_pointsL, Img_pointsR,(384,288))
print"running rectifation..."
RL, Rr, PL, PR, Q, validRoiL, validRoiR = cv.stereoRectify(Cam_MatL, DisL, Cam_MatR, DisR,(384,288), R, T)
Pics()
Li = LeftCap()
Ri = RightCap()
Calculate(Li, Ri, Q)
Not sure if that is the problem, but you call the StereoRectify() function as cv2.stereoRectify(), not cv2.cv.StereoRectify(), and you provided the wrong arguments to it.
From documentation:
cv.StereoRectify(cameraMatrix1, cameraMatrix2, distCoeffs1, distCoeffs2, imageSize, R, T, R1, R2, P1, P2, Q=None, flags=CV_CALIB_ZERO_DISPARITY, alpha=-1, newImageSize=(0, 0))
You did it this (wrong) way:
cv.stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2...)