Apply CSRT object tracking to 2 cameras with multithreading - Python

I am trying to apply CSRT object tracking to 2 cameras using multithreading.
The two cameras are connected directly to separate USB ports on my laptop, and both should track an object in real time.
The first function is "object_tracking_1" and the second is "object_tracking_2", and I run them on separate threads.
If I run any single camera on its own, it works very well.
(The camera IDs are: 0 (laptop webcam), 1 (camera1), 2 (camera2).)
My problem:
Laptop webcam + one of the connected cameras:
Both threads run.
But while one of them tracks the object properly, the other one does not work.
I do see the window for selecting the box area, but the tracking itself never starts.
In other words, both threads execute, but only one of them actually performs object tracking.
2 cameras (main goal):
When I run this, it sort of works, but the displayed window alternates between the two cameras' views very quickly,
and I get this error message:
Exception in thread Thread-8:
Traceback (most recent call last):
File "C:\ProgramData\Anaconda3\lib\threading.py", line 973, in _bootstrap_inner
self.run()
File "C:\ProgramData\Anaconda3\lib\threading.py", line 910, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\user\AppData\Local\Temp/ipykernel_18232/2959108515.py", line 59, in object_tracking_1
cv2.error: OpenCV(3.4.11) C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-neg5amx3\opencv\modules\core\src\dxt.cpp:3335: error: (-215:Assertion failed) type == CV_32FC1 || type == CV_32FC2 || type == CV_64FC1 || type == CV_64FC2 in function 'cv::dft'
Posts online suggest it is a problem with the USB bus and address, but when I use the cameras one at a time, each works perfectly.
Code:
```
import cv2
import sys
import threading

(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

def object_tracking_1():
    if __name__ == '__main__':
        # Set up tracker.
        # Instead of MIL, you can also use
        tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
        tracker_type = tracker_types[7]
        if int(minor_ver) < 3:
            tracker = cv2.Tracker_create(tracker_type)
        else:
            if tracker_type == 'BOOSTING':
                tracker = cv2.TrackerBoosting_create()
            if tracker_type == 'MIL':
                tracker = cv2.TrackerMIL_create()
            if tracker_type == 'KCF':
                tracker = cv2.TrackerKCF_create()
            if tracker_type == 'TLD':
                tracker = cv2.TrackerTLD_create()
            if tracker_type == 'MEDIANFLOW':
                tracker = cv2.TrackerMedianFlow_create()
            if tracker_type == 'GOTURN':
                tracker = cv2.TrackerGOTURN_create()
            if tracker_type == 'MOSSE':
                tracker = cv2.TrackerMOSSE_create()
            if tracker_type == "CSRT":
                tracker = cv2.TrackerCSRT_create()
        # Read video
        video = cv2.VideoCapture(1)  ######################################################
        # video = cv2.VideoCapture("video15.mp4")
        # Exit if video not opened.
        if not video.isOpened():
            print("Could not open video")
            sys.exit()
        # Read first frame.
        ok, frame = video.read()
        if not ok:
            print('Cannot read video file')
            sys.exit()
        # Define an initial bounding box
        bbox = (287, 23, 86, 320)
        # Uncomment the line below to select a different bounding box
        bbox = cv2.selectROI(frame, False)
        # Initialize tracker with first frame and bounding box
        ok = tracker.init(frame, bbox)
        while True:
            # Read a new frame
            ok, frame = video.read()
            if not ok:
                break
            # Start timer
            timer = cv2.getTickCount()
            # Update tracker
            ok, bbox = tracker.update(frame)
            # Calculate Frames per second (FPS)
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
            # Draw bounding box
            if ok:
                # Tracking success
                p1 = (int(bbox[0]), int(bbox[1]))  # bbox[0]=x value, bbox[1]=y value
                p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))  # x2, y2
                cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
                # print(bbox)
                List_x1 = (bbox[0] + bbox[2]) / 2
                List_y1 = (bbox[1] + bbox[3]) / 2
                RList_x1 = round(List_x1, 2)
                RList_y1 = round(List_y1, 2)
                Coordinate_x1 = str(RList_x1)
                Coordinate_y1 = str(RList_y1)
                cv2.putText(frame, "x Coordinate : " + Coordinate_x1, (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
                cv2.putText(frame, "y Coordinate : " + Coordinate_y1, (100, 105), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
                # print('x1=' , RList_x1 , 'y2=' , RList_y1 )
                # X = list(map(str,bbox))
                # print(X)
                # cv2.putText(frame, ", ".join(X) , (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)
            else:
                # Tracking failure
                cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            # Display tracker type on frame
            cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
            # Display FPS on frame
            cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
            # Display result
            cv2.imshow("Tracking", frame)
            # Exit if ESC pressed
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                break

def object_tracking_2():
    if __name__ == '__main__':
        # Set up tracker.
        # Instead of MIL, you can also use
        tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
        tracker_type = tracker_types[7]
        if int(minor_ver) < 3:
            tracker = cv2.Tracker_create(tracker_type)
        else:
            if tracker_type == 'BOOSTING':
                tracker = cv2.TrackerBoosting_create()
            if tracker_type == 'MIL':
                tracker = cv2.TrackerMIL_create()
            if tracker_type == 'KCF':
                tracker = cv2.TrackerKCF_create()
            if tracker_type == 'TLD':
                tracker = cv2.TrackerTLD_create()
            if tracker_type == 'MEDIANFLOW':
                tracker = cv2.TrackerMedianFlow_create()
            if tracker_type == 'GOTURN':
                tracker = cv2.TrackerGOTURN_create()
            if tracker_type == 'MOSSE':
                tracker = cv2.TrackerMOSSE_create()
            if tracker_type == "CSRT":
                tracker = cv2.TrackerCSRT_create()
        # Read video
        video = cv2.VideoCapture(2)  ##################################################
        # video = cv2.VideoCapture("video15.mp4")
        # Exit if video not opened.
        if not video.isOpened():
            print("Could not open video")
            sys.exit()
        # Read first frame.
        ok, frame = video.read()
        if not ok:
            print('Cannot read video file')
            sys.exit()
        # Define an initial bounding box
        bbox = (287, 23, 86, 320)
        # Uncomment the line below to select a different bounding box
        bbox = cv2.selectROI(frame, False)
        # Initialize tracker with first frame and bounding box
        ok = tracker.init(frame, bbox)
        while True:
            # Read a new frame
            ok, frame = video.read()
            if not ok:
                break
            # Start timer
            timer = cv2.getTickCount()
            # Update tracker
            ok, bbox = tracker.update(frame)
            # Calculate Frames per second (FPS)
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
            # Draw bounding box
            if ok:
                # Tracking success
                p1 = (int(bbox[0]), int(bbox[1]))  # bbox[0]=x value, bbox[1]=y value
                p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))  # x2, y2
                cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
                # print(bbox)
                List_x2 = (bbox[0] + bbox[2]) / 2
                List_y2 = (bbox[1] + bbox[3]) / 2
                RList_x2 = round(List_x2, 2)
                RList_y2 = round(List_y2, 2)
                Coordinate_x2 = str(RList_x2)
                Coordinate_y2 = str(RList_y2)
                cv2.putText(frame, "x Coordinate : " + Coordinate_x2, (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
                cv2.putText(frame, "y Coordinate : " + Coordinate_y2, (100, 105), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
                # print('x1=' , RList_x1 , 'y2=' , RList_y1 )
                # X = list(map(str,bbox))
                # print(X)
                # cv2.putText(frame, ", ".join(X) , (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)
            else:
                # Tracking failure
                cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            # Display tracker type on frame
            cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
            # Display FPS on frame
            cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
            # Display result
            cv2.imshow("Tracking", frame)
            # Exit if ESC pressed
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                break

t1 = threading.Thread(target=object_tracking_1)
t2 = threading.Thread(target=object_tracking_2)
t1.start()
t2.start()
# t1.join()
# t2.join()
```
My main goal is to apply the CSRT algorithm to each connected camera.
Thanks a lot.
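One thing worth checking (a suggestion, not part of the original post): both threads display into the same window name ("Tracking") and both pop up cv2.selectROI, and OpenCV's HighGUI is generally not designed to be driven from two threads at once. A minimal sketch of a single parameterized worker, one per camera, each with its own window name, is shown below; the camera indices and window titles are assumptions.
```
import threading
import cv2

def track_camera(cam_index, window_name):
    # One CSRT tracker per camera, and a uniquely named window per thread.
    tracker = cv2.TrackerCSRT_create()
    video = cv2.VideoCapture(cam_index)
    if not video.isOpened():
        print("Could not open camera", cam_index)
        return
    ok, frame = video.read()
    if not ok:
        return
    # selectROI gets an explicit window name so the two threads do not share one window.
    bbox = cv2.selectROI(window_name, frame, False)
    tracker.init(frame, bbox)
    while True:
        ok, frame = video.read()
        if not ok:
            break
        ok, bbox = tracker.update(frame)
        if ok:
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        cv2.imshow(window_name, frame)
        if cv2.waitKey(1) & 0xff == 27:  # ESC
            break
    video.release()

t1 = threading.Thread(target=track_camera, args=(1, "Tracking 1"))
t2 = threading.Thread(target=track_camera, args=(2, "Tracking 2"))
t1.start()
t2.start()
t1.join()
t2.join()
```
If the windows still misbehave, a more robust layout is to keep all selectROI/imshow/waitKey calls in the main thread and let the worker threads only grab frames and update their trackers.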

Related

OpenCV RTSP frame very slow when I start recording

Before using the RTSP IP camera I did not face this problem.
First I show a countdown (3-2-1-GO!) to the user, and then I record for only 10 seconds. After that the recording finishes and I show "Finished". But since I started using the RTSP camera, there is a time lag while recording. How can I solve this? With the webcam I don't have this problem.
Process:
Countdown 3-2-1-GO!
Record for 10 seconds -- I have the problem at this step: the recorded video lags.
Show "Finished" text
This is the source code:
```
class MainRecord():
    def __init__(self, rfidCode):
        TIMER = int(3)
        TIMER_RECORD = int(10)
        self.rfidCode = rfidCode
        print(self.rfidCode)
        user_id = str(self.rfidCode)  # this id will come from RFID
        # user_id = "a1"
        # test = 'fourth'
        # cap = cv2.VideoCapture(0)
        # cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        # cap.set(cv2.CAP_PROP_POS_AVI_RATIO, 1)
        cap = cv2.VideoCapture('rtsp://private:private#192.168.1.64/1')
        # segmentor = SelfiSegmentation()
        filename = user_id + '.avi'  # .avi .mp4
        frames_per_seconds = 24  # this is the standard for movies or films
        config = CFEVideoConf(cap, filepath=filename, res='1080p')
        out = cv2.VideoWriter(filename, config.video_type, frames_per_seconds, config.dims)
        img_path = 'ap_logo.png'
        logo = cv2.imread(img_path, -1)
        watermark = image_resize(logo, height=60)
        watermark = cv2.cvtColor(watermark, cv2.COLOR_BGR2BGRA)
        # grayscale watermark
        # cv2.imshow('watermark', watermark)
        # time.sleep(2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        ret, readBefore = cap.read()
        cv2.imshow('Start', readBefore)
        while(True):
            prev = time.time()
            while TIMER >= 0:
                ret, img = cap.read()
                print("showing")
                # Display countdown on each frame:
                # specify the font and draw the
                # countdown using putText
                if TIMER != 0:
                    cv2.putText(img, str(TIMER), (560, 250), font, 7, (0, 255, 255), 4, cv2.LINE_AA)
                    cv2.imshow('Start', img)
                else:
                    cv2.putText(img, str("GO!"), (580, 250), font, 7, (0, 255, 255), 4, cv2.LINE_AA)
                    cv2.imshow('Start', img)
                if cv2.waitKey(20) & 0xFF == ord('q'):
                    break
                # current time
                cur = time.time()
                # Update and keep track of the countdown:
                # if the elapsed time is one second,
                # then decrease the counter
                if cur - prev >= 1:
                    prev = cur
                    TIMER = TIMER - 1
                    if TIMER == -1:
                        cv2.destroyAllWindows()
            while TIMER_RECORD >= 0:  # problem occurs here !!
                ret, frame = cap.read()
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
                frame_h, frame_w, frame_c = frame.shape
                # overlay with 4 channel BGR and Alpha
                overlay = np.zeros((frame_h, frame_w, 4), dtype='uint8')
                # overlay[100:250, 100:125] = (255,255,0,1)  # B,G,R,A
                # overlay[100:250, 150:255] = (0,255,0,1)
                watermark_h, watermark_w, watermark_c = watermark.shape
                for i in range(0, watermark_h):
                    for j in range(0, watermark_w):
                        if watermark[i, j][3] != 0:
                            h_offset = frame_h - watermark_h
                            w_offset = frame_w - watermark_w
                            overlay[h_offset + i, w_offset + j] = watermark[i, j]
                cv2.addWeighted(overlay, 0.25, frame, 1.0, 0, frame)
                # Display the resulting frame
                frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)
                cv2.imshow('Frame', frame)
                if cv2.waitKey(20) & 0xFF == ord('q'):
                    break
                out.write(frame)  # the current frame is written to the file
                cur = time.time()
                # Update and keep track of the countdown:
                # if the elapsed time is one second,
                # then decrease the counter
                if cur - prev >= 1:
                    prev = cur
                    TIMER_RECORD = TIMER_RECORD - 1
                    print(TIMER_RECORD)
                    if TIMER_RECORD == -1:
                        cv2.destroyAllWindows()
                # if cv2.waitKey(20) & 0xFF == ord('q'):
                #     break
            else:
                ret, finishImg = cap.read()
                cv2.putText(finishImg, str("FINISH !"), (150, 250), font, 7, (0, 255, 255), 4, cv2.LINE_AA)
                cv2.imshow('Finished', finishImg)
                # cv2.destroyAllWindows()
                if cv2.waitKey(20) & 0xFF == ord('q'):
                    break
            # break
            # if cv2.waitKey(20) & 0xFF == ord('q'):
            #     break
        # When everything is done, release the capture
        cap.release()
        out.release()
        # saved
        cv2.destroyAllWindows()
        # self.ui.show()
```
Inside the `while TIMER_RECORD >= 0:` loop the displayed video is very slow, but before and after it everything is fine. How can I solve this problem?
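A likely cause (my suggestion, not from the original post): inside the recording loop the watermark is pasted with two nested per-pixel Python loops over the whole logo on every frame, which is slow enough to stall an RTSP stream. Below is a sketch of a vectorized version using NumPy slicing, assuming the same BGRA frame/watermark arrays as above; the helper name apply_watermark is made up.
```
import cv2
import numpy as np

def apply_watermark(frame_bgra, watermark_bgra):
    # Build the overlay with array slicing instead of per-pixel Python loops;
    # only watermark pixels whose alpha channel is non-zero are copied.
    frame_h, frame_w = frame_bgra.shape[:2]
    wm_h, wm_w = watermark_bgra.shape[:2]
    overlay = np.zeros_like(frame_bgra)
    corner = overlay[frame_h - wm_h:, frame_w - wm_w:]   # bottom-right corner view
    mask = watermark_bgra[:, :, 3] != 0
    corner[mask] = watermark_bgra[mask]
    cv2.addWeighted(overlay, 0.25, frame_bgra, 1.0, 0, frame_bgra)
    return frame_bgra
```
Since the watermark never moves, the overlay (or at least the mask) could also be computed once before the recording loop instead of on every frame.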

Show the center coordinate of the object tracking bounding box

I would like to know how I can print the tracking rectangle's coordinates onto the video. I want the coordinate of the center point of the rectangle, and of course, when the tracker moves, the coordinate should update as well.
I tried to use:
cv2.putText(frame, **??** , (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2)
something like this inside the loop, but I cannot figure it out.
```
import cv2
import sys

(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

if __name__ == '__main__':
    # Set up tracker.
    # Instead of MIL, you can also use
    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
    tracker_type = tracker_types[2]
    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()
    # Read video
    video = cv2.VideoCapture('video.mp4')
    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()
    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()
    # Define an initial bounding box
    bbox = (287, 23, 86, 320)
    # Uncomment the line below to select a different bounding box
    bbox = cv2.selectROI(frame, False)
    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)
    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break
        # Start timer
        timer = cv2.getTickCount()
        # Update tracker
        ok, bbox = tracker.update(frame)
        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Display result
        cv2.imshow("Tracking", frame)
        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
```
You can obtain the center of the rectangle from the variables p1 and p2:
Coordinate: p1 --> (x1, y1)
Coordinate: p2 --> (x2, y2)
The center of the rectangle is ((x1+x2)/2, (y1+y2)/2):
rectangle_center = (int((p1[0] + p2[0]) / 2), int((p1[1] + p2[1]) / 2))
Within cv2.putText(), place that variable as a string: str(rectangle_center).
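For example, inside the `if ok:` branch of the loop above (variable names follow the question's code; the text position is arbitrary):
```
if ok:
    p1 = (int(bbox[0]), int(bbox[1]))
    p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
    cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
    # Center of the tracked rectangle, recomputed (and therefore updated) every frame.
    rectangle_center = (int((p1[0] + p2[0]) / 2), int((p1[1] + p2[1]) / 2))
    cv2.putText(frame, "Center: " + str(rectangle_center), (100, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
```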

Real time object tracking - how to let the video play in the beginning, let the user pause it, draw the bounding box, and then begin the tracking?

The following is code I found here for real-time object tracking:
```
import cv2
import sys

major_ver, minor_ver, subminor_ver = cv2.__version__.split('.')

if __name__ == '__main__':
    # Set up tracker.
    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
    tracker_type = tracker_types[1]
    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()
    # Read video
    video = cv2.VideoCapture(0)  # 0 means webcam. Otherwise, if you want to use a video file, replace 0 with "video_file.MOV"
    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()
    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()
    # Define an initial bounding box
    bbox = (287, 23, 86, 320)
    # Uncomment the line below to select a different bounding box
    bbox = cv2.selectROI(frame, False)
    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)
    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break
        # Start timer
        timer = cv2.getTickCount()
        # Update tracker
        ok, bbox = tracker.update(frame)
        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Display result
        cv2.imshow("Tracking", frame)
        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
```
It works fine, but when you run the program, it pauses at the first frame until you draw the bounding box for the tracking to start.
I want to let the video play first, then the user should be able to pause it, draw the bounding box and then resume the video for the tracking to begin. For that, I replaced the following:
```
# Read video
video = cv2.VideoCapture(0)  # 0 means webcam. Otherwise, if you want to use a video file, replace 0 with "video_file.MOV"
# Exit if video not opened.
if not video.isOpened():
    print("Could not open video")
    sys.exit()
# Read first frame.
ok, frame = video.read()
if not ok:
    print('Cannot read video file')
    sys.exit()
# Define an initial bounding box
bbox = (287, 23, 86, 320)
# Uncomment the line below to select a different bounding box
bbox = cv2.selectROI(frame, False)
# Initialize tracker with first frame and bounding box
ok = tracker.init(frame, bbox)
```
with this:
```
while True:
    # Read video
    video = cv2.VideoCapture(0)  # 0 means webcam. Otherwise, if you want to use a video file, replace 0 with "video_file.MOV"
    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()
    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()
    # Retrieve an image and display it.
    if (0xFF & cv2.waitKey(10)) == ord('p'):  # Press key `p` to pause the video to start tracking
        break
    cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
    cv2.imshow("Image", frame)
cv2.destroyWindow("Image")
```
When I run the program now, the video starts to play in the beginning, but as soon as I press p, Python crashes:
How do I fix this?
EDIT: Pasting the entire code with the replacement/substitution here for clarity:
```
import cv2
import sys

major_ver, minor_ver, subminor_ver = cv2.__version__.split('.')

if __name__ == '__main__':
    # Set up tracker.
    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
    tracker_type = tracker_types[1]
    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()
    while True:
        # Read video
        video = cv2.VideoCapture(0)  # 0 means webcam. Otherwise, if you want to use a video file, replace 0 with "video_file.MOV"
        # Exit if video not opened.
        if not video.isOpened():
            print("Could not open video")
            sys.exit()
        # Read first frame.
        ok, frame = video.read()
        if not ok:
            print('Cannot read video file')
            sys.exit()
        # Retrieve an image and display it.
        if ((0xFF & cv2.waitKey(10)) == ord('p')):  # Press key `p` to pause the video to start tracking
            break
        cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.imshow("Image", frame)
    cv2.destroyWindow("Image")
    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break
        # Start timer
        timer = cv2.getTickCount()
        # Update tracker
        ok, bbox = tracker.update(frame)
        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Display result
        cv2.imshow("Tracking", frame)
        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
```
Your if statement has the parentheses in the wrong place:
if(0xFF & cv2.waitKey(10))==ord('p'):
should be
if ((0xFF & cv2.waitKey(10)) == ord('p')):
Though if you're using ord() you can do a direct comparison
if (cv2.waitKey(10) == ord('p')):
Here's your code with a few edits. It works for tracking my face.
```
import cv2
import sys

major_ver, minor_ver, subminor_ver = cv2.__version__.split('.')

if __name__ == '__main__':
    # Set up tracker.
    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
    tracker_type = tracker_types[1]
    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()
    # Read video
    video = cv2.VideoCapture(0)  # 0 means webcam. Otherwise, if you want to use a video file, replace 0 with "video_file.MOV"
    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()
    while True:
        # Read first frame.
        ok, frame = video.read()
        if not ok:
            print('Cannot read video file')
            sys.exit()
        # Retrieve an image and display it.
        if ((0xFF & cv2.waitKey(10)) == ord('p')):  # Press key `p` to pause the video to start tracking
            break
        cv2.namedWindow("Image", cv2.WINDOW_NORMAL)
        cv2.imshow("Image", frame)
    cv2.destroyWindow("Image")
    # select the bounding box
    bbox = (287, 23, 86, 320)
    # Uncomment the line below to select a different bounding box
    bbox = cv2.selectROI(frame, False)
    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)
    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break
        # Start timer
        timer = cv2.getTickCount()
        # Update tracker
        ok, bbox = tracker.update(frame)
        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        # Display result
        cv2.imshow("Tracking", frame)
        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
```
I moved the VideoCapture declaration outside of the loop so that it isn't rebuilt on every iteration. I added back the ROI selector and tracker initialization between the first and second loops.

OpenCV object detection and saving the video file

I want to detect objects and save the video, but the saved video is only 6 KB or 0 KB and it can't be played.
If this line is removed:
x, y, width, height, area = stats[index]
it is saved correctly.
Do you know why, and is there a solution?
```
import cv2
import time
import numpy as np

cap = cv2.VideoCapture("rtsp://admin:admin#128.1.1.110:554")
width = int(cap.get(3))
height = int(cap.get(4))
fcc = cv2.VideoWriter_fourcc(*'XVID')
recording = False
fgbg = cv2.createBackgroundSubtractorMOG2(varThreshold=200, detectShadows=0)
while(1):
    ret, frame = cap.read()
    hms = time.strftime('%H_%M_%S', time.localtime())
    fgmask = fgbg.apply(frame)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)
    for index, centroid in enumerate(centroids):
        if stats[index][0] == 0 and stats[index][1] == 0:
            continue
        if np.any(np.isnan(centroid)):
            continue
        x, y, width, height, area = stats[index]
        centerX, centerY = int(centroid[0]), int(centroid[1])
        if area > 200:
            cv2.circle(frame, (centerX, centerY), 1, (0, 255, 0), 2)
            cv2.rectangle(frame, (x, y), (x + width, y + height), (0, 0, 255))
            cv2.putText(frame, str(area), (centerX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    cv2.imshow('frame', frame)
    k = cv2.waitKey(1) & 0xff
    if k == ord('r') and recording is False:
        path = 'test_' + str(hms) + '.avi'
        print('recording start')
        writer = cv2.VideoWriter(path, fcc, 30.0, (width, height))
        recording = True
    if recording:
        writer.write(frame)
    if k == ord('e'):
        print('recording end')
        recording = False
        writer.release()
cap.release()
cv2.destroyAllWindows()
```
I think this will solve your problem
```
# importing the module
import cv2
import numpy as np

# reading the video
source = cv2.VideoCapture(0)  # add your URL instead of "0"

# We need to set resolutions,
# so convert them from float to integer.
frame_width = int(source.get(3))
frame_height = int(source.get(4))
recording = False
fcc = cv2.VideoWriter_fourcc(*'XVID')
size = (frame_width, frame_height)
fgbg = cv2.createBackgroundSubtractorMOG2(varThreshold=200, detectShadows=0)
result = cv2.VideoWriter('output.avi', fcc, 30, size)

# running the loop
while True:
    # extracting the frames
    ret, frame = source.read()
    fgmask = fgbg.apply(frame)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)
    for index, centroid in enumerate(centroids):
        if stats[index][0] == 0 and stats[index][1] == 0:
            continue
        if np.any(np.isnan(centroid)):
            continue
        x, y, width, height, area = stats[index]
        centerX, centerY = int(centroid[0]), int(centroid[1])
        if area > 200:
            cv2.circle(frame, (centerX, centerY), 1, (0, 255, 0), 2)
            cv2.rectangle(frame, (x, y), (x + width, y + height), (0, 0, 255))
            cv2.putText(frame, str(area), (centerX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    # displaying the video
    cv2.imshow("Live", frame)
    k = cv2.waitKey(1) & 0xff
    if k == ord('r') and recording is False:
        print('recording start')
        recording = True
    if recording:
        result.write(frame)
    if k == ord('e'):
        print('recording end')
        recording = False
        result.release()

# closing the window
cv2.destroyAllWindows()
source.release()
```
Unfortunately, I could not get the hms timestamp into the output file name; you can try that yourself.
If this helped you, give it a 👍
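If you do want the timestamp in the file name, the time.strftime call from the question can be reused when creating the writer; a small sketch using the same variable names as the answer above (size is assumed to be (frame_width, frame_height)):
```
import time
import cv2

hms = time.strftime('%H_%M_%S', time.localtime())
filename = 'test_' + hms + '.avi'                   # e.g. test_14_05_33.avi
fcc = cv2.VideoWriter_fourcc(*'XVID')
result = cv2.VideoWriter(filename, fcc, 30, size)   # size = (frame_width, frame_height)
```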
Actually, you need to delete some code:
```
cv2.imshow('MultiTracker', frame)
# quit on ESC button
if cv2.waitKey(1) & 0xFF == 27:  # Esc pressed
    break
# k = cv2.waitKey(1) & 0xff
# if k == ord('r') and recording is False:
#     print('recording start')
#     recording = True
# if recording:
result.write(frame)
# if k == ord('e'):
#     print('recording end')
#     recording = False
#     result.release()

result.release()
cv2.destroyAllWindows()
cap.release()
```
It works for me. The reason your file is 6 KB is that you start the writer but never append frames to the output .avi file.

How to cancel (remove) a target object of MultiTracker in opencv-python

I'm using opencv-python for my undergraduate graduation project and I need to use MultiTracker to implement multi-target detection and tracking. However, I cannot remove a target object after it disappears from the screen. I'm not a student majoring in digital image processing, so this problem bothers me a lot. Can anyone help me? The code is as follows:
```
import sys
import cv2
from random import randint

trackerTypes = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
trackerType = trackerTypes[6]  # KCF and MOSSE give acceptable results; KCF works best, MOSSE is fastest

def adjust_frame(frame):
    rows, cols, ch = frame.shape
    M = cv2.getRotationMatrix2D((cols, rows), 1, 1)  # the three parameters are the rotation center, rotation angle and scale
    frame = cv2.warpAffine(frame, M, (cols, rows))
    frame = frame[580:670, 470:1030]
    frame = cv2.resize(frame, None, fx=1.5, fy=1.5, interpolation=cv2.INTER_CUBIC)
    return frame

def createTrackerByName(trackerType):
    # create a tracker from its name
    if trackerType == trackerTypes[0]:
        tracker = cv2.TrackerBoosting_create()
    elif trackerType == trackerTypes[1]:
        tracker = cv2.TrackerMIL_create()
    elif trackerType == trackerTypes[2]:
        tracker = cv2.TrackerKCF_create()
    elif trackerType == trackerTypes[3]:
        tracker = cv2.TrackerTLD_create()
    elif trackerType == trackerTypes[4]:
        tracker = cv2.TrackerMedianFlow_create()
    elif trackerType == trackerTypes[5]:
        tracker = cv2.TrackerGOTURN_create()
    elif trackerType == trackerTypes[6]:
        tracker = cv2.TrackerMOSSE_create()
    elif trackerType == trackerTypes[7]:
        tracker = cv2.TrackerCSRT_create()
    else:
        tracker = None
        print('Incorrect tracker name')
        print('Available tracker name')
        for t in trackerTypes:
            print(t)
    return tracker

print('Default tracking algorithm is CSRT \n'
      'Available tracking algorithms are:\n')
for t in trackerTypes:
    print(t, end=' ')

videoPath = r'E:\python files\vehicle identification\4.MOV'  # path of the video file to load
cap = cv2.VideoCapture(videoPath)  # create a VideoCapture to read the video file

# read the first frame
ret, frame = cap.read()
frame = adjust_frame(frame)
# exit if the video file cannot be read
if not ret:
    print('Failed to read video')
    sys.exit(1)

# bounding boxes
bboxes = []
colors = []

# OpenCV's selectROI does not support selecting multiple objects in Python,
# so call it in a loop until all objects have been selected
while True:
    # Draw a bounding box over the object; with fromCenter set to False, selectROI starts the box from the top-left corner instead of the center
    bbox = cv2.selectROI('MultiTracker', frame)  # returns four values: x, y, w, h
    bboxes.append(bbox)
    colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))
    print("Press q to quit selecting boxes and start tracking")
    print("Press any other key to select next object")
    k = cv2.waitKey(0)
    if k == 113:  # q is pressed
        break

print('Selected bounding boxes {}'.format(bboxes))

# Initialize the MultiTracker
# There are two ways to initialize a MultiTracker:
# 1. tracker = cv2.MultiTracker("CSRT")
#    All trackers added to this MultiTracker
#    will use CSRT as the default algorithm
# 2. tracker = cv2.MultiTracker()
#    No default algorithm is specified
# Initialize the MultiTracker with a tracking algorithm
# Specify the tracker type
# Create the MultiTracker object
multiTracker = cv2.MultiTracker_create()

# initialize the MultiTracker
for bbox in bboxes:
    multiTracker.add(createTrackerByName(trackerType), frame, bbox)

# process the video and track the objects
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    frame = adjust_frame(frame)
    timer = cv2.getTickCount()  # timing point 1
    # get the updated positions of the objects in subsequent frames
    ret, boxes = multiTracker.update(frame)
    # draw the tracked objects
    for i, newbox in enumerate(boxes):
        p1 = (int(newbox[0]), int(newbox[1]))  # x, y coordinates
        p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
        cv2.rectangle(frame, p1, p2, colors[i], 2, 1)
    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)  # timing point 2
    cv2.putText(frame, "FPS : " + str(int(fps)), (10, 13), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (50, 170, 50), 2)
    cv2.putText(frame, trackerType + " Tracker", (10, 28), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (50, 170, 50), 2)
    cv2.imshow('MultiTracker', frame)
    k = cv2.waitKey(1)
    if k == 27:
        break
    elif k == ord('p'):  # press 'p' to add a new target
        bbox = cv2.selectROI('MultiTracker', frame)  # returns four values: x, y, w, h
        bboxes.append(bbox)
        colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))
        multiTracker.add(createTrackerByName(trackerType), frame, bbox)
```
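Not part of the original post, but for what it's worth: the legacy MultiTracker API exposes no method for removing a single tracker, so a common workaround is to rebuild the MultiTracker from only the boxes you still want to keep. Below is a sketch reusing the question's createTrackerByName, trackerType and colors; the keep_indices list (i.e. how you decide a target has disappeared) is hypothetical and up to your application.
```
def rebuild_multitracker(frame, boxes, colors, keep_indices):
    # Recreate the MultiTracker with only the targets we still want to follow.
    # keep_indices: indices into `boxes` of the targets that have not disappeared.
    new_tracker = cv2.MultiTracker_create()
    kept_boxes, kept_colors = [], []
    for i in keep_indices:
        new_tracker.add(createTrackerByName(trackerType), frame, tuple(boxes[i]))
        kept_boxes.append(tuple(boxes[i]))
        kept_colors.append(colors[i])
    return new_tracker, kept_boxes, kept_colors
```
You would call this, for example, on a key press or when a box drifts outside the frame, and then continue the loop with the returned tracker, boxes and colors.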
