OpenCV object detection and saving a video file - Python

I want to detect objects and save the video, but the saved file is only 6 KB or 0 KB and it can't be played.
If I remove this line:
x, y, width, height, area = stats[index]
the video is saved correctly.
Do you know why, and is there a solution?
import cv2
import time
import numpy as np

cap = cv2.VideoCapture("rtsp://admin:admin#128.1.1.110:554")
width = int(cap.get(3))
height = int(cap.get(4))
fcc = cv2.VideoWriter_fourcc(*'XVID')
recording = False
fgbg = cv2.createBackgroundSubtractorMOG2(varThreshold=200, detectShadows=0)

while(1):
    ret, frame = cap.read()
    hms = time.strftime('%H_%M_%S', time.localtime())

    fgmask = fgbg.apply(frame)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)

    for index, centroid in enumerate(centroids):
        if stats[index][0] == 0 and stats[index][1] == 0:
            continue
        if np.any(np.isnan(centroid)):
            continue

        x, y, width, height, area = stats[index]
        centerX, centerY = int(centroid[0]), int(centroid[1])

        if area > 200:
            cv2.circle(frame, (centerX, centerY), 1, (0, 255, 0), 2)
            cv2.rectangle(frame, (x, y), (x + width, y + height), (0, 0, 255))
            cv2.putText(frame, str(area), (centerX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))

    cv2.imshow('frame', frame)

    k = cv2.waitKey(1) & 0xff
    if k == ord('r') and recording is False:
        path = 'test_' + str(hms) + '.avi'
        print('recording start')
        writer = cv2.VideoWriter(path, fcc, 30.0, (width, height))
        recording = True
    if recording:
        writer.write(frame)
    if k == ord('e'):
        print('recording end')
        recording = False
        writer.release()

cap.release()
cv2.destroyAllWindows()

I think this will solve your problem:
# importing the modules
import cv2
import numpy as np

# reading the video
source = cv2.VideoCapture(0)  # add your URL instead of "0"

# We need to set resolutions, so convert them from float to integer.
frame_width = int(source.get(3))
frame_height = int(source.get(4))

recording = False
fcc = cv2.VideoWriter_fourcc(*'XVID')
size = (frame_width, frame_height)
fgbg = cv2.createBackgroundSubtractorMOG2(varThreshold=200, detectShadows=0)
result = cv2.VideoWriter('output.avi', fcc, 30, size)

# running the loop
while True:
    # extracting the frames
    ret, frame = source.read()

    fgmask = fgbg.apply(frame)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)

    for index, centroid in enumerate(centroids):
        if stats[index][0] == 0 and stats[index][1] == 0:
            continue
        if np.any(np.isnan(centroid)):
            continue

        x, y, width, height, area = stats[index]
        centerX, centerY = int(centroid[0]), int(centroid[1])

        if area > 200:
            cv2.circle(frame, (centerX, centerY), 1, (0, 255, 0), 2)
            cv2.rectangle(frame, (x, y), (x + width, y + height), (0, 0, 255))
            cv2.putText(frame, str(area), (centerX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))

    # displaying the video
    cv2.imshow("Live", frame)

    k = cv2.waitKey(1) & 0xff
    if k == ord('r') and recording is False:
        print('recording start')
        recording = True
    if recording:
        result.write(frame)
    if k == ord('e'):
        print('recording end')
        recording = False
        result.release()

# closing the window
cv2.destroyAllWindows()
source.release()
But unfortunately, I could not get the hms timestamp into the output file name. You can try that yourself.
If this was helpful for you, give it a 👍
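The likely reason the original file is unplayable: inside the loop, x, y, width, height, area = stats[index] overwrites the width and height read from the capture, so the VideoWriter is created with the size of the last connected component instead of the frame size, and OpenCV silently drops frames whose size does not match the writer. The renamed frame_width/frame_height above avoid that shadowing. If you still want the hms timestamp in the file name, one option is to create the writer only when recording starts. A minimal sketch, assuming the same XVID/AVI setup as the answer above:

import time
import cv2

source = cv2.VideoCapture(0)  # swap in your RTSP URL here
size = (int(source.get(3)), int(source.get(4)))
fcc = cv2.VideoWriter_fourcc(*'XVID')
recording = False
writer = None

while True:
    ret, frame = source.read()
    if not ret:
        break
    cv2.imshow('Live', frame)

    k = cv2.waitKey(1) & 0xff
    if k == ord('r') and not recording:
        # create the writer lazily so the filename carries the start time
        hms = time.strftime('%H_%M_%S', time.localtime())
        writer = cv2.VideoWriter('test_' + hms + '.avi', fcc, 30.0, size)
        print('recording start')
        recording = True
    if recording:
        writer.write(frame)
    if k == ord('e') and recording:
        print('recording end')
        recording = False
        writer.release()

if recording:
    writer.release()
source.release()
cv2.destroyAllWindows()

Because the writer is built on the 'r' key press, each recording session gets its own timestamped file.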

Actually, you need to delete some code:
    # ... inside the while loop:
    cv2.imshow('MultiTracker', frame)

    # quit on ESC button
    if cv2.waitKey(1) & 0xFF == 27:  # Esc pressed
        break

    # k = cv2.waitKey(1) & 0xff
    # if k == ord('r') and recording is False:
    #     print('recording start')
    #     recording = True
    # if recording:
    result.write(frame)
    # if k == ord('e'):
    #     print('recording end')
    #     recording = False
    #     result.release()

result.release()
cv2.destroyAllWindows()
cap.release()
It works for me. The reason the file is 6 KB is that you start the writer but never append any frames to the output AVI file.

Related

OpenCV RTSP frame very slow when I start recording

Before using the RTSP IP camera, I didn't face this problem.
First I show a countdown (3-2-1-GO!) to the user, and then I record for only 10 seconds. After that the recording finishes and "Finished" is shown. But since I started using the RTSP camera, there is a time lag while recording. How can I solve this? With the webcam I don't have this problem.
Process:
Countdown 3-2-1-GO!
Start recording for 10 seconds -- I have a problem at this step: the video recording has a time lag.
Show "Finished" text
This is the source code:
class MainRecord():
    def __init__(self, rfidCode):
        TIMER = int(3)
        TIMER_RECORD = int(10)

        self.rfidCode = rfidCode
        print(self.rfidCode)
        user_id = str(self.rfidCode)  # this id will come from the RFID
        # user_id = "a1"
        # test = 'fourth'
        # cap = cv2.VideoCapture(0)
        # cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        # cap.set(cv2.CAP_PROP_POS_AVI_RATIO, 1)
        cap = cv2.VideoCapture('rtsp://private:private#192.168.1.64/1')
        # segmentor = SelfiSegmentation()
        filename = user_id + '.avi'  # .avi .mp4
        frames_per_seconds = 24  # this is the standard for movies or films
        config = CFEVideoConf(cap, filepath=filename, res='1080p')
        out = cv2.VideoWriter(filename, config.video_type, frames_per_seconds, config.dims)
        img_path = 'ap_logo.png'
        logo = cv2.imread(img_path, -1)
        watermark = image_resize(logo, height=60)
        watermark = cv2.cvtColor(watermark, cv2.COLOR_BGR2BGRA)  # grayscale watermark
        # cv2.imshow('watermark', watermark)
        # time.sleep(2)
        font = cv2.FONT_HERSHEY_SIMPLEX

        ret, readBefore = cap.read()
        cv2.imshow('Start', readBefore)

        while(True):
            prev = time.time()
            while TIMER >= 0:
                ret, img = cap.read()
                print("showing")
                # Display countdown on each frame: specify the font
                # and draw the countdown using putText
                if TIMER != 0:
                    cv2.putText(img, str(TIMER),
                                (560, 250), font,
                                7, (0, 255, 255),
                                4, cv2.LINE_AA)
                    cv2.imshow('Start', img)
                else:
                    cv2.putText(img, str("GO!"),
                                (580, 250), font,
                                7, (0, 255, 255),
                                4, cv2.LINE_AA)
                    cv2.imshow('Start', img)
                if cv2.waitKey(20) & 0xFF == ord('q'):
                    break
                # current time
                cur = time.time()
                # Update and keep track of the countdown:
                # if one second has elapsed, decrease the counter
                if cur - prev >= 1:
                    prev = cur
                    TIMER = TIMER - 1
                    if TIMER == -1:
                        cv2.destroyAllWindows()

            while TIMER_RECORD >= 0:  # problem occurs here !!
                ret, frame = cap.read()
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
                frame_h, frame_w, frame_c = frame.shape
                # overlay with 4-channel BGR and alpha
                overlay = np.zeros((frame_h, frame_w, 4), dtype='uint8')
                # overlay[100:250, 100:125] = (255, 255, 0, 1)  # B, G, R, A
                # overlay[100:250, 150:255] = (0, 255, 0, 1)
                watermark_h, watermark_w, watermark_c = watermark.shape
                for i in range(0, watermark_h):
                    for j in range(0, watermark_w):
                        if watermark[i, j][3] != 0:
                            h_offset = frame_h - watermark_h
                            w_offset = frame_w - watermark_w
                            overlay[h_offset + i, w_offset + j] = watermark[i, j]
                cv2.addWeighted(overlay, 0.25, frame, 1.0, 0, frame)
                # Display the resulting frame
                frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)
                cv2.imshow('Frame', frame)
                if cv2.waitKey(20) & 0xFF == ord('q'):
                    break
                out.write(frame)  # the frame is written to the file
                cur = time.time()
                # Update and keep track of the countdown:
                # if one second has elapsed, decrease the counter
                if cur - prev >= 1:
                    prev = cur
                    TIMER_RECORD = TIMER_RECORD - 1
                    print(TIMER_RECORD)
                    if TIMER_RECORD == -1:
                        cv2.destroyAllWindows()
                # if cv2.waitKey(20) & 0xFF == ord('q'):
                #     break
            else:
                ret, finishImg = cap.read()
                cv2.putText(finishImg, str("FINISH !"),
                            (150, 250), font,
                            7, (0, 255, 255),
                            4, cv2.LINE_AA)
                cv2.imshow('Finished', finishImg)
                # cv2.destroyAllWindows()
                if cv2.waitKey(20) & 0xFF == ord('q'):
                    break
            # break
            # if cv2.waitKey(20) & 0xFF == ord('q'):
            #     break

        # When everything is done, release the capture
        cap.release()
        out.release()  # saved
        cv2.destroyAllWindows()
        # self.ui.show()
In the while TIMER_RECORD >= 0: loop, the displayed video is very slow, but before and after it everything is perfect. How can I solve this problem?
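Two things in this loop are worth checking. First, the nested watermark loop runs watermark_h × watermark_w Python iterations on every frame, which can easily dominate the frame time; a vectorized NumPy version does the same work in one masked assignment. A hedged sketch, assuming the same BGRA watermark and frame as in the code above (the mask is precomputed once, outside the recording loop):

import cv2
import numpy as np

# Precompute once, before the recording loop ('watermark' is the BGRA
# logo loaded above).
alpha_mask = watermark[:, :, 3] != 0   # True where the logo is opaque
wm_h, wm_w = watermark.shape[:2]

def add_watermark(frame_bgra):
    # Vectorized replacement for the nested per-pixel loop: one masked
    # assignment instead of wm_h * wm_w Python iterations per frame.
    overlay = np.zeros(frame_bgra.shape, dtype='uint8')
    h_off = frame_bgra.shape[0] - wm_h
    w_off = frame_bgra.shape[1] - wm_w
    overlay[h_off:h_off + wm_h, w_off:w_off + wm_w][alpha_mask] = watermark[alpha_mask]
    cv2.addWeighted(overlay, 0.25, frame_bgra, 1.0, 0, frame_bgra)
    return frame_bgra

Second, RTSP streams buffer frames: if the loop runs slower than the camera's frame rate, frames queue up and the display lags further and further behind. Reading the capture on a background thread (see the threading question below) keeps that buffer drained.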

Python: sharing a global NumPy array between processes

Hey, I want to share a global NumPy array (cameraImg) between my main program and a process that runs in parallel, but it doesn't work. It says
"TypeError: only size-1 arrays can be converted to Python scalars"
The main program display_camera() runs in a while loop and gets the image from the webcam. The image is saved in the global cameraImg and used by recognize_Faces() to identify the face depicted in the image. It saves the name, which is then drawn above the person's head. Hope you can help me, thanks.
import time, os, cv2 as cv2, face_recognition
from multiprocessing import Process, Array, Value

global name
name = ""
window_name = 'facerecognition'

def display_camera():
    cap = cv2.VideoCapture(0)
    cap.set(3, 640)  # set Width
    cap.set(4, 480)  # set Height
    cascade = cv2.CascadeClassifier('cascade.xml')
    global cameraImg, name
    while True:
        ret, img = cap.read()
        cameraImg = Array('d', img)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = cascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(20, 20)
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (36, 255, 12), 1)
            cv2.putText(img, name, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]
        cv2.imshow('video', img)
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # press 'ESC' to quit
            break
    cap.release()
    cv2.destroyAllWindows()

def recognize_Faces():
    global cameraImg, name
    folder_dir = "../images/"
    image_List = []
    for images in os.listdir(folder_dir):
        # check if the image ends with png, jpg or jpeg
        if (images.endswith(".png") or images.endswith(".jpg")
                or images.endswith(".jpeg")):
            image_List.append(images)
    while True:
        try:
            newFaces = False
            for path in image_List:
                rgb_img = cv2.cvtColor(cameraImg, cv2.COLOR_BGR2RGB)
                img_encoding = face_recognition.face_encodings(rgb_img)
                img2 = cv2.imread("../images/" + path)
                rgb_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
                img_encoding2 = face_recognition.face_encodings(rgb_img2)[0]
                result = face_recognition.compare_faces(img_encoding, img_encoding2)
                print("Result: ", result)
                if result[0]:
                    newFaces = True
                    name = os.path.basename("../images/" + path).split('.')[0]
        except Exception as e:
            print(e)
        if newFaces == False:
            name = ""
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # press 'ESC' to quit
            break
        time.sleep(3)

if __name__ == '__main__':
    secondaryThread = Process(target=recognize_Faces)
    secondaryThread.start()
    display_camera()
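Two separate issues here. The TypeError comes from Array('d', img): multiprocessing.Array expects a flat sequence of scalars, not a 2-D/3-D NumPy image. And even if that worked, rebinding the global cameraImg inside one process is invisible to the other, because each process has its own memory. A sketch of the usual pattern, allocating one shared buffer up front and copying frames into it (H, W, C are assumed to match the 640x480 capture configured above):

import numpy as np
from multiprocessing import Array

H, W, C = 480, 640, 3  # must match the capture resolution set above

# Allocate one shared buffer before the child process starts and pass it
# to Process(..., args=(shared,)); feeding a 3-D image to Array() directly
# is what raises the TypeError in the question.
shared = Array('B', H * W * C)  # 'B' = unsigned bytes, matching uint8 frames

def as_image(arr):
    # Wrap the shared buffer as an H x W x C uint8 view (no copying).
    return np.frombuffer(arr.get_obj(), dtype=np.uint8).reshape(H, W, C)

# producer (display_camera):  as_image(shared)[:] = img
# consumer (recognize_Faces): img = as_image(shared).copy()

The shared object must be created before Process.start() and handed to the child (e.g., via args=), so both sides refer to the same buffer rather than to their own copy of a global.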

I want my OpenCV code to use threading to increase FPS

My OpenCV code lags on a Raspberry Pi, but on a PC it runs smoothly. Can anyone help me convert my hard-coded version into code that uses threading?
from cv2 import cv2
import numpy as np
from pyzbar.pyzbar import decode
import pickle, time
import os
import imutils
import screeninfo
from screeninfo import get_monitors

curr_path = os.getcwd()

######### models #########
print("Loading face detection model")
proto_path = os.path.join(curr_path, 'model', 'deploy.prototxt')
model_path = os.path.join(curr_path, 'model', 'res10_300x300_ssd_iter_140000.caffemodel')
face_detector = cv2.dnn.readNetFromCaffe(prototxt=proto_path, caffeModel=model_path)

print("Loading face recognition model")
recognition_model = os.path.join(curr_path, 'model', 'openface_nn4.small2.v1.t7')
face_recognizer = cv2.dnn.readNetFromTorch(model=recognition_model)

######### pickles #########
recognizer = pickle.loads(open('recognizer.pickle', "rb").read())
le = pickle.loads(open('le.pickle', "rb").read())
print("Starting test video file")

######### adjacents #########
no_of_adjacent_prediction = 0
no_face_detected = 0  # to track the number of times the face is detected
prev_predicted_name = ''  # to keep track of the previously predicted face (w.r.t. frame)
count_frames = total_no_face_detected = 0

######### camera #########
font = cv2.FONT_HERSHEY_SIMPLEX
clr = (255, 255, 255)
cap = cv2.VideoCapture(0)
time.sleep(2)
profile = None

######### try counts #########
MAX_TRY = 3
tries = 0

######### flags #########
flag = True
flag_face_recognised = False  # to keep track if the user's face is recognized
flag_face_not_recognised = False

######### fullscreen #########
WINDOW_NAME = "Face-Rcognition and QRCODEQQQQQ"
screenid = 0

while True:
    cv2.namedWindow(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    ret, frame = cap.read()
    screen = screeninfo.get_monitors()[screenid]
    screen_width, screen_height = screen.width, screen.height
    frame = cv2.flip(frame, 1)
    frame_height, frame_width, _ = frame.shape
    scaleWidth = float(screen_width) / float(frame_width)
    scaleHeight = float(screen_height) / float(frame_height)

    if flag:
        access = open("AccessCodes.txt")
        for i in decode(frame):
            decoded_data = i.data.decode("utf-8")  # converts bytes to string value
            print(decoded_data)
            # Drawing polygon on frame (tilts w.r.t. orientation)
            pts = np.array([i.polygon], np.int32)
            pts = pts.reshape((-1, 1, 2))
            cv2.polylines(frame, [pts], True, (0, 255, 0), 1)
            # Display text: using the rect point as origin for the text,
            # as we don't want the text to tilt with the qrcode
            rect_pts = i.rect
            fontScale = 0.8
            thickness = 1
            # cv2.putText(frame, decoded_data, (rect_pts[0], rect_pts[1]), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 0, 0), thickness)
            if decoded_data.lower() in access.read():  # Check private key
                flag = False
                tries = 0
                print("QRCODE is Valid. Proceed to FaceRecog")
                time_out_no_of_frames_after_qrcode = 0
            else:
                print("Invalid QRCODE")
        if scaleHeight > scaleWidth:
            imgScale = scaleWidth
        else:
            imgScale = scaleHeight
        newX, newY = frame.shape[1] * imgScale, frame.shape[0] * imgScale
        frame = cv2.resize(frame, (int(newX), int(newY)))
        cv2.imshow(WINDOW_NAME, frame)
    else:
        frame = cv2.resize(frame, (int(newX), int(newY)))
        (h, w) = frame.shape[:2]
        image_blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0), False, False)
        face_detector.setInput(image_blob)
        face_detections = face_detector.forward()
        for i in range(0, face_detections.shape[2]):
            confidence = face_detections[0, 0, i, 2]
            if confidence > 0.90:
                box = face_detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                face = frame[startY:endY, startX:endX]
                (fH, fW) = face.shape[:2]
                face_blob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), True, False)
                face_recognizer.setInput(face_blob)
                vec = face_recognizer.forward()
                preds = recognizer.predict_proba(vec)[0]
                j = np.argmax(preds)
                proba = preds[j]
                name = le.classes_[j]
                text = "{}: {:.2f}".format(name, proba)
                y = startY - 10 if startY - 10 > 10 else startY + 10
                cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 0, 255), 2)
                cv2.putText(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(frame, "Welcome home " + name.replace('_', ' ').title(), (160, 460), font, 0.8, clr,
                            thickness + 3, cv2.LINE_AA)
                cv2.rectangle(frame, (startX, startY), (endX, endY), (255, 255, 255), 1)
                if name == decoded_data.lower():
                    print("Face is Recognised: " + str(no_of_adjacent_prediction))
                    no_of_adjacent_prediction += 1
                else:
                    print("Face not Recognised.")
                    cv2.putText(frame, "Face Not Recognised", (160, 460), font, 0.8, clr, thickness, cv2.LINE_AA)
                    flag_face_not_recognised = True
                    no_of_adjacent_prediction = 0
                if no_of_adjacent_prediction > 10:  # only updated when the confidence of classification is > 80
                    flag_face_recognised = True
                    no_of_adjacent_prediction = 0
                    no_face_detected = 0
        cv2.imshow(WINDOW_NAME, frame)
        if flag_face_recognised:  # if face is recognized then open the door
            # arduino.write(bytes('o', 'utf-8'))  # output the given byte string over the serial port
            print("DOOR is OPEN")
            time.sleep(5)
            # speak("Closing door")
            # arduino.write(bytes('c', 'utf-8'))  # output the given byte string over the serial port
            print("DOOR is CLOSED")
            flag_face_recognised = False
            flag = True  # to start from qrcode
        if flag_face_not_recognised:
            # speak("Face not recognised. The door will remain closed")
            time.sleep(2)
            flag_face_not_recognised = False
            tries += 1
            if tries >= MAX_TRY:
                flag = True  # to start from qrcode
                tries = 0
        if time_out_no_of_frames_after_qrcode >= 400:
            # speak("User authentication failed due to time out")
            flag = True  # to start from qrcode

    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break

cv2.destroyAllWindows()
FPS on PC: 20 fps
FPS on Raspberry Pi: 9 fps
I tried various OpenCV codes and the result is still the same. I found that threading increases the fps in OpenCV, but I don't know how to apply it to my code, as I am a Python beginner. Some help would be nice. I want the fps on my Raspberry Pi to be in the 15-20 range instead of 9 fps.
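The usual pattern (essentially what imutils.video.VideoStream does internally) is to move cap.read() onto a background thread, so the main loop always grabs the most recent frame instead of blocking on the camera. A minimal sketch; the class name is made up for illustration:

import threading
import cv2

class ThreadedCapture:
    """Reads frames on a background thread so the main loop never blocks
    on cap.read(); read() always returns the most recent frame."""
    def __init__(self, src=0):
        self.cap = cv2.VideoCapture(src)
        self.ret, self.frame = self.cap.read()
        self.stopped = False
        self.lock = threading.Lock()
        threading.Thread(target=self._update, daemon=True).start()

    def _update(self):
        while not self.stopped:
            ret, frame = self.cap.read()
            with self.lock:
                self.ret, self.frame = ret, frame

    def read(self):
        with self.lock:
            return self.ret, self.frame

    def release(self):
        self.stopped = True
        self.cap.release()

# usage: replace cv2.VideoCapture(0) with ThreadedCapture(0);
# the rest of the loop keeps calling cap.read() as before.

Note that threading only hides capture latency; the two DNN forward passes still run on the main thread and remain the hard ceiling on fps on a Pi. Detecting on every other frame, or on a smaller input, may be needed on top of this.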

Shifting from OpenCV GUI to Tkinter Form

I have this code that performs mouse functions using the eyes and other facial gestures with OpenCV and dlib. I run this code via a button click from a tkinter window. When the code starts running, that tkinter window freezes (i.e., I cannot click any other button on it).
Is there a way to make the frame used by OpenCV a top-level window, like Toplevel frames in tkinter, so that it doesn't freeze the other frames? Or how can I replace the OpenCV frame with a Tkinter Toplevel frame?
P.S.: I have been at this for two days; I have literally tried everything I can find on the internet and can't seem to find a solution.
_, frame = vid.read()
frame = cv2.flip(frame, 1)
frame = imutils.resize(frame, width=cam_w, height=cam_h)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
This is the part where changes will be made, I guess. The full code is shared below.
from imutils import face_utils
from utils import *
import numpy as np
import pyautogui as pag
import imutils
import dlib
import cv2

# Thresholds and consecutive frame length for triggering the mouse action.
MOUTH_AR_THRESH = 0.3
MOUTH_AR_CONSECUTIVE_FRAMES = 3
EYE_AR_THRESH = 0.20
EYE_AR_CONSECUTIVE_FRAMES = 5
WINK_AR_DIFF_THRESH = 0.001
WINK_AR_CLOSE_THRESH = 0.2
WINK_CONSECUTIVE_FRAMES = 4

# Initialize the frame counters for each action as well as
# booleans used to indicate if action is performed or not
MOUTH_COUNTER = 0
EYE_COUNTER = 0
WINK_COUNTER = 0
INPUT_MODE = False
EYE_CLICK = False
LEFT_WINK = False
RIGHT_WINK = False
SCROLL_MODE = False
ANCHOR_POINT = (0, 0)
WHITE_COLOR = (255, 255, 255)
YELLOW_COLOR = (0, 255, 255)
RED_COLOR = (0, 0, 255)
GREEN_COLOR = (0, 255, 0)
BLUE_COLOR = (255, 0, 0)
BLACK_COLOR = (0, 0, 0)

# Initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
shape_predictor = "model/shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(shape_predictor)

# Grab the indexes of the facial landmarks for the left and
# right eye, nose and mouth respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
(nStart, nEnd) = face_utils.FACIAL_LANDMARKS_IDXS["nose"]
(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]

# Video capture
vid = cv2.VideoCapture(0)
resolution_w = 1366
resolution_h = 768
cam_w = 640
cam_h = 480
unit_w = resolution_w / cam_w
unit_h = resolution_h / cam_h

while True:
    # Grab the frame from the threaded video file stream, resize
    # it, and convert it to grayscale (channels)
    _, frame = vid.read()
    frame = cv2.flip(frame, 1)
    frame = imutils.resize(frame, width=cam_w, height=cam_h)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the grayscale frame
    rects = detector(gray, 0)

    # Loop over the face detections
    if len(rects) > 0:
        rect = rects[0]
    else:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        continue

    # Determine the facial landmarks for the face region, then
    # convert the facial landmark (x, y)-coordinates to a NumPy array
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)

    # Extract the left and right eye coordinates, then use the
    # coordinates to compute the eye aspect ratio for both eyes
    mouth = shape[mStart:mEnd]
    leftEye = shape[lStart:lEnd]
    rightEye = shape[rStart:rEnd]
    nose = shape[nStart:nEnd]

    # Because I flipped the frame, left is right, right is left.
    temp = leftEye
    leftEye = rightEye
    rightEye = temp

    # Average the mouth aspect ratio together for both eyes
    mar = mouth_aspect_ratio(mouth)
    leftEAR = eye_aspect_ratio(leftEye)
    rightEAR = eye_aspect_ratio(rightEye)
    ear = (leftEAR + rightEAR) / 2.0
    diff_ear = np.abs(leftEAR - rightEAR)
    nose_point = (nose[3, 0], nose[3, 1])

    # Compute the convex hull for the left and right eye, then
    # visualize each of the eyes
    mouthHull = cv2.convexHull(mouth)
    leftEyeHull = cv2.convexHull(leftEye)
    rightEyeHull = cv2.convexHull(rightEye)
    cv2.drawContours(frame, [mouthHull], -1, YELLOW_COLOR, 1)
    cv2.drawContours(frame, [leftEyeHull], -1, YELLOW_COLOR, 1)
    cv2.drawContours(frame, [rightEyeHull], -1, YELLOW_COLOR, 1)
    for (x, y) in np.concatenate((mouth, leftEye, rightEye), axis=0):
        cv2.circle(frame, (x, y), 2, GREEN_COLOR, -1)

    # Check to see if the eye aspect ratio is below the blink
    # threshold, and if so, increment the blink frame counter
    if diff_ear > WINK_AR_DIFF_THRESH:
        if leftEAR < rightEAR:
            if leftEAR < EYE_AR_THRESH:
                WINK_COUNTER += 1
                if WINK_COUNTER > WINK_CONSECUTIVE_FRAMES:
                    pag.click(button='left')
                    WINK_COUNTER = 0
        elif leftEAR > rightEAR:
            if rightEAR < EYE_AR_THRESH:
                WINK_COUNTER += 1
                if WINK_COUNTER > WINK_CONSECUTIVE_FRAMES:
                    pag.click(button='right')
                    WINK_COUNTER = 0
        else:
            WINK_COUNTER = 0
    else:
        if ear <= EYE_AR_THRESH:
            EYE_COUNTER += 1
            if EYE_COUNTER > EYE_AR_CONSECUTIVE_FRAMES:
                SCROLL_MODE = not SCROLL_MODE
                # INPUT_MODE = not INPUT_MODE
                EYE_COUNTER = 0
        else:
            EYE_COUNTER = 0
            WINK_COUNTER = 0

    if mar > MOUTH_AR_THRESH:
        MOUTH_COUNTER += 1
        if MOUTH_COUNTER >= MOUTH_AR_CONSECUTIVE_FRAMES:
            # if the alarm is not on, turn it on
            INPUT_MODE = not INPUT_MODE
            # SCROLL_MODE = not SCROLL_MODE
            MOUTH_COUNTER = 0
            ANCHOR_POINT = nose_point
    else:
        MOUTH_COUNTER = 0

    if INPUT_MODE:
        cv2.putText(frame, "READING INPUT!", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, RED_COLOR, 2)
        x, y = ANCHOR_POINT
        nx, ny = nose_point
        w, h = 60, 35
        multiple = 1
        # nose point to draw a bounding box around it
        cv2.rectangle(frame, (x - w, y - h), (x + w, y + h), GREEN_COLOR, 2)
        cv2.line(frame, ANCHOR_POINT, nose_point, BLUE_COLOR, 2)

        dir = direction(nose_point, ANCHOR_POINT, w, h)
        cv2.putText(frame, dir.upper(), (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, RED_COLOR, 2)
        drag = 18
        if dir == 'right':
            pag.moveRel(drag, 0)
        elif dir == 'left':
            pag.moveRel(-drag, 0)
        elif dir == 'up':
            if SCROLL_MODE:
                pag.scroll(40)
            else:
                pag.moveRel(0, -drag)
        elif dir == 'down':
            if SCROLL_MODE:
                pag.scroll(-40)
            else:
                pag.moveRel(0, drag)

    if SCROLL_MODE:
        cv2.putText(frame, 'SCROLL MODE IS ON!', (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, RED_COLOR, 2)

    # Show the frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # If the `Esc` key was pressed, break from the loop
    if key == 27:
        break

# Do a bit of cleanup
cv2.destroyAllWindows()
vid.release()
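One way to keep the tkinter window responsive is to run the OpenCV loop on a worker thread, so the Tk mainloop keeps servicing events; cv2.imshow opens its own window, so it does not need the Tk mainloop. A sketch, assuming the while-loop above has been wrapped in a function (run_opencv_loop is a made-up name for illustration):

import threading
import tkinter as tk

def run_opencv_loop():
    # The while-loop from the script above goes here, unchanged.
    ...

def start_mouse_control():
    # Daemon thread: it dies with the Tk window instead of blocking exit.
    threading.Thread(target=run_opencv_loop, daemon=True).start()

root = tk.Tk()
tk.Button(root, text="Start", command=start_mouse_control).pack()
root.mainloop()

Two caveats: the worker thread must never touch Tk widgets directly (hand results back via a queue.Queue polled with root.after()), and on some platforms GUI calls such as cv2.imshow must stay on the main thread, in which case scheduling the per-frame work with root.after() instead of a thread is the safer route.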

How to remove a target object from MultiTracker in opencv-python

I'm using opencv-python for my undergraduate graduation project, and I need MultiTracker to implement multi-target detection and tracking. However, I cannot remove a target object after it disappears from the screen. I'm not a student majoring in digital image processing, and the problem bothers me a lot. Can anyone help me? The code is as follows:
import sys
import cv2
from random import randint

trackerTypes = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
trackerType = trackerTypes[6]  # KCF and MOSSE results are acceptable; KCF tracks best, MOSSE is fastest

def adjust_frame(frame):
    rows, cols, ch = frame.shape
    M = cv2.getRotationMatrix2D((cols, rows), 1, 1)  # the three parameters are rotation center, rotation angle, scale
    frame = cv2.warpAffine(frame, M, (cols, rows))
    frame = frame[580:670, 470:1030]
    frame = cv2.resize(frame, None, fx=1.5, fy=1.5, interpolation=cv2.INTER_CUBIC)
    return frame

def createTrackerByName(trackerType):
    # create a tracker from its name
    if trackerType == trackerTypes[0]:
        tracker = cv2.TrackerBoosting_create()
    elif trackerType == trackerTypes[1]:
        tracker = cv2.TrackerMIL_create()
    elif trackerType == trackerTypes[2]:
        tracker = cv2.TrackerKCF_create()
    elif trackerType == trackerTypes[3]:
        tracker = cv2.TrackerTLD_create()
    elif trackerType == trackerTypes[4]:
        tracker = cv2.TrackerMedianFlow_create()
    elif trackerType == trackerTypes[5]:
        tracker = cv2.TrackerGOTURN_create()
    elif trackerType == trackerTypes[6]:
        tracker = cv2.TrackerMOSSE_create()
    elif trackerType == trackerTypes[7]:
        tracker = cv2.TrackerCSRT_create()
    else:
        tracker = None
        print('Incorrect tracker name')
        print('Available tracker names:')
        for t in trackerTypes:
            print(t)
    return tracker

print('Default tracking algorithm is CSRT \n'
      'Available tracking algorithms are:\n')
for t in trackerTypes:
    print(t, end=' ')

videoPath = r'E:\python files\vehicle identification\4.MOV'  # path of the video file to load
cap = cv2.VideoCapture(videoPath)  # create a VideoCapture to read the video file

# read the first frame
ret, frame = cap.read()
frame = adjust_frame(frame)
# exit if the video file cannot be read
if not ret:
    print('Failed to read video')
    sys.exit(1)

# selection boxes
bboxes = []
colors = []

# OpenCV's selectROI does not support selecting multiple objects in Python,
# so call it in a loop until all objects have been selected
while True:
    # draw a bounding box over the object; selectROI draws the box from the
    # center by default, and from the top-left corner when fromCenter is False
    bbox = cv2.selectROI('MultiTracker', frame)  # returns four values: x, y, w, h
    bboxes.append(bbox)
    colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))
    print("Press q to quit selecting boxes and start tracking")
    print("Press any other key to select next object")
    k = cv2.waitKey(0)
    if k == 113:  # q is pressed
        break

print('Selected bounding boxes {}'.format(bboxes))

# Initialize the MultiTracker. There are two ways to do it:
# 1. tracker = cv2.MultiTracker("CSRT")
#    every tracker added to this MultiTracker uses CSRT as the default algorithm
# 2. tracker = cv2.MultiTracker()
#    no default algorithm is specified
# Here we initialize the MultiTracker with a specified tracker type.

# create the MultiTracker object
multiTracker = cv2.MultiTracker_create()

# initialize the MultiTracker
for bbox in bboxes:
    multiTracker.add(createTrackerByName(trackerType), frame, bbox)

# process the video and track the objects
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    frame = adjust_frame(frame)
    timer = cv2.getTickCount()  # timing point 1

    # get the updated positions of the objects in subsequent frames
    ret, boxes = multiTracker.update(frame)

    # draw the tracked objects
    for i, newbox in enumerate(boxes):
        p1 = (int(newbox[0]), int(newbox[1]))  # x, y coordinates
        p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
        cv2.rectangle(frame, p1, p2, colors[i], 2, 1)

    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)  # timing point 2
    cv2.putText(frame, "FPS : " + str(int(fps)), (10, 13), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (50, 170, 50), 2)
    cv2.putText(frame, trackerType + " Tracker", (10, 28), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (50, 170, 50), 2)
    cv2.imshow('MultiTracker', frame)

    k = cv2.waitKey(1)
    if k == 27:
        break
    elif k == ord('p'):  # press 'p' to add a new target
        bbox = cv2.selectROI('MultiTracker', frame)  # returns four values: x, y, w, h
        bboxes.append(bbox)
        colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))
        multiTracker.add(createTrackerByName(trackerType), frame, bbox)
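cv2.MultiTracker has no method for removing a single tracker, so the usual workaround is to rebuild it with only the targets you want to keep. A sketch using the question's own createTrackerByName and trackerType; the keep-list criterion shown in the usage comment (non-empty boxes) is only an example, and in practice you might instead drop boxes that have left the cropped region:

def rebuild_multitracker(frame, boxes, colors, keep):
    # Rebuild the MultiTracker from scratch with the surviving targets,
    # since individual trackers cannot be removed once added.
    new_tracker = cv2.MultiTracker_create()
    kept_boxes, kept_colors = [], []
    for i in keep:
        new_tracker.add(createTrackerByName(trackerType), frame, tuple(boxes[i]))
        kept_boxes.append(tuple(boxes[i]))
        kept_colors.append(colors[i])
    return new_tracker, kept_boxes, kept_colors

# after `ret, boxes = multiTracker.update(frame)`, drop vanished targets:
# keep = [i for i, b in enumerate(boxes) if b[2] > 0 and b[3] > 0]
# if len(keep) < len(boxes):
#     multiTracker, bboxes, colors = rebuild_multitracker(frame, boxes, colors, keep)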
