Different results when calculating with floats instead of integers - python

Why does using float instead of int give me different results when all of my inputs are integers?
from deepface import DeepFace
import cv2
import matplotlib.pyplot as plt
import webbrowser
import turtle

faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
pause = 0

def happy():
    print("Vani, Gỗ đàn hương, Hoa nhài và Hương thảo")  # Vietnamese: "Vanilla, Sandalwood, Jasmine and Rosemary"
    webbrowser.open("https://www.youtube.com/watch?v=LjhCEhWiKXk&list=PL1VuYyZcPYIJTP3W_x0jq9olXviPQlOe1")
    tur = turtle.Screen()
    tur.bgcolor("pink")
    tur.title("Light")
    turtle.done()

def sad():
    print("Sả chanh, Cam Bergamot, Húng quế và Nhục đậu khấu")  # Vietnamese: "Lemongrass, Bergamot, Basil and Nutmeg"
    webbrowser.open("https://www.youtube.com/watch?v=LanCLS_hIo4&list=PLWOvS8Nliu2x6LvCNnRydpT-owR8zxXB7")
    tur = turtle.Screen()
    tur.bgcolor("yellow")
    tur.title("Light")
    turtle.done()

def neutral():
    print("Gừng, Bưởi và Chanh")  # Vietnamese: "Ginger, Grapefruit and Lemon"
    webbrowser.open("https://www.youtube.com/watch?v=ru0K8uYEZWw&list=PLVgakZ6MigxxNhXZae5cALEW588-sMQn6")
    tur = turtle.Screen()
    tur.bgcolor("orange")
    tur.title("Light")
    turtle.done()

def angry():
    print("Oải hương, Hoa hồng, Cúc la mã và Ylang-Ylang")  # Vietnamese: "Lavender, Rose, Chamomile and Ylang-Ylang"
    webbrowser.open("https://www.youtube.com/watch?v=qq-RGFyaq0U&list=PLefKpFQ8Pvy5aCLAGHD8Zmzsdljos-t2l")
    tur = turtle.Screen()
    tur.bgcolor("green")
    tur.title("Light")
    turtle.done()

while True:
    ret, frame = cap.read()
    try:
        result = DeepFace.analyze(frame, actions=['emotion'])  # note: recent DeepFace versions return a list of dicts; use result[0]['dominant_emotion'] there
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, 1.1, 4)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, result['dominant_emotion'], (50, 50), font, 3, (0, 0, 255), 2, cv2.LINE_4)
        if result['dominant_emotion'] == "happy" and pause == 0:
            happy()
        if result['dominant_emotion'] in ("sad", "tear") and pause == 0:  # membership test; == ["sad","tear"] compared a string to a list and was always False
            sad()
        if result['dominant_emotion'] == "angry" and pause == 0:
            angry()
        if result['dominant_emotion'] == "neutral" and pause == 0:
            neutral()
    except:
        pass
    cv2.imshow('Emotional Expression', frame)
    if cv2.waitKey(2) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
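On the title question itself: Python floats are IEEE-754 binary doubles, so some decimal fractions have no exact representation and integers above 2**53 lose their low bits. A computation that passes through float can therefore drift even when every input is an integer. A minimal, self-contained illustration:

# some decimal fractions are inexact in binary floating point
print(0.1 + 0.2)                           # 0.30000000000000004, not 0.3
print(0.1 + 0.2 == 0.3)                    # False

# Python ints are exact at any size
print(10**20 + 1 - 10**20)                 # 1

# the same computation through float loses the low bits (53-bit significand)
print(float(10**20) + 1 - float(10**20))   # 0.0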

Related

OpenCV how to detect in specific position

Can I detect fires that are already inside the square at specific positions? I already have code that detects fire and a specific region of view, but can I change it to a smaller region?
Also, can I run a "main" function (the function is named "main") every 5 minutes, but at different times? Here is my current code:
import cv2
import numpy as np
import math
import time
import asyncio
from asyncio import sleep

yukseklik = int(input("Yukseklik giriniz "))  # Turkish: "Enter the height"
hiz = input("Hizi giriniz ")                  # Turkish: "Enter the speed"
kez = 0

cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # Red color
    low_red = np.array([0, 80, 20])
    high_red = np.array([35, 255, 255])
    kernal = np.ones((5, 5), "uint8")
    low_red1 = np.array([160, 100, 20])   # second red range, currently unused
    high_red1 = np.array([190, 255, 255])
    red_mask = cv2.inRange(hsv_frame, low_red, high_red)
    red = cv2.bitwise_and(frame, frame, mask=red_mask)
    contours, hierarchy = cv2.findContours(red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.rectangle(frame, (213, 160), (426, 320), (255, 255, 255), 4)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area > 300:
            x, y, w, h = cv2.boundingRect(contour)
            imframe = cv2.rectangle(frame, (x - 20, y + 20), (x + w, y + h), (255, 255, 255), 2)
            if 213 < x < 426 and 160 < y < 320:
                if kez == 0:
                    def main():
                        g = 9.80
                        y = 2 * (yukseklik - 15)
                        u = float(y) / g
                        x_ = math.sqrt(u)
                        x_ = x_ * float(hiz)
                        xi = float(yukseklik) * 1.73205080756887729352744463415059  # sqrt(3)
                        print("x= ", str(xi))
                        print("x'= ", str(x_))
                        t = (float(xi) - float(x_)) / float(hiz)
                        print("t= ", t)
                        global kez
                        kez = kez + 1
                        asyncio.sleep(5)  # note: never awaited, so this returns immediately and does not pause
                        kez = 0
                    main()
    cv2.imshow("Frame", frame)
    cv2.imshow("Red", red)
    key = cv2.waitKey(1)
    if key == 27:
        break
Output: (screenshot not included)
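On the second question, running main() every 5 minutes: asyncio.sleep(5) is never awaited here, so it returns immediately and the kez guard resets at once. Inside an already-busy capture loop, a timestamp check is simpler than asyncio; a minimal sketch against the code above:

import time

INTERVAL = 5 * 60      # seconds between runs of main()
last_run = 0.0         # 0 forces an immediate first run

# inside the while loop, in place of the kez counter and asyncio.sleep():
now = time.time()
if now - last_run >= INTERVAL:
    main()
    last_run = now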

I want my Opencv to use threading to increase fps

My OpenCV code lags on a Raspberry Pi, but on a PC it is smooth. Can anyone help me turn my single-threaded code into code that uses threading?
import cv2
import numpy as np
from pyzbar.pyzbar import decode
import pickle, time
import os
import imutils
import screeninfo
from screeninfo import get_monitors

curr_path = os.getcwd()

#########models##################################################
print("Loading face detection model")
proto_path = os.path.join(curr_path, 'model', 'deploy.prototxt')
model_path = os.path.join(curr_path, 'model', 'res10_300x300_ssd_iter_140000.caffemodel')
face_detector = cv2.dnn.readNetFromCaffe(prototxt=proto_path, caffeModel=model_path)
print("Loading face recognition model")
recognition_model = os.path.join(curr_path, 'model', 'openface_nn4.small2.v1.t7')
face_recognizer = cv2.dnn.readNetFromTorch(model=recognition_model)

################pickles#########################################
recognizer = pickle.loads(open('recognizer.pickle', "rb").read())
le = pickle.loads(open('le.pickle', "rb").read())
print("Starting test video file")

#adjacents########################################################################
no_of_adjacent_prediction = 0
no_face_detected = 0          # to track the number of times the face is detected
prev_predicted_name = ''      # to keep track of the previously predicted face (w.r.t. frame)
count_frames = total_no_face_detected = 0

#camera#########################################################################
font = cv2.FONT_HERSHEY_SIMPLEX
clr = (255, 255, 255)
cap = cv2.VideoCapture(0)
time.sleep(2)
profile = None

####TRY_COUNTS###########
MAX_TRY = 3
tries = 0

######flags############
flag = True
flag_face_recognised = False      # to keep track if the user face is recognized
flag_face_not_recognised = False

#############FULLSCREEN###############
WINDOW_NAME = "Face-Rcognition and QRCODEQQQQQ"
screenid = 0

while True:
    cv2.namedWindow(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    ret, frame = cap.read()
    screen = screeninfo.get_monitors()[screenid]
    screen_width, screen_height = screen.width, screen.height
    frame = cv2.flip(frame, 1)
    frame_height, frame_width, _ = frame.shape
    scaleWidth = float(screen_width) / float(frame_width)
    scaleHeight = float(screen_height) / float(frame_height)
    if flag:
        access = open("AccessCodes.txt")
        for i in decode(frame):
            decoded_data = i.data.decode("utf-8")  # converts bytes to string value
            print(decoded_data)

            # Drawing polygon on frame (tilts w.r.t orientation)
            pts = np.array([i.polygon], np.int32)
            pts = pts.reshape((-1, 1, 2))
            cv2.polylines(frame, [pts], True, (0, 255, 0), 1)
            # print(pts)

            # Display text
            rect_pts = i.rect  # using rect point as origin for text as we don't want the text to tilt with the qrcode
            fontScale = 0.8
            thickness = 1
            # cv2.putText(frame, decoded_data, (rect_pts[0], rect_pts[1]), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 0, 0), thickness)
            # print(rect_pts)
            if decoded_data.lower() in access.read():  # Check private key
                flag = False
                tries = 0
                print("QRCODE is Valid. Proceed to FaceRecog")
                time_out_no_of_frames_after_qrcode = 0
            else:
                print("Invalid QRCODE")
        if scaleHeight > scaleWidth:
            imgScale = scaleWidth
        else:
            imgScale = scaleHeight
        newX, newY = frame.shape[1] * imgScale, frame.shape[0] * imgScale
        frame = cv2.resize(frame, (int(newX), int(newY)))
        cv2.imshow(WINDOW_NAME, frame)
    else:
        frame = cv2.resize(frame, (int(newX), int(newY)))
        (h, w) = frame.shape[:2]
        image_blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0), False, False)
        face_detector.setInput(image_blob)
        face_detections = face_detector.forward()
        for i in range(0, face_detections.shape[2]):
            confidence = face_detections[0, 0, i, 2]
            if confidence > 0.90:
                box = face_detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                face = frame[startY:endY, startX:endX]
                (fH, fW) = face.shape[:2]
                face_blob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), True, False)
                face_recognizer.setInput(face_blob)
                vec = face_recognizer.forward()
                preds = recognizer.predict_proba(vec)[0]
                j = np.argmax(preds)
                proba = preds[j]
                name = le.classes_[j]
                text = "{}: {:.2f}".format(name, proba)
                y = startY - 10 if startY - 10 > 10 else startY + 10
                cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 0, 255), 2)
                cv2.putText(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(frame, "Welcome home " + name.replace('_', ' ').title(), (160, 460), font, 0.8, clr,
                            thickness + 3, cv2.LINE_AA)
                cv2.rectangle(frame, (startX, startY), (endX, endY), (255, 255, 255), 1)
                if name == decoded_data.lower():
                    print("Face is Recognised: " + str(no_of_adjacent_prediction))
                    no_of_adjacent_prediction += 1
                else:
                    print("Face not Recognised.")
                    cv2.putText(frame, "Face Not Recognised", (160, 460), font, 0.8, clr, thickness, cv2.LINE_AA)
                    flag_face_not_recognised = True
                    no_of_adjacent_prediction = 0
                if no_of_adjacent_prediction > 10:  # no_of_adjacent_prediction is only updated when the confidence of classification is > 80
                    flag_face_recognised = True
                    no_of_adjacent_prediction = 0
                    no_face_detected = 0
        cv2.imshow(WINDOW_NAME, frame)
        if flag_face_recognised:  # if face is recognized then open the door
            # arduino.write(bytes('o', 'utf-8'))  # Output the given byte string over the serial port.
            print("DOOR is OPEN")
            time.sleep(5)
            # speak("Closing door")
            # arduino.write(bytes('c', 'utf-8'))  # Output the given byte string over the serial port.
            print("DOOR is CLOSED")
            flag_face_recognised = False
            flag = True  # to start from qrcode
        if flag_face_not_recognised:
            # speak("Face not recognised. The door will remain closed")
            time.sleep(2)
            flag_face_not_recognised = False
            tries += 1
            if tries >= MAX_TRY:
                flag = True  # to start from qrcode
                tries = 0
        if time_out_no_of_frames_after_qrcode >= 400:
            # speak("User authentication failed due to time out")
            flag = True  # to start from qrcode
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break

cv2.destroyAllWindows()
FPS PC: 20 fps
FPS Raspberry Pi: 9 fps
I tried various OpenCV codes and the result is still the same. I found that threading can increase OpenCV's fps, but I do not know how to apply it to my code because I am a Python beginner. Any help would be nice. I want the fps on my Raspberry Pi to be in the 15-20 range instead of 9 fps.
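A common fix is to move cap.read() onto a background thread so the main loop never blocks on camera I/O; imutils (which the code already imports) ships a ready-made version of this as imutils.video.VideoStream. A minimal sketch of the pattern, untested on a Pi:

import threading
import cv2

class VideoStream:
    """Grab frames on a background thread so the main loop never waits on the camera."""
    def __init__(self, src=0):
        self.cap = cv2.VideoCapture(src)
        self.ret, self.frame = self.cap.read()   # prime with one frame
        self.stopped = False
        self.thread = threading.Thread(target=self._update, daemon=True)
        self.thread.start()

    def _update(self):
        # keep overwriting self.frame; the consumer always sees the newest frame
        while not self.stopped:
            self.ret, self.frame = self.cap.read()

    def read(self):
        return self.ret, self.frame

    def stop(self):
        self.stopped = True
        self.thread.join()
        self.cap.release()

# usage in the loop above:
#   vs = VideoStream(0)          replaces cap = cv2.VideoCapture(0)
#   ret, frame = vs.read()       replaces ret, frame = cap.read()
#   vs.stop()                    replaces cap.release()

Note this only hides capture latency; the face-detection DNN is the other big cost on a Pi, so expect an improvement but not necessarily the full 15-20 fps.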

Why does program freeze when using time.sleep()

So I am working on an OpenCV project that detects whether a certain hand sign has been made. That part works.
I also wanted it to wait a few seconds so it does not count accidental movements. But when I use time.sleep() or start-time/end-time methods, it either freezes my program and always executes the action regardless of the if conditions, or worse, it never executes the if statement at all.
Here is the code snippet I used. By the way, I have already tried time.sleep(); it just freezes the program and always plays the clip whether or not my if statement matched.
Is there any way to resolve this issue?
import cv2
import time
import os
import HandTrackingModule as htm
from playsound import playsound

wCam, hCam = 1920, 1080
cap = cv2.VideoCapture(2)
cap.set(3, wCam)
cap.set(4, hCam)
frame = cap.read()
pTime = 0
detector = htm.handDetector(detectionCon=1)
Player1 = []
Player2 = []

while True:
    success, img = cap.read()
    img = detector.findHands(img)
    lmlist = detector.findPosition(img, handNo=0, draw=False)
    if len(lmlist) != 0:
        Player1 = []
        Player2 = []
        if (lmlist[4][1] < lmlist[3][1] and lmlist[8][2] < lmlist[6][2] and lmlist[20][2] < lmlist[18][2] and lmlist[12][2] > lmlist[10][2] and lmlist[16][2] > lmlist[14][2]) == False:
            StartTime = time.time()
        if lmlist[4][1] < lmlist[3][1] and lmlist[8][2] < lmlist[6][2] and lmlist[20][2] < lmlist[18][2] and lmlist[12][2] > lmlist[10][2] and lmlist[16][2] > lmlist[14][2]:
            EndTime = time.time()
            Eyetime = EndTime - StartTime
            if Eyetime > 5:
                Player1.append("hello")
                playsound(r'C:\Users\haris\Documents\GitHub\Haz3-jolt\Pong_with_opencv\venv\notw.mp3')
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, f'FPS: {int(fps)}', (400, 70), cv2.FONT_HERSHEY_COMPLEX, 3, (255, 0, 0), 3)
    cv2.imshow("Image", img)
    cv2.waitKey(1)
I also import a helper module called HandTrackingModule:
import cv2
import mediapipe as mp
import time

class handDetector():
    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils

    def findHands(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        #print(results.multi_hand_landmarks)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myHand.landmark):
                # print(id, lm)
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                # print(id, cx, cy)
                lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
        return lmList

    def Marks(self, frame):
        myHands = []
        frameRGB = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = self.hands.process(frameRGB)
        if results.multi_hand_landmarks != None:
            for handLandMarks in results.multi_hand_landmarks:
                myHand = []
                for landMark in handLandMarks.landmark:
                    myHand.append((int(landMark.x * width), int(landMark.y * height)))
                myHands.append(myHand)
        return myHands

width = 1920
height = 1080

def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(2)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        lmList = detector.findPosition(img)
        if len(lmList) != 0:
            print(lmList[4])
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)

if __name__ == "__main__":
    main()
GUI (imshow) only works as long as waitKey/pollKey run continuously or frequently.
When you sleep(), you choke the GUI (imshow window). It can't update, or handle any events.
If you need to "sleep", give waitKey a suitable integer argument in milliseconds.
Be aware that waitKey can return before the time is up, e.g. if all windows were closed or if a key was pressed.
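As a concrete pattern for the original goal (only trigger when the sign is held for 5 seconds), compare timestamps every frame instead of sleeping, and let waitKey() keep the window alive. A minimal runnable sketch of the idea; sign_detected() is a hypothetical stand-in for the lmlist comparisons in the question:

import time
import cv2

def sign_detected(lmlist):
    """Hypothetical stand-in for the question's landmark comparisons."""
    return False

cap = cv2.VideoCapture(0)
gesture_start = None                 # when the gesture was first seen; None = not currently held

while True:
    ok, img = cap.read()
    if not ok:
        break
    lmlist = []                      # replace with detector.findPosition(img, ...)
    if sign_detected(lmlist):
        if gesture_start is None:
            gesture_start = time.time()          # start timing the hold
        elif time.time() - gesture_start > 5:    # held for 5 seconds
            print("gesture held for 5 s")        # play the sound here
            gesture_start = None
    else:
        gesture_start = None                     # any drop-out resets the timer
    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):        # never time.sleep() in this loop
        break

cap.release()
cv2.destroyAllWindows()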

OpenCV object detecting and save videofile

I want to do object detection and save the video, but the saved file is only 6 KB or 0 KB and it can't be played.
If I remove this line
x, y, width, height, area = stats[index]
it saves fine.
Do you know why, and is there a solution?
import cv2
import time
import numpy as np

cap = cv2.VideoCapture("rtsp://admin:admin#128.1.1.110:554")
width = int(cap.get(3))
height = int(cap.get(4))
fcc = cv2.VideoWriter_fourcc(*'XVID')
recording = False
fgbg = cv2.createBackgroundSubtractorMOG2(varThreshold=200, detectShadows=0)

while True:
    ret, frame = cap.read()
    hms = time.strftime('%H_%M_%S', time.localtime())
    fgmask = fgbg.apply(frame)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)
    for index, centroid in enumerate(centroids):
        if stats[index][0] == 0 and stats[index][1] == 0:
            continue
        if np.any(np.isnan(centroid)):
            continue
        x, y, width, height, area = stats[index]
        centerX, centerY = int(centroid[0]), int(centroid[1])
        if area > 200:
            cv2.circle(frame, (centerX, centerY), 1, (0, 255, 0), 2)
            cv2.rectangle(frame, (x, y), (x + width, y + height), (0, 0, 255))
            cv2.putText(frame, str(area), (centerX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))
    cv2.imshow('frame', frame)
    k = cv2.waitKey(1) & 0xff
    if k == ord('r') and recording is False:
        path = 'test_' + str(hms) + '.avi'
        print('recording start')
        writer = cv2.VideoWriter(path, fcc, 30.0, (width, height))
        recording = True
    if recording:
        writer.write(frame)
    if k == ord('e'):
        print('recording end')
        recording = False
        writer.release()

cap.release()
cv2.destroyAllWindows()
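A likely root cause, judging from the code: the unpacking line rebinds the width and height that were read from the capture, so when recording starts the VideoWriter is created with a blob's bounding-box size instead of the frame size. Frames whose size does not match the writer are silently dropped, leaving a header-only file. Renaming one pair avoids the clash; a minimal sketch:

# before the loop: the real frame size, under names the loop never touches
frame_width = int(cap.get(3))
frame_height = int(cap.get(4))

# inside the loop the unpack is free to reuse width/height:
#   x, y, width, height, area = stats[index]

# create the writer from the renamed values
writer = cv2.VideoWriter(path, fcc, 30.0, (frame_width, frame_height))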
I think this will solve your problem
# importing the module
import cv2
import numpy as np

# reading the video
source = cv2.VideoCapture(0)  # add your URL instead of "0"

# We need to set resolutions,
# so convert them from float to integer.
frame_width = int(source.get(3))
frame_height = int(source.get(4))
recording = False
fcc = cv2.VideoWriter_fourcc(*'XVID')
size = (frame_width, frame_height)
fgbg = cv2.createBackgroundSubtractorMOG2(varThreshold=200, detectShadows=0)
result = cv2.VideoWriter('output.avi', fcc, 30, size)

# running the loop
while True:
    # extracting the frames
    ret, frame = source.read()
    fgmask = fgbg.apply(frame)
    nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(fgmask)
    for index, centroid in enumerate(centroids):
        if stats[index][0] == 0 and stats[index][1] == 0:
            continue
        if np.any(np.isnan(centroid)):
            continue
        x, y, width, height, area = stats[index]
        centerX, centerY = int(centroid[0]), int(centroid[1])
        if area > 200:
            cv2.circle(frame, (centerX, centerY), 1, (0, 255, 0), 2)
            cv2.rectangle(frame, (x, y), (x + width, y + height), (0, 0, 255))
            cv2.putText(frame, str(area), (centerX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255))

    # displaying the video
    cv2.imshow("Live", frame)
    k = cv2.waitKey(1) & 0xff
    if k == ord('r') and recording is False:
        print('recording start')
        recording = True
    if recording:
        result.write(frame)
    if k == ord('e'):
        print('recording end')
        recording = False
        result.release()

# closing the window
cv2.destroyAllWindows()
source.release()
But unfortunately, I could not get the hms timestamp into the output file name.
You can try that yourself.
If this helped you, give 👍
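For what it's worth, a timestamped output name can be built the same way the question does it; a minimal sketch, assuming the frame_width/frame_height from the answer above:

import time
import cv2

# build the name once, e.g. when recording starts
fcc = cv2.VideoWriter_fourcc(*'XVID')
hms = time.strftime('%H_%M_%S', time.localtime())
result = cv2.VideoWriter('test_' + hms + '.avi', fcc, 30, (frame_width, frame_height))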
Actually, you need to delete some code.
cv2.imshow('MultiTracker', frame)

# quit on ESC button
if cv2.waitKey(1) & 0xFF == 27:  # Esc pressed
    break
# k = cv2.waitKey(1) & 0xff
# if k == ord('r') and recording is False:
#     print('recording start')
#     recording = True
# if recording:
result.write(frame)
# if k == ord('e'):
#     print('recording end')
#     recording = False
#     result.release()
result.release()
cv2.destroyAllWindows()
cap.release()
It works for me. The reason the file is only 6 KB is that you start the writer but never append frames to the output AVI file.

Human detection is not uniform

I am building a people counter on live video using a Raspberry Pi 3 Model B with a PiCamera.
Following is our code:
# import the necessary packages
from __future__ import print_function
from imutils.object_detection import non_max_suppression
from imutils import paths
import numpy as np
import argparse
import imutils
import cv2
import RPi.GPIO as GPIO
from threading import Thread
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import sys

# initialize the camera and grab a reference to the raw camera capture
resX = 240
resY = 180
camera = PiCamera()
camera.resolution = (resX, resY)
camera.framerate = 10
rawCapture = PiRGBArray(camera, size=(resX, resY))

# initialize textIn and textOut values
textIn = 0
textOut = 0
print(time.strftime("%H_%M_%S"))
#fourcc = cv2.VideoWriter_fourcc(*'XVID')   # OpenCV 3.x+
fourcc = cv2.cv.CV_FOURCC(*'XVID')          # OpenCV 2.x
out = cv2.VideoWriter(time.strftime("%H_%M_%S") + '.avi', fourcc, 20.0, (resX, resY))

# initialize the HOG descriptor/person detector
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
detectFlag = 0
detectCounter = [0]

# allow the camera to warmup
time.sleep(0.1)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(16, GPIO.OUT)

# methods for IN and OUT counters
def testIntersectionIn(x, y, z):
    if (x >= 75) and (x <= 90) and (x < z[0]) and (z[0] > 0):
        print(x, z[0], "IN")
        return True
    return False

def testIntersectionOut(x, y, z):
    if (x >= 75) and (x <= 90) and (x > z[0]) and (z[0] > 0):
        print(x, z[0], "OUT")
        return True
    return False

previousObj = (0, 0)

def classfier(testImage, threadNum, capTime, detectCounter):
    global textIn, textOut, previousObj
    #print(threadNum, capTime)
    (rects, weights) = hog.detectMultiScale(testImage, winStride=(8, 8),
                                            padding=(16, 16), scale=1.1)
    rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    pick = non_max_suppression(rects, probs=None, overlapThresh=0.65)

    # draw the final bounding boxes
    cv2.line(image, (75, 0), (75, 192), (250, 0, 1), 2)  # blue line
    cv2.line(image, (90, 0), (90, 192), (0, 0, 255), 2)  # red line
    for (xA, yA, xB, yB) in pick:
        print("Image detected")
        print("Previous Coord : ", previousObj)
        detectCounter[0] = 0
        cv2.rectangle(testImage, (xA, yA), (xB, yB), (0, 255, 0), 2)
        rectangleCenterPont = ((xA + xB) / 2, (yA + yB) / 2)
        cv2.circle(testImage, rectangleCenterPont, 1, (0, 0, 255), 5)
        print(rectangleCenterPont)
        if testIntersectionIn((xA + xB) / 2, (yA + yB) / 2, previousObj):
            textIn += 1
            #print testIntersectionIn((x + x + w) / 2, (y + y + h) / 2)
        if testIntersectionOut((xA + xB) / 2, (yA + yB) / 2, previousObj):
            textOut += 1
            #print textOut
        previousObj = rectangleCenterPont
    cv2.putText(image, "In: {}".format(str(textIn)), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(image, "Out: {}".format(str(textOut)), (10, 70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    # print(pick, "\n")
    curTime = time.time()
    #print("Total time from capture", curTime - capTime)
    out.write(testImage)
    cv2.imshow("After NMS", testImage)

# capture frames from the camera
i = 0
frameCount = 0
prevTime = time.time()
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    if detectCounter[0] < 10:
        GPIO.output(16, GPIO.LOW)
        print("Waiting ", detectCounter[0])
        detectCounter[0] += 1
    else:
        GPIO.output(16, GPIO.HIGH)
    image = frame.array
    captureTime = time.time()
    # print("FRAME Time", captureTime - prevTime)
    prevTime = captureTime
    t1 = Thread(target=classfier, args=(image, i, captureTime, detectCounter))
    t1.start()
    threadPick = t1.join()
    # cv2.imshow("Frame", image)
    key = cv2.waitKey(1) & 0xFF

    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        cleanup_stop_thread()
        sys.exit()
        t1.stop()
        break
In the above code:
We draw two lines on the frame.
We find the center point of the rectangle.
When that center point falls between these two lines, we increase our counter.
The IN and OUT counter logic is based on the values from these two lines.
The problem I face here is:
The rectangle over the human is not on them uniformly. It disappears and reappears at random points in the frame.
For example, when the person comes close to the first line, the rectangle disappears and only comes back once he is past the line. So the center point of the rectangle never falls between the lines, and our counter does not increase in this case.
In some cases the detection happens late, not as soon as the person enters the frame.
Please suggest any way to avoid this and keep the rectangle present the whole time a person is in the frame.
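One mitigation to try (my suggestion, not something from the original thread): when HOG momentarily loses the person, keep the previous bounding boxes alive for a few frames so the center point keeps moving across the counting lines instead of vanishing. A minimal sketch:

MAX_MISSES = 5      # frames to keep stale boxes alive (tune for your frame rate)
last_pick = []      # boxes from the most recent successful detection
misses = 0          # consecutive frames with no detection

def stabilise(pick):
    """Return the current detections, or the previous ones for up to MAX_MISSES frames."""
    global last_pick, misses
    if len(pick) > 0:
        last_pick, misses = list(pick), 0
        return pick
    misses += 1
    return last_pick if misses <= MAX_MISSES else []

# in classfier(), wrap the NMS output:
# pick = stabilise(non_max_suppression(rects, probs=None, overlapThresh=0.65))

For the late detections, running a dedicated tracker between detections (for example OpenCV's KCF or CSRT trackers, or simple centroid tracking) is a more fundamental fix than holding stale boxes.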
