OpenCV webcam: check variable status after a time period - Python

I am writing code to detect a human fist inside a box from the webcam. What I want is: after detecting a fist inside the box, the program waits for 2 seconds. After 2 seconds, if there is still a fist inside, it puts the text "FIST".
Here is my code:
import cv2
import numpy as np
from detect import detect_fist
import time

cap = cv2.VideoCapture(0)
isFist = 0

while cap.isOpened():
    ret, img = cap.read()
    img = cv2.flip(img, 1)
    cv2.rectangle(img, (50, 50), (150, 150), (0, 255, 0), 0)
    confirm = img[50:150, 50:150]    # narrow the whole webcam frame to a box
    isFist = detect_fist(confirm)    # a function to detect a fist inside that box
    if isFist:
        timeout = int(time.time()) + 2
        while 1:
            if int(time.time()) == int(timeout):
                cv2.putText(img, "Fist", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                break
    cv2.imshow('Gesture', img)
    k = cv2.waitKey(10)
    if k == 27:
        break
My problem is that after it detects my fist in the box, the camera display slows down and almost freezes. What I want is for the camera to keep working normally and to check for the fist again after 2 seconds.
I suspect this is because the check if int(time.time()) == int(timeout) is executed at the webcam's frame rate (which is really fast), so the display part cannot keep up.
Does anyone have an idea how to achieve this?

When a fist is detected, your code is stuck in the inner while loop, which is why the image window appears 'stuck'. I recommend using a bool to keep track of the status. I've modified your code to show the effect, although I have not debugged this.
import cv2
import numpy as np
from detect import detect_fist
import time

cap = cv2.VideoCapture(0)
isFist = 0
current_time = 0
target_time = 0
first_fist_detection = False

while cap.isOpened():
    ret, img = cap.read()
    img = cv2.flip(img, 1)
    cv2.rectangle(img, (50, 50), (150, 150), (0, 255, 0), 0)
    confirm = img[50:150, 50:150]    # narrow the whole webcam frame to a box
    isFist = detect_fist(confirm)    # a function to detect a fist inside that box
    current_time = int(time.time())
    if isFist and not first_fist_detection:
        first_fist_detection = True
        target_time = current_time + 2
    elif isFist and first_fist_detection:
        if current_time > target_time:
            cv2.putText(img, "Fist", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
    else:
        # that is, no fist is detected
        first_fist_detection = False
    cv2.imshow('Gesture', img)
    k = cv2.waitKey(10)
    if k == 27:
        break
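A minimal sketch of the same non-blocking idea in isolation, using time.monotonic() and a float comparison instead of truncated time.time() values; it assumes detect_fist behaves as described in the question, and the colors/keys are illustrative only.

import cv2
import time
from detect import detect_fist  # assumed to return a truthy value when a fist is present

cap = cv2.VideoCapture(0)
fist_since = None  # timestamp of the first frame of the current fist streak, or None

while cap.isOpened():
    ret, img = cap.read()
    if not ret:
        break
    img = cv2.flip(img, 1)
    cv2.rectangle(img, (50, 50), (150, 150), (0, 255, 0), 0)

    if detect_fist(img[50:150, 50:150]):
        if fist_since is None:
            fist_since = time.monotonic()           # start of the streak
        elif time.monotonic() - fist_since >= 2.0:  # fist held for 2 seconds
            cv2.putText(img, "Fist", (50, 50), cv2.FONT_HERSHEY_SIMPLEX,
                        2, (0, 0, 255), 2)
    else:
        fist_since = None                           # streak broken, reset

    cv2.imshow('Gesture', img)
    if cv2.waitKey(10) == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()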

Related

Python: real-time YOLOv4 detection from the desktop screen

There is a simple piece of code for running YOLO on a video or a PC camera:
import cv2
import time

Conf_threshold = 0.4
NMS_threshold = 0.4
COLORS = [(0, 255, 0), (0, 0, 255), (255, 0, 0),
          (255, 255, 0), (255, 0, 255), (0, 255, 255)]

class_name = []
with open('Resources\coco.names.txt', 'r') as f:
    class_name = [cname.strip() for cname in f.readlines()]
# print(class_name)

net = cv2.dnn.readNet('Resources\yolov4-tiny.weights', 'Resources\yolov4-tiny.cfg')
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA_FP16)

model = cv2.dnn_DetectionModel(net)
model.setInputParams(size=(416, 416), scale=1/255, swapRB=True)

cap = cv2.VideoCapture('test.mp4')
starting_time = time.time()
frame_counter = 0

while True:
    ret, frame = cap.read()
    frame_counter += 1
    if ret == False:
        break
    classes, scores, boxes = model.detect(frame, Conf_threshold, NMS_threshold)
    for (classid, score, box) in zip(classes, scores, boxes):
        color = COLORS[int(classid) % len(COLORS)]
        label = "%s : %f" % (class_name[classid], score)
        cv2.rectangle(frame, box, color, 1)
        cv2.putText(frame, label, (box[0], box[1]-10),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, color, 1)
    endingTime = time.time() - starting_time
    fps = frame_counter/endingTime
    # print(fps)
    cv2.putText(frame, f'FPS: {fps}', (5, 35),
                cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 0), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cap.release()
        cv2.destroyAllWindows()
But I need a screen capture instead of the test.mp4 video. To create screenshots from the screen, this code was used:
import numpy as np
import cv2
from mss import mss

def screen_record_efficiency():
    bbox = {'top': 0, 'left': 0, 'width': 800, 'height': 600}
    sct = mss()
    font = cv2.FONT_HERSHEY_SIMPLEX
    while 1:
        # grab image
        sct_img = np.array(sct.grab(bbox))
        # display image
        cv2.imshow('screen', sct_img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
I can't understand how to feed the image array from the second snippet into the first one in place of the video file, or how to combine them in general. Because of my poor knowledge of Python I can't solve this problem in any way, and I didn't find a video or article on capturing the desktop in real time.
If I pass the array directly, e.g. frame = sct_img, it raises an error that 4 arguments are passed instead of 3 (or something similar).
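One possible way to combine the two snippets, as a sketch only (not tested against the exact setup above): reuse the model setup and Resources paths from the first snippet, grab each screen frame with mss, and convert the 4-channel BGRA array that mss returns into 3-channel BGR before calling model.detect; that extra alpha channel is the likely cause of the "4 instead of 3" error.

import cv2
import numpy as np
from mss import mss

# model setup as in the first snippet (paths assumed unchanged)
net = cv2.dnn.readNet('Resources\yolov4-tiny.weights', 'Resources\yolov4-tiny.cfg')
model = cv2.dnn_DetectionModel(net)
model.setInputParams(size=(416, 416), scale=1/255, swapRB=True)

with open('Resources\coco.names.txt', 'r') as f:
    class_name = [cname.strip() for cname in f.readlines()]

sct = mss()
bbox = {'top': 0, 'left': 0, 'width': 800, 'height': 600}

while True:
    # mss returns a BGRA screenshot (4 channels); drop alpha to get plain BGR
    frame = np.array(sct.grab(bbox))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)

    classes, scores, boxes = model.detect(frame, 0.4, 0.4)
    for (classid, score, box) in zip(classes, scores, boxes):
        cv2.rectangle(frame, box, (0, 255, 0), 1)
        cv2.putText(frame, "%s : %.2f" % (class_name[int(classid)], score),
                    (box[0], box[1] - 10), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)

    cv2.imshow('screen', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()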

How to speed up video capture from a webcam in OpenCV?

I need real-time video capture. I've tried reducing the resolution and setting a static fps, and none of it worked. Why am I getting a slow video feed even though it says my fps is 30? I don't really know where exactly the problem is; it's really driving me mad.
Code:
import cv2
import os
import face_recognition
import pickle
from cv2.cv2 import CAP_DSHOW

known_faces_dir = "known_faces"
video = cv2.VideoCapture(0)
accuracy = 0.6
frame_thikness = 3
font_size = 2
MODEL = "cnn"

print("loading known faces")
known_faces = []
known_names = []
unknown_faces = []

for name in os.listdir(known_faces_dir):
    for filename in os.listdir(f"{known_faces_dir}/{name}"):
        image = face_recognition.load_image_file(f"{known_faces_dir}/{name}/{filename}")
        encodings = face_recognition.face_encodings(image)[0]
        # encodings = pickle.load(open(f"{name}/{filename}","rb"))
        known_faces.append(encodings)
        known_names.append(name)

print("treating unknow faces")
while True:
    # print(filename)
    # image = face_recognition.load_image_file(f"{unknown_faces_dir}/{filename}")
    ret, image = video.read()
    print(video.get(cv2.CAP_PROP_FPS))

    locations = face_recognition.face_locations(image, model=MODEL)
    encodings = face_recognition.face_encodings(image, locations)
    # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    for face_location, face_encodings in zip(locations, encodings):
        results = face_recognition.compare_faces(known_faces, face_encodings, tolerance=0.54)
        if True in results:
            match = known_names[results.index(True)]
            print("Face Found" f"{match}")

            top_l = (face_location[3], face_location[0])
            bottom_r = (face_location[1], face_location[2])
            color = [0, 255, 0]
            cv2.rectangle(image, top_l, bottom_r, color, frame_thikness)

            top_l = (face_location[3], face_location[2])
            bottom_r = (face_location[1], face_location[2] + 22)
            cv2.rectangle(image, top_l, bottom_r, color, cv2.FILLED)
            cv2.putText(image, str(match), (face_location[3]+10, face_location[2]+15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 200, 200), 2)

    cv2.imshow("", image)
    if cv2.waitKey(1) & 0xFF == ord("e"):
        break
    # cv2.waitKey(10200)

video.release()
cv2.destroyWindow(filename)
Try this and look at the elapsed time to compute the fps.
Then add the other processing steps one at a time.
Working with gray images from the beginning is a good idea:
if the face images are in color, save them in gray and use only gray images.
Avoid repeating the same processing when it is not necessary.
import numpy as np
import cv2
import time

cap = cv2.VideoCapture(0)
start_time = time.time()
end_time = start_time
elapsed_time = 0

font = cv2.FONT_HERSHEY_SIMPLEX
org = (50, 50)
fontScale = 1
color = (255, 0, 0)
thickness = 2

while True:
    start_time = time.time()
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Display the resulting frame (guard against division by zero on the first frame)
    if elapsed_time > 0:
        cv2.putText(gray, str(1 / elapsed_time) + "fps", org, font,
                    fontScale, color, thickness, cv2.LINE_AA)
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    end_time = time.time()
    elapsed_time = end_time - start_time

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
It is highly likely that the inference of your model is taking more than 33 ms (1000 ms / 30 FPS) and is thus limiting your FPS. Try removing the face recognition model from the loop and see if the feed is still slow.
If that solves your problem, your CPU or GPU is the limiting factor, depending on how you run the model.
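A small timing sketch, assuming the same face_recognition setup as in the question, that measures just the detection call per frame to check whether it exceeds the roughly 33 ms budget (the "hog" model is used here because it is much faster than "cnn" on a CPU; swap the model name to compare):

import time
import cv2
import face_recognition

video = cv2.VideoCapture(0)

while True:
    ret, image = video.read()
    if not ret:
        break

    t0 = time.time()
    # time only the face detection step
    locations = face_recognition.face_locations(image, model="hog")
    detection_ms = (time.time() - t0) * 1000
    print(f"face_locations took {detection_ms:.1f} ms for {len(locations)} face(s)")

    cv2.imshow("timing test", image)
    if cv2.waitKey(1) & 0xFF == ord("e"):
        break

video.release()
cv2.destroyAllWindows()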

Augmented Reality line that moves with an image in OpenCV

I want to do the following in OpenCV. The problem I have is with a bottle, which needs a line drawn on the image, and the line needs to rotate along with the movement of the bottle.
The first image needs to have red lines as the borders and initiate a green line.
The second image needs to have the green line in the middle once the bottle has been rotated; that is, the green line has to follow the rotation of the bottle.
Finally, as per the third image, the application needs to quit or save the picture when the green line gets aligned with the red line.
I tried doing this in OpenCV using template matching: I kept a template image and then tracked it with the template-matching algorithm. But it does not seem to work properly in this case.
import cv2
from time import sleep
import numpy as np

vid = cv2.VideoCapture(0)
sleep(2)

line_show = False
save_reference = False
template_compare_method = cv2.TM_SQDIFF_NORMED
i = 0

while True:
    check, frame = vid.read()
    print(check)
    frame1 = cv2.line(frame, (500, 0), (500, 720), (255, 0, 0), 7)
    frame1 = cv2.line(frame1, (800, 0), (800, 720), (255, 0, 0), 7)

    if line_show:
        h, w = frame1.shape[:2]
        if not save_reference:
            reference = frame1[200:500, 780:790]
            cv2.imwrite("../../images/white_image.jpg", reference)
            save_reference = True
        if save_reference:
            reference_image = cv2.imread('../../images/white_image.jpg')
            result = cv2.matchTemplate(reference_image, frame1, template_compare_method)
            mn, _, mnLoc, _ = cv2.minMaxLoc(result)
            MPx, MPy = mnLoc
            trows, tcols = reference_image.shape[:2]
            frame1 = cv2.rectangle(frame1, (MPx, MPy), (MPx+tcols, MPy+trows), (0, 0, 255), 2)

    cv2.imshow("image", frame1)
    key = cv2.waitKey(1)
    if key == ord('l'):
        line_show = True
    if key == ord('k'):
        cv2.imwrite("../../images/saved_image_" + str(i) + ".jpg", frame1)
        i = i + 1
    if key == ord('s'):
        cv2.imwrite("../../images/saved_image.jpg", frame)
        vid.release()
        print("Image saved")
        break
    elif key == ord('q'):
        vid.release()
        cv2.destroyAllWindows()
        break
Can I use any other algorithm, or am I approaching this problem the wrong way by treating it as an object-tracking task, where I save a small image and track it through template matching?
Could other algorithms, such as Meanshift or frame differencing, achieve this?
If I were you, I would solve this problem using a line-detection algorithm. Of course, you can choose any other robust algorithm; my idea is just to solve the problem as quickly as possible.
Assume I have the following image with left and right boundaries (blue), and I have the green line.
When the green line passes the left border, quit.
Tracking the green line
First, you need to extract the edge features of the frame so the green line can be tracked efficiently.
while True:
    ret, frm = cap.read()
    frm_gry = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
    frm_cny = cv2.Canny(frm_gry, 50, 200)
Sample output:
Second, find the approximate length of the green line:
There is no direct way to find the length; use trial-and-error.
Once you are sure, initialize the line detector.
lns = cv2.ximgproc.createFastLineDetector(_length_threshold=400).detect(frm_cny)
Third, get the coordinates, and check if the green-line is in the border.
if lns is not None:
    for ln in lns:
        x1 = int(ln[0][0])
        y1 = int(ln[0][1])
        x2 = int(ln[0][2])
        y2 = int(ln[0][3])
        if x1 <= 232:
            break
Code:
import cv2

cap = cv2.VideoCapture("sample.mp4")

while True:
    ret, frm = cap.read()
    if ret:
        rgt_bdr = cv2.line(frm, (794, 250), (794, 1250), (255, 0, 0), 7)
        lft_bdr = cv2.line(frm, (232, 250), (232, 1250), (255, 0, 0), 7)
        frm_gry = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
        frm_cny = cv2.Canny(frm_gry, 50, 200)
        lns = cv2.ximgproc.createFastLineDetector(_length_threshold=400).detect(frm_cny)
        if lns is not None:
            for ln in lns:
                x1 = int(ln[0][0])
                y1 = int(ln[0][1])
                x2 = int(ln[0][2])
                y2 = int(ln[0][3])
                cv2.line(frm,
                         pt1=(x1, y1),
                         pt2=(x2, y2),
                         color=(0, 255, 0),
                         thickness=3)
                print("({}, {})-({}, {})".format(x1, y1, x2, y2))
                if x1 <= 232:
                    break
        cv2.imshow("frm", frm)
        cv2.waitKey(1)
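A slight variation on the code above, offered as a sketch only and assuming the same cv2.ximgproc API from opencv-contrib as the answer uses: build the FastLineDetector once, outside the loop, instead of re-creating it for every frame, and stop when the green line reaches the left border at x = 232 (the output filename aligned.jpg is hypothetical).

import cv2

cap = cv2.VideoCapture("sample.mp4")
# build the detector once; _length_threshold as in the answer above
fld = cv2.ximgproc.createFastLineDetector(_length_threshold=400)

aligned = False
while not aligned:
    ret, frm = cap.read()
    if not ret:
        break
    frm_gry = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
    frm_cny = cv2.Canny(frm_gry, 50, 200)
    lns = fld.detect(frm_cny)
    if lns is not None:
        for ln in lns:
            x1, y1, x2, y2 = (int(v) for v in ln[0])
            cv2.line(frm, (x1, y1), (x2, y2), (0, 255, 0), 3)
            if x1 <= 232:          # green line reached the left border
                cv2.imwrite("aligned.jpg", frm)
                aligned = True
                break
    cv2.imshow("frm", frm)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()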

Raspberry Pi: How can I import libraries inside the cv environment?

I've been following an OpenCV tutorial by Adrian Rosebrock for a home surveillance system. This is working. I also have an analog sensor that uses an analog-to-digital converter, the ADS1115. This is also working.
The problem is that once I import the ADS library in the surveillance code, I get an error.
Error:
Traceback (most recent call last):
File "ss_security.py", line 17, in <module>
import Adafruit_ADS1x15
ImportError: No module named Adafruit_ADS1x15
This is the code:
# import the necessary packages
from pyimagesearch.tempimage import TempImage
from picamera.array import PiRGBArray
from picamera import PiCamera
from imutils.video import VideoStream
import warnings
import dropbox
import json
import datetime
import argparse
import imutils
import time
import cv2
import math
# Import the ADS1x15 module.
import Adafruit_ADS1x15

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
    help="path to the JSON configuration file")
ap.add_argument("-p", "--picamera", type=int, default=-1,
    help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())

# filter warnings, load the configuration and initialize the Dropbox
# client
warnings.filterwarnings("ignore")
conf = json.load(open(args["conf"]))
client = None

# check to see if the Dropbox should be used
if conf["use_dropbox"]:
    # connect to dropbox and start the session authorization process
    client = dropbox.Dropbox(conf["dropbox_access_token"])
    print("[SUCCESS] dropbox account linked")

# initialize the video stream and allow the camera sensor to warm up
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
vs.resolution = tuple(conf["resolution"])
vs.framerate = conf["fps"]
rawCapture = PiRGBArray(vs, size=tuple(conf["resolution"]))

# allow the camera to warm up, then initialize the average frame, last
# uploaded timestamp, and frame motion counter
print("[INFO] warming up...")
time.sleep(conf["camera_warmup_time"])
avg = None
lastUploaded = datetime.datetime.now()
motionCounter = 0

# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 500 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=500)
    text = "Unoccupied"

    # draw the timestamp on the frame
    timestamp = datetime.datetime.now()
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.35, (0, 0, 255), 1)

    # convert it to grayscale, and blur it
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the average frame is None, initialize it
    if avg is None:
        print("[INFO] starting background model...")
        avg = gray.copy().astype("float")
        rawCapture.truncate(0)
        continue

    # accumulate the weighted average between the current frame and
    # previous frames, then compute the difference between the current
    # frame and running average
    cv2.accumulateWeighted(gray, avg, 0.5)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

    # threshold the delta image, dilate the thresholded image to fill
    # in holes, then find contours on thresholded image
    thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
        cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < conf["min_area"]:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"

    # draw the text and timestamp on the frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.35, (0, 0, 255), 1)

    # display temp
    cv2.putText(frame, "Temp: 30 C".format(text), (250, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    # check to see if the room is occupied
    if text == "Occupied":
        # check to see if enough time has passed between uploads
        if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]:
            # increment the motion counter
            motionCounter += 1

            # check to see if the number of frames with consistent motion is
            # high enough
            if motionCounter >= conf["min_motion_frames"]:
                # check to see if dropbox should be used
                if conf["use_dropbox"]:
                    # write the image to temporary file
                    t = TempImage()
                    cv2.imwrite(t.path, frame)

                    # upload the image to Dropbox and cleanup the temporary image
                    print("[UPLOAD] {}".format(ts))
                    path = "/{base_path}/{timestamp}.jpg".format(
                        base_path=conf["dropbox_base_path"], timestamp=ts)
                    client.files_upload(open(t.path, "rb").read(), path)
                    t.cleanup()

                # update the last uploaded timestamp and reset the motion
                # counter
                lastUploaded = timestamp
                motionCounter = 0

    # otherwise, the room is not occupied
    else:
        motionCounter = 0

    # check to see if the frames should be displayed to screen
    if conf["show_video"]:
        # display the security feed
        cv2.imshow("Security Feed", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key is pressed, break from the loop
        if key == ord("q"):
            break

    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
I have only just inserted the import; I haven't done any computations/readings yet. As you can see, I placed a default temperature value.
The library is located inside pi/Adafruit_Python_ADS1x15.
Steps taken:
Installed the ADS library inside the cv environment (error: destination path already exists)
Tried from Adafruit_Python_ADS1x15 import Adafruit_ADS1x15 (error: No module named Adafruit_Python_ADS1x15)
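As a hedged diagnostic only (not the tutorial's own fix), the traceback suggests the module is installed for the system Python but is not visible to the interpreter inside the cv virtual environment. A quick check, run from within that environment, shows which interpreter and search path are active and whether the package can be located at all:

# Run this inside the cv virtual environment (e.g. after activating it).
import sys
import importlib.util

print("interpreter:", sys.executable)      # should point inside the cv virtualenv
print("search path:")
for p in sys.path:
    print("  ", p)

# Check whether this interpreter can find the module at all
spec = importlib.util.find_spec("Adafruit_ADS1x15")
print("Adafruit_ADS1x15 found at:", spec.origin if spec else None)

If the module is reported as not found here while the same import works in the system Python, the package most likely needs to be installed into the cv environment itself (with pip while that environment is active) rather than into the global site-packages.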

OpenCV Web Cam Video Capture issue

I am new to OpenCV and Python as well.
I am getting some errors in my code which I have tried my best to fix; I hope someone can help me out. I want the code to capture 10-15 frames per second when I run it. Below is the error I am getting. Could anyone help me with the changes?
Code:
import cv2
import time
import sys
import numpy as np

if __name__ == '__main__':
    faceCascade = cv2.CascadeClassifier('C:\\Users\\Mudit\\Desktop\\Thesis\\CNN-master\\haarcascade_frontalface_default')
    faceNeighborsMax = 10
    neighborStep = 1

    # Start default camera
    video = cv2.VideoCapture(0)

    # Number of frames to capture
    num_frames = 10
    print("Capturing {0} frames".format(num_frames))

    # Start time
    start = time.time()

    # Grab a few frames
    for i in range(0, num_frames):
        ret, frame = video.read()
        frameGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        for neigh in range(1, faceNeighborsMax, neighborStep):
            faces = faceCascade.detectMultiScale(frameGray, 1.2, neigh)
            frameClone = np.copy(frame)
            # Display the image
            for (x, y, w, h) in faces:
                cv2.rectangle(frameClone, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.putText(frameClone, "# Neighbors = {}".format(neigh), (10, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)
            cv2.imshow('Face Detection', frameClone)
            if cv2.waitKey(500) & 0xFF == 27:
                cv2.destroyAllWindows()
                sys.exit()

    # End time
    end = time.time()

    # Time elapsed
    seconds = end - start
    print("Time taken : {0} seconds".format(seconds))

    # Calculate frames per second
    fps = num_frames / seconds
    print("Estimated frames per second : {0}".format(fps))

    # Release video
    video.release()
Error:
Traceback (most recent call last):
  File "C:/Users/Mudit/PycharmProjects/CNN/findframerateofacamera/webcam.py", line 28, in <module>
    faces = faceCascade.detectMultiScale(frameGray, 1.2, neigh)
cv2.error: C:\projects\opencv-python\opencv\modules\objdetect\src\cascadedetect.cpp:1698: error: (-215) !empty() in function cv::CascadeClassifier::detectMultiScale
Since
frameGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
does not result in an assertion, I think the image capture works correctly. You can add
cv2.imshow('Gray', frameGray)
to verify that the grayscale image is not empty.
The next suspect is the cascade classifier faceCascade. Check whether the path and the filename are correct and whether the cascade has actually been loaded.
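A short sketch along those lines (the path is the one from the question; note that the standard Haar cascade file name ends in .xml, which the question's path omits): check faceCascade.empty() immediately after construction and stop early with a clear message, since an empty cascade is exactly what triggers the "(-215) !empty()" assertion inside detectMultiScale.

import sys
import cv2

# Same path as in the question; the standard file name ends in ".xml"
cascade_path = 'C:\\Users\\Mudit\\Desktop\\Thesis\\CNN-master\\haarcascade_frontalface_default.xml'
faceCascade = cv2.CascadeClassifier(cascade_path)

# empty() is True when the cascade failed to load
if faceCascade.empty():
    sys.exit("Could not load cascade from: " + cascade_path)

print("Cascade loaded, detectMultiScale should now work.")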
