Real time YOLOv4 detection from desktop screen - python

Here is simple code that uses YOLO to run detection on a video file or a camera on a PC:
import cv2
import time

Conf_threshold = 0.4
NMS_threshold = 0.4
COLORS = [(0, 255, 0), (0, 0, 255), (255, 0, 0),
          (255, 255, 0), (255, 0, 255), (0, 255, 255)]

class_name = []
with open(r'Resources\coco.names.txt', 'r') as f:
    class_name = [cname.strip() for cname in f.readlines()]
# print(class_name)

net = cv2.dnn.readNet(r'Resources\yolov4-tiny.weights', r'Resources\yolov4-tiny.cfg')
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA_FP16)

model = cv2.dnn_DetectionModel(net)
model.setInputParams(size=(416, 416), scale=1/255, swapRB=True)

cap = cv2.VideoCapture('test.mp4')
starting_time = time.time()
frame_counter = 0

while True:
    ret, frame = cap.read()
    frame_counter += 1
    if ret == False:
        break
    classes, scores, boxes = model.detect(frame, Conf_threshold, NMS_threshold)
    for (classid, score, box) in zip(classes, scores, boxes):
        color = COLORS[int(classid) % len(COLORS)]
        label = "%s : %f" % (class_name[int(classid)], score)
        cv2.rectangle(frame, box, color, 1)
        cv2.putText(frame, label, (box[0], box[1] - 10),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, color, 1)
    endingTime = time.time() - starting_time
    fps = frame_counter / endingTime
    # print(fps)
    cv2.putText(frame, f'FPS: {fps}', (5, 35),
                cv2.FONT_HERSHEY_COMPLEX, 0.7, (0, 255, 0), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
But I need a screen capture instead of the test.mp4 video. To grab screenshots of the screen, I used this code:
import numpy as np
import cv2
from mss import mss

def screen_record_efficiency():
    bbox = {'top': 0, 'left': 0, 'width': 800, 'height': 600}
    sct = mss()
    font = cv2.FONT_HERSHEY_SIMPLEX
    while 1:
        # grab image
        sct_img = np.array(sct.grab(bbox))
        # display image
        cv2.imshow('screen', sct_img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
I can't understand how to feed the array of images produced by the second snippet into the detector instead of a video file, or how to combine the two in general. Because of my poor knowledge of Python I can't solve this problem, and I haven't found a video or article on capturing the desktop in real time.
If I pass the array directly, e.g. frame = sct_img, I get an error saying that 4 channels are passed instead of 3 (or something similar).
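A minimal sketch of how the two snippets could be combined (reusing the model setup and capture region from above): the array returned by mss is 4-channel BGRA, so it has to be converted to 3-channel BGR before it is handed to model.detect, which is what the reported error is about.

import cv2
import numpy as np
from mss import mss

Conf_threshold = 0.4
NMS_threshold = 0.4
COLORS = [(0, 255, 0), (0, 0, 255), (255, 0, 0),
          (255, 255, 0), (255, 0, 255), (0, 255, 255)]

with open(r'Resources\coco.names.txt', 'r') as f:
    class_name = [cname.strip() for cname in f.readlines()]

net = cv2.dnn.readNet(r'Resources\yolov4-tiny.weights', r'Resources\yolov4-tiny.cfg')
model = cv2.dnn_DetectionModel(net)
model.setInputParams(size=(416, 416), scale=1/255, swapRB=True)

bbox = {'top': 0, 'left': 0, 'width': 800, 'height': 600}
sct = mss()

while True:
    # grab the screen region; mss returns a BGRA array
    sct_img = np.array(sct.grab(bbox))
    # drop the alpha channel so the detector gets a normal 3-channel BGR frame
    frame = cv2.cvtColor(sct_img, cv2.COLOR_BGRA2BGR)
    classes, scores, boxes = model.detect(frame, Conf_threshold, NMS_threshold)
    for (classid, score, box) in zip(classes, scores, boxes):
        color = COLORS[int(classid) % len(COLORS)]
        label = "%s : %f" % (class_name[int(classid)], score)
        cv2.rectangle(frame, box, color, 1)
        cv2.putText(frame, label, (box[0], box[1] - 10),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, color, 1)
    cv2.imshow('screen', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()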

Related

I want my OpenCV code to use threading to increase FPS

My OpenCV code lags on a Raspberry Pi, but on a PC it runs smoothly. Can anyone help me turn my hard-coded script into one that uses threading?
from cv2 import cv2
import numpy as np
from pyzbar.pyzbar import decode
import pickle, time
import os
import imutils
import screeninfo
from screeninfo import get_monitors

curr_path = os.getcwd()

#########models##################################################
print("Loading face detection model")
proto_path = os.path.join(curr_path, 'model', 'deploy.prototxt')
model_path = os.path.join(curr_path, 'model', 'res10_300x300_ssd_iter_140000.caffemodel')
face_detector = cv2.dnn.readNetFromCaffe(prototxt=proto_path, caffeModel=model_path)
print("Loading face recognition model")
recognition_model = os.path.join(curr_path, 'model', 'openface_nn4.small2.v1.t7')
face_recognizer = cv2.dnn.readNetFromTorch(model=recognition_model)

################pickles#########################################
recognizer = pickle.loads(open('recognizer.pickle', "rb").read())
le = pickle.loads(open('le.pickle', "rb").read())
print("Starting test video file")

#adjacents########################################################################
no_of_adjacent_prediction = 0
no_face_detected = 0  # to track the number of times the face is detected
prev_predicted_name = ''  # to keep track of the previously predicted face (w.r.t frame)
count_frames = total_no_face_detected = 0

#camera#########################################################################
font = cv2.FONT_HERSHEY_SIMPLEX
clr = (255, 255, 255)
cap = cv2.VideoCapture(0)
time.sleep(2)
profile = None

####TRY_COUNTS###########
MAX_TRY = 3
tries = 0

######flags############
flag = True
flag_face_recognised = False  # to keep track if the user face is recognized
flag_face_not_recognised = False

#############FULLSCREEN###############
WINDOW_NAME = "Face-Rcognition and QRCODEQQQQQ"
screenid = 0

while True:
    cv2.namedWindow(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty(WINDOW_NAME, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    ret, frame = cap.read()
    screen = screeninfo.get_monitors()[screenid]
    screen_width, screen_height = screen.width, screen.height
    frame = cv2.flip(frame, 1)
    frame_height, frame_width, _ = frame.shape
    scaleWidth = float(screen_width) / float(frame_width)
    scaleHeight = float(screen_height) / float(frame_height)
    if flag:
        access = open("AccessCodes.txt")
        for i in decode(frame):
            decoded_data = i.data.decode("utf-8")  # converts bytes to string value
            print(decoded_data)
            # Drawing polygon on frame (tilts w.r.t orientation)
            pts = np.array([i.polygon], np.int32)
            pts = pts.reshape((-1, 1, 2))
            cv2.polylines(frame, [pts], True, (0, 255, 0), 1)
            # print(pts)
            # Display text
            rect_pts = i.rect  # using rect point as origin for text as we don't want the text to tilt with the qrcode
            fontScale = 0.8
            thickness = 1
            # cv2.putText(frame, decoded_data, (rect_pts[0], rect_pts[1]), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 0, 0), thickness)
            # print(rect_pts)
            if decoded_data.lower() in access.read():  # Check private key
                flag = False
                tries = 0
                print("QRCODE is Valid.Proceed to FaceRecog")
                time_out_no_of_frames_after_qrcode = 0
            else:
                # print("INVALID QR CODE")
                print("Invalid QRCODE")
        if scaleHeight > scaleWidth:
            imgScale = scaleWidth
        else:
            imgScale = scaleHeight
        newX, newY = frame.shape[1] * imgScale, frame.shape[0] * imgScale
        frame = cv2.resize(frame, (int(newX), int(newY)))
        cv2.imshow(WINDOW_NAME, frame)
    else:
        frame = cv2.resize(frame, (int(newX), int(newY)))
        (h, w) = frame.shape[:2]
        image_blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0), False, False)
        face_detector.setInput(image_blob)
        face_detections = face_detector.forward()
        for i in range(0, face_detections.shape[2]):
            confidence = face_detections[0, 0, i, 2]
            if confidence > 0.90:
                box = face_detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")
                face = frame[startY:endY, startX:endX]
                (fH, fW) = face.shape[:2]
                face_blob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), True, False)
                face_recognizer.setInput(face_blob)
                vec = face_recognizer.forward()
                preds = recognizer.predict_proba(vec)[0]
                j = np.argmax(preds)
                proba = preds[j]
                name = le.classes_[j]
                text = "{}: {:.2f}".format(name, proba)
                y = startY - 10 if startY - 10 > 10 else startY + 10
                cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 0, 255), 2)
                cv2.putText(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 0, 255), 2)
                cv2.putText(frame, "Welcome home " + name.replace('_', ' ').title(), (160, 460), font, 0.8, clr,
                            thickness + 3, cv2.LINE_AA)
                cv2.rectangle(frame, (startX, startY), (endX, endY), (255, 255, 255), 1)
                if name == decoded_data.lower():
                    print("Face is Recognised: " + str(no_of_adjacent_prediction))
                    no_of_adjacent_prediction += 1
                else:
                    print("Face not Recognised.")
                    cv2.putText(frame, "Face Not Recognised", (160, 460), font, 0.8, clr, thickness, cv2.LINE_AA)
                    flag_face_not_recognised = True
                    no_of_adjacent_prediction = 0
        if no_of_adjacent_prediction > 10:  # no_of_adjacent_prediction is only updated when the confidence of classification is > 80
            flag_face_recognised = True
            no_of_adjacent_prediction = 0
            no_face_detected = 0
        cv2.imshow(WINDOW_NAME, frame)
        if flag_face_recognised:  # if face is recognized then open the door
            # arduino.write(bytes('o', 'utf-8'))  # Output the given byte string over the serial port.
            print("DOOR is OPEN")
            time.sleep(5)
            # speak("Closing door")
            # arduino.write(bytes('c', 'utf-8'))  # Output the given byte string over the serial port.
            print("DOOR is CLOSED")
            flag_face_recognised = False
            flag = True  # to start from qrcode
        if flag_face_not_recognised:
            # speak("Face not recognised. The door will remain closed")
            time.sleep(2)
            flag_face_not_recognised = False
            tries += 1
            if tries >= MAX_TRY:
                flag = True  # to start from qrcode
                tries = 0
        if time_out_no_of_frames_after_qrcode >= 400:
            # speak("User authentication failed due to time out")
            flag = True  # to start from qrcode
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break

cv2.destroyAllWindows()
FPS on PC: 20 fps
FPS on Raspberry Pi: 9 fps
I tried various OpenCV snippets and the result is still the same. I found that threading can increase OpenCV's FPS, but I do not know how to apply it to my code because I am a Python beginner. Any help would be nice. I want the FPS on my Raspberry Pi to be in the range of 15-20 instead of 9.
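One common pattern (a minimal sketch, not tested against the script above) is to move cap.read() into a background thread so the main loop always works on the most recent frame instead of blocking on the camera. Since the script already imports imutils, imutils.video.VideoStream does essentially the same thing.

import threading
import cv2

class ThreadedCapture:
    """Keep grabbing frames in a background thread and hand out the latest one."""
    def __init__(self, src=0):
        self.cap = cv2.VideoCapture(src)
        self.ret, self.frame = self.cap.read()
        self.stopped = False
        self.lock = threading.Lock()
        threading.Thread(target=self._update, daemon=True).start()

    def _update(self):
        while not self.stopped:
            ret, frame = self.cap.read()
            with self.lock:
                self.ret, self.frame = ret, frame

    def read(self):
        with self.lock:
            return self.ret, self.frame

    def release(self):
        self.stopped = True
        self.cap.release()

# usage: replace cap = cv2.VideoCapture(0) with cap = ThreadedCapture(0);
# ret, frame = cap.read() keeps working unchanged inside the main loop.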

How to multithreading without causing app freeze or lag?

Hey, I started learning Python not so long ago. Right now I'm creating (or more likely trying to create) a face and motion detection script based on the OpenCV library. Unfortunately I've been stuck for a few days because I can't solve a problem with what I guess is called multithreading.
Here is my code:
import time
import cv2
import datetime
from discord_webhook import DiscordWebhook
import threading

faceCascade = cv2.CascadeClassifier("face_recognition.xml")

# define a video capture object
video_capture = cv2.VideoCapture(0)

# writing video
frame_width = int(video_capture.get(3))
frame_height = int(video_capture.get(4))

# Define the codec and create the VideoWriter object. The output is stored in a timestamped .avi file.
out = cv2.VideoWriter(datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p") + '.avi',
                      cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (frame_width, frame_height))

# screenshot when detectors get triggered
def screenshot():
    cv2.imwrite('screenshot.png', video_capture.read()[1])

# webhook notify
def alert():
    webhook = DiscordWebhook(url="", rate_limit_retry=True,
                             content='!ALERT!')
    webhook.execute()

while True:
    # Capture the video frame by frame
    ret, frame = video_capture.read()
    text = "not detected"
    text1 = "not detected"
    timestamp = datetime.datetime.now()

    # face recognition
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30))
    if int(format(len(faces))) > 0:
        # print("Found {0} faces!".format(len(faces)))
        text = "detected"
    else:
        text = "not detected"
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # motion detection
    ret, frame1 = video_capture.read()
    difference = cv2.absdiff(frame, frame1)  # find the difference between the frames
    gray = cv2.cvtColor(difference, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)  # create threshold
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        if cv2.contourArea(c) < 5000:
            continue
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        text1 = "detected"

    # Display the resulting frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "Face status: {}".format(text), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame, "Motion status: {}".format(text1), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 255), 1)

    # check if motion is detected, if not change status text
    if text1 == "not detected":
        text1 = "detected"
    else:
        text1 = "not detected"

    out.write(frame)
    cv2.imshow('Press Q to quit', frame)
    # the 'q' button is set as the quit key
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# After the loop release the cap object
video_capture.release()
out.release()
cv2.destroyAllWindows()
I'm trying to send a webhook and take a screenshot every 3 minutes when motion is detected, but doing it inside the loop completely lags the app. I tried time.sleep and thread timers, but they only freeze or lag the app. If someone can explain how to solve this problem I will be very thankful. Have a great day or night.
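One way to keep the loop responsive (a rough sketch; the 180-second cooldown is just the 3-minute interval mentioned above) is to avoid time.sleep in the main loop entirely: when motion is detected, check how long ago the last alert went out and, if enough time has passed, run the screenshot and webhook in a short-lived background thread.

last_alert_time = 0      # time.time() of the last alert
ALERT_COOLDOWN = 180     # seconds, i.e. 3 minutes

def notify(frame_to_save):
    # runs in a background thread so the capture loop never blocks
    cv2.imwrite('screenshot.png', frame_to_save)
    alert()

# inside the while loop, right after the motion-detection section:
if text1 == "detected" and time.time() - last_alert_time > ALERT_COOLDOWN:
    last_alert_time = time.time()
    threading.Thread(target=notify, args=(frame.copy(),), daemon=True).start()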

OpenCV Color Detection in video ROI

I am trying to detect the colour in a specific ROI of a traffic-light video. The code predicts the colour correctly, but it does not limit the prediction to the ROI I am looking at.
When the video starts, the ROI contains none of the red/green/yellow colours, yet the code still predicts and shows RED based on other areas of the frame. What am I doing wrong?
I have uploaded the test video here for testing -- https://ufile.io/ha20buns
Python code below.
import cv2
import numpy as np

cap = cv2.VideoCapture(r'D:\Videos\Sample.mp4')

while True:
    ret, frame = cap.read()
    if ret == False:
        break
    frame = cv2.resize(frame, (1920, 1080))

    # Extract required section from entire frame
    roiColor = cv2.rectangle(frame.copy(), (1022, 565), (1411, 709), (255, 255, 255), 2)  # For SampleTL.mp4
    blcolor = (255, 0, 0)
    cv2.rectangle(frame, (1022, 565), (1411, 709), blcolor)
    hsv = cv2.cvtColor(roiColor, cv2.COLOR_BGR2HSV)

    # red
    lower_hsv_red = np.array([157, 177, 122])
    upper_hsv_red = np.array([179, 255, 255])
    mask_red = cv2.inRange(hsv, lowerb=lower_hsv_red, upperb=upper_hsv_red)
    red_blur = cv2.medianBlur(mask_red, 7)

    # green
    lower_hsv_green = np.array([49, 79, 137])
    upper_hsv_green = np.array([90, 255, 255])
    mask_green = cv2.inRange(hsv, lowerb=lower_hsv_green, upperb=upper_hsv_green)
    green_blur = cv2.medianBlur(mask_green, 7)

    # yellow
    lower_hsv_yellow = np.array([15, 150, 150])
    upper_hsv_yellow = np.array([35, 255, 255])
    mask_yellow = cv2.inRange(hsv, lowerb=lower_hsv_yellow, upperb=upper_hsv_yellow)
    yellow_blur = cv2.medianBlur(mask_yellow, 7)

    # The masks are binary images: a white pixel (255) means the colour is present
    red_color = np.max(red_blur)
    green_color = np.max(green_blur)
    yellow_color = np.max(yellow_blur)

    if red_color == 255:
        print('red')
        cv2.rectangle(frame, (1020, 50), (1060, 90), (0, 0, 255), 2)  # Draw a rectangular frame by coordinates
        cv2.putText(frame, "red", (1020, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)  # red text information
    elif green_color == 255:
        print('green')
        cv2.rectangle(frame, (1020, 50), (1060, 90), (0, 255, 0), 2)
        cv2.putText(frame, "green", (1020, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    elif yellow_color == 255:
        print('yellow')
        cv2.rectangle(frame, (1020, 50), (1060, 90), (0, 255, 0), 2)
        cv2.putText(frame, "yellow", (1020, 40), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 0), 2)

    cv2.imshow('frame', frame)
    red_blur = cv2.resize(red_blur, (300, 200))
    green_blur = cv2.resize(green_blur, (300, 200))
    yellow_blur = cv2.resize(yellow_blur, (300, 200))
    # cv2.imshow('red_window', red_blur)
    # cv2.imshow('green_window', green_blur)
    # cv2.imshow('yellow_window', yellow_blur)
    c = cv2.waitKey(10)
    if c == 27:
        break

cap.release()
cv2.destroyAllWindows()  # destroy all opened windows
cv2.rectangle doesn't crop the image; it returns the original image with a rectangle drawn on it. Try this instead:
roiColor = frame[565:709, 1022:1411]
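In context, a minimal sketch of the change (same coordinates as above): slice the ROI out of the frame with numpy indexing and run the HSV conversion and colour masks on that slice only, while still drawing the rectangle on the full frame for display.

# numpy slicing is [y1:y2, x1:x2]
roiColor = frame[565:709, 1022:1411]
cv2.rectangle(frame, (1022, 565), (1411, 709), (255, 0, 0), 2)
# all subsequent masks now see only the ROI
hsv = cv2.cvtColor(roiColor, cv2.COLOR_BGR2HSV)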

OpenCV: Can we discover the first and last frame of a bounding box moving?

Working in Python, I have a video where I:
First draw a bounding box (bbox).
Next, I need to know the frame on which the bbox starts moving.
Finally, I need to know the last frame on which the bbox stops moving.
Initial code:
import cv2
import time
import numpy as np

# open video
cap = cv2.VideoCapture('MyVideo.avi')

# total number of frames
totalFrames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
print(totalFrames)

# MOSSE tracker
tracker = cv2.legacy_TrackerMOSSE.create()

# take first frame
ret, frame = cap.read()
image = cv2.resize(frame, (1600, 900))

# create bounding box
bbox = cv2.selectROI("Tracking", image, False)
tracker.init(image, bbox)

# define bounding box
def drawBox(image, bbox):
    x, y, w, h = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
    cv2.rectangle(image, (x, y), ((x + w), (y + h)), (255, 0, 255), 3, 1)
    cv2.putText(image, "Tracking", (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

# work through the video
while True:
    success, frame = cap.read()
    # resize to screen size
    image = cv2.resize(frame, (1600, 900))
    # tracker
    success, bbox1 = tracker.update(image)
This is the part where I believe I could figure out when my bbox starts moving and print the frame of interest.
    if success:
        drawBox(image, bbox1)
        if bbox1 == bbox:
            print('static')
        else:
            print(cap.get(cv2.CAP_PROP_POS_FRAMES))
    else:
        cv2.putText(image, "Lost", (75, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
Remaining code
    # show video
    cv2.imshow("feed", image)
    if cv2.waitKey(40) == 27:
        break

cv2.destroyAllWindows()
cap.release()
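A minimal sketch of one way to fill in that part (assuming a small pixel tolerance, since tracker output jitters a little even on a static object): compare the current box with the previous frame's box and remember the first and last frame indices where it actually moved.

MOVE_TOLERANCE = 2           # pixels of jitter to ignore (tune for your video)
prev_bbox = bbox
first_move_frame = None
last_move_frame = None

def moved(a, b, tol=MOVE_TOLERANCE):
    return any(abs(ai - bi) > tol for ai, bi in zip(a, b))

# inside the while loop, after tracker.update(image):
if success:
    frame_no = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
    if moved(bbox1, prev_bbox):
        if first_move_frame is None:
            first_move_frame = frame_no   # first frame where the bbox moves
        last_move_frame = frame_no        # keeps updating until motion stops
    prev_bbox = bbox1

# after the loop:
print("bbox starts moving at frame", first_move_frame)
print("bbox stops moving at frame", last_move_frame)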

Augmented Reality line that moves with an image in OpenCV

I want to do the following in OpenCV. The problem I have involves a bottle: the image needs a line drawn on it, and the line has to rotate along with the movement of the bottle.
The first image needs red lines as the borders and initiates a green line.
The second image needs the green line in the middle once the bottle has been rotated; that is, the green line has to follow the rotation of the bottle.
Finally, as per the third image, the application needs to quit or save the picture when the green line becomes aligned with the red line.
I tried doing this in OpenCV using template matching: I keep a template image and then track it with the template-matching algorithm, but it does not seem to work properly in this case.
import cv2
from time import sleep
import numpy as np

vid = cv2.VideoCapture(0)
sleep(2)
line_show = False
save_reference = False
template_compare_method = cv2.TM_SQDIFF_NORMED
i = 0

while True:
    check, frame = vid.read()
    print(check)
    frame1 = cv2.line(frame, (500, 0), (500, 720), (255, 0, 0), 7)
    frame1 = cv2.line(frame1, (800, 0), (800, 720), (255, 0, 0), 7)
    if line_show:
        h, w = frame1.shape[:2]
        if not save_reference:
            reference = frame1[200:500, 780:790]
            cv2.imwrite("../../images/white_image.jpg", reference)
            save_reference = True
        if save_reference:
            reference_image = cv2.imread('../../images/white_image.jpg')
            result = cv2.matchTemplate(reference_image, frame1, template_compare_method)
            mn, _, mnLoc, _ = cv2.minMaxLoc(result)
            MPx, MPy = mnLoc
            trows, tcols = reference_image.shape[:2]
            frame1 = cv2.rectangle(frame1, (MPx, MPy), (MPx + tcols, MPy + trows), (0, 0, 255), 2)
    cv2.imshow("image", frame1)
    key = cv2.waitKey(1)
    if key == ord('l'):
        line_show = True
    if key == ord('k'):
        cv2.imwrite("../../images/saved_image_" + str(i) + ".jpg", frame1)
        i = i + 1
    if key == ord('s'):
        cv2.imwrite("../../images/saved_image.jpg", frame)
        vid.release()
        print("Image saved")
        break
    elif key == ord('q'):
        vid.release()
        cv2.destroyAllWindows()
        break
Can I use other algorithms, or am I approaching this problem in the wrong way by treating it as an object-tracking task, where I save a small image and track it through template matching?
Could I use some other algorithm, like MeanShift or frame differencing, to achieve this?
If I were you, I would solve this problem with a line-detection algorithm. Of course, you can choose any other robust algorithm; my idea is to solve the problem as quickly as possible.
Assume I have the following image with left and right boundaries (blue) and the green line.
When the green line passes the left border, quit.
Tracking the green line
First you need to find the edges of the frame so you can track the green line efficiently.
while True:
    ret, frm = cap.read()
    frm_gry = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
    frm_cny = cv2.Canny(frm_gry, 50, 200)
Sample output:
Second, find the approximate length of the green line:
There is no direct way to find the length; find it by trial and error.
Once you are sure, initialize the line detector.
lns = cv2.ximgproc.createFastLineDetector(_length_threshold=400).detect(frm_cny)
Third, get the line coordinates and check whether the green line has reached the border.
if lns is not None:
    for ln in lns:
        x1 = int(ln[0][0])
        y1 = int(ln[0][1])
        x2 = int(ln[0][2])
        y2 = int(ln[0][3])
        if x1 <= 232:
            break
Code:
import cv2

cap = cv2.VideoCapture("sample.mp4")

while True:
    ret, frm = cap.read()
    if ret:
        rgt_bdr = cv2.line(frm, (794, 250), (794, 1250), (255, 0, 0), 7)
        lft_bdr = cv2.line(frm, (232, 250), (232, 1250), (255, 0, 0), 7)
        frm_gry = cv2.cvtColor(frm, cv2.COLOR_BGR2GRAY)
        frm_cny = cv2.Canny(frm_gry, 50, 200)
        lns = cv2.ximgproc.createFastLineDetector(_length_threshold=400).detect(frm_cny)
        if lns is not None:
            for ln in lns:
                x1 = int(ln[0][0])
                y1 = int(ln[0][1])
                x2 = int(ln[0][2])
                y2 = int(ln[0][3])
                cv2.line(frm,
                         pt1=(x1, y1),
                         pt2=(x2, y2),
                         color=(0, 255, 0),
                         thickness=3)
                print("({}, {})-({}, {})".format(x1, y1, x2, y2))
                if x1 <= 232:
                    break
        cv2.imshow("frm", frm)
        cv2.waitKey(1)
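To actually quit or save the picture once the green line reaches the left border (the original requirement), the bare break inside the inner for loop could be extended along these lines (a sketch, reusing the answer's x = 232 border):

# replaces the bare `break` inside the inner for loop
if x1 <= 232:
    cv2.imwrite("aligned_frame.jpg", frm)   # save the frame where the lines align
    cap.release()
    cv2.destroyAllWindows()
    raise SystemExit                        # or set a flag and break out of both loops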
