Function sent to a process can't change the value of a class field - Python

The variable self.process_this_frame is never changed by the process.
For now, I want to process a frame only when the previous frame that should be processed has finished processing, and skip the other frames.
import cv2
import face_recognition
import multiprocessing

class FaceLocationSender:
    def __init__(self, camera_url):
        self.video_capture = cv2.VideoCapture(camera_url)
        self.face_locations = []
        self.process_this_frame = True

    def get_faces_from_frame(self, frame):
        self.face_locations = face_recognition.face_locations(frame)
        self.process_this_frame = True

    def start(self):
        while True:
            ret, frame = self.video_capture.read()
            small_frame = cv2.resize(frame, (0, 0), fx=1/2, fy=1/2)
            rgb_small_frame = small_frame[:, :, ::-1]
            if self.process_this_frame:
                self.process_this_frame = False
                process = multiprocessing.Process(target=self.get_faces_from_frame, args=(rgb_small_frame,))
                process.start()
            for (top, right, bottom, left) in self.face_locations:
                top *= 2
                right *= 2
                bottom *= 2
                left *= 2
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 0), 2)
            cv2.imshow('Video', cv2.resize(frame, (1200, 600)))
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        self.video_capture.release()
        cv2.destroyAllWindows()
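
The flag never changes in the parent because multiprocessing.Process pickles self into the child process: get_faces_from_frame mutates a copy, and the parent's FaceLocationSender is untouched. A minimal sketch of one workaround, sharing state explicitly (the helper below is an illustration, not part of the original class):

import multiprocessing

import face_recognition

def get_faces_from_frame(frame, ready_flag, results):
    # Runs in the child process; writes into shared objects instead of `self`.
    results.put(face_recognition.face_locations(frame))
    ready_flag.value = 1  # tell the parent it may schedule the next frame

ready_flag = multiprocessing.Value('i', 1)  # 1 means "free to process"
results = multiprocessing.Queue()

# Inside the capture loop, in place of the self.* flag:
# if ready_flag.value:
#     ready_flag.value = 0
#     multiprocessing.Process(target=get_faces_from_frame,
#                             args=(rgb_small_frame, ready_flag, results)).start()
# if not results.empty():
#     face_locations = results.get()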

CV2: closing webcam and reopening in different function

In this code I want to display 3 types of classification using OpenCV and face_recognition: "Known", "Unknown" and "Hostile".
The first function (firstCase()) handles "Known" and "Unknown", and that part works fine.
What I want to do is: when the program is in the function secondCase(), which handles "Hostile", and I press "q", the webcam must close and the program must go back to the "Known"/"Unknown" cases (by calling firstCase() at the end of secondCase()), reopening the webcam and reclassifying.
You can find this at the end of the function secondCase().
I have this error:
small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
cv2.error: OpenCV(4.5.5) /Users/runner/work/opencv-python/opencv-python/opencv/modules/imgproc/src/resize.cpp:4052: error: (-215:Assertion failed) !ssize.empty() in function 'resize'
I know this error shows up because the code is trying to resize something that has already been manipulated, but I don't know how to fix it.
This is the code:
video_capture = cv2.VideoCapture(0)

face2 = face_recognition.load_image_file(os.path.abspath("path2"))
face2_face_encoding = face_recognition.face_encodings(face2)[0]

known_face_encodings = [
    face2_face_encoding
]
known_face_names = [
    "Giulia"
]

face_location = []
face_encodings = []
face_names = []

def firstCase():
    while True:
        ret, frame = video_capture.read()
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]
        process_this_frame = True
        face = False
        if process_this_frame:
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []
            for face_encoding in face_encodings:
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.6)
                name = "Sconosciuto"
                face = True
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]
                    face = False
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                face_names.append(name)
        process_this_frame = not process_this_frame
        if face:
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                cv2.rectangle(frame, (left, top), (right, bottom), (40, 48, 48), 2)
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (40, 48, 48), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.6, (255, 255, 255), 1)
            cv2.imshow('Video', frame)
        else:
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 255, 0), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.6, (255, 255, 255), 1)
            cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()

def secondCase():
    while True:
        ret, frame = video_capture.read()
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_small_frame = small_frame[:, :, ::-1]
        process_this_frame = True
        face = False
        if process_this_frame:
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
            face_names = []
            for face_encoding in face_encodings:
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.6)
                name = "Ostile"
                face = True
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]
                    face = False
                face_names.append(name)
        process_this_frame = not process_this_frame
        if face:
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 0), 2)
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 0), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.6, (255, 255, 255), 1)
            cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()
    firstCase()

print("Inizio scansione :" + time.ctime() + "\nCitofono attivo. Citofona premendo a... ")
sel = selectors.DefaultSelector()
sel.register(sys.stdin, selectors.EVENT_READ)
sys.stdout.flush()
pairs = sel.select(timeout=5)
if pairs:
    if input("Citofona premendo 'a' \n"):
        firstCase()
else:
    secondCase()
Thank you all for helping.
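
A likely cause of the assertion: secondCase() calls video_capture.release() before calling firstCase(), so the next read() returns ret=False and an empty frame, which cv2.resize then rejects. A hedged sketch of one way around this, reopening the capture instead of reusing the released one and skipping empty frames (reopen_capture and read_valid_frame are illustrative helpers, not from the original code):

def reopen_capture():
    # A released VideoCapture keeps handing back empty frames, so build
    # a fresh one before restarting a case.
    global video_capture
    video_capture = cv2.VideoCapture(0)

def read_valid_frame():
    # Guard the read: never pass an empty frame to cv2.resize.
    ret, frame = video_capture.read()
    return frame if ret and frame is not None else None

With these, secondCase() would call reopen_capture() right before firstCase(), and each loop would start with frame = read_valid_frame() and continue when it is None.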

Camera not opening when deployed on server - Django/Python

The camera cannot be opened on the server, even though it is the same copy of the code with the same settings.
cam = cv2.VideoCapture(0)
I used this to initialise the camera (webcam). The code below processes the data stream; the error only appears on the server.
def identify_faces(video_capture):
    buf_length = 10
    known_conf = 6
    buf = [[]] * buf_length
    i = 0
    process_this_frame = True
    while True:
        ret, frame = video_capture.read()
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
        rgb_frame = small_frame[:, :, ::-1]
        if process_this_frame:
            predictions = predict(rgb_frame, model_path="folder/folder/models/trainedmodel.clf")
        process_this_frame = not process_this_frame
        face_names = []
        for name, (top, right, bottom, left) in predictions:
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            identify1(frame, name, buf, buf_length, known_conf)
            face_names.append(name)
        buf[i] = face_names
        i = (i + 1) % buf_length
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv2.destroyAllWindows()
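
Worth checking, as a likely cause rather than a certain one: on a deployed server, cv2.VideoCapture(0) opens a camera attached to the server machine itself, not the visitor's browser webcam, so on a headless host it usually fails and read() returns empty frames. A minimal sketch that makes that failure explicit before entering identify_faces:

import cv2

cam = cv2.VideoCapture(0)
if not cam.isOpened():
    # On a headless server there is normally no camera device 0 to open;
    # a browser's webcam would have to be streamed to the server instead.
    raise RuntimeError("Could not open camera device 0 on this host")
identify_faces(cam)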

Shifting from OpenCV GUI to Tkinter Form

I have this code that performs mouse functions using eye and other facial gestures with OpenCV and dlib. I run this code via a button click from a Tkinter window. When the code starts running, that Tkinter window freezes (i.e., I cannot click any other button in it).
Is there a way to make the frame used by OpenCV a top-level window, like top-level frames in Tkinter, so that it doesn't freeze the other frames? Or how can I replace the OpenCV frame with a Tkinter Toplevel frame?
P.S.: I have been on this for two days and have tried everything I could find on the internet, but I can't seem to find a solution.
_, frame = vid.read()
frame = cv2.flip(frame, 1)
frame = imutils.resize(frame, width=cam_w, height=cam_h)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
This is the part where changes need to be made, I guess. The full code is shared below.
from imutils import face_utils
from utils import *
import numpy as np
import pyautogui as pag
import imutils
import dlib
import cv2

# Thresholds and consecutive frame length for triggering the mouse action.
MOUTH_AR_THRESH = 0.3
MOUTH_AR_CONSECUTIVE_FRAMES = 3
EYE_AR_THRESH = 0.20
EYE_AR_CONSECUTIVE_FRAMES = 5
WINK_AR_DIFF_THRESH = 0.001
WINK_AR_CLOSE_THRESH = 0.2
WINK_CONSECUTIVE_FRAMES = 4

# Initialize the frame counters for each action as well as
# booleans used to indicate if action is performed or not
MOUTH_COUNTER = 0
EYE_COUNTER = 0
WINK_COUNTER = 0
INPUT_MODE = False
EYE_CLICK = False
LEFT_WINK = False
RIGHT_WINK = False
SCROLL_MODE = False
ANCHOR_POINT = (0, 0)
WHITE_COLOR = (255, 255, 255)
YELLOW_COLOR = (0, 255, 255)
RED_COLOR = (0, 0, 255)
GREEN_COLOR = (0, 255, 0)
BLUE_COLOR = (255, 0, 0)
BLACK_COLOR = (0, 0, 0)

# Initialize Dlib's face detector (HOG-based) and then create
# the facial landmark predictor
shape_predictor = "model/shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(shape_predictor)

# Grab the indexes of the facial landmarks for the left and
# right eye, nose and mouth respectively
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
(nStart, nEnd) = face_utils.FACIAL_LANDMARKS_IDXS["nose"]
(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS["mouth"]

# Video capture
vid = cv2.VideoCapture(0)

resolution_w = 1366
resolution_h = 768
cam_w = 640
cam_h = 480
unit_w = resolution_w / cam_w
unit_h = resolution_h / cam_h

while True:
    # Grab the frame from the threaded video file stream, resize
    # it, and convert it to grayscale (channels)
    _, frame = vid.read()
    frame = cv2.flip(frame, 1)
    frame = imutils.resize(frame, width=cam_w, height=cam_h)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces in the grayscale frame
    rects = detector(gray, 0)

    # Loop over the face detections
    if len(rects) > 0:
        rect = rects[0]
    else:
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        continue

    # Determine the facial landmarks for the face region, then
    # convert the facial landmark (x, y)-coordinates to a NumPy
    # array
    shape = predictor(gray, rect)
    shape = face_utils.shape_to_np(shape)

    # Extract the left and right eye coordinates, then use the
    # coordinates to compute the eye aspect ratio for both eyes
    mouth = shape[mStart:mEnd]
    leftEye = shape[lStart:lEnd]
    rightEye = shape[rStart:rEnd]
    nose = shape[nStart:nEnd]

    # Because I flipped the frame, left is right, right is left.
    temp = leftEye
    leftEye = rightEye
    rightEye = temp

    # Average the mouth aspect ratio together for both eyes
    mar = mouth_aspect_ratio(mouth)
    leftEAR = eye_aspect_ratio(leftEye)
    rightEAR = eye_aspect_ratio(rightEye)
    ear = (leftEAR + rightEAR) / 2.0
    diff_ear = np.abs(leftEAR - rightEAR)

    nose_point = (nose[3, 0], nose[3, 1])

    # Compute the convex hull for the left and right eye, then
    # visualize each of the eyes
    mouthHull = cv2.convexHull(mouth)
    leftEyeHull = cv2.convexHull(leftEye)
    rightEyeHull = cv2.convexHull(rightEye)
    cv2.drawContours(frame, [mouthHull], -1, YELLOW_COLOR, 1)
    cv2.drawContours(frame, [leftEyeHull], -1, YELLOW_COLOR, 1)
    cv2.drawContours(frame, [rightEyeHull], -1, YELLOW_COLOR, 1)

    for (x, y) in np.concatenate((mouth, leftEye, rightEye), axis=0):
        cv2.circle(frame, (x, y), 2, GREEN_COLOR, -1)

    # Check to see if the eye aspect ratio is below the blink
    # threshold, and if so, increment the blink frame counter
    if diff_ear > WINK_AR_DIFF_THRESH:
        if leftEAR < rightEAR:
            if leftEAR < EYE_AR_THRESH:
                WINK_COUNTER += 1
                if WINK_COUNTER > WINK_CONSECUTIVE_FRAMES:
                    pag.click(button='left')
                    WINK_COUNTER = 0
        elif leftEAR > rightEAR:
            if rightEAR < EYE_AR_THRESH:
                WINK_COUNTER += 1
                if WINK_COUNTER > WINK_CONSECUTIVE_FRAMES:
                    pag.click(button='right')
                    WINK_COUNTER = 0
        else:
            WINK_COUNTER = 0
    else:
        if ear <= EYE_AR_THRESH:
            EYE_COUNTER += 1
            if EYE_COUNTER > EYE_AR_CONSECUTIVE_FRAMES:
                SCROLL_MODE = not SCROLL_MODE
                # INPUT_MODE = not INPUT_MODE
                EYE_COUNTER = 0
                # nose point to draw a bounding box around it
        else:
            EYE_COUNTER = 0
            WINK_COUNTER = 0

    if mar > MOUTH_AR_THRESH:
        MOUTH_COUNTER += 1
        if MOUTH_COUNTER >= MOUTH_AR_CONSECUTIVE_FRAMES:
            # if the alarm is not on, turn it on
            INPUT_MODE = not INPUT_MODE
            # SCROLL_MODE = not SCROLL_MODE
            MOUTH_COUNTER = 0
            ANCHOR_POINT = nose_point
    else:
        MOUTH_COUNTER = 0

    if INPUT_MODE:
        cv2.putText(frame, "READING INPUT!", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, RED_COLOR, 2)
        x, y = ANCHOR_POINT
        nx, ny = nose_point
        w, h = 60, 35
        multiple = 1
        cv2.rectangle(frame, (x - w, y - h), (x + w, y + h), GREEN_COLOR, 2)
        cv2.line(frame, ANCHOR_POINT, nose_point, BLUE_COLOR, 2)

        dir = direction(nose_point, ANCHOR_POINT, w, h)
        cv2.putText(frame, dir.upper(), (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 0.7, RED_COLOR, 2)
        drag = 18
        if dir == 'right':
            pag.moveRel(drag, 0)
        elif dir == 'left':
            pag.moveRel(-drag, 0)
        elif dir == 'up':
            if SCROLL_MODE:
                pag.scroll(40)
            else:
                pag.moveRel(0, -drag)
        elif dir == 'down':
            if SCROLL_MODE:
                pag.scroll(-40)
            else:
                pag.moveRel(0, drag)

    if SCROLL_MODE:
        cv2.putText(frame, 'SCROLL MODE IS ON!', (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, RED_COLOR, 2)

    # Show the frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # If the `Esc` key was pressed, break from the loop
    if key == 27:
        break

# Do a bit of cleanup
cv2.destroyAllWindows()
vid.release()
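
One common way to keep the Tkinter window responsive is to run the OpenCV loop off the main thread, so mainloop() keeps servicing button clicks; OpenCV draws its own native window and on most platforms does not need to live on Tkinter's thread. A minimal hedged sketch (run_gesture_mouse is a hypothetical wrapper around the while-loop above, and Python 3's tkinter is assumed):

import threading
import tkinter as tk  # assuming Python 3

def run_gesture_mouse():
    # Hypothetical wrapper: move the entire while-loop above
    # (capture, landmarks, pyautogui actions, cv2.imshow) in here.
    pass

root = tk.Tk()

def on_start():
    # daemon=True so the worker thread exits together with the Tkinter
    # window instead of keeping the process alive.
    threading.Thread(target=run_gesture_mouse, daemon=True).start()

tk.Button(root, text="Start gesture mouse", command=on_start).pack(padx=20, pady=20)
root.mainloop()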

How to connect RTSP with OpenCV?

Hi, I am trying to connect OpenCV and the face_recognition library to my RTSP CCTV camera, but when I run my code I get the error below. I hope someone can help me with this issue. I attach my code here.
Error:
[h264 @ 000002a9659dba00] error while decoding MB 10 94, bytestream -5
[h264 @ 000002a953c1e2c0] Invalid NAL unit 8, skipping.
Traceback (most recent call last):
  File "face-present.py", line 125, in <module>
    cv2.imshow('Video', frame)
cv2.error: OpenCV(4.0.0) C:\projects\opencv-python\opencv\modules\highgui\src\window.cpp:350: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'
mycode.py
import face_recognition
import cv2

video_capture = cv2.VideoCapture("rtsp://admin:adam12345#192.168.0.158:554/Streaming/channels/101")

roy_image = face_recognition.load_image_file("images/roy1.jpg")
roy_face_encoding = face_recognition.face_encodings(roy_image, num_jitters=100)[0]

# Load a second sample picture and learn how to recognize it.
henrik_image = face_recognition.load_image_file("images/Mr_henrik.jpg")
henrik_face_encoding = face_recognition.face_encodings(henrik_image, num_jitters=100)[0]

stefan_image = face_recognition.load_image_file("images/stefan.jpg")
stefan_face_encoding = face_recognition.face_encodings(stefan_image, num_jitters=100)[0]

hairi_image = face_recognition.load_image_file("images/Hairi.jpeg")
hairi_face_encoding = face_recognition.face_encodings(hairi_image, num_jitters=100)[0]

syam_image = face_recognition.load_image_file("images/syam1.jpeg")
syam_face_encoding = face_recognition.face_encodings(syam_image, num_jitters=100)[0]
#print(syam_face_encoding)

# Create arrays of known face encodings and their names
known_face_encodings = [
    roy_face_encoding,
    stefan_face_encoding,
    henrik_face_encoding,
    hairi_face_encoding,
    syam_face_encoding
]
known_face_names = [
    "roy",
    "stefan",
    "henrik",
    "hairi",
    "syam"
]

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
# # Process video frame frequency
# process_frame_freq = 4
# process_this_frame = process_frame_freq

while True:
    if video_capture.isOpened():
        # Grab a single frame of video
        ret, frame = video_capture.read()
        if ret:
            # Resize frame of video to 1/4 size for faster face recognition processing
            small_frame = cv2.resize(frame, None, fx=0.25, fy=0.25)
            # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
            rgb_small_frame = small_frame[:, :, ::-1]
            # Only process every other frame of video to save time
            if process_this_frame:
                # Find all the faces and face encodings in the current frame of video
                face_locations = face_recognition.face_locations(rgb_small_frame)
                if face_locations:  # prevent manipulation of null variable
                    top, right, bottom, left = face_locations[0]
                    # faces_recognized += 1
                    # print("[%i] Face recognized..." % faces_recognized)
                    cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                    cropped_face = frame[top:bottom, left:right]
                face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
                #print(face_encodings)
                face_names = []
                for face_encoding in face_encodings:
                    # See if the face is a match for the known face(s)
                    matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.5)
                    name = "Unknown"
                    # If a match was found in known_face_encodings, just use the first one.
                    if True in matches:
                        first_match_index = matches.index(True)
                        name = known_face_names[first_match_index]
                        print(name)
                    face_names.append(name)
            process_this_frame = not process_this_frame
            # Display the results
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                # Scale back up face locations since the frame we detected in was scaled to 1/4 size
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
        # Display the resulting image
        cv2.imshow('Video', frame)
    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
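
The imshow assertion most likely fires when a corrupted RTSP packet (the "Invalid NAL unit" messages) makes read() hand back a failed or empty frame, which then reaches cv2.imshow outside the `if ret:` guard. A hedged sketch of a loop that simply skips those frames (same video_capture as above):

while True:
    ret, frame = video_capture.read()
    if not ret or frame is None or frame.size == 0:
        continue  # decoder hiccup: skip instead of showing an empty frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break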

Video processing frame value cannot be used in the main function in OpenCV Python

I want to use the frame array from the video loop, but I can't get the values out of the video-processing function. What I want to do is take the frame sequence values from the video function and then use those values in the main function, but neither the while-loop scope nor the function scope lets me do this. I'd appreciate it if you could help. Thank you in advance.
def video_processing():
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    while True:
        ret, frame = cap.read()
        if ret == True:
            cv2.imshow("Image", frame)
        if cv2.waitKey(1) & 0xFF == 27:
            cap.release()
            break

def main():
    video_processing()
    print frame  # is not working!

if __name__ == '__main__':
    main()
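
For what it's worth, frame here is a local variable of video_processing(), so by the time main() reaches print frame the name simply does not exist in that scope. A tiny illustration of the same failure:

def f():
    x = 1  # local to f; gone once f returns

f()
print(x)  # NameError: name 'x' is not defined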
Update (Full Code)
import Tkinter as tk
import ttk
from ttk import Frame
import os
import time
from Tkinter import *
from Tkinter import Tk, Label
import cv2
import threading
import time
import imutils
import numpy as np
import matplotlib.pyplot as plt
from PIL import ImageTk, Image
from threading import Thread

Frames = []

def video_processing():
    cap = cv2.VideoCapture(0)
    # cap = cv2.VideoCapture("C:\Users\eren\OneDrive\Desktop\WIN_20190522_18_16_29_Pro.mp4")
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    global Frames
    while True:
        global Frames
        ret, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        y, x = hsv.shape[:2]  # x = 320, y = 240
        # Define 'brown' range in HSV colorspace
        lower = np.array([10, 100, 20])
        upper = np.array([20, 255, 200])
        # Threshold the HSV image to get only brown color
        mask1 = cv2.inRange(hsv, lower, upper)
        kernel = np.ones((5, 5), np.uint8)
        thresh = cv2.dilate(mask1, kernel, iterations=2)
        # find contours in thresholded image, then grab the largest one
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        c = max(cnts, key=cv2.contourArea)
        # determine the most extreme points along the contour
        extLeft = tuple(c[c[:, :, 0].argmin()][0])
        extRight = tuple(c[c[:, :, 0].argmax()][0])
        extTop = tuple(c[c[:, :, 1].argmin()][0])
        extBot = tuple(c[c[:, :, 1].argmax()][0])
        cv2.drawContours(thresh, [c], -1, (0, 255, 255), 2)
        cv2.circle(thresh, extLeft, 8, (0, 0, 255), -1)
        cv2.circle(thresh, extRight, 8, (0, 255, 0), -1)
        cv2.circle(thresh, extTop, 8, (255, 0, 0), -1)
        cv2.circle(thresh, extBot, 8, (255, 255, 0), -1)
        x_center = (extLeft[0] + extRight[0] + extTop[0] + extBot[0]) / 4
        y_center = (extLeft[1] + extRight[1] + extTop[1] + extBot[1]) / 4
        cv2.circle(frame, (x_center, y_center), 3, (0, 255, 0), -1)
        cv2.line(frame, (extLeft[0], 0), (extLeft[0], y), (0, 255, 0), 2)    # y axis - binary
        cv2.line(frame, (extRight[0], 0), (extRight[0], y), (0, 255, 0), 2)  # y axis - binary
        cv2.line(frame, (0, extTop[1]), (x, extTop[1]), (0, 255, 0), 2)      # x axis - binary
        cv2.line(frame, (0, extBot[1]), (x, extBot[1]), (0, 255, 0), 2)      # x axis - binary
        # cv2.imshow("mask", thresh)
        cv2.imshow("Image", frame)
        Frames = frame
        # print frame
        # return frame
        if cv2.waitKey(1) & 0xFF == 27:
            cap.release()
            break
    # return frame

def main():
    global Frames
    video_processing()
    print Frames
    # print a

if __name__ == '__main__':
    main()
This works for me:
import cv2

frame = None

def video_processing():
    global frame
    cap = cv2.VideoCapture('VIDEO_NAME.mp4')
    while True:
        ret, frame = cap.read()
        if ret == True:
            cv2.imshow("Image", frame)
        if cv2.waitKey(1) & 0xFF == 27:
            cap.release()
            break

def main():
    video_processing()
    print(frame)

if __name__ == '__main__':
    main()
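
This works because `global frame` makes video_processing() rebind the module-level frame on every read, so main() sees the last value. An alternative sketch, assuming all that is needed in main() is the last frame: return it and avoid the global entirely.

import cv2

def video_processing():
    # Keep the most recent frame and hand it back to the caller.
    cap = cv2.VideoCapture('VIDEO_NAME.mp4')
    last_frame = None
    while True:
        ret, frame = cap.read()
        if not ret:
            break  # end of file or read error
        last_frame = frame
        cv2.imshow("Image", frame)
        if cv2.waitKey(1) & 0xFF == 27:
            break
    cap.release()
    return last_frame

def main():
    frame = video_processing()
    print(frame)

if __name__ == '__main__':
    main()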
