YOLO object detection in windows - Python

I want to adapt the code by the YouTuber Mark Jay, which detects objects in front of a webcam, so that it detects objects in windows (like pygta5).
(I changed the code a bit to something I, as a beginner, thought could work.)
import cv2
from darkflow.net.build import TFNet
import numpy as np
import time
from PIL import ImageGrab

options = {
    'model': 'cfg/yolo.cfg',
    'load': 'bin/yolo.weights',
    'threshold': 0.2,
    'gpu': 1.0
}

tfnet = TFNet(options)
colors = [tuple(255 * np.random.rand(3)) for _ in range(10)]

#capture = cv2.VideoCapture(0)
#capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
#capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

while True:
    stime = time.time()
    screen = np.array(ImageGrab.grab(bbox=(0, 0, 1920, 1080)))
    ret, frame = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
    results = tfnet.return_predict(frame)
    if ret:
        for color, result in zip(colors, results):
            tl = (result['topleft']['x'], result['topleft']['y'])
            br = (result['bottomright']['x'], result['bottomright']['y'])
            label = result['label']
            confidence = result['confidence']
            text = '{}: {:.0f}%'.format(label, confidence * 100)
            frame = cv2.rectangle(frame, tl, br, color, 5)
            frame = cv2.putText(frame, text, tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)
        cv2.imshow('frame', frame)
        print('FPS {:.1f}'.format(1 / (time.time() - stime)))
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
This code returns this error:
Traceback (most recent call last):
  File "D:\Python_Object_analyzis\YOLO Version\darkflow-master\Person_detection.py", line 23, in <module>
    ret, frame = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
ValueError: too many values to unpack (expected 2)
What do I have to change to get the code working?
(Sorry for my bad English.)
Thanks in advance,
Tobias
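For reference: cv2.cvtColor returns a single image array, not a (ret, frame) pair the way capture.read() does, which is what triggers the unpacking error on line 23. A minimal sketch of the corrected line, leaving the rest of the loop untouched:

frame = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
results = tfnet.return_predict(frame)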

For those who are interested: I got my code working with Python mss, which is also faster.
import cv2
from darkflow.net.build import TFNet
import numpy as np
import mss

options = {
    'model': 'cfg/tiny-yolo-voc.cfg',
    'load': 'bin/tiny-yolo-voc.weights',
    'threshold': 0.23,
    'gpu': 0.26
}

tfnet = TFNet(options)
colors = [tuple(255 * np.random.rand(3)) for _ in range(10)]

with mss.mss() as sct:
    monitor = {'top': 0, 'left': 0, 'width': 1920, 'height': 1080}
    while True:
        screen = np.array(sct.grab(monitor))
        ret, frame = True, cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)
        results = tfnet.return_predict(frame)
        if ret:
            for color, result in zip(colors, results):
                tl = (result['topleft']['x'], result['topleft']['y'])
                br = (result['bottomright']['x'], result['bottomright']['y'])
                label = result['label']
                confidence = result['confidence']
                text = '{}: {:.0f}%'.format(label, confidence * 100)
                frame = cv2.rectangle(frame, tl, br, color, 5)
                frame = cv2.putText(frame, text, tl, cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0), 2)
            cv2.imshow('frame', cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
Have a good time
Tobias
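One caveat, as an assumption about the mss version in use: np.array(sct.grab(monitor)) usually yields a four-channel BGRA image, so if cv2.cvtColor complains about the number of channels, converting with the BGRA code should help:

screen = np.array(sct.grab(monitor))
frame = cv2.cvtColor(screen, cv2.COLOR_BGRA2RGB)  # BGRA -> RGB; use COLOR_BGRA2BGR to keep OpenCV's usual BGR order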

Related

I am getting this error while making a drowsiness detection program in Python and can't figure out what's wrong

from scipy.spatial import distance as dist
from imutils import face_utils
import imutils
import dlib
import cv2
import winsound
from tkinter import Tk, Button

frequency = 2500
duration = 5000

def eyeAspectRatio(eye):
    A = dist.euclidean(eye[1], eye[5])
    B = dist.euclidean(eye[2], eye[4])
    C = dist.euclidean(eye[0], eye[3])
    ear = (A + B) / (2.0 * C)
    return ear

def run():
    count = 0
    earThresh = 0.3  # distance between vertical eye coordinates threshold
    earFrames = 48  # consecutive frames for eye closure
    #shapePredictor = "C:\\Users\\ATUL GHUMADE\\Downloads\\code\\code\\shape_predictor_68_face_landmarks.dat"
    cam = cv2.VideoCapture(0)
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

    # get the coords of the left & right eye
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    while True:
        _, frame = cam.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = detector(gray, 0)
        for rect in rects:
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = eyeAspectRatio(leftEye)
            rightEAR = eyeAspectRatio(rightEye)
            ear = (leftEAR + rightEAR) / 2.0
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 0, 255), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 0, 255), 1)
            if ear < earThresh:
                count += 1
                if count >= earFrames:
                    cv2.putText(frame, "DROWSINESS DETECTED", (10, 30),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                    winsound.Beep(frequency, duration)
            else:
                count = 0
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        if key == ord("q"):
            break
    cam.release()
    cv2.destroyAllWindows()

'''
tk = Tk()
tk.geometry('500x500')
sub = Button(tk, text="Inspect", command=run).pack()
tk.mainloop()
'''
run()
Traceback (most recent call last):
  File "C:\Users\ASUS\Desktop\Drowssiness detection - Copy\drowsiness detection\code\main.py", line 82, in <module>
    run()
  File "C:\Users\ASUS\Desktop\Drowssiness detection - Copy\drowsiness detection\code\main.py", line 35, in run
    frame = imutils.resize(frame, width=450)
  File "C:\Users\ASUS\AppData\Local\Programs\Python\Python39\lib\site-packages\imutils\convenience.py", line 69, in resize
    (h, w) = image.shape[:2]
AttributeError: 'NoneType' object has no attribute 'shape'
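The traceback shows that frame is None by the time it reaches imutils.resize, which usually means cv2.VideoCapture(0) failed to open the camera or cam.read() returned no frame. A guard like the following sketch (the camera index and the early break are assumptions) makes the failure explicit instead of crashing in resize:

cam = cv2.VideoCapture(0)
if not cam.isOpened():
    raise RuntimeError("Could not open camera 0")

while True:
    ret, frame = cam.read()
    if not ret or frame is None:  # no frame delivered; stop instead of crashing
        break
    frame = imutils.resize(frame, width=450)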

How to stop the execution of a cv2 program

I am currently working on a project which requires body detection, so I created a file for it. But when I try to use it in my main file, the body detection keeps running and never stops, making the rest of my program unable to execute. I know it is because of the infinite loop. Is there another way I could use it in my main file?
I have attached the body detection program below.
Kindly help.
import cv2
import mediapipe as mp
import time

mpDraw = mp.solutions.drawing_utils
mppose = mp.solutions.pose
pose = mppose.Pose()

cap = cv2.VideoCapture('open cv/squidgamee/3.mp4')
cap.set(3, 400)
cap.set(4, 800)
ptime = 0

while True:
    succ, img = cap.read()
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = pose.process(imgRGB)
    #print(results.pose_landmarks)
    if results.pose_landmarks:
        mpDraw.draw_landmarks(img, results.pose_landmarks, mppose.POSE_CONNECTIONS)
    lmlist = []
    if results.pose_landmarks:
        for id, lm in enumerate(results.pose_landmarks.landmark):
            h, w, c = img.shape
            cx, cy = int(lm.x*w), int(lm.y*h)
            lmlist.append([id, cx, cy])
        cv2.circle(img, (lmlist[0][1], lmlist[0][2]), 15, (255, 0, 255), cv2.FILLED)
        print(lmlist[0])
    ctime = time.time()
    fps = 1/(ctime-ptime)
    ptime = ctime
    cv2.putText(img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_COMPLEX, 3, (255, 255, 0), 3)
    cv2.imshow("image", img)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
You can wrap the content of the file in a function and run it on a separate thread.
import cv2
import mediapipe as mp
import time

def func():
    mpDraw = mp.solutions.drawing_utils
    mppose = mp.solutions.pose
    pose = mppose.Pose()
    cap = cv2.VideoCapture('open cv/squidgamee/3.mp4')
    cap.set(3, 400)
    cap.set(4, 800)
    ptime = 0
    while True:
        succ, img = cap.read()
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        results = pose.process(imgRGB)
        if results.pose_landmarks:
            mpDraw.draw_landmarks(img, results.pose_landmarks, mppose.POSE_CONNECTIONS)
        lmlist = []
        if results.pose_landmarks:
            for id, lm in enumerate(results.pose_landmarks.landmark):
                h, w, c = img.shape
                cx, cy = int(lm.x*w), int(lm.y*h)
                lmlist.append([id, cx, cy])
            cv2.circle(img, (lmlist[0][1], lmlist[0][2]), 15, (255, 0, 255), cv2.FILLED)
            print(lmlist[0])
        ctime = time.time()
        fps = 1/(ctime-ptime)
        ptime = ctime
        cv2.putText(img, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_COMPLEX, 3, (255, 255, 0), 3)
        cv2.imshow("image", img)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
Main:
import threading
from file import func
x = threading.Thread(target=func, args=())
x.start()
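If the goal is also to stop the detection from the main program (not only with the q key in its window), a threading.Event can be polled inside the loop. A sketch under the assumption that func is changed to accept the event (the stop parameter is made up for illustration):

import threading
import cv2

stop = threading.Event()

def func(stop):
    cap = cv2.VideoCapture('open cv/squidgamee/3.mp4')
    while not stop.is_set():  # main program can end the loop by setting the event
        succ, img = cap.read()
        if not succ:
            break
        # ... pose detection and drawing as above ...
        cv2.imshow("image", img)
        if cv2.waitKey(1) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

x = threading.Thread(target=func, args=(stop,))
x.start()
# later, when the main program wants the detection to end:
stop.set()
x.join()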

CV2 error in face detection program via Python

So, I was making a face detection program and everything was going right, but as soon as I ran it, it showed me an error saying:
line 40, in <module>
    faceNet=cv2.dnn.readNet(faceModel, faceProto)
cv2.error: OpenCV(4.5.4-dev) D:\a\opencv-python\opencv-python\opencv\modules\dnn\src\caffe\caffe_io.cpp:1138: error: (-2:Unspecified error) FAILED: fs.is_open(). Can't open "opencv_face_detector_uint8.pb" in function 'cv::dnn::ReadProtoFromBinaryFile'
The code is:
import cv2
import math
import argparse

def highlightFace(net, frame, conf_thershold=0.7):
    frameOpencvDnn=frame.copy()
    frameHight=frameOpencvDnn.shape[0]
    frameWidth=frameOpencvDnn.shape[1]
    blob=cv2.dnnblobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], True, False)
    net.setInput(blob)
    detections=net.forword()
    faceBoxes=[]
    for i in range(detections.shape[2]):
        confidence=detections[0,0,1,2]
        if confidence>conf_thershold:
            x1=int(detections[0,0,i,3]*frameWidth)
            y1=int(detections[0,0,i,4]*frameHight)
            x2=int(detections[0,0,i,5]*frameWidth)
            y2=int(detections[0,0,i,6]*frameHight)
            faceBoxes.append([x1,y1,x2,y2])
            cv2.rectangel(frameOpencvDnn, (x1,y1), (x2,y2), (0,225,0), int(round(frameHight/150)), 8)
    return frameOpencvDnn,faceBoxes

parser=argparse.ArgumentParser()
parser.add_argument('--image')
args=parser.parse_args()

faceProto="opencv_face_detector.pbtxt"
faceModel="opencv_face_detector_uint8.pb"
ageProto="age_deploy.prototxt"
ageModel="age_net.caffemodel"
genderProto="gender_deploy.prototxt"
genderModel="gender_net.caffmodel"

MODEL_MEAN_VALUES=(78.4263377603, 87.7689143744, 144.895847746)
ageList=['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
genderList=['Male','Female']

faceNet=cv2.dnn.readNet(faceModel, faceProto)
ageNet=cv2.dnn.readNet(ageModel, ageProto)
genderNet=cv2.dnn.readNet(genderModel, genderProto)

video=cv2.VideoCapture(args.image if args.image else 0)
padding=20
while cv2.waitKey(1)<0:
    hasFrame,frame=video.read()
    if not hasFrame:
        cv2.waitKey()
        break
    resultImg,faceBoxes=highlightFace(faceNet,frame)
    if not faceBoxes:
        print("No Face is being Detected")
    for faceBox in faceBoxes:
        face=frame[max(0,faceBox[1]-padding):
                   min(faceBox[3]+padding,frame.shape[0]-1),max(0,faceBox[0]-padding)
                   :min(faceBox[2]+padding, frame.shape[1]-1)]
        blob=cv2.dnn.blobFromImage(face, 1.0, (227.227), MODEL_MEAN_VALUES, swapRB=False)
        genderNet.setInput(blob)
        genderPreds=genderNet.forword()
        gender=genderList[genderPreds[0].argmax()]
        print(f'Gender: {gender}')
        ageNet.setInput(blob)
        agePreds=ageNet.forword()
        age=ageList[agePreds[0].argmax()]
        print(f'Age: {age[1:-1]} years')
        cv2.putText(resultImg, f'{gender}, {age}', (faceBox[0], faceBox[1]-10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0,225,225), 2, cv2.LINE_AA)
    cv2.imshow("Detecting age and gender", resultImg)
The code is a little dodgy, mate; it does not display the video. Try this.
import cv2
import math
import argparse

""" Identification """
faceProto = "opencv_face_detector.pbtxt"
faceModel = "opencv_face_detector_uint8.pb"
ageProto = "age_deploy.prototxt"
ageModel = "age_net.caffemodel"
genderProto = "gender_deploy.prototxt"
genderModel = "gender_net.caffemodel"

faceNet = cv2.dnn.readNet(faceModel, faceProto)
ageNet = cv2.dnn.readNet(ageModel, ageProto)
genderNet = cv2.dnn.readNet(genderModel, genderProto)

MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
ageList = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
genderList = ['Male', 'Female']
padding = 20

""" Face highlighting """
def faceBox(faceNet, frames):
    frameHeight = frames.shape[0]
    frameWidth = frames.shape[1]
    blob = cv2.dnn.blobFromImage(frames, 1.0, (300, 300), [104, 117, 123], swapRB=False)
    faceNet.setInput(blob)
    detection = faceNet.forward()
    bboxs = []
    for i in range(detection.shape[2]):
        confidence = detection[0, 0, i, 2]
        if confidence > 0.7:
            x1 = int(detection[0, 0, i, 3] * frameWidth)
            y1 = int(detection[0, 0, i, 4] * frameHeight)
            x2 = int(detection[0, 0, i, 5] * frameWidth)
            y2 = int(detection[0, 0, i, 6] * frameHeight)
            bboxs.append([x1, y1, x2, y2])
            cv2.rectangle(frames, (x1, y1), (x2, y2), (0, 255, 0), 1)
    return frames, bboxs

""" Video display """
def DisplayVid():
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('testvideo', fourcc, 20.0, (640, 480))
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    if not cap.isOpened():
        print("Could not open video device")
        return
    while True:
        ret, frame = cap.read()
        frameFace, bboxes = faceBox(faceNet, frame)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        out.write(frame)
        for bbox in bboxes:
            face = frame[bbox[1]:bbox[3], bbox[0]:bbox[2]]
            blob = cv2.dnn.blobFromImage(face, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)
            genderNet.setInput(blob)
            genderPreds = genderNet.forward()
            gender = genderList[genderPreds[0].argmax()]
            ageNet.setInput(blob)
            agePreds = ageNet.forward()
            age = ageList[agePreds[0].argmax()]
            label = "{},{}".format(gender, age)
            cv2.rectangle(frameFace, (bbox[0], bbox[1] - 30), (bbox[2], bbox[1]), (0, 255, 0), -1)
            cv2.putText(frameFace, label, (bbox[0], bbox[1] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2,
                        cv2.LINE_AA)
        cv2.imshow("Age-Gender", frameFace)
        k = cv2.waitKey(1)
        if k & 0xFF == ord('q'):
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()

DisplayVid()
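Note that the original error (FAILED: fs.is_open()) only means OpenCV could not find opencv_face_detector_uint8.pb, so neither version will run until the model files are in the script's working directory or referenced by absolute path. A small sanity check before loading the networks, as a sketch:

import os

for f in (faceProto, faceModel, ageProto, ageModel, genderProto, genderModel):
    if not os.path.isfile(f):
        raise FileNotFoundError("Model file not found: " + os.path.abspath(f))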

Converting an OpenCV program into a Module reduces frame rate drastically

I wrote a code for pose estimation using OpenCV and the mediapipe library. The program was working well and I was getting around 30-35 fps. When I tried to convert the same program to a module, so that I can use it easily in future projects, the fps of the new code (module) dropped drastically to 3-4 fps.
My original program:
import cv2
import mediapipe as mp
import time

cap = cv2.VideoCapture(1)
pTime = 0
cTime = 0
mpDraw = mp.solutions.drawing_utils
mpPose = mp.solutions.pose
pose = mpPose.Pose()

while True:
    success, img1 = cap.read()
    img = cv2.flip(img1, 1)
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    results = pose.process(imgRGB)
    if results.pose_landmarks:
        mpDraw.draw_landmarks(img, results.pose_landmarks, mpPose.POSE_CONNECTIONS)
        for id, lm in enumerate(results.pose_landmarks.landmark):
            h, w, c = img.shape
            cx, cy = int(lm.x*w), int(lm.y*h)
            cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)
    cTime = time.time()
    fps = 1/(cTime - pTime)
    pTime = cTime
    cv2.putText(img, "FPS : " + str(int(fps)), (10, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 8), 2)
    cv2.imshow("Live Feed", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
My attempt at converting it into a module:
import cv2
import mediapipe as mp
import time

class poseDetector():
    def __init__(self, mode=False, upBody=False, smooth=True, detectionCon=0.5, trackingCon=0.5):
        self.mode = mode
        self.upBody = upBody
        self.smooth = smooth
        self.detectionCon = detectionCon
        self.trackingCon = trackingCon
        self.mpDraw = mp.solutions.drawing_utils
        self.mpPose = mp.solutions.pose
        self.pose = self.mpPose.Pose(self.mode, self.upBody, self.smooth, self.detectionCon, self.trackingCon)

    def findPose(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.pose.process(imgRGB)
        if self.results.pose_landmarks:
            if draw:
                self.mpDraw.draw_landmarks(img, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS)
        return img

    def findPosition(self, img, draw=True):
        lmList = []
        if self.results.pose_landmarks:
            for id, lm in enumerate(self.results.pose_landmarks.landmark):
                h, w, c = img.shape
                cx, cy = int(lm.x*w), int(lm.y*h)
                lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 5, (255, 0, 0), cv2.FILLED)
        return lmList

def main():
    cap = cv2.VideoCapture(1)
    pTime = 0
    cTime = 0
    while True:
        success, img1 = cap.read()
        img = cv2.flip(img1, 1)
        detector = poseDetector()
        img = detector.findPose(img)
        lmList = detector.findPosition(img)
        cTime = time.time()
        fps = 1/(cTime - pTime)
        pTime = cTime
        cv2.putText(img, "FPS : " + str(int(fps)), (10, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 0, 8), 2)
        cv2.imshow("Live Feed", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

if __name__ == '__main__':
    main()
As far as I can tell, both versions should work the same way, but they do not. Can anyone tell me where I am making a mistake?
You need to move detector = poseDetector() above the while True: loop:
detector = poseDetector()
while True:
    success, img1 = cap.read()
    ...
Your "module" implementation creates a new poseDetector object every iteration of the main loop.
Each execution of detector = poseDetector() includes a call to poseDetector.__init__ that calls self.pose =self.mpPose.Pose...
There is a lot of overhead...
while True:
success, img1 = cap.read()
img = cv2.flip(img1, 1)
detector = poseDetector()
...
In your original ("non-module") implementation, you are executing pose = mpPose.Pose() only once (before the loop).
pose = mpPose.Pose()
while True:
    success, img1 = cap.read()
    ...
I have tested your code before and after moving detector = poseDetector() outside the loop.
After moving the line above the loop, the frame rate is the same as the "non-module" implementation.
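For completeness, a sketch of main() with the constructor hoisted out of the loop; everything else stays as in the question:

def main():
    cap = cv2.VideoCapture(1)
    pTime = 0
    detector = poseDetector()  # construct once, reuse for every frame
    while True:
        success, img1 = cap.read()
        img = cv2.flip(img1, 1)
        img = detector.findPose(img)
        lmList = detector.findPosition(img)
        ...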

Video processing frame value cannot be used in main function in OpenCV Python

I want to use the frame array produced inside the video loop, but I can't get the values out of the video processing function. What I want to do is take the frame values from the video function and then use them in the main function, but neither the while loop scope nor the video function scope allows this. I'd appreciate it if you could help. Thank you in advance.
def video_processing():
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    while True:
        ret, frame = cap.read()
        if ret == True:
            cv2.imshow("Image", frame)
        if cv2.waitKey(1) & 0xFF == 27:
            cap.release()
            break

def main():
    video_processing()
    print frame  # is not working!

if __name__ == '__main__':
    main()
Update (Full Code)
import Tkinter as tk
import ttk
from ttk import Frame
import os
import time
from Tkinter import *
from Tkinter import Tk, Label
import cv2
import threading
import time
import imutils
import numpy as np
import matplotlib.pyplot as plt
from PIL import ImageTk, Image
from threading import Thread

Frames = []

def video_processing():
    cap = cv2.VideoCapture(0)
    # cap = cv2.VideoCapture("C:\Users\eren\OneDrive\Desktop\WIN_20190522_18_16_29_Pro.mp4")
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    global Frames
    while True:
        global Frames
        ret, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        y, x = hsv.shape[:2]  # x = 320, y = 240

        # Define 'brown' range in HSV colorspace
        lower = np.array([10, 100, 20])
        upper = np.array([20, 255, 200])

        # Threshold the HSV image to get only brown color
        mask1 = cv2.inRange(hsv, lower, upper)
        kernel = np.ones((5, 5), np.uint8)
        thresh = cv2.dilate(mask1, kernel, iterations=2)

        # find contours in thresholded image, then grab the largest one
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        c = max(cnts, key=cv2.contourArea)

        # determine the most extreme points along the contour
        extLeft = tuple(c[c[:, :, 0].argmin()][0])
        extRight = tuple(c[c[:, :, 0].argmax()][0])
        extTop = tuple(c[c[:, :, 1].argmin()][0])
        extBot = tuple(c[c[:, :, 1].argmax()][0])

        cv2.drawContours(thresh, [c], -1, (0, 255, 255), 2)
        cv2.circle(thresh, extLeft, 8, (0, 0, 255), -1)
        cv2.circle(thresh, extRight, 8, (0, 255, 0), -1)
        cv2.circle(thresh, extTop, 8, (255, 0, 0), -1)
        cv2.circle(thresh, extBot, 8, (255, 255, 0), -1)

        x_center = (extLeft[0] + extRight[0] + extTop[0] + extBot[0]) / 4
        y_center = (extLeft[1] + extRight[1] + extTop[1] + extBot[1]) / 4

        cv2.circle(frame, (x_center, y_center), 3, (0, 255, 0), -1)
        cv2.line(frame, (extLeft[0], 0), (extLeft[0], y), (0, 255, 0), 2)    # y axis - binary
        cv2.line(frame, (extRight[0], 0), (extRight[0], y), (0, 255, 0), 2)  # y axis - binary
        cv2.line(frame, (0, extTop[1]), (x, extTop[1]), (0, 255, 0), 2)      # x axis - binary
        cv2.line(frame, (0, extBot[1]), (x, extBot[1]), (0, 255, 0), 2)      # x axis - binary

        # cv2.imshow("mask", thresh)
        cv2.imshow("Image", frame)
        Frames = frame
        # print frame
        # return frame
        if cv2.waitKey(1) & 0xFF == 27:
            cap.release()
            break
    # return frame

def main():
    global Frames
    video_processing()
    print Frames
    # print a

if __name__ == '__main__':
    main()
This works for me:
import cv2

frame = None

def video_processing():
    global frame
    cap = cv2.VideoCapture('VIDEO_NAME.mp4')
    while True:
        ret, frame = cap.read()
        if ret == True:
            cv2.imshow("Image", frame)
        if cv2.waitKey(1) & 0xFF == 27:
            cap.release()
            break

def main():
    video_processing()
    print(frame)

if __name__ == '__main__':
    main()
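If you would rather avoid the global, the function can also hand the frame back through its return value; a minimal sketch (returning only the last frame is an assumption about what the caller needs):

import cv2

def video_processing():
    cap = cv2.VideoCapture(0)
    last = None
    while True:
        ret, frame = cap.read()
        if ret:
            last = frame  # remember the most recent frame for the caller
            cv2.imshow("Image", frame)
        if cv2.waitKey(1) & 0xFF == 27:  # Esc quits
            cap.release()
            break
    return last

def main():
    frame = video_processing()
    print(frame)

if __name__ == '__main__':
    main()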
