Hi, I would like to run this code to detect cars using the Raspberry Pi camera (raspicam) on a Raspberry Pi B with OpenCV, but I encountered errors.
import numpy as np
import cv2

car_cascade = cv2.CascadeClassifier('cars3.xml')
cap = cv2.VideoCapture(0)

while 1:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cars = car_cascade.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in cars:
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
    cv2.imshow('img',img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
After running the code, it returns:
OpenCV Error: Assertion failed (scn == 3 || scn == 4) in cvtColor, file /home/pi/installopencv/opencv-3.1.0/modules/imgproc/src/color.cpp, line 8000
Traceback (most recent call last):
File "test.py", line 14, in <module>
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.error: /home/pi/installopencv/opencv-3.1.0/modules/imgproc/src/color.cpp:8000: error: (-215) scn == 3 || scn == 4 in function cvtColor
Is the error happening because I'm using the raspicam and "cap = cv2.VideoCapture(0)" only works for webcams? I tried enabling the V4L2 module, but that didn't work either.
If you want to use the Raspberry Pi camera module, use the picamera module to get the frames, not OpenCV's VideoCapture. In particular, you want to install the module with array support:
pip install "picamera[array]"
This will allow you to easily pass the frames to OpenCV.
There's a very good tutorial on how to start from scratch here,
and here is the gist of it:
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import cv2

# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))

# allow the camera to warmup
time.sleep(0.1)

# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    image = frame.array

    # show the frame
    cv2.imshow("Frame", image)
    key = cv2.waitKey(1) & 0xFF

    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
In your case, you may want to change the format from "bgr" to "yuv".
This way, you can extract the Y (luminance) channel directly, which gives you your grayscale image. You should gain a small speed boost by skipping the colour-space conversion (BGR to grayscale) and by fetching the frames from the CSI camera (instead of USB).
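For illustration, here is a minimal sketch of that idea, assuming picamera's PiYUVArray is available; the resolution, cascade file, and window name are just the ones from your code and may need adjusting:
# capture in YUV and use the Y plane directly as the grayscale image
from picamera.array import PiYUVArray
from picamera import PiCamera
import time
import cv2

camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiYUVArray(camera, size=(640, 480))
time.sleep(0.1)

car_cascade = cv2.CascadeClassifier('cars3.xml')

for frame in camera.capture_continuous(rawCapture, format="yuv", use_video_port=True):
    # frame.array has shape (rows, cols, 3); channel 0 is the Y (luminance) plane
    gray = frame.array[:, :, 0]
    cars = car_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in cars:
        cv2.rectangle(gray, (x, y), (x + w, y + h), 255, 2)
    cv2.imshow('cars', gray)
    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc to quit
        break

cv2.destroyAllWindows()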
Related
I'm using OpenCV with Python to try to detect faces with detectMultiScale, but it seems to have a problem with it.
This is my code:
#import required libraries
import cv2
import time

#point to the haar cascade file in the directory
cascPath = "haarcascade.xml"

#start the camera
video_capture = cv2.VideoCapture(0)

#give camera time to warm up
time.sleep(0.1)

#start video frame capture loop
while True:
    # take the frame, convert it to black and white, and look for facial features
    faceCascade = cv2.CascadeClassifier(cascPath)
    ret, frame = video_capture.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # use appropriate flag based on version of OpenCV
    if int(cv2.__version__.split('.')[0]) >= 3:
        cv_flag = cv2.CASCADE_SCALE_IMAGE
    else:
        cv_flag = cv2.cv.CV_HAAR_SCALE_IMAGE

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv_flag
    )

    #for each face, draw a green rectangle around it and append to the image
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    #display the resulting image
    cv2.imshow('Video', frame)

    #set "q" as the key to exit the program when pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# clear the stream capture
video_capture.release()
cv2.destroyAllWindows()
and this is the error I'm getting:
faces = faceCascade.detectMultiScale(
cv2.error: OpenCV(4.5.2) C:\Users\runneradmin\AppData\Local\Temp\pip-req-build-_8k9tw8n\opencv\modules\objdetect\src\cascadedetect.cpp:1689: error: (-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
Solution:
The important part of your error is:
error: (-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
Basically, the problem is that OpenCV can't find the cascade file at the given path, or the file is in a format it can't read, so the classifier ends up empty. Try downloading the file again, or use the file that comes with the opencv-python pip package, like so:
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
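If you want to fail fast with a clearer message, a small sanity check right after loading is enough (the error text below is just an example):
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
if faceCascade.empty():
    # the classifier silently stays empty when the file is missing or unreadable
    raise IOError("Could not load Haar cascade - check the path to the xml file")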
I am trying to get input from my webcam using OpenCV and send it to a virtual camera using pyvirtualcam. For some reason, when my webcam feed is displayed through the virtual camera it has a blue tint. When I display my webcam without sending it to the virtual camera, there is no tint and everything works well.
import pyvirtualcam
import cv2

cap = cv2.VideoCapture(0)

with pyvirtualcam.Camera(width=1280, height=720, fps=20) as cam:
    while True:
        ret_val, frame = cap.read()
        frame = cv2.resize(frame, (1280, 720), interpolation=cv2.BORDER_DEFAULT)
        # cv2.imshow('my webcam', frame)
        cam.send(frame)
        cam.sleep_until_next_frame()
        if cv2.waitKey(1) == 27:
            break  # esc to quit
cv2.destroyAllWindows()
OpenCV uses BGR as its pixel format. pyvirtualcam expects RGB by default, but it supports BGR as well:
fmt = pyvirtualcam.PixelFormat.BGR
with pyvirtualcam.Camera(width=1280, height=720, fps=20, fmt=fmt) as cam:
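Put together, a minimal sketch of the loop with the BGR format (the size, fps, and device index are just the values from your snippet):
import pyvirtualcam
import cv2

cap = cv2.VideoCapture(0)

# tell pyvirtualcam the frames are BGR so no channel swap is needed
fmt = pyvirtualcam.PixelFormat.BGR
with pyvirtualcam.Camera(width=1280, height=720, fps=20, fmt=fmt) as cam:
    while True:
        ret_val, frame = cap.read()
        if not ret_val:
            break
        frame = cv2.resize(frame, (1280, 720))
        cam.send(frame)  # frame stays in OpenCV's native BGR order
        cam.sleep_until_next_frame()

cap.release()
Alternatively, keep the default RGB format and convert each frame with cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) before calling cam.send.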
I'm new to OpenCV and am trying to get OpenCV working with my USB webcam on Win7 with Python 3.8. I've got the basic tutorial from here, modified from the Raspberry Pi camera version by the same author here,
which is:
#!/usr/bin/python3
import time
import numpy as np
import cv2

#point to the haar cascade file in the directory
cascPath = "haarcascade.xml"
faceCascade = cv2.CascadeClassifier(cascPath)

#start the camera
video_capture = cv2.VideoCapture(0)

#give camera time to warm up
time.sleep(0.1)

#start video frame capture loop
while True:
    # take the frame, convert it to black and white, and look for facial features
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # use appropriate flag based on version of OpenCV
    if int(cv2.__version__.split('.')[0]) >= 3:
        cv_flag = cv2.CASCADE_SCALE_IMAGE
    else:
        cv_flag = cv2.cv.CV_HAAR_SCALE_IMAGE

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv_flag
    )

    #for each face, draw a green rectangle around it and append to the image
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    #display the resulting image
    cv2.imshow('Video', frame)

    #set "q" as the key to exit the program when pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# clear the stream capture
video_capture.release()
cv2.destroyAllWindows()
It should run out of the box, but I get the error below and I'm not sure why. cv_flag and gray have data and the other parameters are filled in. Any ideas?
C:\Users\Ghoul>py D:\LearnPython\open_cv_face_track_test.py -3.8
[ WARN:0] global C:\projects\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (674)
SourceReaderCB::~SourceReaderCB
terminating async callback
Traceback (most recent call last):
File "D:\LearnPython\open_cv_face_track_test.py", line 31, in <module>
faces = faceCascade.detectMultiScale(
cv2.error: OpenCV(4.1.2) C:\projects\opencv-python\opencv\modules\objdetect\src\cascadedetect.cpp:1689: error: (-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
The faceCascade classifier is empty, which means OpenCV was unable to load the classifier from the path provided.
You can replace the line
cascPath = "haarcascade.xml"
with something like:
cascPath = '../../haarcascade.xml'
where you substitute the actual path to the xml file on your machine for cascPath.
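A small sketch of how you might verify the path before constructing the classifier (the filename here is still your haarcascade.xml and only an example):
import os
import cv2

cascPath = os.path.abspath("haarcascade.xml")  # or the full path where you saved the file
if not os.path.isfile(cascPath):
    raise FileNotFoundError(f"Cascade file not found: {cascPath}")

faceCascade = cv2.CascadeClassifier(cascPath)
if faceCascade.empty():
    raise IOError("Cascade file found but could not be loaded - it may be corrupted")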
I am using a Raspberry Pi 3 Model B, with Raspbian, opencv 2.x and Python 3 installed.
I want to access my USB webcam and take a picture with it. I've found tons of code, but none of it has been of any use. I found one example that looks better, but when it reaches the line
cascPath = sys.argv[1]
I get the error
Traceback (most recent call last):
File "/home/pi/test.py", line 4, in
cascPath = sys.argv[1]
IndexError: list index out of range
I simply need to access my webcam to take a picture.
I am using the following code:
import cv2
import sys

cascPath = sys.argv[1]
faceCascade = cv2.CascadeClassifier(cascPath)

video_capture = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )

    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)

    # Display the resulting frame
    cv2.imshow('Video', frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

#When everything is done, release the capture
video_capture.release()
This code tries to recognize faces in the camera image, and sys.argv[1] expects you to run the script with the path to the XML file (the Haar cascade) that is used to recognize faces.
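If you do want face detection without passing a command-line argument, a minimal sketch is to fall back to the cascade bundled with the opencv-python package (this assumes you installed OpenCV via pip):
import sys
import cv2

# use the path given on the command line, otherwise fall back to the bundled cascade
if len(sys.argv) > 1:
    cascPath = sys.argv[1]
else:
    cascPath = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'

faceCascade = cv2.CascadeClassifier(cascPath)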
If you don't want to recognize faces, then you only need this code to display the video from the camera on the monitor:
import cv2

video_capture = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()

    # Display the resulting frame
    cv2.imshow('Video', frame)

    # exit if you press key `q`
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

#When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
Or this to save an image:
import cv2
video_capture = cv2.VideoCapture(0)
# Capture frame
ret, frame = video_capture.read()
# Write frame in file
cv2.imwrite('image.jpg', frame)
# When everything is done, release the capture
video_capture.release()
I am a newbie with Python and OpenCV. I am trying to build a face detection project with a Raspberry Pi. I am getting this error, and here is my code.
Traceback (most recent call last):
File "/home/pi/Desktop/picamera-code/FaceDetection1.0", line 19, in <module>
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
Code:
import numpy as np
import cv2
from picamera.array import PiRGBArray
from picamera import PiCamera
import time

camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)

face_cascade = cv2.CascadeClassifier('/home/pi/Downloads/haarcascade_frontalface_default.xml')

for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    img = np.asarray(frame.array)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x,y,w,h) in faces:
        img = cv2.Rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
    cv2.imshow('img',img)
    cv2.waitKey(0)

cv2.destroyAllWindows()
The problem is in your camera.capture_continuous. The first value, output, cannot be just an array, because the call records in an infinite iteration, as the docs say. Instead, you should pass an output file; if you want a stream to capture into, you can use io.BytesIO as well.
This link explains with examples how to use the frames and where you should redirect the output.
You can do something like what is suggested in the API docs: take the stream and truncate it to get the image you are currently processing:
import io
import time
import picamera

with picamera.PiCamera() as camera:
    stream = io.BytesIO()
    for foo in camera.capture_continuous(stream, format='jpeg'):
        # YOURS: for frame in camera.capture_continuous(stream, format="bgr", use_video_port=True):
        # Truncate the stream to the current position (in case
        # prior iterations output a longer image)
        stream.truncate()
        stream.seek(0)
        if process(stream):  # process() is your own frame-handling function
            break
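If you go that route and still want an OpenCV image, a minimal sketch of such a process() function is to decode the JPEG bytes with cv2.imdecode (the function name and the display step are only illustrative):
import numpy as np
import cv2

def process(stream):
    # decode the JPEG bytes currently in the stream into a BGR image
    data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
    img = cv2.imdecode(data, cv2.IMREAD_COLOR)
    if img is None:
        return False  # keep capturing if the frame could not be decoded
    cv2.imshow('img', img)
    # return True to stop the capture loop, e.g. when Esc is pressed
    return (cv2.waitKey(1) & 0xFF) == 27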
The correct answer is that you need to truncate the stream at the end of the loop. Add
rawCapture.truncate(0)
at the end of the outer for loop (the one iterating over the frames).
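A sketch of that loop with the fix in place (note it also uses cv2.rectangle, lowercase, and cv2.waitKey(1) so the loop keeps running; everything else follows your original code):
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    img = frame.array
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow('img', img)
    # without this, the buffer keeps its old contents and the next capture fails
    rawCapture.truncate(0)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break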
If you change the resolution on line 11 from (640, 480) to (160, 120), it should work.