I'm trying to run a Python program in Terminal, but I get the following error:
> pi@raspberrypi:~ $ python3 /home/pi/Cascades2/03_face_recognition.py
Traceback (most recent call last):
  File "/home/pi/Cascades2/03_face_recognition.py", line 15, in <module>
    recognizer.read('trainer/trainer.yml')
cv2.error: OpenCV(4.1.0) /home/pi/opencv-python/opencv_contrib/modules/face/src/facerec.cpp:61: error: (-2:Unspecified error) File can't be opened for reading! in function 'read'
The Python program works well when I run it with Thonny.
What's the problem?
Here's the program (03_face_recognition.py) code:
import cv2
import numpy as np
import os
import time
import datetime
import RPi.GPIO as GPIO

relay = 23
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(relay, GPIO.OUT)
GPIO.output(relay, 0)

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('trainer/trainer.yml')
cascadePath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascadePath)
font = cv2.FONT_HERSHEY_SIMPLEX

# initiate id counter
id = 0
# names related to ids: example ==> Jenifer: id=1, etc
names = ['None', 'Jenifer', 'Luciola']

# Initialize and start realtime video capture
cam = cv2.VideoCapture('ipcamera')
frame_rate = 1
prev = 0

# Define min window size to be recognized as a face
minW = 0.1 * cam.get(3)
minH = 0.1 * cam.get(4)

while True:
    time_elapsed = time.time() - prev
    res, image = cam.read()
    if time_elapsed > 1. / frame_rate:
        prev = time.time()
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(int(minW), int(minH)),
        )
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
            id, confidence = recognizer.predict(gray[y:y + h, x:x + w])
            # Check if confidence is less than 100 ==> "0" is perfect match
            if confidence < 80:
                id = names[id]
                confidence = " {0}%".format(round(100 - confidence))
                GPIO.output(relay, 1)
                print("Ouverture du portail", datetime.datetime.now(), id, confidence)
                time.sleep(1)
                GPIO.output(relay, 0)
                cam.release()
                time.sleep(30)
                cam = cv2.VideoCapture('ipcamera')
            else:
                GPIO.output(relay, 0)
            cv2.putText(img, str(id), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
            cv2.putText(img, str(confidence), (x + 5, y + h - 5), font, 1, (255, 255, 0), 1)
        cv2.imshow('camera', img)
    k = cv2.waitKey(10) & 0xff  # Press 'ESC' to exit the video window
    if k == 27:
        break

# Do a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
Thank you in advance for your help!
It's likely to do with the current working directory.
Your error output shows that you are in your home directory. When you run the program from Thonny, I imagine Thonny sets the working directory to the script's folder.
When you specify the file to open, you are using a relative path. You could use the absolute path instead.
It was an easy fix: I replaced trainer/trainer.yml with the absolute path /home/pi/Cascades2/trainer/trainer.yml.
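A portable alternative, if you'd rather not hard-code the absolute path: resolve the files from the script's own location, so it works no matter where it is launched from. A minimal sketch (it assumes the trainer/ folder and the cascade file sit next to the script):

import os
import cv2

# Resolve files relative to this script, not the shell's working directory
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read(os.path.join(BASE_DIR, 'trainer', 'trainer.yml'))
faceCascade = cv2.CascadeClassifier(os.path.join(BASE_DIR, 'haarcascade_frontalface_default.xml'))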
Related
I am working on a facial detector script.
I have managed to create a dataset by capturing images from a webcam, saving them to a local directory and storing the data on my local database.
But when I try to run the main app to recognize the faces and display them to me, I get the following error:
runfile('C:/Users/JeanCamargo/Google Drive/python/college/face recognition/face recognition.py', wdir='C:/Users/JeanCamargo/Google Drive/python/college/face recognition')
Reloaded modules: dbconnect
Traceback (most recent call last):
  File "C:\Users\JeanCamargo\Google Drive\python\college\face recognition\face recognition.py", line 27, in <module>
    recognizer.read(r"trainner\trainningData.yml")
error: OpenCV(4.4.0) C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-6lylwdcz\opencv\modules\core\src\persistence.cpp:2089: error: (-215:Assertion failed) isMap() in function 'cv::FileNode::operator []'
Any ideas on what's causing this? The file I am running is as follows.
import cv2
import sys
import numpy as np
import pickle
from PIL import Image
from dbconnect import mySQL
import os

recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read(r"trainner\trainningData.yml")
cascPath = r"Classifiers\haarcascade_frontalface_alt.XML"
faceCascade = cv2.CascadeClassifier(cascPath)
#Id = 0
path = 'dataSet'

def getProfile(Id):
    query = "SELECT * FROM people WHERE ID =" + Id
    cursor = query.fetchall()
    mySQL.close()
    profile = None
    for row in cursor:
        profile = row
    return profile

video_capture = cv2.VideoCapture(1)
font = cv2.cv.InitFont(cv2.cv.CV_FONT_HERSHEY_SIMPLEX, 1, .5, 0, 2, 1)
profiles = {}

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    if ret == False:
        continue
    frame = cv2.flip(frame, 1)  # Flip image
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        Id, conf = recognizer.predict(gray[y:y+h, x:x+w])
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        profile = getProfile(id)
        if profile != None:
            cv2.cv.PutText(cv2.cv.fromarray(frame), profile[1], (x, y+h+30), 255)
            cv2.cv.PutText(cv2.cv.fromarray(frame), profile[2], (x, y+h+60), 255)
            cv2.cv.PutText(cv2.cv.fromarray(frame), profile[3], (x, y+h+90), 255)
            cv2.cv.PutText(cv2.cv.fromarray(frame), profile[4], (x, y+h+120), 255)
    # Display the resulting frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
I eventually found an answer to this.
It is happening due to:
cv2.face.LBPHFaceRecognizer_create()
This is the correct invocation for OpenCV, but most likely you do not have the face submodule, because your cv2.pyd was built without opencv_contrib.
There are a couple of options:
Rebuild from source with opencv_contrib; you need a C++ compiler and CMake for this.
Fall back to OpenCV 2.4 and use
cv2.createLBPHFaceRecognizer()
Once this is done, train the data again and it will work.
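A quicker check, for anyone hitting this today (this option is not from the original answer): the prebuilt opencv-contrib-python wheel from PyPI ships the face module, so a full rebuild is often unnecessary. A small sketch to verify which build you have:

import cv2

print(cv2.__version__)
# The face module only exists in builds that include opencv_contrib
# (for example the opencv-contrib-python package, as opposed to opencv-python)
if hasattr(cv2, 'face'):
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    print('cv2.face is available')
else:
    print('cv2.face is missing; this build was made without opencv_contrib')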
I wrote a program that plays a warning sound (an audio file) when a face is detected.
However, the sound always starts playing 10~30 seconds after the detection condition is triggered.
If the call is taken out of the conditional and the sound is played as background music instead, there is no problem.
May I ask how to solve it?
Thanks!
The code I use to play the sound:
winsound.PlaySound('1.wav', winsound.SND_FILENAME|winsound.SND_ASYNC)
The full code:
# -*- coding: utf-8 -*-
import dlib
import cv2
import imutils
import winsound
import os
import multiprocessing
import time
from winsound import SND_ASYNC
import pygame
from pygame import mixer

# Choose camera
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# Change size
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
# Get the default face detector
detector = dlib.get_frontal_face_detector()
# Load the 68-landmark model with shape_predictor; this detector is used for facial expression recognition
predictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')

# While the camera is on, every frame is processed
#pygame.mixer.init()
#mixer.music.load('incoming.mp3')
#mixer.music.play(-1)
#winsound.PlaySound('incoming.wav', winsound.SND_FILENAME | winsound.SND_ASYNC)
while cap.isOpened():
    # Read frame
    ret, frame = cap.read()
    # Detect faces
    face_rects, scores, idx = detector.run(frame, 0)
    # Go through the detection results
    for i, d in enumerate(face_rects):
        x1 = d.left()
        y1 = d.top()
        x2 = d.right()
        y2 = d.bottom()
        text = " %2.2f ( %d )" % (scores[i], idx[i])
        # Draw a rectangle around the detected face
        cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 4, cv2.LINE_AA)
        # Label the detection score and the face-orientation sub-detector index
        cv2.putText(frame, text, (x1, y1), cv2.FONT_HERSHEY_DUPLEX,
                    0.7, (255, 255, 255), 1, cv2.LINE_AA)
        # Play sound
        if scores[i] > 0.3 and idx[i] == 0:
            print(text)
            #pygame.mixer.pre_init(48000, 16, 2, 4096)
            winsound.PlaySound('incoming.wav', winsound.SND_FILENAME | winsound.SND_ASYNC)
    # Output to screen
    cv2.imshow("Face Detection", frame)
    # Exit when the ESC key is pressed
    if cv2.waitKey(10) == 27:
        break

# Free the capture
cap.release()
# Close all windows
cv2.destroyAllWindows()
(A gprof2dot profile of the run was attached here.)
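A note for anyone debugging the same symptom (this is a guess, not from the original thread): winsound.PlaySound is re-issued on every frame in which a face scores above the threshold, and each new call stops the current asynchronous playback and restarts the sound from the beginning, so it may not be heard until detections pause. A minimal sketch of a cooldown wrapper (the 3-second window is an assumed value):

import time
import winsound

SOUND_COOLDOWN = 3.0  # seconds between plays; assumed value
last_played = 0.0

def play_warning():
    """Play the warning sound at most once per cooldown window."""
    global last_played
    now = time.time()
    if now - last_played >= SOUND_COOLDOWN:
        winsound.PlaySound('incoming.wav',
                           winsound.SND_FILENAME | winsound.SND_ASYNC)
        last_played = now

Calling play_warning() in place of the bare PlaySound call inside the detection branch keeps the frame loop responsive while the sound plays.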
I've been following an OpenCV tutorial by Adrian Rosebrock for a home surveillance system, and it works. I also have an analog sensor wired through an ADS1115 analog-to-digital converter, which also works.
The problem is that once I import the ADS library inside the surveillance code, I get an error.
Error:
Traceback (most recent call last):
  File "ss_security.py", line 17, in <module>
    import Adafruit_ADS1x15
ImportError: No module named Adafruit_ADS1x15
This is the code:
# import the necessary packages
from pyimagesearch.tempimage import TempImage
from picamera.array import PiRGBArray
from picamera import PiCamera
from imutils.video import VideoStream
import warnings
import dropbox
import json
import datetime
import argparse
import imutils
import time
import cv2
import math
# Import the ADS1x15 module.
import Adafruit_ADS1x15

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
    help="path to the JSON configuration file")
ap.add_argument("-p", "--picamera", type=int, default=-1,
    help="whether or not the Raspberry Pi camera should be used")
args = vars(ap.parse_args())

# filter warnings, load the configuration and initialize the Dropbox
# client
warnings.filterwarnings("ignore")
conf = json.load(open(args["conf"]))
client = None

# check to see if Dropbox should be used
if conf["use_dropbox"]:
    # connect to dropbox and start the session authorization process
    client = dropbox.Dropbox(conf["dropbox_access_token"])
    print("[SUCCESS] dropbox account linked")

# initialize the video stream and allow the camera sensor to warm up
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
vs.resolution = tuple(conf["resolution"])
vs.framerate = conf["fps"]
rawCapture = PiRGBArray(vs, size=tuple(conf["resolution"]))

# allow the camera to warm up, then initialize the average frame, last
# uploaded timestamp, and frame motion counter
print("[INFO] warming up...")
time.sleep(conf["camera_warmup_time"])
avg = None
lastUploaded = datetime.datetime.now()
motionCounter = 0

# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=500)
    text = "Unoccupied"

    # draw the timestamp on the frame
    timestamp = datetime.datetime.now()
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.35, (0, 0, 255), 1)

    # convert it to grayscale, and blur it
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the average frame is None, initialize it
    if avg is None:
        print("[INFO] starting background model...")
        avg = gray.copy().astype("float")
        rawCapture.truncate(0)
        continue

    # accumulate the weighted average between the current frame and
    # previous frames, then compute the difference between the current
    # frame and running average
    cv2.accumulateWeighted(gray, avg, 0.5)
    frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))

    # threshold the delta image, dilate the thresholded image to fill
    # in holes, then find contours on the thresholded image
    thresh = cv2.threshold(frameDelta, conf["delta_thresh"], 255,
        cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < conf["min_area"]:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"

    # draw the text and timestamp on the frame
    ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, ts, (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX,
        0.35, (0, 0, 255), 1)
    # display temp
    cv2.putText(frame, "Temp: 30 C".format(text), (250, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    # check to see if the room is occupied
    if text == "Occupied":
        # check to see if enough time has passed between uploads
        if (timestamp - lastUploaded).seconds >= conf["min_upload_seconds"]:
            # increment the motion counter
            motionCounter += 1

            # check to see if the number of frames with consistent motion is
            # high enough
            if motionCounter >= conf["min_motion_frames"]:
                # check to see if dropbox should be used
                if conf["use_dropbox"]:
                    # write the image to a temporary file
                    t = TempImage()
                    cv2.imwrite(t.path, frame)

                    # upload the image to Dropbox and clean up the temporary image
                    print("[UPLOAD] {}".format(ts))
                    path = "/{base_path}/{timestamp}.jpg".format(
                        base_path=conf["dropbox_base_path"], timestamp=ts)
                    client.files_upload(open(t.path, "rb").read(), path)
                    t.cleanup()

                # update the last uploaded timestamp and reset the motion
                # counter
                lastUploaded = timestamp
                motionCounter = 0

    # otherwise, the room is not occupied
    else:
        motionCounter = 0

    # check to see if the frames should be displayed to screen
    if conf["show_video"]:
        # display the security feed
        cv2.imshow("Security Feed", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key is pressed, break from the loop
        if key == ord("q"):
            break

    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
I have just imported the library; I haven't done any computations or readings yet. As you can see, I placed a default temp value.
The library is located inside pi/Adafruit_Python_ADS1x15
Steps taken:
Installed the ADS library inside the cv environment (Error: destination path already exists)
Tried from Adafruit_Python_ADS1x15 import Adafruit_ADS1x15 (Error: No module named Adafruit_Python_ADS1x15)
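One thing worth checking (an assumption, since the post doesn't show how the library was installed): a package installed system-wide is not visible inside a virtual environment such as the cv environment; it has to be installed with that environment's own pip (for example pip install adafruit-ads1x15 after activating the environment). As a stopgap, you can also point Python at the library checkout directly. A minimal sketch, assuming the folder mentioned above lives under /home/pi:

import sys

# Hypothetical workaround: make the library checkout importable.
# The path is an assumption based on "pi/Adafruit_Python_ADS1x15".
sys.path.insert(0, '/home/pi/Adafruit_Python_ADS1x15')

import Adafruit_ADS1x15

adc = Adafruit_ADS1x15.ADS1115()  # same class the Adafruit examples use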
I am writing code to detect a human fist inside a box in the webcam feed. What I want is: after a fist is detected inside the box, the program waits for 2 seconds; if there is still a fist inside after 2 seconds, it puts up the text "FIST".
Here is my code:
import cv2
import numpy as np
from detect import detect_fist
import time

cap = cv2.VideoCapture(0)
isFist = 0

while cap.isOpened():
    ret, img = cap.read()
    img = cv2.flip(img, 1)
    cv2.rectangle(img, (50, 50), (150, 150), (0, 255, 0), 0)
    confirm = img[50:150, 50:150]  # narrow the whole webcam frame to a box
    isFist = detect_fist(confirm)  # a function to detect a fist inside that box
    if isFist:
        timeout = int(time.time()) + 2
        while 1:
            if int(time.time()) == int(timeout):
                cv2.putText(img, "Fist", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
                break
    cv2.imshow('Gesture', img)
    k = cv2.waitKey(10)
    if k == 27:
        break
My problem is that after it detects my fist in the box, the camera display slows down and almost freezes. What I want is for the camera to keep working normally and to check for the fist again after 2 seconds.
I suspect this is because the condition if int(time.time()) == int(timeout) is checked in a tight loop running far faster than the webcam's frame rate, so the display cannot keep up.
Does anyone have any idea how to achieve this?
When a fist is detected, your code gets stuck in the inner while loop, which is why the image window appears 'stuck'. I recommend using a bool to keep track of the status. I've modified your code to show the effect, although I have not debugged it.
import cv2
import numpy as np
from detect import detect_fist
import time

cap = cv2.VideoCapture(0)
isFist = 0
current_time = 0
target_time = 0
first_fist_detection = False

while cap.isOpened():
    ret, img = cap.read()
    img = cv2.flip(img, 1)
    cv2.rectangle(img, (50, 50), (150, 150), (0, 255, 0), 0)
    confirm = img[50:150, 50:150]  # narrow the whole webcam frame to a box
    isFist = detect_fist(confirm)  # a function to detect a fist inside that box
    current_time = int(time.time())
    if isFist and not first_fist_detection:
        # first frame with a fist: start the 2-second timer
        first_fist_detection = True
        target_time = current_time + 2
    elif isFist and first_fist_detection:
        # fist still present: label it once the timer has run out
        if current_time > target_time:
            cv2.putText(img, "Fist", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
    else:
        # that is, no fist is detected
        first_fist_detection = False
    cv2.imshow('Gesture', img)
    k = cv2.waitKey(10)
    if k == 27:
        break
I am new to OpenCV and Python as well.
I am getting an error in my code that I have tried my best to fix. I want the code to capture 10-15 frames per second when it runs. Below are the code and the error I am getting; could anyone help me with the changes?
Code:
import cv2
import time
import sys
import numpy as np

if __name__ == '__main__':
    faceCascade = cv2.CascadeClassifier('C:\\Users\\Mudit\\Desktop\\Thesis\\CNN-master\\haarcascade_frontalface_default')
    faceNeighborsMax = 10
    neighborStep = 1

    # Start default camera
    video = cv2.VideoCapture(0)

    # Number of frames to capture
    num_frames = 10
    print("Capturing {0} frames".format(num_frames))

    # Start time
    start = time.time()

    # Grab a few frames
    for i in range(0, num_frames):
        ret, frame = video.read()
        frameGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        for neigh in range(1, faceNeighborsMax, neighborStep):
            faces = faceCascade.detectMultiScale(frameGray, 1.2, neigh)
            frameClone = np.copy(frame)

            # Display the image
            for (x, y, w, h) in faces:
                cv2.rectangle(frameClone, (x, y), (x + w, y + h), (255, 0, 0), 2)
            cv2.putText(frameClone, "# Neighbors = {}".format(neigh), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)
            cv2.imshow('Face Detection', frameClone)
            if cv2.waitKey(500) & 0xFF == 27:
                cv2.destroyAllWindows()
                sys.exit()

    # End time
    end = time.time()

    # Time elapsed
    seconds = end - start
    print("Time taken : {0} seconds".format(seconds))

    # Calculate frames per second
    fps = num_frames / seconds
    print("Estimated frames per second : {0}".format(fps))

    # Release video
    video.release()
Error:
Traceback (most recent call last):
  File "C:/Users/Mudit/PycharmProjects/CNN/findframerateofacamera/webcam.py", line 28, in <module>
    faces = faceCascade.detectMultiScale(frameGray, 1.2, neigh)
cv2.error: C:\projects\opencv-python\opencv\modules\objdetect\src\cascadedetect.cpp:1698: error: (-215) !empty() in function cv::CascadeClassifier::detectMultiScale
Since
frameGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
does not trigger an assertion, I think the image capture works correctly. You can add
cv2.imshow('Gray', frameGray)
to verify that the grayscale image is not empty.
The next suspect is the cascade classifier faceCascade. Check that the path and filename are correct and that the classifier has actually been loaded.
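A quick way to make that check explicit (a sketch; note that the path in the code above appears to be missing the .xml extension, which would produce exactly this (-215) !empty() error):

import cv2

cascPath = 'C:\\Users\\Mudit\\Desktop\\Thesis\\CNN-master\\haarcascade_frontalface_default.xml'
faceCascade = cv2.CascadeClassifier(cascPath)

# CascadeClassifier does not raise on a bad path; it silently loads empty,
# and detectMultiScale then fails with the (-215) !empty() assertion.
if faceCascade.empty():
    raise IOError('Failed to load cascade from {}'.format(cascPath))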