Unable to display text and bounding box around face in Streamlit - python

I'm building a facial expression recognition system with a real-time webcam feed, which I plan to host on Streamlit. The webcam works in Streamlit, but no text or bounding box is displayed around the face in the Streamlit frame.
Full Code:
import cv2
import streamlit as st
import numpy as np
from keras.models import model_from_json
st.title("Facial Expressions Recognition System")
start = st.checkbox('Start')
FRAME_WINDOW = st.image([])
camera = cv2.VideoCapture(0)
while start:
    _, frame = camera.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    FRAME_WINDOW.image(frame)
    emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
    # load json and create model
    json_file = open('./model/model4.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    emotion_model = model_from_json(loaded_model_json)
    # load weights into new model
    emotion_model.load_weights("./model/model4.h5")
    print("Loaded model from disk")
    face_detector = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # detect faces available on camera
    num_faces = face_detector.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)
    # take each face available on the camera and preprocess it
    for (x, y, w, h) in num_faces:
        cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (0, 255, 0), 4)
        roi_gray_frame = gray_frame[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, (48, 48)), -1), 0)
        # predict the emotions
        emotion_prediction = emotion_model.predict(cropped_img)
        maxindex = int(np.argmax(emotion_prediction))
        cv2.putText(frame, emotion_dict[maxindex], (x+5, y-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
else:
    st.write('Stopped')
Are there any mistakes in the code, or is there additional code that needs to be included? Any help is much appreciated.
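A likely cause: the frame is sent to Streamlit with FRAME_WINDOW.image(frame) before the rectangle and label are drawn on it, so the annotations never reach the page; the model and Haar cascade are also reloaded on every loop iteration, which only adds lag. A minimal sketch of a reordered loop, assuming the same model files, camera, and emotion_dict as above:

# load the model and cascade once, before the loop
json_file = open('./model/model4.json', 'r')
emotion_model = model_from_json(json_file.read())
json_file.close()
emotion_model.load_weights("./model/model4.h5")
face_detector = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}

while start:
    _, frame = camera.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    for (x, y, w, h) in face_detector.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5):
        cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (0, 255, 0), 4)
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(gray_frame[y:y+h, x:x+w], (48, 48)), -1), 0)
        maxindex = int(np.argmax(emotion_model.predict(cropped_img)))
        cv2.putText(frame, emotion_dict[maxindex], (x+5, y-20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
    # display the frame only after drawing on it
    FRAME_WINDOW.image(frame)

Everything above the loop now runs once, and the annotated frame is handed to FRAME_WINDOW.image only after the rectangle and text are drawn.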

Related

Adding OpenCV data to a database or excel sheet

import cv2
thres = 0.45
# img = cv2.imread('b.jpg')
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)
cap.set(3,640)
cap.set(4,480)
classFile = 'coco.names'
classNames = []
with open(classFile, 'rt') as f:
    classNames = f.read().rstrip('\n').split('\n')

configpath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightspath = 'frozen_inference_graph.pb'
net = cv2.dnn_DetectionModel(weightspath, configpath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
while True:
    success, img = cap.read()
    classIds, confs, bbox = net.detect(img, confThreshold=0.5)
    print(classIds, bbox)
    if len(classIds) != 0:
        for classId, confidence, box in zip(classIds.flatten(), confs.flatten(), bbox):
            cv2.rectangle(img, box, color=(0, 255, 50), thickness=3)
            cv2.putText(img, classNames[classId-1], (box[0]+10, box[1]+30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow("output", img)
    cv2.waitKey(1)
I made this code to detect objects using the COCO dataset. Now my aim is to store the detected object names and quantities in a database. What would be the approach to do so? I am finding it a bit tricky.
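One possible approach, as a sketch rather than a definitive answer: count the detected class names per frame with a Counter and insert the counts into an SQLite table. The detections table name and its schema here are assumptions for illustration:

import sqlite3
from collections import Counter
from datetime import datetime

conn = sqlite3.connect('detections.db')
conn.execute('CREATE TABLE IF NOT EXISTS detections (ts TEXT, object TEXT, quantity INTEGER)')

def save_detections(classIds, classNames):
    # count how many times each class appears in this frame
    counts = Counter(classNames[classId - 1] for classId in classIds.flatten())
    ts = datetime.now().isoformat()
    for name, qty in counts.items():
        conn.execute('INSERT INTO detections VALUES (?, ?, ?)', (ts, name, qty))
    conn.commit()

# inside the while loop, after net.detect(...):
# if len(classIds) != 0:
#     save_detections(classIds, classNames)

The same counts dictionary could just as easily be written to an Excel sheet with a library such as openpyxl instead of SQLite.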

Save Unrecognized Faces in an Array

I am working on a "Face Recognition" project.
I have done the recognition part; my project is based on three code files:
Face Detection (takes around 50 sample images of the person)
Face Train (trains on the captured images)
Face Recognition (recognizes trained faces in real time)
Now, if an unknown person who is not in the dataset is detected more than 3 times on camera, I want the face recognition program to wait for a while and run face detection, which will take samples of that person and train on them, so that the next time the person is on camera he/she will be recognizable. Here is the code for my face recognition:
import cv2
from picamera.array import PiRGBArray
from picamera import PiCamera
import picamera
import numpy as np
import pickle
import RPi.GPIO as GPIO
from time import sleep
from subprocess import call
import time
import datetime
import boto3
from botocore.client import Config
import serial
# port = serial.Serial('/dev/ttyUSB0',9600)
now = datetime.datetime.now()
currentDate = str(now.month) + "_" + str(now.day) + "_" + str(now.year)+ "_" + str(now.hour)
cloudpath ='Videos/cctvfootage'+currentDate+'.mp4'
with open('labels', 'rb') as f:
    dict = pickle.load(f)
    f.close()

# set up the camera
camera = PiCamera()
camera.resolution = (600, 500)
camera.framerate = 30
rawCapture = PiRGBArray(camera, size=(600, 500))

# Load prebuilt model for frontal face detection
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# Create Local Binary Patterns Histograms for face recognition
recognizer = cv2.face.createLBPHFaceRecognizer()
# Load the trained model
recognizer.load("trainer.yml")
font = cv2.FONT_HERSHEY_SIMPLEX
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'X264')
out = cv2.VideoWriter(cloudpath, fourcc, 2.0, (camera.resolution))
lastRes = ''
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    frame = frame.array
    cv2.rectangle(frame, (0, 0), (455, 30), (0, 0, 0), thickness=-1)
    cv2.putText(frame, time.asctime(), (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), lineType=cv2.LINE_AA, thickness=2)
    data = open(cloudpath, 'rb')
    # Convert the captured frame into grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
    # For each face in faces
    for (x, y, w, h) in faces:
        # Create rectangle around the face
        roiGray = gray[y:y+h, x:x+w]
        # Recognize which ID the face belongs to
        id_, conf = recognizer.predict(roiGray)
        for name, value in dict.items():
            if value == id_:
                break
        print(name)
        print(conf)
        # if confidence <= 70 the door will open, otherwise not
        # Put text describing who is in the picture
        if conf <= 70:
            cv2.rectangle(frame, (x-20, y-20), (x+w+20, y+h+20), (0, 255, 0), 4)
            cv2.putText(frame, name, (x, y-40), font, 1, (255, 255, 255), 3)
        else:
            cv2.rectangle(frame, (x-20, y-20), (x+w+20, y+h+20), (0, 255, 0), 4)
            cv2.putText(frame, "Unknown", (x, y-40), font, 1, (255, 255, 255), 3)
    cv2.imshow('frame', frame)
    out.write(frame)
    key = cv2.waitKey(1)
    rawCapture.truncate(0)
    # if the Esc key is pressed, close the cam
    if key == 27:
        print("Video Saved Successfully..")
        break
cv2.destroyAllWindows()
Did you try deepface? Its stream function accesses your webcam and applies real-time face recognition as well as facial attribute analysis (age, gender and emotion prediction). You can also switch the streaming source from the webcam to a video file.
#!pip install deepface
from deepface import DeepFace
DeepFace.stream("my_db")
Here, my_db is a folder that stores my facial database.
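For the unknown-person requirement in the original loop, one hedged sketch is to keep a counter of consecutive "Unknown" results and trigger sample collection once it passes 3. The unknown_count variable and the collect_samples() helper are hypothetical names; collect_samples() stands in for your Face Detection and Face Train scripts:

unknown_count = 0  # consecutive frames where the face was not recognized

def collect_samples():
    # hypothetical stand-in: run your Face Detection + Face Train steps here
    print("capturing samples of the unknown person and retraining...")

def handle_prediction(conf):
    global unknown_count
    if conf > 70:  # same threshold the loop above uses for "Unknown"
        unknown_count += 1
    else:
        unknown_count = 0
    if unknown_count > 3:
        unknown_count = 0
        collect_samples()

Calling handle_prediction(conf) right after recognizer.predict(roiGray) in the loop would give the "detected more than 3 times" behavior described in the question.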

LBPHFaceRecognizer predict function always returns 1

I am developing a facial recognition system, and for that I selected the LBPH algorithm. I collected sample images of the user and trained the model. The problem is that while recognizing a face, predict() of the LBPH recognizer always returns the same value for the label but a different value for the confidence. Even if the face is unknown, it returns 1.
Technologies I have been using: Python 3.7.4, OpenCV 4.1.2
Code to collect sample images
import cv2
import numpy as np
import os
import requests
import time
from PIL import Image
class CollectFaceWebCam():
    def __init__(self, sid):
        self.studentId = sid
        # capture webcam
        self.LiveWebCamera = cv2.VideoCapture(0)
        # pre-trained dataset (haar-cascade classifier)
        self.faceDataSet = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
        # sample image capture counter
        self.imgCounter = 0
        self.directoryName = 'sampleImgFolder'
        # check path
        if not os.path.exists(self.directoryName):
            os.makedirs(self.directoryName)
        if not os.path.exists(self.directoryName + '/' + self.studentId):
            os.makedirs(self.directoryName + '/' + self.studentId)

    def gen(self):
        while True:
            condition, frame = self.LiveWebCamera.read()  # capture frame
            img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # conversion to gray scale
            # face detection
            faces = self.faceDataSet.detectMultiScale(  # detect face sizes
                img,
                scaleFactor=1.3,
                minNeighbors=5,
                minSize=(100, 100),
                flags=cv2.CASCADE_SCALE_IMAGE
            )
            for (x, y, w, h) in faces:
                end_crd_x = x + w  # face end coordinates
                end_crd_y = y + h
                # draw rectangle
                ## params: image, start point, end point, color, thickness
                cv2.rectangle(frame, (x, y), (end_crd_x, end_crd_y), (0, 255, 33), 1)
                # accepts multiple faces
                if len(faces) >= 0:
                    # face must be single in frame
                    if len(faces) == 1:
                        detectedImg = img[y:y + h, x:x + w]
                        # checking blurriness of image
                        blurValue = cv2.Laplacian(detectedImg, cv2.CV_64F).var()
                        # ignoring the blurry images
                        if not blurValue <= 60:
                            newImg = img[y:y + h, x:x + w]  # new img
                            # saving the detected faces
                            filename = '{}\\{}\\{}\\{}_{}'.format(os.getcwd(), self.directoryName, self.studentId, self.studentId, self.imgCounter) + '.jpg'
                            cv2.imwrite(filename, newImg)
                            self.imgCounter += 1
                    else:
                        cv2.putText(frame, "Multiple Face not allowed", (50, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (237, 20, 5), thickness=2)
            cv2.putText(frame, "Collecting Sample", (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (250, 250, 250), thickness=3)
            cv2.putText(frame, "Image Count " + str(self.imgCounter), (50, 200), cv2.FONT_HERSHEY_SIMPLEX, 2, (237, 20, 5), thickness=2)
            cv2.imshow('Collecting Sample', frame)  # display frames
            k = cv2.waitKey(100) & 0xff  # capture when user presses 'esc'
            if k == 27:
                break
            elif self.imgCounter == 110:
                break
        self.LiveWebCamera.release()  # stop video capture
        cv2.destroyAllWindows()  # close all windows

class CleanSampleImages():
    def __init__(self):
        self.faceDataset = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
        self.eyeDataset = cv2.CascadeClassifier('resources/haarcascade_eye.xml')
        self.targetFolder = 'sampleImgFolder'

    def checkFace(self):
        os.chdir(self.targetFolder)
        for directory in os.listdir():
            os.chdir(directory)
            for files in os.listdir():
                imagePath = '{}/{}'.format(os.getcwd(), files)
                imagePil = Image.open(imagePath).convert('L')
                imageNumpy = np.array(imagePil)  # conversion of normal image to numpy array
                # detect face
                faces = self.faceDataset.detectMultiScale(imageNumpy)
                # deleting image file if face is not found
                if not len(faces) == 1:
                    os.remove(files)
                    break
                for (x, y, w, h) in faces:
                    # detect eyes from the selected face
                    eyes = self.eyeDataset.detectMultiScale(imageNumpy)
                    if not len(eyes) > 0 and len(eyes) <= 2:
                        # deleting image file if eye count of image is less than 0 or more than 2
                        os.remove(files)
            os.chdir('../')
        os.chdir('../')

# id must be in X-X-ID format, e.g. a-b-342
t = CollectFaceWebCam('sa-t-1')
t.gen()
clean = CleanSampleImages()
clean.checkFace()
The above code consists of two classes, CollectFaceWebCam and CleanSampleImages. CollectFaceWebCam collects the sample images, and CleanSampleImages cleans the collected data: if an image does not contain a face, the file is deleted.
Code to Train images
import os
import cv2
import numpy as np
from PIL import Image
class Trainer():
    def __init__(self):
        self.recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.targetImagesDirectory = "sampleImgFolder"
        self.dataset = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')

    def getImgwithId(self):
        sampleImage, sampleImageId = [], []
        filename = '{}\\{}'.format(os.getcwd(), self.targetImagesDirectory)
        if os.path.exists(filename):
            os.chdir(filename)
            print('current path is ' + os.getcwd())
            for f in os.listdir():
                imgPath = os.path.join(filename, f)
                os.chdir(imgPath)
                for file in os.listdir():
                    # retrieving id from filename (filename format: ta-s-ID_Filename.jpg)
                    id = file.split('_')
                    id = id[0].split('-')
                    id = id[2]
                    imageFilePath = imgPath + '\\' + file
                    imagePil = Image.open(imageFilePath).convert('L')
                    # conversion to numpy array
                    imageNp = np.array(imagePil, 'uint8')
                    faces = self.dataset.detectMultiScale(imageNp)
                    for (x, y, w, h) in faces:
                        sampleImage.append(imageNp)
                        sampleImageId.append(id)
                os.chdir('../')
            os.chdir('../')
        return sampleImage, np.array(sampleImageId, dtype=int)

    def train(self, data, label):
        try:
            self.recognizer.train(data, label)
            self.msg = 'Training Successful'
            print('writing')
            self.recognizer.write('date.yml')
            print('writing finished')
        except:
            self.msg = 'Core: Training Error'
            print('except')

tr = Trainer()
sampleFaces, sampleFaceId = tr.getImgwithId()
tr.train(sampleFaces, sampleFaceId)
Code to recognize face
import os
import cv2
import numpy as np
from PIL import Image
class Recognizer():
    def __init__(self):
        self.recognizer = cv2.face.LBPHFaceRecognizer_create()
        self.recognizer.read('date.yml')
        self.targetImagesDirectory = "sampleImgFolder"
        self.dataset = cv2.CascadeClassifier('resources/haarcascade_frontalface_default.xml')
        self.captureVideo = cv2.VideoCapture(0)
        self.font = cv2.FONT_HERSHEY_SIMPLEX  # font
        self.predictedUser = []

    def gen(self):
        while True:
            condition, frame = self.captureVideo.read()  # capture frame
            img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # conversion to gray scale
            # face detection
            faces = self.dataset.detectMultiScale(  # detect face sizes
                img,
                scaleFactor=1.3,
                minNeighbors=5,
                minSize=(100, 100),
                flags=cv2.CASCADE_SCALE_IMAGE
            )
            for (x, y, w, h) in faces:
                end_crd_x = x + w  # face end coordinates
                end_crd_y = y + h
                # draw rectangle
                ## params: image, start point, end point, color, thickness
                cv2.rectangle(frame, (x, y), (end_crd_x, end_crd_y), (0, 255, 33), 1)
                predictUser, confidence = self.recognizer.predict(img[y:y+h, x:x+w])
                self.predictedUser.append(predictUser)
            cv2.imshow('test', frame)
            k = cv2.waitKey(100) & 0xff  # capture when user presses 'esc'
            if k == 27:
                break
        self.captureVideo.release()
        cv2.destroyAllWindows()

r = Recognizer()
r.gen()
print(r.predictedUser)
"predictUser, confidence = self.recognizer.predict(img[y:y+h,x:x+w])" line of code in Recognizer class always return same value for label. The output of code to recognize face is attached below:
I would love to know why and where the problem is, as My skills and research could not lead me to identification of problem.
It might be because of the data-collection process. I see you running a cascade classifier multiple times; you could limit that. While checking for a face on the webcam, you could run the classifier once and store only the extracted/cropped faces. Also, during prediction, use the confidence as a threshold to limit false predictions.
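As a concrete sketch of those two suggestions (not a definitive fix): in Trainer.getImgwithId, append the cropped face region rather than the whole image, and in Recognizer.gen, treat high-distance predictions as unknown. The threshold of 70 is an assumption you would tune on your own data:

# in Trainer.getImgwithId(): store only the cropped face, not the full image
for (x, y, w, h) in faces:
    sampleImage.append(imageNp[y:y+h, x:x+w])
    sampleImageId.append(id)

# in Recognizer.gen(): for LBPH, a lower confidence value means a better match
predictUser, confidence = self.recognizer.predict(img[y:y+h, x:x+w])
if confidence < 70:  # assumed threshold; tune on your data
    self.predictedUser.append(predictUser)
else:
    self.predictedUser.append(-1)  # treat as unknown

Training on full images rather than face crops is a plausible reason every prediction collapses to the same label, since the histograms then describe mostly background.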

OpenCV face recognition is too slow

I tried to use OpenCV with face_recognition to get real-time face recognition, but it ends up slow and laggy. Any help, please?
Code:
import face_recognition
import os
import cv2
from PIL import Image
appdata = os.getenv('APPDATA') + "/Project/"
face_cascade = cv2.CascadeClassifier((appdata + "data/face_cascade.xml").replace("\\", "/"))
cap = cv2.VideoCapture(0) #Capture video (OpenCV)
def Main():
    while True:  # OpenCV: start video capture from webcam
        ret, frame = cap.read()
        gray_scale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray_scale, 1.5, 5)
        for x, y, w, h in faces:
            # Start face recognition (face_recognition)
            roi_color = frame[y: (y + h) - 15, x: (x + w) - 15]  # This cuts the background, leaving only the face
            cv2.rectangle(frame, (x, y), ((x + w) - 15, (y + h) - 15), (255, 35, 36), 2)
            face_encodings = face_recognition.face_encodings(roi_color)  # This encodes face features and causes lag
        cv2.imshow('sysPy', frame)
        if cv2.waitKey(20) == 27:
            break
    cap.release()
    cv2.destroyAllWindows()

Main()
I was also thinking about the line face_encodings = face_recognition.face_encodings(roi_color): instead of encoding every frame, skip frames to reduce the lag. Thanks!
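Frame skipping is a reasonable idea. A hedged sketch, reusing the cap and face_cascade set up above, that pays the encoding cost only on every fifth frame while still drawing rectangles on every frame (the skip interval of 5 is an arbitrary assumption):

frame_count = 0
last_encodings = []  # most recent encodings, reused on skipped frames

def Main():
    global frame_count, last_encodings
    while True:
        ret, frame = cap.read()
        gray_scale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray_scale, 1.5, 5)
        for x, y, w, h in faces:
            roi_color = frame[y:(y + h) - 15, x:(x + w) - 15]
            cv2.rectangle(frame, (x, y), ((x + w) - 15, (y + h) - 15), (255, 35, 36), 2)
            # only encode on every fifth frame
            if frame_count % 5 == 0:
                last_encodings = face_recognition.face_encodings(roi_color)
        frame_count += 1
        cv2.imshow('sysPy', frame)
        if cv2.waitKey(20) == 27:
            break
    cap.release()
    cv2.destroyAllWindows()

Another common speedup is to downscale the frame before encoding, e.g. cv2.resize(frame, (0, 0), fx=0.25, fy=0.25), and scale the face locations back up for drawing.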

How to fetch live stream video details from webcam via browser to python?

I am working on detecting faces and emotions from a live video stream using Python, TensorFlow, OpenCV and a USB camera.
It works fine via the terminal locally. Now I am trying to run the webcam through the browser.
How do I send the video stream via WebRTC to Python?
Is it possible to fetch the WebRTC result frame by frame in Python? I want to do some processing there.
I converted my Keras-trained model to TensorFlow.js, so I have a model.json file. How do I load the converted model in a JS file?
I need model.predict_class() to detect face emotions.
How do I import webcam.js in JS to access the webcam for the live stream?
convert_model_krs_tfjs.py
-------------------------
import keras
from keras.models import model_from_json
from keras.optimizers import SGD
import numpy as np
from time import sleep
import tensorflowjs as tfjs
model = model_from_json(open('Face_model_architecture.json').read())
model.load_weights('Face_model_weights.h5')
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.save("Keras-64x2-10epoch")
tfjs.converters.save_keras_model(model, "face")
My working Python code:
----------------------
detectemotion.py
----------------
from keras.models import model_from_json
from keras.optimizers import SGD
import numpy as np
from time import sleep
model = model_from_json(open('./models/Face_model_architecture.json').read())
#model.load_weights('_model_weights.h5')
model.load_weights('./models/Face_model_weights.h5')
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
print(model)
def extract_face_features(gray, detected_face, offset_coefficients):
    (x, y, w, h) = detected_face
    # print(x, y, w, h)
    horizontal_offset = np.int(np.floor(offset_coefficients[0] * w))
    vertical_offset = np.int(np.floor(offset_coefficients[1] * h))
    extracted_face = gray[y+vertical_offset:y+h,
                          x+horizontal_offset:x-horizontal_offset+w]
    # print(extracted_face.shape)
    new_extracted_face = zoom(extracted_face, (48. / extracted_face.shape[0],
                                               48. / extracted_face.shape[1]))
    new_extracted_face = new_extracted_face.astype(np.float32)
    new_extracted_face /= float(new_extracted_face.max())
    return new_extracted_face

from scipy.ndimage import zoom

def detect_face(frame):
    cascPath = "./models/haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(cascPath)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    detected_faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=6,
        minSize=(48, 48),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    return gray, detected_faces
import cv2
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
video_capture = cv2.VideoCapture(0)
while True:
    # Capture frame-by-frame
    # sleep(0.8)
    ret, frame = video_capture.read()
    # detect faces
    gray, detected_faces = detect_face(frame)
    face_index = 0
    # predict output
    for face in detected_faces:
        (x, y, w, h) = face
        if w > 100:
            # draw rectangle around face
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            # extract features
            extracted_face = extract_face_features(gray, face, (0.075, 0.05))  # (0.075, 0.05)
            # predict smile
            prediction_result = model.predict_classes(extracted_face.reshape(1, 48, 48, 1))
            # draw extracted face in the top right corner
            frame[face_index * 48: (face_index + 1) * 48, -49:-1, :] = cv2.cvtColor(extracted_face * 255, cv2.COLOR_GRAY2RGB)
            # annotate main image with a label
            if prediction_result == 3:
                cv2.putText(frame, "Happy!!", (x, y), cv2.FONT_ITALIC, 2, (255, 255, 255), 2)
            elif prediction_result == 0:
                cv2.putText(frame, "Angry", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
            elif prediction_result == 1:
                cv2.putText(frame, "Disgust", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
            elif prediction_result == 2:
                cv2.putText(frame, "Fear", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
            elif prediction_result == 4:
                cv2.putText(frame, "Sad", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
            elif prediction_result == 5:
                cv2.putText(frame, "Surprise", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
            else:
                cv2.putText(frame, "Neutral", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
            # increment counter
            face_index += 1
    # Display the resulting frame
    cv2.imshow('Video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
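On getting frames from the browser into Python: full WebRTC is one option, but a simpler hedged sketch is to POST JPEG frames from the browser to a small Flask endpoint and decode them with OpenCV. The /frame route and the setup here are assumptions for illustration; the browser side would capture frames from a canvas element and send them with fetch():

import cv2
import numpy as np
from flask import Flask, request

app = Flask(__name__)

@app.route('/frame', methods=['POST'])
def frame():
    # the browser POSTs one JPEG-encoded frame per request
    data = np.frombuffer(request.get_data(), dtype=np.uint8)
    img = cv2.imdecode(data, cv2.IMREAD_COLOR)
    if img is None:
        return 'bad frame', 400
    gray, detected_faces = detect_face(img)  # reuse the detection code above
    # ... run extract_face_features / model.predict_classes here ...
    return 'ok'

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)

This keeps the Keras model and all prediction logic in Python, so converting the model to TensorFlow.js becomes unnecessary; the browser only captures and uploads frames.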