Trouble with a face detection program using Python

I am working on this face detection project and have also run the code you gave for reference. Whenever I run it I get this error: 'module' object has no attribute 'CascadeClassifier'. What should I do?
Code:
from __future__ import print_function
import numpy as np
import cv2

# local modules
from video import create_capture
from common import clock, draw_str

def detect(img, cascade):
    rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30),
                                     flags=cv2.CASCADE_SCALE_IMAGE)
    if len(rects) == 0:
        return []
    rects[:,2:] += rects[:,:2]
    return rects

def draw_rects(img, rects, color):
    for x1, y1, x2, y2 in rects:
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

if __name__ == '__main__':
    import sys, getopt
    print(__doc__)
    args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except:
        video_src = 0
    args = dict(args)
    cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml")
    nested_fn = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml")
    cascade = cv2.CascadeClassifier(cascade_fn)
    nested = cv2.CascadeClassifier(nested_fn)
    cam = create_capture(video_src, fallback='synth:bg=../data/lena.jpg:noise=0.05')
    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        t = clock()
        rects = detect(gray, cascade)
        vis = img.copy()
        draw_rects(vis, rects, (0, 255, 0))
        if not nested.empty():
            for x1, y1, x2, y2 in rects:
                roi = gray[y1:y2, x1:x2]
                vis_roi = vis[y1:y2, x1:x2]
                subrects = detect(roi.copy(), nested)
                draw_rects(vis_roi, subrects, (255, 0, 0))
        dt = clock() - t
        draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
        cv2.imshow('facedetect', vis)
        if cv2.waitKey(5) == 27:
            break
    cv2.destroyAllWindows()
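This error usually means the cv2 module Python imported is not the real OpenCV binding: most often a file in the working directory (for example, a script saved as cv2.py) is shadowing the installed package, or the installation itself is old or broken. A quick diagnostic sketch, assuming the question's code itself is fine:

import cv2

# If this prints a path inside your own project folder, a local file is
# shadowing the installed OpenCV package; rename or delete it (and any
# cv2.pyc next to it).
print(cv2.__file__)
print(cv2.__version__)

# Confirm the symbol exists and the cascade XML is actually found on disk:
cascade = cv2.CascadeClassifier("../../data/haarcascades/haarcascade_frontalface_alt.xml")
print("cascade loaded:", not cascade.empty())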

Related

urllib.error.HTTPError: HTTP Error 404: Not Found in Python using ESP32

I'm working on a face-detection attendance project in Python using an ESP32-CAM: it detects a student's face and then adds the name to a spreadsheet. I already did the Arduino side using the CameraWebServer example. The Python code gives me an error about the URL, which it cannot read. Here is my code:
import pandas as pd
import cv2
import urllib.request
import numpy as np
import os
from datetime import datetime
import face_recognition

path = r'C:\Users\96655\Downloads\ATTENDANCE\image_folder'
url = 'http://192.168.8.135/cam-hi.jpg'
##'''cam.bmp / cam-lo.jpg /cam-hi.jpg / cam.mjpeg '''

if 'Attendance.csv' in os.listdir(os.path.join(os.getcwd(), 'ATTENDANCE')):
    print("there iss..")
    os.remove("Attendance.csv")
else:
    df = pd.DataFrame(list())
    df.to_csv("Attendance.csv")

images = []
classNames = []
myList = os.listdir(path)
print(myList)
for cl in myList:
    curImg = cv2.imread(f'{path}/{cl}')
    images.append(curImg)
    classNames.append(os.path.splitext(cl)[0])
print(classNames)

def findEncodings(images):
    encodeList = []
    for img in images:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        encode = face_recognition.face_encodings(img)[0]
        encodeList.append(encode)
    return encodeList

def markAttendance(name):
    with open("Attendance.csv", 'r+') as f:
        myDataList = f.readlines()
        nameList = []
        for line in myDataList:
            entry = line.split(',')
            nameList.append(entry[0])
        if name not in nameList:
            now = datetime.now()
            dtString = now.strftime('%H:%M:%S')
            f.writelines(f'\n{name},{dtString}')

encodeListKnown = findEncodings(images)
print('Encoding Complete')

#cap = cv2.VideoCapture(0)
while True:
    #success, img = cap.read()
    img_resp = urllib.request.urlopen(url)
    imgnp = np.array(bytearray(img_resp.read()), dtype=np.uint8)
    img = cv2.imdecode(imgnp, -1)
    # img = captureScreen()
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    facesCurFrame = face_recognition.face_locations(imgS)
    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
    for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
        # print(faceDis)
        matchIndex = np.argmin(faceDis)
        if matches[matchIndex]:
            name = classNames[matchIndex].upper()
            # print(name)
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2),
                          (0, 255, 0), cv2.FILLED)
            cv2.putText(img, name, (x1 + 6, y2 - 6),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
            markAttendance(name)
    cv2.imshow('Webcam', img)
    key = cv2.waitKey(5)
    if key == ord('q'):
        break
cv2.destroyAllWindows()
And this is the error it gives me:
urllib.error.HTTPError: HTTP Error 404: Not Found
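A 404 means the ESP32's web server is reachable but does not serve that exact path; which paths exist depends on the sketch flashed to the board (the stock CameraWebServer example, for instance, serves stills at /capture and streams at :81/stream rather than /cam-hi.jpg). A small probe like the sketch below, built around the candidate endpoints already listed in the comment in the question's code, can confirm which path the firmware actually serves (the fetch_frame helper and the /capture fallback are assumptions, not part of the original code):

import urllib.request
import urllib.error
import numpy as np
import cv2

# Candidate endpoints copied from the comment in the question's code, plus
# /capture from the stock CameraWebServer example; which of them exists
# depends on the firmware flashed to the ESP32-CAM.
candidates = ['http://192.168.8.135/cam-hi.jpg',
              'http://192.168.8.135/cam-lo.jpg',
              'http://192.168.8.135/cam.bmp',
              'http://192.168.8.135/capture']

def fetch_frame(urls):
    """Try each URL in turn; return the first one that yields a decodable frame."""
    for u in urls:
        try:
            resp = urllib.request.urlopen(u, timeout=5)
            data = np.frombuffer(resp.read(), dtype=np.uint8)
            img = cv2.imdecode(data, cv2.IMREAD_COLOR)
            if img is not None:
                return u, img
        except urllib.error.HTTPError as e:
            print(f'{u} -> HTTP {e.code}')
        except urllib.error.URLError as e:
            print(f'{u} -> {e.reason}')
    return None, None

url, frame = fetch_frame(candidates)
print('working endpoint:', url)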

OpenCV: how to detect in a specific position

Can I detect fires only when they fall inside a specific square region? I already have code that detects fire anywhere in the view; can I restrict it to a smaller region?
Also, can I run a function (named "main") once every 5 minutes, at different times, while the loop keeps running? Here is my current code:
import cv2
import numpy as np
import math
import time
import asyncio
from asyncio import sleep

yukseklik = int(input("Yukseklik giriniz "))
hiz = input("Hizi giriniz ")
global kez
kez = 0
cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Red color
    low_red = np.array([0, 80, 20])
    high_red = np.array([35, 255, 255])
    kernal = np.ones((5, 5), "uint8")
    low_red1 = np.array([160, 100, 20])
    high_red1 = np.array([190, 255, 255])
    red_mask = cv2.inRange(hsv_frame, low_red, high_red)
    red = cv2.bitwise_and(frame, frame, mask=red_mask)
    contours, hierarchy = cv2.findContours(red_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cv2.rectangle(frame, (213, 160), (426, 320), (255, 255, 255), 4)
    for pic, contour in enumerate(contours):
        area = cv2.contourArea(contour)
        if area > 300:
            x, y, w, h = cv2.boundingRect(contour)
            imframe = cv2.rectangle(frame, (x-20, y+20), (x + w, y + h), (255, 255, 255), 2)
            if x > 213 and x < 426 and y < 320 and y > 160:
                if kez == 0:
                    def main():
                        g = 9.80
                        y = 2*(yukseklik-15)
                        u = float(y)/9.80
                        x_ = math.sqrt(u)
                        x_ = x_*float(hiz)
                        xi = float(yukseklik)*1.73205080756887729352744463415059
                        print("x= ", str(xi))
                        print("x'= ", str(x_))
                        t = (float(xi)-float(x_))/float(hiz)
                        print("t= ", t)
                        global kez
                        kez = kez+1
                        asyncio.sleep(5)
                        kez = 0
                    main()
    cv2.imshow("Frame", frame)
    cv2.imshow("Red", red)
    key = cv2.waitKey(1)
    if key == 27:
        break
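Note that asyncio.sleep(5) called without await does not pause anything; it just creates a coroutine object that is immediately discarded, so kez is reset right away. For the second question, running main() at most once every 5 minutes while the capture loop keeps going, a plain time-based guard is enough. A minimal sketch (the 300-second interval and the maybe_run_main name are assumptions, not part of the original code):

import time

INTERVAL = 300  # seconds; assumption: "every 5 minutes"
last_run = None  # time.monotonic() timestamp of the last call

def maybe_run_main():
    """Call main() only if at least INTERVAL seconds have passed since the last call."""
    global last_run
    now = time.monotonic()
    if last_run is None or now - last_run >= INTERVAL:
        last_run = now
        main()  # the existing function from the question

# inside the while True loop, where main() is currently called:
#     maybe_run_main()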

Python mediapipe handdetector error: create_int(): incompatible function arguments

HandTracking.py is my main Python file, which I run using VS Code.
I'm just following the YouTube video from Murtaza's Workshop - Robotics and AI.
import cv2
import numpy as np
import HandTrackingModule as htm
import time
import autopy

##########################
wCam, hCam = 640, 480
##########################

cap = cv2.VideoCapture(0)
cap.set(3, wCam)
cap.set(4, hCam)
pTime = 0
detector = htm.handDetector(maxHands=1)
while True:
    # 1. Find Hand Landmarks
    success, img = cap.read()
    img = detector.findHands(img)
    lmList, bbox = detector.findPosition(img)
    # 2. Get the tip of the index and middle fingers
    # 3. Check which fingers are up
    # 4. Only Index Finger : Moving Mode
    # 5. Convert Coordinates
    # 6. Smoothen the values for smooth cursor
    # 7. Move the MOUSE CURSOR!
    # 8. Both Index and middle fingers are up : CLICK MODE
    # 9. Find Distance between fingers
    # 10. Click mouse if distance short
    # 11. Frame rate
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, str(int(fps)), (10, 50), cv2.FONT_HERSHEY_PLAIN, 2,
                (255, 0, 0), 2)
    # 12. Display
    cv2.imshow("Image", img)
    k = cv2.waitKey(1)
    if k == ord('1'):
        break
This is the other Python file, named "HandTrackingModule.py":
"""
Hand Tracing Module
By: Murtaza Hassan
Youtube: http://www.youtube.com/c/MurtazasWorkshopRoboticsandAI
Website: https://www.computervision.zone/
"""
import cv2
import mediapipe as mp
import time
import math
import numpy as np

class handDetector():
    def __init__(self, mode=False, maxHands=2, model_complexity=1, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands,
                                        self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils
        self.tipIds = [4, 8, 12, 16, 20]

    def findHands(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        # print(results.multi_hand_landmarks)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0, draw=True):
        xList = []
        yList = []
        bbox = []
        self.lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myHand.landmark):
                # print(id, lm)
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                xList.append(cx)
                yList.append(cy)
                # print(id, cx, cy)
                self.lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
            xmin, xmax = min(xList), max(xList)
            ymin, ymax = min(yList), max(yList)
            bbox = xmin, ymin, xmax, ymax
            if draw:
                cv2.rectangle(img, (xmin - 20, ymin - 20), (xmax + 20, ymax + 20),
                              (0, 255, 0), 2)
        return self.lmList, bbox

    def fingersUp(self):
        fingers = []
        # Thumb
        if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
            fingers.append(1)
        else:
            fingers.append(0)
        # Fingers
        for id in range(1, 5):
            if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
                fingers.append(1)
            else:
                fingers.append(0)
        # totalFingers = fingers.count(1)
        return fingers

    def findDistance(self, p1, p2, img, draw=True, r=15, t=3):
        x1, y1 = self.lmList[p1][1:]
        x2, y2 = self.lmList[p2][1:]
        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
        if draw:
            cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), t)
            cv2.circle(img, (x1, y1), r, (255, 0, 255), cv2.FILLED)
            cv2.circle(img, (x2, y2), r, (255, 0, 255), cv2.FILLED)
            cv2.circle(img, (cx, cy), r, (0, 0, 255), cv2.FILLED)
        length = math.hypot(x2 - x1, y2 - y1)
        return length, img, [x1, y1, x2, y2, cx, cy]

def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(1)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)
        lmList, bbox = detector.findPosition(img)
        if len(lmList) != 0:
            print(lmList[4])
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3,
                    (255, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)

if __name__ == "__main__":
    main()
I'm just following the code from YouTube, and I get this error even though it works perfectly in the video.
THE ERROR:
Traceback (most recent call last):
File "d:\Users\Romwald\Desktop\OpenCV Code tutorial\HANDGESTURE\HandTracking.py", line 15, in <module>
detector = htm.handDetector(maxHands=1)
File "d:\Users\Romwald\Desktop\OpenCV Code tutorial\HANDGESTURE\HandTrackingModule.py", line 24, in __init__
self.detectionCon, self.trackCon)
File "C:\Users\Romwald\AppData\Local\Programs\Python\Python37\lib\site-packages\mediapipe\python\solutions\hands.py", line 129, in __init__
'multi_handedness'
File "C:\Users\Romwald\AppData\Local\Programs\Python\Python37\lib\site-packages\mediapipe\python\solution_base.py", line 260, in __init__
for name, data in (side_inputs or {}).items()
File "C:\Users\Romwald\AppData\Local\Programs\Python\Python37\lib\site-packages\mediapipe\python\solution_base.py", line 260, in <dictcomp>
for name, data in (side_inputs or {}).items()
File "C:\Users\Romwald\AppData\Local\Programs\Python\Python37\lib\site-packages\mediapipe\python\solution_base.py", line 513, in _make_packet
return getattr(packet_creator, 'create_' + packet_data_type.value)(data)
TypeError: create_int(): incompatible function arguments. The following argument types are supported:
1. (arg0: int) -> mediapipe.python._framework_bindings.packet.Packet
Invoked with: 0.5
[ WARN:0#2.743] global D:\a\opencv-python\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (539) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
Never mind, I fixed it. I just did the following:
Installed Python 3.7.4
python -m pip install --upgrade pip
pip install opencv-python
pip install mediapipe (or py -m pip install mediapipe)
pip install mediapipe==0.8.8 (for Hands())
pip install autopy
pip install --upgrade virtualenv
pip install tensorflow
You missed some code: newer versions of mediapipe insert model_complexity as the third positional argument of Hands(), so your detectionCon value (0.5) lands in a slot that expects an int, which is why create_int() is invoked with 0.5. Just replace your constructor code with this one:
def __init__(self, mode=False, maxHands=2, model_complexity=1, detectionCon=0.5, trackCon=0.5):
    self.mode = mode
    self.maxHands = maxHands
    self.detectionCon = detectionCon
    self.model_complexity = model_complexity
    self.trackCon = trackCon
    self.mpHands = mp.solutions.hands
    self.hands = self.mpHands.Hands(
        self.mode, self.maxHands, self.model_complexity, self.detectionCon, self.trackCon)
    self.mpDraw = mp.solutions.drawing_utils
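For robustness, the same constructor can pass keyword arguments instead of relying on positional order, so a future signature change fails loudly instead of silently misassigning values. A sketch using the parameter names from the mediapipe Hands API as of the 0.8.x line (verify against your installed version):

self.hands = self.mpHands.Hands(
    static_image_mode=self.mode,
    max_num_hands=self.maxHands,
    model_complexity=self.model_complexity,
    min_detection_confidence=self.detectionCon,
    min_tracking_confidence=self.trackCon)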

How to find direction turn by turn using a camera in OpenCV Python?

The code below finds edges and yellow lines. Basically, I would like to print "left turn" when the yellow line turns left.
import cv2
import numpy as np

video = cv2.VideoCapture(0)
while True:
    ret, orig_frame = video.read()
    if not ret:
        video = cv2.VideoCapture(0)
        continue
    frame = cv2.GaussianBlur(orig_frame, (5, 5), 0)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    low_yellow = np.array([18, 94, 140])
    up_yellow = np.array([48, 255, 255])
    mask = cv2.inRange(hsv, low_yellow, up_yellow)
    edges = cv2.Canny(mask, 75, 150)
    lines = cv2.HoughLinesP(edges, 1, np.pi/180, 50, maxLineGap=50)
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(frame, (x1, y1), (x2, y2), (0, 255, 0), 5)
    cv2.imshow("frame", frame)
    cv2.imshow("edges", edges)
    key = cv2.waitKey(25)
    if key == 27:
        break
video.release()
cv2.destroyAllWindows()
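One way to turn the HoughLinesP output into a left/right signal is to look at the average lean of the detected segments: order each segment bottom-to-top (image y grows downward) and measure its angle from the vertical. A rough sketch to drop in after the lines are detected (the classify_turn helper and the 10-degree threshold are assumptions to tune, not an established recipe):

import numpy as np

def classify_turn(lines, angle_thresh_deg=10.0):
    """Classify the dominant lean of the Hough segments.

    lines: the array returned by cv2.HoughLinesP (or None).
    Returns 'left', 'right', 'straight', or None if there are no lines.
    """
    if lines is None:
        return None
    angles = []
    for line in lines:
        x1, y1, x2, y2 = line[0]
        # Order each segment bottom-to-top so the sign of the lean is
        # consistent (image y grows downward).
        if y1 < y2:
            x1, y1, x2, y2 = x2, y2, x1, y1
        angles.append(np.degrees(np.arctan2(x2 - x1, y1 - y2)))
    mean_angle = float(np.mean(angles))
    if mean_angle < -angle_thresh_deg:
        return 'left'
    if mean_angle > angle_thresh_deg:
        return 'right'
    return 'straight'

# inside the loop, after cv2.HoughLinesP:
#     turn = classify_turn(lines)
#     if turn == 'left':
#         print("left turn")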

CamShift + face detection in OpenCV

I'm currently combining two examples from OpenCV, one that detects your face and one that tracks an object. The purpose is to first detect the face and then track it.
My code currently:
import numpy as np
import cv2
import cv2.cv as cv
import video
import math

cascade = 0
counter = 0

class App(object):
    def __init__(self, video_src):
        self.cam = video.create_capture(video_src)
        ret, self.frame = self.cam.read()
        cv2.namedWindow('camshift')
        self.selection = None
        self.drag_start = None
        self.tracking_state = 0
        self.show_backproj = False

    def show_hist(self):
        bin_count = self.hist.shape[0]
        bin_w = 24
        img = np.zeros((256, bin_count*bin_w, 3), np.uint8)
        for i in xrange(bin_count):
            h = int(self.hist[i])
            cv2.rectangle(img, (i*bin_w+2, 255), ((i+1)*bin_w-2, 255-h), (int(180.0*i/bin_count), 255, 255), -1)
        img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        cv2.imshow('hist', img)

    '''
    #param: img the image for the face detection
    #param: cascade the cascade of the Viola-Jones face detection
    #return: rects, an array of the corners of the detected face. [x1 y1 x2 y2]
    '''
    def detect(self, img, cascade):
        # Detect the faces
        rects = cascade.detectMultiScale(img, scaleFactor=1.1, minNeighbors=3, minSize=(150, 150), flags=cv.CV_HAAR_SCALE_IMAGE)
        # Check if any faces are detected
        if len(rects) == 0:
            # return empty array
            return []
        else:
            # Get the correct x and y values
            rects[:,2:] += rects[:,:2]
            # loop over the rects and shrink the width by 40%
            for rec in rects:
                rec[0] = rec[0] + int(math.floor(((rec[2] - rec[0])*0.4)/2))
                rec[2] = rec[2] - int(math.floor(((rec[2] - rec[0])*0.4)/2))
            return rects

    def draw_rects(self, img, rects, color):
        for x1, y1, x2, y2 in rects:
            cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

    def getFace(self, img):
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.equalizeHist(gray)
        rects = self.detect(gray, cascade)
        self.rects = rects
        img = self.draw_rects(img, rects, (0, 255, 0))
        if len(rects) != 0:
            self.selection = rects[0][1], rects[0][0], rects[0][3], rects[0][2]
        return rects

    def run(self):
        counter = 0
        rects = None
        while True:
            counter += 1
            ret, self.frame = self.cam.read()
            vis = self.frame.copy()
            if counter % 150 == 0:
                rects = self.getFace(vis)
            hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.)))
            if rects is not None:
                self.draw_rects(vis, rects, (0, 255, 0))
            if self.selection:
                print 'test0'
                x0, y0, x1, y1 = self.selection
                self.track_window = (x0, y0, x1-x0, y1-y0)
                hsv_roi = hsv[x0:x1, y0:y1]
                mask_roi = mask[x0:x1, y0:y1]
                hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
                cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
                self.hist = hist.reshape(-1)
                self.show_hist()
                vis_roi = vis[x0:x1, y0:y1]
                cv2.bitwise_not(vis_roi, vis_roi)
                vis[mask == 0] = 0
                self.tracking_state = 1
                self.selection = None
            if self.tracking_state == 1:
                self.selection = None
                prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
                prob &= mask
                term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
                track_box, self.track_window = cv2.CamShift(prob, self.track_window, term_crit)
                if self.show_backproj:
                    vis[:] = prob[..., np.newaxis]
                try: cv2.ellipse(vis, track_box, (0, 0, 255), 2)
                except: print track_box
            cv2.imshow('camshift', vis)
            ch = 0xFF & cv2.waitKey(5)
            if ch == 27:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj
        cv2.destroyAllWindows()

if __name__ == '__main__':
    import sys, getopt
    args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
    try:
        video_src = video_src[0]
    except:
        video_src = 0
    args = dict(args)
    cascade_fn = args.get('--cascade', "haarcascade_frontalface_alt.xml")
    cascade = cv2.CascadeClassifier(cascade_fn)
    App(video_src).run()
Currently I show where the face was initially detected (in a green rectangle) and what is being tracked at the moment (in a red oval). I am able to detect the face, but the tracker keeps locking onto everything except my face (usually one or both shoulders). I suspected it had something to do with the coordinates, but I've checked them and they seem fine (mask_roi, hsv_roi, vis_roi).
Can anybody point out my mistake?
I was unable to run your code (no module named video). I'm using OpenCV 2.4.4, and my suggestions for your problem are as follows:
Make sure your face is properly lit (no shadows, bright natural skin color; a dark background helps a lot).
Play with the HSV values in mask = cv2.inRange(hsv, np.array((0., 60., 32.)), np.array((180., 255., 255.))). I'm using these: np.array((0., 51., 89.)) and np.array((17., 140., 255.)).
Tip: you could make a window just for your mask so you can see how well it works.
After cv2.namedWindow('camshift'), put cv2.namedWindow('mask'),
and after mask = cv2.inRange(...), put cv2.imshow('mask', mask) (or show mask_roi).
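Putting that tip together, a minimal sketch using the variable names already in the question (the tighter HSV range is the one suggested above; tune it for your lighting):

# once, next to the existing window setup
cv2.namedWindow('camshift')
cv2.namedWindow('mask')

# inside the loop, right after the mask is computed
mask = cv2.inRange(hsv, np.array((0., 51., 89.)), np.array((17., 140., 255.)))
cv2.imshow('mask', mask)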
