NameError even when variable is initialized - Python

I'm coding in Python (first-timer), trying to program a computer vision algorithm that recognizes when a TV channel is showing a commercial or a regular show (the channel logo is not displayed while a commercial is running).
The code I'm working with is this:
import numpy as np
import cv2

cap = cv2.VideoCapture('Telecinco.mp4')
count = 0
template = cv2.imread('telecinco_logo.png', 0)

while cap.isOpened():
    ret, frame = cap.read()
    frame = cv2.resize(frame, None, fx=1, fy=1, interpolation=cv2.INTER_CUBIC)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if count == 0:
        count = 1
        cv2.imwrite('image.png', frame)
    w, h = template.shape[::-1]
    res = cv2.matchTemplate(gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.7
    loc = np.where(res >= threshold)
    for pt in zip(*loc[::-1]):
        logo_img = frame[pt[1]:pt[1]+h, pt[0]:pt[0]+w]
        cv2.rectangle(frame, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 1)
    cv2.imshow('Captura', frame)
    cv2.imshow('Logo encontrado', logo_img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
However, Python is giving me an error:
Traceback (most recent call last):
  File "logoDeteccion.py", line 27, in <module>
    cv2.imshow('Logo encontrado', logo_img)
NameError: name 'logo_img' is not defined
And I'm really confused: logo_img is always assigned (or should be). When I comment out that line, the program runs fine, but of course it doesn't do what it should.
Any ideas?
Thanks!

Inside the while loop, you try to show logo_img on the screen.
If it does not exist yet, you can wait for zip(*loc[::-1]) to yield an actual value.
See the code below:
while cap.isOpened():
    # ... the rest of the loop body as before ...
    try:
        cv2.imshow('Logo encontrado', logo_img)
    except NameError:
        pass  # go to the next iteration and check again whether logo_img exists
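Alternatively, the root cause is that logo_img is only assigned inside the for loop, and that loop runs zero times whenever no match clears the threshold, so the name never comes into existence. A minimal sketch that avoids the exception entirely by initializing the name up front (a sketch, not the asker's exact program):
import numpy as np
import cv2

cap = cv2.VideoCapture('Telecinco.mp4')
template = cv2.imread('telecinco_logo.png', 0)
w, h = template.shape[::-1]
logo_img = None  # defined before the loop, so the name always exists

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    res = cv2.matchTemplate(gray, template, cv2.TM_CCOEFF_NORMED)
    for pt in zip(*np.where(res >= 0.7)[::-1]):
        logo_img = frame[pt[1]:pt[1]+h, pt[0]:pt[0]+w]
        cv2.rectangle(frame, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 1)
    cv2.imshow('Captura', frame)
    if logo_img is not None:  # only show once a logo has actually been captured
        cv2.imshow('Logo encontrado', logo_img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()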

Related

Is there a way to determine which camera has recognized a person in a live stream when using multiple cameras for facial recognition in Python

This is a facial recognition program written in Python and OpenCV, with records inserted into a MySQL database.
I have one question about the program.
In the first part, a row is inserted into a table in real time whenever the camera recognizes a person from the given pictures; the row carries a date, time, name, and ID stamp. I also want to insert the name of the camera that recognized the person, so I know the particular location as well.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("trainner.yml")

labels = {"person_name": 1}
with open("labels.pickle", 'rb') as f:
    og_labels = pickle.load(f)
    labels = {v: k for k, v in og_labels.items()}

cap = cv2.VideoCapture(0)
# cap1 = cv2.VideoCapture('rtsp://admin:umerkhan261#192.168.226.201')

while True:
    ret, frame = cap.read()
    # ret, frame1 = cap1.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
    now = datetime.now()
    print("Today's date: ", str(now))
    for (x, y, w, h) in faces:
        roi_gray = gray[y:y + h, x:x + w]
        # roi_gray1 = gray1[y:y + h, x:x + w]
        roi_color = frame[y:y + h, x:x + w]
        # roi_color1 = frame1[y:y + h, x:x + w]
        # deep-learned model for recognizing the person in the live stream
        id_, conf = recognizer.predict(roi_gray)
        # id_, conf = recognizer.predict(roi_gray1)
        if 45 <= conf <= 85:
            print(id_)
            print(labels[id_])
            font = cv2.FONT_HERSHEY_SIMPLEX
            name = labels[id_]
            new = datetime.now()
            tString = new.strftime('%H:%M:%S')
            dtString = new.strftime('%D:%m')
I figured out a way to solve the problem. I turned the frame-reading from my webcam into a function with a variable named cam1, then made a second Python file containing a copy of the whole program where that variable is named cam2. A third, main file calls both functions and, using multithreading, runs them simultaneously. It turned out to be successful.
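A rough sketch of that approach condensed into one file, assuming two capture sources and a hypothetical insert_record() helper for the MySQL write; each worker is given a camera name so recognitions can be stored with their location:
import threading
import cv2

def run_camera(source, camera_name):
    # Read frames from one source; camera_name tags every recognition.
    cap = cv2.VideoCapture(source)
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        # ... face detection and recognition as in the program above ...
        # insert_record(name, id_, datetime.now(), camera_name)  # hypothetical DB helper
    cap.release()

threads = [
    threading.Thread(target=run_camera, args=(0, "camera_room_1")),
    threading.Thread(target=run_camera, args=("rtsp://...", "camera_room_2")),
]
for t in threads:
    t.start()
for t in threads:
    t.join()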

TypeError: __init__(): incompatible constructor arguments with face_recognition call

I was trying to develop a face recognition attendance system. I coded it exactly like the tutorial, but I still got some errors. Here's the code:
from face_recognition.api import face_distance
import face_recognition as fr
import os
import cv2
import face_recognition
import numpy as np
import time
from PIL import ImageFont

def getEncodedFaces():
    encoded = {}
    for dirPath, dNames, fNames in os.walk("./faces"):
        for f in fNames:
            if f.endswith(".jpg") or f.endswith(".png"):
                face = fr.load_image_file("faces/" + f)
                encoding = fr.face_encodings(face)[0]
                encoded[f.split(".")[0]] + encoding
    return encoded

def unknownImageEncoded(img):
    face = fr.load_image_file("faces/" + img)
    encoding = fr.face_encodings(face)[0]
    return encoding

def classifyFace(im):
    faces = getEncodedFaces()
    facesEncoded = list(faces.values())
    knownFaceNames = list(faces.keys())
    img = cv2.imread(im, 1)
    faceLocations = face_recognition.face_encodings(img)
    unknownFaceEncodings = face_recognition.face_encodings(img, faceLocations)
    faceNames = []
    for faceEncoding in unknownFaceEncodings:
        matches = face_recognition.compare_faces()
        name = "Unknown"
        faceDistances = face_recognition.face_distance(facesEncoded, faceEncoding)
        bestMatchIndex = np.argmin(face_distance)
        if matches[bestMatchIndex]:
            name = knownFaceNames[bestMatchIndex]
        faceNames.append(name)
        for (top, right, bottom, left), name in zip(faceLocations, faceNames):
            cv2.rectangle(img, (left-20, top-20, right+20, bottom+20), (255, 0, 0), cv2.FILLED)
            font = ImageFont.truetype("./NotoSansTC-Black.ttf")
            cv2.putText(img, name, (left-20, bottom+15), font, 1.0, (255, 255, 255), 2)
    while True:
        cv2.imshow("Attendance System", img)
        if cv2.waitkey(1) & 0xFF == ord("q"):
            return faceNames

print(classifyFace("test.jpg"))
And here's the error:
Traceback (most recent call last):
  File "main.py", line 51, in <module>
    print(classifyFace("test.jpg"))
  File "main.py", line 31, in classifyFace
    unknownFaceEncodings = face_recognition.face_encodings(img, faceLocations)
  File "C:\Users\family\AppData\Local\Programs\Python\Python36\lib\site-packages\face_recognition\api.py", line 213, in face_encodings
    raw_landmarks = _raw_face_landmarks(face_image, known_face_locations, model)
  File "C:\Users\family\AppData\Local\Programs\Python\Python36\lib\site-packages\face_recognition\api.py", line 158, in _raw_face_landmarks
    face_locations = [_css_to_rect(face_location) for face_location in face_locations]
  File "C:\Users\family\AppData\Local\Programs\Python\Python36\lib\site-packages\face_recognition\api.py", line 158, in <listcomp>
    face_locations = [_css_to_rect(face_location) for face_location in face_locations]
  File "C:\Users\family\AppData\Local\Programs\Python\Python36\lib\site-packages\face_recognition\api.py", line 49, in _css_to_rect
    return dlib.rectangle(css[3], css[0], css[1], css[2])
TypeError: __init__(): incompatible constructor arguments. The following argument types are supported:
    1. _dlib_pybind11.rectangle(left: int, top: int, right: int, bottom: int)
    2. _dlib_pybind11.rectangle(rect: dlib::drectangle)
    3. _dlib_pybind11.rectangle(rect: _dlib_pybind11.rectangle)
    4. _dlib_pybind11.rectangle()
Invoked with: -0.035434916615486145, -0.12049627304077148, 0.08377307653427124, 0.062198664993047714
Here's the directory structure:
test.JPG
main.py
NotoSansTC-Black.otf
faces
|- 呂AA.jpg
|- 呂BB.jpg
Can someone tell me what it means and how to fix it? I have tried installing many different versions of dlib and face-recognition, and I've also installed pybind11, but it still gives the same error. I'm in a hurry and really need some help. Thank you!
Why are you importing face_recognition twice? Importing it both directly and under the alias fr is unnecessary.
compare_faces() takes two arguments: the list of known face encodings and the unknown face's encoding.
Now, focusing on your main error: you never computed the face locations, so the 128-value float encodings (those are the numbers after "Invoked with:" in the traceback) get passed to dlib.rectangle as if they were box coordinates.
Your error is here:
faceLocations = face_recognition.face_encodings(img)
Instead use this:
faceLocations = face_recognition.face_locations(img)
Full code:
from face_recognition.api import face_distance
import face_recognition as fr
import os
import cv2
import numpy as np
import time

def getEncodedFaces():
    encoded = {}
    for f in os.listdir(os.path.join('faces')):
        face = fr.load_image_file(os.path.join('faces', f))
        encoding = fr.face_encodings(face)
        if not len(encoding):
            continue  # skip images in which no face was found
        encoding = encoding[0]
        encoded[f.split(".")[0]] = encoding
    return encoded

def unknownImageEncoded(img):
    face = fr.load_image_file("faces/" + img)
    encoding = fr.face_encodings(face)[0]
    return encoding

def classifyFace(im):
    faces = getEncodedFaces()
    facesEncoded = list(faces.values())
    knownFaceNames = list(faces.keys())
    img = cv2.imread(im, 1)
    faceLocations = fr.face_locations(img)
    unknownFaceEncodings = fr.face_encodings(img, faceLocations)
    faceNames = []
    for faceEncoding, faceloc in zip(unknownFaceEncodings, faceLocations):
        top, right, bottom, left = faceloc
        matches = fr.compare_faces(facesEncoded, faceEncoding)
        name = "Unknown"
        faceDistances = fr.face_distance(facesEncoded, faceEncoding)
        bestMatchIndex = np.argmin(faceDistances)
        if matches[bestMatchIndex]:
            name = knownFaceNames[bestMatchIndex]
        faceNames.append(name)
        cv2.rectangle(img, (left-20, top-20), (right+20, bottom+20), (255, 0, 0), cv2.FILLED)
        # cv2.putText needs one of OpenCV's built-in Hershey fonts, not a PIL font
        cv2.putText(img, name, (left-20, bottom+15), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255), 2)
    cv2.imwrite("result.jpg", img)
    return faceNames

print(classifyFace("test.jpg"))
Regarding this line: for (top, right, bottom, left), name in zip(faceLocations, faceNames):
Make sure that the top, right, bottom, left values are integers and not floats. Just print them once to confirm. If they are floats, convert them with int(), like this:
for (top, right, bottom, left), name in zip(faceLocations, faceNames):
    cv2.rectangle(img, (int(left)-20, int(top)-20), (int(right)+20, int(bottom)+20), (255, 0, 0), cv2.FILLED)

OpenCV: lossless capture?

I wrote a simple Python program that uses OpenCV and scantailor-cli to scan documents with my document camera and store them as high-contrast, low-filesize PDFs.
Approach:
capture the document in full color with OpenCV at 1920x1080 in JPG format
convert to high-contrast black-and-white PDF with scantailor-cli
This generally works; however, I have discovered an issue with my usage of OpenCV that is limiting the quality of my scans. I'm hoping someone here can give me the nudge I need to improve this.
When I use the UI-based capture software that came with my IPEVO document camera (Visualizer), I get a nice, clean capture that converts well to high contrast. When I use OpenCV, I get unwanted compression artifacts, and conversion to high contrast yields unsatisfactory results.
Full program listing is here: https://github.com/jonashw/Document-Scanner/blob/master/scan.py
Relevant excerpt below:
def do_camera(device_id):
    cam = cv2.VideoCapture(device_id)  # ,cv2.CAP_DSHOW)
    w, h = 1920, 1080
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, w)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
    cam.set(cv2.CAP_PROP_FORMAT, 0)
    w = cam.get(cv2.CAP_PROP_FRAME_WIDTH)
    h = cam.get(cv2.CAP_PROP_FRAME_HEIGHT)
    grab_mode = cam.get(cv2.CAP_PROP_MODE)
    format = cam.get(cv2.CAP_PROP_FORMAT)
    print("capturing with w={}, h={}, grab_mode={}, format={}".format(w, h, grab_mode, format))
    #ok = False
    #i = -100
    #while (not ok) and i < 10:
    #    if i != 0:
    #        print("setting grab_mode {}".format(grab_mode + i))
    #        ok = cam.set(cv2.CAP_PROP_MODE, grab_mode + i)
    #    i += 1
    #if ok:
    #    gm = cam.get(cv2.CAP_PROP_MODE)
    #    printf("Grab mode = {}", format(gm))
    cv2.namedWindow("test", cv2.WINDOW_NORMAL)
    img_counter = 0
    img_paths = []
    while True:
        ret, frame = cam.read()
        if not ret:
            print("failed to grab frame")
            break
        cv2.imshow("test", frame)
        cv2.resizeWindow("test", 1280, 720)
        cv2.moveWindow("test", 1920, 0)
        k = cv2.waitKey(1)
        if k % 256 == 27:
            # ESC pressed
            print("Escape hit, closing...")
            break
        elif k % 256 == 32:
            # SPACE pressed
            img_name = "capture_{}.png".format(img_counter)
            img_path = os.path.join(capture_path, img_name)
            img_paths.append(img_path)
            os.makedirs(capture_path, exist_ok=True)
            cv2.imwrite(img_path, frame, [cv2.IMWRITE_PNG_COMPRESSION, 0])
            print("{} written!".format(img_name))
            img_counter += 1
    cam.release()
    cv2.destroyAllWindows()
    return img_paths
I suspect the exact line I need to alter is this one.
cv2.imwrite(img_path, frame, [cv2.IMWRITE_PNG_COMPRESSION, 0])
As you can see, I have opted for PNG format, which should not result in any JPEG-like compression artifacts. Despite this selection, I get artifacts.
I have also tried embracing JPEG and setting the quality to max. This does not improve matters:
cv2.imwrite(img_path, frame, [cv2.IMWRITE_JPEG_QUALITY, 100])
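I also realize that IMWRITE_PNG_COMPRESSION only trades file size against encoding time (PNG is lossless at every level), so the artifacts must already be present in the frames the camera delivers, perhaps because it streams MJPG over USB. One thing I might try, sketched below, is requesting an uncompressed pixel format from the driver; whether this takes effect depends on the camera and backend ('YUY2' is only a guess at a format this camera supports), so I'd read the property back to verify:
import cv2

cam = cv2.VideoCapture(0)
# Ask the driver for an uncompressed pixel format instead of MJPG
# (may be ignored if the camera does not support it).
cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'YUY2'))
fourcc = int(cam.get(cv2.CAP_PROP_FOURCC))
print("active FOURCC:", fourcc.to_bytes(4, 'little').decode('ascii', errors='replace'))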
What can I do? Please and thank you.

OpenCV + Raspberry Pi as a room monitor

I'm new to OpenCV and Python; my project is about a smart home.
I managed to install OpenCV on a Raspberry Pi and use a webcam.
My program should handle the following three scenarios:
1. A person enters the room, and both a face and the person are recognized: send the message "Dad is in room 1."
2. A person enters the room, a face is detected, but the person is not recognized: send the message "Unknown person is in room 1."
3. No one is in the room: send the message "No one is in room 1."
I have an idea of how to solve scenarios 1 and 2, but scenario 3 is where I am stuck. I tried saving the name of the detected person in a variable and sending the message when it is empty, but that has not worked for me.
The code I am using is the following; the problem I have is at the end of the code:
import cv2, sys, numpy, os

size = 1
fn_haar = 'haarcascade_frontalface_default.xml'
fn_dir = 'att_faces'

# Part 1: Create fisherRecognizer
print('Training...')
# Create a list of images and a list of corresponding names
(images, lables, names, id) = ([], [], {}, 0)
# Get the folders containing the training data
for (subdirs, dirs, files) in os.walk(fn_dir):
    # Loop through each folder named after the subject in the photos
    for subdir in dirs:
        names[id] = subdir
        subjectpath = os.path.join(fn_dir, subdir)
        # Loop through each photo in the folder
        for filename in os.listdir(subjectpath):
            # Skip non-image formats
            f_name, f_extension = os.path.splitext(filename)
            if (f_extension.lower() not in
                    ['.png', '.jpg', '.jpeg', '.gif', '.pgm']):
                print("Skipping " + filename + ", wrong file type")
                continue
            path = subjectpath + '/' + filename
            lable = id
            # Add to training data
            images.append(cv2.imread(path, 0))
            lables.append(int(lable))
        id += 1
(im_width, im_height) = (112, 92)

# Create a Numpy array from the two lists above
(images, lables) = [numpy.array(lis) for lis in [images, lables]]

# OpenCV trains a model from the images
model = cv2.face.createFisherFaceRecognizer()
model.train(images, lables)

# Part 2: Use fisherRecognizer on camera stream
haar_cascade = cv2.CascadeClassifier(fn_haar)
webcam = cv2.VideoCapture(0)
while True:
    # Loop until the camera is working
    rval = False
    while not rval:
        # Put the image from the webcam into 'frame'
        (rval, frame) = webcam.read()
        if not rval:
            print("Failed to open webcam. Trying again...")
    # Flip the image (optional)
    frame = cv2.flip(frame, 1, 0)
    # Convert to grayscale
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Resize to speed up detection (optional, change size above)
    mini = cv2.resize(gray, (int(gray.shape[1] / size), int(gray.shape[0] / size)))
    # Detect faces and loop through each one
    faces = haar_cascade.detectMultiScale(mini)
    for i in range(len(faces)):
        face_i = faces[i]
        # Coordinates of face after scaling back by `size`
        (x, y, w, h) = [v * size for v in face_i]
        face = gray[y:y + h, x:x + w]
        face_resize = cv2.resize(face, (im_width, im_height))
        # Try to recognize the face
        prediction = model.predict(face_resize)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3)
        # [1]
        # Write the name of the recognized face
        cv2.putText(frame,
                    '%s - %.0f' % (names[prediction[0]], prediction[1]),
                    (x - 10, y - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 255, 0))
        face = '%s' % (names[prediction[0]])  # Save the name in a variable
        # Start to validate the name
        if face != "":  # If a name is detected
            print(face + " is in the room..")  # Print the name in the terminal
        elif face == "":  # If a name is not detected
            print("The room is empty...")  # Print the text in the terminal
    # This last part is where I have the problem: when a face is not detected, the text is not printed in the terminal
    # Show the image and check for ESC being pressed
    cv2.imshow('OpenCV', frame)
    key = cv2.waitKey(10)
    if key == 27:
        break
The code I am using is based on the following tutorial: Face Detection
Any help is appreciated, thank you. Greetings
If no faces are detected in the room, your code never enters the for i in range(len(faces)) loop (which, by the way, could be simplified to for face_i in faces).
Thus, checking whether faces is empty solves your problem:
if len(faces) == 0:
    print("room is empty")
else:
    for face in faces:
        do_stuff(face)

error: (-215) nimages > 0 in function calibrateCamera using Python and OpenCV

I'm trying to calibrate my webcam based on the example given in the OpenCV samples, but when I run the code provided here:
def caliLeftCam():
    args, img_mask = getopt.getopt(sys.argv[1:], '', ['save=', 'debug=', 'square_size='])
    args = dict(args)
    try: img_mask = img_mask[0]
    except: img_mask = '../cpp/img*.jpg'
    img_names = glob(img_mask)
    debug_dir = args.get('--debug')
    square_size = float(args.get('--square_size', 1.0))

    pattern_size = (7, 5)
    pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
    pattern_points[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
    pattern_points *= square_size

    obj_points = []
    img_pointsL = []
    h, w = 0, 0
    for fn in img_names:
        print "processing %s..." % fn,
        imgL = cv2.imread(fn, 0)
        h, w = imgL.shape[:2]
        found, corners = cv2.findChessboardCorners(imgL, pattern_size)
        if found:
            term = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1)
            cv2.cornerSubPix(imgL, corners, (5, 5), (-1, -1), term)
        if debug_dir:
            vis = cv2.cvtColor(imgL, cv2.COLOR_GRAY2BGR)
            cv2.drawChessboardCorners(vis, pattern_size, corners, found)
            path, name, ext = splitfn(fn)
            cv2.imwrite('%s/%s_chess.bmp' % (debug_dir, name), vis)
        if not found:
            print "chessboard not found"
            continue
        img_pointsL.append(corners.reshape(-1, 2))
        obj_points.append(pattern_points)
        print 'ok'

    rmsL, cameraL_matrix, dist_coefsL, rvecsL, tvecsL = cv2.calibrateCamera(obj_points, img_pointsL, (w, h))
I got this error:
Traceback (most recent call last):
  File "/home/sabrine/Downloads/opencv-2.4.9/samples/python2/Memo.py", line 293, in <module>
    Img_pointsL, Cam_MatL, DisL = caliLeftCam()
  File "/home/sabrine/Downloads/opencv-2.4.9/samples/python2/Memo.py", line 124, in caliLeftCam
    rmsL, cameraL_matrix, dist_coefsL, rvecsL, tvecsL = cv2.calibrateCamera(obj_points, img_pointsL, (w, h))
error: /build/buildd/opencv-2.4.8+dfsg1/modules/calib3d/src/calibration.cpp:3415: error: (-215) nimages > 0 in function calibrateCamera
What does this error mean, and how can I solve it?
The error says that one of the vectors provided as arguments is empty.
The function has an assertion that prevents you from using it unless all preconditions are met; in this case it checks that there is at least one set of image points (nimages > 0 must be true). In other words, no chessboard was found in any of the input images, so obj_points and img_pointsL are both empty.
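A minimal guard before the call makes this failure mode explicit (a sketch using the variable names from the question):
if not obj_points:
    raise RuntimeError("No chessboard corners found in any input image; "
                       "check img_mask and pattern_size before calibrating.")
rmsL, cameraL_matrix, dist_coefsL, rvecsL, tvecsL = cv2.calibrateCamera(
    obj_points, img_pointsL, (w, h))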
Recheck the values of nx and ny (pattern_size) passed to cv2.findChessboardCorners(image, (nx, ny)) and cv2.drawChessboardCorners(image, pattern_size, ...); they should be the number of inner corners per row and column of the chessboard, not the number of squares. It worked for me.
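For example (a sketch, assuming a hypothetical calibration image img01.jpg; a board with 8x6 squares has 7x5 inner corners):
import cv2

img = cv2.imread('img01.jpg', 0)   # one grayscale calibration image
pattern_size = (7, 5)              # inner corners per row and column, not squares
found, corners = cv2.findChessboardCorners(img, pattern_size)
print("chessboard found:", found)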
I know I am too late, but hope it helps other people. Replace the following line:
except: img_mask = '../cpp/img*.jpg'
with:
except: img_mask = './cpp/img*.jpg'
In my case I was not using a big enough checkerboard in the sample pictures. Printing a 7x10 checkerboard instead of a 6x7 one solved it.
import numpy as np
import cv2

cap = cv2.VideoCapture('output.avi')
i = 1
while cap.isOpened():
    ret, img = cap.read()
    print img
    if img is None:  # the error occurs because the video has run out of frames
        break  # if there are no frames left, end the program
    cv2.imshow('img2', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    elif cv2.waitKey(1) & 0xFF == ord('p'):
        cv2.imwrite('image' + str(i) + '.jpg', img)
        i = i + 1
cap.release()
cv2.destroyAllWindows()
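As an aside, a sketch of the more idiomatic end-of-stream check: cap.read() already reports failure through its first return value, so there is no need to inspect the frame at all.
import cv2

cap = cv2.VideoCapture('output.avi')
while cap.isOpened():
    ret, img = cap.read()
    if not ret:  # no frame returned: the video has ended (or the read failed)
        break
    cv2.imshow('img2', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()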
