Python OpenCV Error - python

Simple code used to identify playing cards through a cam. I have been trying to run this code, but I keep getting this error when attempting to use any of the methods:
TypeError: src is not a numpy array, neither a scalar
import sys
import numpy as np
sys.path.insert(0, "/usr/local/lib/python2.7/site-packages/")
import cv2

###############################################################################
# Utility code
###############################################################################

def rectify(h):
    h = h.reshape((4,2))
    hnew = np.zeros((4,2), dtype=np.float32)

    add = h.sum(1)
    hnew[0] = h[np.argmin(add)]
    hnew[2] = h[np.argmax(add)]

    diff = np.diff(h, axis=1)
    hnew[1] = h[np.argmin(diff)]
    hnew[3] = h[np.argmax(diff)]

    return hnew

###############################################################################
# Image Matching
###############################################################################

def preprocess(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5,5), 2)
    thresh = cv2.adaptiveThreshold(blur, 255, 1, 1, 11, 1)
    return thresh

def imgdiff(img1, img2):
    img1 = cv2.GaussianBlur(img1, (5,5), 5)
    img2 = cv2.GaussianBlur(img2, (5,5), 5)
    diff = cv2.absdiff(img1, img2)
    diff = cv2.GaussianBlur(diff, (5,5), 5)
    flag, diff = cv2.threshold(diff, 200, 255, cv2.THRESH_BINARY)
    return np.sum(diff)

def find_closest_card(training, img):
    features = preprocess(img)
    return sorted(training.values(), key=lambda x: imgdiff(x[1], features))[0][0]

###############################################################################
# Card Extraction
###############################################################################

def getCards(im, numcards=4):
    gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (1,1), 1000)
    flag, thresh = cv2.threshold(blur, 120, 255, cv2.THRESH_BINARY)

    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:numcards]

    for card in contours:
        peri = cv2.arcLength(card, True)
        approx = rectify(cv2.approxPolyDP(card, 0.02*peri, True))

        # box = np.int0(approx)
        # cv2.drawContours(im,[box],0,(255,255,0),6)
        # imx = cv2.resize(im,(1000,600))
        # cv2.imshow('a',imx)

        h = np.array([ [0,0],[449,0],[449,449],[0,449] ], np.float32)
        transform = cv2.getPerspectiveTransform(approx, h)
        warp = cv2.warpPerspective(im, transform, (450,450))
        yield warp

def get_training(training_labels_filename, training_image_filename, num_training_cards, avoid_cards=None):
    training = {}

    labels = {}
    for line in file(training_labels_filename):
        key, num, suit = line.strip().split()
        labels[int(key)] = (num, suit)

    print "Training"

    im = cv2.imread(training_image_filename)
    for i, c in enumerate(getCards(im, num_training_cards)):
        if avoid_cards is None or (labels[i][0] not in avoid_cards[0] and labels[i][1] not in avoid_cards[1]):
            training[i] = (labels[i], preprocess(c))

    print "Done training"
    return training

if __name__ == '__main__':
    if len(sys.argv) == 6:
        filename = sys.argv[1]
        num_cards = int(sys.argv[2])
        training_image_filename = sys.argv[3]
        training_labels_filename = sys.argv[4]
        num_training_cards = int(sys.argv[5])

        training = get_training(training_labels_filename, training_image_filename, num_training_cards)

        im = cv2.imread("test")

        width = im.shape[0]
        height = im.shape[1]
        if width < height:
            im = cv2.transpose(im)
            im = cv2.flip(im, 1)

        # Debug: uncomment to see registered images
        #for i, c in enumerate(getCards(im, num_cards)):
        #    card = find_closest_card(training, c)
        #    cv2.imshow(str(card), c)
        #    cv2.waitKey(0)

        cards = [find_closest_card(training, c) for c in getCards(im, num_cards)]
        print cards
    else:
        print __doc__
Here is the entire error message:
Traceback (most recent call last):
  File "<pyshell#15>", line 1, in <module>
    preprocess("test.JPG")
  File "C:\Users\Don Ellison\Desktop\Class Programs\CSC 490 Project\Cards\Python\Playing-Card-Recognition-master\card_img.py", line 43, in preprocess
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
TypeError: src is not a numpy array, neither a scalar

You are passing the wrong argument to the preprocess function; I guess you are calling it from a Python shell.
You are not supposed to pass in an image file name, but rather a numpy array. It seems you called the preprocess function and passed in "test.JPG" in your Python shell. If you want to test your preprocess function, do:
test_img = cv2.imread("test.JPG")
preprocess(test_img)
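As a defensive sketch (not part of the original script): cv2.imread returns None instead of raising when the path is wrong, so it is worth checking the result before calling preprocess:

import cv2

test_img = cv2.imread("test.JPG")
if test_img is None:
    # imread returns None on a missing/unreadable file instead of raising
    raise IOError("could not read test.JPG - check the path")
thresh = preprocess(test_img)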

Related

TypeError: Only Size-1 Arrays Can Be Converted To Python Scalars Error

The error is "Only size-1 arrays can be converted to Python scalars". I just fixed a problem regarding the sizing, and I'm now running this code with a 320x320 model, but I'm still receiving this error. Does anyone know how to fix it? I've been getting errors on lines 45, 68, and 88, but I'm not sure what to change in them.
import re  # needed by load_labels; missing from the original listing
import cv2
from tflite_runtime.interpreter import Interpreter
import numpy as np

CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480

def load_labels(path='labels.txt'):
    """Loads the labels file. Supports files with or without index numbers."""
    with open(path, 'r', encoding='utf-8') as f:
        lines = f.readlines()
        labels = {}
        for row_number, content in enumerate(lines):
            pair = re.split(r'[:\s]+', content.strip(), maxsplit=1)
            if len(pair) == 2 and pair[0].strip().isdigit():
                labels[int(pair[0])] = pair[1].strip()
            else:
                labels[row_number] = pair[0].strip()
    return labels

def set_input_tensor(interpreter, image):
    """Sets the input tensor."""
    tensor_index = interpreter.get_input_details()[0]['index']
    input_tensor = interpreter.tensor(tensor_index)()[0]
    input_tensor[:, :] = np.expand_dims((image-255)/255, axis=0)

def get_output_tensor(interpreter, index):
    """Returns the output tensor at the given index."""
    output_details = interpreter.get_output_details()[index]
    tensor = np.squeeze(interpreter.get_tensor(output_details['index']))
    return tensor

def detect_objects(interpreter, image, threshold):
    """Returns a list of detection results, each a dictionary of object info."""
    set_input_tensor(interpreter, image)
    interpreter.invoke()

    # Get all output details
    boxes = get_output_tensor(interpreter, 0)
    classes = get_output_tensor(interpreter, 1)
    scores = get_output_tensor(interpreter, 2)
    count = int(get_output_tensor(interpreter, 3))

    results = []
    for i in range(count):
        if scores[i] >= threshold:
            result = {
                'bounding_box': boxes[i],
                'class_id': classes[i],
                'score': scores[i]
            }
            results.append(result)
    return results

def main():
    labels = load_labels()
    interpreter = Interpreter('detect.tflite')
    interpreter.allocate_tensors()
    _, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']

    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ret, frame = cap.read()
        img = cv2.resize(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), (320,320))
        res = detect_objects(interpreter, img, 0.8)
        print(res)

        for result in res:
            ymin, xmin, ymax, xmax = result['bounding_box']
            xmin = int(max(1, xmin * CAMERA_WIDTH))
            xmax = int(min(CAMERA_WIDTH, xmax * CAMERA_WIDTH))
            ymin = int(max(1, ymin * CAMERA_HEIGHT))
            ymax = int(min(CAMERA_HEIGHT, ymax * CAMERA_HEIGHT))

            cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0,255,0), 3)
            cv2.putText(frame, labels[int(result['class_id'])], (xmin, min(ymax, CAMERA_HEIGHT-20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 2, cv2.LINE_AA)

        cv2.imshow('Pi Feed', frame)

        if cv2.waitKey(10) & 0xFF == ord('q'):
            cap.release()
            cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
I've tried changing some sections in lines 45 and 88, but it gives me the same error.
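For reference, this TypeError is raised when int() (or float()) is called on a NumPy array with more than one element. One common source in detection code like this is a line such as count = int(get_output_tensor(interpreter, 3)), if the model's fourth output is not actually a scalar. A minimal reproduction:

import numpy as np

int(np.array([7.0]))       # fine: a size-1 array converts to a Python scalar
int(np.array([1.0, 2.0]))  # TypeError: only size-1 arrays can be converted to Python scalars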

Image Classifier feature detector

I have some code that I learned from this link: https://www.youtube.com/watch?v=nnH55-zD38I&t=1047s
but I get an error that says:
line 62, in <module>
    cv2.putText(imgOriginal, classNames[id], (50,50), cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,0), 1)
IndexError: list index out of range
[ WARN:0] global C:\Users\runneradmin\AppData\Local\Temp\pip-req-build-m8us58q4\opencv\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
Here is the code:
import cv2
import numpy as np
import os

path = '#2 Image descriptor\Assets\Query'
orb = cv2.ORB_create(nfeatures=1000)

# IMPORT IMAGES
images = []
classNames = []
myList = os.listdir(path)
print('Jumlah Classes = ', len(myList))
#print(myList)
for cl in myList:
    imgCur = cv2.imread(f'{path}/{cl}', 0)
    images.append(imgCur)
    classNames.append(os.path.splitext(cl)[0])
print(classNames)

def findDes(images):
    desList = []
    for img in images:
        kp, des = orb.detectAndCompute(img, None)
        desList.append(des)
    return desList

def findID(img, desList, thres=15):
    kp2, des2 = orb.detectAndCompute(img, None)
    bf = cv2.BFMatcher()
    matchList = []
    finalVal = -1
    try:
        for des in desList:
            matches = bf.knnMatch(des, des2, k=2)
            good = []
            for m, n in matches:
                if m.distance < 0.7*n.distance:
                    good.append([m])
            matchList.append(len(good))
    except:
        pass
    #print(matchList)
    if len(matchList) != 0:
        if max(matchList) > thres:
            finalVal = matchList.index(max(matchList))
    return finalVal

desList = findDes(images)
print(len(desList))

cap = cv2.VideoCapture(0)
#cap = cv2.imread('#2 Image descriptor\Assets\Train\RE8.jpg')

while True:
    success, img2 = cap.read()
    imgOriginal = img2.copy()
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
    id = findID(img2, desList)

    if id != -1:
        cv2.putText(imgOriginal, classNames[id], (50,50), cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,0), 1)

    cv2.imshow('img2', imgOriginal)
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()  # to make sure the camera is released by the code
cv2.destroyAllWindows()  # note: needs the parentheses to actually be called
I think the error is at:
imgCur = cv2.imread(f'{path}/{cl}', 0)
The problem is at this part of your code:
if id != -1:
    cv2.putText(imgOriginal, classNames[id], (50,50), cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,0), 1)
The error is basically telling you that you are trying to index the classNames list at an index that doesn't exist in the list. For example, if the classNames list is empty, doing classNames[0] will raise this error. You can try debugging like so:
if id != -1:
    if id < len(classNames):
        cv2.putText(imgOriginal, classNames[id], (50,50), cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,0), 1)
    else:
        print(classNames)
        print(f"Error: Attempted to index {id} from list of length {len(classNames)}.")
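Separately, and as an assumption about your setup rather than a certain cause: cv2.imread returns None for any file in the Query folder it cannot decode, and a None entry in images will make detectAndCompute fail later. Guarding the loading loop keeps images and classNames aligned:

for cl in myList:
    imgCur = cv2.imread(f'{path}/{cl}', 0)
    if imgCur is None:
        # imread returns None for unreadable files; skip them so the
        # images and classNames lists stay the same length
        print(f'Warning: could not read {cl}, skipping')
        continue
    images.append(imgCur)
    classNames.append(os.path.splitext(cl)[0])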

Multiprocessing a loop inside another loop

This code processes multiple PDFs one at a time. Within each PDF, it loops through each page, using OpenCV to detect whether each page is a 4-up page or a 1-up page; if it is a 4-up page, the loop appends the index of that page to a list named "ind". Since the order in which the pages are processed doesn't matter, I would like to multiprocess the OpenCV detection. But I'm a complete newb in Python, so I can't seem to get it working using the Pool function.
This is the original code (single-threaded)
from pdf2image import convert_from_path
import cv2
import numpy as np

pdffiles = 'sample.pdf'
for p in pdffiles:
    pages = convert_from_path(pdffiles)

ind = []
for i in range(len(pages)):
    page = pages[i]
    gray = np.array(page)
    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    (thresh, bw) = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    edges = cv2.Canny(bw, 0, 255)
    minLineLength = 850
    lines = cv2.HoughLinesP(image=edges, rho=5, theta=np.pi/90, threshold=1000,
                            lines=np.array([]), minLineLength=minLineLength, maxLineGap=3)
    linessub = lines[((lines[:,0,0]>750) & (lines[:,0,0]<950)) | ((lines[:,0,1]>1000) & (lines[:,0,1]<1200))]
    if len(linessub) > 1:
        ind.append(i)
print(ind)
edit: I've simplified what I posted to include just the section I'm trying to multiprocess. You should be able to run the code on the sample.pdf that I've uploaded here; on this file, the ind that prints should be [1,2]:
https://www.dropbox.com/s/g2dvpex7njvon6r/sample.pdf?dl=0
There are two ways you can process things simultaneously: multiprocessing and threading. You'd have to try which works better for you.
Threading
This is a basic example to help you on your way.
import threading

results = []
threads = []

def task(arg):
    results.append(arg)

for i in range(10):
    t = threading.Thread(target=task, args=(i,))  # pass the argument as a tuple
    threads.append(t)
    t.start()

for t in threads:
    t.join()  # wait for all threads before reading results

print(results)
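One caveat with threading for this workload: CPU-bound Python code is serialized by the GIL, so pure-Python page processing may not speed up much with threads. OpenCV releases the GIL inside many of its calls, so you can still see gains, but if you don't, the multiprocessing variant below sidesteps the GIL entirely.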
Below is an example of this applied to your code. I couldn't run your example, so this isn't tested.
import os
import math
import threading
import timeit

import cv2
import numpy as np
from pdf2image import convert_from_path

curdir = os.path.dirname(os.path.realpath(__file__))
os.chdir(curdir)
files = os.listdir(curdir)

if os.environ.get('OS','') == 'Windows_NT':
    dstdir = os.path.join(curdir, '1up\\')
else:
    dstdir = os.path.join(curdir, '1up/')
if not os.path.exists(dstdir):
    os.makedirs(dstdir)

pdffiles = [f for f in files if f.endswith('.pdf')]

ind = []

def detect_4up(pages):  # Python identifiers cannot start with a digit, so "4up_detect" is renamed
    gray = np.array(pages)
    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    (thresh, bw) = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    edges = cv2.Canny(bw, 0, 255)
    minLineLength = 1100
    lines = cv2.HoughLinesP(image=edges, rho=5, theta=np.pi/90, threshold=1000,
                            lines=np.array([]), minLineLength=minLineLength, maxLineGap=3)
    linessub = lines[((lines[:,0,0]>750) & (lines[:,0,0]<950)) | ((lines[:,0,1]>1000) & (lines[:,0,1]<1200))]
    if len(linessub) > 1:
        ind.append(pages)

threads = []
for p in pdffiles:
    pages = convert_from_path(p)
    t = threading.Thread(target=detect_4up, args=[pages])
    threads.append(t)
    t.start()

for t in threads:
    t.join()  # make sure all pages are processed before using ind

startpg = min(ind)
endpg = max(ind)

page = pages[startpg]
image = np.array(page)
height = int(math.floor(image.shape[0])/2)
width = int(math.floor(image.shape[1])/2)
Multiprocessing
With multiprocessing you want to add a shared variable to pass the results back.
A minimal working example can look like this:
import multiprocessing

def task(arg, results):
    '''worker function'''
    results.append(arg)

manager = multiprocessing.Manager()
results = manager.list()

procs = []
for i in range(10):
    p = multiprocessing.Process(target=task, args=(i, results))
    procs.append(p)
    p.start()

for i in procs:
    i.join()

print(results)
This is roughly how that could look in your code. Again, however, I couldn't run it:
import os
import math
import multiprocessing
import timeit

import cv2
import numpy as np
from pdf2image import convert_from_path

def detect_4up(pages, results):  # renamed: Python identifiers cannot start with a digit
    gray = np.array(pages)
    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    (thresh, bw) = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    edges = cv2.Canny(bw, 0, 255)
    minLineLength = 1100
    lines = cv2.HoughLinesP(image=edges, rho=5, theta=np.pi/90, threshold=1000,
                            lines=np.array([]), minLineLength=minLineLength, maxLineGap=3)
    linessub = lines[((lines[:,0,0]>750) & (lines[:,0,0]<950)) | ((lines[:,0,1]>1000) & (lines[:,0,1]<1200))]
    if len(linessub) > 1:
        results.append(pages)

curdir = os.path.dirname(os.path.realpath(__file__))
os.chdir(curdir)
files = os.listdir(curdir)

if os.environ.get('OS','') == 'Windows_NT':
    dstdir = os.path.join(curdir, '1up\\')
else:
    dstdir = os.path.join(curdir, '1up/')
if not os.path.exists(dstdir):
    os.makedirs(dstdir)

pdffiles = [f for f in files if f.endswith('.pdf')]

manager = multiprocessing.Manager()
ind = manager.list()

procs = []
for pdf in pdffiles:
    pages = convert_from_path(pdf)
    proc = multiprocessing.Process(target=detect_4up, args=(pages, ind))  # was target=task
    procs.append(proc)
    proc.start()

for proc in procs:
    proc.join()

startpg = min(ind)
endpg = max(ind)

page = pages[startpg]
image = np.array(page)
height = int(math.floor(image.shape[0])/2)
width = int(math.floor(image.shape[1])/2)
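Since the question mentions Pool specifically: multiprocessing.Pool is often the simplest fit here, because pool.map hands each page to a worker and collects the per-page results in order, with no shared list needed. A sketch built around the single-file code above (untested against your PDFs; is_4up just wraps your detection logic in a function that returns a flag):

import multiprocessing

import cv2
import numpy as np
from pdf2image import convert_from_path

def is_4up(page_array):
    """Return True if a rendered page looks like a 4-up page."""
    gray = cv2.cvtColor(page_array, cv2.COLOR_BGR2GRAY)
    thresh, bw = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    edges = cv2.Canny(bw, 0, 255)
    lines = cv2.HoughLinesP(image=edges, rho=5, theta=np.pi/90, threshold=1000,
                            lines=np.array([]), minLineLength=850, maxLineGap=3)
    if lines is None:  # HoughLinesP returns None when no lines are found
        return False
    linessub = lines[((lines[:,0,0] > 750) & (lines[:,0,0] < 950)) |
                     ((lines[:,0,1] > 1000) & (lines[:,0,1] < 1200))]
    return len(linessub) > 1

if __name__ == '__main__':  # required for multiprocessing on Windows
    pages = convert_from_path('sample.pdf')
    arrays = [np.array(p) for p in pages]  # PIL pages -> numpy arrays
    with multiprocessing.Pool() as pool:
        flags = pool.map(is_4up, arrays)   # one worker call per page, order preserved
    ind = [i for i, flag in enumerate(flags) if flag]
    print(ind)  # should print [1, 2] for the sample.pdf above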

How can I convert from JSON to png in a dataset labeled with labelbox?

I have a JSON file with the following structure:

{'featureId': 'ckek0ugf2061y0ybwgunbdrt5',
 'schemaId': 'ckek0jkvp081j0yaec2ap9a3w',
 'title': 'Tree',
 'value': 'tree',
 'color': '#FFFF00',
 'instanceURI': 'https://api.labelbox.com/masks/feature/ckek0ugf2061y0ybwgunbdrt5?token=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...'}
The instanceURI is one tree that I segmented from the original image using Labelbox (https://labelbox.com/). I am using PSPNet-cityscapes, and that model requires a mask in PNG format for the validation stage. Some images have several trees (several instance URIs).
How can I convert this JSON element into a PNG image?
Not the fastest or most beautiful script, but it works for me...
from PIL import Image, ImageColor, ImageDraw
from PIL import UnidentifiedImageError
import requests
import json
import argparse
import pathlib
import os.path

def manual_classes():
    """
    Change your preferred color-coding below.
    If you want to use manual coloring, you also need to change the label classes (title).
    """
    manual_dict = {
        'Tree': 255,
        'Flower': 85,
    }
    return manual_dict

def open_img(url):
    try:
        return Image.open(requests.get(url, stream=True).raw)
    except UnidentifiedImageError:
        return None

def open_json(path):
    with open(path) as file:
        return json.load(file)

def color_extractor(data, color_coding):
    """Takes the given dictionary part and extracts all needed information. Also returns colors for 3 different types."""
    if color_coding == 'auto':
        color = ImageColor.getcolor(data['color'], 'RGBA')
    elif color_coding == 'manual':
        color = (manual_classes()[data['title']], manual_classes()[data['title']], manual_classes()[data['title']], 255)
    elif color_coding == 'binar':
        color = (255,255,255,255)
    else:
        print('no valid color-code detected - continuing with binarized labels.')
        color = (255,255,255,255)
    return color

def img_color(img, color):
    """Change the color of the label accordingly."""
    if color == (255,255,255,255):
        return img
    img = img.convert('RGBA')
    width, height = img.size
    for x in range(width):
        for y in range(height):
            if img.getpixel((x,y)) == (255,255,255,255):
                img.putpixel((x,y), color)
    return img

def img_draw_polygon(size, polygon, color):
    """Draw polygons on an image."""
    img = Image.new('RGBA', size, (0,0,0,0))
    img = img.convert('RGBA')
    draw = ImageDraw.Draw(img)
    # read points
    points = []
    for i in range(len(polygon)):
        points.append((int(polygon[i]['x']), int(polygon[i]['y'])))
    draw.polygon(points, fill=(color))
    return img

def progressBar(current, total, barLength=20):
    percent = float(current) * 100 / total
    arrow = '-' * int(percent/100 * barLength - 1) + '>'
    spaces = ' ' * (barLength - len(arrow))
    print('Progress: [%s%s] %d %%' % (arrow, spaces, percent), end='\r')

def main(input_dir, output_dir, color_type='auto'):
    if os.path.exists(input_dir) and os.path.exists(output_dir) and color_type in ['auto', 'manual', 'binar']:
        input_path = pathlib.Path(input_dir)
        label_paths_sorted = sorted(list(input_path.glob("*.json")))
        for image_path in label_paths_sorted:
            print('converting: {}'.format(os.path.basename(image_path)))
            # open json file
            data = open_json(image_path)
            # create image list for labels
            img_list = []
            # read original image
            original_img = open_img(data[0]['Labeled Data'])
            try:
                width, height = original_img.size
            except Exception:
                print('Original image data not callable. Please provide image width and height.')
            for i in range(len(data[0]['Label']['objects'])):
                # read path and open image
                img = open_img(data[0]['Label']['objects'][i]['instanceURI'])
                # if the URL is not readable, try to read polygon data points instead
                if img is not None:
                    img = img_color(img, color_extractor(data[0]['Label']['objects'][i], color_type))
                    img_list.append(img)
                else:
                    try:
                        # img = img_draw_polygon(img, data[0]['Label']['objects'][i]['polygon'], data[0]['Label']['objects'][i]['title'])
                        img = img_draw_polygon((width, height), data[0]['Label']['objects'][i]['polygon'], color_extractor(data[0]['Label']['objects'][i], color_type))
                        img_list.append(img)
                    except Exception:
                        print('Note: There are no available polygon-data-points & web-data-information for Label #{}.'.format(i))
                # print current progress status
                progressBar(i, len(data[0]['Label']['objects']))
            img = img_list[0]
            for i in range(1, len(img_list)):
                img.paste(img_list[i], (0,0), mask=img_list[i])
            img.save(output_dir + os.path.basename(image_path).replace('.json', '.png'))
    else:
        print('One of your given inputs is incorrect - please try again.')

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="convert annotations from labelbox2png")
    parser.add_argument("--input", help="input-directory")
    parser.add_argument("--output", help="output-directory")
    parser.add_argument("--color", help="binar, auto or manual")
    args = parser.parse_args()
    main(args.input, args.output, args.color)
To run it, just save this Python script and execute it from your command line:
C:\Users>python script.py --input input_directory/ --output output_directory --color auto
With the --color input you can modify the color-coding of your labels: auto takes the colors from the JSON, manual requires you to edit the dictionary in manual_classes, and binar white-labels everything.
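If you only need one mask rather than the whole batch pipeline, a minimal sketch is to fetch the instanceURI directly and save it (assuming the signed token embedded in the URL is still valid; the URI below is the truncated one from the question):

from PIL import Image
import requests

# truncated instanceURI from the question - substitute your full signed URL
instance_uri = 'https://api.labelbox.com/masks/feature/ckek0ugf2061y0ybwgunbdrt5?token=...'

mask = Image.open(requests.get(instance_uri, stream=True).raw)
mask.save('tree_mask.png')  # one PNG mask per segmented instance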

This is my code for recognition of faces. I am getting the following error. I am using the dlib shape predictor.

align = openface.AlignDlib(dlibFacePredictor)
subject = input("Enter Subject:")
a = ['BI','SIC','PGIS','SQA','ITSM']
path = 'C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/images/'
if subject in a:
    print("Success")
    wbook = load_workbook(filename = "C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/Attendance/"+subject+".xlsx")
    sheet = wbook.get_sheet_by_name('TYBSCIT'+subject)
else:
    print("Invalid")

def getDateColumns():
    for i in range(1, len(sheet.rows[0]) + 1):
        cols = get_column_letter(i)
        if sheet.cell('%s%s' % (col,'1')).value == currentDate:
            return cols

def getProfileId(Ids):
    connect = sqlite3.connect("C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/sqlite3/Studentdb.db")
    cmd = "SELECT * FROM Students WHERE ID=" + str(Ids)
    cursor = connect.execute(cmd)
    profile = None
    for row in cursor:
        profile = row
    connect.close()
    return profile

attend = [0 for i in range(60)]
rec = cv2.face.LBPHFaceRecognizer_create()  # Local Binary Patterns Histograms
rec.read('C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/Training/trainingData.yml')  # loading the trained data
picNumber = 2
image = cv2.imread('C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/Implementation/PYTHON FILES/images/'+subject+currentDate+'.jpg')
font = cv2.FONT_HERSHEY_SIMPLEX  # the font of text on face recognition
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # converting the camera input into grayscale
dets = detector(image, 1)
totalConf = 0.0
faceRec = 0
for i, d in enumerate(dets):
    image2 = image[d.top():d.bottom(), d.left():d.right()]
    rgbImg = cv2.cvtColor(image2, cv2.COLOR_BGR2RGB)
    bb = align.getLargestFaceBoundingBox(rgbImg)
    alignedFace = align.align(96, rgbImg, bb=None, landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
    alignedFace = cv2.cvtColor(alignedFace, cv2.COLOR_BGR2GRAY)  # converting the camera input into grayscale
    Ids, conf = rec.predict(alignFace)  # Comparing with the trained data
Traceback (most recent call last):
  File "C:\Users\ACER\Desktop\PROJECT ALL RESOURCE\Implementation\PYTHON FILES\facerecognition.py", line 60, in <module>
    Ids, conf = rec.predict(alignFace)  # Comparing with the trained data
NameError: name 'alignFace' is not defined
As the traceback states, alignFace is not defined. You have made a typo of your previously defined alignedFace - use that instead.
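That is, the failing line should read:

Ids, conf = rec.predict(alignedFace)  # use the variable actually defined above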
