UnboundLocalError: local variable 'x1' referenced before assignment - python

import numpy as np
from PIL import ImageGrab
import cv2
import time
import pyautogui
import matplotlib.pyplot as plt
def make_coords(img,line_param):
    slope,intercept=line_param
    y1 = img.shape[0]
    y2 = int((y1*(3/5)))
    x1 = int((y1-intercept)/slope)
    x2 = int((y2-intercept)/slope)
    try:
        return np.array((x1,y1,x2,y2)) #HERE IS WHERE THE PROBLEM HAPPENS
    except UnboundLocalError:
        pass
def avg_slope(img,lines):
    left_fit =[]
    right_fit=[]
    if lines is not None:
        for line in lines:
            x1,y1,x2,y2=line.reshape(4)
            parameters = np.polyfit((x1,x2),(y1,y2),1)
            try:
                slope = parameters[0]
            except TypeError:
                slope = 0
            try:
                intercept = parameters[1]
            except TypeError:
                intercept = 0
            if slope <0:
                left_fit.append((slope,intercept))
            else:
                right_fit.append((slope,intercept))
        if left_fit:
            left_fit_avg=np.average(left_fit,axis=0)
            left_line=make_coords(img,left_fit_avg)
        if right_fit:
            right_fit_avg=np.average(right_fit,axis=0)
            right_line=make_coords(img,right_fit_avg)
    return np.array((x1,y1,x2,y2))
def draw_lines(img, lines):
    try:
        for line in lines:
            if line is not None:
                coords = line[0]
                cv2.line(img, (coords[0],coords[1]), (coords[2],coords[3]), [255,0,0], 3)
    except:
        pass

def roi(img):
    vertices = np.array([[10,500],[10,300], [300,200], [500,200], [800,300], [800,500]], np.int32)
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, [vertices], 255)
    masked = cv2.bitwise_and(img, mask)
    return masked
def process_img(image):
    original_image = image
    # convert to gray
    processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # edge detection
    processed_img = cv2.GaussianBlur(processed_img,(5,5),0) #new
    processed_img = cv2.Canny(processed_img, threshold1 = 50, threshold2=150) #new
    # processed_img = cv2.Canny(processed_img, threshold1 = 200, threshold2=300)
    lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, np.array([]), minLineLength=15,maxLineGap=5)
    avg_lines = avg_slope(processed_img,lines)
    draw_lines(process_img,avg_lines)
    processed_img = roi(processed_img)
    return processed_img
def main():
    last_time = time.time()
    while True:
        screen = np.array(ImageGrab.grab(bbox=(0,40,800,640)))
        if screen is not None:
            new_screen = process_img(screen)
            print('Frame took {} seconds'.format(time.time()-last_time))
            cv2.imshow('window', new_screen)
        else:
            pass
        last_time = time.time()
        # plt.imshow(new_screen)
        # cv2.imshow('window',cv2.cvtColor(screen, cv2.COLOR_BGR2RGB))
        # cv2.waitKey(0)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break

main()
THE TERMINAL SHOWS:

Frame took 0.12310576438903809 seconds
Traceback (most recent call last):
  File "c:/Users/Nicole/Documents/Python Scripts/matetest.py", line 107, in <module>
    main()
  File "c:/Users/Nicole/Documents/Python Scripts/matetest.py", line 91, in main
    new_screen = process_img(screen)
  File "c:/Users/Nicole/Documents/Python Scripts/matetest.py", line 78, in process_img
    avg_lines = avg_slope(processed_img,lines)
  File "c:/Users/Nicole/Documents/Python Scripts/matetest.py", line 50, in avg_slope
    return np.array((x1,y1,x2,y2))
UnboundLocalError: local variable 'x1' referenced before assignment
... even though I'm doing ...

    try:
        return np.array((x1,y1,x2,y2))
    except UnboundLocalError:
        pass

Your error is actually not occurring where you say it is. Looking at the traceback, you can see that the error occurs in the function avg_slope.
It is most likely because you use return np.array((x1,y1,x2,y2)) while, in that function, you only assign those variables inside an if statement. If the if block is skipped (when lines is None), then x1, x2, y1 and y2 are never assigned in the function, so you can't return something that depends on them. The interpreter prevents you from doing this.
You can learn a lot by reading the error message carefully: "local variable referenced before assignment" is, in a nutshell, what I explained above.
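The same situation can be reproduced in a few lines (a made-up toy example, not your script):

def f(flag):
    if flag:
        x = 1        # x is only assigned when this branch runs
    return x         # UnboundLocalError if the branch was skipped

f(False)  # raises UnboundLocalError: local variable 'x' referenced before assignment

Your try/except in make_coords doesn't help because the failing return is the one at the bottom of avg_slope, which isn't wrapped in a try at all.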

Your problem is here:

def avg_slope(img,lines):
    left_fit =[]
    right_fit=[]
    if lines is not None:
        for line in lines:
            x1,y1,x2,y2=line.reshape(4)

If lines is "falsey" (empty or None), you never assign to x1.
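One way around it, as a sketch (assuming the caller can cope with a None result; your draw_lines already skips lines that are None), is to bail out early and return only what was actually computed:

def avg_slope(img, lines):
    left_fit = []
    right_fit = []
    if lines is None:
        return None                      # nothing detected this frame
    for line in lines:
        x1, y1, x2, y2 = line.reshape(4)
        slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
        if slope < 0:
            left_fit.append((slope, intercept))
        else:
            right_fit.append((slope, intercept))
    result = []
    if left_fit:
        result.append(make_coords(img, np.average(left_fit, axis=0)))
    if right_fit:
        result.append(make_coords(img, np.average(right_fit, axis=0)))
    # note: draw_lines() indexes line[0], so you may need to wrap each entry
    # (or adjust draw_lines) depending on the shape you want to keep
    return np.array(result) if result else None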

Related

'tuple' object is not callable

I am trying to convert any .png images with a transparent background to a white background.
However, I am getting an error that says 'tuple' object is not callable.
I have tried this:
def transparent_to_white(img):
    color = (255, 255, 255)
    for x in range(img.size()):
        for y in range(img.size()):
            r, g, b, a = img.getpixel((x, y))
            if a == 0:
                img.putpixel((x, y), color)
    return img
but I get this error:
Original Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/torch/utils/data/_utils/worker.py", line 302, in _worker_loop
data = fetcher.fetch(index)
File "/usr/local/lib/python3.8/dist-packages/torch/utils/data/_utils/fetch.py", line 58, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/usr/local/lib/python3.8/dist-packages/torch/utils/data/_utils/fetch.py", line 58, in <listcomp>
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/content/gdrive/My Drive/All_Deep_Learning/PythonCustomLibraries/pix2pixdatasetlib.py", line 49, in __getitem__
y_label = self.resize(transparent_to_white(y_label))
File "/content/gdrive/My Drive/All_Deep_Learning/PythonCustomLibraries/pix2pixdatasetlib.py", line 33, in transparent_to_white
for x in range(img.size()):
TypeError: 'tuple' object is not callable
I am calling it in my dataset class:
class Pix2PixDataset(Dataset):
    def __init__(self, data_points, transforms=None):
        self.data_points = data_points
        self.transforms = transforms
        self.resize = T.Resize((512,512))

    def __getitem__(self, index):
        image, y_label = process_images(self.data_points[index].reference_image, self.data_points[index].drawing)
        image = self.resize(image)
        y_label = self.resize(transparent_to_white(y_label))
        if self.transforms:
            image = self.transforms(image)
            y_label = self.transforms(y_label)
        return (image, y_label)

    def __len__(self):
        return len(self.data_points)
I tried removing the parentheses, but that did not help; I still get the same error:
TypeError: Caught TypeError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/torch/utils/data/_utils/worker.py", line 302, in _worker_loop
data = fetcher.fetch(index)
File "/usr/local/lib/python3.8/dist-packages/torch/utils/data/_utils/fetch.py", line 58, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/usr/local/lib/python3.8/dist-packages/torch/utils/data/_utils/fetch.py", line 58, in <listcomp>
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/content/gdrive/My Drive/All_Deep_Learning/PythonCustomLibraries/pix2pixdatasetlib.py", line 49, in __getitem__
y_label = self.resize(transparent_to_white(y_label))
File "/content/gdrive/My Drive/All_Deep_Learning/PythonCustomLibraries/pix2pixdatasetlib.py", line 33, in transparent_to_white
for x in range(img.size()):
TypeError: 'tuple' object is not callable
Disclaimer: I'm assuming img is an instance of the Image class from the module PIL or its fork, Pillow.
img.size is a tuple, not a method. For example, if you do:
print(img.size)
it prints a tuple of (width, height).
So, your code could be
def transparent_to_white(img):
    color = (255, 255, 255)
    width, height = img.size            # unpacking width/height beforehand
    for x in range(width):              # using unpacked values in range
        for y in range(height):         # same as above
            r, g, b, a = img.getpixel((x, y))
            if a == 0:
                img.putpixel((x, y), color)
    return img
Or, alternatively, you could store x and y into a tuple of coordinates, to simplify passing it around:
def transparent_to_white(img):
    color = (255, 255, 255)
    width, height = img.size            # unpacking width/height beforehand
    for x in range(width):              # using unpacked values in range
        for y in range(height):         # same as above
            coords = (x, y)             # tuple of coordinates
            r, g, b, a = img.getpixel(coords)   # used here
            if a == 0:
                img.putpixel(coords, color)     # and here
    return img
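As an aside, per-pixel loops are slow in PIL; if img really is an RGBA Image, a sketch of the same conversion in one call is to paste it onto a white canvas using its own alpha channel as the mask:

from PIL import Image

def transparent_to_white(img):
    background = Image.new("RGB", img.size, (255, 255, 255))  # solid white canvas
    background.paste(img, mask=img.split()[3])                # alpha band as the paste mask
    return background

Note this returns an RGB image, which may matter if later code expects four channels.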

TypeError: create_bool(): incompatible function arguments - mediapipe, cv2

I made a Python program to detect faces with mediapipe and OpenCV (following this tutorial: https://www.youtube.com/watch?v=01sAkU_NvOY&t=7775s). When I run it, it returns with errors. I have tried different fixes, but they all seem not to work. Thanks in advance.
This is my code:
import time
import cv2
import mediapipe as mp

class FaceMeshDetector():
    def __init__(self, staticMode=False, maxFaces=2, minDetectionCon=0.5, minTrackCon=0.5):
        self.staticMode = staticMode
        self.maxFaces = maxFaces
        self.minDetectionCon = minDetectionCon
        self.minTrackCon = minTrackCon
        self.mpDraw = mp.solutions.drawing_utils
        self.mpFaceMesh = mp.solutions.face_mesh
        self.faceMesh = self.mpFaceMesh.FaceMesh(self.staticMode, self.maxFaces, self.minDetectionCon, self.minTrackCon)
        self.drawSpec = self.mpDraw.DrawingSpec(thickness=1, circle_radius=1)

    def findFaceMesh(self, img, draw=True):
        self.imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.faceMesh.process(self.imgRGB)
        faces = []
        if self.results.multi_face_landmarks:
            for faceLms in self.results.multi_face_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, self.faceLms, self.mpFaceMesh.FACE_CONNECTIONS,
                                               self.drawSpec, self.drawSpec)
                face = []
                for id, lm in enumerate(faceLms.landmark):
                    # print(lm)
                    ih, iw, ic = img.shape
                    x, y = int(lm.x*iw), int(lm.y*ih)
                    # print(id, x, y)
                    face.append([x, y])
                faces.append(face)
        return img, faces

def main():
    cap = cv2.VideoCapture(0)
    pTime = 0
    detector = FaceMeshDetector()
    while True:
        success, img = cap.read()
        img, faces = detector.findFaceMesh(img)
        if len(faces) != 0:
            print(faces)
        cTime = time.time()
        fps = 1/(cTime-pTime)
        pTime = cTime
        cv2.putText(img, f'FPS: {int(fps)}', (20, 70), cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)

if __name__ == "__main__":
    main()
This is the full error message:
Traceback (most recent call last):
File "c:\Users\noahb\OneDrive\Programming\ACVwP\Ch. 4 - Face Mesh\FaceMeshModule.py", line 58, in <module>
main()
File "c:\Users\noahb\OneDrive\Programming\ACVwP\Ch. 4 - Face Mesh\FaceMeshModule.py", line 44, in main
detector = FaceMeshDetector()
File "c:\Users\noahb\OneDrive\Programming\ACVwP\Ch. 4 - Face Mesh\FaceMeshModule.py", line 18, in __init__
self.faceMesh = self.mpFaceMesh.FaceMesh(self.staticMode, self.maxFaces, self.minDetectionCon, self.minTrackCon)
File "C:\Users\noahb\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\mediapipe\python\solutions\face_mesh.py", line 94, in __init__
super().__init__(
File "C:\Users\noahb\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\mediapipe\python\solution_base.py", line 258, in __init__
self._input_side_packets = {
File "C:\Users\noahb\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\mediapipe\python\solution_base.py", line 259, in <dictcomp>
name: self._make_packet(self._side_input_type_info[name], data)
File "C:\Users\noahb\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\mediapipe\python\solution_base.py", line 513, in _make_packet
return getattr(packet_creator, 'create_' + packet_data_type.value)(data)
TypeError: create_bool(): incompatible function arguments. The following argument types are supported:
1. (arg0: bool) -> mediapipe.python._framework_bindings.packet.Packet
Invoked with: 0.5
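The last two lines of the traceback are the clue: create_bool() received 0.5, i.e. a float ended up in a parameter that mediapipe expects to be a bool. Newer mediapipe versions added a refine_landmarks parameter to FaceMesh, so positional arguments from older tutorials shift by one. A sketch of the usual fix, assuming such a version, is to pass everything by keyword in __init__:

# sketch, assuming a mediapipe version whose FaceMesh signature includes refine_landmarks
self.faceMesh = self.mpFaceMesh.FaceMesh(
    static_image_mode=self.staticMode,
    max_num_faces=self.maxFaces,
    min_detection_confidence=self.minDetectionCon,
    min_tracking_confidence=self.minTrackCon,
)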

Through this piece of code I want to collect samples of faces, but it is giving an error.

import cv2
import numpy

face_classifier = cv2.CascadeClassifier('C:/Users/my pc/AppData/Local/Programs/Python/Python38/Lib/site-packages/cv2/data/haarcascade_frontalface_default.xml')

def face_extractor(img):  # to extract face feature
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_classifier.detectMultiScale(gray, 1, 3, 5)
    if faces is():
        # if there is no face on screen
        return None
    for (x, y, w, h) in faces:  # if face present, crop face, return it
        cropped_face = img[y:y+h, x:x+w]
    return cropped_face

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)  # to open camera
count = 0

while True:
    ret, frame = cap.read()
    if face_extractor(frame) is not None:
        count += 1
        face = cv2.resize(face_extractor(frame), (200, 200))
        face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
        file_name_path = 'C:/Users/my pc/faces/user' + str(count) + '.jpg'
        cv2.imwrite(file_name_path, face)
        cv2.putText(face, str(count), (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)  # count how many pics clicked
        cv2.imshow('face cropper', face)
    else:
        print('face not found')
        pass
    if cv2.waitKey1 == 13 or count == 100:
        break

cap.release()
cv2.destroyAllWindows()
print('collecting samples complete')
Below is the error:

face.py:9: SyntaxWarning: "is" with a literal. Did you mean "=="?
    if faces is():
Traceback (most recent call last):
File "face.py", line 24, in <module>
if face_extractor(frame) is not None:
File "face.py", line 7, in face_extractor
faces= face_classifier.detectMultiScale(gray,1,3,5)
cv2.error: OpenCV(4.4.0) C:\Users\appveyor\AppData\Local\Temp\1\pip-req-build-j8nxabm_\opencv\modules\objdetect\src\cascadedetect.cpp:1689: error: (-215:Assertion failed) !empty() in function 'cv::CascadeClassifier::detectMultiScale'
The error message points at the line if faces is(): because comparing against a literal with is is wrong in Python (hence the SyntaxWarning).
detectMultiScale() gives a sequence of detected objects, and you can check whether it is empty:

if len(faces) == 0:
    return None

or, more readably:

if not faces:
    return None
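Note that the traceback itself reports a different failure: the -215 assertion !empty() in detectMultiScale usually means the cascade XML was never loaded, so the classifier is empty. A quick check, sketched with the haarcascades path bundled with opencv-python rather than the hard-coded one:

import cv2

# load the cascade from the path shipped with the opencv-python package
face_classifier = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# if the XML could not be read, the classifier is empty and detectMultiScale will assert
if face_classifier.empty():
    raise IOError('could not load haarcascade_frontalface_default.xml')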

opencv TypeError: 'interpolation' is an invalid keyword argument for this function

An error occurred when I tried to resize an image with "interpolation=cv2.INTER_CUBIC". I don't know what happened. I was just following the guide at https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.html?highlight=resize
# from PIL import Image
import cv2

def read_img(frompath):
    # return Image.open(frompath)
    return cv2.imread(frompath)

def resize_one(img, size, outpath):
    # out = img.resize(size)
    # out.save(outpath)
    out = cv2.resize(img, size)
    cv2.imwrite(outpath, out, interpolation=cv2.INTER_CUBIC)

def resize_all(img, tasks):
    for (size, outpath) in tasks:
        resize_one(img, size, outpath)

def build_tasks(prefix, sizes):
    t = []
    for (x, y) in sizes:
        t.append(((x, y), prefix + '_' + str(x) + '_' + str(y) + '.png'))
    return t

def square_tasks(widths):
    t = []
    for w in widths:
        t.append((w, w))
    return t

def main():
    s = [72, 48, 96, 144, 192]
    p = 'logo'
    i = './logo_1280.png'
    t = build_tasks(p, square_tasks(s))
    img = read_img(i)
    resize_all(img, t)

if __name__ == '__main__':
    main()
libpng warning: iCCP: known incorrect sRGB profile
Traceback (most recent call last):
File ".\main.py", line 39, in <module>
main()
File ".\main.py", line 36, in main
resize_all(img, t)
File ".\main.py", line 16, in resize_all
resize_one(img, size, outpath)
File ".\main.py", line 12, in resize_one
cv2.imwrite(outpath, out, interpolation=cv2.INTER_CUBIC)
TypeError: 'interpolation' is an invalid keyword argument for this function
I'm using opencv-contrib-python 4.2.0.34
You have to pass interpolation to resize instead of to imwrite:

def resize_one(img, size, outpath):
    # out = img.resize(size)
    # out.save(outpath)
    out = cv2.resize(img, size, interpolation=cv2.INTER_CUBIC)
    cv2.imwrite(outpath, out)
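For what it's worth, the optional third argument of cv2.imwrite is a list of encoder parameters, not resampling options, which is why interpolation is rejected there. For example (the filename here is just one of the outputs your build_tasks would generate):

# imwrite's params control the encoder, e.g. PNG compression level 0-9
cv2.imwrite('logo_72_72.png', out, [cv2.IMWRITE_PNG_COMPRESSION, 3])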

Error when flipping an image horizontally in Python

I need to flip a picture horizontally without using the reverse function. I thought I had it right, but the error I get is:
Traceback (most recent call last):
File "<pyshell#9>", line 1, in <module>
Flip("bm.gif","bm.ppm")
File "C:\Users\....ImageProcessingSKLT.py", line 133, in Flip
pic1 = graphics.getPixel(x,y)
AttributeError: 'module' object has no attribute 'getPixel'
The code I have is:

def Flip(image1, image2):
    img = graphics.Image(graphics.Point(0, 0), image1)
    X = img.getWidth()
    Y = img.getHeight()
    for y in range(Y//2):
        for x in range(X):
            pic1 = graphics.getPixel(x,y)
            pic2 = graphics.setPixel(X-x,y)
            temp = graphics.getColor(pic1)
            graphics.setColor(pic1,getColor(pic2))
            graphics.setColor(pic2,temp)
    image2 = pic2
    return image2
What does the error mean, and how do I fix it?
pic1 = graphics.getPixel(x,y)
pic2 = graphics.setPixel(X-x,y)
Probably should be:
pic1 = img.getPixel(x,y)
pic2 = img.setPixel(X-x,y)
The interpreter is complaining that it can't find the getPixel function inside the module graphics; it's img.getPixel, not graphics.getPixel.
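Beyond the attribute fix, the loops also need adjusting for a horizontal flip: the original iterates range(Y//2), which is the pattern for a vertical flip, and X-x goes out of range when x is 0. A sketch of the whole function, assuming Zelle's graphics module (where getPixel returns a list [r, g, b] and setPixel takes a color such as one from color_rgb):

import graphics

def Flip(image1, image2):
    img = graphics.Image(graphics.Point(0, 0), image1)
    X = img.getWidth()
    Y = img.getHeight()
    for y in range(Y):                       # every row
        for x in range(X // 2):              # swap pixels across the vertical midline
            r1, g1, b1 = img.getPixel(x, y)
            r2, g2, b2 = img.getPixel(X - 1 - x, y)
            img.setPixel(x, y, graphics.color_rgb(r2, g2, b2))
            img.setPixel(X - 1 - x, y, graphics.color_rgb(r1, g1, b1))
    img.save(image2)                         # write the flipped copy to the output file
    return img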
