Cannot get pixel RGB values from a window in the background using win32 - Python

Using the code below, I get correct output:
import time
import win32ui
import win32gui

pos = [253, 565]
pos2 = [357, 425]

def GetPixelRGBColor(nazwa, pos):
    okno = win32gui.FindWindow(None, nazwa)
    print(okno)
    rect = win32gui.GetWindowRect(okno)
    w = abs(rect[2] - rect[0])
    h = abs(rect[3] - rect[1])
    hwndDC = win32gui.GetWindowDC(okno)
    mfcDC = win32ui.CreateDCFromHandle(hwndDC)
    saveDC = mfcDC.CreateCompatibleDC()
    saveBitMap = win32ui.CreateBitmap()
    saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
    saveDC.SelectObject(saveBitMap)
    ret = win32gui.GetPixel(hwndDC, pos[0], pos[1])
    r, g, b = ret & 0xff, (ret >> 8) & 0xff, (ret >> 16) & 0xff
    mfcDC.DeleteDC()
    saveDC.DeleteDC()
    win32gui.ReleaseDC(okno, hwndDC)
    win32gui.DeleteObject(saveBitMap.GetHandle())
    return [r, g, b]

while True:
    print(GetPixelRGBColor('Wyświetlanie na pełnym ekranie (podgląd)', pos2))
    time.sleep(1)
    print(GetPixelRGBColor('Wyświetlanie na pełnym ekranie (podgląd)', pos))
Output:
1574752
[43, 45, 54]
1574752
[46, 49, 56]
1574752
[43, 45, 54]
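For reference, GetPixel returns a Windows COLORREF, which packs the channels as 0x00BBGGRR; the bit masking in the function above unpacks it. A quick worked example (hypothetical pure-red pixel, just to illustrate the unpacking):

ret = 0x0000FF  # COLORREF for red: blue = 0x00, green = 0x00, red = 0xFF
r, g, b = ret & 0xff, (ret >> 8) & 0xff, (ret >> 16) & 0xff
print(r, g, b)  # prints: 255 0 0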
But with the code below:
def healer(self):
    global HPButtonStan
    HP = config['USER']['hpcoord'].partition(', ')
    coordy = [int(HP[0]), int(HP[2])]
    print(coordy)
    print(type(coordy))
    print(type(coordy[0]))
    print(type(coordy[1]))
    obsfull = config['USER']['obsscreen']
    print(obsfull)
    while HPButtonStan == True:
        time.sleep(0.95)  # 255, 122, 122
        # -------------------------------------------
        okno = win32gui.FindWindow(None, 'Wyświetlanie na pełnym ekranie (podgląd)')
        print(okno)
        rect = win32gui.GetWindowRect(okno)
        w = abs(rect[2] - rect[0])
        h = abs(rect[3] - rect[1])
        hwndDC = win32gui.GetWindowDC(okno)
        mfcDC = win32ui.CreateDCFromHandle(hwndDC)
        saveDC = mfcDC.CreateCompatibleDC()
        saveBitMap = win32ui.CreateBitmap()
        saveBitMap.CreateCompatibleBitmap(mfcDC, w, h)
        saveDC.SelectObject(saveBitMap)
        ret = win32gui.GetPixel(hwndDC, coordy[0], coordy[1])
        r, g, b = ret & 0xff, (ret >> 8) & 0xff, (ret >> 16) & 0xff
        mfcDC.DeleteDC()
        saveDC.DeleteDC()
        win32gui.ReleaseDC(okno, hwndDC)
        win32gui.DeleteObject(saveBitMap.GetHandle())
        kolory = [r, g, b]
        # --------------------------------------------
        # kolory = self.GetPixelRGBColor(obsfull, coordy)
        if kolory == [100, 146, 4]:
            pyautogui.press(config['USER']['lowhpkey'])
        elif kolory == [184, 140, 8]:
            pyautogui.press(config['USER']['midhpkey'])
        elif kolory == [175, 44, 44]:
            pyautogui.press(config['USER']['highhpkey'])
        else:
            print(kolory)

def hp_clicked(self):
    global HPButtonStan
    thread = threading.Thread(target=self.healer)
    thread.daemon = True
    if HPButtonStan:
        self.HPButton.config(bg='#990000')
        HPButtonStan = False
    else:
        self.HPButton.config(bg='green')
        HPButtonStan = True
        thread.start()
I get output like this:
[2627, 271]
<class 'list'>
<class 'int'>
<class 'int'>
'Wyświetlanie na pełnym ekranie (podgląd)'
1574752
Exception in thread Thread-1 (healer):
Traceback (most recent call last):
File "C:\Users\Widget\AppData\Local\Programs\Python\Python311\Lib\threading.py", line 1038, in _bootstrap_inner
self.run()
File "C:\Users\Widget\AppData\Local\Programs\Python\Python311\Lib\threading.py", line 975, in run
self._target(*self._args, **self._kwargs)
File "d:\My scripts\TibiaBot\classgui.py", line 744, in healer
ret=win32gui.GetPixel(hwndDC, coordy[0], coordy[1])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
pywintypes.error: (0, 'GetPixel', 'No error message is available')
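One possible explanation (an assumption on my part, not something the traceback confirms): coordinates passed to GetPixel on a DC obtained with GetWindowDC are relative to the window's top-left corner, so if hpcoord holds screen coordinates such as [2627, 271] that fall outside the window's width and height, GetPixel fails with exactly this kind of error. A minimal sanity check along those lines:

# Hedged sketch: verify the point lies inside the window before calling GetPixel
rect = win32gui.GetWindowRect(okno)
w, h = rect[2] - rect[0], rect[3] - rect[1]
x, y = coordy
if not (0 <= x < w and 0 <= y < h):
    print(f"Point ({x}, {y}) is outside the {w}x{h} window DC; GetPixel cannot read it")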
What is also strange: if I change the line
okno = win32gui.FindWindow(None, 'Wyświetlanie na pełnym ekranie (podgląd)')
into
okno = win32gui.FindWindow(None, obsfull)
then the terminal output from "print(okno)" no longer shows "1574752" as before; it prints 0, even though the window name that gets printed is correct. Of course I don't want to hard-code a single window name; I want to read it from the file, because the user can change it.
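Since FindWindow returns 0 only when no top-level window has exactly that title, a hedged way to debug the config-driven lookup is to enumerate the real window titles and compare them against the config value; win32gui.EnumWindows and win32gui.GetWindowText are the standard calls for this, while the whitespace/quote stripping below is only an assumption about what might differ:

import win32gui

def find_window_by_title(title):
    # Strip whitespace/quotes that may have crept into the config value (assumption)
    wanted = title.strip().strip('"\'')
    matches = []

    def enum_handler(hwnd, _):
        if win32gui.GetWindowText(hwnd) == wanted:
            matches.append(hwnd)

    win32gui.EnumWindows(enum_handler, None)
    return matches[0] if matches else 0

# Debug aid: repr() exposes invisible characters in the value read from the file
# print(repr(config['USER']['obsscreen']))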

Related

TypeError: create_bool(): incompatible function arguments - mediapipe, cv2

I made a Python program to detect faces with mediapipe and OpenCV (following this tutorial: https://www.youtube.com/watch?v=01sAkU_NvOY&t=7775s). When I run it, it returns errors. I have tried different fixes, but none of them seem to work. Thanks in advance.
This is my code:
import time
import cv2
import mediapipe as mp

class FaceMeshDetector():
    def __init__(self, staticMode=False, maxFaces=2, minDetectionCon=0.5, minTrackCon=0.5):
        self.staticMode = staticMode
        self.maxFaces = maxFaces
        self.minDetectionCon = minDetectionCon
        self.minTrackCon = minTrackCon
        self.mpDraw = mp.solutions.drawing_utils
        self.mpFaceMesh = mp.solutions.face_mesh
        self.faceMesh = self.mpFaceMesh.FaceMesh(self.staticMode, self.maxFaces, self.minDetectionCon, self.minTrackCon)
        self.drawSpec = self.mpDraw.DrawingSpec(thickness=1, circle_radius=1)

    def findFaceMesh(self, img, draw=True):
        self.imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.faceMesh.process(self.imgRGB)
        faces = []
        if self.results.multi_face_landmarks:
            for faceLms in self.results.multi_face_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, self.faceLms, self.mpFaceMesh.FACE_CONNECTIONS,
                                               self.drawSpec, self.drawSpec)
                face = []
                for id, lm in enumerate(faceLms.landmark):
                    # print(lm)
                    ih, iw, ic = img.shape
                    x, y = int(lm.x * iw), int(lm.y * ih)
                    # print(id, x, y)
                    face.append([x, y])
                faces.append(face)
        return img, faces

def main():
    cap = cv2.VideoCapture(0)
    pTime = 0
    detector = FaceMeshDetector()
    while True:
        success, img = cap.read()
        img, faces = detector.findFaceMesh(img)
        if len(faces) != 0:
            print(faces)
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, f'FPS: {int(fps)}', (20, 70), cv2.FONT_HERSHEY_PLAIN, 3, (0, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)

if __name__ == "__main__":
    main()
This is the full error message:
Traceback (most recent call last):
File "c:\Users\noahb\OneDrive\Programming\ACVwP\Ch. 4 - Face Mesh\FaceMeshModule.py", line 58, in <module>
main()
File "c:\Users\noahb\OneDrive\Programming\ACVwP\Ch. 4 - Face Mesh\FaceMeshModule.py", line 44, in main
detector = FaceMeshDetector()
File "c:\Users\noahb\OneDrive\Programming\ACVwP\Ch. 4 - Face Mesh\FaceMeshModule.py", line 18, in __init__
self.faceMesh = self.mpFaceMesh.FaceMesh(self.staticMode, self.maxFaces, self.minDetectionCon, self.minTrackCon)
File "C:\Users\noahb\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\mediapipe\python\solutions\face_mesh.py", line 94, in __init__
super().__init__(
File "C:\Users\noahb\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\mediapipe\python\solution_base.py", line 258, in __init__
self._input_side_packets = {
File "C:\Users\noahb\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\mediapipe\python\solution_base.py", line 259, in <dictcomp>
name: self._make_packet(self._side_input_type_info[name], data)
File "C:\Users\noahb\AppData\Local\Packages\PythonSoftwareFoundation.Python.3.10_qbz5n2kfra8p0\LocalCache\local-packages\Python310\site-packages\mediapipe\python\solution_base.py", line 513, in _make_packet
return getattr(packet_creator, 'create_' + packet_data_type.value)(data)
TypeError: create_bool(): incompatible function arguments. The following argument types are supported:
1. (arg0: bool) -> mediapipe.python._framework_bindings.packet.Packet
Invoked with: 0.5
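A hedged reading of that message: 0.5 (minDetectionCon) is being passed positionally into a parameter that expects a bool. Newer mediapipe releases added a refine_landmarks argument to FaceMesh, which shifts the positional arguments, so passing them by keyword avoids the mismatch. A sketch of the constructor call written with keywords (same values as in the question):

self.faceMesh = self.mpFaceMesh.FaceMesh(
    static_image_mode=self.staticMode,
    max_num_faces=self.maxFaces,
    min_detection_confidence=self.minDetectionCon,
    min_tracking_confidence=self.minTrackCon,
)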

I am using the LBPH algorithm but I get this error, "raise KeyError(key) from err KeyError: 'Id'", in my face recognition and attendance function

Here is the code for my face recognition and attendance function:
import datetime
import os
import time
import cv2
import pandas as pd
# -------------------------

def recognize_attendence():
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    # cv2.createLBPHFaceRecognizer()
    recognizer.read("TrainingImageLabel" + os.sep + "Trainner.yml")
    harcascadePath = "haarcascade_frontalface_default.xml"
    faceCascade = cv2.CascadeClassifier(harcascadePath)
    df = pd.read_csv("StudentDetails" + os.sep + "StudentDetails.csv")
    font = cv2.FONT_HERSHEY_SIMPLEX
    col_names = ['Id', 'Name', 'Date', 'Time']
    attendance = pd.DataFrame(columns=col_names)

    # Initialize and start realtime video capture
    cam = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    cam.set(3, 640)  # set video width
    cam.set(4, 480)  # set video height
    # Define min window size to be recognized as a face
    minW = 0.1 * cam.get(3)
    minH = 0.1 * cam.get(4)

    while True:
        ret, im = cam.read()
        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(gray, 1.2, 5, minSize=(int(minW), int(minH)), flags=cv2.CASCADE_SCALE_IMAGE)
        for (x, y, w, h) in faces:
            cv2.rectangle(im, (x, y), (x + w, y + h), (10, 159, 255), 2)
            Id, conf = recognizer.predict(gray[y:y + h, x:x + w])
            if conf < 100:
                aa = df.loc[df['Id'] == Id]['Name'].values
                confstr = " {0}%".format(round(100 - conf))
                tt = str(Id) + "-" + aa
            else:
                Id = ' Unknown '
                tt = str(Id)
                confstr = " {0}%".format(round(100 - conf))
            if (100 - conf) > 67:
                ts = time.time()
                date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
                timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
                aa = str(aa)[2:-2]
                attendance.loc[len(attendance)] = [Id, aa, date, timeStamp]
            tt = str(tt)[2:-2]
            if (100 - conf) > 67:
                tt = tt + " [Pass]"
                cv2.putText(im, str(tt), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
            else:
                cv2.putText(im, str(tt), (x + 5, y - 5), font, 1, (255, 255, 255), 2)
            if (100 - conf) > 67:
                cv2.putText(im, str(confstr), (x + 5, y + h - 5), font, 1, (0, 255, 0), 1)
            elif (100 - conf) > 50:
                cv2.putText(im, str(confstr), (x + 5, y + h - 5), font, 1, (0, 255, 255), 1)
            else:
                cv2.putText(im, str(confstr), (x + 5, y + h - 5), font, 1, (0, 0, 255), 1)
        attendance = attendance.drop_duplicates(subset=['Id'], keep='first')
        cv2.imshow('Attendance', im)
        if cv2.waitKey(1) == ord('q'):
            break

    ts = time.time()
    date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
    timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
    Hour, Minute, Second = timeStamp.split(":")
    fileName = "Attendance" + os.sep + "Attendance_" + date + "_" + Hour + "-" + Minute + "-" + Second + ".csv"
    attendance.to_csv(fileName, index=False)
    print("Attendance Successful")
    cam.release()
    cv2.destroyAllWindows()
Whenever I try to run my code I see the following error:
Traceback (most recent call last):
File "C:\Python\Python39\lib\site-packages\pandas\core\indexes\base.py", line 3080, in get_loc
return self._engine.get_loc(casted_key)
File "pandas\_libs\index.pyx", line 70, in pandas._libs.index.IndexEngine.get_loc
File "pandas\_libs\index.pyx", line 101, in pandas._libs.index.IndexEngine.get_loc
File "pandas\_libs\hashtable_class_helper.pxi", line 4554, in
pandas._libs.hashtable.PyObjectHashTable.get_item
File "pandas\_libs\hashtable_class_helper.pxi", line 4562, in
pandas._libs.hashtable.PyObjectHashTable.get_item
KeyError: 'Id'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "I:\image processing\data\fyp\main.py", line 101, in <module>
mainMenu()
File "I:\image processing\data\fyp\main.py", line 47, in mainMenu
RecognizeFaces()
File "I:\image processing\data\fyp\main.py", line 95, in RecognizeFaces
Recognize.recognize_attendence()
File "I:\image processing\data\fyp\Recognize.py", line 38, in recognize_attendence
aa = df.loc[df['Id'] == Id]['Name'].values
File "C:\Python\Python39\lib\site-packages\pandas\core\frame.py", line 3024, in __getitem__
indexer = self.columns.get_loc(key)
File "C:\Python\Python39\lib\site-packages\pandas\core\indexes\base.py", line 3082, in get_loc
raise KeyError(key) from err
KeyError: 'Id'
>>>
In my case, the Id and Name columns were missing from StudentDetails.csv, which the code refers to. I added them to the file manually and the error went away.
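A quick, hedged way to confirm what pandas actually sees in that file (path taken from the question; the expected column names are an assumption based on col_names above):

import pandas as pd

df = pd.read_csv("StudentDetails/StudentDetails.csv")
print(df.columns.tolist())  # a KeyError: 'Id' means this list lacks 'Id' (missing or misspelled header row)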

Python: TypeError: item 1 in _argtypes_ passes a union by value, which is unsupported

I just updated my PC, and when I try to run the same program I have started receiving this issue. Earlier it was running perfectly fine. I am using Anaconda with Python 3.7.6. I tried to look up the details but I am not able to sort it out.
When I try to run my program, I receive:
runfile('C:/Users/Namrata/Desktop/untitled0.py', wdir='C:/Users/Namrata/Desktop')
Traceback (most recent call last):
File "C:\Users\Namrata\anaconda3\lib\ctypes\__init__.py", line 121, in WINFUNCTYPE
return _win_functype_cache[(restype, argtypes, flags)]
KeyError: (<class 'ctypes.HRESULT'>, (<class 'comtypes.automation.tagVARIANT'>, <class 'ctypes.wintypes.VARIANT_BOOL'>, <class 'ctypes.wintypes.VARIANT_BOOL'>), 0)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\Namrata\Desktop\untitled0.py", line 103, in <module>
app = MyApp()
File "C:\Users\Namrata\Desktop\untitled0.py", line 53, in __init__
self.gd = wx.lib.activex.ActiveXCtrl(p, 'DATARAYOCX.GetDataCtrl.1')
File "C:\Users\Namrata\anaconda3\lib\site-packages\wx\lib\activex.py", line 101, in __init__
self._ax = cc.GetBestInterface(unknown)
File "C:\Users\Namrata\anaconda3\lib\site-packages\comtypes\client\__init__.py", line 110, in GetBestInterface
mod = GetModule(tlib)
File "C:\Users\Namrata\anaconda3\lib\site-packages\comtypes\client\_generate.py", line 110, in GetModule
mod = _CreateWrapper(tlib, pathname)
File "C:\Users\Namrata\anaconda3\lib\site-packages\comtypes\client\_generate.py", line 184, in _CreateWrapper
mod = _my_import(fullname)
File "C:\Users\Namrata\anaconda3\lib\site-packages\comtypes\client\_generate.py", line 24, in _my_import
return __import__(fullname, globals(), locals(), ['DUMMY'])
File "C:\Users\Namrata\anaconda3\lib\site-packages\comtypes\gen\_EAB22AC0_30C1_11CF_A7EB_0000C05BAE0B_0_1_1.py", line 401, in <module>
( ['out', 'retval'], POINTER(VARIANT_BOOL), 'pfEnabled' )),
File "C:\Users\Namrata\anaconda3\lib\site-packages\comtypes\__init__.py", line 329, in __setattr__
self._make_methods(value)
File "C:\Users\Namrata\anaconda3\lib\site-packages\comtypes\__init__.py", line 698, in _make_methods
prototype = WINFUNCTYPE(restype, *argtypes)
File "C:\Users\Namrata\anaconda3\lib\ctypes\__init__.py", line 123, in WINFUNCTYPE
class WinFunctionType(_CFuncPtr):
The code is
import wx.lib.activex
import csv
import comtypes.client

class EventSink(object):
    def __init__(self, frame):
        self.counter = 0
        self.frame = frame

    def DataReady(self):
        self.counter += 1
        self.frame.Title = "DataReady fired {0} times".format(self.counter)

class MyApp(wx.App):
    def OnClick(self, e):
        rb_selection = self.rb.GetStringSelection()
        if rb_selection == "WinCam":
            data = self.gd.ctrl.GetWinCamDataAsVariant()
            data = [[x] for x in data]
        else:
            p_selection = self.cb.GetStringSelection()
            if p_selection == "Profile_X":
                data = self.px.ctrl.GetProfileDataAsVariant()
                data = [[x] for x in data]  # csv.writerows accepts a list of rows where each row is a list, a list of lists
            elif p_selection == "Profile_Y":
                data = self.py.ctrl.GetProfileDataAsVariant()
                data = [[x] for x in data]
            else:
                datax = self.px.ctrl.GetProfileDataAsVariant()
                datay = self.py.ctrl.GetProfileDataAsVariant()
                data = [list(row) for row in zip(datax, datay)]  # Makes a list of lists; X1 with Y1 in a list, X2 with Y2 in a list etc...
        filename = self.ti.Value
        with open(filename, 'wb') as fp:
            w = csv.writer(fp, delimiter=',')
            w.writerows(data)

    def __init__(self, redirect=False, filename=None):
        wx.App.__init__(self, redirect, filename)
        self.frame = wx.Frame(parent=None, id=wx.ID_ANY, size=(1000, 760), title='Python Interface to DataRay')
        # Panel
        p = wx.Panel(self.frame, wx.ID_ANY)
        # Get Data
        self.gd = wx.lib.activex.ActiveXCtrl(p, 'DATARAYOCX.GetDataCtrl.1')
        # The methods of the object are available through the ctrl property of the item
        self.gd.ctrl.StartDriver()
        self.counter = 0
        sink = EventSink(self.frame)
        self.sink = comtypes.client.GetEvents(self.gd.ctrl, sink)
        # Button Panel
        bp = wx.Panel(parent=self.frame, id=wx.ID_ANY, size=(215, 250))
        b1 = wx.lib.activex.ActiveXCtrl(parent=bp, size=(200, 50), pos=(1, 0), axID='DATARAYOCX.ButtonCtrl.1')
        b1.ctrl.ButtonID = 297  # Id's for some ActiveX controls must be set
        b2 = wx.lib.activex.ActiveXCtrl(parent=bp, size=(100, 25), pos=(5, 55), axID='DATARAYOCX.ButtonCtrl.1')
        b2.ctrl.ButtonID = 171
        b3 = wx.lib.activex.ActiveXCtrl(parent=bp, size=(100, 25), pos=(110, 55), axID='DATARAYOCX.ButtonCtrl.1')
        b3.ctrl.ButtonID = 172
        b4 = wx.lib.activex.ActiveXCtrl(parent=bp, size=(100, 25), pos=(5, 85), axID='DATARAYOCX.ButtonCtrl.1')
        b4.ctrl.ButtonID = 177
        b4 = wx.lib.activex.ActiveXCtrl(parent=bp, size=(100, 25), pos=(110, 85), axID='DATARAYOCX.ButtonCtrl.1')
        b4.ctrl.ButtonID = 179
        # Custom controls
        t = wx.StaticText(bp, label="File:", pos=(5, 115))
        self.ti = wx.TextCtrl(bp, value="C:\\Users\\Public\\Documents\\output.csv", pos=(30, 115), size=(170, -1))
        self.rb = wx.RadioBox(bp, label="Data:", pos=(5, 140), choices=["Profile", "WinCam"])
        self.cb = wx.ComboBox(bp, pos=(5, 200), choices=["Profile_X", "Profile_Y", "Both"])
        self.cb.SetSelection(0)
        myb = wx.Button(bp, label="Write", pos=(5, 225))
        myb.Bind(wx.EVT_BUTTON, self.OnClick)
        # Pictures
        pic = wx.lib.activex.ActiveXCtrl(parent=self.frame, size=(250, 250), axID='DATARAYOCX.CCDimageCtrl.1')
        tpic = wx.lib.activex.ActiveXCtrl(parent=self.frame, size=(250, 250), axID='DATARAYOCX.ThreeDviewCtrl.1')
        palette = wx.lib.activex.ActiveXCtrl(parent=self.frame, size=(10, 250), axID='DATARAYOCX.PaletteBarCtrl.1')
        # Profiles
        self.px = wx.lib.activex.ActiveXCtrl(parent=self.frame, size=(300, 200), axID='DATARAYOCX.ProfilesCtrl.1')
        self.px.ctrl.ProfileID = 22
        self.py = wx.lib.activex.ActiveXCtrl(parent=self.frame, size=(300, 200), axID='DATARAYOCX.ProfilesCtrl.1')
        self.py.ctrl.ProfileID = 23
        # Formatting
        row1 = wx.BoxSizer(wx.HORIZONTAL)
        row1.Add(window=bp, flag=wx.RIGHT, border=10)
        row1.Add(pic)
        row1.Add(window=tpic, flag=wx.LEFT, border=10)
        row2 = wx.BoxSizer(wx.HORIZONTAL)
        row2.Add(self.px, 0, wx.RIGHT, 100)  # Arguments: item, proportion, flags, border
        row2.Add(self.py)
        col1 = wx.BoxSizer(wx.VERTICAL)
        col1.Add(sizer=row1, flag=wx.BOTTOM, border=10)
        col1.Add(sizer=row2, flag=wx.ALIGN_CENTER_HORIZONTAL)
        self.frame.SetSizer(col1)
        self.frame.Show()

if __name__ == "__main__":
    app = MyApp()
    app.MainLoop()
Thanks for the help in advance!
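A small, hedged diagnostic (assumption: the failure started with the environment update, and the traceback dies inside a comtypes-generated wrapper while building a WINFUNCTYPE whose argument is the tagVARIANT union, which newer ctypes refuses to pass by value). Printing the versions in play helps decide whether comtypes itself needs updating:

import sys
import comtypes

print("Python:", sys.version)
print("comtypes:", comtypes.__version__)
# If comtypes is old, upgrading it is a reasonable first step (assumption, not a guaranteed fix):
#   pip install --upgrade comtypes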

opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor', only the camera light turns on

I get the errors below when I run my code. Both show up at run time:
Exception in Tkinter callback
Traceback (most recent call last):
File "C:\Users\Lenovo\AppData\Local\Programs\Python\Python35\lib\idlelib\run.py", line 119, in main
seq, request = rpc.request_queue.get(block=True, timeout=0.05)
File "C:\Users\Lenovo\AppData\Local\Programs\Python\Python35\lib\queue.py", line 172, in get
raise Empty
queue.Empty
Traceback (most recent call last):
File "C:\Users\Lenovo\AppData\Local\Programs\Python\Python35\lib\tkinter\__init__.py", line 1550, in __call__
return self.func(*args)
File "C:\Users\Lenovo\Desktop\Face recognise attandace system Succes.py", line 89, in TakeImages
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.error: OpenCV(4.2.0) C:\projects\opencv-python\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor
Here is my code. When I run it I get the errors whose stack traces I showed above: while the main exception is being handled, another exception occurs, and I included that stack trace as well.
def TakeImages():
    Id = (txt.get())
    name = (txt2.get())
    if (is_number(Id) and name.isalpha()):
        cam = cv2.VideoCapture(1)
        harcascadePath = "haarcascade_frontalface_default.xml"
        detector = cv2.CascadeClassifier(harcascadePath)
        sampleNum = 0
        while (True):
            ret, img = cam.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = detector.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                sampleNum = sampleNum + 1
                cv2.imwrite("TrainingImages" + name + "." + Id + '.' + str(sampleNum) + ".jpg", gray[y:y + h, x:x + w])
                cv2.imshow('frame', img)
            if cv2.waitKey(100) & 0xFF == ord('q'):
                break
            elif sampleNum > 60:
                break
        cam.release()
        cv2.destroyAllWindows()
        res = "Images Saved for ID: " + Id + " Name: " + name
        row = [Id, name]
        with open('studentDetails\StudentDetails.csv', 'a+') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(row)
        csvFile.close()
        message.configure(text=res)
    else:
        if (is_number(Id)):
            res = "Enter Alphabetical Name"
            message.configure(text=res)
        if (name.isalpha()):
            res = "Enter Numeric Id"
            message.configure(text=res)
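The !_src.empty() assertion means cvtColor received an empty image. A hedged guess for this code: cv2.VideoCapture(1) may not correspond to an attached camera, so cam.read() returns (False, None). A minimal guard along those lines (camera index 0 is used here purely as an example):

cam = cv2.VideoCapture(0)  # try index 0 if index 1 is not a real camera
if not cam.isOpened():
    raise RuntimeError("Camera could not be opened")

ret, img = cam.read()
if not ret or img is None:
    # skip this frame instead of handing an empty image to cvtColor
    pass
else:
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)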

UnboundLocalError: local variable 'x1' referenced before assignment

import numpy as np
from PIL import ImageGrab
import cv2
import time
import pyautogui
import matplotlib.pyplot as plt

def make_coords(img, line_param):
    slope, intercept = line_param
    y1 = img.shape[0]
    y2 = int((y1 * (3 / 5)))
    x1 = int((y1 - intercept) / slope)
    x2 = int((y2 - intercept) / slope)
    try:
        return np.array((x1, y1, x2, y2))  # HERE IS WHERE THE PROBLEM HAPPENS
    except UnboundLocalError:
        pass

def avg_slope(img, lines):
    left_fit = []
    right_fit = []
    if lines is not None:
        for line in lines:
            x1, y1, x2, y2 = line.reshape(4)
            parameters = np.polyfit((x1, x2), (y1, y2), 1)
            try:
                slope = parameters[0]
            except TypeError:
                slope = 0
            try:
                intercept = parameters[1]
            except TypeError:
                intercept = 0
            if slope < 0:
                left_fit.append((slope, intercept))
            else:
                right_fit.append((slope, intercept))
        if left_fit:
            left_fit_avg = np.average(left_fit, axis=0)
            left_line = make_coords(img, left_fit_avg)
        if right_fit:
            right_fit_avg = np.average(right_fit, axis=0)
            right_line = make_coords(img, right_fit_avg)
    return np.array((x1, y1, x2, y2))

def draw_lines(img, lines):
    try:
        for line in lines:
            if line is not None:
                coords = line[0]
                cv2.line(img, (coords[0], coords[1]), (coords[2], coords[3]), [255, 0, 0], 3)
    except:
        pass

def roi(img):
    vertices = np.array([[10, 500], [10, 300], [300, 200], [500, 200], [800, 300], [800, 500]], np.int32)
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, [vertices], 255)
    masked = cv2.bitwise_and(img, mask)
    return masked

def process_img(image):
    original_image = image
    # convert to gray
    processed_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # edge detection
    processed_img = cv2.GaussianBlur(processed_img, (5, 5), 0)  # new
    processed_img = cv2.Canny(processed_img, threshold1=50, threshold2=150)  # new
    # processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
    lines = cv2.HoughLinesP(processed_img, 1, np.pi / 180, 180, np.array([]), minLineLength=15, maxLineGap=5)
    avg_lines = avg_slope(processed_img, lines)
    draw_lines(process_img, avg_lines)
    processed_img = roi(processed_img)
    return processed_img

def main():
    last_time = time.time()
    while True:
        screen = np.array(ImageGrab.grab(bbox=(0, 40, 800, 640)))
        if screen is not None:
            new_screen = process_img(screen)
            print('Frame took {} seconds'.format(time.time() - last_time))
            cv2.imshow('window', new_screen)
        else:
            pass
        last_time = time.time()
        # plt.imshow(new_screen)
        # cv2.imshow('window', cv2.cvtColor(screen, cv2.COLOR_BGR2RGB))
        # cv2.waitKey(0)
        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break

main()
THE TERMINAL SHOWS:
avg_lines = avg_slope(processed_img,lines)
Frame took 0.12310576438903809 seconds
Traceback (most recent call last):
  File "c:/Users/Nicole/Documents/Python Scripts/matetest.py", line 107, in <module>
    main()
  File "c:/Users/Nicole/Documents/Python Scripts/matetest.py", line 91, in main
    new_screen = process_img(screen)
  File "c:/Users/Nicole/Documents/Python Scripts/matetest.py", line 78, in process_img
    avg_lines = avg_slope(processed_img,lines)
  File "c:/Users/Nicole/Documents/Python Scripts/matetest.py", line 50, in avg_slope
    return np.array((x1,y1,x2,y2))
UnboundLocalError: local variable 'x1' referenced before assignment
... even though I'm doing ...
try:
    return np.array((x1,y1,x2,y2))
except UnboundLocalError:
    pass
Your error is actually not occurring where you say it is. By looking at the traceback you can see that it occurs in the function avg_slope.
It is likely because you use return np.array((x1,y1,x2,y2)) while, in that function, those values are only assigned inside an if statement. If the if block is skipped (when lines is None), then x1, y1, x2 and y2 are never assigned in the function. In other words, they may not exist inside the function at all, so you cannot return something that depends on them; the interpreter prevents you from doing this.
You can learn a lot by just reading the error message carefully: "local variable referenced before assignment" is, in a nutshell, what I explained above.
Your problem is here:
def avg_slope(img,lines):
    left_fit =[]
    right_fit=[]
    if lines is not None:
        for line in lines:
            x1,y1,x2,y2=line.reshape(4)
If lines is falsy (empty or None), you never assign to x1.
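Building on that, a minimal sketch of one way to restructure avg_slope so it never returns undefined names (returning None when nothing was detected is an assumption about the intended behavior; the caller and draw_lines must then tolerate None):

def avg_slope(img, lines):
    left_fit = []
    right_fit = []
    if lines is None:
        return None  # nothing detected; let the caller handle it
    for line in lines:
        x1, y1, x2, y2 = line.reshape(4)
        slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
        (left_fit if slope < 0 else right_fit).append((slope, intercept))
    result = []
    if left_fit:
        result.append(make_coords(img, np.average(left_fit, axis=0)))
    if right_fit:
        result.append(make_coords(img, np.average(right_fit, axis=0)))
    return np.array(result) if result else None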
