Line in CSV file gets overwritten each time variable is changed - Python

I'm comparatively new to using CSV functions in Python and need your help.
I have a Python program that calculates the distance between contours in OpenCV, as well as the angle, and this data is stored in a CSV file each time I press h on the keyboard. The issue is that each time I press h, the earlier line gets overwritten by the new line instead of being saved on a new line. Is there any way I can save each new set of values on a new line in the CSV file?
Here's part of my code. The whole program is long, so I'm posting only the relevant part:
def calcDistHex(x6, y6, x5, y5, x4, y4, x3, y3, x2, y2, x, y):
    dist1 = round(dist.euclidean((x6, y6), (x5, y5)))
    dist2 = round(dist.euclidean((x5, y5), (x4, y4)))
    dist3 = round(dist.euclidean((x4, y4), (x3, y3)))
    dist4 = round(dist.euclidean((x3, y3), (x2, y2)))
    dist5 = round(dist.euclidean((x2, y2), (x, y)))
    dist6 = round(dist.euclidean((x, y), (x6, y6)))
    #print(dist1)
    cv2.putText(frame, str(dist1), (round(0.5 * x6 + 0.5 * x5), round(0.5 * y6 + 0.5 * y5)), font, 0.5, (0, 0, 0), 1)
    cv2.putText(frame, str(dist2), (round(0.5 * x5 + 0.5 * x4), round(0.5 * y5 + 0.5 * y4)), font, 0.5, (0, 0, 0), 1)
    cv2.putText(frame, str(dist3), (round(0.5 * x4 + 0.5 * x3), round(0.5 * y4 + 0.5 * y3)), font, 0.5, (0, 0, 0), 1)
    cv2.putText(frame, str(dist4), (round(0.5 * x3 + 0.5 * x2), round(0.5 * y3 + 0.5 * y2)), font, 0.5, (0, 0, 0), 1)
    cv2.putText(frame, str(dist5), (round(0.5 * x2 + 0.5 * x), round(0.5 * y2 + 0.5 * y)), font, 0.5, (0, 0, 0), 1)
    cv2.putText(frame, str(dist6), (round(0.5 * x + 0.5 * x6), round(0.5 * y + 0.5 * y6)), font, 0.5, (0, 0, 0), 1)
    pt6 = x6, y6
    pt5 = x5, y5
    pt4 = x4, y4
    pt3 = x3, y3
    pt2 = x2, y2
    pt1 = x, y
    m2 = gradient(pt2, pt1)
    n2 = gradient(pt2, pt3)
    if m2 is not None and n2 is not None:
        angR2 = math.atan((n2 - m2) / (1 + (n2 * m2)))
        angD2 = math.degrees(angR2)
        if math.isnan(angD2) is False:
            cv2.putText(frame, str(round(abs(angD2))), (pt2[0] - 40, pt2[1] - 20), font, 1, (0, 0, 0))
            #print(round(abs(angD2)), (pt1[0] - 40, pt1[1] - 20))
    m3 = gradient(pt3, pt2)
    n3 = gradient(pt3, pt4)
    if m3 is not None and n3 is not None:
        angR3 = math.atan((n3 - m3) / (1 + (n3 * m3)))
        angD3 = math.degrees(angR3)
        if math.isnan(angD3) is False:
            cv2.putText(frame, str(round(abs(angD3))), (pt3[0] - 40, pt3[1] - 20), font, 1, (0, 0, 0))
            #print(round(abs(angD3)), (pt1[0] - 40, pt1[1] - 20))
    m4 = gradient(pt4, pt3)
    n4 = gradient(pt4, pt5)
    if m4 is not None and n4 is not None:
        angR4 = math.atan((n4 - m4) / (1 + (n4 * m4)))
        angD4 = math.degrees(angR4)
        if math.isnan(angD4) is False:
            cv2.putText(frame, str(round(abs(angD4))), (pt4[0] - 40, pt4[1] - 20), font, 1, (0, 0, 0))
            #print(round(abs(angD4)), (pt1[0] - 40, pt1[1] - 20))
    m5 = gradient(pt5, pt4)
    n5 = gradient(pt5, pt6)
    if m5 is not None and n5 is not None:
        angR5 = math.atan((n5 - m5) / (1 + (n5 * m5)))
        angD5 = math.degrees(angR5)
        if math.isnan(angD5) is False:
            cv2.putText(frame, str(round(abs(angD5))), (pt5[0] - 40, pt5[1] - 20), font, 1, (0, 0, 0))
            #print(round(abs(angD5)), (pt1[0] - 40, pt1[1] - 20))
    m6 = gradient(pt6, pt5)
    n6 = gradient(pt6, pt1)
    if m6 is not None and n6 is not None:
        angR6 = math.atan((n6 - m6) / (1 + (n6 * m6)))
        angD6 = math.degrees(angR6)
        if math.isnan(angD6) is False:
            cv2.putText(frame, str(round(abs(angD6))), (pt6[0] - 40, pt6[1] - 20), font, 1, (0, 0, 0))
            #print(round(abs(angD6)), (pt1[0] - 40, pt1[1] - 20))
    m = gradient(pt1, pt6)
    n = gradient(pt1, pt2)
    if m is not None and n is not None:
        angR = math.atan((n - m) / (1 + (n * m)))
        angD = math.degrees(angR)
        if math.isnan(angD) is False:
            cv2.putText(frame, str(round(abs(angD))), (pt1[0] - 40, pt1[1] - 20), font, 1, (0, 0, 0))
            #print(round(abs(angD)), (pt1[0] - 40, pt1[1] - 20))
    if cv2.waitKey(1) == ord('h'):
        timestamp = int(time.time() * 10000)
        with open('dataset.csv', 'w', newline='') as dataset_file:
            dataset = csv.DictWriter(
                dataset_file,
                ["timestamp", "shape", "Side1", "Side2", "Side3", "Side4", "Side5", "Side6", "Perimeter", "Angle1", "Angle2", "Angle3", "Angle4", "Angle5", "Angle6", "AngleSum", "Error"]
            )
            dataset.writeheader()
            dataset.writerow({
                "timestamp": timestamp,
                "shape": "Hexagon",
                "Side1": dist1,
                "Side2": dist2,
                "Side3": dist3,
                "Side4": dist4,
                "Side5": dist5,
                "Side6": dist6,
                "Perimeter": (dist1 + dist2 + dist3 + dist4 + dist5 + dist6),
                "Angle1": angD,
                "Angle2": angD2,
                "Angle3": angD3,
                "Angle4": angD4,
                "Angle5": angD5,
                "Angle6": angD6,
                "AngleSum": (angD + angD2 + angD3 + angD4 + angD5 + angD6),
                "Error": "To Do"
            })
    return dist1, dist2, dist3, dist4, dist5, dist6, angD, angD2, angD3, angD4, angD5, angD6
This is the function that writes the CSV file.
It is later called from another loop:
if len(approx) == 6:
    for j in n:
        if i % 2 == 0:
            x6 = n[i - 10]
            y6 = n[i - 9]
            x5 = n[i - 8]
            y5 = n[i - 7]
            x4 = n[i - 6]
            y4 = n[i - 5]
            x3 = n[i - 4]
            y3 = n[i - 3]
            x2 = n[i - 2]
            y2 = n[i - 1]
            x = n[i]
            y = n[i + 1]
            #print(x, y, x2, y2, x3, y3, x4, y4)
            string = str(x) + " " + str(y)
            cv2.circle(frame, (x, y), 2, (0, 0, 100), 2)
            cv2.putText(frame, string, (x, y), font, 0.5, (138, 138, 54), 2)
            calcDistHex(x6, y6, x5, y5, x4, y4, x3, y3, x2, y2, x, y)
            # text on remaining co-ordinates.
        i = i + 1
cv2.imshow("Frame", frame)
cv2.imshow("Mask", threshold)
Any help is appreciated. The program is written in Python.

Try changing this line:
with open('dataset.csv', 'w', newline='') as dataset_file:
to:
with open('dataset.csv', 'a', newline='') as dataset_file:
The 'w' mode means overwrite; the 'a' mode means append. More info here: https://stackoverflow.com/a/1466036/3922534
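As a quick illustration of the difference (the file name here is arbitrary):
with open('dataset.csv', 'w') as f:  # 'w' truncates the file first: previous rows are lost
    f.write("only this line survives\n")
with open('dataset.csv', 'a') as f:  # 'a' keeps the existing content and writes after it
    f.write("this line is added at the end\n")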
Edit: to avoid duplicating the header on each write, delete the line:
dataset.writeheader()
from your current code, and add the following code before your program's loop. It initializes the file by overwriting it with just the CSV header (notice the 'w' mode); the loop inside your program will then only append rows of data.
with open('dataset.csv', 'w', newline='') as dataset_file:
    dataset = csv.DictWriter(
        dataset_file,
        ["timestamp", "shape", "Side1", "Side2", "Side3", "Side4", "Side5", "Side6", "Perimeter", "Angle1", "Angle2", "Angle3", "Angle4", "Angle5", "Angle6", "AngleSum", "Error"]
    )
    dataset.writeheader()
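Putting both pieces together, here is a minimal, self-contained sketch of the pattern (with a shortened field list and dummy values standing in for the computed distances):
import csv
import time

FIELDS = ["timestamp", "shape", "Side1", "Perimeter"]  # shortened field list, just for illustration

# Run once, before the main loop: create the file and write only the header ('w' overwrites).
with open('dataset.csv', 'w', newline='') as dataset_file:
    csv.DictWriter(dataset_file, FIELDS).writeheader()

# Called from inside the loop (e.g. whenever 'h' is pressed): append one row per key press.
def append_row(dist1, perimeter):
    with open('dataset.csv', 'a', newline='') as dataset_file:
        writer = csv.DictWriter(dataset_file, FIELDS)
        writer.writerow({
            "timestamp": int(time.time() * 10000),
            "shape": "Hexagon",
            "Side1": dist1,
            "Perimeter": perimeter,
        })

append_row(42, 250)  # dummy values; each call adds a new line instead of overwriting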

Related

Camera for face recognition python project not opening

I have a project for face recognition in Python, but when I try to open the camera for recognition, it does not open. Maybe there is something wrong in my code, but I do not know where the problem is.
Here is the code from app.py.
Maybe the problem is in this face_recognition function:
def face_recognition():  # generate frame by frame from camera
    def draw_boundary(img, classifier, scaleFactor, minNeighbors, color, text, clf):
        gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        features = classifier.detectMultiScale(gray_image, scaleFactor, minNeighbors)
        global justscanned
        global pause_cnt
        pause_cnt += 1
        coords = []
        for (x, y, w, h) in features:
            cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
            id, pred = clf.predict(gray_image[y:y + h, x:x + w])
            confidence = int(100 * (1 - pred / 300))
            if confidence > 85 and not justscanned:
                global cnt
                cnt += 1
                n = (100 / 30) * cnt
                # w_filled = (n / 100) * w
                w_filled = (cnt / 30) * w
                cv2.putText(img, str(int(n)) + ' %', (x + 20, y + h + 28), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (153, 255, 255), 2, cv2.LINE_AA)
                cv2.rectangle(img, (x, y + h + 40), (x + w, y + h + 50), color, 2)
                cv2.rectangle(img, (x, y + h + 40), (x + int(w_filled), y + h + 50), (153, 255, 255), cv2.FILLED)
                mycursor.execute("select a.img_person, b.prs_name, b.prs_skill "
                                 " from img_dataset a "
                                 " left join prs_mstr b on a.img_person = b.prs_nbr "
                                 " where img_id = " + str(id))
                row = mycursor.fetchone()
                pnbr = row[0]
                pname = row[1]
                pskill = row[2]
                if int(cnt) == 30:
                    cnt = 0
                    mycursor.execute("insert into accs_hist (accs_date, accs_prsn) values('" + str(date.today()) + "', '" + pnbr + "')")
                    mydb.commit()
                    cv2.putText(img, pname + ' | ' + pskill, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (153, 255, 255), 2, cv2.LINE_AA)
                    time.sleep(1)
                    justscanned = True
                    pause_cnt = 0
            else:
                if not justscanned:
                    cv2.putText(img, 'UNKNOWN', (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2, cv2.LINE_AA)
                else:
                    cv2.putText(img, ' ', (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2, cv2.LINE_AA)
                if pause_cnt > 80:
                    justscanned = False
            coords = [x, y, w, h]
        return coords

Draw a bounding box of second class on main image which was cropped to get detection of second class

I have a problem.
I have an object detection model that detects two classes. What I want to do is:
Detect class 1 (say c1) on the source image (640x640), draw its bounding box, crop the box -> (c1 image), and then resize it to (640x640) (DONE)
Detect class 2 (say c2) on the c1 image (640x640) (DONE)
Now I want to draw the bounding box of c2 on the source image.
I have tried to explain it here by visualizing it.
How can I do it? Please help.
Code:
frame = self.REC.ImgResize(frame)
frame, score1, self.FLAG1, x, y, w, h = self.Detect(frame, "c1")
if self.FLAG1 and x > 0 and y > 0:
    x1, y1 = w, h
    cv2.rectangle(frame, (x, y), (w, h), self.COLOR1, 1)
    c1Img = frame[y:h, x:w]
    c1Img = self.REC.ImgResize(c1Img)
    ratio = c2Img.shape[1] / float(frame.shape[1])
    if ratio > 0.35:
        c2Img, score2, self.FLAG2, xN, yN, wN, hN = self.Detect(c1Img, "c2")
        if self.FLAG2 and xN > 0 and yN > 0:
            # What should be the values for these => (__, __), (__, __)
            cv2.rectangle(frame, (__, __), (__, __), self.COLOR2, 1)
I tried an approach that solved only the (x, y) coordinates; the width and height were still a mess.
What I tried was to first find the rate at which the cropped c1 image's width and height increased after resizing.
For example:
x1 = 329
y1 = 102
h1 = 637
w1 = 630
r_w = 630 / 640 # 0.9843
r_h = 637 / 640 # 0.9953
x2 = 158
y2 = 393
h2 = 499
w2 = 588
new_x2 = 158 * 0.9843 # 156
new_y2 = 389 * 0.9953 # 389
new_x2 = x1 + new_x2
new_y2 = y1 + new_y2
This works to find (x, y),
but I am still trying to find a way to get the (w, h) of the bounding box.
EDIT
The complete code is:
import cv2
import random
import numpy as np
import onnxruntime as ort

cuda = False
w = "models/model.onnx"
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
session = ort.InferenceSession(w, providers=providers)
names = ['face', 'glasses']
colors = {name: [random.randint(0, 255) for _ in range(3)] for name in names}
img = cv2.imread("test.jpg")

def ImgResize(image, width = 640, height = 640, inter = cv2.INTER_CUBIC):
    if image is not None:
        resized = cv2.resize(image, (width, height), interpolation = inter)
    return resized

def Detect(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleup=True, stride=32):
    flag = False
    w, h = 0, 0
    x, y = 0, 0
    score = 0
    try:
        if im is None:
            raise Exception(IOError())
        shape = im.shape[:2]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)
        ratio = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not scaleup:
            ratio = min(ratio, 1.0)
        new_unpad = int(round(shape[1] * ratio)), int(round(shape[0] * ratio))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]
        if auto:
            dw, dh = np.mod(dw, stride), np.mod(dh, stride)
        dw /= 2
        dh /= 2
        if shape[::-1] != new_unpad:
            im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
        image_ = im.transpose((2, 0, 1))
        image_ = np.expand_dims(image_, 0)
        image_ = np.ascontiguousarray(image_)
        im = image_.astype(np.float32)
        im /= 255
        outname = [i.name for i in session.get_outputs()]
        inname = [i.name for i in session.get_inputs()]
        inp = {inname[0]: im}
        outputs = session.run(outname, inp)[0]
        return im, outputs, ratio, (dw, dh)
    except IOError:
        print("Invalid Image File")

def Detection(img, c_name):
    score = 0
    name = ""
    a, b, c, d = 0, 0, 0, 0
    image_, outputs, ratio, dwdh = Detect(img)
    ori_images = [img.copy()]
    for batch_id, x0, y0, x1, y1, cls_id, score in outputs:
        img = ori_images[int(batch_id)]
        box = np.array([x0, y0, x1, y1])
        box -= np.array(dwdh * 2)
        box /= ratio
        box = box.round().astype(np.int32).tolist()
        cls_id = int(cls_id)
        score = round(float(score), 3)
        if score > 0.55:
            name = names[cls_id]
            if name != c_name:
                return img, 0, False, 0, 0, 0, 0, "Could Not Detect"
            flag = True
            a, b, c, d = tuple(box)
            score = round(score * 100, 0)
    return img, score, flag, a, b, c, d, name

COLORF = (212, 15, 24)
COLORG = (25, 240, 255)
nameW = "Det"
flagF, flagN = False, False

img = ImgResize(img)
c1_img, score, flagF, x1, y1, w1, h1, name = Detection(img, "face")
print(score, flagF, x1, y1, w1, h1, name)
if flagF:
    cv2.rectangle(img, (x1, y1), (w1, h1), COLORF, 1)
    cv2.putText(img, name, (x1, y1), cv2.FONT_HERSHEY_PLAIN, 2, COLORF, 2)
    cv2.imshow("face", img)
    c1_img = c1_img[y1:h1, x1:w1]
    c1_img_orig = c1_img.copy()
    c1_img = ImgResize(c1_img)
    c2_img, score, flagG, x2, y2, w2, h2, name = Detection(c1_img, "glasses")
    if flagG:
        c2_img = c2_img[y2:h2, x2:w2]
        cv2.rectangle(c1_img_orig, (x2, y2), (w2, h2), COLORG, 1)
        cv2.putText(c1_img_orig, name, (x1, y1), cv2.FONT_HERSHEY_PLAIN, 2, COLORG, 2)
        cv2.imshow("glasses", c2_img)
        x3 = x1 + int(x2 * w1 / 640)
        y3 = y1 + int(y2 * h1 / 640)
        w3 = int(w2 * w1 / 640)
        h3 = int(h2 * h1 / 640)
        cv2.rectangle(img, (x3, y3), (w3, h3), COLORG, 1)
cv2.imshow(nameW, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
For some images this code draws the bounding box as required,
but for other images and in the video stream the box is drawn in the wrong place.
Here is a complete programming example. Please keep in mind that for cv2.rectangle you need to pass the top-left corner and the bottom-right corner of the rectangle. As you didn't share ImgResize and Detect, I made some assumptions:
import cv2
import numpy as np
COLOR1 = (0, 255, 0)
COLOR2 = (0, 0, 255)
DETECT_c1 = (40, 20, 120, 160)
DETECT_c2 = (20, 120, 160, 40)
RESIZE_x, RESIZE_y = 200, 200
frame = np.zeros((RESIZE_y, RESIZE_x, 3), np.uint8)
x1, y1, w1, h1 = DETECT_c1
c1Img = frame[y1:h1, x1:w1]
cv2.rectangle(frame, (x1, y1), (x1 + w1, y1 + h1), COLOR1, 1)
c1Img = cv2.resize(c1Img, (RESIZE_x, RESIZE_y))
x2, y2, w2, h2 = DETECT_c2
x3 = x1 + int(x2 * w1 / RESIZE_x)
y3 = y1 + int(y2 * h1 / RESIZE_y)
w3 = int(w2 * w1 / RESIZE_x)
h3 = int(h2 * h1 / RESIZE_y)
cv2.rectangle(frame, (x3, y3), (x3 + w3, y3 + h3), COLOR2, 1)
cv2.imwrite('out.png', frame)
Output:
I suggest that you treat your bounding box coordinates relatively.
If I understand correctly, your problem is that you have different reference frames. One way to bypass that is to normalize your bbox coordinates at each step.
c1_box is relative to your image, so :
c1_x = x/640
c1_y = y/640
When you crop, you can record the ratio values between main image and your cropped object.
image_vs_c1_x = c1_x / img_x
image_vs_c1_y = c1_y / img_y
Then you need to multiply your c2 bounding box coordinates by those ratios.
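A rough sketch of that idea as a helper function (the function name and the fixed 640x640 crop size are assumptions based on the question; boxes are treated as (x1, y1, x2, y2) corner coordinates):
def map_box_to_source(box_c2, c1_box, crop_size=(640, 640)):
    # box_c2: (x1, y1, x2, y2) detected on the resized c1 crop
    # c1_box: (x1, y1, x2, y2) of the c1 detection in source-image coordinates
    cx1, cy1, cx2, cy2 = c1_box
    rx = (cx2 - cx1) / crop_size[0]  # scale between the resized crop and the original crop width
    ry = (cy2 - cy1) / crop_size[1]  # ... and height
    x1, y1, x2, y2 = box_c2
    return (int(cx1 + x1 * rx), int(cy1 + y1 * ry),
            int(cx1 + x2 * rx), int(cy1 + y2 * ry))

# With the example numbers from the question: c1 at (329, 102, 630, 637) on the source,
# c2 at (158, 393, 588, 499) on the 640x640 crop
print(map_box_to_source((158, 393, 588, 499), (329, 102, 630, 637)))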
This is how I was able to solve it:
rwf = round((w1 - x1) / 640, 2)
rhf = round((h1 - y1) / 640, 2)
x3 = int(x2 * rwf)
y3 = int(y2 * rhf)
w3 = int(w2 * rwf)
h3 = int(h2 * rhf)
# these are the top-left and bottom-right coordinates
x4 = x1 + x3
y4 = y1 + y3
w4 = x1 + w3
h4 = y1 + h3

CV2 is not saving images

The code is supposed to save the ROI I have set using the coordinates of detected objects. No errors are raised in this part, but it doesn't save the image.
path = "C:\HelmetDetection"
dt = str(datetime.now().strftime("%Y%m%d-%H:%M:%S"))
overlapping = bool()
instance = None
def check_if_overlapping(x1, y1, trc1, blc1, x2, y2, trc2, blc2):
check_instance(x1, y1, trc1, blc1, x2, y2, trc2, blc2)
if instance == "ins1":
global overlapping
overlapping = True
else:
overlapping = False
def save_image(roi):
status = cv2.imwrite(os.path.join(path, dt + '.jpg'), roi)
print(status)
def check_instance(x1, y1, trc1, blc1, x2, y2, trc2, blc2):
global instance
if x1 < x2 and y1 > y2 and trc1 > trc2 and blc1 < blc2:
instance = "ins1"
if label == "motorcycle":
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 1)
cv2.putText(img, label + " " + f'{confidence * 100}%', (x, y + 20), font, 1, (0, 0, 0), 1)
mcoords = []
mcoords.append((x, y, x + w, y + h))
if len(mcoords) == 1:
x1, y1, trc1, blc1 = x, y, x + w, y + h
else:
x1, y1, trc1, blc1 = mcoords[0]
if label == "bicycle":
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 1)
cv2.putText(img, label + " " + f'{confidence * 100}%', (x, y + 20), font, 1, (0, 0, 0), 1)
x1, y1, trc1, blc1 = x, y, x + w, y + h
bcoords = []
bcoords.append((x, y, x + w, y + h))
if len(bcoords) == 1:
x1, y1, trc1, blc1 = x, y, x + w, y + h
else:
x1, y1, trc1, blc1 = bcoords[0]
if label == "person":
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 0), 1)
cv2.putText(img, label + " " + f'{confidence * 100}%', (x, y + 20), font, 1, (0, 0, 0), 1)
hcoords = []
hcoords.append((x, y, x + w, y + h))
if len(hcoords) == 1:
x2, y2, trc2, blc2 = x, y, x + w, y + h
else:
x2, y2, trc2, blc2 = hcoords[0]
if 'x1' and 'y1' and 'trc1' and 'blc1' and 'x2' and 'y2' and 'trc2' and 'blc2' in locals():
check_if_overlapping(x1, y1, trc1, blc1, x2, y2, trc2, blc2)
!!!
if overlapping == True:
check_instance()
if instance == "ins1":
if (y2 or blc2 or x1 or trc1) > 100:
roi = img[y2 - 100:blc2 + 100, x1 - 100:trc1 + 100]
save_image(roi)
!!!
It returns False, which, as I have read, means the image failed to save. Can I get some easily understandable solutions?
A/N: These are just snippets; it is a fairly long project, so just tell me if you need more information about the code used.
Something I always like to do is to test my filepath setup using open(), as it throws much more useful errors.
In your case it throws an invalid-path error, which, if you start eliminating weird characters, comes down to the ':' characters in the date/time format.
I would recommend switching your format to one that does not contain colons.
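A minimal sketch of that change (keeping the path from the question; the exact format string is just one colon-free option):
import os
import cv2
from datetime import datetime

path = r"C:\HelmetDetection"
# Use '-' instead of ':' so the name is a valid Windows filename
dt = datetime.now().strftime("%Y%m%d-%H-%M-%S")

def save_image(roi):
    status = cv2.imwrite(os.path.join(path, dt + '.jpg'), roi)
    print(status)  # should now print True when roi is a valid image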

Python Face-Recognition reach

I have Python code that detects faces, but if I am 3 meters away it won't recognize me anymore. I am new to Python and I think the code can do more, if you can tell me what I have to change.
Can somebody help me?
while True:
    success, img = cap.read()
    # img = captureScreen()
    imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
    imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
    facesCurFrame = face_recognition.face_locations(imgS)
    encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
    for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
        matches = face_recognition.compare_faces(encodeListKnown, encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnown, encodeFace)
        matchIndex = np.argmin(faceDis)
        name = classNames[matchIndex].upper()
        if not matches[matchIndex]:
            pass
        else:
            y1, x2, y2, x1 = faceLoc
            y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
            cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
            cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
            cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)
    cv2.imshow('Webcam', img)
    cv2.waitKey(1)
I guess the problem is in the large distance and small face rather than in the algorithm.
face_recognition.face_locations() has an argument:
number_of_times_to_upsample
Try setting it to 2 or more: the higher the value, the smaller the faces that can be found.
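In the loop from the question, that would look roughly like this (2 is just a starting value; more upsampling finds smaller faces but is slower):
# Upsample the frame twice before searching, so smaller (more distant) faces are detected
facesCurFrame = face_recognition.face_locations(imgS, number_of_times_to_upsample=2)
encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)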

Type error: Missing required positional arguments with multiple .py files

I want to create a class and then an object which will display a red square in the display I also created.
At the moment, I can only display the blue square, which is not an object of the class "Baustein".
Here are the 2 files I'm using right now:
Bauklotz_class.py
from gui_backup import Display

class Baustein:
    x1, y1, x2, y2 = 10, 10, 20, 20
    color = "red"

    def __init__(self, x1, y1, x2, y2, color):
        self.x1 = x1
        self.x2 = x2
        self.y1 = y1
        self.y2 = y2
        self.color = color

    def show_new_object(self):
        quadrat2 = Display.create_rectangle(40, 50, 60, 70, fill = color)
        Display.coords(quadrat2, x1, y1, x2, y2)
gui_backup.py
from tkinter import *
import curses
import Bauklotz_class
x1 = 10 #initialise coordinates
y1 = 10
x2 = 20
y2 = 20
root = Tk() #create window
root.wm_title("Raspberry Pi GUI") #window title
root.config(background = "#FFFFFF") #background color
#The whole left frame and widgets involved
leftFrame = Frame(root, width=200, height = 400)
leftFrame.grid(row=0, column = 0, padx = 10, pady = 3)
leftLabel1 = Label(leftFrame, text = "Platzhalter Text")
leftLabel1.grid(row = 0, column = 0, padx = 10, pady = 3)
leftLabel2 = Label(leftFrame, text = "Dies ist ein Text\nmit mehreren Zeilen")
leftLabel2.grid(row = 1, column = 0, padx = 10, pady = 3)
#the whole right frame and widgets involved
rightFrame = Frame(root, width=400, height = 400)
rightFrame.grid(row = 0, column = 1, padx = 10, pady = 3)
E1 = Entry(rightFrame, width = 50)
E1.grid(row = 0, column = 0, padx = 10, pady = 60)
#The two functions for the 2 buttons created
def callback1():
    test = Bauklotz_class.Baustein(20, 30, 40, 50, "red")
    test.show_new_object()

def callback2():
    print(1 + 1)
buttonFrame = Frame(rightFrame)
buttonFrame.grid(row = 1, column = 0, padx = 10, pady = 60)
B1 = Button(buttonFrame, text = "Button1", bg = "#FF0000", width = 15, command = callback1)
B1.grid(row = 0, column = 0, padx = 10, pady = 60)
B2 = Button(buttonFrame, text = "Button2", bg ="#FFFF00", width = 15, command = callback2)
B2.grid(row = 0, column = 1, padx = 10, pady = 60)
Slider = Scale(rightFrame, from_ = 0, to = 100, resolution = 0.1, orient = HORIZONTAL, length = 400)
Slider.grid(row = 2, column = 0, padx = 10, pady = 3)
Display = Canvas(rightFrame, width = 300, height = 300)
Display.configure(background = 'black')
Display.grid(row = 1, column = 3, padx = 10, pady = 3)
quadrat = Display.create_rectangle(20, 30, 40, 50, fill = "blue")
Display.coords(quadrat, x1, y1, x2, y2)
#following functions are for coordination of the square
#also you can find here the exceptions so that the object
#cant break out of the display widget
def down(event):
    global x1, y1, x2, y2
    if x2 == 290 or y2 == 300:
        pass
    else:
        y1 += 10
        y2 += 10
    Display.coords(quadrat, x1, y1, x2, y2)
    leftLabel1.config(text = "x1: " + str(x1) + ", x2:" + str(x2) + ", y1:" + str(y1) + ", y2:" + str(y2), width = "40")

def up(event):
    global x1, y1, x2, y2
    if x2 == 0 or y2 == 10:
        pass
    else:
        y1 -= 10
        y2 -= 10
    Display.coords(quadrat, x1, y1, x2, y2)
    leftLabel1.config(text = "x1: " + str(x1) + ", x2:" + str(x2) + ", y1:" + str(y1) + ", y2:" + str(y2), width = "40")

def left(event):
    global x1, y1, x2, y2
    if x1 == 0 or y1 == 10:
        pass
    else:
        x1 -= 10
        x2 -= 10
    Display.coords(quadrat, x1, y1, x2, y2)
    leftLabel1.config(text = "x1: " + str(x1) + ", x2:" + str(x2) + ", y1:" + str(y1) + ", y2:" + str(y2), width = "40")

def right(event):
    global x1, y1, x2, y2
    if x1 == 290 or y1 == 300:
        pass
    else:
        x1 += 10
        x2 += 10
    Display.coords(quadrat, x1, y1, x2, y2)
    leftLabel1.config(text = "x1: " + str(x1) + ", x2:" + str(x2) + ", y1:" + str(y1) + ", y2:" + str(y2), width = "40")
root.bind('<a>', left)
root.bind('<w>', up)
root.bind('<s>', down)
root.bind('<d>', right)
root.mainloop()
Now I only have the problem that the following error is generated: AttributeError: module 'Bauklotz_class' has no attribute 'Baustein'. I can't really figure out what Python means by this; I'm also a newbie in OOP, especially in Python. Can someone help me with this problem?
Here is the full error message I get:
Exception in Tkinter callback
Traceback (most recent call last):
  File "/usr/lib/python3.5/tkinter/__init__.py", line 1562, in __call__
    return self.func(*args)
  File "/home/pi/Documents/TKinter_Übung/gui_backup.py", line 31, in callback1
    test = Bauklotz_class.Baustein(20, 30, 40, 50, "red")
AttributeError: module 'Bauklotz_class' has no attribute 'Baustein'
This line in your callback1 function:
test = Bauklotz_class.Baustein()
needs 5 values in the brackets (x1, y1, x2, y2, and color),
because it is calling your Baustein class, whose __init__ method asks for those parameters:
def __init__(self, x1, y1, x2, y2, color):
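In other words, a minimal sketch of the difference (the argument values are just examples):
# test = Bauklotz_class.Baustein()  # would raise TypeError: missing 5 required positional arguments
test = Bauklotz_class.Baustein(20, 30, 40, 50, "red")  # works: matches __init__(self, x1, y1, x2, y2, color)
test.show_new_object()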
