I've created a class to display my webcam video in a Tkinter window, and I would like to take 3 pictures (waiting 3 seconds after each picture is taken) after a Tkinter button is pressed.
Here is my code (reduced); my logic to take the picture is done. Should I use threads to solve this? I'm new to Python.
import tkinter, cv2, time, dlib, numpy as np, threading
from PIL import Image, ImageTk

class Tela:
    def __init__(self, janela):
        self.janela = janela
        self.janela.title("Reconhecimento Facial")
        self.janela.config(background="#FFFFFF")
        self.image = None
        self.cam = cv2.VideoCapture(0)
        self.detector = dlib.get_frontal_face_detector()
        self.delay = 15
        self.update()
        self.janela.mainloop()

    def update(self):  # display image on gui
        ret, frame = self.cam.read()
        if ret:
            faces, confianca, idx = self.detector.run(frame)
            for i, face in enumerate(faces):
                e, t, d, b = (int(face.left()), int(face.top()), int(face.right()), int(face.bottom()))
                cv2.rectangle(frame, (e, t), (d, b), (0, 255, 255), 2)
            cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
            self.image = Image.fromarray(cv2image)
            imgtk = ImageTk.PhotoImage(image=self.image)
            self.painel.imgtk = imgtk
            self.painel.config(image=imgtk)
        self.janela.after(self.delay, self.update)

    def take_picture(self):
        cou = 1  # counter of pictures
        start = time.clock()  # starts the timer
        ret, frame = self.cam.read()
        if ret:
            faces, confianca, idx = self.detector.run(frame)
            secs = time.clock() - start  # count seconds
            for i, face in enumerate(faces):
                e, t, d, b = (int(face.left()), int(face.top()), int(face.right()), int(face.bottom()))
                cv2.rectangle(frame, (e, t), (d, b), (0, 255, 255), 2)
            if secs > 3:
                imgfinal = cv2.resize(frame, (750, 600))
                cv2.imwrite("fotos/pessoa." + str(id[0][0]) + "." + str(cou) + ".jpg", imgfinal)
                print("Foto " + str(cou) + " tirada")
                cou += 1
                start = time.clock()  # reset the counter of seconds
            if cou > 3:
                # here is where the thread should stop
                pass

# Creates the window
Tela(tkinter.Tk())
Using time.sleep() will freeze your GUI. With tkinter you can use after(), which calls your method after x milliseconds. Below is an example of how to call a function 4 times, once every 2 seconds; you can apply the same idea in your application.
import tkinter as tk

class App():
    def __init__(self):
        self.root = tk.Tk()
        self.label = tk.Label(text="Anything")
        self.label.pack()
        self.counter = 0
        self.take_picture(repeates=4, seconds=2)  # our desired function
        self.root.mainloop()

    def take_picture(self, repeates=0, seconds=1):
        if repeates:
            self.counter = repeates
        if self.counter == 0:
            print('no more execution')
            self.label.configure(text='Done, no more execution')
            return
        # doing stuff
        text = f'function counting down # {self.counter}'
        self.label.configure(text=text)
        # schedule another call to this func using after()
        self.root.after(seconds * 1000, self.take_picture, 0, seconds)
        self.counter -= 1  # our tracker

app = App()
credits to this answer
You should use time.sleep(). It takes a number of seconds (an int or a float) as a parameter, pauses execution for that long, and then the code resumes running.
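For what it's worth, a minimal sketch of that idea applied to the question's class (take_three_pictures is a hypothetical method name; time and cv2 are already imported there, and note the caveat in the earlier answer that time.sleep() blocks the Tkinter event loop, so the window freezes between shots):

    def take_three_pictures(self):  # hypothetical blocking helper
        for cou in range(1, 4):
            ret, frame = self.cam.read()
            if ret:
                imgfinal = cv2.resize(frame, (750, 600))
                cv2.imwrite("fotos/pessoa." + str(cou) + ".jpg", imgfinal)
                print("Foto " + str(cou) + " tirada")
            time.sleep(3)  # blocks the GUI for 3 seconds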
Related
Hey, I am trying to run different face detection models simultaneously. I am using the opencv library to open a video stream and created different process objects for the different face detection models. When I run the program, the first method runs successfully but the second exits with an error that it can't receive a frame.
The major challenge is the while loop for reading the capture source (cap), which makes it different from the questions posted on Stack Overflow before.
The code is as follows:
import cv2
import dlib
from multiprocessing import Process

def haar_cascade():
    while True:
        ret, frame = cap.read()
        cv2.imshow('input', frame)
        if not ret:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        classifier = cv2.CascadeClassifier('haarcascade_frontalface2.xml')
        faces = classifier.detectMultiScale(frame)
        for result in faces:
            x, y, w, h = result
            x1, y1 = x + w, y + h
            cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
        if cv2.waitKey(1) == ord('q'):
            break
        cv2.imshow('harr-cascade', frame)

def dlib_hog():
    while True:
        ret, frame = cap.read()
        if not ret:
            print("Can't receive frame (stream end?). Exiting ...")
            break
        detector = dlib.get_frontal_face_detector()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = detector(gray, 1)  # result
        # to draw faces on image
        for result in faces:
            x = result.left()
            y = result.top()
            x1 = result.right()
            y1 = result.bottom()
            cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
        if cv2.waitKey(1) == ord('q'):
            break
        cv2.imshow('dlib-hog', frame)

if __name__ == "__main__":
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Cannot open camera")
        exit()
    harrProcess = Process(target=haar_cascade)
    harrProcess.start()
    dlibProcess = Process(target=dlib_hog)
    dlibProcess.start()
    # When everything done, release the capture
    harrProcess.join()
    dlibProcess.join()
    cap.release()
    cv2.destroyAllWindows()
How can I create a multiprocessing model that reads frames from a single source and performs independent operations on them?
I have made various attempts:
1. I tried using multiprocessing with a producer process and two consumer processes. The frame created by the producer must be converted to a shared-memory array and then converted back to a numpy array when retrieved by a consumer. There is sufficient overhead in these operations that I was finding that frames were being lost.
2. I tried using multithreading with a producer thread and two consumer threads. This has less overhead with regards to passing frames between the producer and the consumers. The problem, of course, with multithreading is that due to contention for the Global Interpreter Lock, any CPU-intensive processing required by one consumer cannot run in parallel with CPU-intensive processing required by the other consumer, and could even cause the producer to miss frames. Unfortunately, I don't know whether, when using a camera for input, there is a way to detect missed frames on the part of the producer. To remediate these problems, I pass a multiprocessing pool to the consumer threads, to which they can submit tasks that perform the CPU-intensive processing on the frames. Here, too, there is sufficient overhead in passing frames from one process to another, and frames are lost.
3. As in point 2 above, I use multithreading, but instead of submitting the CPU-intensive work to the multiprocessing pool, I perform it within the consumer thread. This seems to cause fewer missed frames for the consumer. But I can't tell whether it is now causing the producer to miss frames it would not otherwise miss. So using a multiprocessing pool for the CPU-intensive work seems to be the wiser approach. Of course, if your CPU is fast enough, neither the consumer nor the producer should miss frames. But option 1 (see the second code example), i.e. using just multiprocessing, is probably best.
In the following demos, since I don't have access to your XML file, I have dummied out the processing for one of your consumers. You terminate the program by just hitting the enter key:
Using Multithreading
Set USE_POOL_FOR_COMPUTATION = False to perform CPU-intensive processing by direct call instead of submitting the work to a multiprocessing pool:
#!/usr/bin/env python3
import threading
import multiprocessing

import cv2
import dlib

USE_POOL_FOR_COMPUTATION = True

class Producer:
    def __init__(self):
        # The latest frame read:
        self._frame = None
        self._condition = threading.Condition()
        self._running = True
        # The latest frame number retrieved
        self._latest_frame_number = 0

    def run(self, cap):
        while self._running:
            ret, self._frame = cap.read()
            if not ret:
                self._running = False
            else:
                self._latest_frame_number += 1
            with self._condition:
                self._condition.notify_all()

    def stop(self):
        self._running = False

    def get_frame(self, sequence_number):
        with self._condition:
            # We block until we find a frame sequence number >= sequence_number.
            self._condition.wait_for(lambda: not self._running or self._latest_frame_number >= sequence_number)
            # Even after the stop method has been called and we are no longer running,
            # there could still be an unprocessed frame. But when we are called again, the current
            # frame number will be < the expected frame number:
            return (self._latest_frame_number, None if self._latest_frame_number < sequence_number else self._frame)

def process_haar_cascade(frame):
    classifier = cv2.CascadeClassifier('haarcascade_frontalface2.xml')
    faces = classifier.detectMultiScale(frame)
    for result in faces:
        x, y, w, h = result
        x1, y1 = x + w, y + h
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
    return frame

def haar_cascade(producer, pool):
    last_sequence_number = 0
    while True:
        expected = last_sequence_number + 1
        sequence_number, frame = producer.get_frame(expected)
        if frame is None:
            break
        cv2.waitKey(1)  # allow window to update
        if sequence_number != expected:
            print(f'haar_cascade missed frames {expected} to {sequence_number-1}', flush=True)
        last_sequence_number = sequence_number
        cv2.imshow('input', frame)  # Unmodified frame
        # Since I don't have the required xml file, just skip processing:
        """
        if USE_POOL_FOR_COMPUTATION:
            frame = pool.apply(process_haar_cascade, args=(frame,))
        else:
            frame = process_haar_cascade(frame)
        """
        cv2.imshow('harr-cascade', frame)

def process_dlib_hog(frame):
    detector = dlib.get_frontal_face_detector()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 1)  # result
    # to draw faces on image
    for result in faces:
        x = result.left()
        y = result.top()
        x1 = result.right()
        y1 = result.bottom()
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
    return frame

def dlib_hog(producer, pool):
    last_sequence_number = 0
    while True:
        expected = last_sequence_number + 1
        sequence_number, frame = producer.get_frame(expected)
        if frame is None:
            break
        cv2.waitKey(1)  # allow window to update
        if sequence_number != expected:
            print(f'dlib_hog missed frames {expected} to {sequence_number-1}', flush=True)
        last_sequence_number = sequence_number
        if USE_POOL_FOR_COMPUTATION:
            frame = pool.apply(process_dlib_hog, args=(frame,))
        else:
            frame = process_dlib_hog(frame)
        cv2.imshow('dlib-hog', frame)

def main():
    producer = Producer()
    pool = multiprocessing.Pool(2) if USE_POOL_FOR_COMPUTATION else None
    # Pass pool for CPU-intensive work:
    consumer1_thread = threading.Thread(target=haar_cascade, args=(producer, pool))
    consumer1_thread.start()
    consumer2_thread = threading.Thread(target=dlib_hog, args=(producer, pool))
    consumer2_thread.start()
    cap = cv2.VideoCapture(0)
    producer_thread = threading.Thread(target=producer.run, args=(cap,))
    producer_thread.start()
    input('Hit enter to terminate:\n')
    producer.stop()
    producer_thread.join()
    consumer1_thread.join()
    consumer2_thread.join()
    cap.release()
    cv2.destroyAllWindows()
    if USE_POOL_FOR_COMPUTATION:
        pool.close()
        pool.join()

if __name__ == '__main__':
    main()
Using Multiprocessing
The multiprocessing.RawArray that is used to hold the sharable frame must be allocated before the consumer process is run so that all processes have access to this array. This requires knowing in advance how large an array to create:
#!/usr/bin/env python3
import multiprocessing
import ctypes

import cv2
import numpy as np
import dlib

class Producer:
    def __init__(self):
        # Discover how large a frame is by getting the first frame
        cap = cv2.VideoCapture(0)
        ret, frame = cap.read()
        if ret:
            self._shape = frame.shape
            frame_size = self._shape[0] * self._shape[1] * self._shape[2]
            self._shared_array = multiprocessing.RawArray(ctypes.c_ubyte, frame_size)
        else:
            self._shared_array = None
        cap.release()
        self._condition = multiprocessing.Condition()
        self._running = multiprocessing.RawValue('i', 1)
        # The latest frame number retrieved
        self._latest_frame_number = multiprocessing.RawValue('i', 0)
        self._lock = multiprocessing.Lock()

    def run(self):
        cap = cv2.VideoCapture(0)
        while self._running.value:
            ret, frame = cap.read()
            if not ret:
                self._running.value = 0
                with self._condition:
                    self._condition.notify_all()
                cap.release()
                break
            with self._lock:
                self._latest_frame_number.value += 1
                # np array to shared array
                temp = np.frombuffer(self._shared_array, dtype=frame.dtype)
                temp[:] = frame.flatten(order='C')
            with self._condition:
                self._condition.notify_all()

    def stop(self):
        self._running.value = 0

    def get_frame(self, sequence_number):
        with self._condition:
            # We block until we find a frame sequence number >= sequence_number.
            self._condition.wait_for(lambda: not self._running.value or self._latest_frame_number.value >= sequence_number)
            # Even after the stop method has been called and we are no longer running,
            # there could still be an unprocessed frame. But when we are called again, the current
            # frame number will be < the expected frame number:
            if self._latest_frame_number.value < sequence_number:
                return (self._latest_frame_number.value, None)
            with self._lock:
                # Convert to np array:
                return self._latest_frame_number.value, np.ctypeslib.as_array(self._shared_array).reshape(self._shape)

def process_haar_cascade(frame):
    classifier = cv2.CascadeClassifier('haarcascade_frontalface2.xml')
    faces = classifier.detectMultiScale(frame)
    for result in faces:
        x, y, w, h = result
        x1, y1 = x + w, y + h
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
    return frame

def haar_cascade(producer):
    last_sequence_number = 0
    while True:
        expected = last_sequence_number + 1
        sequence_number, frame = producer.get_frame(expected)
        if frame is None:
            break
        cv2.waitKey(1)  # allow window to update
        if sequence_number != expected:
            print(f'haar_cascade missed frames {expected} to {sequence_number-1}', flush=True)
        last_sequence_number = sequence_number
        cv2.imshow('input', frame)  # Unmodified frame
        # Since I don't have the required xml file, just skip processing:
        #frame = process_haar_cascade(frame)
        cv2.imshow('harr-cascade', frame)

def process_dlib_hog(frame):
    detector = dlib.get_frontal_face_detector()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray, 1)  # result
    # to draw faces on image
    for result in faces:
        x = result.left()
        y = result.top()
        x1 = result.right()
        y1 = result.bottom()
        cv2.rectangle(frame, (x, y), (x1, y1), (0, 0, 255), 2)
    return frame

def dlib_hog(producer):
    last_sequence_number = 0
    while True:
        expected = last_sequence_number + 1
        sequence_number, frame = producer.get_frame(expected)
        if frame is None:
            break
        cv2.waitKey(1)  # allow window to update
        if sequence_number != expected:
            print(f'dlib_hog missed frames {expected} to {sequence_number-1}', flush=True)
        last_sequence_number = sequence_number
        frame = process_dlib_hog(frame)
        cv2.imshow('dlib-hog', frame)

def main():
    producer = Producer()
    consumer1_process = multiprocessing.Process(target=haar_cascade, args=(producer,))
    consumer1_process.start()
    consumer2_process = multiprocessing.Process(target=dlib_hog, args=(producer,))
    consumer2_process.start()
    producer_process = multiprocessing.Process(target=producer.run)
    producer_process.start()
    input('Hit enter to terminate:\n')
    producer.stop()
    producer_process.join()
    consumer1_process.join()
    consumer2_process.join()

if __name__ == '__main__':
    main()
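As a minimal illustration of the shared-array round trip used above, here is a sketch with an assumed 480x640x3 frame shape (the demo discovers the real shape from the first frame):

    import ctypes
    import multiprocessing
    import numpy as np

    shape = (480, 640, 3)  # assumed (height, width, channels)
    shared_array = multiprocessing.RawArray(ctypes.c_ubyte, shape[0] * shape[1] * shape[2])

    # Producer side: flatten the frame into the shared buffer.
    frame = np.zeros(shape, dtype=np.uint8)  # stand-in for a frame from cap.read()
    np.frombuffer(shared_array, dtype=np.uint8)[:] = frame.flatten(order='C')

    # Consumer side: view the shared buffer as a numpy array again.
    restored = np.ctypeslib.as_array(shared_array).reshape(shape)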
Hobby script kiddo here.
I'm trying to make a bot that can beat humans at reaction quizzes (such as this one). However, I think my code could be more efficient and respond quicker.
The code I have below averages around a 130 ms response time.
import pandas as pd
import numpy as np
import cv2
from PIL import Image, ImageGrab
import time
import pyautogui
import mouse

abc123 = 0
time.sleep(0)
while abc123 == 0:
    im1 = ImageGrab.grab(bbox=(50, 150, 700, 400))  # x, y, w, h
    img_np = np.array(im1)
    frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
    break

index = ["color", "color_name", "hex", "R", "G", "B"]
csv = pd.read_csv(r'D:\code\colors1.csv', names=index, header=None)
clicked = False
r = g = b = xpos = ypos = 0

def recognize_color(R, G, B):
    minimum = 10000
    for i in range(len(csv)):
        d = abs(R - int(csv.loc[i, "R"])) + abs(G - int(csv.loc[i, "G"])) + abs(B - int(csv.loc[i, "B"]))
        if d <= minimum:
            minimum = d
            cname = csv.loc[i, "color_name"]
    return cname

def mouse_click(event, x, y, flags, param):
    if True:
        global b, g, r, xpos, ypos, clicked
        clicked = True
        xpos = x
        ypos = y
        b, g, r = frame[y, x]
        b = int(b)
        g = int(g)
        r = int(r)

cv2.namedWindow('Color Recognition App')
cv2.setMouseCallback('Color Recognition App', mouse_click)
run_once = 0
pyautogui.click(x=200, y=150)
time.sleep(0)

while (1):
    im1 = ImageGrab.grab(bbox=(50, 150, 700, 400))  # x, y, w, h
    img_np = np.array(im1)
    frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
    mouse.move(1, 1, absolute=False)
    mouse.move(-1, -1, absolute=False)
    cv2.imshow("Color Recognition App", frame)
    if clicked:
        # print(g)
        if g in range(200, 250):
            pyautogui.click(x=470, y=300)
        clicked = False
    # Break the loop when user hits 'esc' key
    if cv2.waitKey(20) & 0xFF == 27:
        break

cv2.destroyAllWindows()
The csv = pd.read_csv(r'D:\code\colors1.csv', names=index, header=None) line requires this file -> https://file.io/mXCore9FqusW
Played around a little, made one that averages 60ms:
import PIL.ImageGrab
import mouse

while True:
    rgb = PIL.ImageGrab.grab().load()[75, 180]
    rgb2 = (78, 221, 113)
    if rgb == rgb2:
        mouse.click()
I'm trying to make a color detection system with Python OpenCV; this is what I have done so far. I'm still new to Python, so please help me. Thank you.
Basically, what should happen is that when the user opens this application the camera opens, and when they click anywhere on the window it tells them the name of the color.
from tkinter import *
from tkinter import ttk
import cv2
import numpy as np
import webcolors

def click_event(event, x, y, flags, param):
    if event == cv2.EVENT_LBUTTONDOWN:
        blue = cap[y, x, 0]
        green = cap[y, x, 1]
        red = cap[y, x, 2]
        font = cv2.FONT_HERSHEY_SIMPLEX
        #colourcode = str(blue) + ", " + str(green) + ", " + str(red)
        colourcode2 = (red, green, blue)
        cv2.imshow('frame', frame)

        def closest_colour(requested_colour):
            min_colours = {}
            for key, name in webcolors.css3_hex_to_names.items():
                r_c, g_c, b_c = webcolors.hex_to_rgb(key)
                rd = (r_c - requested_colour[0]) ** 2
                gd = (g_c - requested_colour[1]) ** 2
                bd = (b_c - requested_colour[2]) ** 2
                min_colours[(rd + gd + bd)] = name
            return min_colours[min(min_colours.keys())]

        def get_colour_name(requested_colour):
            try:
                closest_name = actual_name = webcolors.rgb_to_name(requested_colour)
            except ValueError:
                closest_name = closest_colour(requested_colour)
                actual_name = None
            return actual_name, closest_name

        #print(colourcode2)
        requested_colour = colourcode2
        actual_name, closest_name = get_colour_name(requested_colour)
        #print("colour name:", closest_name)
        cv2.putText(img, closest_name, (0, 50), font, 1, 255, 2)

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    cv2.imshow('frame', frame)
    if cv2.waitKey(40) == 27:
        break

cv2.waitKey(0)
cv2.destroyAllWindows()
I don't know what your problem is, but using your code I created a version which works for me.
I use cv2.setMouseCallback to assign your function to the mouse click. It gets the pixel from frame, not cap. A pixel is a list/tuple of (B, G, R), so I reverse it with list[::-1]. After I get the name of the color, I assign it to an external/global variable instead of putting it on the frame. In the main loop I use this name to put the text on the frame before displaying it.
I also use EVENT_LBUTTONUP to remove the text when I release the mouse button:
    elif event == cv2.EVENT_LBUTTONUP:
        closest_name = ''
If you remove the lines above, it will keep the text after you release the mouse button.
import cv2
import webcolors

# --- functions ---

def closest_colour(requested_colour):
    min_colours = {}
    for key, name in webcolors.css3_hex_to_names.items():
        r_c, g_c, b_c = webcolors.hex_to_rgb(key)
        rd = (r_c - requested_colour[0]) ** 2
        gd = (g_c - requested_colour[1]) ** 2
        bd = (b_c - requested_colour[2]) ** 2
        min_colours[(rd + gd + bd)] = name
    return min_colours[min(min_colours.keys())]

def get_colour_name(requested_colour):
    try:
        closest_name = actual_name = webcolors.rgb_to_name(requested_colour)
    except ValueError:
        closest_name = closest_colour(requested_colour)
        actual_name = None
    return actual_name, closest_name

def click_event(event, x, y, flags, param):
    global closest_name  # inform function to assign to global/external variable instead of creating a local one
    if event == cv2.EVENT_LBUTTONDOWN:
        #B, G, R = frame[x, y]
        #colour = (R, G, B)  # reverse values
        colour = frame[y, x][::-1]  # reverse values
        actual_name, closest_name = get_colour_name(colour)
        print(actual_name, closest_name)
    elif event == cv2.EVENT_LBUTTONUP:
        closest_name = ''

# --- main ---

font = cv2.FONT_HERSHEY_SIMPLEX
closest_name = ''  # create global variable at start

cap = cv2.VideoCapture(0)

cv2.namedWindow('frame')
cv2.setMouseCallback('frame', click_event)

while True:
    ret, frame = cap.read()
    if closest_name:
        #print(closest_name)
        cv2.putText(frame, closest_name, (10, 30), font, 1, (255, 255, 255), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(40) == 27:
        break

cv2.waitKey(0)
cv2.destroyAllWindows()
cap.release()
In all GUIs, pressing a button creates a single EVENT_LBUTTONDOWN event - it is not generated again and again while you keep the button pressed. So the function click_event is executed only once, and your version puts the text on only one frame; a few milliseconds later the main loop gets a new frame and displays it without the text, so you never see the text on the frame.
I’m trying to build a GUI where the user controls the buttons by eye blinking. Basically, a short blink should emulate pressing the keyboard key Tab (to move from one button to another) and a long blink should emulate pressing the key Space (to activate the selected button).
The idea is that both processes, the window and the eyeblink detection system, run at the same time. Here is where I hit all the problems: as they are both while loops, I cannot run them at the same time.
In the code I attach, I simplify this by opening the main window first and then clicking the Start button to run the eyeblink system. With pyautogui.press() I intend to emulate the key presses in the main window. However, when the eyeblink detection system is working, the main window is no longer accessible (you cannot press anything).
I have tried invoking the blink function once per frame instead of in an endless loop, but it’s too slow and not able to properly detect the blinks. I’ve also tried multiprocessing: ‘Python quits unexpectedly’, with no error shown, so I'm not sure what’s going on (the code I used to try this is commented out at the end). I also tried threading in a simple way: no error, but nothing appears either (again, the code I used is commented out at the end).
Here I attach the link to the files (.mp3, .xml, .py):
https://drive.google.com/drive/folders/1U2uwHXzl2MtSTlAKw1L68L3xcRmelP2d?usp=sharing
I’ve just started using Python so my knowledge is not high, I’m running out of time and I’m stuck at this point… So any help would be welcome!! Thanks in advance ;)
macOS
Python 2.7
OpenCV 3.4
Tkinter (I just chose it because it is easy to handle, yet I’m open to change if it’s necessary)
# Ventana+Blink

from Tkinter import *
import numpy as np
import cv2
# To emulate a keyboard pressing
import pyautogui
import time
# To play the sounds
import subprocess
# from Blink import funcion_blink
# from multiprocessing import Process
# import threading

def Onbutton_clicked():
    # while True:
    # Repeating the sound 2 times
    for x in range(0, 2):
        subprocess.call(['afplay', 'alarm2.mp3'])

def Onbutton2_clicked():
    # Repeating the sound 1 time
    for x in range(0, 1):
        subprocess.call(['afplay', 'sound.mp3'])

def execute_func1():
    print('enter\n')
    pyautogui.press('space')  # press the Space key
    for x in range(1, 2):
        subprocess.call(['afplay', 'unconvinced.mp3'])

def execute_func2():
    print('tab\n')
    pyautogui.press('tab')  # press the Tab key
    for x in range(1, 2):
        subprocess.call(['afplay', 'case-closed.mp3'])

def execute_func3():
    print('space\n')
    pyautogui.press('space')  # press the Space key
    for x in range(1, 2):
        subprocess.call(['afplay', 'cheerful.mp3'])

# ----- Eyeblink detection system -----
def funcion_blink():
    # XML classifiers should be in the folder with this file
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
    video_capture = cv2.VideoCapture(0)
    det = 0
    n = 0
    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(
            gray,
            scaleFactor=1.1,
            minNeighbors=5,
            minSize=(100, 100),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        # Draw a rectangle around the faces
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(
                roi_gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE
            )
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            face_img = gray[x:x + w, y:y + h]
            face_res = cv2.resize(face_img, (100, 100), interpolation=cv2.INTER_CUBIC)
            eye_reg = face_res[15:85, 20:50]
            cv2.rectangle(frame, (x+15*w/100, y + 2*h / 10), (x + w*85/100, y + (5 * h / 10)), (0, 0, 255), 2)
            if det < 10:
                tmpl_eyes = eye_reg
                det = det + 1
                print('template acquired\n')
            elif det == 10:
                # template matching
                wt, ht = tmpl_eyes.shape[::-1]
                #res_templ = cv2.matchTemplate(eye_reg, tmpl_eyes, cv2.TM_CCORR_NORMED)
                res_templ = cv2.matchTemplate(eye_reg, tmpl_eyes, cv2.TM_CCOEFF_NORMED)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res_templ)
                # print(max_val, n)
                # the value 0.92 should be adapted to the conditions and camera position
                if max_val > 0.90:
                    n = n + 1
                else:
                    if n >= 12:
                        execute_func1()
                        # here should go the code that triggers some action when the person blinks??
                    elif n >= 6:
                        execute_func2()
                    elif n >= 3:
                        execute_func3()
                    n = 0
                    print(max_val, n)
        # Display the resulting frame
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    cv2.destroyAllWindows()
    video_capture.release()

# ---- Main window ----
def main_window():
    root = Tk()
    root.geometry('700x700')
    # Create the buttons of the main window
    button = Button(root, text='alarm', command=Onbutton_clicked)
    button.bind('<Return>', Onbutton_clicked)
    button.pack()
    button2 = Button(root, text='extra', command=Onbutton2_clicked)
    button2.bind('<Return>', Onbutton2_clicked)
    button2.pack()
    # By pressing this button we start running the eyeblink detection system
    button3 = Button(root, text='Start', command=funcion_blink)
    button3.bind('<Button-1>', funcion_blink)
    button3.pack()
    # To maintain the window until you close it
    root.mainloop()

# Execute the main window
main_window()

# ---- Trials ----

# while True:
#     main_window()
#     funcion_blink()
# It just plays one function, and when it finishes it plays the next one

# Multiprocessing
# if __name__ == '__main__':
#     Process(target=main_window).start()
#     Process(target=funcion_blink).start()
# PYTHON QUITS UNEXPECTEDLY

# Threading
# p1 = threading.Thread(target=main_window, args=())
# p2 = threading.Thread(target=funcion_blink, args=())
# p1.start()
# p2.start()
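For reference, a minimal sketch of the threading variant described in the question: funcion_blink is started in a daemon thread by the Start button, so the Tk mainloop stays responsive. This is only a sketch under the assumption that the OpenCV/pyautogui calls are safe to run off the main thread on this platform; Tkinter widgets must still only be touched from the main thread:

    import threading

    def start_blink_thread():
        # Daemon thread dies together with the main program
        t = threading.Thread(target=funcion_blink)
        t.daemon = True
        t.start()

    # in main_window(), the Start button would use this wrapper:
    # button3 = Button(root, text='Start', command=start_blink_thread)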
I am creating a countdown to Christmas using wxpython as a GUI. I tried testing the script with secondsLeft so that the seconds would be printed on the canvas. The program draws the seconds, but they don't change as they're supposed to.
This is my code:
"""A simple contdown for Christmas using wxpython as a GUI"""
import wx
import datetime
#Setting our current time.
currentTime = datetime.datetime.now()
now = list(str(currentTime))
now = now[:19]
now = ''.join(now)
#Computing time left.
while now != '2015-12-25 1:00:00':
if currentTime.month == 11:
daysLeft = (30 - currentTime.day) + 24
else:
daysLeft = 25 - currentTime.day
if currentTime.hour >= 12:
hoursLeft = 25 - currentTime.hour
else:
hoursLeft = (12 - currentTime.hour) + 13
minutesLeft = currentTime.minute
secondsLeft = currentTime.second
currentTime = datetime.datetime.now()
now = list(str(currentTime))
now = now[:19]
now = ''.join(now)
class AFrame(wx.Frame):
def __init__ (self, parent=None, id=-1, title=None):
wx.Frame.__init__(self, parent, id, title, size=(400, 400))
self.statbmp = wx.StaticBitmap(self)
self.draw_image()
self.Refresh()
def draw_image(self):
# select the width and height of the blank bitmap
# must fit frame
w, h = 400, 400
# create the blank bitmap as background
draw_bmp = wx.EmptyBitmap(w, h)
#create canvas.
canvas = wx.MemoryDC(draw_bmp)
#fill the canvas with white
canvas.SetBrush(wx.Brush('white'))
canvas.Clear()
#get text dimentions.
tw, th = canvas.GetTextExtent(str(secondsLeft))
#draw the text.
canvas.DrawText(str(secondsLeft), (w - tw) / 2, (h - th) / 2 )
self.statbmp.SetBitmap(draw_bmp)
app = wx.App(0)
AFrame(title="Coundown to Christmas").Show()
app.MainLoop() #Starts frame.
Use wx.Timer to run a function every 1000 ms (1 s).
Use datetime (and timedelta) to get the number of seconds left.
#!/usr/bin/env python

import wx
import datetime

class AFrame(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, parent=None, id=-1, title="Countdown to Christmas", size=(400, 400))
        # 2015.12.25 1:00:00
        self.future_time = datetime.datetime(2015, 12, 25, 1, 0, 0)
        # create timer
        self.timer = wx.Timer(self)
        # assign draw_image to timer
        self.Bind(wx.EVT_TIMER, self.draw_image, self.timer)
        # start timer
        self.timer.Start(1000)
        self.statbmp = wx.StaticBitmap(self)
        self.draw_image()
        self.Refresh()
        self.Show()

    def draw_image(self, event=None):  # event argument required by timer
        # get timedelta
        secondsLeft = self.future_time - datetime.datetime.now()
        # get seconds, rounded to an integer
        secondsLeft = int(secondsLeft.total_seconds())
        if secondsLeft <= 0:
            secondsLeft = 0
            if self.timer.IsRunning():
                self.timer.Stop()
        # convert to text
        secondsLeft = str(secondsLeft)
        # select the width and height of the blank bitmap
        # must fit frame
        w, h = 400, 400
        # create the blank bitmap as background
        draw_bmp = wx.EmptyBitmap(w, h)
        # create canvas.
        canvas = wx.MemoryDC(draw_bmp)
        # fill the canvas with white
        canvas.SetBrush(wx.Brush('white'))
        canvas.Clear()
        # get text dimensions.
        tw, th = canvas.GetTextExtent(secondsLeft)
        # draw the text.
        canvas.DrawText(secondsLeft, (w - tw) / 2, (h - th) / 2)
        self.statbmp.SetBitmap(draw_bmp)

app = wx.App()
AFrame()
app.MainLoop()
Tart it up a bit, furas!
And no, this answer should not be accepted, as it is ripping off furas, to whom any credit should go. Rolf
#!/usr/bin/env python

import wx
import datetime

class AFrame(wx.Frame):
    def __init__(self):
        wx.Frame.__init__(self, parent=None, id=-1, title="Countdown to Christmas", size=(400, 400))
        # 2015.12.25 1:00:00
        self.future_time = datetime.datetime(2015, 12, 25, 1, 0, 0)
        # create timer
        self.timer = wx.Timer(self)
        # assign draw_image to timer
        self.Bind(wx.EVT_TIMER, self.draw_image, self.timer)
        # start timer
        self.timer.Start(1000)
        self.statbmp = wx.StaticBitmap(self)
        self.draw_image()
        self.Refresh()
        self.Show()

    def draw_image(self, event=None):  # event argument required by timer
        # get timedelta
        secondsLeft = self.future_time - datetime.datetime.now()
        # get seconds, rounded to an integer
        secondsLeft = int(secondsLeft.total_seconds())
        if secondsLeft <= 0:
            secondsLeft = 0
            if self.timer.IsRunning():
                self.timer.Stop()
        # convert to text
        d, s = divmod(secondsLeft, 86400)
        h, s = divmod(s, 3600)
        m, s = divmod(s, 60)
        time_stamp = 30 * " "
        if d > 0:
            time_stamp = "%02d Days Hrs:%02d Mins:%02d Secs:%02d" % (d, h, m, s)
        elif h > 0:
            time_stamp = "Hrs:%02d Mins:%02d Secs:%02d" % (h, m, s)
        else:
            time_stamp = "Mins:%02d Secs:%02d" % (m, s)
        # select the width and height of the blank bitmap
        # must fit frame
        w, h = 400, 400
        # create the blank bitmap as background
        draw_bmp = wx.EmptyBitmap(w, h)
        # create canvas.
        canvas = wx.MemoryDC(draw_bmp)
        # fill the canvas with white
        canvas.SetBrush(wx.Brush('white'))
        canvas.Clear()
        # get text dimensions.
        tw, th = canvas.GetTextExtent(time_stamp)
        # draw the text.
        canvas.DrawText(time_stamp, (w - tw) / 2, (h - th) / 2)
        self.statbmp.SetBitmap(draw_bmp)

app = wx.App()
AFrame()
app.MainLoop()