Tkinter display other windows - python

Is it possible to display a window created by OpenCV inside Tkinter? I want to open it with Tkinter so that I can provide more GUI functions. Has this been done before? I checked Google and SO itself but did not find anything.
So as kobejohn suggested, I am attaching the code for the camera capture and display.
import cv2
import urllib
import numpy as np
import subprocess

stream = urllib.urlopen('IP Address')
bytes = ''
while True:
    bytes += stream.read(1024)
    a = bytes.find('\xff\xd8')
    b = bytes.find('\xff\xd9')
    if a != -1 and b != -1:
        jpg = bytes[a:b + 2]
        bytes = bytes[b + 2:]
        i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
        cv2.imshow('i', i)
        if cv2.waitKey(1) == 27:
            exit(0)

This code is based on the discussion in comments. It doesn't put the opencv window into tkinter. It just takes opencv images and puts them into tkinter.
Prakhar, I don't have an available IP camera so can you try this? I have confirmed that it works with the USB code at the bottom of this answer.
Basically, I just inserted your jpg reading code into a simplified version of this SO question to get the code below. It uses a 2-step conversion: bytes --> opencv image --> tkinter image. There may be a more efficient way to convert directly from the bytes to a tkinter image but you can fix that if performance becomes a problem.
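For what it's worth, here is a minimal sketch of that more direct path, decoding the JPEG bytes straight with PIL and skipping cv2.imdecode plus the BGR-to-RGB conversion. This is untested against the stream above and the helper name is mine:

import io
import PIL.Image
import PIL.ImageTk

def jpg_to_tk_image(jpg_bytes):
    # decode the JPEG directly with PIL; a tk.Tk() root must already exist
    # before ImageTk.PhotoImage can be created
    pil_image = PIL.Image.open(io.BytesIO(jpg_bytes))
    return PIL.ImageTk.PhotoImage(image=pil_image)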
IP Camera
import cv2
import numpy as np
import PIL.Image
import PIL.ImageTk
import Tkinter as tk
import urllib

stream = urllib.urlopen('IP Address')
bytes_ = ''

def update_image(image_label):
    global bytes_
    bytes_ += stream.read(1024)
    a = bytes_.find('\xff\xd8')
    b = bytes_.find('\xff\xd9')
    if (a != -1) and (b != -1):
        jpg = bytes_[a:b + 2]
        bytes_ = bytes_[b + 2:]
        cv_image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),
                                cv2.CV_LOAD_IMAGE_COLOR)
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
        pil_image = PIL.Image.fromarray(cv_image)
        tk_image = PIL.ImageTk.PhotoImage(image=pil_image)
        image_label.configure(image=tk_image)
        image_label._image_cache = tk_image  # avoid garbage collection
        root.update()

def update_all(root, image_label):
    if root.quit_flag:
        root.destroy()  # this avoids the update event being in limbo
    else:
        update_image(image_label)
        root.after(1, func=lambda: update_all(root, image_label))

if __name__ == '__main__':
    root = tk.Tk()
    setattr(root, 'quit_flag', False)

    def set_quit_flag():
        root.quit_flag = True

    root.protocol('WM_DELETE_WINDOW', set_quit_flag)
    image_label = tk.Label(master=root)  # label for the video frame
    image_label.pack()
    root.after(0, func=lambda: update_all(root, image_label))
    root.mainloop()
USB Camera
Edit: I have confirmed that the code below works to take video from a USB camera using opencv and send it to a tkinter window, so hopefully the above code will work for your IP camera.
import cv2
import PIL.Image
import PIL.ImageTk
import Tkinter as tk

def update_image(image_label, cv_capture):
    cv_image = cv_capture.read()[1]
    cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
    pil_image = PIL.Image.fromarray(cv_image)
    tk_image = PIL.ImageTk.PhotoImage(image=pil_image)
    image_label.configure(image=tk_image)
    image_label._image_cache = tk_image  # avoid garbage collection
    root.update()

def update_all(root, image_label, cv_capture):
    if root.quit_flag:
        root.destroy()  # this avoids the update event being in limbo
    else:
        update_image(image_label, cv_capture)
        root.after(10, func=lambda: update_all(root, image_label, cv_capture))

if __name__ == '__main__':
    cv_capture = cv2.VideoCapture()
    cv_capture.open(0)  # have to use whatever your camera id actually is
    root = tk.Tk()
    setattr(root, 'quit_flag', False)

    def set_quit_flag():
        root.quit_flag = True

    root.protocol('WM_DELETE_WINDOW', set_quit_flag)  # avoid errors on exit
    image_label = tk.Label(master=root)  # the video will go here
    image_label.pack()
    root.after(0, func=lambda: update_all(root, image_label, cv_capture))
    root.mainloop()
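A quick note if you're on Python 3 with a recent OpenCV: several names used above have since changed. As far as I know, the equivalents are:

import tkinter as tk                  # Tkinter was renamed to tkinter
from urllib.request import urlopen    # urllib.urlopen moved to urllib.request
# np.fromstring(...) is deprecated; use np.frombuffer(jpg, dtype=np.uint8)
# cv2.CV_LOAD_IMAGE_COLOR was removed; use cv2.IMREAD_COLOR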

Related

Best way if applicable to implement multiprocessing to this python image handler?

Hope you all are well!
I've spent the last couple of weeks researching image processing for my tkinter application and came up with this script:
import contextlib
import tkinter as tk
from PIL import Image, ImageTk, ImageSequence
import requests
from itertools import cycle

class ImageLabel(tk.Label):
    """
    A Label that displays images, and plays them if they are gifs
    :im: A PIL Image instance or a string filename
    """
    def load(self, url, width, height=None):
        request = requests.get(url, stream=True).raw
        im = Image.open(request)
        if (height != None):
            size = (width, height)
        else:
            size = (width, get_relative_height(im, width))
        try:
            self.delay = im.info['duration']
        except Exception:
            self.delay = 100
        global frames_complete
        frames_complete = False
        self.frames_chunk = cycle(process_frames(im, size))
        if frames_complete:
            self.next_frame()

    def next_frame(self):
        self.config(image=next(self.frames_chunk))
        self.after(self.delay, self.next_frame)

    def unload(self):
        self.destroy()

def get_relative_height(source, mywidth):
    _, height = source.size
    wpercent = (mywidth / float(height))
    return int((float(height) * float(wpercent)))

def process_frames(im, size):  # resize and arrange gifs
    frames_chunk = []
    mode = analyseImage(im)["mode"]
    last_frame = im.convert("RGBA")
    for i, frame in enumerate(ImageSequence.Iterator(im)):
        frame_image = Image.new("RGBA", frame.size)
        if mode == "partial":
            frame_image.paste(last_frame)
        print(f'Processing frame {i}')
        frame_image.paste(frame, (0, 0), frame.convert("RGBA"))
        frame_image.thumbnail(size, Image.BICUBIC)
        new_frame = ImageTk.PhotoImage(frame_image)
        frames_chunk.append(new_frame)
        print("appended frame to frames_chunk")
    print("frames completed")
    global frames_complete
    frames_complete = True
    return frames_chunk

def analyseImage(im):
    """
    Pre-process pass over the image to determine the mode (full or additive).
    Necessary as assessing single frames isn't reliable. Need to know the mode
    before processing all frames.
    """
    results = {
        "size": im.size,
        "mode": "full",
    }
    with contextlib.suppress(EOFError):
        while True:
            if im.tile:
                tile = im.tile[0]
                update_region = tile[1]
                update_region_dimensions = update_region[2:]
                if update_region_dimensions != im.size:
                    results["mode"] = "partial"
                    break
            im.seek(im.tell() + 1)
    return results

# test:
root = tk.Tk()
lbl = ImageLabel(root)
lbl.pack()
lbl.load("https://www.saic.edu/~anelso13/gif/images/cat14.gif", 300)
root.mainloop()
Running this from inside a tkinter app slows the app down and also freezes the GUI until the frames are finished processing.
The class works alright on its own, but there are two major issues I've been struggling to solve:
1. The process_frames function iterates frame by frame and is very slow. In the app I'm working on I instantiate this class twice, and it takes about 10 seconds to process and resize every frame. I ran the function inside a thread but it didn't seem to improve the speed at all.
2. The main tkinter application freezes until both sets of frames are processed. I've looked at a few resources and tried a few implementations (tkinter: preventing main loop from freezing) and here.
I have a thread running in the program already which does work as expected, but using the same method for processing the frames does not work.
Any and all help is greatly appreciated!
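One pattern that usually helps here, as a minimal sketch rather than a drop-in replacement for the ImageLabel class above (the helper names are mine, and it skips the partial-frame compositing for brevity): do the slow PIL resizing in a worker thread, keep every ImageTk.PhotoImage creation on the Tk main thread, and hand frames over through a queue polled with after() so the GUI never blocks.

import queue
import threading
from itertools import cycle
import tkinter as tk
from PIL import Image, ImageTk, ImageSequence

def resize_frames_worker(im, size, out_queue):
    # slow, PIL-only work runs off the Tk main thread
    for frame in ImageSequence.Iterator(im):
        rgba = frame.convert("RGBA")
        rgba.thumbnail(size, Image.BICUBIC)
        out_queue.put(rgba)
    out_queue.put(None)  # sentinel: no more frames

def poll_frames(root, label, out_queue, tk_frames):
    # runs on the Tk main thread; PhotoImage objects must be created here
    try:
        while True:
            item = out_queue.get_nowait()
            if item is None:
                label.frames = cycle(tk_frames)
                animate(label)  # start playing once every frame is converted
                return
            tk_frames.append(ImageTk.PhotoImage(item))
    except queue.Empty:
        pass
    root.after(50, poll_frames, root, label, out_queue, tk_frames)

def animate(label, delay=100):
    label.config(image=next(label.frames))
    label.after(delay, animate, label, delay)

if __name__ == '__main__':
    root = tk.Tk()
    label = tk.Label(root)
    label.pack()
    gif = Image.open("cat14.gif")  # hypothetical local file
    q = queue.Queue()
    threading.Thread(target=resize_frames_worker,
                     args=(gif, (300, 300), q), daemon=True).start()
    poll_frames(root, label, q, tk_frames=[])
    root.mainloop()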

Python cv2 video resizing issue

I'm trying to resize the video to full screen, but it's not working. How do I fix it?
from asyncore import loop
from distutils import command
import tkinter
from tkinter import *
from tkvideo import tkvideo
from tkinter import ttk
from tkinter import font
import PIL
from PIL import ImageTk
from PIL import Image
from StudentDetails import *
from entry import EntryLogsSystem
from main import *
import os, sys, subprocess
from training import Training
from recognition import Recognition
from datetime import datetime
import time
from entry import *
import os
import tkinter as tk
from tkVideoPlayer import TkinterVideo
import matplotlib
import matplotlib.pyplot as plt

class home:
    def __init__(self, root):
        self.root = root
        self.root.geometry("1530x790+0+0")
        self.root.title("Face Recognition System")
        self.root.config(bg="black")

        cap = cv2.VideoCapture('v2.mp4')
        if (cap.isOpened() == False):
            print("Error opening video file")
        while (cap.isOpened()):
            ret, frame = cap.read()
            if ret == True:
                cv2.imshow('Smart Access Face Recognition Portal', frame)
                frame = cv2.resize(frame, (1920, 1080))
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
            else:
                break
        cap.release()
        os.system('python loading.py')
        # Resource: https://coding-engineer.com/2020/09/09/python-opencv-reading-a-video/

if __name__ == "__main__":
    root = Tk()
    obj = home(root)
    root.mainloop()
I tried this code but nothing works. I tried to resize using cv2.resize, but it resizes to a specific resolution rather than full screen, even when I increase the values.
I also tried the code below, but nothing worked either:
class home:
    def __init__(self, root):
        self.root = root
        self.root.geometry("1530x790+0+0")
        self.root.title("Face Recognition System")
        self.root.config(bg="black")

        cap = cv2.VideoCapture("v2.mp4")
        while (cap.isOpened()):
            ret, frame = cap.read()
            frame = cv2.resize(frame, (3000, 3000))
            cv2.imshow("video", frame)
            if cv2.waitKey(10) & 0xFF == ord("q"):
                break
        cap.release()
        cv2.destroyAllWindows()

if __name__ == "__main__":
    root = Tk()
    obj = home(root)
    root.mainloop()
You can create a namedWindow, set its fullscreen property, and then show your image in it.
Creation of the window:
cv2.namedWindow("winname", cv2.WINDOW_NORMAL)  # WINDOW_NORMAL so the fullscreen property below can take effect
cv2.setWindowProperty("winname", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
Displaying image:
cv2.imshow("winname", frame)
It'll automatically resize the image to your window width/height.
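Putting it together with your playback loop, as a minimal sketch: 'v2.mp4' is taken from your code, the window name is arbitrary, and the rest is just one way to wire it up.

import cv2

win = "video"  # arbitrary window name
cv2.namedWindow(win, cv2.WINDOW_NORMAL)
cv2.setWindowProperty(win, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

cap = cv2.VideoCapture("v2.mp4")
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow(win, frame)  # no cv2.resize needed; the fullscreen window scales the frame
    if cv2.waitKey(25) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()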

How to display videos on tkinter using sequence of images

I am trying to build a GUI that displays a sequence of images as a video. The images are numpy arrays.
The code works when I display one image at a time, but it crashes when I try to run them as a sequence.
The code:
from tkinter import *
from scipy.io import loadmat
from PIL import ImageTk, Image
import time

data = loadmat('DepthFrames.mat')['DepthFrames'].squeeze(axis=0)
print(data.shape)
counter = 0

root = Tk()
image = ImageTk.PhotoImage(image=Image.fromarray(data[counter]))
root.title("WUDU VIDEOS LABEL TOOL")
myLabel = Label(root, image=image)
myLabel.grid(row=0)

def changeImg():
    global counter
    counter += 1
    print(counter)
    image = ImageTk.PhotoImage(image=Image.fromarray(data[counter]))
    myLabel.configure(image=image)
    myLabel.image = image

def playVideo():
    for i in range(10):
        image = ImageTk.PhotoImage(image=Image.fromarray(data[i]))
        myLabel.configure(image=image)
        myLabel.image = image
        time.sleep(0.03333)

my_Button = Button(text="Play video", command=playVideo)
my_Button.grid(row=1)

root.mainloop()
time.sleep blocks tkinter's main thread: your code freezes the GUI until the for loop completes, and only the last image ends up being shown. For more details, see this post.
You need to use the after method. Something like this:
def playVideo(frame=0):
    try:
        image = ImageTk.PhotoImage(image=Image.fromarray(data[frame]))
    except IndexError:
        return
    myLabel.configure(image=image)
    myLabel.image = image
    root.after(33, playVideo, frame + 1)

showing video on the entire screen using OpenCV and Tkinter

I'm trying to create a GUI for playing a video that fills up the entire screen, while the Snapshot button is still visible at the bottom.
Right now, what I manage to do is just set the app window itself to fullscreen, resulting in a small video playing at the top and a huge "Snapshot" button at the bottom.
Is there a way to make the video fill up the entire screen?
thanks!
from PIL import Image, ImageTk
import Tkinter as tk
import argparse
import datetime
import cv2
import os

class Application:
    def __init__(self, output_path = "./"):
        """ Initialize application which uses OpenCV + Tkinter. It displays
            a video stream in a Tkinter window and stores current snapshot on disk """
        self.vs = cv2.VideoCapture('Cat Walking.mp4')  # capture video frames, 0 is your default video camera
        self.output_path = output_path  # store output path
        self.current_image = None  # current image from the camera

        self.root = tk.Tk()  # initialize root window
        self.root.title("PyImageSearch PhotoBooth")  # set window title
        # self.destructor function gets fired when the window is closed
        self.root.protocol('WM_DELETE_WINDOW', self.destructor)

        self.panel = tk.Label(self.root)  # initialize image panel
        self.panel.pack(padx=10, pady=10)

        # create a button, that when pressed, will take the current frame and save it to file
        btn = tk.Button(self.root, text="Snapshot!", command=self.take_snapshot)
        btn.pack(fill="both", expand=True, padx=10, pady=10)

        # start a self.video_loop that constantly pools the video sensor
        # for the most recently read frame
        self.video_loop()

    def video_loop(self):
        """ Get frame from the video stream and show it in Tkinter """
        ok, frame = self.vs.read()  # read frame from video stream
        if ok:  # frame captured without any errors
            cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)  # convert colors from BGR to RGBA
            self.current_image = Image.fromarray(cv2image)  # convert image for PIL
            imgtk = ImageTk.PhotoImage(image=self.current_image)  # convert image for tkinter
            self.panel.imgtk = imgtk  # anchor imgtk so it does not be deleted by garbage-collector
            self.root.attributes("-fullscreen", True)
            #self.oot.wm_state('zoomed')
            self.panel.config(image=imgtk)  # show the image
        self.root.after(1, self.video_loop)  # call the same function after 30 milliseconds

    def take_snapshot(self):
        """ Take snapshot and save it to the file """
        ts = datetime.datetime.now()  # grab the current timestamp
        filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S"))  # construct filename
        p = os.path.join(self.output_path, filename)  # construct output path
        self.current_image.save(p, "JPEG")  # save image as jpeg file
        print("[INFO] saved {}".format(filename))

    def destructor(self):
        """ Destroy the root object and release all resources """
        print("[INFO] closing...")
        self.root.destroy()
        self.vs.release()  # release web camera
        cv2.destroyAllWindows()  # it is not mandatory in this application

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", default="./",
    help="path to output directory to store snapshots (default: current folder")
args = vars(ap.parse_args())

# start the app
print("[INFO] starting...")
pba = Application(args["output"])
pba.root.mainloop()
It's not a hard task if you don't care about execution time! We know that resizing an image isn't rocket science for the average user, but under the hood it takes some time to resize each frame. And if you really wonder about time and options, there are many options to play with, from numpy/scipy to skimage/skvideo.
But let's try to do something with your code "as is", so we have two options to play with: cv2 and Image. For testing, I grabbed 20 seconds of the "Keyboard Cat" video from YouTube (480p) and resized each frame up to 1080p; the GUI looks like this (fullscreen 1920x1080):
Resize method / timeit elapsed time of showing frames:
cv2.resize() / ~81.377 s
Image.resize() / ~82.98 s
As you can see, there is no big difference between these two, so here's the code (only the Application class and video_loop changed):
# imports
try:
    import tkinter as tk
except:
    import Tkinter as tk
from PIL import Image, ImageTk
import argparse
import datetime
import cv2
import os

class Application:
    def __init__(self, output_path = "./"):
        """ Initialize application which uses OpenCV + Tkinter. It displays
            a video stream in a Tkinter window and stores current snapshot on disk """
        self.vs = cv2.VideoCapture('KeyCat.mp4')  # capture video frames, 0 is your default video camera
        self.output_path = output_path  # store output path
        self.current_image = None  # current image from the camera

        self.root = tk.Tk()  # initialize root window
        self.root.title("PyImageSearch PhotoBooth")  # set window title
        # self.destructor function gets fired when the window is closed
        self.root.protocol('WM_DELETE_WINDOW', self.destructor)
        self.root.attributes("-fullscreen", True)

        # getting size to resize! 30 - space for button
        self.size = (self.root.winfo_screenwidth(), self.root.winfo_screenheight() - 30)

        self.panel = tk.Label(self.root)  # initialize image panel
        self.panel.pack(fill='both', expand=True)

        # create a button, that when pressed, will take the current frame and save it to file
        self.btn = tk.Button(self.root, text="Snapshot!", command=self.take_snapshot)
        self.btn.pack(fill='x', expand=True)

        # start a self.video_loop that constantly pools the video sensor
        # for the most recently read frame
        self.video_loop()

    def video_loop(self):
        """ Get frame from the video stream and show it in Tkinter """
        ok, frame = self.vs.read()  # read frame from video stream
        if ok:  # frame captured without any errors
            cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)  # convert colors from BGR to RGBA
            cv2image = cv2.resize(cv2image, self.size, interpolation=cv2.INTER_NEAREST)
            self.current_image = Image.fromarray(cv2image)  #.resize(self.size, resample=Image.NEAREST) # convert image for PIL
            self.panel.imgtk = ImageTk.PhotoImage(image=self.current_image)
            self.panel.config(image=self.panel.imgtk)  # show the image
        self.root.after(1, self.video_loop)  # call the same function after 30 milliseconds
But as you know, doing such things "on the fly" isn't a good idea, so let's try to resize all the frames first and then do the rest (only the Application class and video_loop method changed; a resize_video method was added):
class Application:
    def __init__(self, output_path = "./"):
        """ Initialize application which uses OpenCV + Tkinter. It displays
            a video stream in a Tkinter window and stores current snapshot on disk """
        self.vs = cv2.VideoCapture('KeyCat.mp4')  # capture video frames, 0 is your default video camera
        ...
        # init frames
        self.frames = self.resize_video()
        self.video_loop()

    def resize_video(self):
        temp = list()
        try:
            temp_count_const = cv2.CAP_PROP_FRAME_COUNT
        except AttributeError:
            temp_count_const = cv2.cv.CV_CAP_PROP_FRAME_COUNT
        frames_count = self.vs.get(temp_count_const)
        while self.vs.isOpened():
            ok, frame = self.vs.read()  # read frame from video stream
            if ok:  # frame captured without any errors
                cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)  # convert colors from BGR to RGBA
                cv2image = cv2.resize(cv2image, self.size, interpolation=cv2.INTER_NEAREST)
                cv2image = Image.fromarray(cv2image)  # convert image for PIL
                temp.append(cv2image)
                # simple progress print w/o sys import
                print('%d/%d\t%d%%' % (len(temp), frames_count, ((len(temp) / frames_count) * 100)))
            else:
                return temp

    def video_loop(self):
        """ Get frame from the video stream and show it in Tkinter """
        if len(self.frames) != 0:
            self.current_image = self.frames.pop(0)
            self.panel.imgtk = ImageTk.PhotoImage(self.current_image)
            self.panel.config(image=self.panel.imgtk)
        self.root.after(1, self.video_loop)  # call the same function after 30 milliseconds
timeit elapsed time of showing pre-resized frames: ~78.78 s
As you can see, resizing isn't the main problem of your script, but pre-resizing is still a good option!
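One caveat with the pre-resizing approach, as a back-of-the-envelope estimate of my own (the clip length and frame rate are assumptions): holding every decoded RGBA frame in RAM gets expensive quickly for longer videos.

# rough RAM estimate for the pre-resized frame list
width, height, bytes_per_pixel = 1920, 1050, 4   # roughly the target size used above
fps, seconds = 25, 20                            # assumed clip rate and length
frames = fps * seconds
print(frames * width * height * bytes_per_pixel / 2**30, "GiB")  # ~3.8 GiB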

Face Detection OpenCV Python GUI

I'm new to the Python GUI world. I'm developing a Python project to detect faces in a webcam and take a picture of the face; that part is already set up. I've been searching and testing code for the GUI part, and I tried PyQt and PySide but didn't have success. I found code from Adrian at PyImageSearch which has a frame for the webcam and a button to capture the image, and it uses Tkinter for the GUI.
from __future__ import print_function
from PIL import Image
from PIL import ImageTk
import Tkinter as tki
import threading
import datetime
import imutils
import cv2
import os

class PhotoBoothApp:
    def __init__(self, vs, outputPath):
        # store the video stream object and output path, then initialize
        # the most recently read frame, thread for reading frames, and
        # the thread stop event
        self.vs = vs
        self.outputPath = outputPath
        self.frame = None
        self.thread = None
        self.stopEvent = None

        # initialize the root window and image panel
        self.root = tki.Tk()
        self.panel = None

        # create a button, that when pressed, will take the current
        # frame and save it to file
        btn = tki.Button(self.root, text="Snapshot!",
            command=self.takeSnapshot)
        btn.pack(side="bottom", fill="both", expand="yes", padx=10,
            pady=10)

        # start a thread that constantly pools the video sensor for
        # the most recently read frame
        self.stopEvent = threading.Event()
        self.thread = threading.Thread(target=self.videoLoop, args=())
        self.thread.start()

        # set a callback to handle when the window is closed
        self.root.wm_title("PyImageSearch PhotoBooth")
        self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)

    def videoLoop(self):
        # DISCLAIMER:
        # I'm not a GUI developer, nor do I even pretend to be. This
        # try/except statement is a pretty ugly hack to get around
        # a RunTime error that Tkinter throws due to threading
        try:
            # keep looping over frames until we are instructed to stop
            while not self.stopEvent.is_set():
                # grab the frame from the video stream and resize it to
                # have a maximum width of 300 pixels
                self.frame = self.vs.read()
                self.frame = imutils.resize(self.frame, width=300)

                # OpenCV represents images in BGR order; however PIL
                # represents images in RGB order, so we need to swap
                # the channels, then convert to PIL and ImageTk format
                image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
                image = Image.fromarray(image)
                image = ImageTk.PhotoImage(image)

                # if the panel is not None, we need to initialize it
                if self.panel is None:
                    self.panel = tki.Label(image=image)
                    self.panel.image = image
                    self.panel.pack(side="left", padx=10, pady=10)

                # otherwise, simply update the panel
                else:
                    self.panel.configure(image=image)
                    self.panel.image = image

        except RuntimeError, e:
            print("[INFO] caught a RuntimeError")

    def takeSnapshot(self):
        # grab the current timestamp and use it to construct the
        # output path
        ts = datetime.datetime.now()
        filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S"))
        p = os.path.sep.join((self.outputPath, filename))

        # save the file
        cv2.imwrite(p, self.frame.copy())
        print("[INFO] saved {}".format(filename))

    def onClose(self):
        # set the stop event, cleanup the camera, and allow the rest of
        # the quit process to continue
        print("[INFO] closing...")
        self.stopEvent.set()
        self.vs.stop()
        self.root.quit()
Now, I think the face detection part should go in the videoLoop function, so I added the face detection code to it and the function looks like this:
try:
    # keep looping over frames until we are instructed to stop
    while not self.stopEvent.is_set():
        # grab the frame from the video stream and resize it to
        # have a maximum width of 300 pixels
        self.frame = self.vs.read()
        self.frame = imutils.resize(self.frame, width=300)

        # OpenCV represents images in BGR order; however PIL
        # represents images in RGB order, so we need to swap
        # the channels, then convert to PIL and ImageTk format
        detector = cv2.CascadeClassifier("C:\Proyectos\Python\GUI\tkinter-photo-booth\haarcascade_frontalface_default.xml")
        image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)

        # load the cat detector Haar cascade, then detect cat faces
        # in the input image
        rects = detector.detectMultiScale(image, scaleFactor=1.5,
            minNeighbors=5, minSize=(30, 30))

        # loop over the cat faces and draw a rectangle surrounding each
        for (i, (x, y, w, h)) in rects:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)

        image = Image.fromarray(image)
        image = ImageTk.PhotoImage(image)

        # if the panel is not None, we need to initialize it
        if self.panel is None:
            self.panel = tki.Label(image=image)
            self.panel.image = image
            self.panel.pack(side="left", padx=10, pady=10)

        # otherwise, simply update the panel
        else:
            self.panel.configure(image=image)
            self.panel.image = image
But when I run the code, the rectangles framing the face don't appear. I don't really know if the face detection code should go in the videoLoop function or in the snapshot function. I've already asked Adrian on his webpage, but I'm looking for extra help. Thanks in advance.
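One thing worth checking, as a hedged suggestion rather than a confirmed fix: detectMultiScale returns plain (x, y, w, h) boxes, so the loop for (i, (x, y, w, h)) in rects will raise a ValueError as soon as a face is actually found (the original cat-detector example wrapped rects in enumerate). Loading the cascade once outside the loop and detecting on a grayscale frame is also cheaper. Here is a standalone sketch of just the detection part, using the cascade path from the question and assuming camera index 0:

import cv2
import imutils

# the cascade file must exist at this path (taken from the question)
detector = cv2.CascadeClassifier(
    r"C:\Proyectos\Python\GUI\tkinter-photo-booth\haarcascade_frontalface_default.xml")
cap = cv2.VideoCapture(0)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    frame = imutils.resize(frame, width=300)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # detectMultiScale returns a sequence of (x, y, w, h) boxes; no enumerate needed
    rects = detector.detectMultiScale(gray, scaleFactor=1.1,
        minNeighbors=5, minSize=(30, 30))
    for (x, y, w, h) in rects:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

    cv2.imshow("faces", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()

Once the rectangles show up here, the same drawing code can be moved back into videoLoop before the PIL/ImageTk conversion.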
