I'm trying to resize the video to full screen, but it's not working. How do I fix it?
from asyncore import loop
from distutils import command
import tkinter
from tkinter import *
from tkvideo import tkvideo
from tkinter import ttk
from tkinter import font
import PIL
from PIL import ImageTk
from PIL import Image
from StudentDetails import *
from entry import EntryLogsSystem
from main import *
import os, sys, subprocess
from training import Training
from recognition import Recognition
from datetime import datetime
import time
from entry import *
import os
import tkinter as tk
from tkVideoPlayer import TkinterVideo
import matplotlib
import matplotlib.pyplot as plt
class home:
    def __init__(self, root):
        self.root = root
        self.root.geometry("1530x790+0+0")
        self.root.title("Face Recognition System")
        self.root.config(bg="black")

        cap = cv2.VideoCapture('v2.mp4')
        if (cap.isOpened() == False):
            print("Error opening video file")
        while (cap.isOpened()):
            ret, frame = cap.read()
            if ret == True:
                cv2.imshow('Smart Access Face Recognition Portal', frame)
                frame = cv2.resize(frame, (1920, 1080))
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    break
            else:
                break
        cap.release()
        os.system('python loading.py')
        # Resource: https://coding-engineer.com/2020/09/09/python-opencv-reading-a-video/

if __name__ == "__main__":
    root = Tk()
    obj = home(root)
    root.mainloop()
I tried this code, but nothing works. I tried to resize using cv2.resize, but it resizes to a specific resolution rather than full screen, even when I increase the values.
I also tried the code below, but that didn't work either:
class home:
    def __init__(self, root):
        self.root = root
        self.root.geometry("1530x790+0+0")
        self.root.title("Face Recognition System")
        self.root.config(bg="black")

        cap = cv2.VideoCapture("v2.mp4")
        while (cap.isOpened()):
            ret, frame = cap.read()
            frame = cv2.resize(frame, (3000, 3000))
            cv2.imshow("video", frame)
            if cv2.waitKey(10) & 0xFF == ord("q"):
                break
        cap.release()
        cv2.destroyAllWindows()

if __name__ == "__main__":
    root = Tk()
    obj = home(root)
    root.mainloop()
You can create a named window with the fullscreen property set, then show your frames in it.
Creating the window (use cv2.WINDOW_NORMAL so the window is resizable, then switch it to fullscreen):
cv2.namedWindow("winname", cv2.WINDOW_NORMAL)
cv2.setWindowProperty("winname", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
Displaying the image:
cv2.imshow("winname", frame)
It will automatically scale the frame to your window's width/height.
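For reference, here is a minimal, self-contained sketch of that approach applied to the first snippet from the question (the file name v2.mp4 and the window title are taken from the question; everything else is just the standard OpenCV calls):

import cv2

# Play a video file in a fullscreen OpenCV window.
cap = cv2.VideoCapture('v2.mp4')

window = 'Smart Access Face Recognition Portal'
cv2.namedWindow(window, cv2.WINDOW_NORMAL)  # resizable window
cv2.setWindowProperty(window, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # No cv2.resize needed: the fullscreen window scales the frame itself.
    cv2.imshow(window, frame)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()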
I'm trying to create a GUI that will display an image that is taken every 10 seconds and its processed counterpart. I'm having trouble getting the GUI window to update the two images when they get overwritten in the file system.
Here's the code I have so far:
import cv2
import threading
from imageai.Detection import ObjectDetection
import os
from tkinter import *
from PIL import ImageTk, Image

root = Tk()
canvas = Canvas(root, width=2000, height=2000)
canvas.pack()

execution_path = os.getcwd()
inimg = ImageTk.PhotoImage(Image.open(os.path.join(execution_path, "test.jpg")))
outimg = ImageTk.PhotoImage(Image.open(os.path.join(execution_path, "testnew.jpg")))

def capture():
    print("Capturing...")
    videoCaptureObject = cv2.VideoCapture(0)
    ret, frame = videoCaptureObject.read()
    cv2.imwrite("test.jpg", frame)
    print("Done capturing.")
    print("Finding")
    totalPersons = 0
    detections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path, "test.jpg"), output_image_path=os.path.join(execution_path, "testnew.jpg"))
    for eachObject in detections:
        if (eachObject["name"] == "person"):
            totalPersons += 1
    print("Total persons " + str(totalPersons))

detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
execution_path = os.getcwd()
detector.setModelPath(os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
detector.loadModel("fastest")

def run():
    threading.Timer(10.0, run).start()
    canvas.create_image(20, 20, anchor=NW, image=inimg)
    canvas.create_image(750, 500, anchor=SW, image=outimg)
    root.mainloop()

capture()
run()
Try using
root.update_idletasks()
to update the tkinter GUI window.
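A hedged sketch of how that could fit into the run() function from the question (this reuses the question's globals; re-creating the PhotoImage objects is my own addition, since tkinter keeps showing the images loaded at startup unless they are reloaded from the overwritten files; root.mainloop() is assumed to be called once at the end of the script rather than inside run()):

def run():
    global inimg, outimg
    # Re-schedule; note that driving tkinter from a Timer thread is fragile,
    # root.after(10000, run) is the safer equivalent.
    threading.Timer(10.0, run).start()
    # Reload the overwritten files so the canvas actually shows the new images.
    inimg = ImageTk.PhotoImage(Image.open(os.path.join(execution_path, "test.jpg")))
    outimg = ImageTk.PhotoImage(Image.open(os.path.join(execution_path, "testnew.jpg")))
    canvas.create_image(20, 20, anchor=NW, image=inimg)
    canvas.create_image(750, 500, anchor=SW, image=outimg)
    root.update_idletasks()

capture()
run()
root.mainloop()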
I am trying to cast video from my external camera, captured through an AliExpress EasyCap, to my Kivy app. One issue I've faced is that it crashes with a segmentation fault when trying to
texture = Texture.create(size=(frame.shape[0], frame.shape[1]))
I've found that the problem is on Kivy's side: it sometimes can't create NPOT (non-power-of-two) textures. So I changed it to a POT shape and copied what fits into another numpy array.
flipped = cv2.flip(frame, 0)
buf = np.zeros((512, 512, 3), dtype=np.uint8)
for i in range(min(frame.shape[0], 512)):
    for j in range(min(frame.shape[1], 512)):
        buf[i, j] = flipped[i, j]
buf = buf.tostring()
texture = Texture.create(size=(512, 512))
texture.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
self.texture = texture
But it still crashes with the same old segmentation fault on the following line:
texture.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
If it is relevant, cv2.imshow("image", buf) before buf.tostring() shows the image correctly.
Here's the original code:
from kivy.app import App
from kivy.uix.image import Image
from kivy.clock import Clock
from kivy.graphics.texture import Texture
import cv2
import threading
from time import sleep
import numpy as np

class KivyCamera(Image):
    def __init__(self, **kwargs):
        super(KivyCamera, self).__init__(**kwargs)
        self.fps = 30
        self.capture = cv2.VideoCapture(0)
        threading.Thread(target=self.update).start()

    def update(self):
        while True:
            ret, frame = self.capture.read()
            if ret:
                buf = cv2.flip(frame, 0).tostring()
                texture = Texture.create(size=(frame.shape[0], frame.shape[1]))
                texture.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
                self.texture = texture
            sleep(1.0 / self.fps)

class CamApp(App):
    def build(self):
        return KivyCamera()

if __name__ == "__main__":
    CamApp().run()
A few issues with your code:
GUI updates should always be done in the main thread, as indicated by John Anderson. Hence, instead of a separate thread, the update() function should be called through a Clock schedule.
The size tuple in the Texture.create statement should be reversed, and colorfmt="bgr" should be added. It should be: texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt="bgr").
There is no layout defined. A layout such as BoxLayout should be added as a container for the Image widget.
Below is the modified working version of your code:
from kivy.app import App
from kivy.uix.image import Image
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from kivy.graphics.texture import Texture
import cv2

class KivyCamera(BoxLayout):
    def __init__(self, **kwargs):
        super(KivyCamera, self).__init__(**kwargs)
        self.img1 = Image()
        self.add_widget(self.img1)
        self.capture = cv2.VideoCapture(0)
        Clock.schedule_interval(self.update, 1.0 / 33.0)

    def update(self, *args):
        ret, frame = self.capture.read()
        if ret:
            buf = cv2.flip(frame, 0).tostring()
            texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt="bgr")
            texture.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
            self.img1.texture = texture

class CamApp(App):
    def build(self):
        return KivyCamera()

if __name__ == "__main__":
    CamApp().run()
The while loop is removed, and Clock.schedule_interval is used with steps of 1/33 of a second.
Hope this helps.
Not well documented, but I believe the Texture manipulation (specifically the blit_buffer()) must be done on the main thread. Your update() method is being run in another thread, so use Clock.schedule_once() to call a method that does the Texture manipulation (and the self.texture = texture) back in the main thread.
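A rough, hedged sketch of that pattern built on the question's code (the show_frame method and the daemon thread flag are my own additions, not part of the original answer):

from functools import partial
from time import sleep
import threading

import cv2
from kivy.app import App
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.uix.image import Image

class KivyCamera(Image):
    def __init__(self, **kwargs):
        super(KivyCamera, self).__init__(**kwargs)
        self.fps = 30
        self.capture = cv2.VideoCapture(0)
        threading.Thread(target=self.update, daemon=True).start()

    def update(self):
        # Worker thread: only grabs frames, never touches the Texture.
        while True:
            ret, frame = self.capture.read()
            if ret:
                Clock.schedule_once(partial(self.show_frame, frame))
            sleep(1.0 / self.fps)

    def show_frame(self, frame, dt):
        # Main Kivy thread: safe to create the texture and assign it.
        buf = cv2.flip(frame, 0).tobytes()
        texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt="bgr")
        texture.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
        self.texture = texture

class CamApp(App):
    def build(self):
        return KivyCamera()

if __name__ == "__main__":
    CamApp().run()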
I want to click a button to start capturing frames and click another button to release the webcam, which lets me redo the process.
But a problem occurs when I run the code below and click the button:
import cv2
...
self.StartVideo.clicked.connect(self.startVideo)
...
def startVideo(self):
    self.StartVideo.setDisabled(True)
    self.cap = cv2.VideoCapture(0)
    self.cap.release()
The system crashed and said: Process finished with exit code 139 (interrupted by signal 11: SIGSEGV)
I then tried this way:
import cv2
...
self.StartVideo.clicked.connect(self.startVideo)
self.StartVideo.clicked.connect(self.closeVideo)
...
def startVideo(self):
    from Video_Analysis.video_threads import video_thread
    self.videoThread = video_thread()
    self.videoThread.start()

def closeVideo(self):
    self.videoThread.stop()
Video_Analysis.video_threads.py is:
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets, QtGui, QtCore
import threading
import numpy as np
from time import sleep
import cv2

class video_thread(QtCore.QThread):
    # finishSignal_frame = QtCore.pyqtSignal(list)
    # finishSignal_mark = QtCore.pyqtSignal(np.ndarray)

    def __init__(self, parent=None):
        super(video_thread, self).__init__(parent)
        self.cap = cv2.VideoCapture(0)

    def run(self):
        self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 320)
        self.cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 240)
        self.cap.set(cv2.cv.CV_CAP_PROP_FPS, 30)
        while True:
            ret, frame = self.cap.read()
            rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            convertToQtFormat = QtGui.QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0],
                                             QtGui.QImage.Format_RGB888)
            convertToQtFormat = QtGui.QPixmap.fromImage(convertToQtFormat)
            pixmap = QtGui.QPixmap(convertToQtFormat)

    def stop(self):
        self.cap.release()
        self.terminate()
But the problem is the same. How can I solve this?
Thanks a lot!
I'm trying to create a GUI for playing a video that fills up the entire screen, while the Snapshot button is still visible at the bottom.
Right now, what I've managed to do is just set the app window itself to fullscreen, resulting in a small-sized video playing at the top and a huge "Snapshot" button at the bottom.
Is there a way to make the video fill up the entire screen?
Thanks!
from PIL import Image, ImageTk
import Tkinter as tk
import argparse
import datetime
import cv2
import os

class Application:
    def __init__(self, output_path = "./"):
        """ Initialize application which uses OpenCV + Tkinter. It displays
            a video stream in a Tkinter window and stores current snapshot on disk """
        self.vs = cv2.VideoCapture('Cat Walking.mp4')  # capture video frames, 0 is your default video camera
        self.output_path = output_path  # store output path
        self.current_image = None  # current image from the camera

        self.root = tk.Tk()  # initialize root window
        self.root.title("PyImageSearch PhotoBooth")  # set window title
        # self.destructor function gets fired when the window is closed
        self.root.protocol('WM_DELETE_WINDOW', self.destructor)

        self.panel = tk.Label(self.root)  # initialize image panel
        self.panel.pack(padx=10, pady=10)

        # create a button, that when pressed, will take the current frame and save it to file
        btn = tk.Button(self.root, text="Snapshot!", command=self.take_snapshot)
        btn.pack(fill="both", expand=True, padx=10, pady=10)

        # start a self.video_loop that constantly polls the video sensor
        # for the most recently read frame
        self.video_loop()

    def video_loop(self):
        """ Get frame from the video stream and show it in Tkinter """
        ok, frame = self.vs.read()  # read frame from video stream
        if ok:  # frame captured without any errors
            cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)  # convert colors from BGR to RGBA
            self.current_image = Image.fromarray(cv2image)  # convert image for PIL
            imgtk = ImageTk.PhotoImage(image=self.current_image)  # convert image for tkinter
            self.panel.imgtk = imgtk  # anchor imgtk so it is not deleted by the garbage collector
            self.root.attributes("-fullscreen", True)
            # self.root.wm_state('zoomed')
            self.panel.config(image=imgtk)  # show the image
        self.root.after(1, self.video_loop)  # call the same function again after 1 millisecond

    def take_snapshot(self):
        """ Take snapshot and save it to the file """
        ts = datetime.datetime.now()  # grab the current timestamp
        filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S"))  # construct filename
        p = os.path.join(self.output_path, filename)  # construct output path
        self.current_image.save(p, "JPEG")  # save image as jpeg file
        print("[INFO] saved {}".format(filename))

    def destructor(self):
        """ Destroy the root object and release all resources """
        print("[INFO] closing...")
        self.root.destroy()
        self.vs.release()  # release web camera
        cv2.destroyAllWindows()  # it is not mandatory in this application

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", default="./",
                help="path to output directory to store snapshots (default: current folder)")
args = vars(ap.parse_args())

# start the app
print("[INFO] starting...")
pba = Application(args["output"])
pba.root.mainloop()
It's not a hard task if you don't care about execution time! We know that resizing an image isn't rocket science for the average user, but under the hood it takes some time to resize each frame. And if you really care about time and options, there are many options to play with, from numpy/scipy to skimage/skvideo.
But let's try to do something with your code "as is", so we have two options to play with: cv2 and Image. For testing I grabbed 20 seconds of the "Keyboard Cat" video from YouTube (480p) and resized each frame up to 1080p; the GUI looks like this (fullscreen 1920x1080):
Resize Methods / timeit elapsed time of showing frames:
cv2.resize() / ~81.377 s.
Image.resize() / ~82.98 s.
As you can see, there is no big difference between these two, so here's the code (only the Application class and video_loop changed):
# imports
try:
    import tkinter as tk
except ImportError:
    import Tkinter as tk
from PIL import Image, ImageTk
import argparse
import datetime
import cv2
import os

class Application:
    def __init__(self, output_path = "./"):
        """ Initialize application which uses OpenCV + Tkinter. It displays
            a video stream in a Tkinter window and stores current snapshot on disk """
        self.vs = cv2.VideoCapture('KeyCat.mp4')  # capture video frames, 0 is your default video camera
        self.output_path = output_path  # store output path
        self.current_image = None  # current image from the camera

        self.root = tk.Tk()  # initialize root window
        self.root.title("PyImageSearch PhotoBooth")  # set window title
        # self.destructor function gets fired when the window is closed
        self.root.protocol('WM_DELETE_WINDOW', self.destructor)
        self.root.attributes("-fullscreen", True)

        # get the size to resize to! 30 px reserved for the button
        self.size = (self.root.winfo_screenwidth(), self.root.winfo_screenheight() - 30)

        self.panel = tk.Label(self.root)  # initialize image panel
        self.panel.pack(fill='both', expand=True)

        # create a button, that when pressed, will take the current frame and save it to file
        self.btn = tk.Button(self.root, text="Snapshot!", command=self.take_snapshot)
        self.btn.pack(fill='x', expand=True)

        # start a self.video_loop that constantly polls the video sensor
        # for the most recently read frame
        self.video_loop()

    def video_loop(self):
        """ Get frame from the video stream and show it in Tkinter """
        ok, frame = self.vs.read()  # read frame from video stream
        if ok:  # frame captured without any errors
            cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)  # convert colors from BGR to RGBA
            cv2image = cv2.resize(cv2image, self.size, interpolation=cv2.INTER_NEAREST)
            self.current_image = Image.fromarray(cv2image)  # .resize(self.size, resample=Image.NEAREST)  # convert image for PIL
            self.panel.imgtk = ImageTk.PhotoImage(image=self.current_image)
            self.panel.config(image=self.panel.imgtk)  # show the image
        self.root.after(1, self.video_loop)  # call the same function again after 1 millisecond
But as you know, doing such things "on the fly" isn't a good idea, so let's try to resize all the frames first and only then do the rest (only the Application class and video_loop method changed; a resize_video method is added):
class Application:
    def __init__(self, output_path = "./"):
        """ Initialize application which uses OpenCV + Tkinter. It displays
            a video stream in a Tkinter window and stores current snapshot on disk """
        self.vs = cv2.VideoCapture('KeyCat.mp4')  # capture video frames, 0 is your default video camera
        ...
        # init frames
        self.frames = self.resize_video()
        self.video_loop()

    def resize_video(self):
        temp = list()
        try:
            temp_count_const = cv2.CAP_PROP_FRAME_COUNT
        except AttributeError:
            temp_count_const = cv2.cv.CV_CAP_PROP_FRAME_COUNT
        frames_count = self.vs.get(temp_count_const)
        while self.vs.isOpened():
            ok, frame = self.vs.read()  # read frame from video stream
            if ok:  # frame captured without any errors
                cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)  # convert colors from BGR to RGBA
                cv2image = cv2.resize(cv2image, self.size, interpolation=cv2.INTER_NEAREST)
                cv2image = Image.fromarray(cv2image)  # convert image for PIL
                temp.append(cv2image)
                # simple progress print w/o sys import
                print('%d/%d\t%d%%' % (len(temp), frames_count, ((len(temp) / frames_count) * 100)))
            else:
                return temp

    def video_loop(self):
        """ Get frame from the video stream and show it in Tkinter """
        if len(self.frames) != 0:
            self.current_image = self.frames.pop(0)
            self.panel.imgtk = ImageTk.PhotoImage(self.current_image)
            self.panel.config(image=self.panel.imgtk)
        self.root.after(1, self.video_loop)  # call the same function again after 1 millisecond
timeit elapsed time of showing pre-resized frames: ~78.78 s
As you can see, resizing isn't the main problem in your script, but pre-resizing is still a good option!
Is it possible to display a window which has been made by OpenCV using Tkinter? I want to open it using Tkinter so that I can provide more GUI functions. Has this been done before? I checked google and SO itself but did not find anything.
So as kobejohn suggested, I am attaching the code for the camera capture and display.
import cv2
import urllib
import numpy as np
import subprocess

stream = urllib.urlopen('IP Address')
bytes = ''
while True:
    bytes += stream.read(1024)
    a = bytes.find('\xff\xd8')
    b = bytes.find('\xff\xd9')
    if a != -1 and b != -1:
        jpg = bytes[a:b+2]
        bytes = bytes[b+2:]
        i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
        cv2.imshow('i', i)
        if cv2.waitKey(1) == 27:
            exit(0)
This code is based on the discussion in comments. It doesn't put the opencv window into tkinter. It just takes opencv images and puts them into tkinter.
Prakhar, I don't have an available IP camera so can you try this? I have confirmed that it works with the USB code at the bottom of this answer.
Basically, I just inserted your jpg reading code into a simplified version of this SO question to get the code below. It uses a 2-step conversion: bytes --> opencv image --> tkinter image. There may be a more efficient way to convert directly from the bytes to a tkinter image but you can fix that if performance becomes a problem.
IP Camera
import cv2
import numpy as np
import PIL.Image
import PIL.ImageTk
import Tkinter as tk
import urllib

stream = urllib.urlopen('IP Address')
bytes_ = ''

def update_image(image_label):
    global bytes_
    bytes_ += stream.read(1024)
    a = bytes_.find('\xff\xd8')
    b = bytes_.find('\xff\xd9')
    if (a != -1) and (b != -1):
        jpg = bytes_[a:b+2]
        bytes_ = bytes_[b+2:]
        cv_image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),
                                cv2.CV_LOAD_IMAGE_COLOR)
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
        pil_image = PIL.Image.fromarray(cv_image)
        tk_image = PIL.ImageTk.PhotoImage(image=pil_image)
        image_label.configure(image=tk_image)
        image_label._image_cache = tk_image  # avoid garbage collection
        root.update()

def update_all(root, image_label):
    if root.quit_flag:
        root.destroy()  # this avoids the update event being in limbo
    else:
        update_image(image_label)
        root.after(1, func=lambda: update_all(root, image_label))

if __name__ == '__main__':
    root = tk.Tk()
    setattr(root, 'quit_flag', False)

    def set_quit_flag():
        root.quit_flag = True

    root.protocol('WM_DELETE_WINDOW', set_quit_flag)
    image_label = tk.Label(master=root)  # label for the video frame
    image_label.pack()
    root.after(0, func=lambda: update_all(root, image_label))
    root.mainloop()
USB Camera
Edit: I have confirmed that the code below works to take video from a USB camera using OpenCV and send it to a tkinter window, so hopefully the code above will work for your IP camera.
import cv2
import PIL.Image
import PIL.ImageTk
import Tkinter as tk

def update_image(image_label, cv_capture):
    cv_image = cv_capture.read()[1]
    cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
    pil_image = PIL.Image.fromarray(cv_image)
    tk_image = PIL.ImageTk.PhotoImage(image=pil_image)
    image_label.configure(image=tk_image)
    image_label._image_cache = tk_image  # avoid garbage collection
    root.update()

def update_all(root, image_label, cv_capture):
    if root.quit_flag:
        root.destroy()  # this avoids the update event being in limbo
    else:
        update_image(image_label, cv_capture)
        root.after(10, func=lambda: update_all(root, image_label, cv_capture))

if __name__ == '__main__':
    cv_capture = cv2.VideoCapture()
    cv_capture.open(0)  # have to use whatever your camera id actually is

    root = tk.Tk()
    setattr(root, 'quit_flag', False)

    def set_quit_flag():
        root.quit_flag = True

    root.protocol('WM_DELETE_WINDOW', set_quit_flag)  # avoid errors on exit
    image_label = tk.Label(master=root)  # the video will go here
    image_label.pack()
    root.after(0, func=lambda: update_all(root, image_label, cv_capture))
    root.mainloop()