I need to play a .mov video (ProRes 4444) with an alpha channel in a scene. The scene has a background image, and I need to use the video's alpha channel so it overlays on the background.
If I open the video normally with QMediaPlayer, the alpha channel is rendered as black.
(Screenshot: the scene with the background picture and the video, whose alpha area is rendered as black.)
How can I make the output of the QMediaPlayer (QGraphicsVideoItem) respect the alpha and make the overlay effect possible?
The closest I've come, based on online research, is C++ code showing that you need to subclass QAbstractVideoSurface so it receives the video frames, converts them to ARGB, and forwards them to a QLabel that displays them.
Displaying a video with an alpha channel using qt
I've also tried that, unsuccessfully. Is this the right approach, or am I just missing something simple in my current code?
EDIT:
Link to files (background image and video .mov)
https://drive.google.com/drive/folders/1LIZzTg1E8wkaD0YSvkkcfSATdlDTggyh?usp=sharing
import sys
from PyQt5.QtMultimedia import *
from PyQt5.QtMultimediaWidgets import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class VideoWindow(QMainWindow):
def __init__(self):
super(VideoWindow, self).__init__()
self.setWindowTitle('QMediaPlayer TEST')
self.resize(1920, 1080)
self.vista = QGraphicsView(self)
self.vista.setGeometry(QRect(0, 0, 1920, 1080))
self.scene = QGraphicsScene(self.vista)
self.scene.setSceneRect(0, 0, 1920, 1080)
self.vista.setScene(self.scene)
self.graphvitem1 = QGraphicsVideoItem()
#SET BACKGROUND IMAGE ON SCENE
self.tempImg = QPixmap("/Users/elemental/Desktop/pyvids/fons.jpeg")
        self.tempImg = self.tempImg.scaled(int(self.scene.width()), int(self.scene.height()))
self.graphicsPixmapItem = QGraphicsPixmapItem(self.tempImg)
self.scene.addItem(self.graphicsPixmapItem)
#SET VIDEO 1 WITH LOOP
self.mediaPlayer1 = QMediaPlayer(None, QMediaPlayer.VideoSurface)
self.mediaPlayer1.setVideoOutput(self.graphvitem1)
self.playlist1 = QMediaPlaylist(self)
self.playlist1.addMedia(QMediaContent(QUrl.fromLocalFile("/Users/elemental/Desktop/pyvids/vida1.mov")))
self.playlist1.setCurrentIndex(1)
self.playlist1.setPlaybackMode(QMediaPlaylist.CurrentItemInLoop)
self.mediaPlayer1.setPlaylist(self.playlist1)
self.graphvitem1.setPos(500, 100)
self.graphvitem1.setSize(QSizeF(1000, 500))
self.scene.addItem(self.graphvitem1)
self.mediaPlayer1.play()
self.vista.show()
if __name__ == '__main__':
app = QApplication([])
window = VideoWindow()
window.show()
sys.exit(app.exec_())
From what I can see, QVideoWidget doesn't support alpha channels by default, so it falls back to the "basic" black background.
But an implementation seems possible by properly subclassing QAbstractVideoSurface.
Consider the following code experimental: my knowledge of QMediaPlayer and the Qt video surface isn't that deep (the former is an abstraction over multiple platforms and libraries that can behave very differently depending on the configuration), and I could only test it on two Linux platforms, so I don't know how it behaves on Windows or macOS.
The idea is that the video surface creates a default, dedicated QWidget subclass (VideoWidget), unless another class providing a suitable setImage() is supplied, and updates that widget's image whenever the media player delivers a new frame.
Note that I only tested it with a couple of videos (including the provided one), and further testing might be required.
from PyQt5.QtMultimedia import *
from PyQt5.QtMultimediaWidgets import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class VideoWidget(QWidget):
image = QImage()
def __init__(self, **kwargs):
super().__init__(**kwargs)
def setImage(self, image):
self.image = image
self.update()
def sizeHint(self):
return QSize(640, 480)
def paintEvent(self, event):
qp = QPainter(self)
# ensure that smooth transformation is used while scaling pixmaps
qp.setRenderHints(qp.SmoothPixmapTransform)
# provide compliancy with background set using stylesheets, see:
# https://doc.qt.io/qt-5/stylesheet-reference.html#qwidget-widget
opt = QStyleOption()
opt.initFrom(self)
self.style().drawPrimitive(QStyle.PE_Widget, opt, qp, self)
# draw the image, scaled to the widget size; if you need fixed sizes
# or keep aspect ratio, implement this (or the widget) accordingly
qp.drawImage(self.rect(), self.image, self.image.rect())
class AlphaVideoDrawer(QAbstractVideoSurface):
def __init__(self, videoWidget=None, widgetOptions=None):
super().__init__()
if videoWidget:
if not hasattr(videoWidget, 'setImage'):
raise NotImplementedError(
'setImage() must be implemented for videoWidget!')
else:
if not isinstance(widgetOptions, dict):
widgetOptions = {}
elif not 'styleSheet' in widgetOptions:
# just a default background for testing purposes
widgetOptions = {'styleSheet': 'background: darkGray;'}
videoWidget = VideoWidget(**widgetOptions)
self.videoWidget = videoWidget
        # QVideoFrame.image() was introduced in Qt 5.15
        # (e.g. "5.15.2" yields major=5, minor=15, micro=2)
        major, minor, micro = map(int, QT_VERSION_STR.split('.'))
        if major < 6 and minor < 15:
self.imageFromFrame = self._imageFromFrameFix
else:
self.imageFromFrame = lambda frame: frame.image()
def _imageFromFrameFix(self, frame):
clone_frame = QVideoFrame(frame)
clone_frame.map(QAbstractVideoBuffer.ReadOnly)
image = QImage(
clone_frame.bits(), frame.width(), frame.height(), frame.bytesPerLine(),
QVideoFrame.imageFormatFromPixelFormat(frame.pixelFormat()))
clone_frame.unmap()
return image
def supportedPixelFormats(self, type):
return [QVideoFrame.Format_ARGB32]
def present(self, frame: QVideoFrame):
if frame.isValid():
self.videoWidget.setImage(self.imageFromFrame(frame))
if self.surfaceFormat().pixelFormat() != frame.pixelFormat() or \
self.surfaceFormat().frameSize() != frame.size():
self.setError(QAbstractVideoSurface.IncorrectFormatError)
self.stop()
return False
else:
return True
class AlphaVideoTest(QMainWindow):
def __init__(self):
super().__init__()
self.setStyleSheet('''
QFrame#mainFrame {
background: green;
}
''')
mainFrame = QFrame(objectName='mainFrame')
self.setCentralWidget(mainFrame)
layout = QVBoxLayout(mainFrame)
self.playButton = QPushButton('Play', checkable=True)
layout.addWidget(self.playButton)
self.drawer = AlphaVideoDrawer()
layout.addWidget(self.drawer.videoWidget)
self.mediaPlayer1 = QMediaPlayer(self, QMediaPlayer.VideoSurface)
self.playlist = QMediaPlaylist(self)
path = QDir.current().absoluteFilePath('vida1.mov')
self.playlist.addMedia(QMediaContent(QUrl.fromLocalFile(path)))
self.playlist.setCurrentIndex(1)
self.playlist.setPlaybackMode(QMediaPlaylist.CurrentItemInLoop)
self.mediaPlayer1.setPlaylist(self.playlist)
self.mediaPlayer1.setVideoOutput(self.drawer)
self.playButton.toggled.connect(self.togglePlay)
def togglePlay(self, play):
if play:
self.mediaPlayer1.play()
self.playButton.setText('Pause')
else:
self.mediaPlayer1.pause()
self.playButton.setText('Play')
import sys
app = QApplication(sys.argv)
test = AlphaVideoTest()
test.show()
sys.exit(app.exec_())
I based the above code on the following sources:
wrong video frame of present() of qabstractvideosurface in pyqt5
Displaying a video with an alpha channel using qt
Note that I limited the supportedPixelFormats() output, as using the full list of formats provided in the related question didn't work for me. That doesn't guarantee this version will work everywhere either; further testing is probably required on different machines, OS/system configurations, and video formats. Remember that QMediaPlayer relies entirely on the underlying OS and its default media backend.
Finally, if you only need this for "limited" and predefined animations, consider implementing your own QWidget subclass that keeps a list of preloaded PNG images and shows them from paintEvent(), with updates driven by a QVariantAnimation. While this kind of implementation might be less performant or less elegant, it has the major benefit of cross-platform consistency.
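A minimal sketch of that PNG-sequence approach (my own illustration, not from the original answer), assuming the animation has been exported as numbered files such as frame_000.png, frame_001.png, … (hypothetical names):

import sys
from PyQt5.QtCore import QVariantAnimation, QEasingCurve
from PyQt5.QtGui import QPixmap, QPainter
from PyQt5.QtWidgets import QApplication, QWidget


class PngSequenceWidget(QWidget):
    """Shows a list of preloaded PNG frames, advanced by a QVariantAnimation."""
    def __init__(self, paths, duration=1000, parent=None):
        super().__init__(parent)
        self.frames = [QPixmap(p) for p in paths]
        self.current = QPixmap()
        self.anim = QVariantAnimation(self)
        self.anim.setStartValue(0)
        self.anim.setEndValue(max(len(self.frames) - 1, 0))
        self.anim.setDuration(duration)
        self.anim.setEasingCurve(QEasingCurve.Linear)
        self.anim.setLoopCount(-1)  # loop forever, similar to CurrentItemInLoop
        self.anim.valueChanged.connect(self.setFrame)
        self.anim.start()

    def setFrame(self, index):
        if self.frames:
            self.current = self.frames[int(index)]
            self.update()

    def paintEvent(self, event):
        qp = QPainter(self)
        qp.setRenderHint(QPainter.SmoothPixmapTransform)
        if not self.current.isNull():
            # the PNG's alpha is preserved, so whatever is behind the widget
            # (a background image, a styled parent) shows through
            qp.drawPixmap(self.rect(), self.current)


if __name__ == '__main__':
    app = QApplication(sys.argv)
    # hypothetical frame files; replace with your own exported PNG sequence
    w = PngSequenceWidget(['frame_%03d.png' % i for i in range(10)], duration=400)
    w.resize(400, 300)
    w.show()
    sys.exit(app.exec_())

Such a widget could be stacked on top of a background widget, or placed over the background QGraphicsPixmapItem through a QGraphicsProxyWidget.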
Related
I am trying to make the code at the link (https://github.com/god233012yamil/Streaming-IP-Cameras-Using-PyQt-and-OpenCV/blob/main/Streaming_IP_Camera_Using_PyQt_OpenCV.py) more generalized. The issue when I run the code below, camera frame 3 shows blinking frames from camera 3 and camera 1.
The camera urls are rtsp links. I also added some youtube video links but it doesn't show them. I also do not know why it doesn't show them but right now my main concern is the blinking video.
When you run the code you can see 2x2 (total 4) camera streams on screen. when you double click one of them it becomes full screen. this is like a CCTV camera software.
I feel like I need to make some variable instance variable but right now in fact I am keeping record of everything in an array cameras where each camera has variables as instance variables.
I am using linux ubuntu.
I would be grateful if you can help
# import the require packages.
import cv2
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, \
QLabel, QGridLayout, QScrollArea, QSizePolicy
from PyQt5.QtGui import QPixmap, QIcon, QImage, QPalette
from PyQt5.QtCore import QThread, pyqtSignal, Qt, QEvent, QObject
from PyQt5 import QtCore
import sys
import math
class CaptureIpCameraFramesWorker(QThread):
# Signal emitted when a new image or a new frame is ready.
ImageUpdated = pyqtSignal(QImage)
def __init__(self, url) -> None:
super(CaptureIpCameraFramesWorker, self).__init__()
# Declare and initialize instance variables.
self.url = url
self.__thread_active = True
self.fps = 0
self.__thread_pause = False
def run(self) -> None:
# Capture video from a network stream.
cap = cv2.VideoCapture(self.url, cv2.CAP_FFMPEG)
# Get default video FPS.
self.fps = cap.get(cv2.CAP_PROP_FPS)
        # If video capturing has been initialized already.
if cap.isOpened():
# While the thread is active.
while self.__thread_active:
#
if not self.__thread_pause:
# Grabs, decodes and returns the next video frame.
                    ret, frame = cap.read()
                    # If the frame was read correctly.
                    if ret:
                        # Get the frame height, width and channels.
                        height, width, channels = frame.shape
                        # Calculate the number of bytes per line.
                        bytes_per_line = width * channels
# Convert image from BGR (cv2 default color format) to RGB (Qt default color format).
cv_rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Convert the image to Qt format.
qt_rgb_image = QImage(cv_rgb_image.data, width, height, bytes_per_line, QImage.Format_RGB888)
# Scale the image.
qt_rgb_image_scaled = qt_rgb_image.scaled(1280, 720, Qt.KeepAspectRatio) # 720p
# qt_rgb_image_scaled = qt_rgb_image.scaled(1920, 1080, Qt.KeepAspectRatio)
# Emit this signal to notify that a new image or frame is available.
self.ImageUpdated.emit(qt_rgb_image_scaled)
else:
break
# When everything done, release the video capture object.
cap.release()
# Tells the thread's event loop to exit with return code 0 (success).
self.quit()
def stop(self) -> None:
self.__thread_active = False
def pause(self) -> None:
self.__thread_pause = True
def unpause(self) -> None:
self.__thread_pause = False
class cameraObj:
#this object is used to store various variables about a camera including GUI properties and camera url
def __init__(self,cameraNumg,cameralabelg,urlg,scrollAreag):
self.scrollArea=scrollAreag
self.cameraNumber=cameraNumg
self.cameralabel=cameralabelg
self.url=urlg
self.ipworker=None
self.state="Normal"
def setIpWorker(self,ipw):
self.ipworker=ipw
class MainWindow(QMainWindow):
def __init__(self) -> None:
super(MainWindow, self).__init__()
self.urls=[]
self.url_1 = "rtsp://admin:12346#192.168.1.12/1"
#self.url_2 = "https://www.youtube.com/watch?v=hGhqPpVZR4A"
self.url_3 = "rtsp://admin:12345#192.168.1.13/1"
#self.url_4 = "https://www.youtube.com/watch?v=YMlUtdaPbHY"
self.urls.append(self.url_1)
#self.urls.append(self.url_2)
#self.urls.append(self.url_4)
self.urls.append(self.url_3)
self.numberofcameras=len(self.urls)
self.cameraNames=[]
self.cameras=[]
for camnum in range(self.numberofcameras):
# Create an instance of a QLabel class to show camera 1.
camera_1 = QLabel()
camname="Camera_"+str(camnum+1)
camera_1.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
camera_1.setScaledContents(True)
camera_1.installEventFilter(self)
camera_1.setObjectName(camname)
# Create an instance of a QScrollArea class to scroll camera 1 image.
QScrollArea_1 = QScrollArea()
QScrollArea_1.setBackgroundRole(QPalette.Dark)
QScrollArea_1.setWidgetResizable(True)
QScrollArea_1.setWidget(camera_1)
cobj=cameraObj(camnum+1,camera_1,self.urls[camnum],QScrollArea_1)
self.cameras.append(cobj)
self.cameraNames.append(camname)
# Set the UI elements for this Widget class.
self.__SetupUI()
for camnum in range(self.numberofcameras):
# Create an instance of CaptureIpCameraFramesWorker.
cWorker = CaptureIpCameraFramesWorker(self.cameras[camnum].url)
cWorker.ImageUpdated.connect(lambda image: self.ShowCamera(image,camnum))
self.cameras[camnum].setIpWorker(cWorker)
for camnum in range(self.numberofcameras):
self.cameras[camnum].ipworker.start()
def hidescroll(self,snum:int):
for i in range(len(self.cameras)):
if i != snum:
self.cameras[i].scrollArea.hide()
def showscroll(self,snum:int):
for i in range(len(self.cameras)):
if i != snum:
self.cameras[i].scrollArea.show()
def __SetupUI(self) -> None:
# Create an instance of a QGridLayout layout.
grid_layout = QGridLayout()
grid_layout.setContentsMargins(0, 0, 0, 0)
sqroot=int(math.sqrt(self.numberofcameras))
for i in range(self.numberofcameras):
grid_layout.addWidget(self.cameras[i].scrollArea, int(i/sqroot), i%sqroot)
# Create a widget instance.
self.widget = QWidget(self)
self.widget.setLayout(grid_layout)
# Set the central widget.
self.setCentralWidget(self.widget)
self.setMinimumSize(800, 600)
self.showMaximized()
self.setStyleSheet("QMainWindow {background: 'black';}")
self.setWindowIcon(QIcon(QPixmap("camera_2.png")))
# Set window title.
self.setWindowTitle("IP Camera System")
#QtCore.pyqtSlot()
def ShowCamera(self, frame: QImage,cnum:int) -> None:
self.cameras[cnum].cameralabel.setPixmap(QPixmap.fromImage(frame))
# Override method for class MainWindow.
def eventFilter(self, source: QObject, event: QEvent) -> bool:
"""
Method to capture the events for objects with an event filter installed.
:param source: The object for whom an event took place.
:param event: The event that took place.
:return: True if event is handled.
"""
#
if event.type() == QtCore.QEvent.MouseButtonDblClick:
            camname = source.objectName()
            if camname not in self.cameraNames:
                return super(MainWindow, self).eventFilter(source, event)
            # derive the camera index from the trailing digit of the object name
            camerapos = int(camname[-1]) - 1
if self.cameras[camerapos].state == "Normal":
self.hidescroll(camerapos)
self.cameras[camerapos].state = "Maximized"
else:
self.showscroll(camerapos)
self.cameras[camerapos].state = "Normal"
return True
else:
return super(MainWindow, self).eventFilter(source, event)
# Overwrite method closeEvent from class QMainWindow.
def closeEvent(self, event) -> None:
# If thread getIpCameraFrameWorker_1 is running, then exit it.
for i in range(self.numberofcameras):
if self.cameras[i].ipworker.isRunning():
self.cameras[i].ipworker.quit()
event.accept()
def main() -> None:
    # Create a QApplication object. It manages the GUI application's control flow and main settings.
# It handles widget specific initialization, finalization.
# For any GUI application using Qt, there is precisely one QApplication object
app = QApplication(sys.argv)
# Create an instance of the class MainWindow.
window = MainWindow()
# Show the window.
window.show()
# Start Qt event loop.
sys.exit(app.exec_())
if __name__ == '__main__':
main()
Short context:
I have a QMainWindow with an mpv player inside. I play videos with mpv, create overlay images with PIL, and run it all in the PyQt window. The overlay image is updated roughly every frame of the video.
Here is my issue:
If the mpv picture is large, updating the overlay images is far too slow (I have already optimized a lot: separate processes and threads, only one overlay, etc.). If the picture is small, however, it all works flawlessly, which suggests the performance gap isn't huge.
I wouldn't mind losing resolution to gain performance, so I want to have a large window with lower resolution content. Is this possible?
The bottleneck here is mpv's overlay.update function
My main idea is to zoom the QMainwindow, but I cannot seem to find a way to do this. Any other solution is of course sufficient.
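To confirm where the time goes, a small timing helper (my own sketch, not part of mpv or the code below) can be wrapped around the overlay.update() call:

import time
from contextlib import contextmanager

@contextmanager
def timed(label):
    # print how long the wrapped block took, in milliseconds
    start = time.perf_counter()
    try:
        yield
    finally:
        print('%s: %.1f ms' % (label, (time.perf_counter() - start) * 1000))

if __name__ == '__main__':
    # standalone demo; inside the observer below you would use:
    #     with timed('overlay.update'):
    #         self.overlay.update(img)
    with timed('sleep'):
        time.sleep(0.05)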
Example code (note that test.mp4 is the hardcoded video, provide anything you have)
#!/usr/bin/env python3
import mpv
import sys
from PIL import Image, ImageDraw
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
class Test(QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.container = QWidget(self)
self.setCentralWidget(self.container)
self.container.setAttribute(Qt.WA_DontCreateNativeAncestors)
self.container.setAttribute(Qt.WA_NativeWindow)
self.w = 1800
self.h = int(self.w / 16 * 9)
self.setFixedSize(self.w, self.h)
self.player = mpv.MPV(wid=str(int(self.container.winId())),
log_handler=print)
self.player.play('test.mp4')
self.overlay = self.player.create_image_overlay()
self.coords = [20, 20, 50, 50]
def play(self):
        # register _time_observer so it runs on every time-pos change
        @self.player.property_observer("time-pos")
def _time_observer(_name: str, value: float) -> None:
for i in range(len(self.coords)):
self.coords[i] = self.coords[i]*2 % self.h
img = Image.new("RGBA", (self.w, self.h), (0, 0, 0, 0))
draw = ImageDraw.Draw(img)
draw.rectangle(self.coords, outline=(255,255,255,255), width=4)
self.overlay.update(img)
app = QApplication(sys.argv)
# This is necessary since PyQt stomps over the locale settings needed by libmpv.
# This needs to happen after importing PyQt and before creating the first mpv.MPV instance.
import locale
locale.setlocale(locale.LC_NUMERIC, 'C')
win = Test()
win.show()
win.play()
sys.exit(app.exec_())
Short Summary
Having a large window causes mpv's overlay.update method to consume too much time/computation. It is acceptable to decrease the dpi (resolution) of the overlay pictures, and even the video, to make it run faster.
I've been working on a chess GUI for a short while. I'm new to PyQt5. I've already coded quite a bit, though, so for the sake of generality (and to keep the lines of code here to a minimum) I'll just treat it as any board game that has a grid and pieces navigating that grid. I've managed to get drag-and-drop behaviour in different ways, but they all had some (for me significant) downside. In any case, the particular interaction I'm trying to implement is 'grabbing' a piece from a certain square (on click) and having it follow the mouse until it is dropped onto a different square (on release).
Version info
PyQt5 version: 5.15.0
Windows version: Windows 10 Pro 64 bits (10.0)
First Approach
Initially I was hoping that simply moving a QLabel with a given pixmap might do the trick, but when moving the mouse quite fast, the drawing can't keep up with the movement: the image seems to phase in and out of existence, or only half of it renders before it gets redrawn. I tried something similar using QGraphicsView and QGraphicsScene with some movable QGraphicsItem objects, but the results were the same: the image cuts off while trying to keep up with the cursor. Example code attempting to use QGraphicsView:
from PyQt5.QtWidgets import (QMainWindow, QGraphicsView, QApplication, QGraphicsPixmapItem,
QGraphicsScene, QGraphicsItem)
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import Qt
import sys
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.pm = QPixmap(70,70)
self.pm.fill(Qt.black)
self.item = QGraphicsPixmapItem(self.pm)
self.setGeometry(100,100,900,900)
self.scene = QGraphicsScene()
self.view = QGraphicsView(self.scene, self)
self.view.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.view.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.viewWidth = 800
self.viewHeight = self.viewWidth
self.view.setGeometry(0,0,self.viewWidth, self.viewHeight)
self.bk = self.scene.addPixmap(self.pm)
self.bk.setFlag(QGraphicsItem.ItemIsMovable)
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
With this code, if you grab the 'piece' and go back and forth with the mouse at a high speed, I mainly see half/a quarter of the image. In the middle of the area I move the object, I usually don't see the object at all. Video of this in action: https://vimeo.com/user41790219
Second Approach
I found the QDrag class and, with it, a reasonable way of creating draggable pieces. It looks fantastic: absolutely no problems once the drag has actually started, and the image moves very smoothly. However, I wanted the drag to start on a mousePressEvent and start immediately; by 'start immediately' I mean render the pixmap at the mouse location on click. This does not happen on Windows. As stated in https://doc.qt.io/qt-5/qdrag.html#exec, QDrag.exec_() is blocking on Windows; it calls processEvents() frequently to keep the GUI responsive. That is all fine, but on Windows it does not render the image assigned to the QDrag until the mouse has moved some amount. This is a shame, because on Linux it actually looks quite snappy (on click the piece moves to the mouse instantly). Maybe it's a bit petty, but I really want the piece to snap to the mouse instantly. Here is some sample code I pieced together using QDrag and QPushButtons that shows this behaviour:
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtSvg import *
import sys
BOARDSIZE = 500
SQUARESIZE = 100
PIECESIZE = 70
FILES = 5
RANKS = 5
class Square(QPushButton):
def __init__(self, pixmap=None, size=100, dark=False):
super(Square,self).__init__()
self.pixmap = pixmap
self.size = size
if dark:
self.bg = '#8CA2AD'
else:
self.bg = '#DEE3E6'
self.setFixedWidth(size)
self.setFixedHeight(size)
if self.pixmap is not None:
self.setIcon(QIcon(self.pixmap))
self.setIconSize(self.pixmap.rect().size())
self.setStyle()
self.setAcceptDrops(True)
def setStyle(self):
self.setStyleSheet(f"""
QPushButton {{
background-color:{self.bg};
color:#ffffff;
border-width:0px;
border-radius: 0px;
padding:0px 0px 0px 0px;
}}
""")
def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
if self.pixmap is not None:
s = self.pixmap.width()
drag = QDrag(self)
drag.setPixmap(self.pixmap)
mime = QMimeData()
mime.setText('piece')
drag.setMimeData(mime)
drag.setHotSpot(QPoint(int(s/2) , int(s/2)))
self.setIcon(QIcon())
self.pixmap = None
drag.exec_(Qt.MoveAction | Qt.CopyAction)
def dragEnterEvent(self, event):
event.accept()
def dropEvent(self, event):
pixmap = QPixmap(PIECESIZE, PIECESIZE)
pixmap.fill(Qt.black)
self.pixmap = pixmap
self.setIcon(QIcon(pixmap))
self.setIconSize(pixmap.rect().size())
class Board(QWidget):
def __init__(self, parent=None, width=BOARDSIZE, height=BOARDSIZE):
super(Board,self).__init__(parent)
self.width = width
self.height = height
self.setFixedWidth(self.width)
self.setFixedHeight(self.height)
grid = QGridLayout(self)
grid.setSpacing(0)
self.setAcceptDrops(False)
size = int(self.width/FILES)
pixmap = QPixmap(PIECESIZE, PIECESIZE)
pixmap.fill(Qt.black)
dark = True
for row in range(0,FILES):
for col in range(0,RANKS):
if row == 0 or row == 4:
pm = pixmap
else:
pm = None
square = Square(pixmap=pm,size=size,dark=dark)
grid.addWidget(square, row, col)
dark = not dark
self.setLayout(grid)
self.setAcceptDrops(False)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow,self).__init__()
self.board = Board(parent=self)
self.setGeometry(100,100,BOARDSIZE+50,BOARDSIZE+50)
self.setCentralWidget(self.board)
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
But in this example, when you click, the drag only becomes visible after the mouse has moved; I want it to appear immediately after the mouse is pressed. This is Windows-specific.
Mention-worthy stuff
I found that on QApplication you can set startDragTime and startDragDistance, but after setting those to various values (including 0 and -1) nothing changed, so I assume they are merely global hints that widgets are supposed to consult.
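For reference, these are application-wide properties with static getters/setters on QApplication; a minimal sketch of setting them (with no claim that the platform honours them for the initial drag pixmap):

import sys
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
# distance in pixels the mouse must travel before a drag is considered started,
# and how long (ms) a press must last; widgets and styles are expected to consult these
QApplication.setStartDragDistance(1)
QApplication.setStartDragTime(0)
print(QApplication.startDragDistance(), QApplication.startDragTime())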
Question
What I'm looking for is the QDrag behaviour after the mouse moves slightly, i.e. the smooth movement during the drag, plus having the pixmap show up immediately after the mousePressEvent. I'm also fine with anything involving QObjects with a pixmap and simply moving them, as long as the movement under the cursor looks smooth (doesn't clip or cut off). Any hints, ideas, or resources I may have missed are greatly appreciated!
I'm building a T Shirt Designer using PySide. For this, I've set up a QGraphicsScene with the image of the T Shirt as a QPixmapItem on the scene. To overlay the design on the T Shirt, I'm getting the design image PNG from the user and setting that up as another QPixmapItem. I align them up using the setPos() method and then use the setZValue() method to ensure that the Design PNG shows up on top of the T Shirt image.
I've enabled the flags ItemIsMovable, ItemIsSelectable, ItemSendsScenePositionChanges, ItemIsFocusable for the Design image QPixmapItem. So I am able to move the design around over the T Shirt image. Next, I want to restrict this movement to only where the printing is possible. To achieve this, I've followed this question to derive a new QGraphicsPixmapItem class and have tried to reimplement both the itemChange() and the mouseMoveEvent() methods.
Inside these methods I've tried calling the same methods of the original class, both via super() and via the regular QGraphicsPixmapItem.itemChange(self, change, event) form. However, nothing seems to happen: the design moves just fine, but it is not being restricted. To check whether the methods get called I added print statements inside them, but they never execute.
I've tried adding setSceneRect() on the scene as well, and I've also enabled setMouseTracking on the QGraphicsView. However, none of those things triggers either itemChange() or mouseMoveEvent().
There are other questions where people have explained how to do this in C++. However, I'm not able to replicate it in python.
# -*- coding: utf-8 -*-
import sys
from PySide import QtCore, QtGui
from os import path
class Pixmap(QtGui.QGraphicsPixmapItem):
def __init__(self, pix):
super(Pixmap, self).__init__()
self.pixmap_item = QtGui.QGraphicsPixmapItem(pix)
self.pixmap_item.setFlag(QtGui.QGraphicsItem.ItemIsMovable, True)
self.pixmap_item.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)
self.pixmap_item.setFlag(QtGui.QGraphicsItem.ItemSendsScenePositionChanges, True)
self.pixmap_item.setFlag(QtGui.QGraphicsItem.ItemIsFocusable, True)
def itemChange(self, change, event):
QtGui.QGraphicsPixmapItem.itemChange(change, event)
print "Item Changed"
#Code to restrict to a rectangular area goes here
return QtGui.QGraphicsPixmapItem.itemChange(self, change, event)
def mouseMoveEvent(self, event):
super(Pixmap, self).mouseMoveEvent(event)
print "Mouse Moved"
#Code to restrict to a rectangular area goes here
class Ui_frmSelectRoundNeckHalfSleeve(QtCore.QObject):
def setupUi(self, frmSelectRoundNeckHalfSleeve):
frmSelectRoundNeckHalfSleeve.setObjectName("frmSelectRoundNeckHalfSleeve")
frmSelectRoundNeckHalfSleeve.resize(842, 595)
self.imgRoundNeckTShirt = QtGui.QGraphicsView(frmSelectRoundNeckHalfSleeve)
self.imgRoundNeckTShirt.setGeometry(QtCore.QRect(20, 20, 500, 500))
self.imgRoundNeckTShirt.setObjectName("imgRoundNeckTShirt")
self.imgRoundNeckTShirt.setMouseTracking(True)
self.tShirtScene = QtGui.QGraphicsScene(frmSelectRoundNeckHalfSleeve)
self.tShirtScene.setSceneRect(20, 20, 480, 480)
self.TShirtImage = QtGui.QGraphicsPixmapItem(QtGui.QPixmap("./Images/black-t-shirt.jpg").scaled(480, 480, QtCore.Qt.KeepAspectRatio))
self.designImagePixmap = QtGui.QPixmap("./Designs/test.png")
self.designImagePng = Pixmap(self.designImagePixmap.scaledToWidth(135,QtCore.Qt.SmoothTransformation))
self.TShirtImage.setZValue(10)
self.designImagePng.pixmap_item.setZValue(40)
self.designImagePng.pixmap_item.setPos(167,90)
self.tShirtScene.addItem(self.TShirtImage)
self.tShirtScene.addItem(self.designImagePng.pixmap_item)
self.imgRoundNeckTShirt.setScene(self.tShirtScene)
if __name__ == "__main__":
path = r"E:\\Documents\\T Shirt Designer\\"
QtGui.QApplication.addLibraryPath(path)
app = QtGui.QApplication(sys.argv)
testFile = QtGui.QWidget()
ui = Ui_frmSelectRoundNeckHalfSleeve()
ui.setupUi(testFile)
testFile.show()
sys.exit(app.exec_())
The error you have is that you are not overriding itemChange() of pixmap_item but of Pixmap; it seems you are confusing inheritance with composition.
An improvement is to make the base item (the T-shirt) the parent of the design item, so that the design item's coordinates are relative to the base item.
With that in mind, I have implemented the logic to restrict the movement of the design item to the area of the T-shirt item.
# -*- coding: utf-8 -*-
import sys
from os import path
from PySide import QtCore, QtGui
class Pixmap(QtGui.QGraphicsPixmapItem):
def __init__(self, pix, parent=None):
super(Pixmap, self).__init__(pix, parent)
self.setFlag(QtGui.QGraphicsItem.ItemIsMovable, True)
self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable, True)
self.setFlag(QtGui.QGraphicsItem.ItemSendsScenePositionChanges, True)
self.setFlag(QtGui.QGraphicsItem.ItemIsFocusable, True)
def itemChange(self, change, value):
if change == QtGui.QGraphicsItem.ItemPositionChange:
parent = self.parentItem()
if parent is not None:
r = self.mapToParent(self.boundingRect()).boundingRect()
R = parent.boundingRect()
rR = QtCore.QRectF(R.topLeft(), R.size() - r.size())
if not rR.contains(value):
x = min(max(rR.left(), value.x()), rR.right())
y = min(max(rR.top(), value.y()), rR.bottom())
return QtCore.QPointF(x, y)
return QtGui.QGraphicsPixmapItem.itemChange(self, change, value)
class Ui_frmSelectRoundNeckHalfSleeve(QtCore.QObject):
def setupUi(self, frmSelectRoundNeckHalfSleeve):
frmSelectRoundNeckHalfSleeve.setObjectName(
"frmSelectRoundNeckHalfSleeve"
)
frmSelectRoundNeckHalfSleeve.resize(842, 595)
self.imgRoundNeckTShirt = QtGui.QGraphicsView(
frmSelectRoundNeckHalfSleeve
)
self.imgRoundNeckTShirt.setGeometry(QtCore.QRect(20, 20, 500, 500))
self.imgRoundNeckTShirt.setObjectName("imgRoundNeckTShirt")
self.tShirtScene = QtGui.QGraphicsScene(frmSelectRoundNeckHalfSleeve)
self.tShirtScene.setSceneRect(20, 20, 480, 480)
self.TShirtImage = QtGui.QGraphicsPixmapItem(
QtGui.QPixmap("./Images/black-t-shirt.jpg").scaled(
480, 480, QtCore.Qt.KeepAspectRatio
)
)
designImagePixmap = QtGui.QPixmap("./Designs/test.png").scaledToWidth(
135, QtCore.Qt.SmoothTransformation
)
self.designImagePng = Pixmap(designImagePixmap, self.TShirtImage)
self.designImagePng.setZValue(1)
self.designImagePng.setPos(167, 90)
self.tShirtScene.addItem(self.TShirtImage)
self.imgRoundNeckTShirt.setScene(self.tShirtScene)
if __name__ == "__main__":
path = r"E:\\Documents\\T Shirt Designer\\"
QtGui.QApplication.addLibraryPath(path)
app = QtGui.QApplication(sys.argv)
testFile = QtGui.QWidget()
ui = Ui_frmSelectRoundNeckHalfSleeve()
ui.setupUi(testFile)
testFile.show()
sys.exit(app.exec_())
I am trying to display live images from my 1394 camera.
Currently my code is able to obtain images in a loop from the camera, and I was looking for any quick GUI that will update dynamically (as a separate thread). I could do this in PyQt, maybe using QThread, but is there any recommendation or a faster way of doing this?
Here's my code
#Loop capturing frames from camera
for frame in range(1,500):
print 'frame:',frame
TIME.sleep(1) #capture frame every second
image_binary = pycam.cam.RetrieveBuffer()
#convert to PIL Image
pilimg = PIL.Image.frombuffer("L",(cimg.GetCols(),cimg.GetRows()),image_binary,'raw', "RGBA", 0, 1)
# At this point I want to send my image data to a GUI window and display it
Thank you.
Here's wxPython code that will do it...
import wx
from PIL import Image
SIZE = (640, 480)
def get_image():
# Put your code here to return a PIL image from the camera.
return Image.new('L', SIZE)
def pil_to_wx(image):
width, height = image.size
buffer = image.convert('RGB').tostring()
bitmap = wx.BitmapFromBuffer(width, height, buffer)
return bitmap
class Panel(wx.Panel):
def __init__(self, parent):
super(Panel, self).__init__(parent, -1)
self.SetSize(SIZE)
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.Bind(wx.EVT_PAINT, self.on_paint)
self.update()
def update(self):
self.Refresh()
self.Update()
wx.CallLater(15, self.update)
def create_bitmap(self):
image = get_image()
bitmap = pil_to_wx(image)
return bitmap
def on_paint(self, event):
bitmap = self.create_bitmap()
dc = wx.AutoBufferedPaintDC(self)
dc.DrawBitmap(bitmap, 0, 0)
class Frame(wx.Frame):
def __init__(self):
style = wx.DEFAULT_FRAME_STYLE & ~wx.RESIZE_BORDER & ~wx.MAXIMIZE_BOX
super(Frame, self).__init__(None, -1, 'Camera Viewer', style=style)
panel = Panel(self)
self.Fit()
def main():
app = wx.PySimpleApp()
frame = Frame()
frame.Center()
frame.Show()
app.MainLoop()
if __name__ == '__main__':
main()
I thought I'd try PyQt4 imageviewer.py example and it worked for me.
Thanks for all your help guys.
Here's my modified code:
from PyQt4 import QtCore, QtGui
# PIL's ImageQt module converts a PIL image to a QImage; the import was missing
# in the original snippet, which referenced it as PILQT
from PIL import ImageQt
class CameraViewer(QtGui.QMainWindow):
def __init__(self):
super(CameraViewer, self).__init__()
self.imageLabel = QtGui.QLabel()
self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)
self.imageLabel.setScaledContents(True)
self.scrollArea = QtGui.QScrollArea()
self.scrollArea.setWidget(self.imageLabel)
self.setCentralWidget(self.scrollArea)
self.setWindowTitle("Image Viewer")
self.resize(640, 480)
timer = QtCore.QTimer(self)
timer.timeout.connect(self.open)
timer.start(33) #30 Hz
def open(self):
#get data and display
pilimg = getMyPILImageDatFromCamera()
        image = ImageQt.ImageQt(pilimg)
        if image.isNull():
            QtGui.QMessageBox.information(self, "Image Viewer", "Cannot load the camera image.")
return
self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image))
self.imageLabel.adjustSize()
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
CameraViewer = CameraViewer()
CameraViewer.show()
sys.exit(app.exec_())
I recommend using Tkinter since it's already part of Python. I've never used PIL, but a quick Google search shows it's easy to use PIL images in Tk widgets (via the PIL.ImageTk.PhotoImage() method).
If you already have a Tkinter widget set up to display images (a Label widget works fine) all you need to do is arrange for the image to be updated every second or so. You can do this by using the after command of tkinter.
Here's an example; I don't have PIL, so it uses a static image, but it illustrates how to use the event loop to fetch images every second (a PIL-based variant of get_image() is sketched after the example):
import Tkinter
class App(Tkinter.Tk):
def __init__(self):
Tkinter.Tk.__init__(self)
self.label = Tkinter.Label(text="your image here", compound="top")
self.label.pack(side="top", padx=8, pady=8)
self.iteration=0
self.UpdateImage(1000)
def UpdateImage(self, delay, event=None):
# this is merely so the display changes even though the image doesn't
self.iteration += 1
self.image = self.get_image()
self.label.configure(image=self.image, text="Iteration %s" % self.iteration)
# reschedule to run again in 1 second
self.after(delay, self.UpdateImage, 1000)
def get_image(self):
# this is where you get your image and convert it to
# a Tk PhotoImage. For demonstration purposes I'll
# just return a static image
data = '''
R0lGODlhIAAgALMAAAAAAAAAgHCAkC6LV76+vvXeswD/ANzc3DLNMubm+v/6zS9PT6Ai8P8A////
/////yH5BAEAAAkALAAAAAAgACAAAAS00MlJq7046803AF3ofAYYfh8GIEvpoUZcmtOKAO5rLMva
0rYVKqX5IEq3XDAZo1GGiOhw5rtJc09cVGo7orYwYtYo3d4+DBxJWuSCAQ30+vNTGcxnOIARj3eT
YhJDQ3woDGl7foNiKBV7aYeEkHEignKFkk4ciYaImJqbkZ+PjZUjaJOElKanqJyRrJyZgSKkokOs
NYa2q7mcirC5I5FofsK6hcHHgsSgx4a9yzXK0rrV19gRADs=
'''
image = Tkinter.PhotoImage(data=data)
return image
if __name__ == "__main__":
app=App()
app.mainloop()
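As mentioned above, here is a minimal sketch of the PIL route (my own illustration, assuming Pillow and Python 3 module names; the PhotoImage reference must be kept alive, or Tk discards the image):

import tkinter as tk
from PIL import Image, ImageTk

root = tk.Tk()
# replace this generated placeholder with the frame grabbed from the camera,
# e.g. the pilimg built with PIL.Image.frombuffer in the question
pil_image = Image.new('L', (640, 480), color=128)
photo = ImageTk.PhotoImage(pil_image)  # keep a reference, or it gets garbage-collected
label = tk.Label(root, image=photo)
label.pack(padx=8, pady=8)
root.mainloop()

In the App class above, get_image() would perform this conversion and store the PhotoImage on self before returning it.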
Since the good answers are pretty large, I feel like I should post a library I built specifically for this:
from cvpubsubs.webcam_pub import VideoHandlerThread
import numpy as np

# pilImage is the PIL image grabbed from the camera in the question's loop
image_np = np.array(pilImage)
def update_function(frame, cam_id):
frame[...] = image_np[...]
VideoHandlerThread(video_source=image_np, callbacks=update_function).display()
Actually, that's if image_binary is a new numpy array every time. If it's assigned to the same location, then just this should work:
from cvpubsubs.webcam_pub import VideoHandlerThread
VideoHandlerThread(video_source=image_np).display()
I know OpenCV barely counts as a GUI, but this is quick, code-wise.
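For comparison, a minimal sketch of displaying frames directly in an OpenCV window (my own illustration; it assumes each frame is already a numpy array, e.g. np.array(pil_image)):

import cv2
import numpy as np

def show_frames():
    for i in range(100):
        # placeholder frame; replace with the camera frame as a numpy array
        frame = np.full((480, 640), i % 256, dtype=np.uint8)
        cv2.imshow('Camera', frame)
        # waitKey() runs OpenCV's event loop; ~30 ms between frames, 'q' quits
        if cv2.waitKey(30) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()

if __name__ == '__main__':
    show_frames()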
Try taking a look at GStreamer. This is the first result Google gave me when searching for "gstreamer 1394", and this one is the first for "gstreamer pyqt".