Any quick Python GUI to display live images from Camera

I am trying to display live images from my 1394 camera.
Currently my code is able to obtain images in a loop from the camera, and I was looking for a quick GUI that will update dynamically (in a separate thread). I can do this in PyQt, maybe using QThreads, but is there a recommended or faster way of doing this?
Here's my code:

#Loop capturing frames from camera
for frame in range(1, 500):
    print 'frame:', frame
    TIME.sleep(1)  # capture a frame every second
    image_binary = pycam.cam.RetrieveBuffer()
    # convert to PIL Image
    pilimg = PIL.Image.frombuffer("L", (cimg.GetCols(), cimg.GetRows()), image_binary, 'raw', "RGBA", 0, 1)
    # At this point I want to send my image data to a GUI window and display it
Thank you.

Here's wxPython code that will do it...
import wx
from PIL import Image

SIZE = (640, 480)

def get_image():
    # Put your code here to return a PIL image from the camera.
    return Image.new('L', SIZE)

def pil_to_wx(image):
    width, height = image.size
    buffer = image.convert('RGB').tostring()
    bitmap = wx.BitmapFromBuffer(width, height, buffer)
    return bitmap

class Panel(wx.Panel):
    def __init__(self, parent):
        super(Panel, self).__init__(parent, -1)
        self.SetSize(SIZE)
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
        self.Bind(wx.EVT_PAINT, self.on_paint)
        self.update()
    def update(self):
        self.Refresh()
        self.Update()
        wx.CallLater(15, self.update)
    def create_bitmap(self):
        image = get_image()
        bitmap = pil_to_wx(image)
        return bitmap
    def on_paint(self, event):
        bitmap = self.create_bitmap()
        dc = wx.AutoBufferedPaintDC(self)
        dc.DrawBitmap(bitmap, 0, 0)

class Frame(wx.Frame):
    def __init__(self):
        style = wx.DEFAULT_FRAME_STYLE & ~wx.RESIZE_BORDER & ~wx.MAXIMIZE_BOX
        super(Frame, self).__init__(None, -1, 'Camera Viewer', style=style)
        panel = Panel(self)
        self.Fit()

def main():
    app = wx.PySimpleApp()
    frame = Frame()
    frame.Center()
    frame.Show()
    app.MainLoop()

if __name__ == '__main__':
    main()
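To feed this viewer from the camera in the question, only get_image() needs replacing. A minimal sketch, reusing the asker's pycam handle and cimg dimensions (both assumptions carried over from the question's loop; the raw mode is set to 'L' to match the image mode, where the question used 'RGBA'):

def get_image():
    # Hypothetical: pycam and cimg come from the asker's 1394 capture code.
    image_binary = pycam.cam.RetrieveBuffer()
    return Image.frombuffer('L', (cimg.GetCols(), cimg.GetRows()),
                            image_binary, 'raw', 'L', 0, 1)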

I thought I'd try the PyQt4 imageviewer.py example, and it worked for me.
Thanks for all your help, guys.
Here's my modified code:
from PyQt4 import QtCore, QtGui
from PIL import ImageQt

class CameraViewer(QtGui.QMainWindow):
    def __init__(self):
        super(CameraViewer, self).__init__()
        self.imageLabel = QtGui.QLabel()
        self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)
        self.imageLabel.setScaledContents(True)
        self.scrollArea = QtGui.QScrollArea()
        self.scrollArea.setWidget(self.imageLabel)
        self.setCentralWidget(self.scrollArea)
        self.setWindowTitle("Image Viewer")
        self.resize(640, 480)
        timer = QtCore.QTimer(self)
        timer.timeout.connect(self.open)
        timer.start(33)  # ~30 Hz

    def open(self):
        # get data and display
        pilimg = getMyPILImageDatFromCamera()
        image = ImageQt.ImageQt(pilimg)
        if image.isNull():
            QtGui.QMessageBox.information(self, "Image Viewer", "Cannot load camera image.")
            return
        self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image))
        self.imageLabel.adjustSize()

if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    viewer = CameraViewer()
    viewer.show()
    sys.exit(app.exec_())

I recommend using Tkinter since it's already part of Python. I've never used PIL, but a quick Google search shows it's easy to use PIL images in Tk widgets (via PIL's ImageTk.PhotoImage class).
If you already have a Tkinter widget set up to display images (a Label widget works fine), all you need to do is arrange for the image to be updated every second or so. You can do this with Tkinter's after command.
Here's an example; I don't have PIL, so it uses a static image, but it illustrates how to use the event loop to fetch images every second:
import Tkinter

class App(Tkinter.Tk):
    def __init__(self):
        Tkinter.Tk.__init__(self)
        self.label = Tkinter.Label(text="your image here", compound="top")
        self.label.pack(side="top", padx=8, pady=8)
        self.iteration = 0
        self.UpdateImage(1000)

    def UpdateImage(self, delay, event=None):
        # this is merely so the display changes even though the image doesn't
        self.iteration += 1
        self.image = self.get_image()
        self.label.configure(image=self.image, text="Iteration %s" % self.iteration)
        # reschedule to run again in 1 second
        self.after(delay, self.UpdateImage, 1000)

    def get_image(self):
        # this is where you get your image and convert it to
        # a Tk PhotoImage. For demonstration purposes I'll
        # just return a static image
        data = '''
            R0lGODlhIAAgALMAAAAAAAAAgHCAkC6LV76+vvXeswD/ANzc3DLNMubm+v/6zS9PT6Ai8P8A////
            /////yH5BAEAAAkALAAAAAAgACAAAAS00MlJq7046803AF3ofAYYfh8GIEvpoUZcmtOKAO5rLMva
            0rYVKqX5IEq3XDAZo1GGiOhw5rtJc09cVGo7orYwYtYo3d4+DBxJWuSCAQ30+vNTGcxnOIARj3eT
            YhJDQ3woDGl7foNiKBV7aYeEkHEignKFkk4ciYaImJqbkZ+PjZUjaJOElKanqJyRrJyZgSKkokOs
            NYa2q7mcirC5I5FofsK6hcHHgsSgx4a9yzXK0rrV19gRADs=
        '''
        image = Tkinter.PhotoImage(data=data)
        return image

if __name__ == "__main__":
    app = App()
    app.mainloop()
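With PIL installed, get_image() could return a live camera frame instead. A minimal sketch, assuming a PIL image source like the getMyPILImageDatFromCamera() placeholder from the PyQt4 answer above; the PhotoImage is kept on self so Tkinter doesn't garbage-collect it:

from PIL import ImageTk

def get_image(self):
    pilimg = getMyPILImageDatFromCamera()  # hypothetical camera call
    self.photo = ImageTk.PhotoImage(pilimg)  # keep a reference alive
    return self.photo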

Since the good answers are pretty large, I feel like I should post a library I built specifically for this:
from cvpubsubs.webcam_pub import VideoHandlerThread
import numpy as np

image_np = np.array(pilImage)

def update_function(frame, cam_id):
    frame[...] = image_np[...]

VideoHandlerThread(video_source=image_np, callbacks=update_function).display()
Actually, that's if image_binary is a new numpy array every time. If it's assigned to the same location, then just this should work:
from cvpubsubs.webcam_pub import VideoHandlerThread
VideoHandlerThread(video_source=image_np).display()
I know OpenCV barely counts as a GUI, but this is quick code-wise.
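For comparison, the bare OpenCV display loop is only a few lines. A minimal sketch, again assuming a PIL frame source like the placeholder used above:

import cv2
import numpy as np

while True:
    pilimg = getMyPILImageDatFromCamera()  # hypothetical camera call
    cv2.imshow('Camera Viewer', np.array(pilimg))
    if cv2.waitKey(33) & 0xFF == ord('q'):  # ~30 Hz; quit on 'q'
        break
cv2.destroyAllWindows()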

Take a look at GStreamer. This is the first result Google gave me when searching for "gstreamer 1394", and this one is the first for "gstreamer pyqt".

Related

Video with alpha channel overlay on background image: Alpha shows black

I need to play a .mov video (ProRes 4444) with an alpha channel in a scene. The scene has a background image, and I need to use the video's alpha channel so it overlays on the background.
If I open the video normally with QMediaPlayer, the alpha channel appears black.
[screenshot: background picture with the video area rendered black instead of transparent]
How can I make the output of the QMediaPlayer (a QGraphicsVideoItem) respect the alpha and make the overlay effect possible?
The closest I got to an answer, based on online research, is C++ code showing the need to create a subclass of QAbstractVideoSurface that receives video frames, converts them to ARGB, then forwards them to a QLabel that displays them:
Displaying a video with an alpha channel using qt
I've also tried that, unsuccessfully. Is this the right course, or am I just missing something simple in my current code?
EDIT:
Link to files (background image and video .mov)
https://drive.google.com/drive/folders/1LIZzTg1E8wkaD0YSvkkcfSATdlDTggyh?usp=sharing
import sys
from PyQt5.QtMultimedia import *
from PyQt5.QtMultimediaWidgets import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *

class VideoWindow(QMainWindow):
    def __init__(self):
        super(VideoWindow, self).__init__()
        self.setWindowTitle('QMediaPlayer TEST')
        self.resize(1920, 1080)
        self.vista = QGraphicsView(self)
        self.vista.setGeometry(QRect(0, 0, 1920, 1080))
        self.scene = QGraphicsScene(self.vista)
        self.scene.setSceneRect(0, 0, 1920, 1080)
        self.vista.setScene(self.scene)
        self.graphvitem1 = QGraphicsVideoItem()

        #SET BACKGROUND IMAGE ON SCENE
        self.tempImg = QPixmap("/Users/elemental/Desktop/pyvids/fons.jpeg")
        self.tempImg = self.tempImg.scaled(self.scene.width(), self.scene.height())
        self.graphicsPixmapItem = QGraphicsPixmapItem(self.tempImg)
        self.scene.addItem(self.graphicsPixmapItem)

        #SET VIDEO 1 WITH LOOP
        self.mediaPlayer1 = QMediaPlayer(None, QMediaPlayer.VideoSurface)
        self.mediaPlayer1.setVideoOutput(self.graphvitem1)
        self.playlist1 = QMediaPlaylist(self)
        self.playlist1.addMedia(QMediaContent(QUrl.fromLocalFile("/Users/elemental/Desktop/pyvids/vida1.mov")))
        self.playlist1.setCurrentIndex(1)
        self.playlist1.setPlaybackMode(QMediaPlaylist.CurrentItemInLoop)
        self.mediaPlayer1.setPlaylist(self.playlist1)
        self.graphvitem1.setPos(500, 100)
        self.graphvitem1.setSize(QSizeF(1000, 500))
        self.scene.addItem(self.graphvitem1)
        self.mediaPlayer1.play()
        self.vista.show()

if __name__ == '__main__':
    app = QApplication([])
    window = VideoWindow()
    window.show()
    sys.exit(app.exec_())
From what I can see, QVideoWidget doesn't support alpha channels by default, so it falls back to the "basic" black background.
But, implementation seems possible, by properly subclassing QAbstractVideoSurface.
Consider that the following code is experimental: my knowledge of QMediaPlayer and the Qt video surface isn't that deep (the former is an abstraction over multiple platforms and media libraries that can behave very differently on different configurations), and I could only test it on two Linux platforms, so I don't know how it behaves under Windows or macOS.
The assumption is that the video surface provides a default dedicated QWidget subclass (VideoWidget) unless another class with a suitable setImage() is provided, and that it updates its image whenever the media player requires it.
Note that I only tested it with a couple of videos (including the provided one), and further testing might be required.
from PyQt5.QtMultimedia import *
from PyQt5.QtMultimediaWidgets import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *

class VideoWidget(QWidget):
    image = QImage()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def setImage(self, image):
        self.image = image
        self.update()

    def sizeHint(self):
        return QSize(640, 480)

    def paintEvent(self, event):
        qp = QPainter(self)
        # ensure that smooth transformation is used while scaling pixmaps
        qp.setRenderHints(qp.SmoothPixmapTransform)
        # provide compliancy with background set using stylesheets, see:
        # https://doc.qt.io/qt-5/stylesheet-reference.html#qwidget-widget
        opt = QStyleOption()
        opt.initFrom(self)
        self.style().drawPrimitive(QStyle.PE_Widget, opt, qp, self)
        # draw the image, scaled to the widget size; if you need fixed sizes
        # or keep aspect ratio, implement this (or the widget) accordingly
        qp.drawImage(self.rect(), self.image, self.image.rect())

class AlphaVideoDrawer(QAbstractVideoSurface):
    def __init__(self, videoWidget=None, widgetOptions=None):
        super().__init__()
        if videoWidget:
            if not hasattr(videoWidget, 'setImage'):
                raise NotImplementedError(
                    'setImage() must be implemented for videoWidget!')
        else:
            if not isinstance(widgetOptions, dict):
                widgetOptions = {}
            elif not 'styleSheet' in widgetOptions:
                # just a default background for testing purposes
                widgetOptions = {'styleSheet': 'background: darkGray;'}
            videoWidget = VideoWidget(**widgetOptions)
        self.videoWidget = videoWidget
        # QVideoFrame.image() was introduced in Qt 5.15
        major, minor, patch = map(int, QT_VERSION_STR.split('.'))
        if major < 6 and minor < 15:
            self.imageFromFrame = self._imageFromFrameFix
        else:
            self.imageFromFrame = lambda frame: frame.image()

    def _imageFromFrameFix(self, frame):
        clone_frame = QVideoFrame(frame)
        clone_frame.map(QAbstractVideoBuffer.ReadOnly)
        image = QImage(
            clone_frame.bits(), frame.width(), frame.height(), frame.bytesPerLine(),
            QVideoFrame.imageFormatFromPixelFormat(frame.pixelFormat()))
        clone_frame.unmap()
        return image

    def supportedPixelFormats(self, type):
        return [QVideoFrame.Format_ARGB32]

    def present(self, frame: QVideoFrame):
        if frame.isValid():
            self.videoWidget.setImage(self.imageFromFrame(frame))
        if self.surfaceFormat().pixelFormat() != frame.pixelFormat() or \
                self.surfaceFormat().frameSize() != frame.size():
            self.setError(QAbstractVideoSurface.IncorrectFormatError)
            self.stop()
            return False
        else:
            return True

class AlphaVideoTest(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setStyleSheet('''
            QFrame#mainFrame {
                background: green;
            }
        ''')
        mainFrame = QFrame(objectName='mainFrame')
        self.setCentralWidget(mainFrame)
        layout = QVBoxLayout(mainFrame)
        self.playButton = QPushButton('Play', checkable=True)
        layout.addWidget(self.playButton)
        self.drawer = AlphaVideoDrawer()
        layout.addWidget(self.drawer.videoWidget)
        self.mediaPlayer1 = QMediaPlayer(self, QMediaPlayer.VideoSurface)
        self.playlist = QMediaPlaylist(self)
        path = QDir.current().absoluteFilePath('vida1.mov')
        self.playlist.addMedia(QMediaContent(QUrl.fromLocalFile(path)))
        self.playlist.setCurrentIndex(1)
        self.playlist.setPlaybackMode(QMediaPlaylist.CurrentItemInLoop)
        self.mediaPlayer1.setPlaylist(self.playlist)
        self.mediaPlayer1.setVideoOutput(self.drawer)
        self.playButton.toggled.connect(self.togglePlay)

    def togglePlay(self, play):
        if play:
            self.mediaPlayer1.play()
            self.playButton.setText('Pause')
        else:
            self.mediaPlayer1.pause()
            self.playButton.setText('Play')

if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    test = AlphaVideoTest()
    test.show()
    sys.exit(app.exec_())
I based the above code on the following sources:
wrong video frame of present() of qabstractvideosurface in pyqt5
Displaying a video with an alpha channel using qt
Note that I limited the supportedPixelFormats() output, as using the full list of formats provided in the related question didn't work. This doesn't mean the above will work everywhere: further testing is probably required, possibly on different machines, OS/system configurations, and video formats; remember that QMediaPlayer relies completely on the underlying OS and the default media backend.
Finally, if you only need this for "limited" and predefined animations, consider implementing your own subclass of QWidget that uses a list of loaded PNG images and shows them by implementing paintEvent(), driven by updates from a QVariantAnimation (see the sketch below). While this kind of implementation might be less performant or ideal, it has the major benefit of cross-platform compatibility.
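A minimal sketch of that frame-sequence idea, relying on the wildcard PyQt5 imports used above; the image paths are hypothetical:

class FrameAnimationWidget(QWidget):
    def __init__(self, imagePaths):
        super().__init__()
        self.frames = [QPixmap(p) for p in imagePaths]
        # drive the frame index with a looping QVariantAnimation
        self.anim = QVariantAnimation(self, startValue=0,
            endValue=len(self.frames) - 1, duration=1000, loopCount=-1)
        self.anim.valueChanged.connect(lambda value: self.update())
        self.anim.start()

    def paintEvent(self, event):
        qp = QPainter(self)
        # paint whichever frame the animation has currently interpolated to
        qp.drawPixmap(self.rect(), self.frames[int(self.anim.currentValue())])

# usage, e.g.: FrameAnimationWidget(['frame00.png', 'frame01.png', 'frame02.png'])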

how to capture an image using pyqt5? [duplicate]

This question already has answers here: PyQt showing video stream from opencv (3 answers). Closed 4 years ago.
I am new to PyQt5. I am writing a program to develop camera software, with a click button to capture an image. When I press Enter, it captures an image and saves it to disk, but when I press the button, the image is not saved and the program closes. I couldn't figure out the mistake I made.
The error is:
Process finished with exit code -1073740791 (0xC0000409)
My code is:
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import cv2

cap = cv2.VideoCapture(0)

class Thread(QThread):
    changePixmap = pyqtSignal(QImage)

    def run(self):
        while True:
            ret, frame = cap.read()
            rgbImage = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            convertToQtFormat = QImage(rgbImage.data, rgbImage.shape[1], rgbImage.shape[0], QImage.Format_RGB888)
            p = convertToQtFormat.scaled(640, 480, Qt.KeepAspectRatio)
            self.changePixmap.emit(p)

class camera(QWidget):
    def __init__(self):
        super().__init__()
        self.title = 'Camera'
        self.left = 0
        self.top = 0
        self.width = 640
        self.height = 480
        self.cameraUI()

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.label.setPixmap(QPixmap.fromImage(image))

    def cameraUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.resize(1800, 1200)
        # create a label
        self.label = QLabel(self)
        self.label.move(100, 120)
        self.label.resize(640, 480)
        camera_button = QPushButton("camera_click", self)
        camera_button.move(50, 50)
        camera_button.clicked.connect(self.click_picture)
        th = Thread(self)
        th.changePixmap.connect(self.setImage)
        th.start()
        self.show()

    def click_picture(self):
        while True:
            frame = cap.read()
            img_name = "image.png"
            cv2.imwrite(img_name, frame)

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = camera()
    ex.show()
    sys.exit(app.exec_())
Any suggestion is appreciated.
It looks like a threading issue. Since your thread is using the camera, you probably cannot read from it in your main thread. Try not starting the thread, since it doesn't seem to be used otherwise and it isn't part of your stated goal. If instead you want to get frames continuously and just save one when the button is clicked, you'll have to write a slot connected to the signal you're emitting in the thread, then add some logic to save when the button is clicked (see the sketch below).
Also, it looks like you have an infinite loop in click_picture, and the reading syntax in that method should be: flag, frame = cap.read().
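A minimal sketch of that slot-based fix, reusing the question's class names; caching the emitted frame in last_image is an assumption, not part of the original code:

class camera(QWidget):
    # ... __init__ and cameraUI as in the question ...

    @pyqtSlot(QImage)
    def setImage(self, image):
        self.last_image = image  # cache the most recent frame
        self.label.setPixmap(QPixmap.fromImage(image))

    def click_picture(self):
        # save the cached frame instead of re-reading the busy camera
        if hasattr(self, 'last_image'):
            self.last_image.save("image.png")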
If you're interested in a project using a similar approach, check this out (disclaimer, I'm one of the authors.): https://github.com/natedileas/ImageRIT/blob/master/Server/qt_CameraWidget.py

How to make screenshot while showing video from cam?

#Importing necessary libraries, mainly the OpenCV, and PyQt libraries
import cv2
import numpy as np
import sys
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5.QtCore import pyqtSignal

class ShowVideo(QtCore.QObject):
    #initiating the built in camera
    camera_port = -1
    camera = cv2.VideoCapture(camera_port)
    VideoSignal = QtCore.pyqtSignal(QtGui.QImage)

    def __init__(self, parent=None):
        super(ShowVideo, self).__init__(parent)

    @QtCore.pyqtSlot()
    def startVideo(self):
        run_video = True
        while run_video:
            ret, image = self.camera.read()
            color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            height, width, _ = color_swapped_image.shape
            qt_image = QtGui.QImage(color_swapped_image.data,
                                    width,
                                    height,
                                    color_swapped_image.strides[0],
                                    QtGui.QImage.Format_RGB888)
            pixmap = QtGui.QPixmap(qt_image)
            qt_image = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
            qt_image = QtGui.QImage(qt_image)
            self.VideoSignal.emit(qt_image)

    @QtCore.pyqtSlot()
    def makeScreenshot(self):
        #cv2.imwrite("test.jpg", self.image)
        print("Screenshot saved")
        #self.qt_image.save('test.jpg')

class ImageViewer(QtWidgets.QWidget):
    def __init__(self, parent=None):
        super(ImageViewer, self).__init__(parent)
        self.image = QtGui.QImage()
        self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.drawImage(0, 0, self.image)
        self.image = QtGui.QImage()

    def initUI(self):
        self.setWindowTitle('Test')

    @QtCore.pyqtSlot(QtGui.QImage)
    def setImage(self, image):
        if image.isNull():
            print("viewer dropped frame!")
        self.image = image
        if image.size() != self.size():
            self.setFixedSize(image.size())
        self.update()

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)

    thread = QtCore.QThread()
    thread.start()
    vid = ShowVideo()
    vid.moveToThread(thread)

    image_viewer = ImageViewer()
    #image_viewer.resize(200,400)
    vid.VideoSignal.connect(image_viewer.setImage)

    #Button to start the videocapture:
    push_button = QtWidgets.QPushButton('Start')
    push_button.clicked.connect(vid.startVideo)
    push_button2 = QtWidgets.QPushButton('Screenshot')
    push_button2.clicked.connect(vid.makeScreenshot)

    vertical_layout = QtWidgets.QVBoxLayout()
    vertical_layout.addWidget(image_viewer)
    vertical_layout.addWidget(push_button)
    vertical_layout.addWidget(push_button2)

    layout_widget = QtWidgets.QWidget()
    layout_widget.setLayout(vertical_layout)

    main_window = QtWidgets.QMainWindow()
    main_window.setCentralWidget(layout_widget)
    main_window.resize(640, 480)
    main_window.show()
    sys.exit(app.exec_())
This code shows video from the camera in an endless loop using OpenCV and PyQt5. But how do I take a screenshot without stopping the video? I thought it might need to stop the loop briefly, take the screenshot, and then resume the loop.
You can use cv2.waitKey() for the same, as shown below:
while run_video:
    ret, image = self.camera.read()
    if cv2.waitKey(10) & 0xFF == ord('s'):
        cv2.imwrite("screenshot.jpg", image)
(I'm guessing that by the term "screenshot", you mean the camera frame, and not the image of the entire screen.)
When you press 's' on the keyboard, it'll perform imwrite.
Note that if you wish to save multiple images, you'd have to vary the filename. The above code will overwrite screenshot.jpg to save only the latest frame.
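Alternatively, staying inside Qt (cv2.waitKey() only sees key presses when an OpenCV window has focus), the Screenshot button can save the newest frame the capture loop produced. A minimal sketch against the ShowVideo class above; the last_frame attribute is an assumption:

    def startVideo(self):
        run_video = True
        while run_video:
            ret, image = self.camera.read()
            self.last_frame = image  # cache the newest BGR frame
            # ... convert and emit qt_image exactly as in the original loop ...

    def makeScreenshot(self):
        # write the cached frame without interrupting the capture loop
        cv2.imwrite("screenshot.jpg", self.last_frame)
        print("Screenshot saved")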

WXPython Display Images Sometimes do not Display

I am currently using wxPython to build a GUI which displays images, changing the image about once or twice per second:
image = image.Scale(scaledHeight, scaledWidth)
image1 = image.ConvertToBitmap()
# Center the image
self.panel.bmp1 = wx.StaticBitmap(self.panel, -1, image1,
    ((width / 2) - (image.GetWidth() / 2), (height / 2) - (image.GetHeight() / 2)),
    (image.GetWidth(), image.GetHeight()))
The only problem is that periodically the image does not display. I tried an inefficient workaround: copying the image into image1, image2, etc. and displaying all of them, hoping the chance of all of them failing to display would be lower. Unfortunately, the image still periodically fails to display. Is there some sort of buffer I need to use?
Thanks in advance!
It would be convenient if you could provide your code snippet and some more details regarding what you are trying to achieve.
However, I have created a small example which shows how to change/update images on a panel. The code below creates a random number in the myThread() class. The number is then sent to the gui() class using a publisher-subscriber strategy. The changeImage() method of the gui() class checks the value: if it is less than or equal to 5, the green image is displayed on the panel named self.myPanel; if the value is greater than 5, the blue image is displayed.
The images can be downloaded from here: green.bmp blue.bmp
Code: Please note that the image files should be in the same directory as this script.
import wx
import time
from wx.lib.pubsub import setupkwargs
from wx.lib.pubsub import pub
from threading import Thread
import random

class gui(wx.Frame):
    def __init__(self, parent, id, title):
        wx.Frame.__init__(self, None, id, title, size=(100,100))
        self.myPanel = wx.Panel(self, -1)
        image_file1 = 'green.bmp'
        image_file2 = 'blue.bmp'
        self.image1 = wx.Image(image_file1, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        self.image2 = wx.Image(image_file2, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
        self.bitmap = wx.StaticBitmap(self.myPanel, -1)
        pub.subscribe(self.changeImage, 'Update')

    def changeImage(self, value):
        # To destroy any previous image on self.myPanel
        if self.bitmap:
            self.bitmap.Destroy()
        # If the value received from the myThread() class is <= 5, display the green image
        if value <= 5:
            self.bitmap = wx.StaticBitmap(self.myPanel, -1, self.image1, (0, 0))
        # If the value received from the myThread() class is > 5, display the blue image
        else:
            self.bitmap = wx.StaticBitmap(self.myPanel, -1, self.image2, (10, 10))

class myThread(Thread):
    def __init__(self):
        Thread.__init__(self)
        self.daemon = True
        self.start()

    def run(self):
        while True:
            number = random.randrange(1, 10)
            wx.CallAfter(pub.sendMessage, 'Update', value=number)
            time.sleep(1)

if __name__ == '__main__':
    app = wx.App()
    frame = gui(parent=None, id=-1, title="Test")
    frame.Show()
    myThread()
    app.MainLoop()

Set Parameters of Screenshot taken by Python

I have used the following code to take a screenshot of a web page.
From the code, I understand that first the complete page is loaded in the frame, and then the frame is rendered in the painter. The problem I am facing is that some web pages can be very lengthy in terms of content, but I just want a screenshot of the first page of the website. Is there any way I can handle that?
def capture(self, url, output_file):
    self.load(QUrl(url))
    self.wait_load()
    # set to webpage size
    frame = self.page().mainFrame()
    self.page().setViewportSize(frame.contentsSize())
    # render image
    image = QImage(self.page().viewportSize(), QImage.Format_ARGB32)
    painter = QPainter(image)
    frame.render(painter)
    painter.end()
    print 'saving', output_file
    image.save(output_file)
I have tried setting the preferred size of the frame with the help of a QSize object, but it doesn't help either.
Alternatively, is there any way I can crop the image?
Any help is highly appreciated, thanks.
There is no such thing as "the first page". It's entirely up to the application to decide how to divide up the content.
At the moment, your script explicitly sets the first division to be the entire contents of the page. To avoid that, you should simply decide how much of the content you want to capture, and then resize the viewport as appropriate.
The demo script below uses QPrinter to calculate an A4 page size:
usage: capture.py url > webpage.png
from PyQt4 import QtCore, QtGui, QtWebKit

class WebPage(QtWebKit.QWebPage):
    def __init__(self):
        QtWebKit.QWebPage.__init__(self)
        self.mainFrame().setScrollBarPolicy(
            QtCore.Qt.Horizontal, QtCore.Qt.ScrollBarAlwaysOff)
        self.mainFrame().setScrollBarPolicy(
            QtCore.Qt.Vertical, QtCore.Qt.ScrollBarAlwaysOff)
        self.mainFrame().loadFinished.connect(self.handleLoadFinished)
        printer = QtGui.QPrinter()
        printer.setPaperSize(QtGui.QPrinter.A4)
        self.setViewportSize(printer.paperSize(
            QtGui.QPrinter.DevicePixel).toSize())

    def capture(self, url):
        self._url = QtCore.QUrl(url)
        QtCore.QTimer.singleShot(0, self.handleLoad)

    def handleLoad(self):
        self.mainFrame().load(self._url)

    def handleLoadFinished(self):
        image = QtGui.QImage(self.viewportSize(),
                             QtGui.QImage.Format_ARGB32)
        painter = QtGui.QPainter(image)
        self.mainFrame().render(painter)
        painter.end()
        output = QtCore.QFile()
        output.open(1, QtCore.QIODevice.WriteOnly)
        image.save(output, 'PNG')
        sys.exit(0)

if __name__ == '__main__':
    import sys, signal
    app = QtGui.QApplication(sys.argv)
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    page = WebPage()
    page.capture(sys.argv[1])
    sys.exit(app.exec_())
Could you resize the image to the size of your screen?

import gtk

def capture(self, url, output_file):
    self.load(QUrl(url))
    self.wait_load()
    # set to webpage size
    frame = self.page().mainFrame()
    self.page().setViewportSize(frame.contentsSize())
    # render image
    image = QImage(self.page().viewportSize(), QImage.Format_ARGB32)
    painter = QPainter(image)
    frame.render(painter)
    painter.end()
    # query the screen size via GTK and scale the rendered QImage down to it
    window = gtk.Window()
    s = window.get_screen()
    image = image.scaled(s.get_width(), s.get_height())
    print 'saving', output_file
    image.save(output_file)
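If cropping is acceptable instead of scaling, QImage.copy() can also cut the rendered page down to a fixed region. A minimal sketch using the variables from capture() above; the 1024x768 region is an arbitrary example:

from PyQt4.QtCore import QRect

# keep only the top-left 1024x768 region of the rendered page
cropped = image.copy(QRect(0, 0, 1024, 768))
cropped.save(output_file)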
