I have used the following code to take a screenshot of a web page.
From the code I understand that the complete page is first loaded into the frame, and then the frame is rendered with the painter. The problem I am facing is that some web pages can be very lengthy in terms of content, but I only want a screenshot of the first page of the website. Is there any way I can handle that?
def capture(self, url, output_file):
    self.load(QUrl(url))
    self.wait_load()
    # set to webpage size
    frame = self.page().mainFrame()
    self.page().setViewportSize(frame.contentsSize())
    # render image
    image = QImage(self.page().viewportSize(), QImage.Format_ARGB32)
    painter = QPainter(image)
    frame.render(painter)
    painter.end()
    print 'saving', output_file
    image.save(output_file)
I have tried to set the preferred size of the frame with the help of a QSize object, but that does not help either.
Alternatively, is there any way I can crop the image?
Any help is highly appreciated. Thanks.
There is no such thing as "the first page". It's entirely up to the application to decide how to divide up the content.
At the moment, your script explicitly sets the first division to be the entire contents of the page. To avoid that, you should simply decide how much of the content you want to capture, and then resize the viewport as appropriate.
The demo script below uses QPrinter to calculate an A4 page size:
# usage: capture.py url > webpage.png

from PyQt4 import QtCore, QtGui, QtWebKit

class WebPage(QtWebKit.QWebPage):
    def __init__(self):
        QtWebKit.QWebPage.__init__(self)
        self.mainFrame().setScrollBarPolicy(
            QtCore.Qt.Horizontal, QtCore.Qt.ScrollBarAlwaysOff)
        self.mainFrame().setScrollBarPolicy(
            QtCore.Qt.Vertical, QtCore.Qt.ScrollBarAlwaysOff)
        self.mainFrame().loadFinished.connect(self.handleLoadFinished)
        # size the viewport to one A4 page
        printer = QtGui.QPrinter()
        printer.setPaperSize(QtGui.QPrinter.A4)
        self.setViewportSize(printer.paperSize(
            QtGui.QPrinter.DevicePixel).toSize())

    def capture(self, url):
        self._url = QtCore.QUrl(url)
        QtCore.QTimer.singleShot(0, self.handleLoad)

    def handleLoad(self):
        self.mainFrame().load(self._url)

    def handleLoadFinished(self):
        image = QtGui.QImage(self.viewportSize(),
                             QtGui.QImage.Format_ARGB32)
        painter = QtGui.QPainter(image)
        self.mainFrame().render(painter)
        painter.end()
        # write the PNG to stdout (file descriptor 1)
        output = QtCore.QFile()
        output.open(1, QtCore.QIODevice.WriteOnly)
        image.save(output, 'PNG')
        sys.exit(0)

if __name__ == '__main__':

    import sys, signal

    app = QtGui.QApplication(sys.argv)
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    page = WebPage()
    page.capture(sys.argv[1])
    sys.exit(app.exec_())
Could you resize the image to the size of your screen?
import gtk, pygtk

def capture(self, url, output_file):
    self.load(QUrl(url))
    self.wait_load()
    # set to webpage size
    frame = self.page().mainFrame()
    self.page().setViewportSize(frame.contentsSize())
    # render image
    image = QImage(self.page().viewportSize(), QImage.Format_ARGB32)
    painter = QPainter(image)
    frame.render(painter)
    painter.end()
    # scale the rendered page down to the screen size
    window = gtk.Window()
    s = window.get_screen()
    image = image.scaled(s.get_width(), s.get_height(),
                         Qt.KeepAspectRatio, Qt.SmoothTransformation)
    print 'saving', output_file
    image.save(output_file)
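If cropping is acceptable instead of scaling (the question also asks about cropping), QImage can cut out a sub-rectangle directly. A minimal sketch, where the 1024x768 region is just an assumed definition of "the first page":

# keep only the top-left 1024x768 of the rendered page
# (the width and height here are assumptions; use whatever "first page" means to you)
cropped = image.copy(0, 0, 1024, 768)
cropped.save(output_file)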
I wrote some PyQt5 code to show a picture or a file from our local computer. After this, I have no idea how to save the picture or file shown in the window to another path. The only way I have figured out is to copy it. Any suggestions or tips would be appreciated.
Here is the code to show a picture or file:
import sys
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *

class fileDialogdemo(QWidget):
    def __init__(self, parent=None):
        super(fileDialogdemo, self).__init__(parent)

        layout = QVBoxLayout()
        self.btn = QPushButton("Load Picture")
        self.btn.clicked.connect(self.getimage)
        layout.addWidget(self.btn)

        self.le = QLabel('')
        layout.addWidget(self.le)

        self.btn1 = QPushButton('Load text file')
        self.btn1.clicked.connect(self.getFiles)
        layout.addWidget(self.btn1)

        self.contents = QTextEdit()
        layout.addWidget(self.contents)

        self.setLayout(layout)
        self.setWindowTitle('File Dialog')

    def getimage(self):
        image_file, _ = QFileDialog.getOpenFileName(self, 'Open file', 'C:\\',
                                                    'Image files (*.jpg *.gif *.png *.jpeg)')
        self.le.setPixmap(QPixmap(image_file))

    def getFiles(self):
        dig = QFileDialog()
        dig.setFileMode(QFileDialog.AnyFile)
        dig.setFilter(QDir.Files)
        if dig.exec_():
            filenames = dig.selectedFiles()
            f = open(filenames[0], 'r')
            with f:
                data = f.read()
                self.contents.setText(data)

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = fileDialogdemo()
    ex.show()
    sys.exit(app.exec_())
Here is the save code I am trying, but there is something wrong with it:
def contextMenuEvent(self, event):
    cmenu = QMenu(self)
    saveAct = cmenu.addAction("Save as")
    action = cmenu.exec_(self.mapToGlobal(event.pos()))
    if action == saveAct:
        filename = QFileDialog.getSaveFileName(self)
        shutil.copyfile(self.image_file, filename)
Apart from the copy idea, is there any better way?
Do not copy the file, since nobody guarantees that the original image is still in the same location (or still exists) after it has been loaded. Instead, retrieve the QPixmap from the QLabel and save it:
def contextMenuEvent(self, event):
    cmenu = QMenu(self)
    saveAct = cmenu.addAction("Save as")
    action = cmenu.exec_(self.mapToGlobal(event.pos()))
    if action == saveAct:
        filename, _ = QFileDialog.getSaveFileName(self)
        pixmap = self.le.pixmap()
        if pixmap is not None and filename:
            pixmap.save(filename)
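A small follow-up note (an assumption about intent, not something stated in the thread): QPixmap.save() infers the image format from the file extension, so it can help to pass a filter to getSaveFileName so that the chosen name actually has one. A minimal sketch:

filename, _ = QFileDialog.getSaveFileName(
    self, "Save image", "", "Images (*.png *.jpg *.bmp)")
pixmap = self.le.pixmap()
if pixmap is not None and filename:
    # format is taken from the extension; pass e.g. "PNG" explicitly to force one
    pixmap.save(filename)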
I am a newbie. I want to create a JPG or PNG image of a web page with PyQt4 (so you know what I mean, for example something like this).
This script works perfectly:
import sys
import time
import os
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *

width = int(530)
height = int(1060)

class Screenshot(QWebView):
    def __init__(self):
        self.app = QApplication(sys.argv)
        QWebView.__init__(self)
        self._loaded = False
        self.loadFinished.connect(self._loadFinished)

    def capture(self, url, output_file):
        self.load(QUrl(url))
        self.wait_load()
        time.sleep(2)
        # set viewport to the fixed capture size
        frame = self.page().mainFrame()
        self.page().setViewportSize(QSize(width, height))
        # render image
        image = QImage(self.page().viewportSize(), QImage.Format_ARGB32)
        painter = QPainter(image)
        frame.render(painter)
        painter.end()
        print 'Saved', output_file
        image.save(output_file)

    def wait_load(self, delay=0):
        # process app events until page loaded
        while not self._loaded:
            self.app.processEvents()
            time.sleep(delay)
        self._loaded = False

    def _loadFinished(self, result):
        self._loaded = True

filename = "capture"
s = Screenshot()
s.capture('http://www.filmaffinity.com/es/main.html', filename + (time.strftime("-%H-%M-%S")) + ".png")
This captures the top of the page at 530 px wide by 1060 px high, which is fine, but I want the capture to start further down the page and cover a larger area, i.e. a specific region of the site, such as in this image.
How can I modify the script?
Thanks
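One possible approach (just a sketch, not from the thread; the capture_region name and the x, y, w, h defaults are made-up placeholders) is to render a viewport tall enough to contain the area you want and then crop the result with QImage.copy():

def capture_region(self, url, output_file, x=0, y=1060, w=530, h=1060):
    # x, y, w, h are assumed values describing the region of interest
    self.load(QUrl(url))
    self.wait_load()
    time.sleep(2)
    frame = self.page().mainFrame()
    # make the viewport large enough to include the region, then render it all
    self.page().setViewportSize(QSize(x + w, y + h))
    image = QImage(self.page().viewportSize(), QImage.Format_ARGB32)
    painter = QPainter(image)
    frame.render(painter)
    painter.end()
    # keep only the requested rectangle
    image.copy(x, y, w, h).save(output_file)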
I'm integrating OpenCV 3.0 with Qt5 in Python 3.4.3 using PyQt5. I've been trying to build an app to process videos from files, but ran into some trouble with PyQt. Specifically, I will be loading videos through a file dialog multiple times, and these videos will not be the same size. Therefore, I want the main window in my app to wrap/expand to the size of the video being played.
Below is a simplified version of my code with the three core classes for showing the video: one for the main window, one for a frame viewer widget that shows each video frame in the GUI, and one for a video processor that reads and processes the video through OpenCV, converts each frame to a QImage and sends it to the viewer.
import sys
import cv2
from PyQt5 import QtCore, QtGui, QtWidgets

class videoProcessor(QtCore.QObject):
    filename = None
    cap = None
    videoSignal = QtCore.pyqtSignal(QtGui.QImage)

    def __init__(self):
        super().__init__()
        self.filename = "test.mp4"

    @QtCore.pyqtSlot()
    def runVideoProcessor(self):
        self.cap = cv2.VideoCapture(self.filename)
        while self.cap.isOpened():
            ret, frame = self.cap.read()
            if ret:
                outimg = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                imgh, imgw, bytesPerComponent = outimg.shape
                bytesPerLine = bytesPerComponent * imgw
                qtimg = QtGui.QImage(outimg.data, imgw, imgh, bytesPerLine, QtGui.QImage.Format_RGB888)
                self.videoSignal.emit(qtimg)
            else:
                break
        self.cap.release()

class frameViewer(QtWidgets.QWidget):
    def __init__(self):
        super().__init__()
        self.image = QtGui.QImage()
        self.imageAvailable = False

    def paintEvent(self, event):
        painter = QtGui.QPainter(self)
        painter.drawImage(0, 0, self.image)
        self.image = QtGui.QImage()
        painter.end()

    @QtCore.pyqtSlot(QtGui.QImage)
    def setFrame(self, image):
        self.image = image
        self.setFixedSize(self.image.size())
        self.repaint()

class mainWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        self.fview = frameViewer()
        self.vproc = videoProcessor()
        self.vproc.videoSignal.connect(self.fview.setFrame)

        self.startButton = QtWidgets.QPushButton("Start")
        self.startButton.clicked.connect(self.vproc.runVideoProcessor)

        self.mainLayout = QtWidgets.QVBoxLayout()
        self.mainLayout.addWidget(self.fview)
        self.mainLayout.addWidget(self.startButton)

        self.mainWidget = QtWidgets.QWidget()
        self.mainWidget.setLayout(self.mainLayout)
        self.mainWidget.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        self.setCentralWidget(self.mainWidget)

        self.statusBar().showMessage('Ready')
        self.setGeometry(50, 50, 300, 300)
        self.setWindowTitle('OpenCV PyQt Test')
        self.show()

if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    mw = mainWindow()
    sys.exit(app.exec_())
So far, the program can run videos but there are two main problems:
The window does not adjust to the size of the video frame until the end of the video. However, if the same video is played again, the window is the correct size from the start.
If I don't set self.image = QtGui.QImage() in paintEvent after drawing the image, the program crashes. However, if I put that line in, the window goes blank at the end of the video, because an empty QImage is drawn in the last frame's place whenever the window is updated.
Any ideas on how to solve these issues? Thank you.
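A likely cause of the crash (an educated guess, not something confirmed in the thread): the QImage built from outimg.data only wraps the numpy buffer without copying it, so once the numpy array goes away the QImage points at freed memory, which would explain why clearing self.image in paintEvent avoids the crash. A minimal sketch of the two changes that follow from that guess:

# In videoProcessor.runVideoProcessor: copy the pixel data so the QImage
# owns its buffer and outlives the numpy array it was built from.
qtimg = QtGui.QImage(outimg.data, imgw, imgh, bytesPerLine,
                     QtGui.QImage.Format_RGB888).copy()

# In frameViewer.paintEvent: with an owning QImage there is no need to
# clear self.image after drawing, so the last frame stays visible on repaints.
def paintEvent(self, event):
    painter = QtGui.QPainter(self)
    painter.drawImage(0, 0, self.image)
    painter.end()

As for the window only adjusting at the end of the video, that is probably because runVideoProcessor loops in the GUI thread, so layout updates are only processed once the loop finishes; moving videoProcessor to a QThread would let the window resize while the video plays.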
I'm still quite a newbie with Python and PyQt, so I have a really basic question. I have some text and images in a parent window inside a QTextEdit widget, and I'm trying to copy all of the content to a child window's QTextEdit. But for some reason I can't get it to copy the image: only the text is copied, not the image. Here's a snippet of the code that's giving me trouble:
self.textEdit.selectAll()
data = self.textEdit.createMimeDataFromSelection()
self.child_window.textEdit.insertFromMimeData(data) # doesn't work with images
Here is the small program that I'm trying to run:
import sys
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *

class MyWindow(QtGui.QWidget):
    def __init__(self, parent=None):
        super(MyWindow, self).__init__(parent)

        self.textEdit = QtGui.QTextEdit(self)
        self.textEdit.setText("Hello World\n")

        self.pushButton = QtGui.QPushButton(self)
        self.pushButton.setText("Copy and paste to Child Window")
        self.pushButton.clicked.connect(self.click_copy_data)

        self.pushButton2 = QtGui.QPushButton(self)
        self.pushButton2.setText("Insert Image")
        self.pushButton2.clicked.connect(self.click_file_dialog)

        self.layoutVertical = QtGui.QVBoxLayout(self)
        self.layoutVertical.addWidget(self.textEdit)
        self.layoutVertical.addWidget(self.pushButton2)
        self.layoutVertical.addWidget(self.pushButton)

        self.setGeometry(150, 150, 640, 480)

        self.child_window = CustomWindow(self)
        self.child_window.show()

    def click_copy_data(self):
        self.textEdit.selectAll()
        data = self.textEdit.createMimeDataFromSelection()
        self.child_window.textEdit.insertFromMimeData(data)

    def click_file_dialog(self):
        filePath = QtGui.QFileDialog.getOpenFileName(
            self,
            "Select an image",
            ".",
            "Image Files(*.png *.gif *.jpg *jpeg *.bmp)"
        )
        if not filePath.isEmpty():
            self.insertImage(filePath)

    def insertImage(self, filePath):
        imageUri = QtCore.QUrl(QtCore.QString("file://{0}".format(filePath)))
        image = QtGui.QImage(QtGui.QImageReader(filePath).read())

        self.textEdit.document().addResource(
            QtGui.QTextDocument.ImageResource,
            imageUri,
            QtCore.QVariant(image)
        )

        imageFormat = QtGui.QTextImageFormat()
        imageFormat.setWidth(image.width())
        imageFormat.setHeight(image.height())
        imageFormat.setName(imageUri.toString())

        textCursor = self.textEdit.textCursor()
        textCursor.movePosition(
            QtGui.QTextCursor.End,
            QtGui.QTextCursor.MoveAnchor
        )
        textCursor.insertImage(imageFormat)

        # This will hide the cursor
        blankCursor = QtGui.QCursor(QtCore.Qt.BlankCursor)
        self.textEdit.setCursor(blankCursor)

class CustomWindow(QtGui.QDialog):
    def __init__(self, parent=None):
        super(CustomWindow, self).__init__(parent)

        self.textEdit = QtGui.QTextEdit(self)

        self.layoutVertical = QtGui.QVBoxLayout(self)
        self.layoutVertical.addWidget(self.textEdit)

if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    app.setApplicationName('MyWindow')

    main = MyWindow()
    main.show()

    sys.exit(app.exec_())
The way the program works is that you have some text inside the main window and then you insert an image. Then you click the "Copy and paste to Child Window" button and it should paste all of the contents to the child, including the image. But it doesn't work the way it's supposed to: the text is copied, but I get a little file icon where the image should be.
I would appreciate your help on this.
Paul
QTextEdit doesn't decode image MIME types by default, so just subclass it to add support: you'll need to reimplement canInsertFromMimeData and insertFromMimeData (you could also use QTextBrowser instead). Just add this to your script:
class MyTextBrowser(QtGui.QTextBrowser):
    def __init__(self, parent=None):
        super(MyTextBrowser, self).__init__(parent)
        self.setReadOnly(False)

    def canInsertFromMimeData(self, source):
        if source.hasImage():
            return True
        else:
            return super(MyTextBrowser, self).canInsertFromMimeData(source)

    def insertFromMimeData(self, source):
        if source.hasImage():
            image = QtCore.QVariant(source.imageData())
            document = self.document()
            document.addResource(
                QtGui.QTextDocument.ImageResource,
                QtCore.QUrl("image"),
                image
            )
            cursor = self.textCursor()
            cursor.insertImage("image")
        else:
            super(MyTextBrowser, self).insertFromMimeData(source)
And change self.textEdit = QtGui.QTextEdit(self) into self.textEdit = MyTextBrowser(self) on both widgets.
This is the solution I ended up using, as suggested by X.Jacobs.
html = parent_textEdit.toHtml()
child_textEdit.setHtml(html)
I was making things more complicated than they needed to be. When I realized that QTextEdit keeps track of where the image is stored as a URL inside the HTML generated by toHtml(), it all made sense.
I am trying to display live images from my 1394 camera.
Currently my code is able to obtain images in a loop from the camera, and I was looking for a quick GUI that will update dynamically (in a separate thread). I could do this in PyQt, maybe using QThreads, but is there any recommendation or faster way of doing this?
Here's my code
# Loop capturing frames from camera
for frame in range(1, 500):
    print 'frame:', frame
    TIME.sleep(1)  # capture frame every second
    image_binary = pycam.cam.RetrieveBuffer()
    # convert to PIL Image
    pilimg = PIL.Image.frombuffer("L", (cimg.GetCols(), cimg.GetRows()), image_binary, 'raw', "RGBA", 0, 1)
    # At this point I want to send my image data to a GUI window and display it
Thank you.
Here's wxPython code that will do it...
import wx
from PIL import Image

SIZE = (640, 480)

def get_image():
    # Put your code here to return a PIL image from the camera.
    return Image.new('L', SIZE)

def pil_to_wx(image):
    width, height = image.size
    buffer = image.convert('RGB').tostring()
    bitmap = wx.BitmapFromBuffer(width, height, buffer)
    return bitmap

class Panel(wx.Panel):
    def __init__(self, parent):
        super(Panel, self).__init__(parent, -1)
        self.SetSize(SIZE)
        self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
        self.Bind(wx.EVT_PAINT, self.on_paint)
        self.update()

    def update(self):
        self.Refresh()
        self.Update()
        wx.CallLater(15, self.update)

    def create_bitmap(self):
        image = get_image()
        bitmap = pil_to_wx(image)
        return bitmap

    def on_paint(self, event):
        bitmap = self.create_bitmap()
        dc = wx.AutoBufferedPaintDC(self)
        dc.DrawBitmap(bitmap, 0, 0)

class Frame(wx.Frame):
    def __init__(self):
        style = wx.DEFAULT_FRAME_STYLE & ~wx.RESIZE_BORDER & ~wx.MAXIMIZE_BOX
        super(Frame, self).__init__(None, -1, 'Camera Viewer', style=style)
        panel = Panel(self)
        self.Fit()

def main():
    app = wx.PySimpleApp()
    frame = Frame()
    frame.Center()
    frame.Show()
    app.MainLoop()

if __name__ == '__main__':
    main()
I thought I'd try the PyQt4 imageviewer.py example, and it worked for me.
Thanks for all your help, guys.
Here's my modified code:
from PyQt4 import QtCore, QtGui
from PIL import ImageQt

class CameraViewer(QtGui.QMainWindow):
    def __init__(self):
        super(CameraViewer, self).__init__()

        self.imageLabel = QtGui.QLabel()
        self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)
        self.imageLabel.setScaledContents(True)

        self.scrollArea = QtGui.QScrollArea()
        self.scrollArea.setWidget(self.imageLabel)
        self.setCentralWidget(self.scrollArea)

        self.setWindowTitle("Image Viewer")
        self.resize(640, 480)

        timer = QtCore.QTimer(self)
        timer.timeout.connect(self.open)
        timer.start(33)  # ~30 Hz

    def open(self):
        # get data and display
        pilimg = getMyPILImageDatFromCamera()
        image = ImageQt.ImageQt(pilimg)
        if image.isNull():
            QtGui.QMessageBox.information(self, "Image Viewer", "Cannot load camera image.")
            return
        self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(image))
        self.imageLabel.adjustSize()

if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    CameraViewer = CameraViewer()
    CameraViewer.show()
    sys.exit(app.exec_())
I recommend using Tkinter since it's already part of Python. I've never used PIL, but a quick Google search shows it's easy to use PIL images in Tk widgets (via the PIL.ImageTk.PhotoImage() method).
If you already have a Tkinter widget set up to display images (a Label widget works fine), all you need to do is arrange for the image to be updated every second or so. You can do this by using the after command of Tkinter.
Here's an example; I don't have PIL so it uses a static image but it illustrates how to use the event loop to fetch images every second:
import Tkinter

class App(Tkinter.Tk):
    def __init__(self):
        Tkinter.Tk.__init__(self)
        self.label = Tkinter.Label(text="your image here", compound="top")
        self.label.pack(side="top", padx=8, pady=8)
        self.iteration = 0
        self.UpdateImage(1000)

    def UpdateImage(self, delay, event=None):
        # this is merely so the display changes even though the image doesn't
        self.iteration += 1
        self.image = self.get_image()
        self.label.configure(image=self.image, text="Iteration %s" % self.iteration)
        # reschedule to run again in 1 second
        self.after(delay, self.UpdateImage, 1000)

    def get_image(self):
        # this is where you get your image and convert it to
        # a Tk PhotoImage. For demonstration purposes I'll
        # just return a static image
        data = '''
            R0lGODlhIAAgALMAAAAAAAAAgHCAkC6LV76+vvXeswD/ANzc3DLNMubm+v/6zS9PT6Ai8P8A////
            /////yH5BAEAAAkALAAAAAAgACAAAAS00MlJq7046803AF3ofAYYfh8GIEvpoUZcmtOKAO5rLMva
            0rYVKqX5IEq3XDAZo1GGiOhw5rtJc09cVGo7orYwYtYo3d4+DBxJWuSCAQ30+vNTGcxnOIARj3eT
            YhJDQ3woDGl7foNiKBV7aYeEkHEignKFkk4ciYaImJqbkZ+PjZUjaJOElKanqJyRrJyZgSKkokOs
            NYa2q7mcirC5I5FofsK6hcHHgsSgx4a9yzXK0rrV19gRADs=
        '''
        image = Tkinter.PhotoImage(data=data)
        return image

if __name__ == "__main__":
    app = App()
    app.mainloop()
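To tie this back to the camera loop in the question, get_image could build the PhotoImage from the PIL frame via ImageTk. This is only a sketch under assumptions: pycam and cimg come from the question's code, the raw mode is changed to "L" to match the image mode, and the PhotoImage must stay referenced (UpdateImage already stores it in self.image) so it isn't garbage-collected:

from PIL import Image, ImageTk

def get_image(self):
    # grab one frame from the camera, as in the question's loop
    image_binary = pycam.cam.RetrieveBuffer()
    pilimg = Image.frombuffer("L", (cimg.GetCols(), cimg.GetRows()),
                              image_binary, 'raw', "L", 0, 1)
    # wrap it in a Tk-compatible image object for the Label
    return ImageTk.PhotoImage(pilimg)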
Since the good answers are pretty large, I feel like I should post a library I built specifically for this:
from cvpubsubs.webcam_pub import VideoHandlerThread
import numpy as np

image_np = np.array(pilImage)

def update_function(frame, cam_id):
    frame[...] = image_np[...]

VideoHandlerThread(video_source=image_np, callbacks=update_function).display()
Actually, that's if image_binary is a new numpy array every time. If it's assigned to the same location, then just this should work:
from cvpubsubs.webcam_pub import VideoHandlerThread
VideoHandlerThread(video_source=image_np).display()
I know OpenCV barely counts as a GUI, but this is quick code-wise.
Try taking a look at GStreamer. This is the first result Google gave me when searching for "gstreamer 1394", and this one is the first for "gstreamer pyqt".