I've been trying to create an interactive OpenCV image viewer where I can view the image immediately after a manipulation. Say, for example, I'm applying a binary thresholding operation to an image and changing the threshold value from a PyQt slider; I want to see each thresholded image in the viewer.
I've created a very basic program for this purpose using Python, OpenCV and the PyQt5 library, but the image is not being updated in the QLabel.
Below is my code:
import sys
import cv2
import numpy as np
import imutils
from PyQt5 import QtCore
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QVBoxLayout, QLCDNumber, QSlider, QLabel, QCheckBox
from PyQt5.QtGui import QPixmap, QImage
class MyWindow(QWidget):
def __init__(self):
super().__init__()
self.imglabel = QLabel(self)
self.imglabel.setFixedSize(1200, 900)
ori_img = cv2.imread("../resources/omr-1-ans-ori.png", cv2.IMREAD_COLOR)
ori_img = imutils.resize(ori_img, height=960)
self.gray_img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2GRAY)
self.gray_img_c = ori_img
self.thresh = False
self.thresh_karnel_size = 11
self.init_ui()
def init_ui(self):
# lcd = QLCDNumber(self)
hbox1 = QHBoxLayout()
cb_thresh = QCheckBox('thresh', self)
cb_thresh.setChecked(False)
cb_thresh.stateChanged.connect(self.changeTitleThresh)
hbox1.addWidget(cb_thresh)
thresh_slider = QSlider(Qt.Horizontal, self)
thresh_slider.setFocusPolicy(Qt.StrongFocus)
thresh_slider.setTickPosition(QSlider.TicksBothSides)
thresh_slider.setTickInterval(1)
thresh_slider.setSingleStep(1)
thresh_slider.setPageStep(1)
thresh_slider.setMinimum(1)
thresh_slider.setMaximum(127)
thresh_slider.valueChanged[int].connect(self.threshSliderChangeValue)
vbox = QVBoxLayout()
vbox.addLayout(hbox1)
vbox.addWidget(thresh_slider)
vbox.addWidget(self.imglabel)
self.setLayout(vbox)
self.setGeometry(50, 50, 1200, 768)
self.setWindowTitle('Learning PyQT5')
self.updateImage()
self.show()
def changeTitleThresh(self, state):
# print("thresh checkbox: ", state, Qt.Checked)
if state == Qt.Checked:
self.thresh = True
else:
self.thresh = False
def threshSliderChangeValue(self, value):
ksize = (value * 2) + 1
print("ksize: ", ksize)
if ksize > 1 and ksize % 2 != 0 and self.thresh:
self.thresh_karnel_size = ksize
self.gray_img = cv2.threshold(self.gray_img, self.thresh_karnel_size, 255, cv2.THRESH_BINARY)[1]
self.gray_img_c = cv2.cvtColor(self.gray_img.copy(), cv2.COLOR_GRAY2BGR)
self.updateImage()
def updateImage(self):
height, width, channel = self.gray_img_c.shape
bytesPerLine = 3 * width
qImg = QImage(self.gray_img_c.data, width, height, bytesPerLine, QImage.Format_RGB888)
pixMap = QPixmap.fromImage(qImg)
pixMap = pixMap.scaled(700, 500, Qt.KeepAspectRatio)
self.imglabel.setPixmap(pixMap)
self.imglabel.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = MyWindow()
sys.exit(app.exec_())
I've tried every solution I found through a Google search, but could not fix it.
Any help or hint will be much appreciated.
The original image must remain intact, but you are applying the filter to it and modifying it every time. The following example shows the correct way to do it:
import sys
import cv2
import imutils
from PyQt5 import QtCore, QtGui, QtWidgets
class MyWindow(QtWidgets.QWidget):
def __init__(self):
super().__init__()
ori_img = cv2.imread("../resources/omr-1-ans-ori.png", cv2.IMREAD_COLOR)
self.original_image_color = imutils.resize(ori_img, height=960)
self.original_image_gray = cv2.cvtColor(self.original_image_color, cv2.COLOR_BGR2GRAY)
self.thresh = False
self.thresh_karnel_size = 11
self.init_ui()
def init_ui(self):
self.imglabel = QtWidgets.QLabel(alignment=QtCore.Qt.AlignCenter)
self.imglabel.setFixedSize(1200, 900)
cb_thresh = QtWidgets.QCheckBox('thresh', checked=False)
cb_thresh.stateChanged.connect(self.changeTitleThresh)
self.thresh_slider = QtWidgets.QSlider(QtCore.Qt.Horizontal,
focusPolicy=QtCore.Qt.StrongFocus,
tickPosition=QtWidgets.QSlider.TicksBothSides,
tickInterval=1,
singleStep=1,
pageStep=1,
minimum=1,
maximum=127)
self.thresh_slider.valueChanged[int].connect(self.threshSliderChangeValue)
vbox = QtWidgets.QVBoxLayout(self)
vbox.addWidget(cb_thresh)
vbox.addWidget(self.thresh_slider)
vbox.addWidget(self.imglabel)
self.threshSliderChangeValue(self.thresh_slider.value())
self.setGeometry(50, 50, 1200, 768)
self.setWindowTitle('Learning PyQT5')
self.show()
    @QtCore.pyqtSlot(int)
def changeTitleThresh(self, state):
self.thresh = state == QtCore.Qt.Checked
self.threshSliderChangeValue(self.thresh_slider.value())
    @QtCore.pyqtSlot(int)
def threshSliderChangeValue(self, value):
ksize = (value * 2) + 1
if ksize > 1 and ksize % 2 != 0 and self.thresh:
self.thresh_karnel_size = ksize
_, gray_img = cv2.threshold(self.original_image_gray, self.thresh_karnel_size, 255, cv2.THRESH_BINARY)
gray_img_c = cv2.cvtColor(gray_img.copy(), cv2.COLOR_GRAY2BGR)
self.updateImage(gray_img_c)
else:
self.updateImage(self.original_image_color)
def updateImage(self, image):
height, width, channel = image.shape
bytesPerLine = 3 * width
qImg = QtGui.QImage(image.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)
pixMap = QtGui.QPixmap.fromImage(qImg).scaled(700, 500, QtCore.Qt.KeepAspectRatio)
self.imglabel.setPixmap(pixMap)
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
ex = MyWindow()
sys.exit(app.exec_())
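One detail worth noting (an observation, not part of the answer above): OpenCV loads color images in BGR order, while QImage.Format_RGB888 expects RGB, so the unthresholded color image will be displayed with red and blue swapped. A minimal sketch of updateImage with the conversion added, assuming image is a 3-channel BGR array:
def updateImage(self, image):
    # Convert from OpenCV's BGR order to the RGB order QImage expects.
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    height, width, _ = rgb.shape
    qImg = QtGui.QImage(rgb.data, width, height, 3 * width, QtGui.QImage.Format_RGB888)
    pixMap = QtGui.QPixmap.fromImage(qImg).scaled(700, 500, QtCore.Qt.KeepAspectRatio)
    self.imglabel.setPixmap(pixMap)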
I'm using OpenCV for some image processing and want to create a transparent overlay on my screen using PyQt widgets. Below is a basic example of sending a frame from OpenCV to PyQt through a signal/slot and displaying it in the window. The issue is that I can't get a transparent background with this method; instead it is just a black background:
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout
from PyQt5.QtGui import QPixmap
import sys
import cv2
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QThread
import numpy as np
import os
class VideoThread(QThread):
change_pixmap_signal = pyqtSignal(np.ndarray)
def run(self):
img = np.zeros((500, 500, 4), dtype=np.uint8)
cv2.rectangle(img, (0, 0), (200, 200), (0, 0, 255), 2)
        while True:
self.change_pixmap_signal.emit(img)
class App(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("Qt live label demo")
self.disply_width = 1920
self.display_height = 1080
# create the label that holds the image
self.image_label = QLabel(self)
self.image_label.resize(self.disply_width, self.display_height)
# create a text label
self.textLabel = QLabel('Webcam')
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setStyleSheet("background-color:transparent;")
# create a vertical box layout and add the two labels
vbox = QVBoxLayout()
vbox.addWidget(self.image_label)
vbox.addWidget(self.textLabel)
# set the vbox layout as the widgets layout
self.setLayout(vbox)
# create the video capture thread
self.thread = VideoThread()
# connect its signal to the update_image slot
self.thread.change_pixmap_signal.connect(self.update_image)
# start the thread
self.thread.start()
    @pyqtSlot(np.ndarray)
def update_image(self, cv_img):
"""Updates the image_label with a new opencv image"""
qt_img = self.convert_cv_qt(cv_img)
self.image_label.setPixmap(qt_img)
def convert_cv_qt(self, cv_img):
"""Convert from an opencv image to QPixmap"""
rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
h, w, ch = rgb_image.shape
bytes_per_line = ch * w
convert_to_Qt_format = QtGui.QImage(
rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)
p = convert_to_Qt_format.scaled(
self.disply_width, self.display_height, Qt.KeepAspectRatio)
return QPixmap.fromImage(p)
if __name__ == "__main__":
app = QApplication(sys.argv)
a = App()
a.show()
sys.exit(app.exec_())
If it isn't possible to send a frame with an alpha channel to PyQt, I was wondering whether it's possible to just send the rectangle's four point locations and use PyQt painting to draw a rectangle on the screen instead. I think this would require a widget.update(), but I'm not sure where to invoke that.
If you are going to use transparency, then the colors and the image must both be 4-channel; in your case you are passing a 3-channel color to the cv2.rectangle method and then using a 3-channel format in the QImage. On the other hand, cv2.rectangle returns the drawn image; it does not modify the input array.
def run(self):
img = np.zeros((500, 500, 4), dtype=np.uint8)
output = cv2.rectangle(img, (0, 0), (200, 200), (0, 0, 255, 255), 2)
while True:
self.change_pixmap_signal.emit(output)
QThread.msleep(1)
def convert_cv_qt(self, cv_img):
h, w, ch = cv_img.shape
bytes_per_line = ch * w
convert_to_Qt_format = QtGui.QImage(
cv_img.data, w, h, bytes_per_line, QtGui.QImage.Format_RGBA8888
)
p = convert_to_Qt_format.scaled(
self.disply_width, self.display_height, Qt.KeepAspectRatio
)
return QPixmap.fromImage(p)
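Regarding the second part of the question (sending only the rectangle coordinates and letting Qt draw them): that also works. Below is a rough sketch under the same widget setup; the signal name, the (x, y, w, h) tuple format and the 100 ms delay are assumptions for illustration, not an existing API:
from PyQt5 import QtGui
from PyQt5.QtCore import Qt, QThread, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QWidget

class RectThread(QThread):
    # Emits rectangle coordinates instead of a full frame.
    rect_signal = pyqtSignal(tuple)

    def run(self):
        while True:
            self.rect_signal.emit((0, 0, 200, 200))
            QThread.msleep(100)

class Overlay(QWidget):
    def __init__(self):
        super().__init__()
        self.rect = None
        self.setWindowFlags(Qt.FramelessWindowHint)
        self.setAttribute(Qt.WA_TranslucentBackground)

    @pyqtSlot(tuple)
    def update_rect(self, rect):
        self.rect = rect
        self.update()  # schedules a paintEvent

    def paintEvent(self, event):
        if self.rect is None:
            return
        painter = QtGui.QPainter(self)
        painter.setPen(QtGui.QPen(QtGui.QColor(255, 0, 0), 2))
        x, y, w, h = self.rect
        painter.drawRect(x, y, w, h)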
I am trying to make a program that can pixelate images. When an image is uploaded, it is supposed to go through the color_quantization function and then the pixelate function. But when I run the program, the image only goes through the pixelate function.
from PyQt5 import QtWidgets as Qtw, QtGui, QtCore
from PyQt5.QtWidgets import QFileDialog
from pixel import Ui_Form
from PyQt5.QtGui import QImage
import cv2
import numpy as np
class Worker(QtCore.QObject):
hash = QtCore.pyqtSignal()
def color_quantization(self, img, k=3):
data = np.float32(img).reshape((-1, 3))
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.001)
ret, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
center = np.uint8(center)
result = center[label.flatten()]
result = result.reshpe(img.shape)
for x in result:
self.hash.emit(x)
return x
class Pixel(Qtw.QWidget):
hash_requested = QtCore.pyqtSignal()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ui = Ui_Form()
self.ui.setupUi(self)
self.ui.upload.clicked.connect(self.upload_image)
self.worker = Worker()
self.worker_thread = QtCore.QThread()
self.worker.hash.connect(self.upload_image)
self.hash_requested.connect(self.worker.color_quantization)
self.worker.moveToThread(self.worker_thread)
self.worker_thread.start()
def upload_image(self):
filename = QFileDialog.getOpenFileName(filter="Image(*.*)")[0]
img = cv2.imread(filename)
self.pixelate(img)
def pixelate(self, image):
height, width = image.shape[:2]
w, h = (128, 128)
temp = cv2.resize(image, (w, h), interpolation=cv2.INTER_LINEAR)
output = cv2.resize(temp, (width, height), interpolation=cv2.INTER_NEAREST)
frame = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
image = QImage(frame, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
self.ui.label.setPixmap(QtGui.QPixmap.fromImage(image))
if __name__ == '__main__':
app = Qtw.QApplication([])
win = Pixel()
win.show()
app.exec_()
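For clarity, the intended order of operations (quantize first, then pixelate) can be sketched as a plain call chain, independent of the thread and signal wiring. This is an illustrative sketch rather than the asker's code, and it assumes result.reshpe in the question is a typo for reshape:
import cv2
import numpy as np

def color_quantization(img, k=3):
    # k-means color quantization, as in the question (with the reshape typo fixed).
    data = np.float32(img).reshape((-1, 3))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 0.001)
    _, label, center = cv2.kmeans(data, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    center = np.uint8(center)
    return center[label.flatten()].reshape(img.shape)

def pixelate(img, block=(128, 128)):
    # Downscale, then upscale with nearest-neighbour interpolation.
    h, w = img.shape[:2]
    temp = cv2.resize(img, block, interpolation=cv2.INTER_LINEAR)
    return cv2.resize(temp, (w, h), interpolation=cv2.INTER_NEAREST)

# Quantize first, then pixelate, before converting to a QImage for display.
# "input.png" is a placeholder filename.
output = pixelate(color_quantization(cv2.imread("input.png")))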
I'd like to use the wheelEvent to resize an image and place a QGraphicPixmap into a QGraphicsScene.
Before adding the original image, it is resized to around 1/3rd its original size. In the wheelEvent, I'm calling a function that will resize the original image and create a QImage to set the QGraphicsPixmap.
After adding the resized pixmap to the scene, the pixels that were originally under the cursor before the scale have shifted. I'm not sure which positions I need to be mapping to/from the scene to achieve this.
I've tried scaling the graphicsPixmap, scaling and translating the graphicsPixmap, scaling the view and translating the graphicsPixmap/setting an offset.
I clearly don't understand something about what's happening, but I'm not sure what that is.
The wheelEvent below works perfectly until maybe_resize is called.
Depending on the size of the current image in the viewer, the maybe_resize method will either resize the current ndarray image, create a new QImage, and set a new pixmap on the QGraphicsPixmapItem, or exit without resizing.
If you run the code as is, the pixmap is in the same place under the cursor, but if you uncomment maybe_resize this is no longer the case.
import cv2
import numpy as np
from PyQt5.QtCore import QRectF, QSize, Qt, pyqtSignal
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import (QApplication,
QFrame,
QGraphicsPixmapItem,
QGraphicsScene,
QGraphicsView,
QMainWindow,
QSizePolicy)
class GraphicsView(QGraphicsView):
def __init__(self, parent):
super(GraphicsView, self).__init__(parent)
self.pixmap = QPixmap()
self._zoom_level = 0
self._scene = Scene(self)
self.setScene(self._scene)
self.gpm = QGraphicsPixmapItem()
self._scene.addItem(self.gpm)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setFrameShape(QFrame.NoFrame)
self.has_image = False
def maybe_resize(self, factor):
self.resize_requested(factor)
def read_image(self, path):
self.base_image = cv2.imread(path, -1)
self._original_res = self.base_image.shape
h, w = self.base_image.shape[0], self.base_image.shape[1]
self.resized_image = cv2.resize(self.base_image, (w // 4, h // 4))
self.has_image = True
self.set_image(self.resized_image)
return self.resized_image
def resize_requested(self, factor):
factor = max(1. * (self._zoom_level * factor), 1)
h = int(self.resized_image.shape[0] * factor)
w = int(self.resized_image.shape[1] * factor)
src = cv2.resize(self.base_image, (w, h))
dst = np.ndarray(src.shape, src.dtype)
dst[:, :, :] = src
self.set_image(dst)
def wheelEvent(self, event):
factor = 1.1
if event.angleDelta().y() < 0:
factor = 0.9
self._zoom_level-=1
else:
self._zoom_level+=1
view_pos = event.pos()
scene_pos = self.mapToScene(view_pos)
self.centerOn(scene_pos)
self.scale(factor, factor)
delta = self.mapToScene(view_pos) - self.mapToScene(self.viewport().rect().center())
self.centerOn(scene_pos - delta)
# self.maybe_resize(factor)
def set_image(self, img):
if not self.has_image:
return
shape = img.shape
w = shape[1]
h = shape[0]
self._image = img
q_img_format = QImage.Format_RGB888
try:
bands = shape[2]
except IndexError:
bands = 1
q_img = QImage(img, w, h, w * bands, q_img_format)
self.pixmap = self.pixmap.fromImage(q_img)
self.setSceneRect(QRectF(self.pixmap.rect()))
self.gpm.setPixmap(self.pixmap)
class Scene(QGraphicsScene):
zoom_changed = pyqtSignal(float)
def __init__(self, parent=None):
super(Scene, self).__init__(parent)
class Window(QMainWindow):
def __init__(self):
super(Window, self).__init__()
self.gv = GraphicsView(self)
self.setCentralWidget(self.gv)
def load_image(self, path):
self.gv.read_image(path)
def sizeHint(self):
return QSize(800, 800)
if __name__ == "__main__":
app = QApplication([])
w = Window()
w.load_image('test.jpg')
w.show()
app.exit(app.exec_())
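As a side note (a possible simplification, not something taken from the code above): QGraphicsView can keep the point under the cursor fixed during scaling by itself via its transformation anchor, which removes most of the manual centerOn bookkeeping in wheelEvent. A sketch:
# In GraphicsView.__init__:
self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
self.setResizeAnchor(QGraphicsView.AnchorUnderMouse)

# wheelEvent then only needs to adjust the zoom level and call scale():
def wheelEvent(self, event):
    factor = 1.1 if event.angleDelta().y() > 0 else 0.9
    self._zoom_level += 1 if factor > 1 else -1
    self.scale(factor, factor)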
#Importing necessary libraries, mainly the OpenCV, and PyQt libraries
import cv2
import numpy as np
import sys
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5.QtCore import pyqtSignal
class ShowVideo(QtCore.QObject):
#initiating the built in camera
camera_port = -1
camera = cv2.VideoCapture(camera_port)
VideoSignal = QtCore.pyqtSignal(QtGui.QImage)
def __init__(self, parent = None):
super(ShowVideo, self).__init__(parent)
    @QtCore.pyqtSlot()
def startVideo(self):
run_video = True
while run_video:
ret, image = self.camera.read()
color_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
height, width, _ = color_swapped_image.shape
qt_image = QtGui.QImage(color_swapped_image.data,
width,
height,
color_swapped_image.strides[0],
QtGui.QImage.Format_RGB888)
pixmap = QtGui.QPixmap(qt_image)
qt_image = pixmap.scaled(640, 480, QtCore.Qt.KeepAspectRatio)
qt_image = QtGui.QImage(qt_image)
self.VideoSignal.emit(qt_image)
    @QtCore.pyqtSlot()
def makeScreenshot(self):
#cv2.imwrite("test.jpg", self.image)
print("Screenshot saved")
#self.qt_image.save('test.jpg')
class ImageViewer(QtWidgets.QWidget):
def __init__(self, parent = None):
super(ImageViewer, self).__init__(parent)
self.image = QtGui.QImage()
self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)
def paintEvent(self, event):
painter = QtGui.QPainter(self)
painter.drawImage(0,0, self.image)
self.image = QtGui.QImage()
def initUI(self):
self.setWindowTitle('Test')
    @QtCore.pyqtSlot(QtGui.QImage)
def setImage(self, image):
if image.isNull():
print("viewer dropped frame!")
self.image = image
if image.size() != self.size():
self.setFixedSize(image.size())
self.update()
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
thread = QtCore.QThread()
thread.start()
vid = ShowVideo()
vid.moveToThread(thread)
image_viewer = ImageViewer()
#image_viewer.resize(200,400)
vid.VideoSignal.connect(image_viewer.setImage)
#Button to start the videocapture:
push_button = QtWidgets.QPushButton('Start')
push_button.clicked.connect(vid.startVideo)
push_button2 = QtWidgets.QPushButton('Screenshot')
push_button2.clicked.connect(vid.makeScreenshot)
vertical_layout = QtWidgets.QVBoxLayout()
vertical_layout.addWidget(image_viewer)
vertical_layout.addWidget(push_button)
vertical_layout.addWidget(push_button2)
layout_widget = QtWidgets.QWidget()
layout_widget.setLayout(vertical_layout)
main_window = QtWidgets.QMainWindow()
main_window.setCentralWidget(layout_widget)
main_window.resize(640,480)
main_window.show()
sys.exit(app.exec_())
This code shows video from the camera in an endless loop using OpenCV and PyQt5. But how can I take a screenshot without stopping the video? I think the loop needs to be paused for a moment, the screenshot taken, and then the loop resumed.
You can use cv2.waitKey() for the same, as shown below:
while run_video:
ret, image = self.camera.read()
if(cv2.waitKey(10) & 0xFF == ord('s')):
cv2.imwrite("screenshot.jpg",image)
(I'm guessing that by the term "screenshot", you mean the camera frame, and not the image of the entire screen.)
When you press 's' on the keyboard, it'll perform imwrite.
Note that if you wish to save multiple images, you'd have to vary the filename. The above code will overwrite screenshot.jpg to save only the latest frame.
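An alternative that stays entirely inside the Qt application (a sketch built on the question's own code, not part of the answer above): since makeScreenshot() is already connected to a button, it can simply set a flag that the capture loop checks, so the video never stops:
@QtCore.pyqtSlot()
def makeScreenshot(self):
    # Only request the screenshot here; the capture loop does the writing.
    self.screenshot_requested = True

# inside startVideo(), right after ret, image = self.camera.read():
if getattr(self, "screenshot_requested", False):
    cv2.imwrite("screenshot.jpg", image)  # the filename is a placeholder
    self.screenshot_requested = False
    print("Screenshot saved")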
I'm currently using PyQt4 and qrcode 4.0.4.
from PyQt4 import QtGui, QtCore
from PIL.ImageQt import ImageQt
import qrcode
class QRLabel(QtGui.QLabel):
def __init__(self, text=""):
super(QRLabel, self).__init__()
self.setCode(text)
def setCode(self, text=""):
self.text = text
qrImg = qrcode.make(text)
imgQt = ImageQt(qrImg.convert("RGB")) # keep a reference!
pixm = QtGui.QPixmap.fromImage(imgQt)
self.setPixmap(pixm.scaled(self.size(),QtCore.Qt.KeepAspectRatio))
As you can see, there are several hurdles to pass before you get the image on your screen. The QR code starts as an RGBA PIL Image; it is converted to RGB, then to a PIL ImageQt object, then to a QPixmap, which is finally placed on a QLabel with a scaling fix.
If you don't explicitly store the imgQt reference, you get rubbish when you load the widget.
My question: is there anything I could do to improve this, as it seems there are so many conversions involved.
From the qrcode docs, it appears you can create your own image_factory, which might allow you to streamline the process.
You just need to subclass qrcode.image.base.BaseImage and reimplement the new_image, drawrect and save methods. This subclass could wrap a QImage and therefore eliminate the need for the PIL conversion step.
UPDATE:
Here's a demo that eliminates the PIL dependency (which is just as well, because I found PIL crashes with certain inputs):
from PyQt4 import QtGui, QtCore
import qrcode
class Image(qrcode.image.base.BaseImage):
def __init__(self, border, width, box_size):
self.border = border
self.width = width
self.box_size = box_size
size = (width + border * 2) * box_size
self._image = QtGui.QImage(
size, size, QtGui.QImage.Format_RGB16)
self._image.fill(QtCore.Qt.white)
def pixmap(self):
return QtGui.QPixmap.fromImage(self._image)
def drawrect(self, row, col):
painter = QtGui.QPainter(self._image)
painter.fillRect(
(col + self.border) * self.box_size,
(row + self.border) * self.box_size,
self.box_size, self.box_size,
QtCore.Qt.black)
def save(self, stream, kind=None):
pass
class Window(QtGui.QWidget):
def __init__(self):
QtGui.QWidget.__init__(self)
self.label = QtGui.QLabel(self)
self.edit = QtGui.QLineEdit(self)
self.edit.returnPressed.connect(self.handleTextEntered)
layout = QtGui.QVBoxLayout(self)
layout.addWidget(self.label)
layout.addWidget(self.edit)
def handleTextEntered(self):
text = unicode(self.edit.text())
self.label.setPixmap(
qrcode.make(text, image_factory=Image).pixmap())
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
window = Window()
window.setGeometry(500, 300, 200, 200)
window.show()
sys.exit(app.exec_())
You can also use PNG as an intermediate format and store it in memory using StringIO.
import qrcode
import StringIO
def set_qr_label(label, text):
"""
set qrcode image on QLabel
    @param label: QLabel
    @param text: text for the QR code
"""
buf = StringIO.StringIO()
img = qrcode.make(text)
img.save(buf, "PNG")
label.setText("")
qt_pixmap = QtGui.QPixmap()
qt_pixmap.loadFromData(buf.getvalue(), "PNG")
label.setPixmap(qt_pixmap)
Great answer by mgmax, but it is Python 2 only. For Python 3 use:
import qrcode
from io import BytesIO
def set_qr_label(label, text):
"""
set qrcode image on QLabel
    @param label: QLabel
    @param text: text for the QR code
"""
buf = BytesIO()
img = qrcode.make(text)
img.save(buf, "PNG")
label.setText("")
qt_pixmap = QtGui.QPixmap()
qt_pixmap.loadFromData(buf.getvalue(), "PNG")
label.setPixmap(qt_pixmap)
This is ekhumoro's answer working in PyQt5 with Python 3. Hope it helps someone down the line.
from PyQt5 import QtWidgets, QtCore, QtGui
import qrcode
class Image(qrcode.image.base.BaseImage):
def __init__(self, border, width, box_size):
self.border = border
self.width = width
self.box_size = box_size
size = (width + border * 2) * box_size
self._image = QtGui.QImage(
size, size, QtGui.QImage.Format_RGB16)
self._image.fill(QtCore.Qt.white)
def pixmap(self):
return QtGui.QPixmap.fromImage(self._image)
def drawrect(self, row, col):
painter = QtGui.QPainter(self._image)
painter.fillRect(
(col + self.border) * self.box_size,
(row + self.border) * self.box_size,
self.box_size, self.box_size,
QtCore.Qt.black)
def save(self, stream, kind=None):
pass
class Window(QtWidgets.QWidget):
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.label = QtWidgets.QLabel(self)
self.edit = QtWidgets.QLineEdit(self)
self.edit.returnPressed.connect(self.handleTextEntered)
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(self.label)
layout.addWidget(self.edit)
def handleTextEntered(self):
        text = self.edit.text()  # was: unicode(self.edit.text()) in Python 2
self.label.setPixmap(
qrcode.make(text, image_factory=Image).pixmap())
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
window = Window()
window.setGeometry(500, 300, 200, 200)
window.show()
sys.exit(app.exec_())