Add glumpy widget to PyQt5 GUI - python

I have a simple GUI onto which I am plotting images. I create widgets for text output and array plots, and I would like to add one for 3D visualisation using glumpy, say to show this example from the glumpy documentation.
What I would like is an "extra" slot in which to have the glumpy output.
I saw, for example, in this GitHub thread that people were referring to PyQt5 and glumpy integration, but I only see snippets of code and nothing that works as a standalone example.
Also, it seems glumpy is already using PyQt5 in the backend (here), but I don't understand this well enough to know if and how I can access it a posteriori.
This is my MWE:
from PyQt5 import QtGui, QtCore
import pyqtgraph as pg
import sys

width = 1000
height = 500

class layout():
    def setup(self, window):
        self.window = window
        self.window.resize(width, height)

        self.centralwidget = QtGui.QWidget(self.window)
        self.horizontallayout = QtGui.QHBoxLayout(self.centralwidget)
        self.window.setCentralWidget(self.centralwidget)

        self.dialogue = QtGui.QTextEdit()
        self.horizontallayout.addWidget(self.dialogue)

        self.plot = pg.GraphicsLayoutWidget(self.window)
        self.horizontallayout.addWidget(self.plot)
        self.plot1 = self.plot.addPlot(colspan=1)

class Window(pg.Qt.QtGui.QMainWindow, layout):
    def __init__(self, shot = None):
        super(Window, self).__init__()
        self.setup(self)
        self.show()

if __name__ == '__main__':
    app = pg.Qt.QtGui.QApplication([])
    Window()
    sys.exit(app.exec_())

You have to get the internal QGLWidget through the "_native_window" attribute. The following example is based on the official glumpy example geometry-surface.py; a stripped-down sketch of the embedding step alone follows after the full example.
import sys

from PyQt5 import QtGui, QtCore
import pyqtgraph as pg
import numpy as np

from glumpy import app as glumpy_app, gl, gloo, data, library
from glumpy.geometry import primitives
from glumpy.transforms import Trackball

width = 1000
height = 500

glumpy_app.use("qt5")

vertex = """
#include "misc/spatial-filters.frag"
uniform float height;
uniform sampler2D data;
uniform vec2 data_shape;
attribute vec3 position;
attribute vec2 texcoord;
varying vec3 v_position;
varying vec2 v_texcoord;
void main()
{
    float z = height*Bicubic(data, data_shape, texcoord).r;
    gl_Position = <transform>;
    v_texcoord = texcoord;
    v_position = vec3(position.xy, z);
}
"""

fragment = """
#include "misc/spatial-filters.frag"
uniform mat4 model;
uniform mat4 view;
uniform mat4 normal;
uniform sampler2D texture;
uniform float height;
uniform vec4 color;
uniform sampler2D data;
uniform vec2 data_shape;
uniform vec3 light_color[3];
uniform vec3 light_position[3];
varying vec3 v_position;
varying vec2 v_texcoord;

float lighting(vec3 v_normal, vec3 light_position)
{
    // Calculate normal in world coordinates
    vec3 n = normalize(normal * vec4(v_normal,1.0)).xyz;
    // Calculate the location of this fragment (pixel) in world coordinates
    vec3 position = vec3(view * model * vec4(v_position, 1));
    // Calculate the vector from this pixels surface to the light source
    vec3 surface_to_light = light_position - position;
    // Calculate the cosine of the angle of incidence (brightness)
    float brightness = dot(n, surface_to_light) /
                       (length(surface_to_light) * length(n));
    brightness = max(min(brightness,1.0),0.0);
    return brightness;
}

void main()
{
    mat4 model = <transform.trackball_model>;
    // Extract data value
    float value = Bicubic(data, data_shape, v_texcoord).r;
    // Compute surface normal using neighbour values
    float hx0 = height*Bicubic(data, data_shape, v_texcoord+vec2(+1,0)/data_shape).r;
    float hx1 = height*Bicubic(data, data_shape, v_texcoord+vec2(-1,0)/data_shape).r;
    float hy0 = height*Bicubic(data, data_shape, v_texcoord+vec2(0,+1)/data_shape).r;
    float hy1 = height*Bicubic(data, data_shape, v_texcoord+vec2(0,-1)/data_shape).r;
    vec3 dx = vec3(2.0/data_shape.x,0.0,hx0-hx1);
    vec3 dy = vec3(0.0,2.0/data_shape.y,hy0-hy1);
    vec3 v_normal = normalize(cross(dx,dy));
    // Map value to rgb color
    float c = 0.6 + 0.4*texture2D(texture, v_texcoord).r;
    vec4 l1 = vec4(light_color[0] * lighting(v_normal, light_position[0]), 1);
    vec4 l2 = vec4(light_color[1] * lighting(v_normal, light_position[1]), 1);
    vec4 l3 = vec4(light_color[2] * lighting(v_normal, light_position[2]), 1);
    gl_FragColor = color * vec4(c,c,c,1) * (0.5 + 0.5*(l1+l2+l3));
}
"""

def func3(x, y):
    return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-(x ** 2) - y ** 2)

class layout:
    def setup(self, window):
        self.window = window
        self.window.resize(width, height)

        self.dialogue = QtGui.QTextEdit()

        self.plot = pg.GraphicsLayoutWidget(self.window)
        self.plot1 = self.plot.addPlot(colspan=1)

        self.glumpy_window = glumpy_app.Window(color=(1, 1, 1, 1))
        self.glumpy_window._native_window

        self.centralwidget = QtGui.QWidget(self.window)
        self.horizontallayout = QtGui.QHBoxLayout(self.centralwidget)
        self.window.setCentralWidget(self.centralwidget)

        self.horizontallayout.addWidget(self.dialogue, stretch=1)
        self.horizontallayout.addWidget(self.plot, stretch=1)
        self.horizontallayout.addWidget(self.glumpy_window._native_window, stretch=1)

class Window(pg.Qt.QtGui.QMainWindow, layout):
    def __init__(self, shot=None):
        super(Window, self).__init__()
        self.setup(self)

        n = 64
        self.surface = gloo.Program(vertex, fragment)
        self.vertices, self.s_indices = primitives.plane(2.0, n=n)
        self.surface.bind(self.vertices)

        I = []
        for i in range(n):
            I.append(i)
        for i in range(1, n):
            I.append(n - 1 + i * n)
        for i in range(n - 1):
            I.append(n * n - 1 - i)
        for i in range(n - 1):
            I.append(n * (n - 1) - i * n)
        self.b_indices = np.array(I, dtype=np.uint32).view(gloo.IndexBuffer)

        x = np.linspace(-2.0, 2.0, 32).astype(np.float32)
        y = np.linspace(-2.0, 2.0, 32).astype(np.float32)
        X, Y = np.meshgrid(x, y)
        Z = func3(X, Y)

        self.surface["data"] = (Z - Z.min()) / (Z.max() - Z.min())
        self.surface["data"].interpolation = gl.GL_NEAREST
        self.surface["data_shape"] = Z.shape[1], Z.shape[0]
        self.surface["u_kernel"] = data.get("spatial-filters.npy")
        self.surface["u_kernel"].interpolation = gl.GL_LINEAR
        self.surface["texture"] = data.checkerboard(32, 24)

        self.transform = Trackball("vec4(position.xy, z, 1.0)")
        self.surface["transform"] = self.transform
        self.glumpy_window.attach(self.transform)

        T = (Z - Z.min()) / (Z.max() - Z.min())
        self.surface["height"] = 0.75
        self.surface["light_position[0]"] = 3, 0, 0 + 5
        self.surface["light_position[1]"] = 0, 3, 0 + 5
        self.surface["light_position[2]"] = -3, -3, +5
        self.surface["light_color[0]"] = 1, 0, 0
        self.surface["light_color[1]"] = 0, 1, 0
        self.surface["light_color[2]"] = 0, 0, 1
        phi, theta = -45, 0
        self.time = 0

        self.glumpy_window.set_handler("on_init", self.on_init)
        self.glumpy_window.set_handler("on_draw", self.on_draw)

    def on_init(self):
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
        gl.glPolygonOffset(1, 1)
        gl.glEnable(gl.GL_LINE_SMOOTH)
        gl.glLineWidth(2.5)

    def on_draw(self, dt):
        self.time += dt
        self.glumpy_window.clear()
        self.surface["data"]

        gl.glDisable(gl.GL_BLEND)
        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glEnable(gl.GL_POLYGON_OFFSET_FILL)
        self.surface["color"] = 1, 1, 1, 1
        self.surface.draw(gl.GL_TRIANGLES, self.s_indices)

        gl.glDisable(gl.GL_POLYGON_OFFSET_FILL)
        gl.glEnable(gl.GL_BLEND)
        gl.glDepthMask(gl.GL_FALSE)
        self.surface["color"] = 0, 0, 0, 1
        self.surface.draw(gl.GL_LINE_LOOP, self.b_indices)
        gl.glDepthMask(gl.GL_TRUE)

        model = self.surface["transform"]["model"].reshape(4, 4)
        view = self.surface["transform"]["view"].reshape(4, 4)
        self.surface["view"] = view
        self.surface["model"] = model
        self.surface["normal"] = np.array(np.matrix(np.dot(view, model)).I.T)
        self.surface["height"] = 0.75 * np.cos(self.time)

    def showEvent(self, event):
        super().showEvent(event)
        self.glumpy_window.dispatch_event("on_resize", *self.glumpy_window.get_size())

    def closeEvent(self, event):
        super().closeEvent(event)
        self.glumpy_window.close()

if __name__ == "__main__":
    app = pg.Qt.QtGui.QApplication([])
    w = Window()
    w.show()
    glumpy_app.run()
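For reference, the embedding step in isolation looks like the sketch below. It is a stripped-down variant of the example above (no handlers, no drawing), showing only how the native Qt widget ends up in an ordinary layout; the widget and variable names are made up for the sketch.

import sys

from PyQt5 import QtWidgets
from glumpy import app as glumpy_app

glumpy_app.use("qt5")                       # force glumpy's Qt5 backend
qt_app = QtWidgets.QApplication(sys.argv)   # create the Qt application first, as in the full example

glumpy_window = glumpy_app.Window(color=(1, 1, 1, 1))

main = QtWidgets.QMainWindow()
central = QtWidgets.QWidget()
layout = QtWidgets.QHBoxLayout(central)
layout.addWidget(QtWidgets.QTextEdit())
layout.addWidget(glumpy_window._native_window)  # the internal QGLWidget wrapped by glumpy
main.setCentralWidget(central)
main.show()

glumpy_app.run()                            # drives glumpy and the Qt event loop

Without a draw handler there is nothing to render, but the glumpy pane still resizes and behaves like any other widget in the layout.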

Related

Convert 3D CBCT DICOM to 2D Panoramic View

I want to generate a 2D panoramic view from a series of 3D CBCT dental images in Python.
I am converting the series of 3D CBCT images to a 2D panoramic view; the code runs without errors, but the output is not complete.
import sys
import vtkmodules.all as vtk
from PyQt5 import QtCore, QtWidgets
from vtkmodules.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor

def create_reader(dir):
    reader = vtk.vtkDICOMImageReader()
    reader.SetDirectoryName(dir)
    reader.Update()
    return reader

def create_center(reader):
    (xMin, xMax, yMin, yMax, zMin, zMax) = reader.GetExecutive().GetWholeExtent(reader.GetOutputInformation(0))
    (xSpacing, ySpacing, zSpacing) = reader.GetOutput().GetSpacing()
    (x0, y0, z0) = reader.GetOutput().GetOrigin()
    center = [x0 + xSpacing * 0.5 * (xMin + xMax),
              y0 + ySpacing * 0.5 * (yMin + yMax),
              z0 + zSpacing * 0.5 * (zMin + zMax)]
    return center

def create_sagittal_slice_matrix(center):
    # Create sagittal slice matrix
    sagittal = vtk.vtkMatrix4x4()
    sagittal.DeepCopy((0, 0, -1, center[0],
                       1, 0, 0, center[1],
                       0, -1, 0, center[2],
                       0, 0, 0, 1))
    return sagittal

def create_resliced_image(reader, sagittal, frame):
    # Reslice image
    widget = QVTKRenderWindowInteractor(frame)
    slice = vtk.vtkImageReslice()
    slice.SetInputConnection(reader.GetOutputPort())
    slice.SetOutputDimensionality(2)
    slice.SetResliceAxes(sagittal)
    slice.SetInterpolationModeToLinear()
    return widget, slice

def create_display_image_actor(slice):
    # Display the image
    actor = vtk.vtkImageActor()
    actor.GetMapper().SetInputConnection(slice.GetOutputPort())
    # renderer = vtk.vtkRenderer()
    return actor

def adjust_renderer_settings(renderer, widget, actor):
    # Remove renderer contents and reset
    renderer.RemoveAllViewProps()
    renderer.ResetCamera()
    widget.GetRenderWindow().Render()
    renderer.AddActor(actor)
    widget.GetRenderWindow().AddRenderer(renderer)
    return widget

def setup_interaction(widget):
    # Set up the interaction
    slice_interactorStyle = vtk.vtkInteractorStyleImage()
    slice_interactor = widget.GetRenderWindow().GetInteractor()
    slice_interactor.SetInteractorStyle(slice_interactorStyle)
    widget.GetRenderWindow().SetInteractor(slice_interactor)
    widget.GetRenderWindow().Render()
    return slice_interactor
    # Start interaction
    # slice_interactor.Start()

class MainWindow(QtWidgets.QMainWindow):
    def __init__(self, parent=None):
        QtWidgets.QMainWindow.__init__(self, parent)
        self.frame = QtWidgets.QFrame()
        self.vl = QtWidgets.QVBoxLayout()
        reader = create_reader(r"C:\work.ateeb\2.16.840.114421.10241.9572018034.9603554034\DECOMPRESSED\One")
        center = create_center(reader)
        sagittal = create_sagittal_slice_matrix(center)
        self.vtkWidget, slice = create_resliced_image(reader, sagittal, self.frame)
        self.vl.addWidget(self.vtkWidget)
        # create renderer
        renderer = vtk.vtkRenderer()
        # create actor
        actor = create_display_image_actor(slice)
        self.vtkWidget = adjust_renderer_settings(renderer, self.vtkWidget, actor)
        slice_interactor = setup_interaction(self.vtkWidget)
        self.show()
        self.frame.setLayout(self.vl)
        self.setCentralWidget(self.frame)
        slice_interactor.Initialize()
        slice_interactor.Start()

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = MainWindow()
    sys.exit(app.exec_())
I was expecting output like this: [expected panoramic view image]
But the output the code is generating is: [actual, incomplete output image]

Stack two widgets in a GUI

From this answer I have code that adds a number of Widgets to a PyQt5 GUI:
I would like to add another widget just in this configuration:
I tried playing around with QGridLayout() on a simplified version of the problem (MWE below).
What I get is something like this (plots all squashed to one side),
and varying the grid coordinates has no effect whatsoever.
MWE:
import PyQt5
from PyQt5 import QtGui, QtCore
import pyqtgraph as pg
import sys
import numpy as np

width = 1000
height = 500

class layout():
    def setup(self, window):
        self.window = window
        self.window.resize(width, height)

        grid = PyQt5.QtWidgets.QGridLayout()

        self.dialogue = QtGui.QTextEdit()
        grid.addWidget(self.dialogue, 100, 0)

        self.plot = pg.GraphicsLayoutWidget(self.window)
        grid.addWidget(self.plot, 200, 200)

class Window(pg.Qt.QtGui.QMainWindow, layout):
    def __init__(self, shot = None):
        super(Window, self).__init__()
        self.setup(self)
        self.show()

if __name__ == '__main__':
    app = pg.Qt.QtGui.QApplication([])
    Window()
    sys.exit(app.exec_())
The errors in the code provided by the OP are:
The layout (QGridLayout) was never set on a widget. Qt layouts are not visual elements; they only manage the geometry of the widgets they contain.
A central widget must be set when a QMainWindow is used.
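In isolation, the two fixes amount to the short sketch below (a minimal runnable example using the same pyqtgraph.Qt imports as the MWE; the widgets are just placeholders for the demo):

import sys
from pyqtgraph.Qt import QtGui

app = QtGui.QApplication([])
window = QtGui.QMainWindow()

grid = QtGui.QGridLayout()
grid.addWidget(QtGui.QTextEdit(), 0, 0)

central_widget = QtGui.QWidget()          # a QMainWindow needs a central widget...
central_widget.setLayout(grid)            # ...and the layout must be installed on a widget
window.setCentralWidget(central_widget)

window.show()
sys.exit(app.exec_())

The full corrected version of the MWE: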
import sys

import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore

width = 1000
height = 500

class layout:
    def setup(self, window):
        self.window = window
        self.window.resize(width, height)

        grid = QtGui.QGridLayout()

        self.dialogue = QtGui.QTextEdit()
        grid.addWidget(self.dialogue, 100, 0)

        self.plot = pg.GraphicsLayoutWidget()
        grid.addWidget(self.plot, 200, 200)

        central_widget = QtGui.QWidget()
        window.setCentralWidget(central_widget)
        central_widget.setLayout(grid)

class Window(pg.Qt.QtGui.QMainWindow, layout):
    def __init__(self, shot=None):
        super(Window, self).__init__()
        self.setup(self)

if __name__ == "__main__":
    app = QtGui.QApplication([])
    w = Window()
    w.show()
    sys.exit(app.exec_())
That said, this problem is separate from the initial goal.
Probably (since the OP does not show any attempt at the initial goal) the mistake is assuming that adding the same widget twice creates two copies. It does not: when you add a widget to a layout, it is removed from its previous position. The solution is to create two widgets, and for that it is better to write a small class that encapsulates the logic.
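As a quick illustration of that reparenting behaviour, the minimal sketch below (not part of the solution; the button is just a stand-in widget) shows that the second addWidget() call moves the widget rather than copying it:

import sys
from PyQt5 import QtWidgets

app = QtWidgets.QApplication(sys.argv)

button = QtWidgets.QPushButton("only one of me")
left = QtWidgets.QWidget()
right = QtWidgets.QWidget()

QtWidgets.QVBoxLayout(left).addWidget(button)
QtWidgets.QVBoxLayout(right).addWidget(button)   # re-parents the button (Qt may print a warning)

print(button.parent() is right)   # True: the widget moved, it was not copied

With that in mind, the full solution, which wraps each glumpy window in a small Viewer widget, is: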
import sys

import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np

from glumpy import app as glumpy_app, gl, gloo, data, library
from glumpy.geometry import primitives
from glumpy.transforms import Trackball

width = 1000
height = 500

glumpy_app.use("qt5")

vertex = """
#include "misc/spatial-filters.frag"
uniform float height;
uniform sampler2D data;
uniform vec2 data_shape;
attribute vec3 position;
attribute vec2 texcoord;
varying vec3 v_position;
varying vec2 v_texcoord;
void main()
{
    float z = height*Bicubic(data, data_shape, texcoord).r;
    gl_Position = <transform>;
    v_texcoord = texcoord;
    v_position = vec3(position.xy, z);
}
"""

fragment = """
#include "misc/spatial-filters.frag"
uniform mat4 model;
uniform mat4 view;
uniform mat4 normal;
uniform sampler2D texture;
uniform float height;
uniform vec4 color;
uniform sampler2D data;
uniform vec2 data_shape;
uniform vec3 light_color[3];
uniform vec3 light_position[3];
varying vec3 v_position;
varying vec2 v_texcoord;

float lighting(vec3 v_normal, vec3 light_position)
{
    // Calculate normal in world coordinates
    vec3 n = normalize(normal * vec4(v_normal,1.0)).xyz;
    // Calculate the location of this fragment (pixel) in world coordinates
    vec3 position = vec3(view * model * vec4(v_position, 1));
    // Calculate the vector from this pixels surface to the light source
    vec3 surface_to_light = light_position - position;
    // Calculate the cosine of the angle of incidence (brightness)
    float brightness = dot(n, surface_to_light) /
                       (length(surface_to_light) * length(n));
    brightness = max(min(brightness,1.0),0.0);
    return brightness;
}

void main()
{
    mat4 model = <transform.trackball_model>;
    // Extract data value
    float value = Bicubic(data, data_shape, v_texcoord).r;
    // Compute surface normal using neighbour values
    float hx0 = height*Bicubic(data, data_shape, v_texcoord+vec2(+1,0)/data_shape).r;
    float hx1 = height*Bicubic(data, data_shape, v_texcoord+vec2(-1,0)/data_shape).r;
    float hy0 = height*Bicubic(data, data_shape, v_texcoord+vec2(0,+1)/data_shape).r;
    float hy1 = height*Bicubic(data, data_shape, v_texcoord+vec2(0,-1)/data_shape).r;
    vec3 dx = vec3(2.0/data_shape.x,0.0,hx0-hx1);
    vec3 dy = vec3(0.0,2.0/data_shape.y,hy0-hy1);
    vec3 v_normal = normalize(cross(dx,dy));
    // Map value to rgb color
    float c = 0.6 + 0.4*texture2D(texture, v_texcoord).r;
    vec4 l1 = vec4(light_color[0] * lighting(v_normal, light_position[0]), 1);
    vec4 l2 = vec4(light_color[1] * lighting(v_normal, light_position[1]), 1);
    vec4 l3 = vec4(light_color[2] * lighting(v_normal, light_position[2]), 1);
    gl_FragColor = color * vec4(c,c,c,1) * (0.5 + 0.5*(l1+l2+l3));
}
"""

def func3(x, y):
    return (1 - x / 2 + x ** 5 + y ** 3) * np.exp(-(x ** 2) - y ** 2)

class Viewer(QtGui.QWidget):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.glumpy_window = glumpy_app.Window(color=(1, 1, 1, 1))

        lay = QtGui.QVBoxLayout(self)
        lay.addWidget(self.glumpy_window._native_window)

        n = 64
        self.surface = gloo.Program(vertex, fragment)
        self.vertices, self.s_indices = primitives.plane(2.0, n=n)
        self.surface.bind(self.vertices)

        I = []
        for i in range(n):
            I.append(i)
        for i in range(1, n):
            I.append(n - 1 + i * n)
        for i in range(n - 1):
            I.append(n * n - 1 - i)
        for i in range(n - 1):
            I.append(n * (n - 1) - i * n)
        self.b_indices = np.array(I, dtype=np.uint32).view(gloo.IndexBuffer)

        x = np.linspace(-2.0, 2.0, 32).astype(np.float32)
        y = np.linspace(-2.0, 2.0, 32).astype(np.float32)
        X, Y = np.meshgrid(x, y)
        Z = func3(X, Y)

        self.surface["data"] = (Z - Z.min()) / (Z.max() - Z.min())
        self.surface["data"].interpolation = gl.GL_NEAREST
        self.surface["data_shape"] = Z.shape[1], Z.shape[0]
        self.surface["u_kernel"] = data.get("spatial-filters.npy")
        self.surface["u_kernel"].interpolation = gl.GL_LINEAR
        self.surface["texture"] = data.checkerboard(32, 24)

        self.transform = Trackball("vec4(position.xy, z, 1.0)")
        self.surface["transform"] = self.transform
        self.glumpy_window.attach(self.transform)

        T = (Z - Z.min()) / (Z.max() - Z.min())
        self.surface["height"] = 0.75
        self.surface["light_position[0]"] = 3, 0, 0 + 5
        self.surface["light_position[1]"] = 0, 3, 0 + 5
        self.surface["light_position[2]"] = -3, -3, +5
        self.surface["light_color[0]"] = 1, 0, 0
        self.surface["light_color[1]"] = 0, 1, 0
        self.surface["light_color[2]"] = 0, 0, 1
        phi, theta = -45, 0
        self.time = 0

        self.glumpy_window.set_handler("on_init", self.on_init)
        self.glumpy_window.set_handler("on_draw", self.on_draw)

    def on_init(self):
        gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
        gl.glPolygonOffset(1, 1)
        gl.glEnable(gl.GL_LINE_SMOOTH)
        gl.glLineWidth(2.5)

    def on_draw(self, dt):
        self.time += dt
        self.glumpy_window.clear()
        self.surface["data"]

        gl.glDisable(gl.GL_BLEND)
        gl.glEnable(gl.GL_DEPTH_TEST)
        gl.glEnable(gl.GL_POLYGON_OFFSET_FILL)
        self.surface["color"] = 1, 1, 1, 1
        self.surface.draw(gl.GL_TRIANGLES, self.s_indices)

        gl.glDisable(gl.GL_POLYGON_OFFSET_FILL)
        gl.glEnable(gl.GL_BLEND)
        gl.glDepthMask(gl.GL_FALSE)
        self.surface["color"] = 0, 0, 0, 1
        self.surface.draw(gl.GL_LINE_LOOP, self.b_indices)
        gl.glDepthMask(gl.GL_TRUE)

        model = self.surface["transform"]["model"].reshape(4, 4)
        view = self.surface["transform"]["view"].reshape(4, 4)
        self.surface["view"] = view
        self.surface["model"] = model
        self.surface["normal"] = np.array(np.matrix(np.dot(view, model)).I.T)
        self.surface["height"] = 0.75 * np.cos(self.time)

    def showEvent(self, event):
        super().showEvent(event)
        self.glumpy_window.dispatch_event("on_resize", *self.glumpy_window.get_size())

class layout:
    def setup(self, window):
        self.window = window
        self.window.resize(width, height)

        self.dialogue = QtGui.QTextEdit()

        self.plot = pg.GraphicsLayoutWidget(self.window)
        self.plot1 = self.plot.addPlot(colspan=1)

        self.centralwidget = QtGui.QWidget(self.window)
        self.horizontallayout = QtGui.QHBoxLayout(self.centralwidget)
        self.window.setCentralWidget(self.centralwidget)

        self.top_viewer = Viewer()
        self.bottom_viewer = Viewer()

        right_container = QtGui.QWidget()
        lay = QtGui.QVBoxLayout(right_container)
        lay.addWidget(self.top_viewer)
        lay.addWidget(self.bottom_viewer)

        self.horizontallayout.addWidget(self.dialogue, stretch=1)
        self.horizontallayout.addWidget(self.plot, stretch=1)
        self.horizontallayout.addWidget(right_container, stretch=1)

class Window(QtGui.QMainWindow, layout):
    def __init__(self, shot=None):
        super(Window, self).__init__()
        self.setup(self)

    def closeEvent(self, event):
        super().closeEvent(event)
        for viewer in (self.top_viewer, self.bottom_viewer):
            viewer.glumpy_window.close()

if __name__ == "__main__":
    app = pg.Qt.QtGui.QApplication([])
    w = Window()
    w.show()
    glumpy_app.run()

How to rotate the triangle with Modern OpenGL and Python

I have written code to render a triangle using a shader program, and I want to rotate the triangle. I'm using PyGLM to set up a transformation matrix. The whole code is below. When I run it, a triangle appears in the window as expected, but there is no rotation. I think I've failed to pass the transformation matrix to the shader.
import ctypes

from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from OpenGL.GL import shaders
import numpy as np
import glm

VERTEX_SHADER = """
#version 330
in vec4 position;
in vec3 color;
out vec3 newColor;
void main()
{
    gl_Position = position;
    newColor = color;
}
"""

FRAGMENT_SHADER = """
#version 330
in vec3 newColor;
out vec4 outColor;
void main()
{
    outColor = vec4(newColor,1.0f);
}
"""

shaderProgram = None

def initliaze():
    global VERTEXT_SHADER
    global FRAGMEN_SHADER
    global shaderProgram

    vertexshader = shaders.compileShader(VERTEX_SHADER, GL_VERTEX_SHADER)
    fragmentshader = shaders.compileShader(FRAGMENT_SHADER, GL_FRAGMENT_SHADER)
    shaderProgram = shaders.compileProgram(vertexshader, fragmentshader)

    # x, y, z, r, g, b for each of the three vertices
    triangles = [-0.5, -0.5, 0.0,  1.0, 0.0, 0.0,
                  0.5, -0.5, 0.0,  0.0, 1.0, 0.0,
                  0.0,  0.5, 0.0,  0.0, 0.0, 1.0]
    triangles = np.array(triangles, dtype=np.float32)

    VBO = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, VBO)
    glBufferData(GL_ARRAY_BUFFER, triangles.nbytes, triangles, GL_DYNAMIC_DRAW)

    position = glGetAttribLocation(shaderProgram, 'position')
    glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))
    glEnableVertexAttribArray(position)

    color = glGetAttribLocation(shaderProgram, 'color')
    glVertexAttribPointer(color, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))
    glEnableVertexAttribArray(color)

def render():
    global shaderProgram
    global angle

    # shader
    glUseProgram(shaderProgram)

    glClearColor(0, 0, 0, 1)
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

    # transform matrix
    transform = glm.mat4(1)
    transform = glm.translate(transform, glm.vec3(0.5,-0.5,0.0))
    transform = glm.rotate(transform, glutGet(GLUT_ELAPSED_TIME),glm.vec3(0,0,1))
    transformLoc = glGetUniformLocation(shaderProgram,"transform")
    glUniformMatrix4fv(transformLoc, 1, GL_FALSE, glm.value_ptr(transform))

    # render program
    glDrawArrays(GL_TRIANGLES, 0, 3)
    glUseProgram(0)
    glutSwapBuffers()

def main():
    glutInit([])
    glutInitWindowSize(640, 480)
    glutCreateWindow("pyopengl with glut 2")
    initliaze()
    glutDisplayFunc(render)
    glutMainLoop()

if __name__ == '__main__':
    main()
In VERTEX_SHADER you did not mention the transform variable, so the triangle's position remains fixed when you run the program. Change your VERTEX_SHADER as follows.
VERTEX_SHADER = """
#version 330
in vec4 position;
in vec3 color;
out vec3 newColor;
uniform mat4 transform;
void main()
{
    gl_Position = transform*position;
    newColor = color;
}
"""
In your code you are already retrieving the location of the uniform variable transform with the following line.
transformLoc = glGetUniformLocation(shaderProgram,"transform")
You should call glutPostRedisplay() after glutSwapBuffers() so that the scene is redrawn continuously and the change becomes visible.
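Putting both changes together, render() might look like the sketch below. This is an illustrative version rather than the exact code from the answer: PyGLM's glm.rotate expects an angle in radians, so the elapsed milliseconds are scaled and converted here (the 0.05 factor is an arbitrary rotation speed):

def render():
    global shaderProgram

    glUseProgram(shaderProgram)
    glClearColor(0, 0, 0, 1)
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

    # build the transformation: translate, then rotate about the z axis
    angle = glm.radians(glutGet(GLUT_ELAPSED_TIME) * 0.05)   # ms -> degrees -> radians
    transform = glm.mat4(1)
    transform = glm.translate(transform, glm.vec3(0.5, -0.5, 0.0))
    transform = glm.rotate(transform, angle, glm.vec3(0, 0, 1))

    # upload the matrix to the "transform" uniform declared in the vertex shader
    transformLoc = glGetUniformLocation(shaderProgram, "transform")
    glUniformMatrix4fv(transformLoc, 1, GL_FALSE, glm.value_ptr(transform))

    glDrawArrays(GL_TRIANGLES, 0, 3)
    glUseProgram(0)
    glutSwapBuffers()
    glutPostRedisplay()   # request the next frame so the rotation animates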
Looks like you will want to create your own library from GLM. What you're doing in the code above no longer works. As another user stated, this is a good template to build functionality from. I'd suggest downloading GLM, taking it apart, and reverse engineering what you need into Python.

Using a movie/GIF as a texture on a rotating cube with vispy

I am using Vispy to rotate a cube. I can successfully use imageio to apply a custom texture to the face of the rotating cube, but I don't understand how to use a movie or a GIF instead.
Full source code:
import numpy as np
from vispy import gloo, app
from vispy.gloo import Program, VertexBuffer, IndexBuffer
from vispy.util.transforms import perspective, translate, rotate
from vispy.geometry import create_cube
import imageio

im = imageio.imread('C:\\vhosts\\VIDEO_TWO_CLONE\\shape.jpg')

vertex = """
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform sampler2D texture;
attribute vec3 position;
attribute vec2 texcoord;
attribute vec3 normal;
attribute vec4 color;
varying vec2 v_texcoord;
void main()
{
    gl_Position = projection * view * model * vec4(position,1.0);
    v_texcoord = texcoord;
}
"""

fragment = """
uniform sampler2D texture;
varying vec2 v_texcoord;
void main()
{
    gl_FragColor = texture2D(texture, v_texcoord);
}
"""

def checkerboard(grid_num=8, grid_size=32):
    row_even = grid_num // 2 * [0, 1]
    row_odd = grid_num // 2 * [1, 0]
    Z = np.row_stack(grid_num // 2 * (row_even, row_odd)).astype(np.uint8)
    return 255 * Z.repeat(grid_size, axis=0).repeat(grid_size, axis=1)

class Canvas(app.Canvas):
    def __init__(self):
        app.Canvas.__init__(self, size=(750, 750), title='Textured cube',
                            keys='interactive')
        self.timer = app.Timer('auto', self.on_timer)

        # Build cube data
        V, I, _ = create_cube()
        vertices = VertexBuffer(V)
        self.indices = IndexBuffer(I)

        # Build program
        self.program = Program(vertex, fragment)
        self.program.bind(vertices)

        # Build view, model, projection & normal
        view = translate((0, 0, -5))
        model = np.eye(4, dtype=np.float32)
        self.program['model'] = model
        self.program['view'] = view
        self.program['texture'] = im  # checkerboard()

        self.activate_zoom()
        self.phi, self.theta = 0, 0

        # OpenGL initalization
        gloo.set_state(clear_color=(0.30, 0.30, 0.35, 1.00), depth_test=True)
        self.timer.start()
        self.show()

    def on_draw(self, event):
        gloo.clear(color=True, depth=True)
        self.program.draw('triangles', self.indices)

    def on_resize(self, event):
        self.activate_zoom()

    def activate_zoom(self):
        gloo.set_viewport(0, 0, *self.physical_size)
        projection = perspective(45.0, self.size[0] / float(self.size[1]),
                                 2.0, 10.0)
        self.program['projection'] = projection

    def on_timer(self, event):
        self.theta += .5
        self.phi += .5
        self.program['model'] = np.dot(rotate(self.theta, (0, 1, 0)),
                                       rotate(self.phi, (0, 1, 0)))
        self.update()

if __name__ == '__main__':
    c = Canvas()
    app.run()
A gif or a movie is essentially many image frames, coming one after the other.
What you would need to do is read the images from the GIF into an array containing the RGB(A) values of each image (for GIF, you can use the code from e.g. https://gist.github.com/BigglesZX/4016539), and then advance self.program['texture'] to the next frame of the GIF in on_timer.
In case you want to loop the texture, you could use the modulo operator %, so you don't run over the array boundaries, with something like
# gif_array is the array populated with GIF frames
self.program['texture'] = gif_array[index % len(gif_array)]
index = index + 1
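A minimal sketch of that idea, assuming the frames are read with imageio (imageio.mimread returns the GIF as a list of arrays) and reusing the Canvas class from the question; gif_path and the attribute names are made up for the example:

import imageio

gif_path = 'animation.gif'   # hypothetical path to the GIF

class AnimatedCanvas(Canvas):
    def __init__(self):
        # read every frame of the GIF into memory as RGB(A) arrays
        self.frames = imageio.mimread(gif_path)
        self.frame_index = 0
        super().__init__()

    def on_timer(self, event):
        # advance the texture to the next frame, wrapping around at the end
        self.program['texture'] = self.frames[self.frame_index % len(self.frames)]
        self.frame_index += 1
        super().on_timer(event)   # keep the original rotation update

For a movie file the structure is the same; only the reading step changes (for example a frame-by-frame reader instead of loading everything into memory).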

How to center a subclass of QGLWidget in PyQt5

I have an example program in PyQt5
I have a class:
class QGLControllerWidget(QtOpenGL.QGLWidget):
...
And a main program:
app = QtWidgets.QApplication([])
window = QGLControllerWidget() # this is my class
window.show() # I want to center the window on the screen
app.exec_()
Currently the window's location is somewhere between the center and the bottom right corner, please see the image:
How to center the window/widget on the screen in PyQt5?
03_alpha_blending.py
import struct

from PyQt5 import QtCore, QtOpenGL, QtWidgets
import ModernGL

class QGLControllerWidget(QtOpenGL.QGLWidget):
    def __init__(self):
        fmt = QtOpenGL.QGLFormat()
        fmt.setVersion(3, 3)
        fmt.setProfile(QtOpenGL.QGLFormat.CoreProfile)
        fmt.setSampleBuffers(True)
        self.timer = QtCore.QElapsedTimer()
        super(QGLControllerWidget, self).__init__(fmt, None)

    def initializeGL(self):
        self.ctx = ModernGL.create_context()

        prog = self.ctx.program([
            self.ctx.vertex_shader('''
                #version 330
                in vec2 vert;
                in vec4 vert_color;
                out vec4 frag_color;
                uniform vec2 scale;
                uniform float rotation;
                void main() {
                    frag_color = vert_color;
                    float r = rotation * (0.5 + gl_InstanceID * 0.05);
                    mat2 rot = mat2(cos(r), sin(r), -sin(r), cos(r));
                    gl_Position = vec4((rot * vert) * scale, 0.0, 1.0);
                }
            '''),
            self.ctx.fragment_shader('''
                #version 330
                in vec4 frag_color;
                out vec4 color;
                void main() {
                    color = vec4(frag_color);
                }
            '''),
        ])

        self.scale = prog.uniforms['scale']
        self.rotation = prog.uniforms['rotation']

        vbo = self.ctx.buffer(struct.pack(
            '18f',
            1.0, 0.0, 1.0, 0.0, 0.0, 0.5,
            -0.5, 0.86, 0.0, 1.0, 0.0, 0.5,
            -0.5, -0.86, 0.0, 0.0, 1.0, 0.5,
        ))

        self.vao = self.ctx.simple_vertex_array(prog, vbo, ['vert', 'vert_color'])
        self.timer.restart()

    def paintGL(self):
        self.ctx.viewport = (0, 0, self.width(), self.height())
        self.ctx.clear(0.9, 0.9, 0.9)
        self.scale.value = (self.height() / self.width() * 0.75, 0.75)
        self.rotation.value = self.timer.elapsed() / 1000
        self.ctx.enable(ModernGL.BLEND)
        self.vao.render(instances=10)
        self.ctx.finish()
        self.update()

app = QtWidgets.QApplication([])
window = QGLControllerWidget()
window.show()
app.exec_()
You have to move the widget to the center of the screen. To know the size of the screen you can use QDesktopWidget, like the following:
app = QtWidgets.QApplication([])
window = QGLControllerWidget()
pos = QtWidgets.QDesktopWidget().rect().center() - window.rect().center()
window.move(pos)
window.show()
app.exec_()
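QDesktopWidget is deprecated in recent Qt 5 releases; an equivalent sketch using QScreen (same idea, only the screen-geometry API differs, and availableGeometry() excludes task bars) would be:

app = QtWidgets.QApplication([])
window = QGLControllerWidget()

# center the widget inside the available area of the primary screen
screen_center = app.primaryScreen().availableGeometry().center()
window.move(screen_center - window.rect().center())

window.show()
app.exec_()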
