Solar System in OpenGL, Camera position - python

I want to make a simple solar system in OpenGL, with four cameras.
What I want is simple: just place a camera on one side of the earth.
In the following code, I get the MODELVIEW_MATRIX with glGetFloatv(GL_MODELVIEW_MATRIX) (line 116).
So I thought that multiplying MODELVIEW_MATRIX by the [[0],[0],[0],[1]] vector would give the origin of the planet in the world coordinate system.
But it doesn't work well, so I need some help.
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import math
import numpy as np
WINDOW_WIDTH = 600
WINDOW_HEIGHT = 600
WINDOW_POSITION_X = 0
WINDOW_POSITION_Y = 0
earthRevolveAngle = 180
earthRotateAngle = 0
satelliteRevolveAngle = 180
satellitePlaneAngle = 0
plutoRevolveAngle = 180
plutoRotateAngle = 0
plutoCamera = np.array([0, 0, 0])
earthPosition = np.array([0, 0, 0])
class Camera :
def __init__(self): #constructor
self.loc = np.array([0.0, 50.0, 0.0])
self.tar = np.array([0.0, 0.0, 0.0])
self.up = np.array([1.0, 0.0, 0.0])
self.right = np.array([1.0, 0.0, 0.0])
self.dir = np.array([0.0, 0.0, -1.0])
self.asp = 1.0
self.fov = 70
self.near= 0.1
self.far = 500.0
def setCameraLoc(self, loc):
self.loc = loc
self.tar = self.loc + self.dir
def setCamera(self, loc, tar, up):
self.loc, self.tar, self.up = loc, tar, up
self.dir = self.tar - self.loc
l = np.linalg.norm(self.dir)
if l > 0.0 :
self.dir = self.dir / l
l = np.linalg.norm(self.up)
if l > 0.0 :
self.up = self.up / l
self.right = np.cross(self.dir, self.up)
def setLens(self, fov, asp, near, far):
self.fov, self.asp, self.near, self.far = fov, asp, near, far
def applyCamera(self):
gluLookAt(self.loc[0], self.loc[1], self.loc[2],
self.tar[0], self.tar[1], self.tar[2],
self.up [0], self.up [1], self.up [2])
def applyLens(self):
gluPerspective(self.fov, self.asp, self.near, self.far)
def moveForward(self, step=1.0):
self.tar += self.dir*step
self.loc += self.dir*step
def zoomIn(self, step=1.0):
self.loc += self.dir*step
def zoomOut(self, step=1.0):
self.loc -= self.dir*step
def drawPlanet(semiMajor, semiMinor, revolveAngle, rotateAngle, shape, slope, axisTilt) :
global plutoCamera, earthPosition
a = semiMajor
b = semiMinor
#Orbit's slope
glRotatef(slope, 1, 0, 0)
#Start draw orbit
glBegin(GL_LINE_STRIP)
for i in range(0, 361):
theta = 2.0 * 3.141592 * i / 360.0
x = a*math.cos(theta)
z = b*math.sin(theta)
glVertex3f(x, 0, z)
glEnd()
#End draw orbit
theta = 2.0 * 3.141592 * (revolveAngle%360) / 360.0
x = a * math.cos(theta)
z = b * math.sin(theta)
glRotatef(revolveAngle, 0, 1, 0)
glTranslatef( math.sqrt( x**2 + z**2 ) , 0, 0)
glRotatef(rotateAngle, 0, 1, 0)
glRotatef(axisTilt, 0, 0, 1)
t = glGetFloatv(GL_MODELVIEW_MATRIX)
if(shape == "satellite"):
glScalef(0.4,0.4,0.4)
glutSolidTetrahedron()
glScalef(2.5,2.5,2.5)
elif(shape == "earth"):
glutWireCube(1)
earthPosition = t * np.matrix( [[0],[0],[0],[1]] )
elif(shape == "pluto"):
glScalef(0.4,0.4,0.4)
glutWireOctahedron()
glScalef(2.5,2.5,2.5)
def drawScene() :
global earthRevolveAngle, earthRotateAngle, satelliteAngle, satelliteRevolveAngle, satellitePlaneAngle, plutoRevolveAngle, plutoRotateAngle
# draw solar
glColor3f(1,0,0)
glutWireSphere(1.0, 20, 20)
glPushMatrix()
# draw earth
glColor3f(0,0.5,1.0)
earthRevolveAngle+=0.05 # earth's revolution
earthRotateAngle+=0.2
drawPlanet(5, 5, earthRevolveAngle, earthRotateAngle, "earth",0,15)
# draw satellite
glColor3f(0.7,0.7,0.7)
satelliteRevolveAngle+=1.5
satellitePlaneAngle += 0.25
glRotatef(satellitePlaneAngle, 1, 0, 0)
drawPlanet(1, 1, satelliteRevolveAngle, 1, "satellite",0,0)
# draw pluto
glPopMatrix() # comeback to solar central coordinate
glPushMatrix()
glColor3f(0.9,0.7,0.26)
plutoRevolveAngle+=0.0125 # pluto's revolution
plutoRotateAngle+=0.1 # pluto's rotation
drawPlanet(10, 8, plutoRevolveAngle,plutoRotateAngle, "pluto",0,0)
glPopMatrix()
Cam = Camera()
def disp() :
global plutoCamera, earthPosition, Cam
# reset buffer
glClear(GL_COLOR_BUFFER_BIT)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
# Camera view setting
Cam.setLens(30,1.0,0.1,1000)
Cam.applyLens()
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
# first quadrant
glViewport(int(WINDOW_POSITION_X+WINDOW_WIDTH/2), int(WINDOW_POSITION_Y + WINDOW_HEIGHT/2), int(WINDOW_WIDTH/2), int(WINDOW_HEIGHT/2))
glPushMatrix()
Cam.setCamera( np.array([0,0,1]), np.array([0,0,100]), np.array([0,1,0]))
Cam.applyCamera()
drawScene()
glPopMatrix()
# second quadrant
glViewport(int(WINDOW_POSITION_X), int(WINDOW_POSITION_Y + WINDOW_HEIGHT/2), int(WINDOW_WIDTH/2), int(WINDOW_HEIGHT/2) )
glPushMatrix()
Cam.setCamera( np.array([30,30,30]), np.array([0,0,0]), np.array([0,1,0]))
Cam.applyCamera()
drawScene()
glPopMatrix()
# third quadrant
glViewport(WINDOW_POSITION_X, WINDOW_POSITION_Y, int(WINDOW_WIDTH/2) , int(WINDOW_HEIGHT/2) )
glPushMatrix()
Cam.setCamera( plutoCamera, np.array([0,0,0]), np.array([0,1,0]))
Cam.applyCamera()
drawScene()
glPopMatrix()
# fourth quadrant
glViewport(int(WINDOW_POSITION_X+WINDOW_WIDTH/2), WINDOW_POSITION_Y, int(WINDOW_WIDTH/2), int(WINDOW_HEIGHT/2) )
glPushMatrix()
Cam.setCamera( earthPosition, np.array([0,0,0]) , np.array([0,1,0]))
Cam.applyCamera()
drawScene()
glPopMatrix()
glFlush()
def main():
# windowing
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB)
glutInitWindowSize(WINDOW_WIDTH,WINDOW_HEIGHT)
glutInitWindowPosition(WINDOW_POSITION_X,WINDOW_POSITION_Y)
glutCreateWindow(b"Simple Solar_201624489_ParkChangHae")
glClearColor(0, 0.0, 0.0, 0)
# register callbacks
glutDisplayFunc(disp)
glutIdleFunc(disp)
# enter main infinite-loop
glutMainLoop()
if __name__=="__main__":
main()

The * operator doesn't do what you expect it to do: it is an array multiplication, not a matrix multiplication, and it performs a componentwise multiplication of the elements. See how does multiplication differ for NumPy Matrix vs Array classes? and Numerical operations on arrays.
Use numpy.dot or numpy.matmul to transform a vector by a matrix.
The result of transforming a 4-component vector (homogeneous coordinates) by a 4*4 matrix is still a 4-component vector. In general you would have to do a perspective divide after the transformation. But the model view matrix is an affine transformation with no projective part, so it is sufficient to use the first 3 components of the result, since the 4th component is always 1:
pos = np.array( [0,0,0,1] )
pos = np.dot( pos, t )
earthPosition = pos[0:3]
But note, the view space position of the coordinate (0, 0, 0, 1) is the translation part (the 4th row) of the model view matrix:
earthPosition = t[3][0:3]
Sadly this is not what you want, because you want to know the world position of the earth, not the view position.
Since glGetFloatv(GL_MODELVIEW_MATRIX) returns the model view matrix, the transformation calculates the view position, not the world position.
You have to transform by the model matrix, not the model view matrix. Since you can't separate the model matrix from the model view matrix, this is not that easy.
What you can get is the view matrix. With the view matrix and the model view matrix you can get the world position.
A transformation by the model matrix is the same as a transformation by the model view matrix and the inverse view matrix:
p_world = inverse(view_matrix) * model_view_matrix * p_model
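As a quick standalone sanity check of that relation (a minimal numpy sketch with made-up matrices in the usual column-vector convention, independent of the program above):
import numpy as np
# hypothetical model matrix: planet translated to (5, 0, -3) in world space
model = np.array([[1,0,0, 5], [0,1,0, 0], [0,0,1,-3], [0,0,0, 1]], dtype=float)
# hypothetical view matrix: camera pulled back 50 units
view = np.array([[1,0,0, 0], [0,1,0, 0], [0,0,1,-50], [0,0,0, 1]], dtype=float)
model_view = view @ model
p_model = np.array([0.0, 0.0, 0.0, 1.0])
p_world = np.linalg.inv(view) @ model_view @ p_model
print(p_world[:3])  # [ 5.  0. -3.] - the model (world) position is recovered
(In the PyOpenGL snippets below the matrices come from glGetFloatv, which lays the matrix out so that the translation ends up in the 4th row; that is why the vector is multiplied as a row vector from the left with np.dot(pos, t).)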
I recommend getting the view matrix and calculating the inverse view matrix in the Cam class right after it is set by gluLookAt. The inverse matrix can be calculated by numpy.linalg.inv:
def applyCamera(self):
gluLookAt(self.loc[0], self.loc[1], self.loc[2],
self.tar[0], self.tar[1], self.tar[2],
self.up [0], self.up [1], self.up [2])
self.viewmat = glGetFloatv(GL_MODELVIEW_MATRIX)
self.inv_viewmat = np.linalg.inv(self.viewmat)
Finally the world position is a simple transformation of the 4th row of the model view matrix by the inverse view matrix:
global plutoCamera, earthPosition, Cam
.....
model_view = glGetFloatv(GL_MODELVIEW_MATRIX)
if(shape == "satellite"):
glScalef(0.4,0.4,0.4)
glutSolidTetrahedron()
glScalef(2.5,2.5,2.5)
elif(shape == "earth"):
glutWireCube(1)
pos = np.dot( model_view[3], Cam.inv_viewmat )
earthPosition = pos[0:3]
elif(shape == "pluto"):
glScalef(0.4,0.4,0.4)
glutWireOctahedron()
glScalef(2.5,2.5,2.5)
pos = np.dot( model_view[3], Cam.inv_viewmat )
plutoCamera = pos[0:3]

Related

Attribute Error while using PyQtgraph libraries in my python code

I'm running code from a GitHub site and it has this error (last lines):
File "D:\Anaconda3\lib\site-packages\pyqtgraph\opengl\GLViewWidget.py", line 152, in viewMatrix
tr.translate(-center.x(), -center.y(), -center.z())
AttributeError: 'int' object has no attribute 'x'
I found that the error is related to the pyqtgraph libraries and I didn't change their files,
I just installed the latest versions of PyOpenGL and PyQtGraph in Spyder.
Can you please help me with this error?
We need more information.
The problem seems to be related to the fact that center is an int but you use it as an object with attributes.
Can we see center's initialization?
The GLViewWidget.py file:
from ..Qt import QtCore, QtGui, QtOpenGL, QT_LIB
from OpenGL.GL import *
import OpenGL.GL.framebufferobjects as glfbo
import numpy as np
from .. import Vector
from .. import functions as fn
##Vector = QtGui.QVector3D
ShareWidget = None
class GLViewWidget(QtOpenGL.QGLWidget):
"""
Basic widget for displaying 3D data
- Rotation/scale controls
- Axis/grid display
- Export options
High-DPI displays: Qt5 should automatically detect the correct resolution.
For Qt4, specify the ``devicePixelRatio`` argument when initializing the
widget (usually this value is 1-2).
"""
def __init__(self, parent=None, devicePixelRatio=None):
global ShareWidget
if ShareWidget is None:
## create a dummy widget to allow sharing objects (textures, shaders, etc) between views
ShareWidget = QtOpenGL.QGLWidget()
QtOpenGL.QGLWidget.__init__(self, parent, ShareWidget)
self.setFocusPolicy(QtCore.Qt.ClickFocus)
self.opts = {
'center': Vector(0,0,0), ## will always appear at the center of the widget
'distance': 10.0, ## distance of camera from center
'fov': 60, ## horizontal field of view in degrees
'elevation': 30, ## camera's angle of elevation in degrees
'azimuth': 45, ## camera's azimuthal angle in degrees
## (rotation around z-axis 0 points along x-axis)
'viewport': None, ## glViewport params; None == whole widget
'devicePixelRatio': devicePixelRatio,
}
self.setBackgroundColor('k')
self.items = []
self.noRepeatKeys = [QtCore.Qt.Key_Right, QtCore.Qt.Key_Left, QtCore.Qt.Key_Up, QtCore.Qt.Key_Down, QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown]
self.keysPressed = {}
self.keyTimer = QtCore.QTimer()
self.keyTimer.timeout.connect(self.evalKeyState)
self.makeCurrent()
def addItem(self, item):
self.items.append(item)
if hasattr(item, 'initializeGL'):
self.makeCurrent()
try:
item.initializeGL()
except:
self.checkOpenGLVersion('Error while adding item %s to GLViewWidget.' % str(item))
item._setView(self)
#print "set view", item, self, item.view()
self.update()
def removeItem(self, item):
self.items.remove(item)
item._setView(None)
self.update()
def initializeGL(self):
self.resizeGL(self.width(), self.height())
def setBackgroundColor(self, *args, **kwds):
"""
Set the background color of the widget. Accepts the same arguments as
pg.mkColor() and pg.glColor().
"""
self.opts['bgcolor'] = fn.glColor(*args, **kwds)
self.update()
def getViewport(self):
vp = self.opts['viewport']
dpr = self.devicePixelRatio()
if vp is None:
return (0, 0, int(self.width() * dpr), int(self.height() * dpr))
else:
return tuple([int(x * dpr) for x in vp])
def devicePixelRatio(self):
dpr = self.opts['devicePixelRatio']
if dpr is not None:
return dpr
if hasattr(QtOpenGL.QGLWidget, 'devicePixelRatio'):
return QtOpenGL.QGLWidget.devicePixelRatio(self)
else:
return 1.0
def resizeGL(self, w, h):
pass
#glViewport(*self.getViewport())
#self.update()
def setProjection(self, region=None):
m = self.projectionMatrix(region)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
a = np.array(m.copyDataTo()).reshape((4,4))
glMultMatrixf(a.transpose())
def projectionMatrix(self, region=None):
if region is None:
dpr = self.devicePixelRatio()
region = (0, 0, self.width() * dpr, self.height() * dpr)
x0, y0, w, h = self.getViewport()
dist = self.opts['distance']
fov = self.opts['fov']
nearClip = dist * 0.001
farClip = dist * 1000.
r = nearClip * np.tan(fov * 0.5 * np.pi / 180.)
t = r * h / w
## Note that X0 and width in these equations must be the values used in viewport
left = r * ((region[0]-x0) * (2.0/w) - 1)
right = r * ((region[0]+region[2]-x0) * (2.0/w) - 1)
bottom = t * ((region[1]-y0) * (2.0/h) - 1)
top = t * ((region[1]+region[3]-y0) * (2.0/h) - 1)
tr = QtGui.QMatrix4x4()
tr.frustum(left, right, bottom, top, nearClip, farClip)
return tr
def setModelview(self):
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
m = self.viewMatrix()
a = np.array(m.copyDataTo()).reshape((4,4))
glMultMatrixf(a.transpose())
def viewMatrix(self):
tr = QtGui.QMatrix4x4()
tr.translate( 0.0, 0.0, -self.opts['distance'])
tr.rotate(self.opts['elevation']-90, 1, 0, 0)
tr.rotate(self.opts['azimuth']+90, 0, 0, -1)
center = self.opts['center']
tr.translate(-center.x(), -center.y(), -center.z())
return tr
def itemsAt(self, region=None):
"""
Return a list of the items displayed in the region (x, y, w, h)
relative to the widget.
"""
region = (region[0], self.height()-(region[1]+region[3]), region[2], region[3])
#buf = np.zeros(100000, dtype=np.uint)
buf = glSelectBuffer(100000)
try:
glRenderMode(GL_SELECT)
glInitNames()
glPushName(0)
self._itemNames = {}
self.paintGL(region=region, useItemNames=True)
finally:
hits = glRenderMode(GL_RENDER)
items = [(h.near, h.names[0]) for h in hits]
items.sort(key=lambda i: i[0])
return [self._itemNames[i[1]] for i in items]
def paintGL(self, region=None, viewport=None, useItemNames=False):
"""
viewport specifies the arguments to glViewport. If None, then we use self.opts['viewport']
region specifies the sub-region of self.opts['viewport'] that should be rendered.
Note that we may use viewport != self.opts['viewport'] when exporting.
"""
if viewport is None:
glViewport(*self.getViewport())
else:
glViewport(*viewport)
self.setProjection(region=region)
self.setModelview()
bgcolor = self.opts['bgcolor']
glClearColor(*bgcolor)
glClear( GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT )
self.drawItemTree(useItemNames=useItemNames)
def drawItemTree(self, item=None, useItemNames=False):
if item is None:
items = [x for x in self.items if x.parentItem() is None]
else:
items = item.childItems()
items.append(item)
items.sort(key=lambda a: a.depthValue())
for i in items:
if not i.visible():
continue
if i is item:
try:
glPushAttrib(GL_ALL_ATTRIB_BITS)
if useItemNames:
glLoadName(i._id)
self._itemNames[i._id] = i
i.paint()
except:
from .. import debug
debug.printExc()
msg = "Error while drawing item %s." % str(item)
ver = glGetString(GL_VERSION)
if ver is not None:
ver = ver.split()[0]
if int(ver.split(b'.')[0]) < 2:
print(msg + " The original exception is printed above; however, pyqtgraph requires OpenGL version 2.0 or greater for many of its 3D features and your OpenGL version is %s. Installing updated display drivers may resolve this issue." % ver)
else:
print(msg)
finally:
glPopAttrib()
else:
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
try:
tr = i.transform()
a = np.array(tr.copyDataTo()).reshape((4,4))
glMultMatrixf(a.transpose())
self.drawItemTree(i, useItemNames=useItemNames)
finally:
glMatrixMode(GL_MODELVIEW)
glPopMatrix()
def setCameraPosition(self, pos=None, distance=None, elevation=None, azimuth=None):
if pos is not None:
self.opts['center'] = pos
if distance is not None:
self.opts['distance'] = distance
if elevation is not None:
self.opts['elevation'] = elevation
if azimuth is not None:
self.opts['azimuth'] = azimuth
self.update()
def cameraPosition(self):
"""Return current position of camera based on center, dist, elevation, and azimuth"""
center = self.opts['center']
dist = self.opts['distance']
elev = self.opts['elevation'] * np.pi/180.
azim = self.opts['azimuth'] * np.pi/180.
pos = Vector(
center.x() + dist * np.cos(elev) * np.cos(azim),
center.y() + dist * np.cos(elev) * np.sin(azim),
center.z() + dist * np.sin(elev)
)
return pos
def orbit(self, azim, elev):
"""Orbits the camera around the center position. *azim* and *elev* are given in degrees."""
self.opts['azimuth'] += azim
self.opts['elevation'] = np.clip(self.opts['elevation'] + elev, -90, 90)
self.update()
def pan(self, dx, dy, dz, relative='global'):
"""
Moves the center (look-at) position while holding the camera in place.
============== =======================================================
**Arguments:**
*dx* Distance to pan in x direction
*dy* Distance to pan in y direction
*dx* Distance to pan in z direction
*relative* String that determines the direction of dx,dy,dz.
If "global", then the global coordinate system is used.
If "view", then the z axis is aligned with the view
direction, and x and y axes are inthe plane of the
view: +x points right, +y points up.
If "view-upright", then x is in the global xy plane and
points to the right side of the view, y is in the
global xy plane and orthogonal to x, and z points in
the global z direction.
============== =======================================================
Distances are scaled roughly such that a value of 1.0 moves
by one pixel on screen.
Prior to version 0.11, *relative* was expected to be either True (x-aligned) or
False (global). These values are deprecated but still recognized.
"""
# for backward compatibility:
relative = {True: "view-upright", False: "global"}.get(relative, relative)
if relative == 'global':
self.opts['center'] += QtGui.QVector3D(dx, dy, dz)
elif relative == 'view-upright':
cPos = self.cameraPosition()
cVec = self.opts['center'] - cPos
dist = cVec.length() ## distance from camera to center
xDist = dist * 2. * np.tan(0.5 * self.opts['fov'] * np.pi / 180.) ## approx. width of view at distance of center point
xScale = xDist / self.width()
zVec = QtGui.QVector3D(0,0,1)
xVec = QtGui.QVector3D.crossProduct(zVec, cVec).normalized()
yVec = QtGui.QVector3D.crossProduct(xVec, zVec).normalized()
self.opts['center'] = self.opts['center'] + xVec * xScale * dx + yVec * xScale * dy + zVec * xScale * dz
elif relative == 'view':
# pan in plane of camera
elev = np.radians(self.opts['elevation'])
azim = np.radians(self.opts['azimuth'])
fov = np.radians(self.opts['fov'])
dist = (self.opts['center'] - self.cameraPosition()).length()
fov_factor = np.tan(fov / 2) * 2
scale_factor = dist * fov_factor / self.width()
z = scale_factor * np.cos(elev) * dy
x = scale_factor * (np.sin(azim) * dx - np.sin(elev) * np.cos(azim) * dy)
y = scale_factor * (np.cos(azim) * dx + np.sin(elev) * np.sin(azim) * dy)
self.opts['center'] += QtGui.QVector3D(x, -y, z)
else:
raise ValueError("relative argument must be global, view, or view-upright")
self.update()
def pixelSize(self, pos):
"""
Return the approximate size of a screen pixel at the location pos
Pos may be a Vector or an (N,3) array of locations
"""
cam = self.cameraPosition()
if isinstance(pos, np.ndarray):
cam = np.array(cam).reshape((1,)*(pos.ndim-1)+(3,))
dist = ((pos-cam)**2).sum(axis=-1)**0.5
else:
dist = (pos-cam).length()
xDist = dist * 2. * np.tan(0.5 * self.opts['fov'] * np.pi / 180.)
return xDist / self.width()
def mousePressEvent(self, ev):
self.mousePos = ev.pos()
def mouseMoveEvent(self, ev):
diff = ev.pos() - self.mousePos
self.mousePos = ev.pos()
if ev.buttons() == QtCore.Qt.LeftButton:
if (ev.modifiers() & QtCore.Qt.ControlModifier):
self.pan(diff.x(), diff.y(), 0, relative='view')
else:
self.orbit(-diff.x(), diff.y())
elif ev.buttons() == QtCore.Qt.MidButton:
if (ev.modifiers() & QtCore.Qt.ControlModifier):
self.pan(diff.x(), 0, diff.y(), relative='view-upright')
else:
self.pan(diff.x(), diff.y(), 0, relative='view-upright')
def mouseReleaseEvent(self, ev):
pass
# Example item selection code:
#region = (ev.pos().x()-5, ev.pos().y()-5, 10, 10)
#print(self.itemsAt(region))
## debugging code: draw the picking region
#glViewport(*self.getViewport())
#glClear( GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT )
#region = (region[0], self.height()-(region[1]+region[3]), region[2], region[3])
#self.paintGL(region=region)
#self.swapBuffers()
def wheelEvent(self, ev):
delta = 0
if QT_LIB in ['PyQt4', 'PySide']:
delta = ev.delta()
else:
delta = ev.angleDelta().x()
if delta == 0:
delta = ev.angleDelta().y()
if (ev.modifiers() & QtCore.Qt.ControlModifier):
self.opts['fov'] *= 0.999**delta
else:
self.opts['distance'] *= 0.999**delta
self.update()
def keyPressEvent(self, ev):
if ev.key() in self.noRepeatKeys:
ev.accept()
if ev.isAutoRepeat():
return
self.keysPressed[ev.key()] = 1
self.evalKeyState()
def keyReleaseEvent(self, ev):
if ev.key() in self.noRepeatKeys:
ev.accept()
if ev.isAutoRepeat():
return
try:
del self.keysPressed[ev.key()]
except:
self.keysPressed = {}
self.evalKeyState()
def evalKeyState(self):
speed = 2.0
if len(self.keysPressed) > 0:
for key in self.keysPressed:
if key == QtCore.Qt.Key_Right:
self.orbit(azim=-speed, elev=0)
elif key == QtCore.Qt.Key_Left:
self.orbit(azim=speed, elev=0)
elif key == QtCore.Qt.Key_Up:
self.orbit(azim=0, elev=-speed)
elif key == QtCore.Qt.Key_Down:
self.orbit(azim=0, elev=speed)
elif key == QtCore.Qt.Key_PageUp:
pass
elif key == QtCore.Qt.Key_PageDown:
pass
self.keyTimer.start(16)
else:
self.keyTimer.stop()
def checkOpenGLVersion(self, msg):
## Only to be called from within exception handler.
ver = glGetString(GL_VERSION).split()[0]
if int(ver.split(b'.')[0]) < 2:
from .. import debug
debug.printExc()
raise Exception(msg + " The original exception is printed above; however, pyqtgraph requires OpenGL version 2.0 or greater for many of its 3D features and your OpenGL version is %s. Installing updated display drivers may resolve this issue." % ver)
else:
raise
def readQImage(self):
"""
Read the current buffer pixels out as a QImage.
"""
w = self.width()
h = self.height()
self.repaint()
pixels = np.empty((h, w, 4), dtype=np.ubyte)
pixels[:] = 128
pixels[...,0] = 50
pixels[...,3] = 255
glReadPixels(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, pixels)
# swap B,R channels for Qt
tmp = pixels[...,0].copy()
pixels[...,0] = pixels[...,2]
pixels[...,2] = tmp
pixels = pixels[::-1] # flip vertical
img = fn.makeQImage(pixels, transpose=False)
return img
def renderToArray(self, size, format=GL_BGRA, type=GL_UNSIGNED_BYTE, textureSize=1024, padding=256):
w,h = map(int, size)
self.makeCurrent()
tex = None
fb = None
try:
output = np.empty((w, h, 4), dtype=np.ubyte)
fb = glfbo.glGenFramebuffers(1)
glfbo.glBindFramebuffer(glfbo.GL_FRAMEBUFFER, fb )
glEnable(GL_TEXTURE_2D)
tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, tex)
texwidth = textureSize
data = np.zeros((texwidth,texwidth,4), dtype=np.ubyte)
## Test texture dimensions first
glTexImage2D(GL_PROXY_TEXTURE_2D, 0, GL_RGBA, texwidth, texwidth, 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
if glGetTexLevelParameteriv(GL_PROXY_TEXTURE_2D, 0, GL_TEXTURE_WIDTH) == 0:
raise Exception("OpenGL failed to create 2D texture (%dx%d); too large for this hardware." % shape[:2])
## create teture
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texwidth, texwidth, 0, GL_RGBA, GL_UNSIGNED_BYTE, data.transpose((1,0,2)))
self.opts['viewport'] = (0, 0, w, h) # viewport is the complete image; this ensures that paintGL(region=...)
# is interpreted correctly.
p2 = 2 * padding
for x in range(-padding, w-padding, texwidth-p2):
for y in range(-padding, h-padding, texwidth-p2):
x2 = min(x+texwidth, w+padding)
y2 = min(y+texwidth, h+padding)
w2 = x2-x
h2 = y2-y
## render to texture
glfbo.glFramebufferTexture2D(glfbo.GL_FRAMEBUFFER, glfbo.GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0)
self.paintGL(region=(x, h-y-h2, w2, h2), viewport=(0, 0, w2, h2)) # only render sub-region
glBindTexture(GL_TEXTURE_2D, tex) # fixes issue #366
## read texture back to array
data = glGetTexImage(GL_TEXTURE_2D, 0, format, type)
data = np.fromstring(data, dtype=np.ubyte).reshape(texwidth,texwidth,4).transpose(1,0,2)[:, ::-1]
output[x+padding:x2-padding, y+padding:y2-padding] = data[padding:w2-padding, -(h2-padding):-padding]
finally:
self.opts['viewport'] = None
glfbo.glBindFramebuffer(glfbo.GL_FRAMEBUFFER, 0)
glBindTexture(GL_TEXTURE_2D, 0)
if tex is not None:
glDeleteTextures([tex])
if fb is not None:
glfbo.glDeleteFramebuffers([fb])
return output
Somewhere in your code you are setting the center to be an int. setCameraPosition, maybe? It needs to be a Vector object, instead.
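For example, a minimal sketch of a correct call (assuming the standard pyqtgraph.opengl setup; the variable names here are made up):
import pyqtgraph as pg
import pyqtgraph.opengl as gl
from pyqtgraph import Vector
app = pg.mkQApp()
view = gl.GLViewWidget()
# the center/pos must be a Vector (something with .x()/.y()/.z()), not a plain int
view.setCameraPosition(pos=Vector(0, 0, 0), distance=20, elevation=30, azimuth=45)
view.show()
app.exec_()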

Set orientation of object in python using quaternions with pyglet, pywavefront and OpenGL

I want to set the orientation of an object in Python using quaternions. I get my quaternions periodically via a serial port (this part works). My goal is to create a program similar to the following JavaScript project: https://github.com/ZaneL/quaternion_sensor_3d_nodejs (but with this object and in Python).
Right now I can rotate the object using the keyboard with the following code (notice the rotation is around a non-zero point):
@window.event
def on_key_press(symbol, modifiers):
glTranslated(0, 0, 200)
if symbol == key.Q:
glRotated(22,0,1,0)
if symbol == key.W:
glRotated(-22,0,1,0)
glTranslated(0, 0, -200)
But this rotation is relative and I want to set the absolute orientation (with respect to some initial orientation). And I need to use quaternions, since quaternions specify the desired orientation.
So I want to do something like this:
@window.event
def on_key_press(symbol, modifiers):
if symbol == key.Q:
q = np.array([1,0,0,0])
if symbol == key.W:
q = np.array([0,1,0,0])
#set orientation based on q
Here is my complete code:
import pyglet
import pywavefront
from pywavefront import visualization
from pyglet.gl import *
from pyglet.window import key
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
path = '../models/10475_Rocket_Ship_v1_L3.obj'
window = pyglet.window.Window(resizable=True)
window.projection = pyglet.window.Projection3D(zfar=1000)
scene = pywavefront.Wavefront(path)
@window.event
def on_draw():
# print('draw')
window.clear()
visualization.draw(scene)
@window.event
def on_key_press(symbol, modifiers):
glTranslated(0, 0, 200)
if symbol == key.Q:
glRotated(22,0,1,0)
if symbol == key.W:
glRotated(-22,0,1,0)
glTranslated(0, 0, -200)
if __name__ == "__main__":
glViewport(0, 0, 500,500)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0.0, 500, 0.0, 500, 0.0, 1.0)
glMatrixMode (GL_MODELVIEW)
glLoadIdentity()
glTranslated(0, 0, 100)
for _ in range(4):
glRotated(25,0,1,0)
glTranslated(35, 0, 0)
glRotated(100,0,1,0)
glTranslated(0, 0, 200)
glRotated(-100,1,0,0)
glTranslated(-100, -275, -250)
glScale(0.75, 0.75, 0.75)
glClearColor(0.85, 0.85, 0.85, 1);
pyglet.app.run()
I also want to simplify the code for setting the initial orientation and position of the object (as this one was found using trial and error), preferably in 1 or 2 operations.
I don't need to use pyglet or pywavefront, as long as the object renders properly.
Edit:
I currently have the rotation working, but I want to rotate the entire model around the z-axis to compensate for a non-zero initial yaw angle (my monitor is not perfectly magnetic north of my object). My 6-axis sensor is upside down, so the model is also upside down.
from squaternion import Quaternion
import numpy as np
...
q = Quaternion(q['quat_w'],q['quat_x'],q['quat_y'],q['quat_z'])
# delete current matrix and replace with copy of initialized matrix:
glPopMatrix()
glPushMatrix()
e = q.to_euler(degrees=True)
# get initial yaw angle:
global init_yaw
if init_yaw == None:
init_yaw = e[2]
print(q)
print(init_yaw)
glTranslated(0, 0, 200)
# glRotated(init_yaw,0,0,1) #this doesn't work, it rotates using euler angles and it needs to rotate around the z axis
r = np.array(q.to_rot())
r4x4 = np.array([[r[0,0],r[1,0],r[2,0],0],
[r[0,1],r[1,1],r[2,1],0],
[r[0,2],r[1,2],r[2,2],0],
[0,0,0,1]])
glMultMatrixd(r4x4)
glTranslated(0, 0, -200)
I was able to solve it.
I use the squaternion library to store the quaternion. It comes with built-in methods for quaternion multiplication and for converting to a rotation matrix.
The rotation matrix is 3x3 and needs to be converted to a 4x4 rotation-translation matrix in column-major order.
First the quaternion is rotated to compensate for the offset in the yaw angle.
Then the quaternion is rotated around the x axis to compensate for the fact that the sensor is mounted upside down.
Then the 4x4 matrix is calculated.
Then the matrix is applied (after translating and before translating back); a condensed sketch of these steps follows below.
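Condensed, the orientation update boils down to this (a sketch of the same steps; q and init_yaw come from the sensor exactly as in the full code below):
q_yaw = Quaternion.from_euler(0, 0, 180 - init_yaw, degrees=True)   # cancel the initial yaw offset
q_flip = Quaternion.from_angle_axis(180, [1, 0, 0], degrees=True)   # sensor is mounted upside down
q = (q_yaw * q) * q_flip
r = np.array(q.to_rot())        # 3x3 rotation matrix
r4x4 = np.identity(4)
r4x4[:3, :3] = r.T              # transpose into the column-major 4x4 that OpenGL expects
glTranslated(0, 0, 200)         # rotate around the model's pivot, not the origin
glMultMatrixd(r4x4)
glTranslated(0, 0, -200)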
In order to simplify the initial rotation and translating operations (which were found using trial and error) all I had to do was print and inspect the matrix. After rounding relatively small values (<0.01) to zero I was able to find a simpler matrix. I used this code for inspection:
a = (GLdouble * 16)()
mvm = glGetDoublev(GL_MODELVIEW_MATRIX, a)
print(list(a))
array = np.array(list(a)).reshape([4,4])
print(array)
Here is my final code:
path = '../models/10475_Rocket_Ship_v1_L3.obj'
import pyglet
import pywavefront
from pywavefront import visualization
from pyglet.gl import *
from pyglet.window import key
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import serial
import json
from squaternion import Quaternion
import numpy as np
ser = serial.Serial('COM4',115200,timeout=0)
window = pyglet.window.Window(resizable=True)
window.projection = pyglet.window.Projection3D(zfar=1000)
scene = pywavefront.Wavefront(path)
buffer = ''
init_yaw = None
def timer(self):
global buffer
len = ser.in_waiting
if len > 0:
string = ser.read(len).decode("utf-8")
buffer_old = str(buffer)
buffer += string
last = buffer.rfind('\n')
if last >= 0:
second_last = buffer[0:last].rfind('\n')
if second_last >= 0:
# extract last full line (starts and ends with newline):
last_line = buffer[second_last+1:last]
try:
q = json.loads(last_line)
q = Quaternion(q['quat_w'],q['quat_x'],q['quat_y'],q['quat_z'])
buffer = buffer[last:] # delete everything before last newline
except:
print('invalid input')
print('buffer:',buffer)
buffer = ''
return
# delete current matrix and replace with initial matrix:
glPopMatrix()
glPushMatrix()
e = q.to_euler(degrees=True)
global init_yaw
if init_yaw == None:
init_yaw = e[2]
print(q)
print(init_yaw)
glTranslated(0, 0, 200)
q_yaw = Quaternion.from_euler(0,0,180-init_yaw,degrees=True)
q = q_yaw*q
#flip model around x axis, because sensor is upside down:
q_flip = Quaternion.from_angle_axis(180, [1,0,0],degrees=True)
q = q*q_flip
r = np.array(q.to_rot())
r4x4 = np.array([[r[0,0],r[1,0],r[2,0],0],
[r[0,1],r[1,1],r[2,1],0],
[r[0,2],r[1,2],r[2,2],0],
[0,0,0,1]])
glMultMatrixd(r4x4)
glTranslated(0, 0, -200)
@window.event
def on_draw():
window.clear()
visualization.draw(scene)
if __name__ == "__main__":
glViewport(0, 0, 500,500)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0.0, 500, 0.0, 500, 0.0, 1.0)
glMatrixMode (GL_MODELVIEW)
glLoadIdentity()
glClearColor(0.85, 0.85, 0.85, 1);
array = np.array([[0,0,1,0],
[1,0,0,0],
[0,1,0,0],
[0,-150,-600,1]])
glLoadMatrixd(array)
glPushMatrix();
pyglet.clock.schedule_interval(timer, 1/60);
pyglet.app.run()

Adding simple perspective to a 3D renderer in pygame

I have created a 3D renderer in Pygame, however I would now like to add perspective. I have been trying for a while now and can't seem to figure it out.
I have read that the simplest form of perspective is to multiply the x and y coordinates by the inverse of the z coordinate, so that x and y depend on the z value. This means that the x and y distances should decrease as the z coordinate increases, and increase as z decreases. I have managed to get this to work slightly, however it seems to accumulate: when I spin the box left and right, the back of the box gets very small and seems to accumulate a negative scale instead of remaining at a constant size at its set z distance.
Here is my code:
wireframe.py:
class Wireframe:
def __init__(self):
self.nodes = np.zeros((0,4))
self.edges = []
def addNodes(self, node_array):
ones_column = np.ones((len(node_array), 1))
ones_added = np.hstack((node_array, ones_column))
self.nodes = np.vstack((self.nodes, ones_added))
def addEdges(self, edgeList):
self.edges += edgeList
def outputNodes(self):
print("\n --- Nodes ---")
for i, (x, y, z, _) in enumerate(self.nodes):
print(" %d: (%.2f, %.2f, %.2f)" % (i, node.x, node.y, node.z))
def outputEdges(self):
print("\n --- Edges ---")
for i, (node1, node2) in enumerate(self.edges):
print(" %d: %d -> %d" % (i, node1, node2))
def translate(self, axis, d):
if axis in ['x', 'y', 'z']:
for node in self.nodes:
setattr(node, axis, getattr(node, axis) + d)
def scale(self, centre_x, centre_y, scale):
for node in self.nodes:
node.x = centre_x + scale * (node.x - centre_x)
node.y = centre_y + scale * (node.y - centre_y)
node.z *= scale
def findCentre(self):
num_nodes = len(self.nodes)
meanX = sum([node.x for node in self.nodes]) / num_nodes
meanY = sum([node.y for node in self.nodes]) / num_nodes
meanZ = sum([node.z for node in self.nodes]) / num_nodes
return (meanX, meanY, meanZ)
def rotateZ(self, centre, radians):
cx, cy, cz = centre
for node in self.nodes:
x = node.x - cx
y = node.y - cy
d = math.hypot(y,x)
theta = math.atan2(y,x) + radians
node.x = cx + d * math.cos(theta)
node.y = cy + d * math.sin(theta)
def rotateX(self, centre, radians):
cx, cy, cz = centre
for node in self.nodes:
y = node.y - cy
z = node.z - cz
d = math.hypot(y,z)
theta = math.atan2(y, z) + radians
node.z = cz + d * math.cos(theta)
node.y = cy + d * math.sin(theta)
def rotateY(self, centre, radians):
cx, cy, cz = centre
for node in self.nodes:
x = node.x - cx
z = node.z - cz
d = math.hypot(x, z)
theta = math.atan2(x, z) + radians
node.z = cz + d * math.cos(theta)
node.x = cx + d * math.sin(theta)
def transform(self, matrix):
self.nodes = np.dot(self.nodes, matrix)
def transform_for_perspective(self):
for node in self.nodes:
print(node[0], node[1], node[2])
if node[2] != 0:
node[0] = node[0]*(1/(1-(node[2]*0.00005)))
node[1] = node[1]*(1/(1-(node[2]*0.00005)))
node[2] = node[2]*1
def translationMatrix(self, dx=0, dy=0, dz=0):
return np.array([[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[dx,dy,dz,1]])
def scaleMatrix(self, sx=0, sy=0, sz=0):
return np.array([[sx, 0, 0, 0],
[0, sy, 0, 0],
[0, 0, sz, 0],
[0, 0, 0, 1]])
def rotateXMatrix(self, radians):
c = np.cos(radians)
s = np.sin(radians)
return np.array([[1,0,0,0],
[0,c,-s,0],
[0,s,c,0],
[0,0,0,1]])
def rotateYMatrix(self, radians):
c = np.cos(radians)
s = np.sin(radians)
return np.array([[c,0,s,0],
[0,1,0,0],
[-s,0,c,0],
[0,0,0,1]])
def rotateZMatrix(self, radians):
c = np.cos(radians)
s = np.sin(radians)
return np.array([[c,-s, 0, 0],
[s,c,0,0],
[0,0,1,0],
[0,0,0,1]])
def movCamera(self, tilt, pan):
return np.array([[1,0,0,200],
[0,1,0,0],
[pan,tilt,1,0],
[0,0,0,0]])
projectionViewer.py
from wireframe import *
import pygame
import numpy as np
class ProjectionViewer:
''' Displays 3D Objects on a Pygame Screen '''
def __init__(self, width, height):
self.width = width
self.height = height
self.screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('Wireframe Display')
self.background = (10,10,50)
self.wireframes = {}
self.displayNodes = True
self.displayEdges = True
self.nodeColour = (255,255,255)
self.edgeColour = (200,200,200)
self.nodeRadius = 4
def run(self):
key_to_function = {
pygame.K_LEFT: (lambda x: x.translateAll([-10, 0, 0])),
pygame.K_RIGHT:(lambda x: x.translateAll([ 10, 0, 0])),
pygame.K_DOWN: (lambda x: x.translateAll([0, 10, 0])),
pygame.K_UP: (lambda x: x.translateAll([0, -10, 0])),
pygame.K_a: (lambda x: x.rotate_about_Center('Y', -0.08)),
pygame.K_d: (lambda x: x.rotate_about_Center('Y', 0.08)),
pygame.K_w: (lambda x: x.rotate_about_Center('X', -0.08)),
pygame.K_s: (lambda x: x.rotate_about_Center('X', 0.08)),
pygame.K_EQUALS: (lambda x: x.scale_centre([1.25,1.25,1.25])),
pygame.K_MINUS: (lambda x: x.scale_centre([0.8,0.8,0.8])),
pygame.K_q: (lambda x: x.rotateAll('X', 0.1)),
pygame.K_z: (lambda x: x.rotateAll('Z', 0.1)),
pygame.K_x: (lambda x: x.rotateAll('Z', -0.1)),
pygame.K_p: (lambda x: x.perspectiveMode()),
pygame.K_t: (lambda x: x.translate_Camera())
}
running = True
flag = False
while running:
keys = pygame.key.get_pressed()
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if keys[pygame.K_LEFT]:
key_to_function[pygame.K_LEFT](self)
if keys[pygame.K_RIGHT]:
key_to_function[pygame.K_RIGHT](self)
if keys[pygame.K_DOWN]:
key_to_function[pygame.K_DOWN](self)
if keys[pygame.K_UP]:
key_to_function[pygame.K_UP](self)
if keys[pygame.K_EQUALS]:
key_to_function[pygame.K_EQUALS](self)
if keys[pygame.K_MINUS]:
key_to_function[pygame.K_MINUS](self)
if keys[pygame.K_LEFT]:
key_to_function[pygame.K_LEFT](self)
if keys[pygame.K_q]:
key_to_function[pygame.K_q](self)
if keys[pygame.K_w]:
key_to_function[pygame.K_w](self)
if keys[pygame.K_a]:
key_to_function[pygame.K_a](self)
if keys[pygame.K_s]:
key_to_function[pygame.K_s](self)
if keys[pygame.K_z]:
key_to_function[pygame.K_z](self)
if keys[pygame.K_x]:
key_to_function[pygame.K_x](self)
if keys[pygame.K_p]:
key_to_function[pygame.K_p](self)
if keys[pygame.K_t]:
key_to_function[pygame.K_t](self)
if keys[pygame.K_d]:
key_to_function[pygame.K_d](self)
self.display()
pygame.display.flip()
def addWireframe(self, name, wireframe):
self.wireframes[name] = wireframe
#translate to center
wf = Wireframe()
matrix = wf.translationMatrix(-self.width/2,-self.height/2,0)
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
wf = Wireframe()
matrix = wf.translationMatrix(self.width,self.height,0)
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
def display(self):
self.screen.fill(self.background)
for wireframe in self.wireframes.values():
if self.displayEdges:
for n1, n2 in wireframe.edges:
pygame.draw.aaline(self.screen, self.edgeColour, wireframe.nodes[n1][:2], wireframe.nodes[n2][:2],1)
wireframe.transform_for_perspective()
if self.displayNodes:
for node in wireframe.nodes:
pygame.draw.circle(self.screen, self.nodeColour, (int(node[0]), int(node[1])), self.nodeRadius, 0)
def translateAll(self, vector):
''' Translate all wireframes along a given axis by d units '''
wf = Wireframe()
matrix = wf.translationMatrix(*vector)
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
def scaleAll(self, vector):
wf = Wireframe()
matrix = wf.scaleMatrix(*vector)
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
def rotateAll(self, axis, theta):
wf = Wireframe()
if axis == 'X':
matrix = wf.rotateXMatrix(theta)
elif axis == 'Y':
matrix = wf.rotateYMatrix(theta)
elif axis == 'Z':
matrix = wf.rotateZMatrix(theta)
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
#wireframe.transform_for_perspective()
def moveCameraX(self,x,y):
wf = Wireframe()
matrix = wf.movCamera(x,y)
print("test")
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
def moveCameraZ(self,x,y):
wf = Wireframe()
matrix = wf.testMat((0,0,val))
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
def perspectiveMode(self):
#First translate the centre of screen to 0,0
wf = Wireframe()
matrix = wf.translationMatrix(-self.width/2,-self.height/2,0)
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
#perform the perspectivecorrection
wf = Wireframe()
matrix = wf.translationMatrix(-self.width/2,-self.height/2,0)
for wireframe in self.wireframes.values():
matrix = wf.perspectiveCorrection(1.2)
wireframe.transform(matrix)
wf = Wireframe()
matrix = wf.translationMatrix(self.width/2,self.height/2,0)
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
def rotate_about_Center(self, Axis, theta):
#First translate Centre of screen to 0,0
wf = Wireframe()
matrix = wf.translationMatrix(-self.width/2,-self.height/2,0)
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
#Do Rotation
wf = Wireframe()
if Axis == 'X':
matrix = wf.rotateXMatrix(theta)
elif Axis == 'Y':
matrix = wf.rotateYMatrix(theta)
elif Axis == 'Z':
matrix = wf.rotateZMatrix(theta)
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
#Translate back to centre of screen
wf = Wireframe()
matrix = wf.translationMatrix(self.width/2,self.height/2,0)
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
#Do perspective if needed
def scale_centre(self, vector):
#Transform center of screen to origin
wf = Wireframe()
matrix = wf.translationMatrix(-self.width/2,-self.height/2,0)
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
#Scale the origin by vector
wf = Wireframe()
matrix = wf.scaleMatrix(*vector)
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
wf = Wireframe()
matrix = wf.translationMatrix(self.width/2,self.height/2,0)
for wireframe in self.wireframes.values():
wireframe.transform(matrix)
def add_perspective(self):
for wireframe in self.wireframes.values():
for node in wireframe.nodes:
if node[2] != 0:
print("Point ----------")
print("x node", node[0])
print("y node", node[1])
print("z node", node[2])
node[0] = node[0] + (10/node[2])
node[1] = node[1] + (10/node[2])
main.py
from projectionViewer import ProjectionViewer
import wireframe
import numpy as np
cube = wireframe.Wireframe()
cube_nodes = [(x, y, z) for x in (-100, 100) for y in (-100, 100) for z in (-100, 100)]
print(cube_nodes)
cube.addNodes(np.array(cube_nodes))
cube.addEdges([(n, n + 4) for n in range(0, 4)])
cube.addEdges([(n, n + 1) for n in range(0, 8, 2)])
cube.addEdges([(n, n + 2) for n in (0, 1, 4, 5)])
pv = ProjectionViewer(1200, 1000)
pv.addWireframe('cube', cube)
pv.run()
The code that does the multiplying is in the wireframe file and the transform_for_perspective() function.
def transform_for_perspective(self):
for node in self.nodes:
print(node[0], node[1], node[2])
if node[2] != 0:
node[0] = node[0]*(1/(1-(node[2]*0.00005)))
node[1] = node[1]*(1/(1-(node[2]*0.00005)))
node[2] = node[2]*1
If anyone could tell me where I am going wrong and explain in which order I need to apply the perspective matrix (i.e. rotation then perspective, or perspective then rotation), that would be a great help.
Also, because Pygame puts (0,0) in the top left corner, if I want to rotate about the centre of the screen I have to translate the centre of the screen to the origin, apply the rotation matrix and then translate it back. What does this mean for perspective? Do I have to translate the centre of the screen to the top left, apply the perspective matrix and then translate it back again?
Any help would be much appreciated.
The transformation that you are applying in transform_for_perspective should only be applied once. However, it seems that you are calling it on every frame, and as it stores the output in the same variable (self.nodes) it is applied many times.
Consider saving the output of that transformation in a new field (such as self.perspective_nodes).
Also, the transformation was not working for me; I tried some variations and came up with this:
class Wireframe:
def __init__(self):
self.nodes = np.zeros((0, 4))
self.perspective_nodes = None
self.edges = []
....
def transform_for_perspective(self, center):
self.perspective_nodes = self.nodes.copy()
for i in range(len(self.nodes)):
node = self.nodes[i]
p_node = self.perspective_nodes[i]
print(node[0], node[1], node[2])
if node[2] != 0:
p_node[0] = center[0] + (node[0]-center[0])*250/(200-(node[2]))
p_node[1] = center[1] + (node[1]-center[1])*250/(200-(node[2]))
p_node[2] = node[2] * 1
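With those constants, a node's offset from the screen centre is scaled by 250/(200 - z), so a point at z = 100 is magnified by 2.5 while a point at z = -100 shrinks to about 0.83, and because the result goes into perspective_nodes the scaling no longer accumulates from frame to frame. A quick check of the factor (hypothetical z values):
for z in (-100, 0, 100):
    print(z, 250.0 / (200.0 - z))   # ~0.83, 1.25, 2.5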
You also need to modify display in projectionViewer:
def display(self):
self.screen.fill(self.background)
for wireframe in self.wireframes.values():
wireframe.transform_for_perspective((self.width/2, self.height/2))
if self.displayNodes:
for node in wireframe.perspective_nodes:
pygame.draw.circle(self.screen, self.nodeColour, (int(
node[0]), int(node[1])), self.nodeRadius, 0)
if self.displayEdges:
for n1, n2 in wireframe.edges:
pygame.draw.aaline(
self.screen, self.edgeColour, wireframe.perspective_nodes[n1][:2], wireframe.perspective_nodes[n2][:2], 1)

What is the lookat matrix multiplication function in python for a dragon superbible opengl example?

I mostly ported over the dragon example from SB OpenGL. The output gif of the program is below the code.
My question is: what is the lookat function in Python?
Supporting files: dragon.zip. Simply put pydragon.py into the folder 'dragon' and run it.
Source code of pydragon.py
#!/usr/bin/python3
import sys
import time
sys.path.append("./shared")
from sbmloader import SBMObject # location of sbm file format loader
from ktxloader import KTXObject # location of ktx file format loader
from sbmath import m3dDegToRad, m3dRadToDeg, m3dTranslateMatrix44, m3dRotationMatrix44, m3dMultiply, m3dOrtho, m3dPerspective, rotation_matrix, translate, m3dScaleMatrix44
fullscreen = True
import numpy.matlib
import numpy as np
import math
try:
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.raw.GL.ARB.vertex_array_object import glGenVertexArrays, glBindVertexArray
except:
print ('''
ERROR: PyOpenGL not installed properly.
''')
sys.exit()
identityMatrix = [1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1]
clear_program = GLuint(0)
append_program = GLuint(0)
resolve_program = GLuint(0)
class textures:
color = GLuint(0)
normals = GLuint(0)
class uniforms_block:
mv_matrix = (GLfloat * 16)(*identityMatrix)
view_matrix = (GLfloat * 16)(*identityMatrix)
proj_matrix = (GLfloat * 16)(*identityMatrix)
uniforms_buffer = GLuint(0)
class uniforms:
mvp = GLuint(0)
fragment_buffer = GLuint(0)
head_pointer_image = GLuint(0)
atomic_counter_buffer = GLuint(0)
dummy_vao = GLuint(0)
uniform = uniforms()
myobject = SBMObject()
def length(v):
return math.sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2])
def normalize(v):
l = length(v)
#if (v[0] == 0 and v[1] == 0 and v[2] ==0):
# return [0.0, 1/3, 0.0]
return [v[0]/l, v[1]/l, v[2]/l]
def dot(v0, v1):
return v0[0]*v1[0]+v0[1]*v1[1]+v0[2]*v1[2]
def cross(v0, v1):
return [
v0[1]*v1[2]-v1[1]*v0[2],
v0[2]*v1[0]-v1[2]*v0[0],
v0[0]*v1[1]-v1[0]*v0[1]]
def m3dLookAt(eye, target, up):
mz = normalize( (eye[0]-target[0], eye[1]-target[1], eye[2]-target[2]) ) # inverse line of sight
mx = normalize( cross( up, mz ) )
my = normalize( cross( mz, mx ) )
tx = dot( mx, eye )
ty = dot( my, eye )
tz = -dot( mz, eye )
return np.array([mx[0], my[0], mz[0], 0, mx[1], my[1], mz[1], 0, mx[2], my[2], mz[2], 0, tx, ty, tz, 1])
def scale(s):
return [s,0,0,0, 0,s,0,0, 0,0,s,0, 0,0,0,1]
def link_from_shaders(shaders, shader_count, delete_shaders, check_errors=False):
program = GLuint(0)
program = glCreateProgram()
for i in range(0, shader_count):
glAttachShader(program, shaders[i]);
glLinkProgram(program);
if (delete_shaders):
for i in range(0, shader_count):
glDeleteShader(shaders[i]);
return program
def shader_load(filename, shader_type):
result = GLuint(0)
with open ( filename, "rb") as data:
result = glCreateShader(shader_type)
glShaderSource(result, data.read() )
glCompileShader(result)
return result
def load_shaders():
global clear_program
global append_program
global resolve_program
global uniform
shaders = [GLuint(0), GLuint(0)]
shaders[0] = shader_load("fragmentlist_shaders/clear.vs.glsl", GL_VERTEX_SHADER);
shaders[1] = shader_load("fragmentlist_shaders/clear.fs.glsl", GL_FRAGMENT_SHADER);
if (clear_program):
glDeleteProgram(clear_program);
clear_program = link_from_shaders(shaders, 2, True);
shaders[0] = shader_load("fragmentlist_shaders/append.vs.glsl", GL_VERTEX_SHADER);
shaders[1] = shader_load("fragmentlist_shaders/append.fs.glsl", GL_FRAGMENT_SHADER);
if (append_program):
glDeleteProgram(append_program);
append_program = link_from_shaders(shaders, 2, True);
uniform.mvp = glGetUniformLocation(append_program, "mvp");
shaders[0] = shader_load("fragmentlist_shaders/resolve.vs.glsl", GL_VERTEX_SHADER);
shaders[1] = shader_load("fragmentlist_shaders/resolve.fs.glsl", GL_FRAGMENT_SHADER);
if (resolve_program):
glDeleteProgram(resolve_program)
resolve_program = link_from_shaders(shaders, 2, True);
class Scene:
def __init__(self, width, height):
global uniforms_buffer
global fragment_buffer
global atomic_counter_buffer
global head_pointer_image
global dummy_vao
global myobject
self.width = width
self.height = height
load_shaders()
glGenBuffers(1, uniforms_buffer)
glBindBuffer(GL_UNIFORM_BUFFER, uniforms_buffer)
glBufferData(GL_UNIFORM_BUFFER, sizeof(GLfloat * 16 *3), None, GL_DYNAMIC_DRAW)
myobject.load("dragon.sbm")
glGenBuffers(1, fragment_buffer)
glBindBuffer(GL_SHADER_STORAGE_BUFFER, fragment_buffer);
glBufferData(GL_SHADER_STORAGE_BUFFER, 1024 * 1024 * 16, None, GL_DYNAMIC_COPY)
glGenBuffers(1, atomic_counter_buffer);
glBindBuffer(GL_ATOMIC_COUNTER_BUFFER, atomic_counter_buffer);
glBufferData(GL_ATOMIC_COUNTER_BUFFER, 4, None, GL_DYNAMIC_COPY);
head_pointer_image = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, head_pointer_image);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_R32UI, 1024, 1024);
glGenVertexArrays(1, dummy_vao);
glBindVertexArray(dummy_vao);
def display(self):
green = [ 0.0, 0.1, 0.0, 0.0 ]
currentTime = time.time()
f = currentTime
zeros = [ 0.0, 0.0, 0.0, 0.0 ]
gray = [ 0.1, 0.1, 0.1, 0.0 ]
ones = [ 1.0 ]
glViewport(0, 0, self.width , self.height);
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT | GL_ATOMIC_COUNTER_BARRIER_BIT | GL_SHADER_STORAGE_BARRIER_BIT);
glUseProgram(clear_program);
glBindVertexArray(dummy_vao);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glUseProgram(append_program)
model_matrix = (GLfloat * 16)(*identityMatrix)
model_matrix = scale(6.0)
view_matrix = (GLfloat * 16)(*identityMatrix)
view_matrix = m3dLookAt([math.cos(f * 0.35) * 120.0, math.cos(f * 0.4) * 30.0, math.sin(f * 0.35) * 120.0],
[0.0, -20.0, 0.0],
[0.0, 1, 0.0])
mv_matrix = (GLfloat * 16)(*identityMatrix)
mv_matrix = m3dMultiply(view_matrix , model_matrix)
proj_matrix = (GLfloat * 16)(*identityMatrix)
proj_matrix = m3dPerspective(m3dDegToRad(50.0), float(self.width) / float(self.height), 0.1, 1000.0)
glUniformMatrix4fv(uniform.mvp, 1, GL_FALSE, m3dMultiply(proj_matrix , mv_matrix))
zero = 0;
glBindBufferBase(GL_ATOMIC_COUNTER_BUFFER, 0, atomic_counter_buffer)
# next line not working ????
#glBufferSubData(GL_ATOMIC_COUNTER_BUFFER, 0, sys.getsizeof(zero), zero);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, fragment_buffer)
glBindImageTexture(0, head_pointer_image, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R32UI)
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT | GL_ATOMIC_COUNTER_BARRIER_BIT | GL_SHADER_STORAGE_BARRIER_BIT)
myobject.render()
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT | GL_ATOMIC_COUNTER_BARRIER_BIT | GL_SHADER_STORAGE_BARRIER_BIT)
glUseProgram(resolve_program)
glBindVertexArray(dummy_vao)
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT | GL_ATOMIC_COUNTER_BARRIER_BIT | GL_SHADER_STORAGE_BARRIER_BIT)
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4)
glutSwapBuffers()
def reshape(self, width, height):
self.width = width
self.height = height
def keyboard(self, key, x, y ):
global fullscreen
print ('key:' , key)
if key == b'\x1b': # ESC
sys.exit()
elif key == b'f' or key == b'F': #fullscreen toggle
if (fullscreen == True):
glutReshapeWindow(512, 512)
glutPositionWindow(int((1360/2)-(512/2)), int((768/2)-(512/2)))
fullscreen = False
else:
glutFullScreen()
fullscreen = True
print('done')
def init(self):
pass
def timer(self, blah):
glutPostRedisplay()
glutTimerFunc( int(1/60), self.timer, 0)
time.sleep(1/60.0)
if __name__ == '__main__':
start = time.time()
glutInit()
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
glutInitWindowSize(512, 512)
w1 = glutCreateWindow('OpenGL SuperBible - Fragment List')
glutInitWindowPosition(int((1360/2)-(512/2)), int((768/2)-(512/2)))
fullscreen = False
many_cubes = False
#glutFullScreen()
scene = Scene(512,512)
glutReshapeFunc(scene.reshape)
glutDisplayFunc(scene.display)
glutKeyboardFunc(scene.keyboard)
glutIdleFunc(scene.display)
#glutTimerFunc( int(1/60), scene.timer, 0)
scene.init()
glutMainLoop()
The output is supposed to appear like the following:
Ported from fragmentlist.cpp, found in the OpenGL Superbible, 7th ed.
Current Question:
Any ideas why the texture rendered on the dragon is not translucent, as it is in the expected output?
The view space is the local system which is defined by the point of view onto the scene.
The position of the view, the line of sight and the upwards direction of the view, define a coordinate system relative to the world coordinate system. The objects of a scene have to be drawn in relation to the view coordinate system, to be "seen" from the viewing position. The inverse matrix of the view coordinate system is named the view matrix. This matrix transforms from world coordinates to view coordinates.
The code below defines a matrix that exactly encapsulates the steps necessary to calculate a look at the scene:
Converting model coordinates into view system coordinates.
Rotation, to look in the direction of the view.
Movement to the eye position.
Euclidean length of a vector:
def length(v):
return math.sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2])
Unit vector:
def normalize(v):
l = length(v)
return [v[0]/l, v[1]/l, v[2]/l]
Dot product:
def dot(v0, v1):
return v0[0]*v1[0]+v0[1]*v1[1]+v0[2]*v1[2]
Cross product:
def cross(v0, v1):
return [
v0[1]*v1[2]-v1[1]*v0[2],
v0[2]*v1[0]-v1[2]*v0[0],
v0[0]*v1[1]-v1[0]*v0[1]]
The following code does the same as gluLookAt or glm::lookAt does:
The parameter eye is the point of view, target is the point which is looked at and up is the upwards direction.
def m3dLookAt(eye, target, up):
mz = normalize( (eye[0]-target[0], eye[1]-target[1], eye[2]-target[2]) ) # inverse line of sight
mx = normalize( cross( up, mz ) )
my = normalize( cross( mz, mx ) )
tx = dot( mx, eye )
ty = dot( my, eye )
tz = -dot( mz, eye )
return np.array([mx[0], my[0], mz[0], 0, mx[1], my[1], mz[1], 0, mx[2], my[2], mz[2], 0, tx, ty, tz, 1])
Use it like this:
view_matrix = m3dLookAt([0, 0, 20], [0, 0, 0], [0, 1, 0])
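For comparison, the same camera could be set up through the fixed-function pipeline (a sketch; it assumes the current matrix mode is GL_MODELVIEW):
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(0.0, 0.0, 20.0,   # eye
          0.0, 0.0, 0.0,    # target
          0.0, 1.0, 0.0)    # up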

How to find PyGame Window Coordinates of an OpenGL Vertice?

I am trying to figure out the coordinates of the vertices of two rectangles in a pygame window that is using OpenGL to create the 3D objects.
import pygame
from pygame.locals import *
import random
from OpenGL.GL import *
from OpenGL.GLU import *
rect1 = [(-5.125,0,-40),(-3.125,0,-40),(-3.125,5,-40),(-5.125,5,-40),]
rect2 = [(3.125,0,-40),(5.125,0,-40),(5.125,5,-40),(3.125,5,-40)]
edges = ((0,1),(1,2),(2,3),(3,0))
#This draws the rectangles edges
def Target():
glBegin(GL_LINES)
for edge in edges:
for vertex in edge:
glVertex3fv(rect1[vertex])
glEnd()
glBegin(GL_LINES)
for edge in edges:
for vertex in edge:
glVertex3fv(rect2[vertex])
glEnd()
def main():
try:
pygame.init()
display = (320,240)
pygame.display.set_mode(display, DOUBLEBUF|OPENGL)
gluPerspective(45, (display[0]/display[1]), .1, 1000)
while True:
#iterates through events to check for quits
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
Target()
pygame.display.flip()
pygame.time.wait(100)
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
except Exception as e:
print (e)
main()
How do I grab the coordinates of the object on the pygame window (320,240)?
The projection matrix describes the mapping from 3D points of a scene, to 2D points of the viewport. It transforms from eye space to the clip space, and the coordinates in the clip space are transformed to the normalized device coordinates (NDC) by dividing with the w component of the clip coordinates. The NDC are in range (-1,-1,-1) to (1,1,1).
At Perspective Projection the projection matrix describes the mapping from 3D points in the world as they are seen from of a pinhole camera, to 2D points of the viewport. The eye space coordinates in the camera frustum (a truncated pyramid) are mapped to a cube (the normalized device coordinates).
Perspective Projection Matrix:
r = right, l = left, b = bottom, t = top, n = near, f = far
2*n/(r-l) 0 0 0
0 2*n/(t-b) 0 0
(r+l)/(r-l) (t+b)/(t-b) -(f+n)/(f-n) -1
0 0 -2*f*n/(f-n) 0
where :
aspect = w / h
tanFov = tan( fov_y / 2 );
2 * n / (r-l) = 1 / (tanFov * aspect)
2 * n / (t-b) = 1 / tanFov
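Putting those relations together, the symmetric projection matrix above can be built directly from the field of view, the aspect ratio and the clipping planes (a small numpy sketch, laid out in the same row order as the text above):
import math
import numpy as np
def perspective(fov_y_deg, aspect, n, f):
    t = math.tan(math.radians(fov_y_deg) / 2.0)
    return np.array([
        [1.0/(t*aspect), 0.0,   0.0,             0.0],
        [0.0,            1.0/t, 0.0,             0.0],
        [0.0,            0.0,   -(f+n)/(f-n),   -1.0],
        [0.0,            0.0,   -2.0*f*n/(f-n),  0.0]])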
Since the projection matrix is defined by the field of view and the aspect ratio, it is possible to recover the viewport position from the field of view and the aspect ratio, provided that it is a symmetrical perspective projection where the field of view is not displaced (as in your case).
First you have to transform the mouse position to normalized device coordinates:
w = width of the viewport
h = height of the viewport
x = X position of the mouse
y = Y position of the mouse
ndc_x = 2.0 * x/w - 1.0;
ndc_y = 1.0 - 2.0 * y/h; // invert Y axis
Then you have to convert the normalized device coordinates to view coordinates:
z = Z coordinate of the geometry in view space
viewPos.x = -z * ndc_x * aspect * tanFov;
viewPos.y = -z * ndc_y * tanFov;
If you want to check if the mouse hovers over your rectangles, then the code may look like this:
mpos = pygame.mouse.get_pos()
z = 40
ndc = [ 2.0 * mpos[0]/width - 1.0, 1.0 - 2.0 * mpos[1]/height ]
tanFov = math.tan( fov_y * 0.5 * math.pi / 180 )
aspect = width / height
viewPos = [z * ndc[0] * aspect * tanFov, z * ndc[1] * tanFov ]
onRect1 = 1 if (viewPos[0]>=rect1[0][0] and viewPos[0]<=rect1[1][0] and viewPos[1]>=rect1[0][1] and viewPos[1]<=rect1[2][1] ) else 0
onRect2 = 1 if (viewPos[0]>=rect2[0][0] and viewPos[0]<=rect2[1][0] and viewPos[1]>=rect2[0][1] and viewPos[1]<=rect2[2][1] ) else 0
See further:
How to recover view space position given view space depth value and ndc xy
Is it possble get which surface of cube will be click in OpenGL?
OpenGL - Mouse coordinates to Space coordinates
In the following I added the algorithm to your example. If the mouse hovers over a rectangle, then the rectangle is colored red.
import pygame
from pygame.locals import *
import random
from OpenGL.GL import *
from OpenGL.GLU import *
import math
rect1 = [(-5.125,0,-40),(-3.125,0,-40),(-3.125,5,-40),(-5.125,5,-40),]
rect2 = [(3.125,0,-40),(5.125,0,-40),(5.125,5,-40),(3.125,5,-40)]
edges = ((0,1),(1,2),(2,3),(3,0))
fov_y = 45
width = 320
height = 200
#This draws the rectangles edges
def Target():
mpos = pygame.mouse.get_pos()
z = 40
ndc = [ 2.0 * mpos[0]/width - 1.0, 1.0 - 2.0 * mpos[1]/height ]
tanFov = math.tan( fov_y * 0.5 * math.pi / 180 )
aspect = width / height
viewPos = [z * ndc[0] * aspect * tanFov, z * ndc[1] * tanFov ]
onRect1 = 1 if (viewPos[0]>=rect1[0][0] and viewPos[0]<=rect1[1][0] and viewPos[1]>=rect1[0][1] and viewPos[1]<=rect1[2][1] ) else 0
onRect2 = 1 if (viewPos[0]>=rect2[0][0] and viewPos[0]<=rect2[1][0] and viewPos[1]>=rect2[0][1] and viewPos[1]<=rect2[2][1] ) else 0
glColor3f( 1, 1-onRect1, 1-onRect1 )
glBegin(GL_LINES)
for edge in edges:
for vertex in edge:
glVertex3fv(rect1[vertex])
glEnd()
glColor3f( 1, 1-onRect2, 1-onRect2 )
glBegin(GL_LINES)
for edge in edges:
for vertex in edge:
glVertex3fv(rect2[vertex])
glEnd()
def main():
try:
pygame.init()
display = (width,height)
pygame.display.set_mode(display, DOUBLEBUF|OPENGL)
glMatrixMode(GL_PROJECTION)
gluPerspective(fov_y, (display[0]/display[1]), .1, 1000)
glMatrixMode(GL_MODELVIEW)
while True:
#iterates through events to check for quits
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
Target()
pygame.display.flip()
pygame.time.wait(100)
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
except Exception as e:
print (e)
main()
Extension to the answer
Of course you can also do it the other way around. You can transform the corner points of the rectangle to normalized device coordinates and compare them to the mouse position, in normalized device coordinates.
For this you have to read the projection matrix by glGetFloatv(GL_PROJECTION_MATRIX):
prjMat = (GLfloat * 16)()
glGetFloatv(GL_PROJECTION_MATRIX, prjMat)
And you need a function which transforms a 3-dimensional Cartesian vector by a projection matrix. This is done by multiplying the vector by the projection matrix, which gives homogeneous clip space coordinates. The normalized device coordinates are calculated by dividing the x, y, and z components by the w component.
def TransformVec3(vecA,mat44):
vecB = [0, 0, 0, 0]
for i0 in range(0, 4):
vecB[i0] = vecA[0] * mat44[0*4+i0] + vecA[1] * mat44[1*4+i0] + vecA[2] * mat44[2*4+i0] + mat44[3*4+i0]
return [vecB[0]/vecB[3], vecB[1]/vecB[3], vecB[2]/vecB[3]]
The following function tests if the mouse position is in a rectangle defined by a lower-left and an upper-right point (the corner points have to be in view space coordinates):
def TestRec(prjMat, mpos, ll, tr):
ll_ndc = TransformVec3(ll, prjMat)
tr_ndc = TransformVec3(tr, prjMat)
ndc = [ 2.0 * mpos[0]/width - 1.0, 1.0 - 2.0 * mpos[1]/height ]
inRect = 1 if (ndc[0]>=ll_ndc[0] and ndc[0]<=tr_ndc[0] and ndc[1]>=ll_ndc[1] and ndc[1]<=tr_ndc[1] ) else 0
return inRect
Again I added the algorithm to your example. If the mouse hovers over a rectangle, then the rectangle is colored red.
import pygame
from pygame.locals import *
import random
from OpenGL.GL import *
from OpenGL.GLU import *
import math
rect1 = [(-5.125,0,-40),(-3.125,0,-40),(-3.125,5,-40),(-5.125,5,-40),]
rect2 = [(3.125,0,-40),(5.125,0,-40),(5.125,5,-40),(3.125,5,-40)]
edges = ((0,1),(1,2),(2,3),(3,0))
fov_y = 45
width = 320
height = 200
def TransformVec3(vecA,mat44):
vecB = [0, 0, 0, 0]
for i0 in range(0, 4):
vecB[i0] = vecA[0] * mat44[0*4+i0] + vecA[1] * mat44[1*4+i0] + vecA[2] * mat44[2*4+i0] + mat44[3*4+i0]
return [vecB[0]/vecB[3], vecB[1]/vecB[3], vecB[2]/vecB[3]]
def TestRec(prjMat, mpos, ll, tr):
ll_ndc = TransformVec3(ll, prjMat)
tr_ndc = TransformVec3(tr, prjMat)
ndc = [ 2.0 * mpos[0]/width - 1.0, 1.0 - 2.0 * mpos[1]/height ]
inRect = 1 if (ndc[0]>=ll_ndc[0] and ndc[0]<=tr_ndc[0] and ndc[1]>=ll_ndc[1] and ndc[1]<=tr_ndc[1] ) else 0
return inRect
#This draws the rectangles edges
def Target():
prjMat = (GLfloat * 16)()
glGetFloatv(GL_PROJECTION_MATRIX, prjMat)
mpos = pygame.mouse.get_pos()
onRect1 = TestRec(prjMat, mpos, rect1[0], rect1[2])
onRect2 = TestRec(prjMat, mpos, rect2[0], rect2[2])
glColor3f( 1, 1-onRect1, 1-onRect1 )
glBegin(GL_LINES)
for edge in edges:
for vertex in edge:
glVertex3fv(rect1[vertex])
glEnd()
glColor3f( 1, 1-onRect2, 1-onRect2 )
glBegin(GL_LINES)
for edge in edges:
for vertex in edge:
glVertex3fv(rect2[vertex])
glEnd()
def main():
try:
pygame.init()
display = (width,height)
pygame.display.set_mode(display, DOUBLEBUF|OPENGL)
glMatrixMode(GL_PROJECTION)
gluPerspective(fov_y, (display[0]/display[1]), .1, 1000)
glMatrixMode(GL_MODELVIEW)
while True:
#iterates through events to check for quits
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
Target()
pygame.display.flip()
pygame.time.wait(100)
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
except Exception as e:
print (e)
main()
