Unwanted cutting problem in visualization - python

I’m newbie at VTK.
When I try to visualize a sphere, and I adjust z value in sphere.SetCenter(0.0, 0.0, z_value), I only can visualize cut sphere.
I guess there is a viewing frame(:cube). How can I extend my viewing cube?
half sphere
z_value = -1.0
ren = vtk.vtkRenderer()
sphere = vtk.vtkSphere()
sphere.SetCenter(0.0, 0.0, z_value)
sphere.SetRadius(0.1)
# The sample function generates a distance function from the implicit
# function. This is then contoured to get a polygonal surface.
sample = vtk.vtkSampleFunction()
sample.SetImplicitFunction(sphere)
#sample.SetModelBounds(-100.0, 100.0, -100.0, 100.0, -100.0, 100.0)
#sample.SetSampleDimensions(20, 20, 20)
sample.ComputeNormalsOff()
# contour
surface = vtk.vtkContourFilter()
surface.SetInputConnection(sample.GetOutputPort())
surface.SetValue(0, 0.0)
# mapper
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(surface.GetOutputPort())
mapper.ScalarVisibilityOff()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().EdgeVisibilityOn()
actor.GetProperty().SetEdgeColor(.2, .2, .5)
# A renderer and render window
ren = vtk.vtkRenderer()
ren.SetBackground(0, 0, 0)
# add the actor
ren.AddActor(actor)
renWin = vtk.vtkRenderWindow()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renWin.AddRenderer(ren)
# This allows the interactor to initalize itself. It has to be
# called before an event loop.
iren.Initialize()
# We'll zoom in a little by accessing the camera and invoking a "Zoom"
# method on it.
ren.ResetCamera()
#ren.GetActiveCamera().Zoom(1.5)
renWin.Render()
# Start the event loop.
iren.Start()

To fix your script, you should uncomment the sample.SetModelBounds() line and set the actual bounds of your sphere, i.e.
sample.SetModelBounds(centerX - radius, centerX + radius, centerY - radius, centerY + radius, centerZ - radius, centerZ + radius,)
That said, if you only want to display a sphere, you've better to use the vtkSphereSource. So you can replace the sphere-sample-contour part by :
sphere = vtk.vtkSphereSource()
sphere.SetCenter(0.0, 0.0, z_value)
sphere.SetRadius(0.1)
sphere.Update()
and below
mapper.SetInputConnection(sphere.GetOutputPort())

Related

Anchoring Rendered 3d Model to CharUcoBoard with OpenCV to OpenGL Pose Estimation

I am having difficulty spotting the issue in my code with regards to an OpenCV to OpenGL pseudo augmented reality program. I say "pseudo" because the constraint is taking images and mat objects from openCV and converting them to OpenGL.
I have working models rendered, have used the cv2.rodrigues method and camera calibration to come up with some translation and rotation vectors, I have inverted the matrix and taken the transpose for proper display in the openGL context. The issue I have stems from the fact that the model gets warped when I rotate my CharUcoBoard and begins to drift in the opposite direction when I move my camera. I assume it has something to do with the fact I am using outdated tech, on an unsupported platform (MacOS Big Sur, 11.6). Unfortunately I do not have time to switch platforms or utilize more abstracted software. Ergo, I am stuck with what I got.
I have images, but since this is a new account am unable to post them directly.
I have tried several methods, including hard coding in my calibrated camera parameters, modifying the initial identity matrix to coincide with my projected matrix and adding additional calibration images from all manner of angles, rotations and camera distances.
Ive tried using glOrtho and glMultMatrixf to no avail. Perhaps I missed something in my code.
The math part:
self.INVERSE_MATRIX = np.array([[1.0, 1.0, 1.0, 1.0],
[-1.0, -1.0, -1.0, -1.0],
[-1.0, -1.0, -1.0, -1.0],
[1.0, 1.0, 1.0, 1.0]])
def _boardHandle(self, image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
res = cv2.aruco.detectMarkers(gray, dictionary, parameters=arucoParams)
corners = res[0]
ids = res[1]
if ids is not None:
res2 = cv2.aruco.interpolateCornersCharuco(corners, ids, gray,
board)
charucoRetval = res2[0]
charucoCorners = res2[1]
charucoIds = res2[2]
# with camera calibration
retval, rvecs, tvecs = cv2.aruco.estimatePoseCharucoBoard(charucoCorners, charucoIds, board,
self.mtx, self.dist, self.rvecs, self.tvecs)
if retval:
rmtx = cv2.Rodrigues(rvecs)[0]
view_matrix = np.array([[rmtx[0][0], rmtx[0][1], rmtx[0][2], tvecs[0]],
[rmtx[1][0], rmtx[1][1], rmtx[1][2], tvecs[1]],
[rmtx[2][0], rmtx[2][1], rmtx[2][2], tvecs[2]],
[0.0, 0.0, 0.0, 1.0]], dtype=object)
view_matrix = view_matrix * self.INVERSE_MATRIX
view_matrix = np.transpose(view_matrix)
return view_matrix
return self.INVERSE_MATRIX
The rendering pipeline and frame grabbing:
def _draw_scene(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
# get image from webcam
image = self.webcam.get_current_frame()
# convert image to OpenGL texture format
bg_image = cv2.flip(image, 0)
bg_image = Image.fromarray(bg_image)
ix = bg_image.size[0]
iy = bg_image.size[1]
bg_image = bg_image.tobytes("raw", "BGRX", 0, -1)
# create background texture
glBindTexture(GL_TEXTURE_2D, self.bgTexture)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, bg_image)
# draw background
glBindTexture(GL_TEXTURE_2D, self.bgTexture)
glPushMatrix()
glTranslatef(0.0, 0.0, -10.0)
self._draw_background()
glPopMatrix()
# handle boards/pieces
image = self._boardHandle(image)
#drawing the other pieces
glutSwapBuffers()
The above blocks handle the camera calibration files, the initial creation of the modelview matrix, and the rendering of the chessboard. I had originally assumed that estimating the pose and then utilizing that in a modelview matrix would have given me an accurate rendering, which seems to be the case when the camera is just shy of centered above the board. Im relatively new at openGL however and may have missed something crucial.
I know display lists are outdated as well, but at this point have been unable to make the above work with VAO's, VBO's or EBO's. That will have to be a future project for me.
Any help/pointers or resources will be appreciated.

Drawing a ball in the center of the image with Python itk

import itk
ImageType = itk.Image[itk.UC, 3]
newimage = ImageType.New()
size = itk.Size[3]()
size = [100, 100, 80]
index = itk.Index[3]()
index = [0, 0, 0]
region = itk.ImageRegion[3]()
region.SetSize(size)
region.SetIndex(index)
newimage.SetRegions(region)
newimage.Allocate()
newimage.FillBuffer(63)
origin = itk.Point[itk.D, 3]()
origin = (0.0, 0.0, 0.0)
spacing = itk.Vector[itk.D, 3]()
spacing = (0.40, 0.40, 1.0)
newimage.SetOrigin(origin)
newimage.SetSpacing(spacing)
writer = itk.ImageFileWriter[ImageType].New()
writer.SetInput(newimage)
writer.SetFileName('wynik3.nii')
writer.Update()
I have this code and I need to draw a light gray ball with a radius of 31 in the center of the image but I have no idea how to do it. I'm also not sure if the code works properly because it should create a dark-gray image but when I open it in SliceDrop it's all black.
Taking cue from a similar question:
Use itkEllipseSpatialObject and itkSpatialObjectToImageFilter.
With itkEllipseSpatialObject you can create a sphere of the proper size.
With itkSpatialObjectToImageFilter you can rasterize that sphere into a binary image mask.
Both classes should be available via Python.
Your code produces a completely grey volume. Why would you expect it to create a ball?
Here's code to do it in SimpleITK:
import SimpleITK as sitk
# create a spherical gaussian blob
gaussian = sitk.GaussianSource(sitk.sitkUInt8,
size = [100,100,80],
sigma = [20,20,20],
mean = [20,20,40],
spacing = [.4, .4, 1.0])
# threshold to create a binary ball
ball = sitk.BinaryThreshold(gaussian, 150.0, 255.0, 63, 0)
sitk.WriteImage(ball, "ball.nii.gz")
If you prefer to stick to ITK, you could do the same thing using the itkGaussianImageSource and itkBinaryThresholdImageFilter classes.

"stroke" required after "showing" Pango layout?

I have been chasing a problem between PyCairo and PangoCairo. The following code illustrates it:
import math
import cairo
import gi
gi.require_version('Pango', '1.0')
gi.require_version('PangoCairo', '1.0')
from gi.repository import Pango, PangoCairo
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 400, 400)
ctx = cairo.Context(surface)
# TOP LEFT CIRCLE
ctx.save()
ctx.arc(100.0, 100.0, 50, 0, 2 * math.pi)
ctx.set_source_rgba(0.0, 0.0, 1.0, 1.0)
ctx.set_line_width(2.0)
ctx.stroke()
ctx.restore()
# CENTER TEXT
ctx.save()
layout = PangoCairo.create_layout(ctx)
layout.set_font_description(Pango.font_description_from_string('Arial 10.0'))
layout.set_markup('<b>Foo Bar</b>', -1)
ctx.set_source_rgba(0.0, 1.0, 0.0, 1.0)
_, text_extents = layout.get_pixel_extents()
text_width, text_height = text_extents.width, text_extents.height
ctx.translate(200.0, 200.0)
ctx.translate(-text_width / 2, -text_height / 2)
ctx.move_to(0.0, 0.0)
PangoCairo.show_layout(ctx, layout)
# ctx.stroke() # WHY?
ctx.restore()
# BOTTOM RIGHT CIRCLE
ctx.save()
ctx.arc(300.0, 300.0, 50, 0, 2 * math.pi)
ctx.set_source_rgba(1.0, 0.0, 0.0, 1.0)
ctx.set_line_width(2.0)
ctx.stroke()
ctx.restore()
surface.write_to_png('test.png')
It results in the following picture:
My intention is to draw two circles and text. The line between the text and the bottom right circle is not intended to exist. I can make the line disappear by adding / uncommenting the ctx.stroke() call directly underneath PangoCairo.show_layout in the center text code block.
It works, but it does not feel right. The text does not require a line stroke. What is going wrong? Is the stroke actually required or have I made another mistake?
Try ctx.new_path() instead of ctx.stroke().
Showing the layout seems to set the current point and thus the following line_to that arc does implicitly at its beginning actually shows a line.

How to set clipping planes with opengl and pyglet

I am troubleshooting a problem with my code that if the depth value of any primitive is not zero it will not render on the screen. I suspect that it gets clipped away.
Is there an easy pythonic way to set my clipping planes in pyglet ?
This is my code so far:
import pyglet
from pyglet.gl import *
import pywavefront
from camera import FirstPersonCamera
def drawloop(win,camera):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
#glClearColor(255,255,255,255)
glLoadIdentity()
camera.draw()
pyglet.graphics.draw(2, pyglet.gl.GL_POINTS,
('v3f', (10.0, 15.0, 0.0, 30.0, 35.0, 150.0))
)
glPointSize(20.)
return pyglet.event.EVENT_HANDLED
def main():
win = pyglet.window.Window()
win.set_exclusive_mouse(True)
win.clear()
camera = FirstPersonCamera(win)
#win.event
def on_draw():
drawloop(win,camera)
def on_update(delta_time):
camera.update(delta_time)
pyglet.clock.schedule(on_update)
pyglet.app.run()
if __name__ == '__main__':
main()
I am using the FirstPersonCamera snippet from here:
https://gist.github.com/mr-linch/f6dacd2a069887a47fbc
I am troubleshooting a problem with my code that if the depth value of any primitive is not zero it will not render on the screen. I suspect that it gets clipped away.
You have to set up a projection matrix to solve the issue. Either set up an orthographic projection matrix or a perspective projection matrix.
The projection matrix describes the mapping from 3D points of the view on a scene, to 2D points on the viewport. It transforms from eye space to the clip space, and the coordinates in the clip space are transformed to the normalized device coordinates (NDC) by dividing with the w component of the clip coordinates. The NDC are in range (-1,-1,-1) to (1,1,1). Every geometry which is out of the clippspace is clipped.
At Orthographic Projection the coordinates in the view space are linearly mapped to clip space coordinates and the clip space coordinates are equal to the normalized device coordinates, because the w component is 1 (for a cartesian input coordinate).
The values for left, right, bottom, top, near and far define a box. All the geometry which is inside the volume of the box is "visible" on the viewport.
At Perspective Projection the projection matrix describes the mapping from 3D points in the world as they are seen from of a pinhole camera, to 2D points of the viewport. The eye space coordinates in the camera frustum (a truncated pyramid) are mapped to a cube (the normalized device coordinates).
To set a projection matrix the projection matrix stack has to be selected by glMatrixMode.
An orthographic projection can be set by glOrhto:
w, h = 640, 480 # default pyglet window size
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho( -w/2, w/2, -h/2, h/2, -1000.0, 1000.0) # [near, far] = [-1000, 1000]
glMatrixMode(GL_MODELVIEW)
....
An perspective projection can be set by gluPerspective:
w, h = 640, 480 # default pyglet window size
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective( 90.0, 640.0/480, 0.1, 1000.0) # fov = 90 degrees; [near, far] = [0.1, 1000]
glMatrixMode(GL_MODELVIEW)
....
I recommend to use the following coordinates, to "see" the points in both of the above cases:
e.g.:
pyglet.graphics.draw(2, pyglet.gl.GL_POINTS,
('v3f', (-50.0, -20.0, -200.0, 40.0, 20.0, -250.0)))
glPointSize(20.0)

PyOpenGL transforming view coordinates into object coordinates for ArcBall navigation

I am following this tutorial for arcball navigation in 3d:
https://en.wikibooks.org/wiki/OpenGL_Programming/Modern_OpenGL_Tutorial_Arcball
I managed to perform all the steps and the navigation works but I cant seem to comprehend last step in tutorial:
An extra trick is converting the rotation axis from camera coordinates
to object coordinates. It's useful when the camera and object are
placed differently. For instace, if you rotate the object by 90° on
the Y axis ("turn its head" to the right), then perform a vertical
move with your mouse, you make a rotation on the camera X axis, but it
should become a rotation on the Z axis (plane barrel roll) for the
object. By converting the axis in object coordinates, the rotation
will respect that the user work in camera coordinates (WYSIWYG). To
transform from camera to object coordinates, we take the inverse of
the MV matrix (from the MVP matrix triplet).
The problem is that when i turn the model in the first step axis of rotation transform as well and they are not aligned with my "camera view". Of course I would like to keep my rotation axes always aligned with my camera view.
Can someone please give me an advice how to tackle it? In the tutorial there is a code but not much of explanation on what it is actually doing plus I only speak Python.
Thank you,
Jacob
My code:
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
import math
import os
import numpy as np
size = 30
speed = 500
amplitude_amplificator = 80
color_table = ((1,0,0),
(0,1,0),
(0,0,1),
(1,1,0),
(1,0,1),
(0,1,1),
(1,0.5,0),
(0.5,1,0),
(0.5,1,0.5),
(0,0.5,0)
)
locations = ((0,-975, 0),
(0, 975, 0),
(-1273,-975, 0),
(-1273, 975, 0),
(-2482, -975, 0),
(-2482, 975, 0),
(-3737, -975, 0),
(-3737, 975, 0)
)
lines = ((0,2),
(2, 4),
(4, 6),
(1, 3),
(3, 5),
(5, 7),
(0, 1),
(2, 3),
(4, 5),
(6, 7),
)
amplitudes = ((3.38829249165602, 2.38305866657961, 2.52151563664636),
(5.08487438107113, 2.36432294667884, 3.0843991148654),
(3.44312569856563, 1.23112415468012, 1.29869765112226),
(4.0421066637935, 1.40655294535107, 1.36083778879317),
(3.78074337117764, 0.648255908566916, 0.752239154016233),
(5.08887133464996, 0.607037324785205, 0.543523234321567),
(4.49095206021647, 0.432732677308301, 2.18289872563964),
(5.14707697114171, 0.335119576625248, 2.15666871777855)
)
phases = ((-146.873017352057,0,-95.316526141321),
(-149.008372080797, 5.24886681104675, 78.3075732082314),
(-148.241584335287, 5.54327579087787, -118.279685417256),
(-151.844141596427, 6.48705235395368, -113.246406750217),
(-148.14233553496, 27.9523171503408, 65.8254568277543),
(-157.058723259828, 38.8760924034639, 85.2339573112435),
(-153.417593784393, -120.329988461629, 16.0421535833842),
(-156.779107376825, 83.2350395893582, 10.7592173681729)
)
# DRAW CUBE
def Cube(po,si,co):
POS = (
(po[0]+si, po[1]-si, po[2]-si),
(po[0]+si, po[1]+si, po[2]-si),
(po[0]-si, po[1]+si, po[2]-si),
(po[0]-si, po[1]-si, po[2]-si),
(po[0]+si, po[1]-si, po[2]+si),
(po[0]+si, po[1]+si, po[2]+si),
(po[0]-si, po[1]-si, po[2]+si),
(po[0]-si, po[1]+si, po[2]+si)
)
edges = (
(0,1),
(0,3),
(0,4),
(2,1),
(2,3),
(2,7),
(6,3),
(6,4),
(6,7),
(5,1),
(5,4),
(5,7)
)
glBegin(GL_LINES)
for edge in edges:
for vertex in edge:
glColor3f(co[0],co[1],co[2])
glVertex3fv(POS[vertex])
glEnd()
#DRAW ORIGINAL SHAPE IN LINES
def Line_orig(po):
glBegin(GL_LINES)
for edge in po:
for vertex in edge:
glVertex3fv(locations[vertex])
glEnd()
#Hemisphere mapping
def map_hemisphere(x,y):
z = math.sqrt(abs(1-math.pow(x,2)-math.pow(y,2)))
return z
# Calculate angle of two spatial vectors
def angle_calculation(a,b):
r = math.degrees(math.acos((np.dot(a, b))/(np.linalg.norm(a)*np.linalg.norm(b))))
return r
def main():
mouse_pressed = 0
pygame.init()
display = (1200,800)
pygame.display.set_mode(display, DOUBLEBUF|OPENGL)
gluPerspective(45, (display[0]/display[1]), 0.1, 30000.0)
glTranslatef(0,0.0,-10000)
#glRotatef(90, 1, 0, 0)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
time = pygame.time.get_ticks()/1000
norm_mouse_pos = (2*pygame.mouse.get_pos()[0]/display[0]-1,2*pygame.mouse.get_pos()[1]/display[1]-1,map_hemisphere(2*pygame.mouse.get_pos()[0]/display[0]-1,2*pygame.mouse.get_pos()[1]/display[1]-1))
if pygame.mouse.get_pressed()[0]==1:
if mouse_pressed == 0:
mouse_pressed = 1
clear = lambda: os.system('cls')
clear()
p1 = (norm_mouse_pos[0],norm_mouse_pos[1],map_hemisphere(norm_mouse_pos[0],norm_mouse_pos[1]))
print(p1)
else:
p2 = (norm_mouse_pos[0],norm_mouse_pos[1],map_hemisphere(norm_mouse_pos[0],norm_mouse_pos[1]))
cist = np.cross(p1, p2)
print(angle_calculation(p1,p2))
glRotatef( angle_calculation(p1,p2) , -cist[0] , cist[1] , cist[2] )
else:
mouse_pressed = 0
# Translation of the model via keyboard handling
keys=pygame.key.get_pressed()
if keys[K_w]:
glTranslatef(0, 100, 0)
if keys[K_s]:
glTranslatef(0, -100, 0)
if keys[K_a]:
glTranslatef(-100, 0, 0)
if keys[K_d]:
glTranslatef(100, 0, 0)
# Drawing the Cubes at Nodes Loactions
for item, el in enumerate(locations):
Cube((el[0] + amplitudes[item][0]*math.sin(time + phases[item][0]*(3.1415927/180))*amplitude_amplificator,
el[1] + amplitudes[item][1]*math.sin(time + phases[item][1]*(3.1415927/180))*amplitude_amplificator,
el[2] + amplitudes[item][2]*math.sin(time + phases[item][2]*(3.1415927/180))*amplitude_amplificator
), size, color_table[item])
# Drawing the Original Shapes (Specified nodes in Lines Tuple)
Line_orig(lines)
# Drawing the Deformed Shape
glBegin(GL_LINES)
for edge in lines:
for vertex in edge:
glVertex3fv((locations[vertex][0] + amplitudes[vertex][0]*math.sin(time + phases[vertex][0]*(3.1415927/180))*amplitude_amplificator,
locations[vertex][1] + amplitudes[vertex][1]*math.sin(time + phases[vertex][1]*(3.1415927/180))*amplitude_amplificator ,
locations[vertex][2] + amplitudes[vertex][2]*math.sin(time + phases[vertex][2]*(3.1415927/180))*amplitude_amplificator,
))
glEnd()
# OpenGL Management
pygame.display.flip()
pygame.time.wait(10)
main()
The problem is that when i turn the model in the first step axis of rotation transform as well and they are not aligned with my "camera view". Of course I would like to keep my rotation axes always aligned with my camera view.
In a rendering, each mesh of the scene usually is transformed by the model matrix, the view matrix and the projection matrix.
Projection matrix:
The projection matrix describes the mapping from 3D points of a scene, to 2D points of the viewport.
View matrix:
The view matrix describes the direction and position from which the scene is looked at. The view matrix transforms from the wolrd space to the view (eye) space.
Model matrix:
The model matrix defines the location, oriantation and the relative size of a mesh in the scene. The model matrix transforms the vertex positions from of the mesh to the world space.
If you want to rotate the szene around an axis in view space, the you have to do the following:
Transform the model by all the rotations and translations that you have done before the new rotation operation.
Apply the new rotation operation.
Apply the view translation
Apply the projection matrix
Size the OpenGL fixed function pipeline has a matrix stack, this operations have to be done in the reverse order.
e.g. See the documentation of glMultMatrix:
glMultMatrix multiplies the current matrix with the one specified using m, and replaces the current matrix with the product.
In OpenGL there is one matrix stack for each matrix mode (See glMatrixMode). The matrix modes are GL_MODELVIEW, GL_PROJECTION, and GL_TEXTURE.
First you have to setup the projection matrix on the separated projection matrix stack:
glMatrixMode( GL_PROJECTION );
gluPerspective(45, (display[0]/display[1]), 0.1, 30000.0)
Next create a model matrix
a = (GLfloat * 16)()
modelMat = glGetFloatv(GL_MODELVIEW_MATRIX, a)
Init the model view matrix in the main loop:
glMatrixMode( GL_MODELVIEW );
glLoadIdentity()
Calcualte the new rotation and translation:
axis = (p2[0]- p1[0], p2[1]- p1[1])
glRotatef( angle_calculation(p1,p2), axis[1], axis[0], 0 )
Multiply the model matrix by the previous model matrix and store the combined model matrix:
glMultMatrixf( modelMat )
modelMat = glGetFloatv(GL_MODELVIEW_MATRIX, a)
Setup the view and apply the new model matrix:
glLoadIdentity()
glTranslatef(0,0.0,-10000)
glMultMatrixf( modelMat )
The final code may look like this:
.....
glMatrixMode( GL_PROJECTION );
gluPerspective(45, (display[0]/display[1]), 0.1, 30000.0)
a = (GLfloat * 16)()
modelMat = glGetFloatv(GL_MODELVIEW_MATRIX, a)
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
glMatrixMode( GL_MODELVIEW );
glLoadIdentity()
norm_mouse_pos = (2*pygame.mouse.get_pos()[0]/display[0]-1,2*pygame.mouse.get_pos()[1]/display[1]-1,map_hemisphere(2*pygame.mouse.get_pos()[0]/display[0]-1,2*pygame.mouse.get_pos()[1]/display[1]-1))
if pygame.mouse.get_pressed()[0]==1:
if mouse_pressed == 0:
mouse_pressed = 1
clear = lambda: os.system('cls')
clear()
p1 = (norm_mouse_pos[0],norm_mouse_pos[1],map_hemisphere(norm_mouse_pos[0],norm_mouse_pos[1]))
else:
p2 = (norm_mouse_pos[0],norm_mouse_pos[1],map_hemisphere(norm_mouse_pos[0],norm_mouse_pos[1]))
cist = np.cross(p1, p2)
axis = (p2[0]- p1[0], p2[1]- p1[1])
glRotatef( angle_calculation(p1,p2) , axis[1] , axis[0] , 0 )
else:
mouse_pressed = 0
# Translation of the model via keyboard handling
keys=pygame.key.get_pressed()
if keys[K_w]:
glTranslatef(0, 100, 0)
if keys[K_s]:
glTranslatef(0, -100, 0)
if keys[K_a]:
glTranslatef(-100, 0, 0)
if keys[K_d]:
glTranslatef(100, 0, 0)
glMultMatrixf( modelMat )
modelMat = glGetFloatv(GL_MODELVIEW_MATRIX, a)
glLoadIdentity()
glTranslatef(0,0.0,-10000)
glMultMatrixf( modelMat )
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
.....
The "ValueError: math domain error", which sometimes occurs, is because the arcus cosine of a value is defined only, if the value is in the range [-1, 1]. Clamp the value to this range (min(1,max(cos_a,-1))):
def angle_calculation(a,b):
cos_a = np.dot(a, b) / (np.linalg.norm(a)*np.linalg.norm(b))
r = math.degrees(math.acos( min(1,max(cos_a,-1)) ))
return r

Categories