Difference in passing parameters to OpenGL functions in LWJGL and PyOpenGL - Python

I am learning OpenGL with Python and following this course:
https://www.youtube.com/watch?v=WMiggUPst-Q&list=PLRIWtICgwaX0u7Rf9zkZhLoLuZVfUksDP&index=2
He is using LWJGL; I am using PyOpenGL. I noticed that some of his calls (glGenVertexArrays, glDeleteVertexArrays, etc.) are made without parameters, even though the docs say otherwise. When I wrote the same code in Python, it said:
glGenVertexArrays requires 1 arguments (n, arrays), received 0: ()
So it wants a parameter from me for the same function. For glGenVertexArrays that is not a problem (I think), I just pass 1, but with glDeleteVertexArrays, if I don't pass a count and the list in which I keep the VAO/VBO ids, it raises this:
Traceback (most recent call last):
File "C:\Users\TheUser\AppData\Local\Programs\Python\Python38-32\lib\site-packages\OpenGL\latebind.py", line 43, in call
return self._finalCall( *args, **named )
TypeError: 'NoneType' object is not callable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/TheUser/Desktop/MyPytonDen/ThinMatrixOpenGl/engineTester/MainGameLoop.py", line 22, in
Loader.CleanUP()
File "C:\Users\TheUser\Desktop\MyPytonDen\ThinMatrixOpenGl\renderEngine\Loader.py", line 12, in CleanUP
glDeleteVertexArrays()
File "C:\Users\TheUser\AppData\Local\Programs\Python\Python38-32\lib\site-packages\OpenGL\latebind.py", line 47, in call
return self._finalCall( *args, **named )
File "C:\Users\TheUser\AppData\Local\Programs\Python\Python38-32\lib\site-packages\OpenGL\wrapper.py", line 689, in wrapperCall
pyArgs = tuple( calculate_pyArgs( args ))
File "C:\Users\TheUser\AppData\Local\Programs\Python\Python38-32\lib\site-packages\OpenGL\wrapper.py", line 436, in calculate_pyArgs
raise ValueError(
ValueError: glDeleteVertexArrays requires 2 arguments (n, arrays), received 0: ()
I handled it as I said, but I don't think that is the proper way.
So I am asking what it actually wants from me (the docs weren't explicit enough for me), and why PyOpenGL requires it but LWJGL does not.
This is the file:
from ThinMatrixOpenGl.renderEngine.RawModel import RawModel
from OpenGL.GL import *
import numpy as np

VAOs = []
VBOs = []


def CleanUP():
    print(VAOs, VBOs)
    for vao in VAOs:
        glDeleteVertexArrays(int(vao), VAOs)
    for vbo in VBOs:
        glDeleteBuffers(int(vbo), VBOs)


def LoadToVao(positions):
    global VAOs
    VAO_ID = CreateVao()
    VAOs.append(VAO_ID)
    storeDataInAttribList(0, positions)
    unbindVao()
    return RawModel(vao_id=VAO_ID, vertex_count=(len(positions) / 3))


def CreateVao():
    VAO_ID = glGenVertexArrays(1)
    glBindVertexArray(VAO_ID)
    return VAO_ID


def storeDataInAttribList(attrib_number: int, data: float):
    global VBOs
    VBO_id = glGenBuffers(1)
    VBOs.append(VBO_id)
    glBindBuffer(GL_ARRAY_BUFFER, VBO_id)
    buffer = StoreDataInFloatBuffer(data)
    glBufferData(GL_ARRAY_BUFFER, buffer, GL_STATIC_DRAW)
    glVertexAttribPointer(attrib_number, 3, GL_FLOAT, GL_FALSE, 0, None)
    glBindBuffer(GL_ARRAY_BUFFER, 0)


def unbindVao():
    glBindVertexArray(0)


def StoreDataInFloatBuffer(data: float):
    buffer = np.array(data, dtype=np.float32)
    return buffer

See the OpenGL 4.6 API Core Profile Specification - 10.3.1 Vertex Array Objects
void DeleteVertexArrays( sizei n, const uint *arrays );
See PyOpenGL - glDeleteVertexArrays:
Signature
glDeleteVertexArrays( GLsizei ( n ) , const GLuint *( arrays ) )-> void
def glDeleteVertexArrays( n , arrays )
The 2nd argument must be an array with the element type "uint":
def CleanUP():
    for vao in VAOs:
        np_vaos = np.array([vao], dtype="uint")
        glDeleteVertexArrays(np_vaos.size, np_vaos)
In newer PyOpenGL versions, however, the second argument can also be a list:
def CleanUP():
    glDeleteVertexArrays(len(VAOs), VAOs)
When using LWJGL, the size argument (n) is deduced from the Java array object. Different libraries in different languages provide different overloads for the OpenGL API functions. If a function behaves unexpectedly and differs from the OpenGL specification, you must consult the API documentation for the libraries.
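For completeness, here is a minimal cleanup sketch in the spirit of the examples above, assuming a recent PyOpenGL that accepts plain Python lists (the names follow the question's code):

def CleanUP():
    # One delete call per object type; PyOpenGL converts the Python lists
    # to the required uint arrays internally.
    if VAOs:
        glDeleteVertexArrays(len(VAOs), VAOs)
    if VBOs:
        glDeleteBuffers(len(VBOs), VBOs)
    VAOs.clear()
    VBOs.clear()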

Related

PyOpenGL, pygame, and errors when drawing a shape

I've been writing a custom snake game using python, pygame, and pyopengl. I'm trying to draw a shape on the screen. However, I've stumbled upon this error:
Traceback (most recent call last):
File "F:\Projects\python\Python_Game\venv\lib\site-packages\OpenGL\latebind.py", line 43, in __call__
return self._finalCall( *args, **named )
TypeError: 'NoneType' object is not callable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "F:\Projects\python\Python_Game\src\game.py", line 35, in <module>
main()
File "F:\Projects\python\Python_Game\src\game.py", line 31, in main
game.draw_shapes()
File "F:\Projects\python\Python_Game\src\game_classes.py", line 203, in draw_shapes
f.draw()
File "F:\Projects\python\Python_Game\src\game_classes.py", line 128, in draw
shape.draw()
File "F:\Projects\python\Python_Game\src\opengl_classes.py", line 128, in draw
glVertex2fv(cosine, sine)
File "F:\Projects\python\Python_Game\venv\lib\site-packages\OpenGL\latebind.py", line 47, in __call__
return self._finalCall( *args, **named )
File "F:\Projects\python\Python_Game\venv\lib\site-packages\OpenGL\wrapper.py", line 689, in wrapperCall
pyArgs = tuple( calculate_pyArgs( args ))
File "F:\Projects\python\Python_Game\venv\lib\site-packages\OpenGL\wrapper.py", line 450, in calculate_pyArgs
yield converter(args[index], self, args)
File "F:\Projects\python\Python_Game\venv\lib\site-packages\OpenGL\arrays\arrayhelpers.py", line 115, in asArraySize
byteSize = handler.arrayByteCount( result )
AttributeError: ("'NumberHandler' object has no attribute 'arrayByteCount'", <function asArrayTypeSize.<locals>.asArraySize at 0x000002642A35DCA0>)
The console is throwing me a TypeError and an Attribute error. I'm not sure if this is due to my code or an issue with one of the libraries. I'm using Python 3.9.1, pygame 2.0.1, and PyOpenGL 3.1.5.
Here's the snippet of my script where the issue arises:
class Circle:
    def __init__(self, pivot: Point, radius: int, sides: int, fill: bool, color: Color):
        self.pivot = pivot
        self.radius = radius
        self.sides = sides
        self.fill = fill
        self.color = color

    # Draw the shape of the circle
    def draw(self):
        glColor3f(self.color.r, self.color.g, self.color.b)
        if self.fill:
            glBegin(GL_POLYGON)
        else:
            glBegin(GL_LINE_LOOP)
        for i in range(100):
            cosine = self.radius * cos(i*2*pi/self.sides) + self.pivot.x
            sine = self.radius * sin(i*2*pi/self.sides) + self.pivot.y
            glVertex2fv(cosine, sine)
        glEnd()
The argument of glVertex2fv must be an array with 2 elements. If you have two separate coordinates which are not aggregated in an array, you must use glVertex2f. See glVertex. Replace
glVertex2fv(cosine, sine)
with
glVertex2f(cosine, sine)
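For reference, a sketch of the corrected loop, assuming the rest of the draw method stays as posted:

for i in range(100):
    cosine = self.radius * cos(i*2*pi/self.sides) + self.pivot.x
    sine = self.radius * sin(i*2*pi/self.sides) + self.pivot.y
    # glVertex2f takes two separate floats; glVertex2fv would instead need
    # a single 2-element array such as (cosine, sine).
    glVertex2f(cosine, sine)
glEnd()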

How do I pickle pyEphem objects for multiprocessing?

I am trying to calculate some values for satellites. The data generation takes quite a long time, so I want to implement it using multiprocessing.
The problem is that I get this error from pyEphem: TypeError: can't pickle ephem.EarthSatellite objects. The pyEphem objects are not used in the functions that I want to parallelize.
This is an example file of my code (minimized).
This is my main file:
main.py
import ephem
import numpy
import math
import multiprocessing as mp
from SampleSats import Sats

GPS_Satellites = []
SFrames = 1

TLE = ["GPS BIIR-3 (PRN 11)",
       "1 25933U 99055A 18090.43292845 -.00000054 00000-0 00000+0 0 9994",
       "2 25933 51.8367 65.0783 0165007 100.2058 316.9161 2.00568927135407"]
# PRN TLE file from CelesTrak
GPS_Satellites.append(Sats(TLE))

Position = ephem.Observer()
Position.date = '2018/3/31 00:00'  # 1st January 2018 at 00:00 UTC
Position.lon, Position.lat = "36.845663", "-37.161123"  # Coordinates for desired Position

# Calculate Satellites
for Frames in range(SFrames):
    print("Generate Signals for Time: ", Position.date)
    for Sats in GPS_Satellites:  # par
        Sats.compute(Position)
        if ((float(repr(Sats.ephemeris.alt)) * 180 / math.pi) < 5) or (  # Calculate angle above horizon
                (float(repr(Sats.ephemeris.alt)) * 180 / math.pi) > 90):
            Sats.visible = 0
        else:
            Sats.visible = 1
    with mp.Pool() as pool:
        for value, obj in zip(pool.map(Sats.genSignal, GPS_Satellites), GPS_Satellites):
            obj.Signal = value
    Position.date = Position.date + 6*ephem.second  # 1 Subframe is 6 seconds long
This is the Sats class that I wrote:
sats.py:
import ephem
import numpy


class Sats:
    """Save Satellites as Objects"""

    def __init__(self, tle):
        """:param tle: Two Line Element for ephemeris data, also used to get PRN Number from name"""
        self.ephemeris = ephem.readtle(tle[0], tle[1], tle[2])
        self.visible = 1
        self.subframes = 0
        self.CAseq = [x for x in range(1023)]
        self.Out = []
        self.Signal = numpy.zeros(int(300*20*1023), dtype=numpy.int8)

    def compute(self, pos):
        self.ephemeris.compute(pos)
        self.Out.append(numpy.arange(0, 299, 1))
        self.subframes += 1

    def calcData(self, bit, prn):
        return (self.Out[self.subframes - 1].item(0)[0][bit] + self.CAseq[prn]) % 2

    def genSignal(self):
        if(self.visible == 1):
            for bit in range(300):  # 1 Subframe is 300 Bit long
                for x in range(20):  # The PRN Sequence reoccurs every ms -> 20 times per bit
                    for prn in range(1023):  # length of the prn sequence
                        self.Signal[bit*x*prn] = (-1 if (self.calcData(bit, prn)) == 0 else 1)
        else:
            self.Signal = numpy.zeros(300*20*1023)
        return self.Signal
Traceback:
Traceback (most recent call last):
File "C:/Users/PATH_TO_PROJECT/SampleTest.py", line 33, in <module>
for value, obj in zip(pool.map(Sats.genSignal, GPS_Satellites), GPS_Satellites):
File "C:\Program Files\Python36\lib\multiprocessing\pool.py", line 266, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "C:\Program Files\Python36\lib\multiprocessing\pool.py", line 644, in get
raise self._value
File "C:\Program Files\Python36\lib\multiprocessing\pool.py", line 424, in _handle_tasks
put(task)
File "C:\Program Files\Python36\lib\multiprocessing\connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "C:\Program Files\Python36\lib\multiprocessing\reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
TypeError: can't pickle ephem.EarthSatellite objects
The reason is something like this... when you try to pickle a function, it can attempt to pickle globals(), so whatever you have in your global namespace is also pickled (just in case your function has a reference to something in globals() -- yes, that's unexpected, but that's how it is). So an easy fix is to isolate the function you want to pickle in another file -- in this case, put the multiprocessing stuff in one file and the other code in another file, so there's less in globals() for the pickler to struggle with.
Another thing that might help is to use multiprocess instead of multiprocessing -- multiprocess uses the dill serializer instead of pickle, so you have a better chance of serializing the objects that will be sent across the workers in the Pool.
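A minimal sketch of the second suggestion, assuming the multiprocess package is installed (pip install multiprocess) and the rest of main.py stays as posted:

import multiprocess as mp  # dill-based drop-in replacement for multiprocessing

# ... build GPS_Satellites and Position exactly as in main.py ...

with mp.Pool() as pool:
    # dill can serialize more object types than pickle, so the ephem-backed
    # Sats instances have a better chance of reaching the worker processes.
    signals = pool.map(Sats.genSignal, GPS_Satellites)
for obj, value in zip(GPS_Satellites, signals):
    obj.Signal = value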

Dynamic Mode Decomposition using the modred module in python

I am trying to write code for dynamic mode decomposition using the modred module in Python. I wrote the CustomVector class and CustomVecHandle class following the modred documentation, like this:
class CustomVector(mr.Vector):
    def __init__(self, grids, data_array):
        self.grids = grids
        self.data_array = data_array
        self.weighted_ip = mr.InnerProductTrapz(*self.grids)

    def __add__(self, other):
        """Return a new object that is the sum of self and other"""
        sum_vec = deepcopy(self)
        sum_vec.data_array = self.data_array + other.data_array
        return sum_vec

    def __mul__(self, scalar):
        """Return a new object that is ``self * scalar`` """
        mult_vec = deepcopy(self)
        mult_vec.data_array = mult_vec.data_array * scalar
        return mult_vec

    def inner_product(self, other):
        return self.weighted_ip(self.data_array, other.data_array)


class CustomVecHandle(mr.VecHandle):
    def __init__(self, vec_path, base_handle=None, scale=None):
        mr.VecHandle.__init__(self, base_handle, scale)
        self.vec_path = vec_path

    def _get(self):
        # read in the values
        print("reading data from {}".format(self.vec_path))
        reader = vtk.vtkPolyDataReader()
        reader.SetFileName(self.vec_path)
        reader.Update()
        grids = dsa.WrapDataObject(reader.GetOutput()).Points
        data_array = dsa.WrapDataObject(reader.GetOutput()).PointData['U']
        return CustomVector(grids, data_array)

    def _put(self, vec):
        print("writing data to {}".format(self.vec_path))
        common_writer.SetFileName(self.vec_path)
        U_common_reader[:] = vec.data_array
        ier1 = common_writer.Write()  # return 1 means success
        grids_common_reader[:] = vec.grids
        ier2 = common_writer.Write()
        if ier1 != 1 or ier2 != 1:
            raise Error()
        return
And another function is defined outside all classes as:
def inner_product(v1, v2):
    return v1.inner_product(v2)
I have created a list object as:
vec_handles=[CustomVecHandle(os.path.join(data_root,d,"U_zNormal.vtk")) for d in dirs]
Then I use the following code to compute the modes:
myDMD=mr.DMDHandles(inner_product,max_vecs_per_node=50)
myDMD.compute_decomp(vec_handles)
But when I run the code, I get this error:
Traceback (most recent call last):
File "vtkDMD.py", line 192, in <module>
eigvals,Rlo_eigvecs,Llo_eigvecs,cm_eigvals,cl_eigvecs = myDMD.compute_decomp(vec_handles)
File "C:\Python27\lib\site-packages\modred\dmd.py", line 679, in compute_decomp
self.vec_handles)
File "C:\Python27\lib\site-packages\modred\vectorspace.py", line 495, in compute_symmetric_inner_product_mat
IP_burn = self.inner_product(test_vec, test_vec)
File "vtkDMD.py", line 142, in inner_product
return v1.inner_product(v2)
File "vtkDMD.py", line 138, in inner_product
return self.weighted_ip(self.data_array, other.data_array)
File "C:\Python27\lib\site-packages\modred\vectors.py", line 159, in __call__
return self.inner_product(vec1, vec2)
File "C:\Python27\lib\site-packages\modred\vectors.py", line 168, in inner_product
IP = np.trapz(IP, x=grid)
File "C:\pv54\bin\lib\site-packages\numpy\lib\function_base.py", line 2941, in trapz
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
ValueError: operands could not be broadcast together with shapes (2,)(28330,)
At first I thought it was because the wrong parameters were given to the function:
return CustomVector(grids, data_array)
Then I replaced grids with grids.T (the transpose of grids), but the same error appeared again. I also get the same error when I replace data_array with data_array.T.
So, does anybody have a good idea to solve this error?
I encountered the same issue while computing POD using the same module. The two arrays you are dealing with have shapes (2,) and (28330,). Try converting them to numpy arrays where both dimensions are specified explicitly. That will solve the problem.
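A minimal sketch of that suggestion applied inside _get; whether this alone resolves the broadcast error depends on how your VTK data is laid out:

import numpy as np

def _get(self):
    reader = vtk.vtkPolyDataReader()
    reader.SetFileName(self.vec_path)
    reader.Update()
    wrapped = dsa.WrapDataObject(reader.GetOutput())
    # Convert the VTK array wrappers to plain numpy arrays so that both
    # dimensions are explicit before they reach mr.InnerProductTrapz.
    grids = np.asarray(wrapped.Points, dtype=float)
    data_array = np.asarray(wrapped.PointData['U'], dtype=float)
    print(grids.shape, data_array.shape)  # verify the shapes actually match
    return CustomVector(grids, data_array)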

Pass a function as argument to a process target with Pool.map()

I'm developing software to benchmark some Python scripts using different methods (single-threaded, multi-threaded, multi-process). So I need to execute the same function (with the same arguments, and so on) in different processes.
How do I pass the function to execute as an argument to a process target?
What I currently understand is that a reference to a function cannot work because the referenced function is not visible to the other processes; that's why I tried a custom manager for shared memory.
Here a simplified code:
#!/bin/python
from multiprocessing import Pool
from multiprocessing.managers import BaseManager
from itertools import repeat


class FunctionManager(BaseManager):
    pass


def maFunction(a, b):
    print(a + b)


def threadedFunction(f_i_args):
    (f, i, args) = f_i_args
    f(*args)


FunctionManager.register('Function', maFunction)
myManager = FunctionManager()
myManager.start()

myManager.Function(0, 0)  # Test 1
threadedFunction((maFunction, 0, (1, 1)))  # Test 2

p = Pool()
args = zip(repeat(myManager.Function), range(10), repeat(2, 2))
p.map(threadedFunction, args)  # Does not work
p.join()
myManager.shutdown()
The current pickling error at "p.map()" is the following:
2
0
Traceback (most recent call last):
File "./test.py", line 27, in <module>
p.map(threadedFunction, args) # Does not work
File "/usr/lib/python3.5/multiprocessing/pool.py", line 260, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/usr/lib/python3.5/multiprocessing/pool.py", line 608, in get
raise self._value
File "/usr/lib/python3.5/multiprocessing/pool.py", line 385, in _handle_tasks
put(task)
File "/usr/lib/python3.5/multiprocessing/connection.py", line 206, in send
self._send_bytes(ForkingPickler.dumps(obj))
File "/usr/lib/python3.5/multiprocessing/reduction.py", line 50, in dumps
cls(buf, protocol).dump(obj)
_pickle.PicklingError: Can't pickle <class 'weakref'>: attribute lookup weakref on builtins failed
I got a slightly different error from running your code. Your key problem, I think, is that you pass a function to FunctionManager.register() instead of a class. I also had to remove your zip to make it work and create a list manually, but you can probably fix that. This is just an example.
The following code works and does something using your exact structure. I would do this a bit differently and not use BaseManager, but I assume you have your reasons.
#!/usr/bin/python3.5
from multiprocessing import Pool
from multiprocessing.managers import BaseManager
from itertools import repeat


class FunctionManager(BaseManager):
    pass


class maClass(object):
    def __init__(self):
        pass

    def maFunction(self, a, b):
        print(a + b)


def threadedFunction(f_i_args):
    (f, i, args) = f_i_args
    f(*args)


FunctionManager.register('Foobar', maClass)
myManager = FunctionManager()
myManager.start()
foobar = myManager.Foobar()

foobar.maFunction(0, 0)  # Test 1
threadedFunction((foobar.maFunction, 0, (1, 1)))  # Test 2

p = Pool()
# args = list(zip(repeat(foobar.maFunction), range(10), repeat(2, 2)))
args = []
for i in range(10):
    args.append([foobar.maFunction, i, (i, 2)])
p.map(threadedFunction, args)  # Does now work
p.close()
p.join()
myManager.shutdown()
Or did I misunderstand your problem completely?
Hannu

Python OpenCL host program to cl program parameter passing

Hi, I am trying OpenCL using Python. I am trying to pass an array and a const variable to the CL program and simply copy the const variable into the array on the CL device. This should be very simple, but I am getting the following error:
Traceback (most recent call last):
File "<pyshell#103>", line 1, in <module>
test()
File "D:/Programming/Programs_OpenCL_Python/Host_CL_Parameter_Passing.py", line 141, in test
event = prg.test( queue, (10,1), None, a_dev, b)
File "C:\Python27\lib\site-packages\pyopencl-2012.1-py2.7-win32.egg\pyopencl\__init__.py", line 457, in kernel_call
self.set_args(*args)
File "C:\Python27\lib\site-packages\pyopencl-2012.1-py2.7-win32.egg\pyopencl\__init__.py", line 509, in kernel_set_args
% (i+1, str(e), advice))
LogicError: when processing argument #2 (1-based): Kernel.set_arg failed: invalid value - invalid kernel argument
Here's the code:
def test():
    ctx = cl.create_some_context()
    queue = cl.CommandQueue(ctx)
    a = np.empty(10, dtype = int)
    b = int(1)
    a_dev = cl.Buffer(ctx, cl.mem_flags.WRITE_ONLY, a.nbytes)
    prg = cl.Program(ctx, """__kernel void test(__global int *a, const int b){
            int i = get_global_id(0);
            a[i] = b;
        }""").build()
    event = prg.test(queue, (10, 1), None, a_dev, b)
    event.wait()
    cl.enqueue_copy(queue, a, a_dev)
    print a
Can someone tell me the problem and give me a solution? This is driving me crazy.
Thank you.
You need to convert the integer argument to the numpy int32 type:
event = prg.test( queue, (10,1), None, a_dev, np.int32(b))
BTW I was able to figure that out by looking at the Mandelbrot Example
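For reference, a sketch of the corrected function in context (assuming the rest of test() stays as posted; matching the host array's dtype to the kernel's int is a reasonable extra precaution):

import numpy as np
import pyopencl as cl

def test():
    ctx = cl.create_some_context()
    queue = cl.CommandQueue(ctx)
    a = np.empty(10, dtype=np.int32)  # match the kernel's "__global int *a"
    b = np.int32(1)                   # scalar kernel arguments need a numpy dtype
    a_dev = cl.Buffer(ctx, cl.mem_flags.WRITE_ONLY, a.nbytes)
    prg = cl.Program(ctx, """__kernel void test(__global int *a, const int b){
            int i = get_global_id(0);
            a[i] = b;
        }""").build()
    event = prg.test(queue, (10, 1), None, a_dev, b)
    event.wait()
    cl.enqueue_copy(queue, a, a_dev)
    print(a)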
