Render to screen and to FBO give different result - python

I wrote a program to render a textured square both to the screen and to an FBO. I believe the rendering result should be the same in both cases, with only the output destination differing. Surprisingly, the two give completely different results.
For the first image, I output directly to the screen. It looks good, without any problems. For the second image, I rendered to an FBO backed by a texture, and then rendered that texture to the screen. In this case, the output is totally messed up: it seems to contain only the upper-left corner of the image, enlarged.
Related code is:
def render(self):
    """Two-pass render: draw texture 0 into the FBO, then draw the FBO's
    color texture to the default framebuffer."""
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
    # Pass 1: render into the FBO (its color attachment is texture 1).
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self._fbids[1])
    # NOTE(review): no glViewport call here -- the viewport still matches the
    # window, not the FBO's texture, so the FBO pass only captures a zoomed
    # upper-left corner. Set glViewport to the texture size before drawing
    # into the FBO, and restore the window-sized viewport afterwards.
    gl.glBindTexture(gl.GL_TEXTURE_2D, self._texids[0])
    gl.glUseProgram(0)  # fixed-function pipeline, no shader
    self.drawQuad2()
    # Pass 2: back to the default framebuffer, sampling the FBO's texture.
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    gl.glBindTexture(gl.GL_TEXTURE_2D, self._texids[1]) # This is for second, messed up output.
    # gl.glBindTexture(gl.GL_TEXTURE_2D, self._texids[0]) # This is for first, normal output.
    gl.glUseProgram(0)
    self.drawQuad2()
def drawQuad2(self):
    """Draw a full-viewport textured quad in immediate mode and flush."""
    # (texcoord, vertex) pairs, in the same winding order as before.
    corners = (
        ((0, 1), (-1, 1)),
        ((1, 1), (1, 1)),
        ((1, 0), (1, -1)),
        ((0, 0), (-1, -1)),
    )
    gl.glBegin(gl.GL_QUADS)
    for (s, t), (x, y) in corners:
        gl.glTexCoord2f(s, t)
        gl.glVertex2f(x, y)
    gl.glEnd()
    gl.glFlush()
def setupTexture(self, num, rawstr, width, height):
    """Create `num` RGBA textures of width x height, each initialized from
    the raw pixel string `rawstr`, and return the generated texture ids."""
    texids = gl.glGenTextures(num)
    for tid in texids:
        print 'texture binded %s' % tid
        gl.glBindTexture(gl.GL_TEXTURE_2D, tid)
        gl.glTexImage2D(
            gl.GL_TEXTURE_2D,
            0, gl.GL_RGBA, width, height,
            0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE,
            rawstr)
        # Use NEAREST for both filters: no mipmaps are generated here, and
        # the default MIN filter would require them.
        gl.glTexParameteri(
            gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        gl.glTexParameteri(
            gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
        gl.glEnable(gl.GL_TEXTURE_2D);
    return texids
def setupFramebuffer(self, num, texids):
assert(len(texids) == num)
fbids = gl.glGenFramebuffers(num)
for i in range(len(fbids)):
print 'framebuffer binded %s with texture %s' % (fbids[i], texids[i])
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbids[i])
gl.glFramebufferTexture2D(
gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0,
gl.GL_TEXTURE_2D, texids[i], 0)
return fbids
def reshape(self, w, h):
    """Window-resize handler: update the viewport and set a [-1, 1]
    orthographic projection matching the quad drawn by drawQuad2."""
    if not h:
        return  # avoid a degenerate projection when height is 0 (minimized)
    print 'reshape. w:%s, h:%s' % (w, h)
    gl.glMatrixMode(gl.GL_PROJECTION)
    gl.glLoadIdentity()
    gl.glViewport(0, 0, w, h)
    gl.glOrtho(-1, 1, -1, 1, -1, 1)
    gl.glMatrixMode(gl.GL_MODELVIEW)
    gl.glLoadIdentity()

You must use the glViewport function to tell OpenGL the size of the target it shall address with its drawing operations. So when you switch to rendering into an FBO, you must call glViewport with dimensions that make sense for the target — usually the size of the attached texture itself. When switching back to rendering to the window, call glViewport with the window's size.
If destination viewport is larger than the destination framebuffer this effectively results in a zoom-in.

Related

Pyglet render into texture

I'm writing a drawing program using pyglet, and I want to be able to have the image being created as separate from the window's buffer (for instance, the image could be larger than the window, or may want to draw to this image at a different rate than the main window is being re-drawn). I want to be able to draw into this off-screen image, then display it in the window, but pyglet doesn't allow drawing to anything else than a window. Is there any simple way I can do this?
I've tried creating a second hidden pyglet window, but this gets rendered at the same rate as the main window which I definitely don't want.
The closest I found was Pyglet draw text into texture, but the code there isn't complete, and also no longer works as the opengl version used by pyglet has moved on.
The following code works for me, perhaps someone else can improve on my answer:
import pyglet
from pyglet.gl import *
from ctypes import byref

W, H = 800, 600

# Off-screen image backed by an OpenGL texture: drawing goes into the
# texture through an FBO, and the window just blits the texture each frame.
image = pyglet.image.create(W, H)
texture = image.get_texture()

window = pyglet.window.Window(width=W, height=H)

# Create a framebuffer object and attach the texture as its color buffer.
# FIX: use GLuint from the star import -- the bare name 'gl' is not
# reliably bound by 'from pyglet.gl import *'.
fbo_id = GLuint(0)
glGenFramebuffers(1, byref(fbo_id))
glBindFramebuffer(GL_FRAMEBUFFER, fbo_id)
glBindTexture(GL_TEXTURE_2D, texture.id)
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture.id, 0)

# Seed the off-screen image while the FBO is still bound.
rect1 = pyglet.shapes.Rectangle(0, 0, W, H, (255, 0, 0) )
rect2 = pyglet.shapes.Rectangle(W//4, H//4, W//2, H//2, (0, 0, 255) )
label = pyglet.text.Label("Hello World", font_name="Times New Roman", font_size=36,
                          x=W//2, y=H//2, anchor_x="center", anchor_y="center")
rect1.draw()
rect2.draw()
label.draw()


@window.event  # FIX: was '#window.event' -- the decorator had been commented out
def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
    # Draw the drag stroke into the off-screen texture, not the window.
    glBindFramebuffer(GL_FRAMEBUFFER, fbo_id)
    line = pyglet.shapes.Line(x-dx, y-dy, x, y, 3, (0, 255, 255))
    line.draw()


@window.event  # FIX: was '#window.event'
def on_draw():
    # Back to the default framebuffer: present the off-screen texture.
    glBindFramebuffer(GL_FRAMEBUFFER, 0)
    window.clear()
    texture.blit(0, 0)


pyglet.app.run()

Why is canvas.create_rectangle not working when used in tkinter.after()

I am trying to display an overlay using the overlay package which is built on top of tkinter. When I run this process, the output is correct and I see the overlay displays the correct fill colors (i.e. 3 rectangles are green, red, blue)
# Build the overlay window at the target window's on-screen position.
_overlay = Overlay(
    window.data["kCGWindowBounds"]["X"],
    window.data["kCGWindowBounds"]["Y"]
)
# Capture the same region of the screen for pixel sampling.
screen = Screen(
    window.data["kCGWindowBounds"]["X"],
    window.data["kCGWindowBounds"]["Y"],
    window.data["kCGWindowBounds"]["Height"],
    window.data["kCGWindowBounds"]["Width"]
)
screen.capture()
pattern = Pattern()
pattern.convertImageToRGB()
pixelOne = pattern.getColorOnePixel()
pixelTwo = pattern.getColorTwoPixel()
_overlay.displayPattern(pixelOne, pixelTwo)
# NOTE(review): presumably launch() enters the tkinter mainloop and blocks
# from here on -- confirm against the overlay package.
overlay.Window.launch()
...
def displayPattern(self, pixelOne, pixelTwo):
    """Map the two sampled pixels to a color pattern and render it as three
    filled 54px-wide rectangles on a small canvas in the overlay window."""
    # Combine the two lookups; either may fall back to "Undefined".
    colors = COLOR_ONE_PIXEL_MAP.get(pixelOne, "Undefined") + COLOR_TWO_PIXEL_MAP.get(pixelTwo, "Undefined")
    pattern = COLOR_PATTERN_MAP.get(colors, "Undefined")
    # NOTE(review): a brand-new Canvas is created and packed on every call;
    # repeated calls stack canvases in the window. Consider creating the
    # canvas once and only redrawing the rectangles here.
    canvas = tk.Canvas(
        self.window.root,
        width=162,
        height=12,
        bd=0,
        highlightthickness=0
    )
    canvas.pack(
        padx=0,
        pady=0
    )
    if pattern != "Undefined":
        print(pattern)
        # pattern is apparently of the form "XX YY ZZ" (e.g. "GG RR BB"):
        # two-char color codes at indices 0:2, 3:5 and 6:8.
        canvas.create_rectangle(-1, -1, 54, 12, fill=COLOR_FILL_MAP.get(pattern[:2]), outline=COLOR_FILL_MAP.get(pattern[:2]))
        canvas.create_rectangle(54, -1, 108, 12, fill=COLOR_FILL_MAP.get(pattern[3] + pattern[4]), outline=COLOR_FILL_MAP.get(pattern[3] + pattern[4]))
        canvas.create_rectangle(108, -1, 162, 12, fill=COLOR_FILL_MAP.get(pattern[6] + pattern[7]), outline=COLOR_FILL_MAP.get(pattern[6] + pattern[7]))
    self.window.root.update()  # force an immediate redraw of the window
Now I need to continue to run this process so that when information on the window that the program is using updates, this function gets run again. So I update this logic to this:
# Schedule the first run of proceed() 500 ms after the overlay launches,
# passing the target window's bounds for subsequent screen captures.
overlay.Window.after(
    500,
    proceed,
    window.data["kCGWindowBounds"]["X"],
    window.data["kCGWindowBounds"]["Y"],
    window.data["kCGWindowBounds"]["Height"],
    window.data["kCGWindowBounds"]["Width"]
)
overlay.Window.launch()
...
def proceed(x, y, height, width):
    """Re-capture the screen region and update the overlay, forever.

    NOTE(review): this is the broken variant under discussion -- the
    while True / time.sleep loop never returns control to tkinter's
    event loop, so the canvas is never repainted.
    """
    while True:
        screen = Screen(
            x,
            y,
            height,
            width
        )
        screen.capture()
        pattern = Pattern()
        pattern.convertImageToRGB()
        pixelOne = pattern.getColorOnePixel()
        pixelTwo = pattern.getColorTwoPixel()
        _overlay.displayPattern(pixelOne, pixelTwo)
        time.sleep(.5)
I'm seeing the correct output in the print() (i.e. GG RR BB green red blue), but those fill colors are no longer being applied to the canvas.create_rectangle method. Is there something I'm missing with the after() process and packing tkinter canvas elements?
tkinter (like other GUIs) doesn't draw/update elements at once but it waits for end of your function to redraw/update all elements in one moment - this way it has less work and window doesn't blink.
Problem is that your function runs while True so it never ends.
You may use root.update() inside loop to force tkinter to redraw elements
def proceed(x, y, height, width):
    """Endless capture/redraw loop, forcing a tkinter repaint each pass."""
    while True:
        screen = Screen(
            x,
            y,
            height,
            width
        )
        screen.capture()
        pattern = Pattern()
        pattern.convertImageToRGB()
        pixelOne = pattern.getColorOnePixel()
        pixelTwo = pattern.getColorTwoPixel()
        _overlay.displayPattern(pixelOne, pixelTwo)
        overlay.Window.update() # <--- force tkinter to redraw/update window
        time.sleep(.5)
OR you can use after(500, proceed, ...) instead of while True and time.sleep()
def proceed(x, y, height, width):
    """Capture the screen region, update the overlay, then reschedule itself.

    Using after() instead of while True / time.sleep returns control to
    tkinter's event loop between iterations, so the canvas actually
    gets repainted.
    """
    screen = Screen(
        x,
        y,
        height,
        width
    )
    screen.capture()
    pattern = Pattern()
    pattern.convertImageToRGB()
    pixelOne = pattern.getColorOnePixel()
    pixelTwo = pattern.getColorTwoPixel()
    _overlay.displayPattern(pixelOne, pixelTwo)
    # FIX: was 'process', an undefined name -- reschedule this same function.
    overlay.Window.after(500, proceed, x, y, height, width)  # run again after 500ms

How to use raw pixel data in pyglet to change image size

I need to be able to change the width or height of an image before loading it into a sprite. I currently am using numpy to get either the center row or column and keep inserting it back into the image until it is the correct size. I do not know if this code even works yet, because I have also been having trouble loading the raw_image back into a sprite and displaying it.
def set_image_size(img, x, y):
    """Stretch `img` to at least x-by-y pixels by repeating its center
    row/column, so the border pixels are never warped.

    Returns a new pyglet ImageData of the stretched size.
    """
    import pyglet  # local import; the surrounding program already uses pyglet
    raw_img = img.get_image_data()
    fmt = 'RGBA'
    src_pitch = raw_img.width * len(fmt)
    # Decode the raw pixel string into an (height, width, 4) uint8 array.
    rgba = np.frombuffer(
        raw_img.get_data(fmt, src_pitch), dtype=np.uint8
    ).reshape(-1, raw_img.width, len(fmt))
    # Duplicate the middle row until the image is tall enough.
    mid_y = rgba[round(raw_img.height / 2), :]
    while rgba.shape[0] < y:
        rgba = np.insert(rgba, round(raw_img.height / 2), mid_y, 0)
    # Duplicate the middle column until the image is wide enough.
    mid_x = rgba[:, round(raw_img.width / 2)]
    while rgba.shape[1] < x:
        rgba = np.insert(rgba, round(raw_img.width / 2), mid_x, 1)
    # FIX: the original wrote the grown pixel data back via set_data() with
    # the *old* pitch and dimensions, shearing the rows (the "striped"
    # output), and ''.join(map(chr, ...)) produced str instead of bytes.
    # Build a fresh ImageData whose width/height/pitch match the new array.
    new_h, new_w = rgba.shape[:2]
    return pyglet.image.ImageData(
        new_w, new_h, fmt, rgba.astype(np.uint8).tobytes(), new_w * len(fmt))
When I try blitting this onto an abstract image and blitting that to the screen, I get a weird striped red version of my image. I don't need to worry too much about quality, I just want to "stretch/tile" only the inside of the image so that the edges don't get warped. Also, my images are very small, only 15 by 15 pixels or so.
How can I fix either of these issues?
EDIT:
When I use sprite.scale, it gets stretched in weird ways:
self.sprite = pyglet.sprite.Sprite(self.image, self.x, self.y, batch=batch)
self.sprite.scale_x = self.width / 16
Original Sprite:
Stretched (with sprite.scale):
This isn't as much of a problem for this sprite, because I can use OpenGL Quads to draw it, but I need to start using more complex sprites that need to be scaled in the above way.
So the problem lies with the scale of the image resolution you're originally working with, being 16px wide and 7px high. Up-scaling an image by default won't do any clever neighbor interpolation and "fill in the blanks" in the way you expect it to.
To solve this, you can tell OpenGL that each new pixel in your up-scaled image should take "inspiration" from its neighbors. You do this by adding to your render loop the following each time you render the sprite:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
It's important that this is called before every render of the sprites you need to add the filter to. Not sure what the technical jargon is for this, but it has to be setup each render sequence, because the next time you loop over your render function it gets reset. There's a fancy word for this I'm sure of it.
Here's a working example:
from pyglet import *
from pyglet.gl import *

# Shortcut for keyboard symbol constants used in the event handlers.
key = pyglet.window.key
class main(pyglet.window.Window):
    """Demo window that up-scales a tiny sprite with GL_NEAREST filtering
    so the enlarged pixels stay sharp instead of being smoothed."""

    def __init__(self, width=800, height=600, fps=False, *args, **kwargs):
        super(main, self).__init__(width, height, *args, **kwargs)
        self.x, self.y = 0, 0
        glEnable(GL_TEXTURE_2D) # Strictly speaking not needed, I think this is default or won't affect this test image in any significant way.
        self.keys = {}       # currently-held keys: symbol -> True
        self.mouse_x = 0
        self.mouse_y = 0
        # Load the small test image and place it at (50, 50) in window coords.
        self.sprite = pyglet.sprite.Sprite(pyglet.image.load('test.png'), 50, 50, batch=None)
        self.sprite.scale = 20  # uniform scale factor (scales width and height)
        self.alive = 1          # main-loop flag; cleared to exit run()

    def on_draw(self):
        self.render()

    def on_close(self):
        self.alive = 0

    def on_mouse_motion(self, x, y, dx, dy):
        self.mouse_x = x

    def on_key_release(self, symbol, modifiers):
        try:
            del self.keys[symbol]
        except:
            pass  # key was never tracked; ignore

    def on_key_press(self, symbol, modifiers):
        if symbol == key.ESCAPE: # [ESC]
            self.alive = 0
        self.keys[symbol] = True

    def render(self):
        self.clear()
        # Reapplied every frame: per the discussion above, the filter state
        # did not persist across renders in testing, so it is set before
        # each sprite draw.
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        self.sprite.draw()
        self.flip()

    def run(self):
        while self.alive == 1:
            self.render()
            # -----------> This is key <----------
            # This is what replaces pyglet.app.run()
            # but is required for the GUI to not freeze
            #
            event = self.dispatch_events()
And here's a test-image you can use:
(<-- Small white square, not zoomed in for effect)
And here's the result:

Pyglet ignores alpha channel

I'm trying to make a level editor for a game I'm writing in Python using Pyglet. I want to be able to modify the darkness of any block I place. As such, I have added a darkness attribute to the Block class. Here is that class:
class Block:
    """A placeable level block: an image plus an adjustable darkness
    overlay (a black quad whose alpha channel is `darkness`)."""

    def __init__(self, name, image, wall):
        self.name = name    # block identifier
        self.image = image  # image blitted as the block face
        self.wall = wall    # wall flag
        self.darkness = 0   # overlay alpha: 0 (transparent) .. 255 (opaque)

    def set_darkness(self, darkness):
        """Set the alpha of the black overlay drawn on top of the image."""
        self.darkness = darkness

    def draw(self, x, y):
        """Blit the block image at (x, y), then darken it with a quad.

        Requires GL blending to be enabled after the window exists for the
        alpha value to take effect.
        """
        self.image.blit(x, y)
        # FIX: the fourth vertex used y + block_width instead of
        # y + block_height, skewing the overlay for non-square blocks.
        pyglet.graphics.draw(
            4, GL_QUADS,
            ('v2i', (x, y,
                     x + block_width, y,
                     x + block_width, y + block_height,
                     x, y + block_height)),
            ('c4B', (0, 0, 0, self.darkness) * 4))
I am trying to darken an image by drawing a black rectangle on top of it, and then adjusting the transparency of that rectangle. However, no matter what I set self.darkness to in __init__, the rectangle continues to be completely opaque.
I tried enabling color blending with pyglet.gl by adding this code to the top of my program:
from pyglet.gl import *
# NOTE(review): as placed at the top of the program, no window (and hence
# no current OpenGL context) exists yet, so these calls have no effect --
# see the answer below.
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
This changed nothing. I also tried using Pyglet's SolidColorImagePattern class to create a black rectangle with the alpha channel set to self.darkness, but that also did nothing. How do I make this rectangle transparent?
For any OpenGL instruction, a valid and current OpenGL Context is of need. Of course pyglet provides an OpenGL context, but the context is created and made current when the pyglet (OpenGL) window is created (see pyglet.window).
Create the window, before setting up Blending to solve the issue:
from pyglet.gl import *
# Creating the window first makes an OpenGL context current, so the
# blend-state calls below actually take effect.
window = pyglet.window.Window(400, 400, "OpenGL window")
# Standard alpha blending: source weighted by its alpha over the destination.
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)

wxPython - GC.DrawText removes background bitmap

I'm trying to draw a text on existing bitmap, but when I use the Graphics Context's DrawText method, the background is removed. But this happens only when I create background image from empty bitmap (using DrawText on Bitmap from loaded image works well).
I think that the issue happens because I'm using MemoryDC to create an empty bitmap, but I'm quite new to wxPython, so I have no idea how to fix it.
Here's what I've done so far:
import wx
def GetEmptyBitmap(w, h, color=(0,0,0)):
    """
    Create monochromatic bitmap with desired background color.
    Default is black
    """
    b = wx.EmptyBitmap(w, h)
    dc = wx.MemoryDC(b)
    # FIX: set the pen too -- the default pen is black, which would leave a
    # 1px black outline around the filled rectangle for non-black colors.
    dc.SetPen(wx.Pen(color))
    dc.SetBrush(wx.Brush(color))
    dc.DrawRectangle(0, 0, w, h)
    # Deselect the bitmap before handing it out; using a bitmap while it is
    # still selected into a MemoryDC is unreliable on some platforms.
    dc.SelectObject(wx.NullBitmap)
    return b
def drawTextOverBitmap(bitmap, text='', fontcolor=(255, 255, 255)):
    """
    Places text on the center of bitmap and returns modified bitmap.
    Fontcolor can be set as well (white default)
    """
    dc = wx.MemoryDC(bitmap)
    gc = wx.GraphicsContext.Create(dc)
    font = wx.Font(16, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
    gc.SetFont(font, fontcolor)
    # FIX: measure with the same font we draw with -- GetTextExtent on the
    # DC previously used its default font, so centering was off for the
    # 16pt text.
    dc.SetFont(font)
    w,h = dc.GetSize()
    tw, th = dc.GetTextExtent(text)
    gc.DrawText(text, (w - tw) / 2, (h - th) / 2)
    # Release the GraphicsContext and deselect the bitmap from the DC
    # before the caller uses the bitmap elsewhere.
    del gc
    dc.SelectObject(wx.NullBitmap)
    return bitmap
app = wx.App()
# Bitmap built from a loaded image file, rescaled, then labelled.
# NOTE(review): 'location' is not defined in this snippet -- it must hold
# the path to an existing image file.
bmp_from_img = bmp = wx.Image(location).Rescale(200, 100).ConvertToBitmap()
bmp_from_img = drawTextOverBitmap(bmp_from_img, "From Image", (255,255,255))
# Bitmap drawn from scratch (red background), then labelled.
bmp_from_empty = GetEmptyBitmap(200, 100, (255,0,0))
bmp_from_empty = drawTextOverBitmap(bmp_from_empty, "From Empty", (255,255,255))
# Show both bitmaps stacked in a frame for comparison.
frame = wx.Frame(None)
st1 = wx.StaticBitmap(frame, -1, bmp_from_img, (0,0), (200,100))
st2 = wx.StaticBitmap(frame, -1, bmp_from_empty, (0, 100), (200, 100))
frame.Show()
app.MainLoop()
As I said, the StaticBitmap which uses the loaded image is displayed correctly, but the one created with EmptyBitmap has no background.
Do you have any ideas how to make it work?
Thank you
This seems like a bug to me. Use the following to make it work:
def GetEmptyBitmap(w, h, color=(0,0,0)):
    # ...
    # instead of
    # b = wx.EmptyBitmap(w, h)
    # use the following:
    img = wx.EmptyImage(w, h)
    # FIX: wx.Image provides ConvertToBitmap(); there is no
    # ConvertFromBitmap() method on wx.Image, so the original line would
    # raise AttributeError.
    b = img.ConvertToBitmap()
    # ...
I think not the wx.MemoryDC is to blame, but the platform-specific bitmap creation routines, where there is more going on under the hood. By starting off with a wx.Image the output seems to be more predictable/useful.

Categories