I'm trying to draw text on an existing bitmap, but when I use the GraphicsContext's DrawText method, the background is removed. This happens only when I create the background image from an empty bitmap (using DrawText on a bitmap made from a loaded image works fine).
I think the issue happens because I'm using a MemoryDC to create the empty bitmap, but I'm quite new to wxPython, so I have no idea how to fix it.
Here's what I've done so far:
import wx

def GetEmptyBitmap(w, h, color=(0, 0, 0)):
    """
    Create a monochromatic bitmap with the desired background color.
    Default is black.
    """
    b = wx.EmptyBitmap(w, h)
    dc = wx.MemoryDC(b)
    dc.SetBrush(wx.Brush(color))
    dc.DrawRectangle(0, 0, w, h)
    return b
def drawTextOverBitmap(bitmap, text='', fontcolor=(255, 255, 255)):
    """
    Places text on the center of the bitmap and returns the modified bitmap.
    Fontcolor can be set as well (white by default).
    """
    dc = wx.MemoryDC(bitmap)
    gc = wx.GraphicsContext.Create(dc)
    font = wx.Font(16, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
    gc.SetFont(font, fontcolor)
    w, h = dc.GetSize()
    tw, th = dc.GetTextExtent(text)
    gc.DrawText(text, (w - tw) / 2, (h - th) / 2)
    return bitmap
app = wx.App()
bmp_from_img = wx.Image(location).Rescale(200, 100).ConvertToBitmap()
bmp_from_img = drawTextOverBitmap(bmp_from_img, "From Image", (255,255,255))
bmp_from_empty = GetEmptyBitmap(200, 100, (255,0,0))
bmp_from_empty = drawTextOverBitmap(bmp_from_empty, "From Empty", (255,255,255))
frame = wx.Frame(None)
st1 = wx.StaticBitmap(frame, -1, bmp_from_img, (0,0), (200,100))
st2 = wx.StaticBitmap(frame, -1, bmp_from_empty, (0, 100), (200, 100))
frame.Show()
app.MainLoop()
As I said, the StaticBitmap that uses the loaded image is displayed correctly, but the one created with EmptyBitmap has no background.
Do you have any ideas on how to make it work?
Thank you
This seems like a bug to me. Use the following to make it work:
def GetEmptyBitmap(w, h, color=(0, 0, 0)):
    # ...
    # instead of
    # b = wx.EmptyBitmap(w, h)
    # use the following:
    img = wx.EmptyImage(w, h)
    b = img.ConvertToBitmap()
    # ...
I think it is not wx.MemoryDC that is to blame, but the platform-specific bitmap creation routines, where more is going on under the hood. By starting off with a wx.Image, the output seems to be more predictable/useful.
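For reference, here is the complete helper with that change applied — a minimal sketch assuming the classic wxPython API (wx.EmptyImage; in Phoenix, wx.Image(w, h) plays the same role), with the bitmap deselected from the DC before it is returned:

import wx

def GetEmptyBitmap(w, h, color=(0, 0, 0)):
    """Create a solid-color bitmap, starting from a wx.Image."""
    img = wx.EmptyImage(w, h)
    b = img.ConvertToBitmap()
    # Fill the bitmap with the background color, as in the original code.
    dc = wx.MemoryDC(b)
    dc.SetBrush(wx.Brush(color))
    dc.DrawRectangle(0, 0, w, h)
    # Deselect the bitmap before handing it out; leaving it selected
    # into a live DC can give undefined results on some platforms.
    dc.SelectObject(wx.NullBitmap)
    return b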
I'm writing a drawing program using pyglet, and I want the image being created to be separate from the window's buffer (for instance, the image could be larger than the window, or I may want to draw to it at a different rate than the main window is being redrawn). I want to draw into this off-screen image and then display it in the window, but pyglet doesn't allow drawing to anything other than a window. Is there any simple way I can do this?
I've tried creating a second hidden pyglet window, but it gets rendered at the same rate as the main window, which I definitely don't want.
The closest I found was Pyglet draw text into texture, but the code there isn't complete, and it also no longer works because the OpenGL version used by pyglet has moved on.
The following code works for me, perhaps someone else can improve on my answer:
import pyglet
from pyglet.gl import *
from ctypes import byref

W, H = 800, 600

image = pyglet.image.create(W, H)
texture = image.get_texture()

window = pyglet.window.Window(width=W, height=H)

# Create a framebuffer object and attach the image's texture to it.
fbo_id = GLuint(0)
glGenFramebuffers(1, byref(fbo_id))
glBindFramebuffer(GL_FRAMEBUFFER, fbo_id)
glBindTexture(GL_TEXTURE_2D, texture.id)
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, texture.id, 0)

# Draw the initial content into the off-screen image.
rect1 = pyglet.shapes.Rectangle(0, 0, W, H, (255, 0, 0))
rect2 = pyglet.shapes.Rectangle(W//4, H//4, W//2, H//2, (0, 0, 255))
label = pyglet.text.Label("Hello World", font_name="Times New Roman", font_size=36,
                          x=W//2, y=H//2, anchor_x="center", anchor_y="center")
rect1.draw()
rect2.draw()
label.draw()

@window.event
def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
    # Draw into the off-screen image, not the window.
    glBindFramebuffer(GL_FRAMEBUFFER, fbo_id)
    line = pyglet.shapes.Line(x - dx, y - dy, x, y, 3, (0, 255, 255))
    line.draw()

@window.event
def on_draw():
    # Render the off-screen image to the window.
    glBindFramebuffer(GL_FRAMEBUFFER, 0)
    window.clear()
    texture.blit(0, 0)

pyglet.app.run()
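One small robustness tweak, my addition rather than part of the answer above: right after the glFramebufferTexture2D call you can ask OpenGL whether the framebuffer is complete, and the off-screen image can be dumped to a file at any time through pyglet's image API (the filename here is just an example):

# Sanity check after attaching the texture: the FBO should be complete.
assert glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE

# Save the current off-screen content, independently of the window.
texture.get_image_data().save('offscreen.png')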
Good evening! I need a global variable assigned in one function to be usable in another function. However, when I try to declare the variable as global, I get the error "Statement expected, found Py:EQ" on the line global id, confidence = recognizer.predict(faceimage), specifically above the = sign on line 53. How do I fix this error?
# install opencv "pip install opencv-python"
import cv2
# distance from camera to object(face) measured
# centimeter
Known_distance = 76.2
# width of face in the real world or Object Plane
# centimeter
Known_width = 14.3
# Colors
GREEN = (0, 255, 0)
RED = (0, 0, 255)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
# defining the fonts
fonts = cv2.FONT_HERSHEY_COMPLEX
# face detector object
face_detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
# focal length finder function
def Focal_Length_Finder(measured_distance, real_width, width_in_rf_image):
    # finding the focal length
    focal_length = (width_in_rf_image * measured_distance) / real_width
    return focal_length

# distance estimation function
def Distance_finder(Focal_Length, real_face_width, face_width_in_frame):
    distance = (real_face_width * Focal_Length) / face_width_in_frame
    # return the distance
    return distance
def microFacialExpressions(recognizer, width, height):
    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    detectorFace = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    camera = cv2.VideoCapture(0)
    recognizer = cv2.face.EigenFaceRecognizer_create()
    recognizer.read("classifierEigen.yml")
    width, height = 220, 220
    while True:
        connected, image = camera.read()
        # Grayscale conversion
        grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        facesDetected = detectorFace.detectMultiScale(grayimage, scaleFactor=1.5, minSize=(100, 100))
        for (x, y, l, a) in facesDetected:
            faceimage = cv2.resize(grayimage[y:y + a, x:x + l], (width, height))
            cv2.rectangle(image, (x, y), (x + l, y + a), (0, 0, 255), 2)
            global id, confidence = recognizer.predict(faceimage)
            # If ID is equal to 1, issue the message "Safe to exit"; if not, issue the message "Hostile area"
            if id == 1:
                warning = "Safe to exit"
            else:
                warning = "Hostile area"
            cv2.putText(image, warning, (x, y + (a + 30)), font, 2, (0, 0, 255))
    return warning
def face_data(image):
    face_width = 0  # making face width zero
    # converting color image to gray scale image
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # detecting face in the image
    faces = face_detector.detectMultiScale(gray_image, 1.3, 5)
    # looping through the faces detected in the image
    # getting coordinates x, y, width and height
    for (x, y, h, w) in faces:
        # draw the rectangle on the face
        cv2.rectangle(image, (x, y), (x + w, y + h), GREEN, 2)
        # getting face width in pixels
        face_width = w
    # return the face width in pixels
    return face_width
# reading reference_image from directory
ref_image = cv2.imread("Ref_image.jpg")
# find the face width(pixels) in the reference_image
ref_image_face_width = face_data(ref_image)
# get the focal by calling "Focal_Length_Finder"
# face width in reference(pixels),
# Known_distance(centimeters),
# known_width(centimeters)
Focal_length_found = Focal_Length_Finder(
    Known_distance, Known_width, ref_image_face_width)
print(Focal_length_found)
# show the reference image
cv2.imshow("ref_image", ref_image)
# initialize the camera object so that we
# can get frame from it
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# looping through frame, incoming from
# camera/video
while True:
    # reading the frame from camera
    _, frame = cap.read()
    # calling face_data function to find
    # the width of face(pixels) in the frame
    face_width_in_frame = face_data(frame)
    # check if the face width is zero; if so, do not
    # find the distance
    if face_width_in_frame != 0:
        # finding the distance by calling function
        # Distance_finder; it needs
        # these arguments: the Focal_Length,
        # Known_width(centimeters),
        # and Known_distance(centimeters)
        Distance = Distance_finder(
            Focal_length_found, Known_width, face_width_in_frame)
        if Distance <= 50 and id:
            print("Level S Alert!")
        # draw line as background of text
        cv2.line(frame, (30, 30), (230, 30), RED, 32)
        cv2.line(frame, (30, 30), (230, 30), BLACK, 28)
        # Drawing Text on the screen
        cv2.putText(
            frame, f"Distance: {round(Distance, 2)} CM", (30, 35),
            fonts, 0.6, GREEN, 2)
    # show the frame on the screen
    cv2.imshow("frame", frame)
    # quit the program if you press 'q' on keyboard
    if cv2.waitKey(1) == ord("q"):
        break

# closing the camera
cap.release()
# closing the windows that are opened
cv2.destroyAllWindows()
The global statement does not support assigning to a name; it only declares the name to be a global variable rather than a local one. While global statements are legal pretty much anywhere in a function, it is strongly recommended to put such declarations at the top, and to do the assignment as a separate, ordinary statement:
def microFacialExpressions(recognizer, width, height):
    global id, confidence
    font = cv2.FONT_HERSHEY_COMPLEX_SMALL
    detectorFace = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
    camera = cv2.VideoCapture(0)
    recognizer = cv2.face.EigenFaceRecognizer_create()
    recognizer.read("classifierEigen.yml")
    width, height = 220, 220
    while True:
        connected, image = camera.read()
        # Grayscale conversion
        grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        facesDetected = detectorFace.detectMultiScale(grayimage, scaleFactor=1.5, minSize=(100, 100))
        for (x, y, l, a) in facesDetected:
            faceimage = cv2.resize(grayimage[y:y + a, x:x + l], (width, height))
            cv2.rectangle(image, (x, y), (x + l, y + a), (0, 0, 255), 2)
            # Plain assignment now that the names were declared global above.
            id, confidence = recognizer.predict(faceimage)
            # If ID is equal to 1, issue the message "Safe to exit"; if not, issue the message "Hostile area"
            if id == 1:
                warning = "Safe to exit"
            else:
                warning = "Hostile area"
            cv2.putText(image, warning, (x, y + (a + 30)), font, 2, (0, 0, 255))
    return warning
Given that both variables are repeatedly reassigned in the loop, it's not clear why the last value of either one is special enough to be needed in the global scope. I suspect neither variable needs to be declared global at all.
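As a minimal stand-alone illustration of the rule, independent of the OpenCV code above: global only declares names, and the assignment has to be a separate, ordinary statement:

counter = 0

def bump():
    global counter         # declaration only; no "=" is allowed on this line
    counter = counter + 1  # plain assignment, now targeting the module-level name

bump()
print(counter)  # prints 1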
I have a folder with .ttf and .otf fonts and would like to use them to write text on my ImageDraw object, but with NO shading: a single RGB color only. I have tried bitmap fonts, but they A) don't look nice and B) use more than one color anyway.
I have read that there is a library for converting .bdf to .pil. If I convert arial.ttf to arial.bdf and then to arial.pil, will that give me what I'm looking for? The text will almost always be dropped onto a background, so should I consider writing the text first on a blank canvas, doing a color reduction, and then pasting that canvas onto my background?
I have previously made this program using Java and it writes text very nicely on my bitmaps. One color, symmetrical, etc. Image below.
Below are the two attempts with python. The blockier one is a bitmap font, the other is regular arial.ttf.
Here is my code:
def personalize(self):
    names = self.personalize_entry.get("1.0", 'end-1c').split('\n')
    num_names = len(names)
    num_grids = math.ceil(num_names / 20)
    answer = ask_grid_background()
    separator = Image.new('RGB', (473, 1), color_dict['P'])
    background = Image.new('RGB', (473, 821), color_dict['.'])
    if answer:
        showinfo("Bitmap", "Give me the design.")
        file_path = filedialog.askopenfilename()
        filename = path_leaf(file_path)
        filename = filename[:-4]
        __, __, center = read(file_path)
        if center == 0:
            messagebox.showinfo("Hmmm", "I couldn't find a center...are you sure this is a basic set up?")
            return False
        img = Image.open(file_path)
        size_num = img.size
        section = img.crop((5, (size_num[1] - 55 - center), 478, (size_num[1] - center - 15)))
        background.paste(separator, (0, 0))
        for i in range(20):
            background.paste(section, (0, (41 * i + 1)))
            background.paste(separator, (0, (41 * i) + 41))
    else:
        background.paste(separator, (0, 0))
        for i in range(20):
            # background.paste(section, (0, (41 * i + 1)))
            background.paste(separator, (0, (41 * i) + 41))
    draw = ImageDraw.Draw(background)
    fnt = ImageFont.truetype("Fonts/PIXEAB__.ttf", 36)
    draw.text((10, 10), names[0], font=fnt, fill=(0, 0, 0))
    background.show()
ImageDraw has an undocumented member fontmode, which can be set to '1' (cf. Pillow's image modes) to turn off the anti-aliasing of the rendered text.
Let's compare. First, commonly rendered text, where draw.fontmode is implicitly set to 'L':
from PIL import Image, ImageDraw, ImageFont
image = Image.new('RGB', (800, 200), (255, 255, 255))
draw = ImageDraw.Draw(image)
font = ImageFont.truetype('arial.ttf', 150)
draw.text((10, 10), 'Hello World', font=font, fill=(0, 0, 0))
image.save('image.png')
Now, let's explicitly set draw.fontmode = '1':
from PIL import Image, ImageDraw, ImageFont
image = Image.new('RGB', (800, 200), (255, 255, 255))
draw = ImageDraw.Draw(image)
draw.fontmode = '1'
font = ImageFont.truetype('arial.ttf', 150)
draw.text((10, 10), 'Hello World', font=font, fill=(0, 0, 0))
image.save('image.png')
Et voilà – no anti-aliasing, all pixels are solid black.
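To verify the result, Image.getcolors() can confirm that the rendered image now contains only the two expected colors (a quick check of my own, not part of the original answer):

# With draw.fontmode = '1', only pure white and pure black should remain.
print(image.getcolors())  # e.g. [(n_white, (255, 255, 255)), (n_black, (0, 0, 0))]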
----------------------------------------
System information
----------------------------------------
Platform: Windows-10-10.0.16299-SP0
Python: 3.9.1
PyCharm: 2021.1
Pillow: 8.2.0
----------------------------------------
I'm trying to generate an image that contains the text of a given string. For now I'm using ImageDraw's text() from the Python PIL library to generate the result, and the process works fine, but it's exceedingly slow: it takes a handful of seconds to finish drawing, which is really hampering the scalability of the program.
This is the current function to generate images:
import textwrap

from PIL import Image, ImageDraw, ImageFont

def draw_ascii(ascii_image, new_width):
    # Source text, and wrap it.
    adjusted_ascii = ascii_image.replace("\n", " ")
    text = textwrap.fill(adjusted_ascii, new_width)
    # Font size, color and type.
    fontcolor = (0, 0, 0)
    fontsize = 14
    font = ImageFont.truetype("FreeMono.ttf", fontsize)
    # Determine text size using a scratch image.
    img = Image.new("RGBA", (1, 1))
    draw = ImageDraw.Draw(img)
    textsize = draw.textsize(text, font)
    # Creating the final image and putting the text on it.
    background = (255, 255, 255)
    img = Image.new("RGB", textsize, background)
    draw = ImageDraw.Draw(img)
    draw.text((0, 0), text, fontcolor, font)
    return img
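No answer is attached to this question here, but one cheap improvement worth noting as a suggestion: ImageFont.truetype() re-reads and re-parses the font file on every call, so loading the font once at module level avoids that cost on each image. A minimal sketch under that assumption, keeping the rest of the logic unchanged:

import textwrap

from PIL import Image, ImageDraw, ImageFont

# Load the font once instead of once per call.
FONT = ImageFont.truetype("FreeMono.ttf", 14)

def draw_ascii_cached(ascii_image, new_width):
    text = textwrap.fill(ascii_image.replace("\n", " "), new_width)
    # Measure with a 1x1 scratch image, as in the original code.
    # (textsize is deprecated in newer Pillow; textbbox is the replacement.)
    scratch = ImageDraw.Draw(Image.new("RGB", (1, 1)))
    textsize = scratch.textsize(text, FONT)
    img = Image.new("RGB", textsize, (255, 255, 255))
    ImageDraw.Draw(img).text((0, 0), text, (0, 0, 0), FONT)
    return img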
I wrote a program to render a textured square to the screen and to an FBO. I believe the rendering result should be the same in both cases; only the output destination differs. Surprisingly, it gives me completely different results.
For the first image, I output directly to the screen. It is good, without any problems. For the second image, I rendered into an FBO backed by a texture, and then rendered that texture to the screen. In this case the output is totally messed up; it seems to contain only the upper-left corner of the image.
Related code is:
def render(self):
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self._fbids[1])
    gl.glBindTexture(gl.GL_TEXTURE_2D, self._texids[0])
    gl.glUseProgram(0)
    self.drawQuad2()

    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    gl.glBindTexture(gl.GL_TEXTURE_2D, self._texids[1])  # This is for the second, messed-up output.
    # gl.glBindTexture(gl.GL_TEXTURE_2D, self._texids[0])  # This is for the first, normal output.
    gl.glUseProgram(0)
    self.drawQuad2()
def drawQuad2(self):
    gl.glBegin(gl.GL_QUADS)
    gl.glTexCoord2f(0, 1)
    gl.glVertex2f(-1, 1)
    gl.glTexCoord2f(1, 1)
    gl.glVertex2f(1, 1)
    gl.glTexCoord2f(1, 0)
    gl.glVertex2f(1, -1)
    gl.glTexCoord2f(0, 0)
    gl.glVertex2f(-1, -1)
    gl.glEnd()
    gl.glFlush()
def setupTexture(self, num, rawstr, width, height):
    texids = gl.glGenTextures(num)
    for tid in texids:
        print('texture bound %s' % tid)
        gl.glBindTexture(gl.GL_TEXTURE_2D, tid)
        gl.glTexImage2D(
            gl.GL_TEXTURE_2D,
            0, gl.GL_RGBA, width, height,
            0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE,
            rawstr)
        gl.glTexParameteri(
            gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
        gl.glTexParameteri(
            gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
    gl.glEnable(gl.GL_TEXTURE_2D)
    return texids
def setupFramebuffer(self, num, texids):
    assert len(texids) == num
    fbids = gl.glGenFramebuffers(num)
    for i in range(len(fbids)):
        print('framebuffer bound %s with texture %s' % (fbids[i], texids[i]))
        gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbids[i])
        gl.glFramebufferTexture2D(
            gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0,
            gl.GL_TEXTURE_2D, texids[i], 0)
    return fbids
def reshape(self, w, h):
    if not h:
        return
    print('reshape. w:%s, h:%s' % (w, h))
    gl.glMatrixMode(gl.GL_PROJECTION)
    gl.glLoadIdentity()
    gl.glViewport(0, 0, w, h)
    gl.glOrtho(-1, 1, -1, 1, -1, 1)
    gl.glMatrixMode(gl.GL_MODELVIEW)
    gl.glLoadIdentity()
You must use the glViewport function to tell OpenGL the size of the target it shall address with its drawing operations. So when you switch to rendering to the FBO, you must call glViewport with something that makes sense for the target texture, usually the size of the texture itself. When switching back to rendering to the window, you call glViewport with the window size.
If the destination viewport is larger than the destination framebuffer, this effectively results in a zoom-in, which is why only a corner of the image ends up visible.
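Applied to the render method above, a minimal sketch of that fix; the attribute names self._texwidth, self._texheight, self._winwidth and self._winheight are hypothetical placeholders for wherever the texture and window sizes are stored:

def render(self):
    # Pass 1: render into the FBO; the viewport must match the texture size.
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self._fbids[1])
    gl.glViewport(0, 0, self._texwidth, self._texheight)
    gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
    gl.glBindTexture(gl.GL_TEXTURE_2D, self._texids[0])
    self.drawQuad2()

    # Pass 2: render to the window; restore the viewport to the window size.
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
    gl.glViewport(0, 0, self._winwidth, self._winheight)
    gl.glBindTexture(gl.GL_TEXTURE_2D, self._texids[1])
    self.drawQuad2()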