I have a SH1106 display connected to my Raspberry Pi that I'm controlling using luma.oled.
I can display all kinds of content in different fonts, which is great. However, I can't figure out how to add something to what's currently being displayed without refreshing the whole display. My code is like this:
from os import system
import serial
from time import sleep
from luma.core.interface.serial import i2c
from luma.core.render import canvas
from luma.oled.device import sh1106
from PIL import ImageFont
# config display
device = sh1106(i2c(port=1, address=0x3C), rotate=0)
device.clear()
FA_solid = ImageFont.truetype('/home/pi/Desktop/tests/fa-solid-900.ttf', 16)
FA_regular = ImageFont.truetype('/home/pi/Desktop/tests/fa-regular-400.ttf', 16)
text_large = ImageFont.truetype('/home/pi/Desktop/tests/coolvetica condensed rg.ttf', 48)
text_small = ImageFont.truetype('/home/pi/Desktop/tests/coolvetica condensed rg.ttf', 16)
# display things
def show_icon(code):
    with canvas(device) as draw:
        draw.text((112, 0), text=code, font=FA_solid, fill="white")

def large_text(content, paddingleft=0, paddingtop=0):
    with canvas(device) as draw:
        draw.text((0, 0), text=content, font=text_large, fill="white")

def small_text(content, paddingleft=0, paddingtop=0):
    with canvas(device) as draw:
        draw.text((0, 0), text=content, font=text_small, fill="white")
show_icon("\uf124")
sleep(2)
large_text("Hi ;)")
sleep(10)
device.clear()
This displays an icon from FontAwesome in the upper right corner, then clears the screen and displays "Hi". How can I change this to display the icon + "Hi"? Ideally I'd have "zones" on the screen where I can change the icon zone while keeping the text displayed, and vice versa. Thanks!
EDIT --------------------
Here's my code, adapted from Mark's answer below. It's better, but still not there yet: zones 1 and 3 stay the same while zone 2 is updated, but when I redraw the screen it goes blank for half a second before updating, which I don't want.
def UpdateDisplay(z1, z2, z3):
    """Pass in the three zones and they will be sent to the screen"""
    device = sh1106(i2c(port=1, address=0x3C), rotate=0)
    # Make a black canvas the size of the entire screen
    whole = Image.new("1", (128, 64))
    # Now paste in the 3 zones to form the whole
    whole.paste(z1, (2, 2))   # zone1 at top-left
    whole.paste(z2, (66, 2))  # zone2 at top-right
    whole.paste(z3, (2, 34))  # zone3 across the bottom
    # Send the assembled image to the screen
    device.display(whole)
    return
# Make zone1 and annotate it
z1 = Image.new("1", (60,30))
z1draw = ImageDraw.Draw(z1)
z1draw.text((10,10),"Zone1", fill="white")
# Make zone2 and annotate it
z2 = Image.new("1", (60,30))
z2draw = ImageDraw.Draw(z2)
z2draw.text((10,10),"Zone2", fill="white")
# Make zone3 and annotate it
z3 = Image.new("1", (124,28))
z3draw = ImageDraw.Draw(z3)
z3draw.text((10,10),"Zone3", fill="white")
# Blit all zones to display
UpdateDisplay(z1,z2,z3)
sleep(5)
# Re-create zone2 with changed text
z2 = Image.new("1", (60,30))
z2draw = ImageDraw.Draw(z2)
z2draw.text((10,10),"Zone2 changed", fill="white")
UpdateDisplay(z1,z2,z3)
I don't have an SH1106 to test with and I have never used the luma library, so there may be a much simpler way of doing what you want. If so, maybe someone will kindly ping me and I'll delete this answer.
I have used PIL quite a lot, so I looked in here around line 28:
background = Image.new("RGB", device.size, "white")
background.paste(frame.resize(size, resample=Image.LANCZOS), posn)
device.display(background.convert(device.mode))
So, it seems you can create a PIL Image and send it to the display like that. The first line creates a blank white canvas the same size as the entire display, the second line pastes another PIL Image onto the canvas at the specified position, and the last line sends the image to the display. So all you need to do is define your N "zones" separately and draw in them separately (each being a PIL Image); then, when you want to update the display, paste your N zones in at the positions you want them and send the completed picture to the display.
Sorry I can't be more precise, but I have nothing to test with. Here's a little example with 3 zones that can be drawn individually and then assembled to a whole before calling device.display()
#!/usr/bin/env python3
from PIL import Image, ImageDraw
def UpdateDisplay(z1, z2, z3):
    """Pass in the three zones and they will be sent to the screen"""
    # Make a black canvas the size of the entire screen
    whole = Image.new("RGB", (128, 64), (0, 0, 0))
    # Now paste in the 3 zones to form the whole
    whole.paste(z1, (2, 2))   # zone1 at top-left
    whole.paste(z2, (66, 2))  # zone2 at top-right
    whole.paste(z3, (2, 34))  # zone3 across the bottom
    # I save the image here, but you would write it to the screen with "device.display()"
    whole.save('result.png')
    return
# Make zone1 dark grey and annotate it
z1 = Image.new("RGB", (60,30), (64,64,64))
z1draw = ImageDraw.Draw(z1)
z1draw.text((10,10),"Zone1")
# Make zone2 mid-grey and annotate it
z2 = Image.new("RGB", (60,30), (128,128,128))
z2draw = ImageDraw.Draw(z2)
z2draw.text((10,10),"Zone2")
# Make zone3 light grey and annotate it
z3 = Image.new("RGB", (124,28), (192,192,192))
z3draw = ImageDraw.Draw(z3)
z3draw.text((10,10),"Zone3")
# Blit all zones to display
UpdateDisplay(z1,z2,z3)
# Now change just zone 2 and update display
z2.paste("red", (0,0,z2.width,z2.height))
UpdateDisplay(z1,z2,z3)
Here is the original display:
And here again after updating just zone2:
ok so I mostly figured it out:
from luma.core.interface.serial import i2c
from luma.core.render import canvas
from luma.oled.device import sh1106
from PIL import ImageFont, Image, ImageDraw
### setting up display using LUMA oled
device = sh1106(i2c(port=1, address=0x3C), rotate=0)
device.clear()
### Initialize drawing zone (aka entire screen)
output = Image.new("1", (128,64))
add_to_image = ImageDraw.Draw(output)
### I have the exterior temp and altitude I want to display. Each has an assigned zone for the icon (FontAwesome) and one for the data
### (FA_solid and text_medium are ImageFont.truetype fonts loaded as in my first snippet; temp_c is the latest temperature reading)
# temp_ext
temp_zone = [(14,44), (36,64)]
temp_start = (14,44)
temp_icon_zone = [(0,48), (15,64)]
temp_icon_start = (3,48)
add_to_image.text(temp_icon_start, "\uf2c9", font=FA_solid, fill="white")
### every time I have a new reading, I basically draw a black rectangle over what I had and then rewrite the text
add_to_image.rectangle(temp_zone, fill="black", outline="black")
add_to_image.text(temp_start, str(temp_c), font=text_medium, fill="white")
device.display(output)
This enables me to only update the part of the screen I want, leaving the rest as is and, crucially, not having a blank screen for half a second when rewriting info. Feel free to suggest optimizations!
I still need to look into memory usage; it feels kinda sluggish when the different zones update at once. But it works!
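For anyone wanting to reproduce this, here is a minimal, self-contained sketch of the same "blank the zone, redraw it, push the buffer" approach. It keeps one persistent device and one persistent image, which is what avoids the blank flash; the font and the temperature values are placeholders for whatever you actually use:

from time import sleep
from luma.core.interface.serial import i2c
from luma.oled.device import sh1106
from PIL import Image, ImageDraw, ImageFont

device = sh1106(i2c(port=1, address=0x3C), rotate=0)
device.clear()

# One persistent 128x64 image; "zones" are just regions of it
output = Image.new("1", (128, 64))
draw = ImageDraw.Draw(output)

font = ImageFont.load_default()   # placeholder; swap in ImageFont.truetype(...) for your own font
temp_zone = [(14, 44), (36, 64)]  # region reserved for the temperature value
temp_start = (14, 44)

def update_temperature(temp_c):
    # blank only the temperature zone, rewrite it, then push the whole buffer
    draw.rectangle(temp_zone, fill="black", outline="black")
    draw.text(temp_start, str(temp_c), font=font, fill="white")
    device.display(output)

for reading in (21.5, 22.0, 22.5):  # stand-in for real sensor readings
    update_temperature(reading)
    sleep(2)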
I am making a scene where there is a thumbs-up image that is supposed to get bigger on mouse hover, and shrink back to normal size when the mouse is no longer hovering.
This is how I make the thumbs-up image:
thumbs_up_image = pygame.image.load("./plz_like.png")
thumbs_up_rect = thumbs_up_image.get_rect(
    topleft=(screen.get_width() // 2 - thumbs_up_image.get_width() + 75,
             screen.get_height() // 2 + thumbs_up_image.get_height() - 225))
And this is how I make it get bigger:
if thumbs_up_rect.collidepoint(pygame.mouse.get_pos()):
    thumbs_up_image = pygame.transform.scale(thumbs_up_image,
                                             [n + 50 for n in thumbs_up_image.get_size()])
    thumbs_up_rect = thumbs_up_image.get_rect()
This is how the image is blitted:
screen.blit(thumbs_up_image, thumbs_up_rect)
The problem is that when I hover on the thumbs-up image, it first goes to the top-left corner of the screen. Then, when I hover on it again, it gets super big and pixelated.
What am I doing wrong?
I managed to figure it out by myself.
This is how I do it:
First, I prepared a bigger version of the image and its rect (as shown below):
big_thumbs_image = pygame.transform.scale(thumbs_up_image, [i + 50 for i in thumbs_up_image.get_size()])
big_thumbs_image_rect = thumbs_up_image.get_rect(
    topleft=(screen.get_width() // 2 - thumbs_up_image.get_width() + 55,
             screen.get_height() // 2 + thumbs_up_image.get_height() - 250))
Then, when the small image's rect collides with the mouse, blit the bigger image:
if thumbs_up_rect.collidepoint(pygame.mouse.get_pos()):
    screen.blit(big_thumbs_image, big_thumbs_image_rect)
You are not showing the code that actually renders the image to the screen. But basically: you are not saving the original image, so at each hover event it will grow and grow (and it will grow once per frame if that code runs in the main loop).
You need a variable to hold the original image, one to tell your code the image has already been resized, and an else clause on this if to restore the original image: pygame won't do that for you.
Also, when you call get_rect on the new image, its top-left position will always be (0, 0) - you have to translate that top-left corner to a suitable coordinate: getting the center of the original sprite's rect (wherever its on-screen location is kept) and setting the same center on the new rect should work.
And finally, prefer "rotozoom" over "scale" - the Pygame documentation is clear that it uses better algorithms for scaling.
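Putting those points together, here is a minimal sketch of that idea. It assumes the same plz_like.png from the question; the window size and the 1.25 scale factor are arbitrary, and the resized image is always derived from the untouched original:

import pygame

pygame.init()
screen = pygame.display.set_mode((640, 480))
clock = pygame.time.Clock()

# keep the original surface untouched; derive the scaled version from it every frame
original_image = pygame.image.load("./plz_like.png").convert_alpha()
base_rect = original_image.get_rect(center=(320, 240))

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False

    if base_rect.collidepoint(pygame.mouse.get_pos()):
        # scale from the ORIGINAL, so the growth doesn't compound frame after frame
        image = pygame.transform.rotozoom(original_image, 0, 1.25)
    else:
        image = original_image

    # keep the (possibly resized) image centred on the original position
    draw_rect = image.get_rect(center=base_rect.center)

    screen.fill((30, 30, 30))
    screen.blit(image, draw_rect)
    pygame.display.flip()
    clock.tick(60)

pygame.quit()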
Try using this pygame function:
pygame.transform.rotozoom(Surface, angle, scale)
I also had some issues with pixelation in a game, but it seemed to work with this.
I am trying to screenshot a Microsoft Edge window using pywin32. The screenshot will then be fed to a machine learning algorithm that plays a game in Microsoft Edge. As you might guess, the program will be taking screenshots many times, so I need the screenshot to be as fast as possible. To increase the speed, my program resizes the Microsoft Edge window to a small resolution (specifically, 600 by 600). However, the screenshot doesn't show the entire window even though I have moved it to a specified location.
My program:
import win32gui
import win32ui
import win32con
import win32api
from PIL import Image
import time
# grab a handle to the main desktop window
hdesktop = win32gui.GetDesktopWindow()
# desired size of the capture region in pixels
width = 600
height = 600
left = 0
top = 0
# set window to correct location
print("You have 3 second to click the desired window!")
for i in range(3, 0, -1):
    print(i)
    time.sleep(1)
hwnd = win32gui.GetForegroundWindow()
win32gui.MoveWindow(hwnd, 0, 0, width, height, True)
# create a device context
desktop_dc = win32gui.GetWindowDC(hdesktop)
img_dc = win32ui.CreateDCFromHandle(desktop_dc)
# create a memory based device context
mem_dc = img_dc.CreateCompatibleDC()
# create a bitmap object
screenshot = win32ui.CreateBitmap()
screenshot.CreateCompatibleBitmap(img_dc, width, height)
mem_dc.SelectObject(screenshot)
# copy the screen into our memory device context
mem_dc.BitBlt((0, 0), (width, height), img_dc, (left, top),win32con.SRCCOPY)
bmpinfo = screenshot.GetInfo()
bmpstr = screenshot.GetBitmapBits(True)
im = Image.frombuffer(
    'RGB',
    (bmpinfo['bmWidth'], bmpinfo['bmHeight']),
    bmpstr, 'raw', 'BGRX', 0, 1)
im.show()
# free our objects
mem_dc.DeleteDC()
win32gui.DeleteObject(screenshot.GetHandle())
My program first moves and resizes the desired window (taken from win32gui.GetForegroundWindow()) with win32gui.MoveWindow(hwnd, 0, 0, width, height, True). Then it tries to screenshot the window by taking the whole desktop window (hdesktop = win32gui.GetDesktopWindow()) and cropping it to the desired coordinates (mem_dc.BitBlt((0, 0), (width, height), img_dc, (left, top), win32con.SRCCOPY)). I then convert the win32 screenshot to a PIL image so I can look at it. Note that the desired coordinates are the SAME coordinates used to move the window in the first place. However, when I run this program, the screenshot doesn't capture the entire window!
I have tried looking at the documentation of the MoveWindow and BitBlt functions, but I couldn't find the issue. The destination and source rectangle parameters are supposed to be (0, 0), because of the MoveWindow call. The width and height parameters are the same. I have also tried experimenting with the bRepaint parameter, but it didn't make a difference.
Any Suggestions?
After experimenting with this question a little bit more, I finally found the problem.
In the comments, I said that ctypes.windll.shcore.SetProcessDpiAwareness(1) doesn't work. However, it did. When I upscale the height and width, the dimensions of the screenshot and the window match perfectly. The reason the width and height don't work for smaller dimensions (I was originally setting them to 500) is that Microsoft Edge doesn't allow it: if the requested width falls below a certain threshold, the actual window width snaps to the smallest width Microsoft Edge will accept. An easy workaround was to set the width and height to a larger resolution, and it worked!
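For reference, this is roughly where the DPI-awareness call goes, before the window is moved and captured (the 1 is PROCESS_SYSTEM_DPI_AWARE, and 800x800 is just an example of a size large enough that Edge doesn't clamp it):

import ctypes
import win32gui

# make the process DPI-aware so GetWindowDC/BitBlt coordinates match real pixels
ctypes.windll.shcore.SetProcessDpiAwareness(1)

# pick a resolution large enough that Edge doesn't clamp it to its minimum window size
width, height = 800, 800
hwnd = win32gui.GetForegroundWindow()
win32gui.MoveWindow(hwnd, 0, 0, width, height, True)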
Thank you so much to everyone in the comments, especially @IInspectable.
I'm working on this ISS Tracker project, but instead of using the e-Paper display, I want to use the 240x135 Mini PiTFT display instead. I've been trying to rewrite the Python code to accomplish this, as the rendering code is the same, but I cannot get it to output to the new display correctly. I'm extremely noobish when it comes to Python code, so any help you could offer would be most appreciated. The source code is available at the above ISS Tracker link, and my altered code is below:
# International Space Station Tracker.
# using Raspberry Pi B+, Waveshare ePaper Display and ProtoStax enclosure
# --> https://www.waveshare.com/product/modules/oleds-lcds/e-paper/2.7inch-e-paper-hat-b.htm
# --> https://www.protostax.com/products/protostax-for-raspberry-pi-b
#
# It displays the current location of the ISS and also its tracked trajectory. The
# current location is shown by the ISS icon, and the trajectory by small circles.
# 15 minute markers are shown as small rectangles.
#
# ISS Current Location is obtained using Open Notify ISS Current Location API
# http://open-notify.org/Open-Notify-API/ISS-Location-Now/
#
# Written by Sridhar Rajagopal for ProtoStax.
# BSD license. All text above must be included in any redistribution
# *
import sys
sys.path.append(r'lib')
if sys.version_info[0] < 3:
    raise Exception("Must be using Python 3")
from enum import Enum
import signal
import board
import digitalio
import adafruit_rgb_display.st7789 as st7789
import epd2in7b
import epdconfig
from PIL import Image, ImageDraw, ImageFont, ImageOps
from datetime import datetime
from time import time, sleep
import requests
# START mini pitft code
# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):
cs_pin = digitalio.DigitalInOut(board.CE0)
dc_pin = digitalio.DigitalInOut(board.D25)
reset_pin = None
# Config for display baudrate (default max is 24mhz):
BAUDRATE = 64000000
# Setup SPI bus using hardware SPI:
spi = board.SPI()
# Create the ST7789 display:
disp = st7789.ST7789(
    spi,
    cs=cs_pin,
    dc=dc_pin,
    rst=reset_pin,
    baudrate=BAUDRATE,
    width=135,
    height=240,
    x_offset=53,
    y_offset=40,
)
# Create blank image for drawing.
# Make sure to create image with mode 'RGB' for full color.
height = disp.width # we swap height/width to rotate it to landscape!
width = disp.height
image = Image.new("RGB", (width, height))
rotation = 90
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))
disp.image(image, rotation)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = -2
top = padding
bottom = height - padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0
# Turn on the backlight
backlight = digitalio.DigitalInOut(board.D22)
backlight.switch_to_output()
backlight.value = True
# END mini pitft code
# Update Interval for fetching positions
DATA_INTERVAL = 30 #seconds
# Update interval for the display
DISPLAY_REFRESH_INTERVAL = 2 # Number of DATA_INTERVALs between successive display updates (e.g. 2 => update display every second data fetch)
# Note:
# The dimensions of the 2.7 in ePaper display are
# 264 x 176
# The dimensions of the Mini PiTFT display are
# 240 x 135
class Display(object):
    def __init__(self, imageWidth, imageHeight):
        self.imageWidth = imageWidth
        self.imageHeight = imageHeight

    # Draws the ISS current location and trajectory from array of positions
    def drawISS(self, positions):
        imageWhite = Image.new('1', (self.imageWidth, self.imageHeight), 255) # 1: clear the frame
        imageMap = Image.open('world_map_m.bmp').convert('L')
        imageWhite.paste(imageMap, (0,0))
        imageRed = Image.new('1', (self.imageWidth, self.imageHeight), 255) # 1: clear the frame
        issLogo = Image.open('iss.bmp').convert('L')
        drawred = ImageDraw.Draw(imageRed)
        for i,t in enumerate(positions):
            (lat,lon) = t
            # Map the lat, lon to our x/y coordinate system
            (x,y) = self.mapLatLongToXY(lat, lon)
            # last position in the positions array is the latest location
            # Every 15 minutes, we add a rectangular marker
            # and a small red circle to mark other locations
            if (i == len(positions) - 1):
                s = 10
                # drawred.rectangle((x-s,y-s,x+s,y+s), fill=0)
                imageRed.paste(issLogo, ((int)(x-s), (int)(y-s)))
            elif (((i+1) % (15 * 60 / DATA_INTERVAL)) == 0): # every 15 minutes (so 15 * 60s / DATA_INTERVAL = number of readings within 15 minutes)
                s = 2
                drawred.rectangle((x-s,y-s,x+s,y+s), fill=0)
            else:
                s = 1
                drawred.ellipse((x-s,y-s,x+s,y+s), outline=0)
                # drawred.point((x,y), fill=0)
        # Rotate image 180 degrees - Remove the # comments of the lines below to rotate the image and allow for alternate positioning/mounting of the Raspberry Pi
        # imageRed = imageRed.transpose(Image.ROTATE_180)
        # imageWhite = imageWhite.transpose(Image.ROTATE_180)
        # return the rendered Red and White images
        return imageWhite, imageRed

    # Maps lat, long to x,y coordinates in 264x181 (the size of the world map)
    # (90 to -90 lat and -180 to 180 lon) map to 0-181 (y) and 0-264 (x) respectively
    # Simple algebra gives us the equations below
    # Recalculate as appropriate for map size and coordinates
    def mapLatLongToXY(self, lat, lon):
        x = (int)(0.733 * lon + 132)
        y = (int)(-1.006 * lat + 90.5)
        return x, y
# The main function
def main():
    # API to get ISS Current Location
    URL = 'http://api.open-notify.org/iss-now.json'

    # Initialize and clear the 2in7b (tri-color) display
    disp = st7789.ST7789(
        spi,
        cs=cs_pin,
        dc=dc_pin,
        rst=reset_pin,
        baudrate=BAUDRATE,
        width=135,
        height=240,
        x_offset=53,
        y_offset=40,
    )
    display = Display(disp.height, disp.width)

    # Store positions in list
    positions = []

    while(True):
        t0 = time()
        r = requests.get(url = URL)
        # extracting data in json format
        data = r.json()
        print(data)
        lat = float(data['iss_position']['latitude'])
        lon = float(data['iss_position']['longitude'])
        positions.append((lat, lon))
        print(positions)

        # Refresh the display on the first fetch and then on every DISPLAY_REFRESH_INTERVAL fetch
        if ((len(positions) >= 1) and ((len(positions)-1) % DISPLAY_REFRESH_INTERVAL)):
            disp.init()
            (imageWhite, imageRed) = display.drawISS(positions)
            # We're drawing the map in white and the ISS location and trajectory in red
            # Swap it around if you'd like the inverse color scheme
            disp.display(disp.getbuffer(imageWhite), disp.getbuffer(imageRed))
            sleep(2)
            disp.sleep()
        t1 = time()
        sleepTime = max(DATA_INTERVAL - (t1 - t0), 0)
        sleep(sleepTime) # sleep for 30 seconds minus duration of get request and display refresh
# gracefully exit without a big exception message if possible
def ctrl_c_handler(signal, frame):
    print('Goodbye!')
    # XXX : TODO
    #
    # To preserve the life of the ePaper display, it is best not to keep it powered up -
    # instead putting it to sleep when done displaying, or cutting off power to it altogether.
    #
    # dispconfig.module_exit() shuts off power to the module and calls GPIO.cleanup()
    # The latest disp library chooses to shut off power (call module_exit) even when calling disp.sleep()
    # disp.sleep() calls dispconfig.module_exit(), which in turn calls cleanup().
    # We can therefore end up in a situation calling GPIO.cleanup twice
    #
    # Need to clean up the Waveshare disp code to call GPIO.cleanup() only once
    # For now, calling dispconfig.module_init() to set up GPIO before calling module_exit to make sure
    # power to the ePaper display is cut off on exit
    # I have also modified dispconfig.py to initialize the SPI handle in module_init() (vs. at the global scope)
    # because sleep/module_exit closes the SPI handle, which wasn't getting initialized in module_init
    # dispconfig.module_init()
    # dispconfig.module_exit()
    # print("Remember to clear the display using cleardisplay.py if you plan to power down your Pi and store it, to prevent burn-in!")
    exit(0)
signal.signal(signal.SIGINT, ctrl_c_handler)
if __name__ == '__main__':
    main()
You can't do
disp.display(disp.getbuffer(imageWhite), disp.getbuffer(imageRed))
because your disp object has no method named "display"
Instead, you need to set the image in your disp object like so:
disp.image(imageRed, rotation)
In the original code, the display is divided into two sections - Red and Black. They get rendered by the ePaper separately, hence you needed two images.
With your display, you can construct a single image that has all the colors, and then set that image the same way:
disp.image(newImage, rotation)
where newImage is the image returned by the Display.drawISS method. You will need to modify the drawISS method (or create a new method, say drawISS_TFT) so that it returns a single RGB image containing both the map and the trajectory in different colors (instead of drawISS drawing one image in black and the other in red - just use a single image).
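A rough sketch of what such a combined method could look like, following the structure of drawISS above (drawISS_TFT is just a suggested name; the colors and marker sizes are arbitrary, and the mapLatLongToXY constants would still need recalculating for the 240x135 map, as its comments already note):

    # Sketch only: single-RGB-image variant of drawISS for the PiTFT
    def drawISS_TFT(self, positions):
        image = Image.new("RGB", (self.imageWidth, self.imageHeight), (255, 255, 255))
        worldMap = Image.open('world_map_m.bmp').convert('RGB').resize((self.imageWidth, self.imageHeight))
        image.paste(worldMap, (0, 0))
        draw = ImageDraw.Draw(image)
        issLogo = Image.open('iss.bmp').convert('RGB')
        for i, (lat, lon) in enumerate(positions):
            (x, y) = self.mapLatLongToXY(lat, lon)
            if i == len(positions) - 1:
                # latest position: paste the ISS icon
                image.paste(issLogo, (int(x) - 10, int(y) - 10))
            elif (i + 1) % (15 * 60 // DATA_INTERVAL) == 0:
                # 15-minute marker
                draw.rectangle((x - 2, y - 2, x + 2, y + 2), fill=(255, 0, 0))
            else:
                draw.ellipse((x - 1, y - 1, x + 1, y + 1), outline=(255, 0, 0))
        return image

In main() you would then call disp.image(display.drawISS_TFT(positions), rotation) instead of the two-buffer disp.display(...) line; the disp.init(), disp.getbuffer() and disp.sleep() calls also come from the Waveshare ePaper driver and would likely need to go.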
That should do the trick.
But first, to see your TFT display something, just replace
disp.display(disp.getbuffer(imageWhite), disp.getbuffer(imageRed))
with
disp.image(imageRed, rotation)
OR
disp.image(imageWhite, rotation)
If you see the ISS trajectory or World Map as appropriate, then you'll know you are on the right track before doing the other stuff.
-Sridhar
I have a gallery application where the users upload photos and my code gives it a border, writes some of the photo attributes on the border and stores it.
image2 = Image.open('media/' + str(image.file))
width, height = image2.size;
image2 = ImageOps.expand(image2, border=(int(width/25),int(height/20),int(width/25),int(height/10)), fill='rgb(0,0,0)')
(Note that here my bottom border is longer than the top because I am writing attributes on the bottom border.)
Now I'm building an edit feature for the uploaded images where the user can change the attributes of the uploaded images. But the attributes that are already written on the border have to be overwritten.
So here, my approach is to put a black patch on the bottom border and re-write the new attributes without changes the top and side borders and without changing the aspect ratio. All of this has to be done using PIL.
Question is how do I put a black box on the bottom border?
I tried ImageOps.fit() as mentioned here https://pillow.readthedocs.io/en/3.3.x/reference/ImageOps.html#PIL.ImageOps.fit, but the aspect ratio doesn't seem to be right, and I want to overwrite the bottom border with a black box rather than crop the photo.
To me it seems like the easiest solution is to just quickly draw the black pixels in the area that you want, using a couple of loops and Image.putpixel:
from PIL import Image
img = Image.open('red.png')
for x in range(img.width):
    for y in range(img.height - 40, img.height):
        img.putpixel((x, y), (0, 0, 0))
img.save('red2.png')
The simplest way in my opinion is to create a new black image and paste onto your existing image -
from PIL import Image
im = Image.open('test.png')
blackBox = Image.new(im.mode, (im.width, 50), '#000')
im.paste(blackBox, (0, im.height - blackBox.height))
Alternatively, you could use ImageDraw - http://pillow.readthedocs.io/en/5.2.x/reference/ImageDraw.html - which you could use to draw rectangles and other shapes.
from PIL import Image, ImageDraw
im = Image.open('test.png')
d = ImageDraw.Draw(im)
d.rectangle((0, im.height - 50, im.width, im.height), fill='#000')
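Either way, once the strip is blanked you can write the new attributes back onto it with ImageDraw.text. A small follow-on to the snippet above (the font and text are placeholders):

from PIL import ImageFont

font = ImageFont.load_default()  # or ImageFont.truetype(...) with the font you used originally
d.text((10, im.height - 45), "New attributes go here", font=font, fill='white')
im.save('test_updated.png')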
I've tried multiple ways of displaying a very large (really long) image with tkinter. No matter what I've tried, there doesn't seem to be any code that works. The main issue is that Canvas has a maximum height limit of around 30,000 pixels.
Is there a way to display the whole image, or to increase or get around the canvas limit? See the example image below.
There is no way around the size limit of the canvas, short of modifying and recompiling the underlying tk code. This would likely not be a trivial task.
Assuming you are trying to display the image on a typical computer screen, there are still ways to view the image. Basically it boils down to only loading the part of the image that the user can see at any one time.
For example, an image of the world is considerably larger than 64k by 64k, yet google maps is able to let you scroll around all you want. It does this by displaying the map as a series of tiles. As you move around the image, off-screen tiles are thrown away and new tiles are loaded.
This same technique can be used in tkinter, and can even be used with scrollbars instead of a dragging motion. You just need to hook the scrollbars up to a function rather than directly to the canvas. Then, when the function is called, it can compute which part of the image that the user is looking at, and load it into memory.
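A bare-bones sketch of that idea, assuming a tall image on disk and loading only the horizontal band the user has scrolled to (the filename, the 1000-pixel band and the 600-pixel viewport are arbitrary):

import tkinter as tk
from PIL import Image, ImageTk

Image.MAX_IMAGE_PIXELS = None  # allow very large source images
BAND = 1000                    # height of the slice kept in memory at any one time

root = tk.Tk()
src = Image.open("really_long_image.png")   # placeholder filename
canvas = tk.Canvas(root, width=src.width, height=600)
canvas.pack(side="left", fill="both", expand=True)

photo = None  # keep a reference so Tk doesn't garbage-collect the current slice
item = canvas.create_image(0, 0, anchor="nw")

def show_band(first_row):
    """Crop the band starting at first_row out of the big image and display it."""
    global photo
    first_row = max(0, min(first_row, src.height - BAND))
    band = src.crop((0, first_row, src.width, first_row + BAND))
    photo = ImageTk.PhotoImage(band)
    canvas.itemconfig(item, image=photo)

def on_scroll(*args):
    # scrollbar callback: args is ('moveto', fraction) when dragging,
    # or ('scroll', amount, 'units'/'pages') for the arrows (handled coarsely here)
    if args[0] == "moveto":
        frac = float(args[1])
    else:
        frac = scrollbar.get()[0] + int(args[1]) * 0.01
    frac = max(0.0, min(frac, 1.0))
    scrollbar.set(frac, min(frac + BAND / src.height, 1.0))
    show_band(int(frac * (src.height - BAND)))

scrollbar = tk.Scrollbar(root, orient="vertical", command=on_scroll)
scrollbar.pack(side="right", fill="y")

show_band(0)
root.mainloop()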
This is a rather unattractive answer, but an answer nonetheless. This divides extremely long images up into "tiles" of 1000-pixel lengths. It does not divide the width. I've spliced together code from several sources until I got it all to work. If someone could make this with scroll-bar functionality, that would be cool.
from tkinter import *
from PIL import ImageTk as itk
from PIL import Image
import math
import numpy as np
Image.MAX_IMAGE_PIXELS = None #prevents the "photo bomb" warning from popping up. Have to have this for really large images.
#----------------------------------------------------------------------
# makes a simple window with buttons that let you go "up" and "down" an image.
class MainWindow():

    #----------------
    def __init__(self, main):
        # canvas for image
        _, th, tw, rows, cols = self.getrowsandcols()
        self.canvas = Canvas(main, width=tw, height=th)
        #self.canvas.grid(row=0, column=0)
        self.canvas.pack()

        # images
        self.my_images = self.cropimages()  # crop the really large image down into several smaller images and append to this list
        self.my_image_number = 0

        # set first image on canvas
        self.image_on_canvas = self.canvas.create_image(0, 0, anchor=NW, image=self.my_images[self.my_image_number])

        # buttons to change image
        self.upbutton = Button(main, text="UP", command=self.onUpButton)
        self.downbutton = Button(main, text="DOWN", command=self.onDownButton)
        self.upbutton.pack()
        self.downbutton.pack()
        #self.downbutton.grid(row=1, column=0)
        #self.upbutton.grid(row=1, column=0)

    #----------------
    def getimage(self):
        im = Image.open("Test_3.png")  # import the image
        im = im.convert("RGBA")        # convert the image to color including the alpha channel (the transparency, best I understand)
        width, height = im.size        # get the width and height
        return width, height, im      # return relevant variables/objects

    def getrowsandcols(self):
        width, height, im = self.getimage()
        im = np.asarray(im)                # Convert image to Numpy Array
        tw = width                         # Tile width will equal the width of the image
        th = int(math.ceil(height / 100))  # Tile height
        rows = int(math.ceil(height / th)) # Number of tile rows
        cols = int(math.ceil(width / tw))  # Number of tile columns
        return im, th, tw, rows, cols      # return selected variables

    def cropimages(self):
        self.my_images = []  # initialize list to hold Tkinter "PhotoImage" objects
        im, th, tw, rows, cols = self.getrowsandcols()  # pull in needed variables to crop the really long image
        for r in range(rows):                           # loop row by row to crop all of the image
            crop_im = im[r * th:((r * th) + th), 0:tw]  # crop the image for the current row (r); (th) stands for tile height
            crop_im = Image.fromarray(crop_im)          # convert the image from a Numpy Array to a PIL image
            crop_im = itk.PhotoImage(crop_im)           # convert the PIL image to a Tkinter PhotoImage object
            self.my_images.append(crop_im)              # Append the photo object to the list
            crop_im = None
        return self.my_images

    def onUpButton(self):
        # previous tile (wrap to the last one if we're at the top)
        if self.my_image_number == 0:
            self.my_image_number = len(self.my_images) - 1
        else:
            self.my_image_number -= 1
        # change image
        self.canvas.itemconfig(self.image_on_canvas, image=self.my_images[self.my_image_number])  # attaches the image from the image list to the canvas

    def onDownButton(self):
        # next tile
        self.my_image_number += 1
        # return to first image after the last one
        if self.my_image_number == len(self.my_images):
            self.my_image_number = 0
        # change image
        self.canvas.itemconfig(self.image_on_canvas, image=self.my_images[self.my_image_number])  # attaches the image from the image list to the canvas
#----------------------------------------------------------------------
root = Tk()
MainWindow(root)
root.mainloop()