I'm trying to get the album image for the current song to display in the window along with the song title and artist, but it just doesn't do anything. I've tried replacing the imageLabel line with
imageLabel = tkinter.Label(window, image=tkinter.PhotoImage(file="CurrentSong.jpg")) but it still doesn't work.
import requests
import time
import tkinter

token = ''
endpoint = "https://api.spotify.com/v1/me/player/currently-playing"
spotifyHeaders = {'Authorization': 'Bearer ' + token}
requestAmount = 1

window = tkinter.Tk(className="|CurrentSong Spotify Song|")
window.geometry('400x400')

canvas = tkinter.Canvas(window, height=1000, width=1000)
canvas.pack()

songLabel = tkinter.Label(window, bg='grey')
songLabel.pack()

def GrabSpotifyCurSong(curSongJson):
    return curSongJson['item']['name']

def GrabSpotifyCurArtist(curSongJson):
    return curSongJson['item']['artists'][0]['name']

def GrabCurrentSongImage(curSongJson):
    return curSongJson['item']['album']['images'][0]['url']

def displaySongs():
    while True:
        try:
            curSong = requests.get(endpoint, headers=spotifyHeaders)
            curSongJson = curSong.json()
            break
        except:
            print("Please start listening to a song")
            time.sleep(2)

    with open('CurrentSong.png', 'wb+') as SongImage:
        response = requests.get(GrabCurrentSongImage(curSongJson))
        SongImage.write(response.content)

    currentSong = GrabSpotifyCurSong(curSongJson)
    currentArtist = GrabSpotifyCurArtist(curSongJson)

    img = tkinter.PhotoImage(file="CurrentSong.png")
    imageLabel = tkinter.Label(window, image=img)

    # songLabel['text'] = f'{currentArtist} - {currentSong}'
    # songLabel.place(height=400,width=400)

    print(f'{currentArtist} - {currentSong}')
    window.after(2500, displaySongs)

displaySongs()
window.mainloop()
Images in tkinter have to be PhotoImage instances; here it is just a string with the location of the image, and tkinter does not understand that. Furthermore, tkinter.PhotoImage does not recognize the JPEG format, so you have to either convert the file to PNG or use PIL.ImageTk.PhotoImage to load a JPEG.
For JPEG and other formats too:
First pip install Pillow and then:
import tkinter
from PIL import Image, ImageTk
....
img = ImageTk.PhotoImage(Image.open("CurrentSong.jpg"))
imageLabel = tkinter.Label(window,image=img)
You can also use ImageTk.PhotoImage(file="CurrentSong.jpg") directly, but that removes the flexibility you get from opening the image with PIL first, for example if you want to resize it or apply filters. If you don't need that, the direct form is fine.
For GIF, PGM, PPM, and PNG:
img = tkinter.PhotoImage(file="CurrentSong.png")
imageLabel = tkinter.Label(window,image=img)
Also note that if these are created inside a function, you have to keep a reference to the PhotoImage object; otherwise it will be collected by the garbage collector after the function finishes running and the image will not appear.
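A minimal sketch of keeping that reference by attaching it to the label object itself (assuming a CurrentSong.jpg file sits next to the script, as in the question):

import tkinter
from PIL import Image, ImageTk

window = tkinter.Tk()

def show_cover():
    img = ImageTk.PhotoImage(Image.open("CurrentSong.jpg"))
    imageLabel = tkinter.Label(window, image=img)
    imageLabel.image = img  # keep a reference so the image survives the function returning
    imageLabel.pack()

show_cover()
window.mainloop()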
As the title says, it returns the error "_tkinter.TclError: image "pyimage3" doesn't exist". I have tried changing root = tk.Tk() to root = tk.Toplevel() as suggested by previous users who received the same error.
The script goes through a list of image files stored in a CSV file; the idea is to create a slideshow, and the CSV file stores the formats of the different slides.
The "iam" if statement refers to an "image and message" slide, which is what is currently producing the errors. I have tried opening the image using the same calls as the regular image slide type, but it crashes in a new and unique way every time.
I can post more information as needed, but if anybody has any ideas as to how I could fix this, I would love to hear them.
# import required modules
import tkinter as tk
from tkinter import *
from PIL import Image
from PIL import ImageTk
import pandas as pd
import datetime
import display_save

# Initialize tkinter window
root = tk.Tk()

# Retrieve data table
frame = pd.read_csv("data.csv")

# Establish variables
show_size = len(frame.index)
img = ImageTk.PhotoImage(Image.open("teapot.png"))
bg_img = ImageTk.PhotoImage(Image.open("teapot.png"))
time_step = 1

# Initialize image label as empty
img_lbl = Label(root)
img_lbl.pack()

# Initialize text label
txt_lbl = Label(root)
txt_lbl.pack()

img_txt_lbl = Label(root)
img_txt_lbl.pack()

# Keypress event management
res_lbl = Label(root)

def keypress(event):
    global res_lbl
    if(event.char == "f"):
        root.attributes('-fullscreen', False)
    elif(event.char == "r"):
        res_lbl.pack()
        res_lbl.config(text=str(root.winfo_width()) + " x " + str(root.winfo_height()))

def keyrelease(event):
    global res_lbl
    if (event.char == "f"):
        root.attributes('-fullscreen', True)
    elif (event.char == "r"):
        res_lbl.pack_forget()

# bind key events
root.bind("<KeyPress>", keypress)
root.bind("<KeyRelease>", keyrelease)

x = 0

# Function to rotate images
def runtime():
    global x
    global img
    global img_lbl
    global txt_lbl
    global img_txt_lbl
    global bg_img
    if(x <= show_size):
        df = pd.read_csv('data.csv')
        df = df.iloc[[x - 1]]
        t = df.iloc[0]['type']
        date_remv = df.iloc[0]['date_remove']
        # If type is image, initialize
        if(t == "img"):
            img_lbl.pack()
            txt_lbl.pack_forget()
            img_txt_lbl.pack_forget()
            root.config(bg='white')
            p = df.iloc[0]['data']
            temp = Image.open(p)
            temp = temp.resize((root.winfo_width(), root.winfo_height()))
            img = ImageTk.PhotoImage(temp)
        # If type is message, initialize
        elif (t == "msg"):
            txt_lbl.pack()
            img_lbl.pack_forget()
            img_txt_lbl.pack_forget()
            m = df.iloc[0]['data']
            c = df.iloc[0]['data2']
            txt_lbl.config(bg=c, text=m, anchor=CENTER, height=20, wraplength=1000, font=("Arial", 50))
            root.config(bg=c)
        # If type is an image and a message, initialize
        elif (t == "iam"):
            img_txt_lbl.pack()
            txt_lbl.pack_forget()
            img_lbl.pack_forget()
            p = df.iloc[0]['data']
            temp = Image.open("teapot.png")
            temp = temp.resize((root.winfo_screenwidth(), root.winfo_screenheight()))
            temp = ImageTk.PhotoImage(temp)
            bg_img = temp
            m = df.iloc[0]['data2']
            img_txt_lbl.config(text=m, height=root.winfo_screenheight(), width=root.winfo_screenwidth(), wraplength=1000, font=("Arial", 50), compound='center')
            root.config(bg='white')
        # Check to make sure the slides list is up-to-date
        if(datetime.datetime.strptime(date_remv, display_save.format) <= datetime.datetime.now()):
            index = df.iloc[0]['id']
            display_save.delete_row(index)
        root.after(time_step * 1000, runtime)
    else:
        x = 0
        root.after(0, runtime)
    x = x + 1
    img_lbl.config(image=img)
    img_txt_lbl.config(image=bg_img)

runtime()
root.attributes('-fullscreen', True)
root.mainloop()
Whenever you create a new image with ImageTk.PhotoImage() you have to make sure the variable holding it stays alive for the entire time your code runs. This means, for example, that if you create the first image in a variable img, you must not reuse the SAME variable for a new image, because once the old PhotoImage is overwritten it is garbage-collected and gone. The trick is to store the old image somewhere it doesn't change: simply append each PhotoImage to a list that acts as a memory for all of them. I recently answered the same type of question here, where there may be a better explanation: How to create multiple images in tkinter that can scale.
I haven't tested this, but I believe it should work.
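A minimal sketch of that list-of-references idea, assuming a label named img_lbl and a teapot.png file as in the question:

import tkinter as tk
from PIL import Image, ImageTk

root = tk.Tk()
img_lbl = tk.Label(root)
img_lbl.pack()

photo_refs = []  # keeps every PhotoImage alive so tkinter can still draw it

def show_image(path):
    photo = ImageTk.PhotoImage(Image.open(path))
    photo_refs.append(photo)  # store the reference instead of overwriting it
    img_lbl.config(image=photo)

show_image("teapot.png")
root.mainloop()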
I was able to get the current image from a Thorlabs uc480 camera using instrumental. My issue is adjusting the parameters of grab_image: I can change cx and left to any value and still get an image, but cy and top only work if cy=600 and top=300. The purpose is to create a GUI so that the user can select values for these parameters to zoom the image in and out.
Here is my code:
import instrumental
from instrumental.drivers.cameras import uc480
from matplotlib.figure import Figure
import matplotlib.pyplot as plt

paramsets = instrumental.list_instruments()
cammer = instrumental.instrument(paramsets[0])

plt.figure()
framer = cammer.grab_image(timeout='1s', copy=True, n_frames=1, exposure_time='5ms',
                           cx=640, left=10, cy=600, top=300)
plt.pcolormesh(framer)
The above code does not give an image if I choose cy=600 and top=10. Are there particular values that must be used for these parameters? How can I get an image of the full sensor size?
Thorlabs has a Python programming interface available as a download on their website. It is very well documented, and can be installed locally via pip.
Link:
https://www.thorlabs.com/software_pages/ViewSoftwarePage.cfm?Code=ThorCam
Here is an example of a simple capture algorithm that might help get you started:
import numpy as np  # needed for np.reshape below

from thorlabs_tsi_sdk.tl_camera import TLCameraSDK
from thorlabs_tsi_sdk.tl_mono_to_color_processor import MonoToColorProcessorSDK
from thorlabs_tsi_sdk.tl_camera_enums import SENSOR_TYPE

# open the TLCameraSDK dll
with TLCameraSDK() as sdk:
    cameras = sdk.discover_available_cameras()
    if len(cameras) == 0:
        print("Error: no cameras detected!")

    with sdk.open_camera(cameras[0]) as camera:
        # camera.disarm()  # ensure any previous session is closed

        # setup the camera for continuous acquisition
        camera.frames_per_trigger_zero_for_unlimited = 0
        camera.image_poll_timeout_ms = 2000  # 2 second timeout
        camera.arm(2)

        # need to save the image width and height for color processing
        image_width = camera.image_width_pixels
        image_height = camera.image_height_pixels

        # initialize a mono to color processor if this is a color camera
        is_color_camera = (camera.camera_sensor_type == SENSOR_TYPE.BAYER)
        mono_to_color_sdk = None
        mono_to_color_processor = None
        if is_color_camera:
            mono_to_color_sdk = MonoToColorProcessorSDK()
            mono_to_color_processor = mono_to_color_sdk.create_mono_to_color_processor(
                camera.camera_sensor_type,
                camera.color_filter_array_phase,
                camera.get_color_correction_matrix(),
                camera.get_default_white_balance_matrix(),
                camera.bit_depth
            )

        # begin acquisition
        camera.issue_software_trigger()

        # get the next frame
        frame = camera.get_pending_frame_or_null()

        # initialize frame attempts and max limit
        frame_attempts = 0
        max_attempts = 10

        # if frame is null, try to get a frame until
        # successful or until max_attempts is reached
        if frame is None:
            while frame is None:
                frame = camera.get_pending_frame_or_null()
                frame_attempts += 1
                if frame_attempts == max_attempts:
                    raise TimeoutError("Timeout was reached while polling for a frame, program will now exit")

        image_data = frame.image_buffer
        if is_color_camera:
            # transform the raw image data into RGB color data
            color_data = mono_to_color_processor.transform_to_24(image_data, image_width, image_height)
            save_data = np.reshape(color_data, (image_height, image_width, 3))

        camera.disarm()
You can also process the image after capture with the PIL library.
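As a rough sketch of that last step (assuming the save_data array built in the snippet above; the output file name is just an example):

import numpy as np
from PIL import Image

# save_data is the (height, width, 3) RGB array from the capture snippet above
pil_image = Image.fromarray(save_data.astype(np.uint8))
pil_image = pil_image.resize((640, 480))  # example post-processing step
pil_image.save("capture.png")  # example output file name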
Introduction
I am trying to make a small tool for classifying images using ipywidgets in a Jupyter Notebook, but I am having some trouble aligning the classes and the images. Do you have any suggestions for how to fix this?
What I did
import ipywidgets as widgets
from IPython.display import display
import glob

# My images
image_paths = glob.glob("./images/*.png")

# Display image
def display_image(path):
    file = open(path, "rb")
    image = file.read()
    return widgets.Image(
        value=image,
        format='png',
        width=700,
        height=700,
    )

# Dropdown
def create_dropdown():
    return widgets.Dropdown(
        options=["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"],
        value='5',
        description='Category:',
        disabled=False
    )

# Creating widgets
input_dropdown = create_dropdown()
button = widgets.Button(description="Submit")
output_image = widgets.Image()
output_image.value = display_image(image_paths[-1]).value

# Define function to bind value of the input to the output variable
def bind_input_to_output(sender):
    image_path = image_paths[-1]
    image_score = input_dropdown.value
    next_image_path = image_paths.pop()
    print(image_score, image_path)
    output_image.value = display_image(next_image_path).value

# Tell the text input widget to call bind_input_to_output() on submit
button.on_click(bind_input_to_output)

# Displaying widgets
display(output_image, input_dropdown, button)
Results
With the above code I end up categorising the upcoming picture rather than the one being shown, and I really don't understand why. It seems the widget does not update the image the first time I press the button.
def bind_input_to_output(sender):
    image_path = image_paths.pop()
    image_score = input_dropdown.value
    next_image_path = image_paths[-1]
    print(image_score, image_path)
    output_image.value = display_image(next_image_path).value
Pop the path of the currently displayed image first, then use the new last item as the next file name to show.
Sample Image
Hello,
I created an application in Python that selects a Region of Interest (ROI) of an image, records it, and labels it. But it has a limit of one ROI per image; does anyone know how to allow multiple ROI selections per image? Also, as you can see in the attached image, I have multiple windows; I want it all to be in one window with different options. What packages are used for this kind of application?
Here's my code in Python using opencv2. Thank you in advance for the help.
for image in filelist:
    img = cv2.imread(image)
    fromCenter = False
    r = cv2.selectROI(img, fromCenter)
    lbl = simpledialog.askstring("Image Label", "Please Enter Label")
    result = eTree.SubElement(results, "Image")
    path = eTree.SubElement(result, 'Path')
    roi = eTree.SubElement(result, 'ROI')
    label = eTree.SubElement(result, 'Label')
    path.text = str(image)
    roi.text = str(r)
    label.text = str(lbl)
    tree = eTree.ElementTree(results)
    i = i + 1
    if i == count:
        format = [('XML Files', '*.xml'), ('All Files', '*.*')]
        save = filedialog.asksaveasfilename(filetype=format, defaultextension='*.xml')
        tree.write(save, xml_declaration=True, encoding='utf-8', method="xml")
Well, at least for the first part of the question: have you considered trying cv2.selectROIs() instead of cv2.selectROI()? When the image window opens, you select your first ROI and press Enter, then the second and press Enter, and so on; when you are finished, press the Escape key. It returns x, y, w, h for each ROI. Note that you will have to change your code accordingly, but it will allow you to select multiple ROIs.
Input image:
Example:
import cv2
img = cv2.imread('rois.png')
fromCenter = False
ROIs = cv2.selectROIs('Select ROIs', img, fromCenter)
ROI_1 = img[ROIs[0][1]:ROIs[0][1]+ROIs[0][3], ROIs[0][0]:ROIs[0][0]+ROIs[0][2]]
ROI_2 = img[ROIs[1][1]:ROIs[1][1]+ROIs[1][3], ROIs[1][0]:ROIs[1][0]+ROIs[1][2]]
ROI_3 = img[ROIs[2][1]:ROIs[2][1]+ROIs[2][3], ROIs[2][0]:ROIs[2][0]+ROIs[2][2]]
cv2.imshow('1', ROI_1)
cv2.imshow('2', ROI_2)
cv2.imshow('3', ROI_3)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
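If you don't know in advance how many ROIs will be selected, a loop over the returned array avoids hardcoding each crop; a small sketch reusing the img and ROIs variables from the example above:

# show every selected ROI, however many there are
for i, (x, y, w, h) in enumerate(ROIs):
    crop = img[y:y + h, x:x + w]
    cv2.imshow('ROI {}'.format(i + 1), crop)
cv2.waitKey(0)
cv2.destroyAllWindows()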
For custom ROIs you can use EasyROI. It supports rectangles, lines, circles and polygons.
To use it:
pip install EasyROI
import cv2
from EasyROI import EasyROI

roi_helper = EasyROI()
frame = cv2.imread('rois.png')  # any image or video frame to draw the ROIs on
roi = roi_helper.draw_rectangle(frame, quantity=2)
I'm trying to save a captured 640x480 RGB image from NAO's front camera to my computer. I'm using Python and PIL to do so. Unfortunately, the image just won't save to my computer, no matter what image type or path I use as parameters of the Image.save() method. The image created with PIL contains valid RGB information, though. Here's my code sample from Choregraphe:
import Image

def onInput_onStart(self):
    cam_input = ALProxy("ALVideoDevice")
    nameId = cam_input.subscribeCamera("Test_Cam", 1, 2, 13, 20)
    image = cam_input.getImageRemote(nameId)  # captures an image
    w = image[0]  # get the image width
    h = image[1]  # get the image height
    pixel_array = image[6]  # contains the image data
    result = Image.fromstring("RGB", (w, h), pixel_array)
    # the following line doesn't work
    result.save("C:\Users\Claudia\Desktop\NAO\Bilder\test.png", "PNG")
    cam_input.releaseImage(nameId)
    cam_input.unsubscribe(nameId)
    pass
Thank you so much for your help in advance!
- a frustrated student
In the comments you say the code was pasted from Choregraphe, so I guess you launch it from Choregraphe.
If so, the code is injected into your robot and then started.
So your image is saved to the NAO's hard drive, and your robot most likely has no path like "C:\Users\Claudia\Desktop\NAO\Bilder\test.png".
So change the path to "/home/nao/test.png", start your code, then log into your NAO using PuTTY, or browse its folders using WinSCP (as it looks like you're using Windows).
You should then see your image file.
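In other words, the only change needed in the snippet above is the save path, something like:

# save to a path that exists on the robot's file system
result.save("/home/nao/test.png", "PNG")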
In order for your code to run correctly it needs to be properly indented. Your code should look like this:
import Image

def onInput_onStart(self):
    cam_input = ALProxy("ALVideoDevice")
    nameId = cam_input.subscribeCamera("Test_Cam", 1, 2, 13, 20)
    image = cam_input.getImageRemote(nameId)  # captures an image
    w = image[0]  # get the image width
    h = image[1]  # get the image height
    pixel_array = image[6]  # contains the image data
    ...
Make sure to indent everything that's inside the def onInput_onStart(self): method.
Sorry for the late response, but it may be helpful for someone. You should try it with naoqi. Here is the documentation for retrieving images:
http://doc.aldebaran.com/2-4/dev/python/examples/vision/get_image.html
The original code was not working for me, so I made some tweaks.
import argparse
import time

import qi
from PIL import Image  # with the old standalone PIL on Python 2.7 this can also be "import Image"

parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="nao.local.",
                    help="Robot IP address. On robot or Local Naoqi: use 'nao.local.'.")
parser.add_argument("--port", type=int, default=9559,
                    help="Naoqi port number")
args = parser.parse_args()

session = qi.Session()
try:
    session.connect("tcp://" + args.ip + ":" + str(args.port))
except RuntimeError:
    pass

"""
First get an image, then show it on the screen with PIL.
"""
# Get the service ALVideoDevice.
video_service = session.service("ALVideoDevice")
resolution = 2   # VGA
colorSpace = 11  # RGB
videoClient = video_service.subscribe("python_client", 0, 3, 13, 1)

t0 = time.time()

# Get a camera image.
# image[6] contains the image data passed as an array of ASCII chars.
naoImage = video_service.getImageRemote(videoClient)

t1 = time.time()

# Time the image transfer.
print("acquisition delay ", t1 - t0)

# video_service.unsubscribe(videoClient)

# Now we work with the image returned and save it as a PNG using ImageDraw
# package.

# Get the image size and pixel array.
imageWidth = naoImage[0]
imageHeight = naoImage[1]
array = naoImage[6]
image_string = str(bytearray(array))

# Create a PIL Image from our pixel array.
im = Image.fromstring("RGB", (imageWidth, imageHeight), image_string)

# Save the image.
im.save("C:\\Users\\Lenovo\\Desktop\\PROJEKTI\\python2-connect4\\camImage.png", "PNG")
Be careful to use Python 2.7. The code runs on your computer, not on the NAO robot!