Why do I get two different results from the same code? - python

I'm wondering why I get two different results from this code, which I'm using from GitHub. If I run the compiled package on my cell phone, I get a barcode readout of 991245243. However, if I use my computer, the readout is 0991245243. Can anyone explain why?
What I am trying to do is have the result from the barcode scanner checked against a spreadsheet and then return the values from the adjacent cells. I want exact matches only, because it could be catastrophic if multiple conflicting results are found.
# Kivy OpenCV Barcode Scanner
# done by Vijeth P H
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.image import Image
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from pyzbar import pyzbar
import webbrowser
import cv2
# Create global variables, for storing and displaying barcodes
outputtext=''
weblink=''
leb=Label(text=outputtext,size_hint_y=None,height='48dp',font_size='45dp')
found = set() # this will not allow duplicate barcode scans to be stored
togglflag=True
class MainScreen(BoxLayout):
    # first screen that is displayed when program is run
    def __init__(self,**kwargs):
        super().__init__(**kwargs)
        self.orientation='vertical' # vertical placing of widgets
        self.cam=cv2.VideoCapture(0) # start OpenCV camera
        self.cam.set(3,1280) # set resolution of camera
        self.cam.set(4,720)
        self.img=Image() # Image widget to display frames
        # create Toggle Button for pause and play of video stream
        self.togbut=ToggleButton(text='Pause',group='camstart',state='down',size_hint_y=None,height='48dp',on_press=self.change_state)
        self.but=Button(text='Stop',size_hint_y=None,height='48dp',on_press=self.stop_stream)
        self.add_widget(self.img)
        self.add_widget(self.togbut)
        self.add_widget(self.but)
        Clock.schedule_interval(self.update,1.0/30) # update for 30fps

    # update frame of OpenCV camera
    def update(self,dt):
        if togglflag:
            ret, frame = self.cam.read() # retrieve frames from OpenCV camera
            if ret:
                buf1=cv2.flip(frame,0) # convert it into texture
                buf=buf1.tostring()
                image_texture=Texture.create(size=(frame.shape[1],frame.shape[0]),colorfmt='bgr')
                image_texture.blit_buffer(buf,colorfmt='bgr',bufferfmt='ubyte')
                self.img.texture=image_texture # display image from the texture
                barcodes = pyzbar.decode(frame) # detect barcode from image
                for barcode in barcodes:
                    (x, y, w, h) = barcode.rect
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    barcodeData = barcode.data.decode("utf-8")
                    barcodeType = barcode.type
                    weblink=barcodeData
                    text = "{} ({})".format(barcodeData, barcodeType)
                    cv2.putText(frame, text, (x, y - 10),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
                    if barcodeData not in found: # check if detected barcode is a duplicate
                        outputtext=text
                        leb.text=outputtext # display the barcode details
                        found.add(barcodeData)
                        self.change_screen()
            key = cv2.waitKey(1) & 0xFF
            if key == ord("q"):
                cv2.destroyAllWindows()
                exit(0)

    # change state of toggle button
    def change_state(self,*args):
        global togglflag
        if togglflag:
            self.togbut.text='Play'
            togglflag=False
        else:
            self.togbut.text='Pause'
            togglflag=True

    def stop_stream(self,*args):
        self.cam.release() # stop camera

    def change_screen(self,*args):
        main_app.sm.current='second' # once barcode is detected, switch to second screen

class SecondScreen(BoxLayout):
    def __init__(self,**kwargs):
        super().__init__(**kwargs)
        self.orientation='vertical'
        self.lab1=Label(text='Output: ',size_hint_y=None,height='48dp',font_size='45dp')
        self.but1=Button(text='Open in Web Browser',on_press=self.open_browser,size_hint_y=None,height='48dp')
        self.add_widget(self.lab1)
        self.add_widget(leb)
        self.add_widget(self.but1)

    def open_browser(self,*args):
        webbrowser.open(weblink) # this opens link in browser

class TestApp(App):
    def build(self):
        self.sm=ScreenManager() # screenmanager is used to manage screens
        self.mainsc=MainScreen()
        scrn=Screen(name='main')
        scrn.add_widget(self.mainsc)
        self.sm.add_widget(scrn)
        self.secondsc=SecondScreen()
        scrn=Screen(name='second')
        scrn.add_widget(self.secondsc)
        self.sm.add_widget(scrn)
        return self.sm

if __name__ == '__main__':
    main_app=TestApp()
    main_app.run()
    cv2.destroyAllWindows()
Here is a picture as requested.

As the behavior of the reader differs between the two platforms, and the decoding is wrong, one can suspect a bug in the reader that causes access to uninitialized memory. (If the problem were just a misread, not a bug, the texts would be strictly identical.)
Or could it be that the text display clips off the first character on the phone?
Update:
The new information that on one platform the code is reported as an EAN-13 and on the other as a UPC-A explains the extra 0. (You should have said that upfront.) The two standards differ slightly in how the number is reported: UPC-A is a 12-digit code, and when the same symbol is decoded as EAN-13 it is reported with a leading zero prepended.
But this does not explain why the code is completely wrong, nor why different symbologies are reported. (Maybe the default settings differ on the two platforms.)
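If the end goal is the exact-match spreadsheet lookup described in the question, one way to make the phone and the desktop agree is to normalize the decoded value before comparing. This is only a minimal sketch, assuming pyzbar reports the symbology as 'UPCA' or 'EAN13' and that the spreadsheet has already been read into a dict mapping barcodes to the adjacent cell values (normalize_barcode and lookup are made-up helper names, not part of the code above):
def normalize_barcode(data, symbology):
    """Return a canonical 13-digit string so UPC-A and EAN-13 reads compare equal."""
    digits = data.strip()
    if symbology in ('UPCA', 'EAN13') and digits.isdigit():
        return digits.zfill(13)   # a UPC-A number is an EAN-13 with an implied leading zero
    return digits

def lookup(barcode_data, barcode_type, table):
    key = normalize_barcode(barcode_data, barcode_type)
    return table.get(key)         # None when there is no exact match
With both readings normalized to the same string, a single exact match either exists or it doesn't, which is what the question asks for.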

Related

Resize video frame to fit tkinter window size when the window is resized

I've written a piece of code to play a video using OpenCV in tkinter. It's part of a game I've made and compiled into an application. But I've noticed that when I play the game on different computers, since the screen sizes differ, the video doesn't fill the screen the way I want it to. The same goes for the background images I use on different pages, but for those I wrote a piece of code that resizes the background images to the screen size. Here it is:
def resizeimage(self,event) :
    width, height = self.winfo_width(), self.winfo_height()
    image = self.bg_image.resize((width,height))
    self.image1 = ImageTk.PhotoImage(image)
    self.bg_label.config(image = self.image1)
I've bound this function to the label that displays the background image like this:
self.bg_image = Image.open("project_pics\\start_pg.png")
bg_image = ImageTk.PhotoImage(self.bg_image)
self.bg_label = Label(self,image=bg_image)
self.bg_label.image = bg_image
self.bg_label.bind('<Configure>',self.resizeimage)
self.bg_label.grid(sticky="nwse")
Here, self.bg_image is the image to be displayed as the background and self.bg_label is the label that displays it.
I know I could implement something similar by resizing the frames in my video-playing code, but I can't seem to figure out a quick, efficient way to do so. Here is the code for the video player:
from tkinter import *
from tkinter.ttk import Button
from PIL import Image, ImageTk
import time
import cv2 as cv2
from threading import Thread
from Scripts.music_player import m_player
from Scripts.styles import Styles

# The Video Player
class VideoPlayer :
    def __init__(self,parent) :
        self.parent = parent
        self.play = False

    def player(self,vid_file,m_file,nxt_func):
        def get_frame():
            ret,frame = vid.read()
            if ret and self.play :
                return(ret,cv2.cvtColor(frame,cv2.COLOR_BGR2RGB))
            else :
                return(ret,None)

        def update() :
            ret,frame = get_frame()
            if ret and self.play :
                img = Image.fromarray(frame)
                photo = ImageTk.PhotoImage(image=img)
                photo.image=img
                self.canvas.itemconfig(self.vid_frame,image=photo)
                self.canvas.image=photo
                self.parent.after(delay,lambda : update())
            else :
                time.sleep(0.01)
                # stopping vid_music and starting game music
                m_player.music_control(m_file,True,-1,0)
                m_player.music_control("project_media\\signal.ogg",False,-1,0)
                nxt_func()

        def skip() :
            self.play = False
            self.parent.clear()

        self.play = True
        # starting music
        m_player.music_control("project_media\\signal.ogg",True,-1,0)
        m_player.music_control(m_file,False,-1,0)
        vid = cv2.VideoCapture(vid_file)
        width = vid.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = vid.get(cv2.CAP_PROP_FRAME_HEIGHT)
        self.canvas = Canvas(self.parent, width = width, height = height)
        self.canvas.place(relx=0.5,rely=0.5,anchor=CENTER)
        self.vid_frame = self.canvas.create_image(0, 0, anchor = NW)
        # Skip button
        if vid_file != "project_media\\glitch.mp4" :
            skip_thread = Thread(target=skip)
            skip = Button(self.parent,text="Skip",command=skip_thread.start,style="skip.TButton")
            skip.place(relx=0.88,rely=0.04)
        delay = 5
        update()
My question is this: how can I efficiently resize my frames to fit the screen size without slowing down execution? Also, the function I'm using right now to resize my background images seems to slow things down as well, so I see something like a glitch on the screen every time I change pages. Is there another way to resize my background images? Sorry if the code is a bit messy; I'm a beginner and this is the first game I've made.

Click screenshot of part of the screen using python kivy

At the moment the code below shows a camera layout using the laptop webcam just fine. I want to show a rectangular frame within the camera window: the user will hold a book aligned to the frame, and I need to capture an image of the book, i.e. capture only the part within the frame. I am struggling to:
1. Show a transparent rectangle as a frame (this is inside a boxlayout inside a camera floatlayout)
2. Grab an image of only the part within the frame
There is a button below the camera layout; clicking it should save the image to a folder on the machine.
Can somebody please guide me on how to proceed, and whether this can be achieved in another way using another module?
import kivy
from PIL import ImageGrab
from kivy.uix.boxlayout import BoxLayout
from numpy import shape
kivy.require('1.7.2')
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.camera import Camera
from kivy.uix.button import Button
from kivy.core.window import Window
from kivy.graphics import Color, Rectangle

class CamApp(App):
    # Function to take a screenshot
    def screengrab(self, *largs):
        im2 = ImageGrab.grab(bbox=None)
        im2.show()
        #outname = self.fileprefix + '_%(counter)04d.png'
        #Window.screenshot(name=outname)

    def build(self):
        # create a floating layout as base
        camlayout = FloatLayout(size=(600, 600))
        cam = Camera() # Get the camera
        cam = Camera(resolution=(1024, 1024), size=(300, 300))
        cam.play = True # Start the camera
        camlayout.add_widget(cam)
        boxlayout = BoxLayout(id='imageBox', size_hint=[0.5, 0.7], pos_hint={'center_x': .5, 'center_y': .5})
        boxlayout.bind(size=self.update_rect, pos=self.update_rect)
        with boxlayout.canvas.before:
            #Color(0, 1, 0, 1) # green; colors range from 0-1 instead of 0-255
            self.rect = Rectangle(size=boxlayout.size,
                                  pos=boxlayout.pos, outline='black')
        camlayout.add_widget(boxlayout)
        button = Button(text='Take Picture', size_hint=(0.12, 0.12))
        button.bind(on_press=self.screengrab)
        camlayout.add_widget(button) # Add button to Camera Layout
        self.fileprefix = 'snap'
        return camlayout

    def update_rect(self, instance, value):
        self.rect.pos = instance.pos
        self.rect.size = instance.size

if __name__ == '__main__':
    CamApp().run()
Click to see output
In Kivy, the Color() instruction takes arguments representing RGBA. That means the last argument is alpha, and passing a value below 1 makes the color transparent.
So while Color(0, 1, 0, 1) is a solid color, Color(0, 1, 0, .5) will be semi-transparent.
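Applied to the frame in the question, that could look roughly like this. It is only a sketch based on the boxlayout from your build(); the color and alpha values are just examples, and note that Kivy's Rectangle does not draw an outline, so the border is drawn with a separate Line instruction:
from kivy.graphics import Color, Rectangle, Line

with boxlayout.canvas.before:
    Color(0, 1, 0, 0.3)   # semi-transparent green fill
    self.rect = Rectangle(size=boxlayout.size, pos=boxlayout.pos)
    Color(0, 0, 0, 1)     # opaque black border
    self.border = Line(rectangle=(boxlayout.x, boxlayout.y, boxlayout.width, boxlayout.height), width=2)
Your update_rect callback would then need to update self.border.rectangle as well, the same way it already updates self.rect.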
As for grabbing an image of only part of the frame, you might consider grabbing the whole frame, then using PIL to crop the image with
from PIL import Image
im = Image.open(r"C:\path\to\picture\my_screenshot.png")
im = im.crop((left, top, right, bottom))  # crop() returns a new image
Just replace left, top, right, and bottom with the dimensions you want to cut out.
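If the screenshot being cropped is of the Kivy window itself (for example taken with Window.screenshot() rather than a full-desktop ImageGrab), the crop box can be computed from the boxlayout's position and size. This is only a sketch under that assumption, and it also assumes the screenshot has the same pixel size as the window. Kivy's origin is the bottom-left corner while PIL's is the top-left, so the y coordinates are flipped:
from PIL import Image
from kivy.core.window import Window

im = Image.open(r"my_screenshot.png")   # screenshot of the Kivy window
x, y = boxlayout.pos                    # Kivy coordinates, origin at bottom-left
w, h = boxlayout.size
left, right = int(x), int(x + w)
top = int(Window.height - (y + h))      # convert to PIL's top-left origin
bottom = int(Window.height - y)
book = im.crop((left, top, right, bottom))
book.save("book_crop.png")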

Kivy segmentation fault on blitting opencv picture

I am trying to cast video from my external cam, which is captured through some AliExpress EasyCap, to my Kivy app. One issue I've faced is that it crashes with a segmentation fault when trying to
texture = Texture.create(size=(frame.shape[0], frame.shape[1]))
I've found that the problem is on Kivy's side: it sometimes can't create NPOT (non-power-of-two) textures. So I changed the texture to a POT shape and copied what fits into another numpy array.
flipped = cv2.flip(frame, 0)
buf = np.zeros((512, 512, 3), dtype=np.uint8)
for i in range(min(frame.shape[0], 512)):
    for j in range(min(frame.shape[1], 512)):
        buf[i, j] = flipped[i, j]
buf = buf.tostring()
texture = Texture.create(size=(512, 512))
texture.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
self.texture = texture
But it still crashes with the same old segmentation fault on the following line:
texture.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
If it is relevant, cv2.imshow("image", buf) called before buf.tostring() shows the image correctly.
Here's the original code:
from kivy.app import App
from kivy.uix.image import Image
from kivy.clock import Clock
from kivy.graphics.texture import Texture
import cv2
import threading
from time import sleep
import numpy as np

class KivyCamera(Image):
    def __init__(self, **kwargs):
        super(KivyCamera, self).__init__(**kwargs)
        self.fps = 30
        self.capture = cv2.VideoCapture(0)
        threading.Thread(target=self.update).start()

    def update(self):
        while True:
            ret, frame = self.capture.read()
            if ret:
                buf = cv2.flip(frame, 0).tostring()
                texture = Texture.create(size=(frame.shape[0], frame.shape[1]))
                texture.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
                self.texture = texture
            sleep(1.0 / self.fps)

class CamApp(App):
    def build(self):
        return KivyCamera()

if __name__ == "__main__":
    CamApp().run()
A few issues with your code:
1. The GUI updates should always be done in the main thread, as indicated by John Anderson. Hence, instead of a separate thread, the update() function should be called through a Clock schedule.
2. The size tuple in the Texture.create statement should be reversed, and colorfmt="bgr" should be added. It should be: texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt="bgr").
3. There is no layout defined. A layout such as BoxLayout should be added as a placeholder for the Image widget.
Below is the modified working version of your code:
from kivy.app import App
from kivy.uix.image import Image
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from kivy.graphics.texture import Texture
import cv2

class KivyCamera(BoxLayout):
    def __init__(self, **kwargs):
        super(KivyCamera, self).__init__(**kwargs)
        self.img1=Image()
        self.add_widget(self.img1)
        self.capture = cv2.VideoCapture(0)
        Clock.schedule_interval(self.update, 1.0/33.0)

    def update(self, *args):
        ret, frame = self.capture.read()
        if ret:
            buf = cv2.flip(frame, 0).tostring()
            texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt="bgr")
            texture.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
            self.img1.texture = texture

class CamApp(App):
    def build(self):
        return KivyCamera()

if __name__ == "__main__":
    CamApp().run()
The while loop is removed, and Clock.schedule_interval is used with a step of 1/33 s (roughly 30 fps).
Hope this helps.
Not well documented, but I believe the Texture manipulation (specifically the blit_buffer()) must be done on the main thread. Your update() method is being run in another thread, so use Clock.schedule_once() to call a method that does the Texture manipulation (and the self.texture = texture) back in the main thread.
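As a rough sketch of that approach, using the KivyCamera class from the question (show_frame is a made-up helper name, not part of any Kivy API): the worker thread only reads frames and hands each one to the main thread via Clock.schedule_once, which calls the callback with a dt argument.
from functools import partial
from kivy.clock import Clock

def update(self):
    while True:
        ret, frame = self.capture.read()
        if ret:
            # hand the frame over; the blit happens on Kivy's main thread
            Clock.schedule_once(partial(self.show_frame, frame))
        sleep(1.0 / self.fps)

def show_frame(self, frame, dt):
    buf = cv2.flip(frame, 0).tobytes()
    texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt="bgr")
    texture.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
    self.texture = texture   # safe here: this runs on the main thread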

Kivy-python: duplicate image on texture

I am developing an app in Python 3.6 with Kivy.
I'd like to display an image saved as a numpy array.
I wrote this code:
from kivy.app import App
from kivy.uix.image import Image
from kivy.uix.widget import Widget
from kivy.graphics.texture import Texture
import cv2

class Test(Widget):
    def __init__(self, **kwargs):
        super(Test, self).__init__(**kwargs)
        img = cv2.imread(r'./kulki.jpg', cv2.IMREAD_GRAYSCALE)
        w, h = img.shape
        texture = Texture.create(size=(h, w))
        texture.blit_buffer(img.flatten(), colorfmt='rgb', bufferfmt='ubyte')
        w_img = Image(size=(w, h), texture=texture)
        self.add_widget(w_img)

class DemoApp(App):
    def build(self):
        return Test()

if __name__ == '__main__':
    DemoApp().run()
and this is my output:
for this image:
Does anybody know why there are several copies of the same picture instead of one? And why do I have to swap the dimensions (w, h) -> (h, w)?
Best regards!
I think the problem is that you are converting the image to grayscale when you read it, but then using rgb as the Texture color format. If you make those two agree, your code will work. For example, change:
texture.blit_buffer(img.flatten(), colorfmt='rgb', bufferfmt='ubyte')
to:
texture.blit_buffer(img.flatten(), colorfmt='luminance', bufferfmt='ubyte')
As for the (w, h) -> (h, w) swap: a grayscale img.shape is (rows, cols), i.e. (height, width), while Texture.create expects size=(width, height), so the order has to be reversed.
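The other direction works as well (just a sketch, not part of the answer above): keep a color texture and make the image match it, by reading the file in color and blitting it as BGR, with the size given in (width, height) order:
img = cv2.imread(r'./kulki.jpg')          # BGR image, shape (h, w, 3)
h, w = img.shape[:2]
texture = Texture.create(size=(w, h), colorfmt='bgr')
texture.blit_buffer(img.tobytes(), colorfmt='bgr', bufferfmt='ubyte')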

Showing video on the entire screen using OpenCV and Tkinter

I'm trying to create a GUI for playing a video that fills up the entire screen, while the button for Snapshot is still visible at the bottom.
Right now, what I manage to do is just set the app window itself to fullscreen, resulting in a small video playing at the top and a huge "Snapshot" button at the bottom.
Is there a way to make the video fill up the entire screen?
thanks!
from PIL import Image, ImageTk
import Tkinter as tk
import argparse
import datetime
import cv2
import os

class Application:
    def __init__(self, output_path = "./"):
        """ Initialize application which uses OpenCV + Tkinter. It displays
            a video stream in a Tkinter window and stores current snapshot on disk """
        self.vs = cv2.VideoCapture('Cat Walking.mp4') # capture video frames, 0 is your default video camera
        self.output_path = output_path # store output path
        self.current_image = None # current image from the camera
        self.root = tk.Tk() # initialize root window
        self.root.title("PyImageSearch PhotoBooth") # set window title
        # self.destructor function gets fired when the window is closed
        self.root.protocol('WM_DELETE_WINDOW', self.destructor)
        self.panel = tk.Label(self.root) # initialize image panel
        self.panel.pack(padx=10, pady=10)
        # create a button, that when pressed, will take the current frame and save it to file
        btn = tk.Button(self.root, text="Snapshot!", command=self.take_snapshot)
        btn.pack(fill="both", expand=True, padx=10, pady=10)
        # start a self.video_loop that constantly pools the video sensor
        # for the most recently read frame
        self.video_loop()

    def video_loop(self):
        """ Get frame from the video stream and show it in Tkinter """
        ok, frame = self.vs.read() # read frame from video stream
        if ok: # frame captured without any errors
            cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) # convert colors from BGR to RGBA
            self.current_image = Image.fromarray(cv2image) # convert image for PIL
            imgtk = ImageTk.PhotoImage(image=self.current_image) # convert image for tkinter
            self.panel.imgtk = imgtk # anchor imgtk so it is not deleted by the garbage collector
            self.root.attributes("-fullscreen", True)
            #self.root.wm_state('zoomed')
            self.panel.config(image=imgtk) # show the image
        self.root.after(1, self.video_loop) # call the same function again after 1 millisecond

    def take_snapshot(self):
        """ Take snapshot and save it to the file """
        ts = datetime.datetime.now() # grab the current timestamp
        filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S")) # construct filename
        p = os.path.join(self.output_path, filename) # construct output path
        self.current_image.save(p, "JPEG") # save image as jpeg file
        print("[INFO] saved {}".format(filename))

    def destructor(self):
        """ Destroy the root object and release all resources """
        print("[INFO] closing...")
        self.root.destroy()
        self.vs.release() # release web camera
        cv2.destroyAllWindows() # it is not mandatory in this application

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", default="./",
                help="path to output directory to store snapshots (default: current folder)")
args = vars(ap.parse_args())

# start the app
print("[INFO] starting...")
pba = Application(args["output"])
pba.root.mainloop()
It's not a hard task if you don't care about execution time! Resizing an image isn't rocket science for the average user, but under the hood it takes some time to resize each frame. And if you really care about time and options, there are many to play with, from numpy/scipy to skimage/skvideo.
But let's try to do something with your code "as is", so we have two options to play with: cv2 and Image. For testing I grabbed 20 seconds of the "Keyboard Cat" video from YouTube (480p) and resized each frame up to 1080p; the GUI looks like this (fullscreen 1920x1080):
Resize method / timeit elapsed time of showing all frames:
cv2.resize() / ~81.377 s
Image.resize() / ~82.98 s
As you can see, there's no big difference between these two, so here's the code (only the Application class and video_loop changed):
#imports
try:
    import tkinter as tk
except:
    import Tkinter as tk
from PIL import Image, ImageTk
import argparse
import datetime
import cv2
import os

class Application:
    def __init__(self, output_path = "./"):
        """ Initialize application which uses OpenCV + Tkinter. It displays
            a video stream in a Tkinter window and stores current snapshot on disk """
        self.vs = cv2.VideoCapture('KeyCat.mp4') # capture video frames, 0 is your default video camera
        self.output_path = output_path # store output path
        self.current_image = None # current image from the camera
        self.root = tk.Tk() # initialize root window
        self.root.title("PyImageSearch PhotoBooth") # set window title
        # self.destructor function gets fired when the window is closed
        self.root.protocol('WM_DELETE_WINDOW', self.destructor)
        self.root.attributes("-fullscreen", True)
        # getting size to resize! 30 - space for button
        self.size = (self.root.winfo_screenwidth(), self.root.winfo_screenheight() - 30)
        self.panel = tk.Label(self.root) # initialize image panel
        self.panel.pack(fill='both', expand=True)
        # create a button, that when pressed, will take the current frame and save it to file
        self.btn = tk.Button(self.root, text="Snapshot!", command=self.take_snapshot)
        self.btn.pack(fill='x', expand=True)
        # start a self.video_loop that constantly pools the video sensor
        # for the most recently read frame
        self.video_loop()

    def video_loop(self):
        """ Get frame from the video stream and show it in Tkinter """
        ok, frame = self.vs.read() # read frame from video stream
        if ok: # frame captured without any errors
            cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) # convert colors from BGR to RGBA
            cv2image = cv2.resize(cv2image, self.size, interpolation=cv2.INTER_NEAREST)
            self.current_image = Image.fromarray(cv2image) #.resize(self.size, resample=Image.NEAREST) # convert image for PIL
            self.panel.imgtk = ImageTk.PhotoImage(image=self.current_image)
            self.panel.config(image=self.panel.imgtk) # show the image
        self.root.after(1, self.video_loop) # call the same function again
But as you might expect, doing such things "on the fly" isn't a good idea, so let's try resizing all the frames first and only then playing them (only the Application class and the video_loop method changed; a resize_video method was added):
class Application:
    def __init__(self, output_path = "./"):
        """ Initialize application which uses OpenCV + Tkinter. It displays
            a video stream in a Tkinter window and stores current snapshot on disk """
        self.vs = cv2.VideoCapture('KeyCat.mp4') # capture video frames, 0 is your default video camera
        ...
        # init frames
        self.frames = self.resize_video()
        self.video_loop()

    def resize_video(self):
        temp = list()
        try:
            temp_count_const = cv2.CAP_PROP_FRAME_COUNT
        except AttributeError:
            temp_count_const = cv2.cv.CV_CAP_PROP_FRAME_COUNT
        frames_count = self.vs.get(temp_count_const)
        while self.vs.isOpened():
            ok, frame = self.vs.read() # read frame from video stream
            if ok: # frame captured without any errors
                cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA) # convert colors from BGR to RGBA
                cv2image = cv2.resize(cv2image, self.size, interpolation=cv2.INTER_NEAREST)
                cv2image = Image.fromarray(cv2image) # convert image for PIL
                temp.append(cv2image)
                # simple progress print w/o sys import
                print('%d/%d\t%d%%' % (len(temp), frames_count, ((len(temp)/frames_count)*100)))
            else:
                return temp

    def video_loop(self):
        """ Get frame from the video stream and show it in Tkinter """
        if len(self.frames) != 0:
            self.current_image = self.frames.pop(0)
            self.panel.imgtk = ImageTk.PhotoImage(self.current_image)
            self.panel.config(image=self.panel.imgtk)
        self.root.after(1, self.video_loop) # call the same function again
timeit elapsed time of showing pre-resized frames: ~78.78 s
As you can see, resizing isn't the main bottleneck of your script, but pre-resizing the frames is a good option!
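One more tweak worth considering (not part of the answer above): resizing straight to the screen size stretches the video when the aspect ratios differ. A small sketch that scales a frame to fit inside self.size while keeping its aspect ratio:
def fit_to_screen(frame, target_size):
    """Scale frame to fit inside target_size=(width, height) without distortion."""
    tw, th = target_size
    fh, fw = frame.shape[:2]
    scale = min(tw / float(fw), th / float(fh))
    return cv2.resize(frame, (int(fw * scale), int(fh * scale)),
                      interpolation=cv2.INTER_NEAREST)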
