I am trying to recognize six digits from a meter using Python/OpenCV. It is surprisingly hard to get the morphological operations working the right way, given the time I have spent adjusting the focus/distance of my Raspberry Pi camera to the meter screen; I even bought a separate LED lamp to get light that is as uniform as possible. This is a template image
and I've tried using and adjusting the code from the two sources reproduced below, without any progress. I got stuck right at the start, when setting the thresholding options. Thank you in advance for any help.
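For reference, a minimal adaptive-threshold sketch of the kind of thing I have been trying (the block size and C constant are arbitrary starting values to tune, not recommended settings):
import cv2
# adaptive thresholding computes a local threshold per neighbourhood instead of
# one global value, which helps when the illumination is not perfectly uniform
gray = cv2.cvtColor(cv2.imread("test.jpg"), cv2.COLOR_BGR2GRAY)
th = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                           cv2.THRESH_BINARY_INV, 31, 10)
cv2.imwrite("adaptive.png", th)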
# Code 1
import cv2
import numpy as np
import pytesseract
# Load the image
img = cv2.imread("test.jpg")
# Color-segmentation to get binary mask
lwr = np.array([43, 0, 71])
upr = np.array([103, 255, 130])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
msk = cv2.inRange(hsv, lwr, upr)
cv2.imwrite("msk.png", msk)
# Extract digits
krn = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 3))
dlt = cv2.dilate(msk, krn, iterations=5)
res = 255 - cv2.bitwise_and(dlt, msk)
cv2.imwrite("res.png", res)
# Displaying digits and OCR
txt = pytesseract.image_to_string(res, config="--psm 6 digits")
print(''.join(t for t in txt if t.isalnum()))
cv2.imshow("res", res)
cv2.waitKey(0)
cv2.destroyAllWindows()
# code 2
# https://pyimagesearch.com/2017/02/13/recognizing-digits-with-opencv-and-python/
# import the necessary packages
# from imutils.perspective import four_point_transform
from imutils import contours
import imutils
import cv2
import numpy as np
from numpy.linalg import norm
# define the dictionary of digit segments so we can identify
# each digit on the thermostat
DIGITS_LOOKUP = {
(1, 1, 1, 0, 1, 1, 1): 0,
(1, 0, 1, 0, 1, 0, 1): 1,
(1, 0, 1, 1, 1, 0, 1): 2,
(1, 0, 1, 1, 0, 1, 1): 3,
(0, 1, 1, 1, 0, 1, 0): 4,
(1, 1, 0, 1, 0, 1, 1): 5,
(1, 1, 0, 1, 1, 1, 1): 6,
(1, 1, 1, 0, 0, 1, 0): 7,
(1, 1, 1, 1, 1, 1, 1): 8,
(1, 1, 1, 1, 0, 1, 1): 9
}
images = 'test.jpg'
image = cv2.imread(images, 1)
# pre-process the image by resizing it, converting it to
# grayscale, blurring it, and computing an edge map
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
# gray = cv2.medianBlur(blurred, 1)
# threshold the warped image, then apply a series of morphological
# operations to cleanup the thresholded image
(T, thresh) = cv2.threshold(blurred, 0, 255,
cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
cv2.imshow('thresh', thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
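# the flood fills below seed the value 0 from several points on the image
# border, so any thresholded blobs touching the frame are removed
# (floodFill requires a mask 2 px larger than the image in each dimension)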
mask = np.zeros((image.shape[0] + 2, image.shape[1] + 2), np.uint8)
cv2.floodFill(thresh, mask, (0, 0), 0)
cv2.floodFill(thresh, mask, (image.shape[1]-1, 0), 0)
cv2.floodFill(thresh, mask, (round(image.shape[1]/2.4), 0), 0)
cv2.floodFill(thresh, mask, (image.shape[1]//2, 0), 0)
cv2.floodFill(thresh, mask, (0, image.shape[0]-1), 0)
cv2.floodFill(thresh, mask, (image.shape[1]-1, image.shape[0]-1), 0)
kernel = np.ones((2, 2), np.uint8)
thresh = cv2.erode(thresh, kernel, iterations=2)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 13))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)
# cv2.imshow('thresh', thresh)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# find contours in the thresholded image, then initialize the
# digit contours lists
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
digitCnts = []
# loop over the digit area candidates
for c in cnts:
# compute the bounding box of the contour
(x, y, w, h) = cv2.boundingRect(c)
# if the contour is sufficiently large, it must be a digit
if w <= 300 and (h >= 130 and h <= 300):
digitCnts.append(c)
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
# cv2.imshow('image', image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# sort the contours from left-to-right, then initialize the
# actual digits themselves
digitCnts = contours.sort_contours(digitCnts, method="left-to-right")[0]
digits = []
clao = 0
# loop over each of the digits
for c in digitCnts:
clao = clao + 1
# extract the digit ROI
(x, y, w, h) = cv2.boundingRect(c)
roi = thresh[y:y + h, x:x + w]
# compute the width and height of each of the 7 segments
# we are going to examine
(roiH, roiW) = roi.shape
(dW, dH) = (int(roiW * 0.25), int(roiH * 0.15))
dHC = int(roiH * 0.05)
# define the set of 7 segments
segments = [
((0, 0), (w, dH)), # top
((0, 0), (dW, h // 2)), # top-left
((w - dW, 0), (w, h // 2)), # top-right
((0, (h // 2) - dHC), (w, (h // 2) + dHC)), # center
((0, h // 2), (dW, h)), # bottom-left
((w - dW, h // 2), (w, h)), # bottom-right
((0, h - dH), (w, h)) # bottom
]
on = [0] * len(segments)
# loop over the segments
for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
# extract the segment ROI, count the total number of
# thresholded pixels in the segment, and then compute
# the area of the segment
segROI = roi[yA:yB, xA:xB]
total = cv2.countNonZero(segROI)
area = (xB - xA) * (yB - yA)
# if the total number of non-zero pixels is greater than
# 50% of the area, mark the segment as "on"
if clao == 1:
if total / float(area) > 0.34:
if area < 1500:
on = [1, 0, 1, 0, 1, 0, 1]
else:
on[i] = 1
else:
if total / float(area) > 0.39:
if area < 1500:
on = [1, 0, 1, 0, 1, 0, 1]
else:
on[i] = 1
# lookup the digit and draw it on the image
digit = DIGITS_LOOKUP.get(tuple(on)) or DIGITS_LOOKUP[
min(DIGITS_LOOKUP.keys(), key=lambda key: norm(np.array(key)-np.array(on)))]
# digit = DIGITS_LOOKUP[tuple(on)]
digits.append(digit)
# print(digits)
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 1)
cv2.putText(image, str(digit), (x - 10, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
# display the digits
print(digits)
cv2.imshow("Input", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
Update
Apologies for my late reply but I have been quite busy with work.
I have captured 22 images throughout the day and used @fmw42's code (with some amendments) to apply thresholding and morphological operations. I am making the images available here and the code that I am using is available below. Overall the performance is quite robust, although 1s and sometimes 8s get mixed up with 2s. I am happy to accept an answer that provides improved performance. Note: I suspect one problem is that the vertical strokes of the digits are slightly slanted (a shear-correction sketch follows the code). Thank you in advance.
import cv2
import numpy as np
from numpy.linalg import norm
from imutils import contours
import imutils
import os
# define the dictionary of digit segments so we can identify
# each digit on the thermostat
DIGITS_LOOKUP = {
(1, 1, 1, 0, 1, 1, 1): 0,
(1, 0, 1, 0, 1, 0, 1): 1,
(1, 0, 1, 1, 1, 0, 1): 2,
(1, 0, 1, 1, 0, 1, 1): 3,
(0, 1, 1, 1, 0, 1, 0): 4,
(1, 1, 0, 1, 0, 1, 1): 5,
(1, 1, 0, 1, 1, 1, 1): 6,
(1, 1, 1, 0, 0, 1, 0): 7,
(1, 1, 1, 1, 1, 1, 1): 8,
(1, 1, 1, 1, 0, 1, 1): 9
}
path_of_the_directory = "/home/myusername/mypathdirectory"
ext = ('.jpg')
for files in os.listdir(path_of_the_directory):
if files.endswith(ext):
# load image
print(files)
img = cv2.imread(os.path.join(path_of_the_directory, files))
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# blur
blur = cv2.GaussianBlur(gray, (0,0), sigmaX=51, sigmaY=51)
# divide
divide = cv2.divide(gray, blur, scale=255)
# threshold
thresh = cv2.threshold(divide, 235, 255, cv2.THRESH_BINARY)[1]
# apply morphology
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (41,41))
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (41,41))
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
morph = cv2.bitwise_not(morph) # reverse
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 70))
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
# write result to disk
cv2.imwrite("digits_division.jpg", divide)
cv2.imwrite("digits_threshold.jpg", thresh)
cv2.imwrite("digits_morph.jpg", morph)
# display it
cv2.imshow("divide", divide)
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.waitKey(0)
cv2.destroyAllWindows()
# find contours in the thresholded image, then initialize the
# digit contours lists
cnts = cv2.findContours(morph.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
digitCnts = []
# loop over the digit area candidates
for c in cnts:
# compute the bounding box of the contour
(x, y, w, h) = cv2.boundingRect(c)
# if the contour is sufficiently large, it must be a digit
if w >= 60 and (h >= 300 and h <= 800):
digitCnts.append(c)
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# sort the contours from left-to-right, then initialize the
# actual digits themselves
digitCnts = contours.sort_contours(digitCnts, method="left-to-right")[0]
digits = []
clao = 0
# loop over each of the digits
for c in digitCnts:
clao = clao + 1
# extract the digit ROI
(x, y, w, h) = cv2.boundingRect(c)
roi = morph[y:y + h, x:x + w]
# compute the width and height of each of the 7 segments
# we are going to examine
(roiH, roiW) = roi.shape
(dW, dH) = (int(roiW * 0.25), int(roiH * 0.15))
dHC = int(roiH * 0.05)
# define the set of 7 segments
segments = [
((0, 0), (w, dH)), # top
((0, 0), (dW, h // 2)), # top-left
((w - dW, 0), (w, h // 2)), # top-right
((0, (h // 2) - dHC), (w, (h // 2) + dHC)), # center
((0, h // 2), (dW, h)), # bottom-left
((w - dW, h // 2), (w, h)), # bottom-right
((0, h - dH), (w, h)) # bottom
]
on = [0] * len(segments)
# loop over the segments
for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
# extract the segment ROI, count the total number of
# thresholded pixels in the segment, and then compute
# the area of the segment
segROI = roi[yA:yB, xA:xB]
total = cv2.countNonZero(segROI)
area = (xB - xA) * (yB - yA)
# if the total number of non-zero pixels is greater than
# 50% of the area, mark the segment as "on"
if clao == 1:
if total / float(area) > 0.34:
if area < 1500:
on = [1, 0, 1, 0, 1, 0, 1]
else:
on[i] = 1
else:
if total / float(area) > 0.42:
if area < 1500:
on = [1, 0, 1, 0, 1, 0, 1]
else:
on[i] = 1
# lookup the digit and draw it on the image
digit = DIGITS_LOOKUP.get(tuple(on)) or DIGITS_LOOKUP[
min(DIGITS_LOOKUP.keys(), key=lambda key: norm(np.array(key)-np.array(on)))]
# digit = DIGITS_LOOKUP[tuple(on)]
digits.append(digit)
# print(digits)
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)
cv2.putText(img, str(digit), (x - 10, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
# display the digits
print(digits)
cv2.imshow("Input", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
else:
continue
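On the slant mentioned above: a minimal shear-correction sketch I may try before the segment tests (the shear factor is a guessed parameter to tune per camera setup, not a measured value):
import cv2
import numpy as np
img = cv2.imread("test.jpg", cv2.IMREAD_GRAYSCALE)
h, w = img.shape
# x' = x + shear * y: a small negative shear pushes right-leaning vertical
# strokes back towards upright; the translation keeps the result in frame
shear = -0.1
M = np.float32([[1, shear, -shear * h], [0, 1, 0]])
deskewed = cv2.warpAffine(img, M, (w, h), borderValue=255)
cv2.imwrite("deskewed.jpg", deskewed)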
Perhaps division normalization in Python/OpenCV will help you.
Input:
import cv2
import numpy as np
# load image
img = cv2.imread("digits.jpg")
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# blur
blur = cv2.GaussianBlur(gray, (0,0), sigmaX=51, sigmaY=51)
# divide
divide = cv2.divide(gray, blur, scale=255)
# threshold
thresh = cv2.threshold(divide, 235, 255, cv2.THRESH_BINARY)[1]
# apply morphology
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11,11))
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11,11))
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
# write result to disk
cv2.imwrite("digits_division.jpg", divide)
cv2.imwrite("digits_threshold.jpg", thresh)
cv2.imwrite("digits_morph.jpg", morph)
# display it
cv2.imshow("divide", divide)
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.waitKey(0)
cv2.destroyAllWindows()
Division normalized image:
Thresholded image:
Morphology processed image:
You can then clean up further by getting contours and removing small contours and very long horizontal contours.
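A minimal sketch of that cleanup (the area and aspect-ratio cutoffs are placeholder values to tune against your images):
import cv2
morph = cv2.imread("digits_morph.jpg", cv2.IMREAD_GRAYSCALE)
cnts, _ = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
clean = morph.copy()
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    # erase tiny specks and long flat horizontal streaks by painting them black
    if cv2.contourArea(c) < 100 or w > 5 * h:
        cv2.drawContours(clean, [c], -1, 0, -1)
cv2.imwrite("digits_clean.jpg", clean)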
The key to getting this working is cleaning up the image, which I have done well enough here to make it work, using the scikit-image library.
I then look at certain squares on the image and take an average reading from that area.
On the right-hand image I've marked some of the locations with red squares.
The script I used to get this result:
import numpy as np
from pathlib import Path
import imageio.v3 as iio
import skimage.filters as skif
from skimage.color import rgb2gray
from skimage.util import img_as_ubyte
from skimage.restoration import denoise_bilateral
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import matplotlib.cm as cm
threshold = 125
digit_loc = [1600, 1300, 1000, 730, 420, 155]
size = 20
x_mid = 80
x_right = 160
y_top = 130
y_mt = 250
y_mid = 380
y_bm = 520
y_bot = 630
def img_with_threshold(orig_img):
block_size = 255
local_thresh = skif.threshold_local(
orig_img,
block_size,
method="mean",
)
binary_local = orig_img > local_thresh
u8_val = img_as_ubyte(binary_local)
return u8_val
def image_denoise(orig_img):
return denoise_bilateral(orig_img, win_size=10, bins=10, )
def plot_imgs(orig_img, mod_img):
# Display the image
fig, axes = plt.subplots(1, 2, figsize=(8, 8), sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(orig_img, cmap=cm.Greys_r)
ax[1].imshow(mod_img, cmap=cm.Greys_r)
# Create a Rectangle patch
for x_loc in digit_loc:
rect1 = Rectangle((x_loc + x_mid, y_top), size, size, linewidth=1, edgecolor='r', facecolor='none')
rect2 = Rectangle((x_loc, y_mt), size, size, linewidth=1, edgecolor='r', facecolor='none')
rect3 = Rectangle((x_loc + x_right, y_mt), size, size, linewidth=1, edgecolor='r', facecolor='none')
rect4 = Rectangle((x_loc + x_mid, y_mid), size, size, linewidth=1, edgecolor='r', facecolor='none')
rect5 = Rectangle((x_loc, y_bm), size, size, linewidth=1, edgecolor='r', facecolor='none')
rect6 = Rectangle((x_loc + x_right, y_bm), size, size, linewidth=1, edgecolor='r', facecolor='none')
rect7 = Rectangle((x_loc + x_mid, y_bot), size, size, linewidth=1, edgecolor='r', facecolor='none')
# Add the patch to the Axes
ax[1].add_patch(rect1)
ax[1].add_patch(rect2)
ax[1].add_patch(rect3)
ax[1].add_patch(rect4)
ax[1].add_patch(rect5)
ax[1].add_patch(rect6)
ax[1].add_patch(rect7)
plt.show()
def seg_to_digit(segments, location):
digit_values = {0b1110111: 0,
0b0010010: 1,
0b1011101: 2,
0b1011011: 3,
0b0111010: 4,
0b1101011: 5,
0b1101111: 6,
0b1110010: 7,
0b1111111: 8,
0b1111011: 9,
}
result = int("".join(["1" if i < threshold else "0" for i in segments]), 2)
# print("score:", result)
return digit_values.get(result, 0) * 10 ** location
def get_digit(location, mod_img):
"""
a
b c
d
e f
g
"""
x_loc = digit_loc[location]
m_loc = (x_loc + x_mid, x_loc + x_mid + size)
l_loc = (x_loc, x_loc + size)
r_loc = (x_loc + x_right, x_loc + x_right + size)
seg_a = np.average(mod_img[y_top:y_top + size, m_loc[0]:m_loc[1]])
seg_b = np.average(mod_img[y_mt:y_mt + size, l_loc[0]:l_loc[1]])
seg_c = np.average(mod_img[y_mt:y_mt + size, r_loc[0]:r_loc[1]])
seg_d = np.average(mod_img[y_mid:y_mid + size, m_loc[0]:m_loc[1]])
seg_e = np.average(mod_img[y_bm:y_bm + size, l_loc[0]:l_loc[1]])
seg_f = np.average(mod_img[y_bm:y_bm + size, r_loc[0]:r_loc[1]])
seg_g = np.average(mod_img[y_bot:y_bot + size, m_loc[0]:m_loc[1]])
segments = [seg_a, seg_b, seg_c, seg_d, seg_e, seg_f, seg_g]
# print(f"x loc: {x_loc}, digit index: {location}, segment values: {segments}")
# create an integer from the bits
# print('value:', result)
return seg_to_digit(segments, location)
def main():
data_dir = Path(__file__).parent.joinpath('data')
meter_img = data_dir.joinpath('meter_test.jpg')
img = iio.imread(meter_img)
gray_img = img_as_ubyte(rgb2gray(img))
img_result = image_denoise(gray_img)
img_result1 = img_with_threshold(img_result)
reading = 0
for dig_loc in range(6):
reading += get_digit(dig_loc, img_result1)
print(f"{reading:>21}")
print("Final reading:", reading)
plot_imgs(gray_img, img_result1)
if __name__ == '__main__':
main()
This gave the following output:
7
77
677
4677
24677
924677
Final reading: 924677
Related
I am following this tutorial to recognize six digits from the following image
The threshold seems (to me) to be very good
However, when I reach the contour definition, digits 7, 1, 0 (and possibly more) are always split in two or more boxes.
By definition, a contour is the boundary of a connected region, so digits whose strokes are separated by a small gap cannot be captured as a single contour. What can be done in this case? My first instinct is to merge these small boxes (a sketch of one way to do that follows the code below). I have already tried playing around with the height and width filters on the contours, with no success. The code is written below.
# https://pyimagesearch.com/2017/02/13/recognizing-digits-with-opencv-and-python/
# import the necessary packages
from imutils.perspective import four_point_transform
from imutils import contours
import imutils
import cv2
# define the dictionary of digit segments so we can identify
# each digit on the thermostat
DIGITS_LOOKUP = {
(1, 1, 1, 0, 1, 1, 1): 0,
(0, 0, 1, 0, 0, 1, 0): 1,
(1, 0, 1, 1, 1, 1, 0): 2,
(1, 0, 1, 1, 0, 1, 1): 3,
(0, 1, 1, 1, 0, 1, 0): 4,
(1, 1, 0, 1, 0, 1, 1): 5,
(1, 1, 0, 1, 1, 1, 1): 6,
(1, 0, 1, 0, 0, 1, 0): 7,
(1, 1, 1, 1, 1, 1, 1): 8,
(1, 1, 1, 1, 0, 1, 1): 9
}
# load the example image
image = cv2.imread('DSC_01922.JPG', 1)
# pre-process the image by resizing it, converting it to
# grayscale, blurring it, and computing an edge map
# image = imutils.resize(image, height=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
# edged = cv2.Canny(blurred, 50, 200, 255)
# threshold the warped image, then apply a series of morphological
# operations to cleanup the thresholded image
thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 5))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
# cv2.imshow('thresh', thresh)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# find contours in the thresholded image, then initialize the
# digit contours lists
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
digitCnts = []
# loop over the digit area candidates
for c in cnts:
# compute the bounding box of the contour
(x, y, w, h) = cv2.boundingRect(c)
# if the contour is sufficiently large, it must be a digit
if (h >= 90 and h <= 300):
digitCnts.append(c)
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
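For completeness, on the merging instinct mentioned above: a sketch of grouping bounding boxes whose x-ranges overlap or nearly touch into one digit box (the x_gap tolerance is a guessed value):
def merge_boxes(boxes, x_gap=10):
    # boxes: list of (x, y, w, h) from cv2.boundingRect; pieces of the same
    # digit share (or nearly share) an x-range, so merge along x
    merged = []
    for x, y, w, h in sorted(boxes):
        if merged and x <= merged[-1][0] + merged[-1][2] + x_gap:
            mx, my, mw, mh = merged[-1]
            nx, ny = min(mx, x), min(my, y)
            merged[-1] = (nx, ny,
                          max(mx + mw, x + w) - nx,
                          max(my + mh, y + h) - ny)
        else:
            merged.append((x, y, w, h))
    return merged
The rects collected in the digit loop could be passed through this before sorting, although the MORPH_CLOSE fix in the updates below turned out to be enough.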
Update 1
Using MORPH_CLOSE instead of OPEN and enlarging the kernel, as suggested by @Croolman, improves the results, as can be seen below
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 7))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
Note that I am doing this as a hobby and I am not familiar with, or doing research on, the existing OpenCV/Python tools. Thank you in advance.
Update 2
This solution works.
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 15))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
threshold
contour
This is the complete answer. It required tweaking the kernel values and using MORPH_CLOSE
# https://pyimagesearch.com/2017/02/13/recognizing-digits-with-opencv-and-python/
# import the necessary packages
from imutils.perspective import four_point_transform
from imutils import contours
import imutils
import cv2
# define the dictionary of digit segments so we can identify
# each digit on the thermostat
DIGITS_LOOKUP = {
(1, 1, 1, 0, 1, 1, 1): 0,
(0, 0, 1, 0, 0, 1, 0): 1,
(1, 0, 1, 1, 1, 1, 0): 2,
(1, 0, 1, 1, 0, 1, 1): 3,
(0, 1, 1, 1, 0, 1, 0): 4,
(1, 1, 0, 1, 0, 1, 1): 5,
(1, 1, 0, 1, 1, 1, 1): 6,
(1, 0, 1, 0, 0, 1, 0): 7,
(1, 1, 1, 1, 1, 1, 1): 8,
(1, 1, 1, 1, 0, 1, 1): 9
}
# load the example image
image = cv2.imread('DSC_01922.JPG', 1)
# pre-process the image by resizing it, converting it to
# grayscale, blurring it, and computing an edge map
# image = imutils.resize(image, height=500)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
# edged = cv2.Canny(blurred, 50, 200, 255)
# threshold the warped image, then apply a series of morphological
# operations to cleanup the thresholded image
thresh = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (1, 15))
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
cv2.imshow('thresh', thresh)
cv2.waitKey(0)
cv2.destroyAllWindows()
# find contours in the thresholded image, then initialize the
# digit contours lists
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
digitCnts = []
# loop over the digit area candidates
for c in cnts:
# compute the bounding box of the contour
(x, y, w, h) = cv2.boundingRect(c)
# if the contour is sufficiently large, it must be a digit
if (h >= 90 and h <= 300):
digitCnts.append(c)
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
We can think of the shapes in the representative picture as pencils or sticks randomly scattered on a table. I've been trying to find the area of each shape by fitting ellipses, but I haven't been able to fit the ellipses properly. Can you help me? Thanks.
First image is : input image
The code that I tried,
import cv2
import numpy as np
import random as rng
import math
img = cv2.imread('sticks.png', 1)
imge= cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
gray = cv2.cvtColor(imge, cv2.COLOR_BGR2GRAY)
blur = cv2.blur(gray, (2,2), 3)
rng.seed(1)
def thresh_callback(val):
threshold = val
canny_output = cv2.Canny(blur, threshold, threshold * 4)
contours, _ = cv2.findContours(canny_output, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
minRect = [None]*len(contours)
minEllipse = [None]*len(contours)
for i, c in enumerate(contours):
minRect[i] = cv2.minAreaRect(c)
if c.shape[0] > 5:
minEllipse[i] = cv2.fitEllipse(c)
(x,y),(minor_axis,major_axis),angle = minEllipse[i]
half_major= major_axis/2
half_minor= minor_axis/2
pixel= 37.795275591
half_major1= half_major/pixel
half_minor1= half_minor/pixel
area = math.pi * half_major1 * half_minor1  # ellipse area is pi * a * b
print(area)
drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
for i, c in enumerate(contours):
color = (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256))
cv2.drawContours(drawing, contours, i, color)
if c.shape[0] > 5:
cv2.ellipse(drawing, minEllipse[i], color, 1)
cv2.imshow('Fitting Ellips', drawing)
source_window = 'Source'
cv2.namedWindow(source_window)
cv2.imshow(source_window, img)
max_thresh = 255
thresh = 100
cv2.createTrackbar('Canny Thresh:', source_window,thresh, max_thresh, thresh_callback)
thresh_callback(thresh)
cv2.waitKey()
Second image is: expected result (fitting an ellipse to each line like this)
This is not a final result and it definitely has errors; you will need to spend time to achieve the desired output. But it can be a good starting point:
import sys
import cv2
import math
import numpy as np
# Check if there is a black area at a specific position in the image
def checkPointArea(im, pt):
x, y = pt[0], pt[1]
return im[y, x, 0] == 0 or im[y, x+1, 0] == 0 or im[y, x-1, 0] == 0 or im[y+1, x, 0] == 0 or im[y-1, x, 0] == 0
# Load image
pth = sys.path[0]
im = cv2.imread(pth+'/im.jpg')
H, W = im.shape[:2]
# Make grayscale and black and white versions
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
bw = cv2.threshold(im, 110, 255, cv2.THRESH_BINARY)[1]
# Try to clear the parts of the image that are stuck together
bw = cv2.dilate(bw, np.ones((5, 5), np.uint8))
# Convert im back to BGR
im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
# Make some copies
org = im.copy()
empty = im.copy()
empty[:] = 255
# Find contours and sort them by position
cnts, _ = cv2.findContours(bw, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts.sort(key=lambda x: cv2.boundingRect(x)[0])
# Thickness of the stick lines
thickness = 5
# Find and draw ellipses
for cnt in cnts:
x, y, w, h = cv2.boundingRect(cnt)
if w < W:
cv2.rectangle(im, (x, y), (x+w, y+h), (10, 230, 0)
if w < h else (200, 0, 128), 1)
hw, hh = w//2, h//2
cx, cy = x+hw, y+hh
r = int(math.sqrt(w**2+h**2))
t, c = math.atan(hw/hh), (255, 0, 0)
if checkPointArea(org, (x, y)) and checkPointArea(org, (x+w-1, y+h-1)):
t, c = math.atan(hw/-hh), (100, 0, 200)
deg = math.degrees(t)
if w <= thickness*2:
deg = 0
if h <= thickness*2:
deg = 90
cv2.ellipse(im, (x, y), (1, 1), 0, 0, 360, c, 4)
cv2.ellipse(im, (cx, cy), (thickness, r//2),
deg, 0, 360, (40, 0, 255), 2, lineType=cv2.LINE_AA)
#cv2.ellipse(empty, (x, y), (1, 1), 0, 0, 360, c, 2)
cv2.ellipse(empty, (cx, cy), (thickness, r//2),
deg, 0, 360, c, 2, lineType=cv2.LINE_AA)
# Save output
bw = cv2.cvtColor(bw, cv2.COLOR_GRAY2BGR)
top = np.hstack((org, empty))
btm = np.hstack((bw, im))
cv2.imwrite(pth+'/im_.png', np.vstack((top, btm)))
Each section:
Final Result:
Errors:
You will have to spend more time on these two failure cases. The first is due to weaknesses in my code and is removable with more time. The second is due to two lines overlapping; cleaning up the image did not help there. You may be able to prevent such overlaps from occurring in the first place.
I'm trying to detect the numbers found in my squares, and I thought I could use the pytesseract library, but for some reason it reads the wrong values.
This is the console output:
And here I have all my pictures (they are separate; this is just to show them all)
import numpy as np
import cv2
import re
from PIL import Image
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'
img = cv2.imread('gulRecNum.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# convert to HSV, since red and yellow are the lowest hue colors and come before green
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# create a binary thresholded image on hue between red and yellow
lower = (0,240,160)
upper = (30,255,255)
thresh = cv2.inRange(hsv, lower, upper)
# apply morphology
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))
clean = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
clean = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# get external contours
contours = cv2.findContours(clean, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
result1 = img.copy()
result2 = img.copy()
mask = np.zeros(result2.shape, dtype=np.uint8)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
ROI_number = 0
for c in contours:
cv2.drawContours(result1,[c],0,(0,0,0),2)
# get rotated rectangle from contour
rot_rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rot_rect)
box = np.int0(box)
# draw rotated rectangle on copy of img
cv2.drawContours(result2,[box],0,(0,0,0),2)
# Do something if the area is greater than 1.
# What's the area of the component?
areal = cv2.contourArea(c)
if(areal > 1):
# get the center of mass
M = cv2.moments(c)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
center = (cx, cy)
print("\nx: ",cx,"\ny: ",cy)
color = (0, 0, 255)
cv2.circle(result2, center, 3, color, -1)
cv2.putText(result2, "center", (int(cx) - 10, int(cy) - 20),
cv2.FONT_HERSHEY_SIMPLEX, 1.2, color, 2)
# LOOK AT THIS PART
x,y,w,h = cv2.boundingRect(c)
ROI = 255 - thresh[y:y+h, x:x+w]
cv2.drawContours(mask, [c], -1, (255,255,255), -1)
cv2.imwrite('ROI_{}.png'.format(ROI_number), ROI)
Number = pytesseract.image_to_string(ROI, config='--psm 13 --oem 3 -c tessedit_char_whitelist=0123456789')
print("Number ", Number)
ROI_number += 1
# save result
cv2.imwrite("4cubes_result2.png",result2)
# display result
imS = cv2.resize(result2, (600, 400))
cv2.imshow("result2", imS)
cv2.waitKey(0)
cv2.destroyAllWindows()
I thought I could write Number = pytesseract.image_to_string(ROI, config='--psm 13 --oem 3 -c tessedit_char_whitelist=0123456789') and then print(Number) to get the number from the image, but I don't. How can that be?
EDIT: NEW ERROR
How do I solve it with this picture?
from PIL import Image
from operator import itemgetter
import numpy as np
import easyocr
import cv2
import re
import imutils
import pytesseract
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'
reader = easyocr.Reader(['ch_sim','en']) # need to run only once to load model into memory
#Define empty array
Cubes = []
def getNumber(ROI):
img = cv2.imread(ROI)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,127,255,0)
#cv2.imshow(thresh)
#cv2.imshow('Thresholded original',thresh)
#cv2.waitKey(0)
## Get contours
contours,h = cv2.findContours(thresh,cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
## only draw contour that have big areas
imx = img.shape[0]
imy = img.shape[1]
lp_area = (imx * imy) / 10
tmp_img = img.copy()
for cnt in contours:
approx = cv2.approxPolyDP(cnt,0.01 * cv2.arcLength(cnt, True), True)
if cv2.contourArea(cnt) > lp_area:
# Draw box corners and minimum area rectangle
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
#cv2.drawContours(tmp_img, [box], 0, (0, 50, 255), 3)
#cv2.circle(tmp_img, tuple(box[0]), 8, (0, 255, 0), -1)
#cv2.circle(tmp_img, tuple(box[1]), 8, (0, 255, 0), -1)
#cv2.circle(tmp_img, tuple(box[2]), 8, (0, 255, 0), -1)
#cv2.circle(tmp_img, tuple(box[3]), 8, (0, 255, 0), -1)
#cv2.imshow(tmp_img)
#cv2.imshow('Minimum Area Rectangle', tmp_img)
#cv2.waitKey(0)
## Correct orientation and crop
# Link, https://jdhao.github.io/2019/02/23/crop_rotated_rectangle_opencv/
width = int(rect[1][0])
height = int(rect[1][1])
src_pts = box.astype("float32")
dst_pts = np.array([[0, height-1],
[0, 0],
[width-1, 0],
[width-1, height-1]], dtype="float32")
M = cv2.getPerspectiveTransform(src_pts, dst_pts)
warped = cv2.warpPerspective(img, M, (width, height))
# Run OCR on cropped image
# If the predicted value is digit print else rotate first
result = reader.readtext(warped)
print(result)
predicted_digit = result[0][1]
if np.char.isdigit(predicted_digit) == True:
cv2.imshow("warped " + ROI,warped)
else:
rot_img = warped.copy()
for i in range(0, 3):
rotated_image = cv2.rotate(rot_img, cv2.ROTATE_90_CLOCKWISE)
result = reader.readtext(rotated_image)
#if np.array(result).size == 0:
# continue
if not result:
rot_img = rotated_image
continue
#if len(result) == 0:
# continue
predicted_digit = result[0][1]
#print(result)
#print(predicted_digit)
#cv2.imshow(rotated_image)
if np.char.isdigit(predicted_digit) == True:
cv2.imshow("Image " + ROI, rotated_image)
break
rot_img = rotated_image
return predicted_digit
def sortNumbers(Cubes):
Cubes = sorted(Cubes, key=lambda x: int(x[2]))
#Cubes.sort(key=itemgetter(2)) # In-place sorting
#Cubes = sorted(Cubes, key=itemgetter(2)) # Create a new list
return Cubes
#img = cv2.imread('gulRecNum.jpg')
img = cv2.imread('webcam7.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# convert to HSV, since red and yellow are the lowest hue colors and come before green
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# create a binary thresholded image on hue between red and yellow
#Change these if cube colours changes?
lower =(20, 100, 100)
upper = (30, 255, 255)
#lower = (0,240,160)
#upper = (30,255,255)
thresh = cv2.inRange(hsv, lower, upper)
# apply morphology
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))
clean = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15,15))
clean = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# get external contours
contours = cv2.findContours(clean, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
result2 = img.copy()
mask = np.zeros(result2.shape, dtype=np.uint8)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
ROI_number = 0
for c in contours:
cv2.drawContours(result2,[c],0,(0,0,0),2)
# get rotated rectangle from contour
rot_rect = cv2.minAreaRect(c)
box = cv2.boxPoints(rot_rect)
box = np.int0(box)
# draw rotated rectangle on copy of img
cv2.drawContours(result2,[box],0,(0,0,0),2)
# Do something if the area is greater than 1.
# What's the area of the component?
areal = cv2.contourArea(c)
if(areal > 1):
# get the center of mass
M = cv2.moments(c)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
center = (cx, cy)
print("\nx: ",cx,"\ny: ",cy)
color = (0, 0, 255)
cv2.circle(result2, center, 3, color, -1)
cv2.putText(result2, "center", (int(cx) - 10, int(cy) - 20),
cv2.FONT_HERSHEY_SIMPLEX, 1.2, color, 2)
x,y,w,h = cv2.boundingRect(c)
ROI = 255 - thresh[y:y+h, x:x+w]
cv2.drawContours(mask, [c], -1, (255,255,255), -1)
cv2.imwrite('ROI_{}.png'.format(ROI_number), ROI)
#Read saved image (number)
result = getNumber('ROI_{}.png'.format(ROI_number))
print("ROI_number: ", result)
Cubes.append([cx, cy, result])
ROI_number += 1
# save result
cv2.imwrite("4cubes_result2.png",result2)
# display result
imS = cv2.resize(result2, (600, 400))
cv2.imshow("result2", imS)
#cv2.imshow('mask', mask)
#cv2.imshow('thresh', thresh)
SortedCubes = sortNumbers(Cubes)
print("\nFound array [x, y, Cube_num] = ", Cubes)
print("Sorted array [x, y, Cube_num] = ", SortedCubes)
cv2.waitKey(0)
cv2.destroyAllWindows()
I get the following error (it can't detect a number)
Traceback (most recent call last):
  File "c:/Users/Mads/OneDrive/Universitet/7. semester/ROB1/python/objectDetectiong.py", line 169, in <module>
    result = getNumber('ROI_{}.png'.format(ROI_number))
  File "c:/Users/Mads/OneDrive/Universitet/7. semester/ROB1/python/objectDetectiong.py", line 70, in getNumber
    predicted_digit = result[0][1]
IndexError: list index out of range
This is an implementation of my comment. Since I do not have the individual images, this code works with the given grid-like processed image.
For OCR I used EasyOCR instead of Tesseract. You could also try pytesseract on each cropped output image. Instead of rotating four times by 90 degrees and picking by confidence, I went with digit detection on the OCR result: only if a detection is not a number do I rotate and retry.
Tested on Google Colab. Replace cv2_imshow(...) with cv2.imshow(...) to run locally, and remove the from google.colab.patches import cv2_imshow import.
This is a modified version of my answer on card orientation correction here: OpenCV: using Canny and Shi-Tomasi to detect round corners of a playing card. All the previous code is left in as comments.
Code
!pip install easyocr
import easyocr
reader = easyocr.Reader(['ch_sim','en']) # need to run only once to load model into memory
"""
Based on my answer of rotated card detection,
https://stackoverflow.com/questions/64860785/opencv-using-canny-and-shi-tomasi-to-detect-round-corners-of-a-playing-card/64862448#64862448
"""
import cv2
import numpy as np
from google.colab.patches import cv2_imshow
img = cv2.imread('1.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(gray,127,255,0)
#cv2_imshow(thresh)
#cv2.imshow('Thresholded original',thresh)
#cv2.waitKey(0)
## Get contours
contours,h = cv2.findContours(thresh,cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
## only draw contour that have big areas
imx = img.shape[0]
imy = img.shape[1]
lp_area = (imx * imy) / 10
#################################################################
# Four point perspective transform
# https://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
#################################################################
def order_points(pts):
# initialize a list of coordinates that will be ordered
# such that the first entry in the list is the top-left,
# the second entry is the top-right, the third is the
# bottom-right, and the fourth is the bottom-left
rect = np.zeros((4, 2), dtype = "float32")
# the top-left point will have the smallest sum, whereas
# the bottom-right point will have the largest sum
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
# now, compute the difference between the points, the
# top-right point will have the smallest difference,
# whereas the bottom-left will have the largest difference
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
# return the ordered coordinates
return rect
def four_point_transform(image, pts):
# obtain a consistent order of the points and unpack them
# individually
rect = order_points(pts)
(tl, tr, br, bl) = rect
# compute the width of the new image, which will be the
# maximum distance between bottom-right and bottom-left
# x-coordinates or the top-right and top-left x-coordinates
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
maxWidth = max(int(widthA), int(widthB))
# compute the height of the new image, which will be the
# maximum distance between the top-right and bottom-right
# y-coordinates or the top-left and bottom-left y-coordinates
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
maxHeight = max(int(heightA), int(heightB))
# now that we have the dimensions of the new image, construct
# the set of destination points to obtain a "birds eye view",
# (i.e. top-down view) of the image, again specifying points
# in the top-left, top-right, bottom-right, and bottom-left
# order
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype = "float32")
# compute the perspective transform matrix and then apply it
M = cv2.getPerspectiveTransform(rect, dst)
warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
# return the warped image
return warped
#################################################################
#print(len(contours))
tmp_img = img.copy()
for cnt in contours:
approx = cv2.approxPolyDP(cnt,0.01 * cv2.arcLength(cnt, True), True)
## calculate number of vertices
#print(len(approx))
## Get the largest contours only
## Side count cannot be used since contours are not all rectangular
if cv2.contourArea(cnt) > lp_area:
#if len(approx) == 4 and cv2.contourArea(cnt) > lp_area:
# print("\n\n")
# print("#################################################")
# print("rectangle")
# print("#################################################")
# print("\n\n")
#tmp_img = img.copy()
#cv2.drawContours(tmp_img, [cnt], 0, (0, 255, 0), 6)
#cv2_imshow(tmp_img)
#cv2.imshow('Contour Borders', tmp_img)
#cv2.waitKey(0)
# tmp_img = img.copy()
# cv2.drawContours(tmp_img, [cnt], 0, (255, 0, 255), -1)
# cv2_imshow(tmp_img)
# #cv2.imshow('Contour Filled', tmp_img)
# #cv2.waitKey(0)
# # Make a hull arround the contour and draw it on the original image
# tmp_img = img.copy()
# mask = np.zeros((img.shape[:2]), np.uint8)
# hull = cv2.convexHull(cnt)
# cv2.drawContours(mask, [hull], 0, (255, 255, 255), -1)
# cv2_imshow(mask)
# #cv2.imshow('Convex Hull Mask', mask)
# #cv2.waitKey(0)
# # Draw minimum area rectangle
# #tmp_img = img.copy()
# rect = cv2.minAreaRect(cnt)
# box = cv2.boxPoints(rect)
# box = np.int0(box)
# cv2.drawContours(tmp_img, [box], 0, (255, 0, 0), 2)
# #cv2_imshow(tmp_img)
# #cv2.imshow('Minimum Area Rectangle', tmp_img)
# #cv2.waitKey(0)
# Draw box corners and minimum area rectangle
#tmp_img = img.copy()
rect = cv2.minAreaRect(cnt)
box = cv2.boxPoints(rect)
box = np.int0(box)
#print(rect)
#print(box)
cv2.drawContours(tmp_img, [box], 0, (0, 50, 255), 3)
cv2.circle(tmp_img, tuple(box[0]), 8, (0, 255, 0), -1)
cv2.circle(tmp_img, tuple(box[1]), 8, (0, 255, 0), -1)
cv2.circle(tmp_img, tuple(box[2]), 8, (0, 255, 0), -1)
cv2.circle(tmp_img, tuple(box[3]), 8, (0, 255, 0), -1)
#cv2_imshow(tmp_img)
#cv2.imshow('Minimum Area Rectangle', tmp_img)
#cv2.waitKey(0)
## Correct orientation and crop
# Link, https://jdhao.github.io/2019/02/23/crop_rotated_rectangle_opencv/
width = int(rect[1][0])
height = int(rect[1][1])
src_pts = box.astype("float32")
dst_pts = np.array([[0, height-1],
[0, 0],
[width-1, 0],
[width-1, height-1]], dtype="float32")
M = cv2.getPerspectiveTransform(src_pts, dst_pts)
warped = cv2.warpPerspective(img, M, (width, height))
#cv2_imshow(warped)
# Run OCR on cropped image
# If the predicted value is digit print else rotate first
result = reader.readtext(warped)
predicted_digit = result[0][1]
print("Detected Text:")
if np.char.isdigit(predicted_digit) == True:
print(result)
print(predicted_digit)
cv2_imshow(warped)
else:
rot_img = warped.copy()
for i in range(0, 3):
rotated_image = cv2.rotate(rot_img, cv2.ROTATE_90_CLOCKWISE)
result = reader.readtext(rotated_image)
#if np.array(result).size == 0:
# continue
if not result:
rot_img = rotated_image
continue
#if len(result) == 0:
# continue
predicted_digit = result[0][1]
#print(result)
#print(predicted_digit)
#cv2_imshow(rotated_image)
if np.char.isdigit(predicted_digit) == True:
print(result)
print(predicted_digit)
cv2_imshow(rotated_image)
break
rot_img = rotated_image
# # Draw bounding rectangle
# #tmp_img = img.copy()
# x, y, w, h = cv2.boundingRect(cnt)
# cv2.rectangle(tmp_img, (x, y), (x + w, y + h), (255, 0, 0), 2)
# #cv2_imshow(tmp_img)
# #cv2.imshow('Bounding Rectangle', tmp_img)
# #cv2.waitKey(0)
# # Bounding Rectangle and Minimum Area Rectangle
# #tmp_img = img.copy()
# rect = cv2.minAreaRect(cnt)
# box = cv2.boxPoints(rect)
# box = np.int0(box)
# cv2.drawContours(tmp_img, [box], 0, (0, 0, 255), 2)
# x, y, w, h = cv2.boundingRect(cnt)
# cv2.rectangle(tmp_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
# #cv2_imshow(tmp_img)
# #cv2.imshow('Bounding Rectangle', tmp_img)
# #cv2.waitKey(0)
# # determine the most extreme points along the contour
# # https://www.pyimagesearch.com/2016/04/11/finding-extreme-points-in-contours-with-opencv/
# tmp_img = img.copy()
# extLeft = tuple(cnt[cnt[:, :, 0].argmin()][0])
# extRight = tuple(cnt[cnt[:, :, 0].argmax()][0])
# extTop = tuple(cnt[cnt[:, :, 1].argmin()][0])
# extBot = tuple(cnt[cnt[:, :, 1].argmax()][0])
# cv2.drawContours(tmp_img, [cnt], -1, (0, 255, 255), 2)
# cv2.circle(tmp_img, extLeft, 8, (0, 0, 255), -1)
# cv2.circle(tmp_img, extRight, 8, (0, 255, 0), -1)
# cv2.circle(tmp_img, extTop, 8, (255, 0, 0), -1)
# cv2.circle(tmp_img, extBot, 8, (255, 255, 0), -1)
# print("Corner Points: ", extLeft, extRight, extTop, extBot)
# cv2_imshow(tmp_img)
# #cv2.imshow('img contour drawn', tmp_img)
# #cv2.waitKey(0)
# #cv2.destroyAllWindows()
# ## Perspective Transform
# tmp_img = img.copy()
# pts = np.array([extLeft, extRight, extTop, extBot])
# warped = four_point_transform(tmp_img, pts)
# cv2_imshow(tmp_img)
# #cv2.imshow("Warped", warped)
# #cv2.waitKey(0)
cv2_imshow(tmp_img)
#cv2.destroyAllWindows()
Output Prediction
Detected Text:
[([[85, 67], [131, 67], [131, 127], [85, 127]], '1', 0.9992043972015381)]
1
Detected Text:
[([[85, 65], [133, 65], [133, 125], [85, 125]], '2', 0.9991914629936218)]
2
Detected Text:
[([[96, 72], [144, 72], [144, 128], [96, 128]], '4', 0.9996564984321594)]
4
Detected Text:
[([[88, 76], [132, 76], [132, 132], [88, 132]], '3', 0.9973381161689758)]
3
White Region Detection With Corners
Alternate methods:
Try a pretrained digit classification model (e.g. trained on MNIST) on each large contour exceeding a certain area; a sketch follows this list.
Use multitask object detection with rotation: one output of the network predicts detections, another regresses the angle to predict orientation.
Use a text detector such as EAST and run OCR on each detected text region.
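A minimal sketch of the first alternative (assuming TensorFlow/Keras is available; a tiny model trained for one epoch, only to show the shape of the approach, not a tuned recognizer):
import cv2
from tensorflow import keras
# train a small CNN on MNIST (white digits on black, like a thresholded ROI)
(x_train, y_train), _ = keras.datasets.mnist.load_data()
x_train = x_train[..., None].astype("float32") / 255.0
model = keras.Sequential([
    keras.layers.Conv2D(16, 3, activation="relu", input_shape=(28, 28, 1)),
    keras.layers.MaxPooling2D(),
    keras.layers.Flatten(),
    keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
model.fit(x_train, y_train, epochs=1, batch_size=128)
def classify_roi(roi_gray):
    # resize a thresholded (white-on-black) crop to MNIST size and predict
    digit = cv2.resize(roi_gray, (28, 28)).astype("float32") / 255.0
    return int(model.predict(digit[None, ..., None]).argmax())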
New image: test image
I'm trying to quantify the distance between two contours in a video of a microvessel (see snapshot)
Image analysis structure
Right now I'm only able to select one contour (which is outlined), and I'm acquiring dimensions from that outline, but what I'd like to select is the top and bottom contours of the structure and measure the distance between them (labeled with an orange line and A in the snapshot).
Any suggestions on how to do this? My code for this video analysis is below. Thanks in advance for the help!
import cv2
import pandas as pd
import numpy as np
import imutils
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
videocapture = cv2.VideoCapture('RTMLV.mp4')
def safe_div(x,y):
if y==0: return 0
return x/y
def nothing(x):
pass
def rescale_frame(frame, percent=100): #make the video windows a bit smaller
width = int(frame.shape[1]*percent/100)
height = int(frame.shape[0]*percent/100)
dim = (width, height)
return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
if not videocapture.isOpened():
print("Unable to open video")
exit()
windowName="Vessel Tracking"
cv2.namedWindow(windowName)
# Sliders to adjust image
cv2.createTrackbar("Threshold", windowName, 75, 255, nothing)
cv2.createTrackbar("Kernel", windowName, 5, 30, nothing)
cv2.createTrackbar("Iterations", windowName, 1, 10, nothing)
showLive=True
while(showLive):
ret, frame = videocapture.read()
if not ret:
print("Cannot capture the frame")
exit()
frame_resize = rescale_frame(frame)
thresh = cv2.getTrackbarPos("Threshold", windowName)
ret,thresh1 = cv2.threshold(frame_resize, thresh, 255, cv2.THRESH_BINARY)
kern = cv2.getTrackbarPos("Kernel", windowName)
kernel = np.ones((kern, kern), np.uint8) # square image kernel used for erosion
itera=cv2.getTrackbarPos("Iterations", windowName)
dilation = cv2.dilate(thresh1, kernel, iterations=itera)
erosion = cv2.erode(dilation, kernel, iterations=itera) #refines all edges in the binary image
opening = cv2.morphologyEx(erosion, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
closing = cv2.cvtColor(closing, cv2.COLOR_BGR2GRAY)
contours,hierarchy = cv2.findContours(closing,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE) # find contours with simple approximation cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE
closing = cv2.cvtColor(closing,cv2.COLOR_GRAY2RGB)
cv2.drawContours(closing, contours, -1, (128,255,0), 1)
# focus on only the largest outline by area
areas = [] #list to hold all areas
for contour in contours:
ar = cv2.contourArea(contour)
areas.append(ar)
max_area = max(areas)
max_area_index = areas.index(max_area) # index of the list element with largest area
cnt = contours[max_area_index - 1] # largest area contour is usually the viewing window itself, why?
cv2.drawContours(closing, [cnt], 0, (0,0,255), 1)
def midpoint(ptA, ptB):
return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)
# compute the rotated bounding box of the contour
orig = frame_resize.copy()
box = cv2.minAreaRect(cnt)
box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
box = np.array(box, dtype="int")
# order the points in the contour such that they appear
# in top-left, top-right, bottom-right, and bottom-left
# order, then draw the outline of the rotated bounding
# box
box = perspective.order_points(box)
cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 1)
# loop over the original points and draw them
for (x, y) in box:
cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)
# unpack the ordered bounding box, then compute the midpoint
# between the top-left and top-right coordinates, followed by
# the midpoint between bottom-left and bottom-right coordinates
(tl, tr, br, bl) = box
(tltrX, tltrY) = midpoint(tl, tr)
(blbrX, blbrY) = midpoint(bl, br)
# compute the midpoint between the top-left and top-right points,
# followed by the midpoint between the top-right and bottom-right
(tlblX, tlblY) = midpoint(tl, bl)
(trbrX, trbrY) = midpoint(tr, br)
# draw the midpoints on the image
cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
# draw lines between the midpoints
cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),(255, 0, 255), 1)
cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),(255, 0, 255), 1)
cv2.drawContours(orig, [cnt], 0, (0,0,255), 1)
# compute the Euclidean distance between the midpoints
dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
# compute the size of the object
P2M4x = 1.2
P2M10x = 3.2
P2M20x = 6
pixelsPerMetric = P2M10x # Pixel to micron conversion
dimA = dA / pixelsPerMetric
dimB = dB / pixelsPerMetric
dimensions = [dimA, dimB]
# draw the object sizes on the image
cv2.putText(orig, "{:.1f}um".format(dimA), (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
cv2.putText(orig, "{:.1f}um".format(dimB), (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
# compute the center of the contour
M = cv2.moments(cnt)
cX = int(safe_div(M["m10"],M["m00"]))
cY = int(safe_div(M["m01"],M["m00"]))
# draw the contour and center of the shape on the image
cv2.circle(orig, (cX, cY), 5, (255, 255, 255), -1)
cv2.putText(orig, "center", (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
cv2.imshow(windowName, orig)
cv2.imshow('', closing)
if cv2.waitKey(30)>=0:
showLive=False
videocapture.release()
cv2.destroyAllWindows()
Edits have been made to this answer in response to the new test image that was added to the post.
I was unable to segment the blood vessel in the test image using the code that you uploaded. I segmented the image by using manual annotation and the GrabCut algorithm.
This is the code that I used for the manual segmentation:
import cv2, os, numpy as np
import time
# Plot with Matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img_path = '/home/stephen/Desktop/0lszR.jpg'
img = cv2.imread(img_path)
img = img[420:1200, :]
h,w,_ = img.shape
mask = np.zeros((h,w), np.uint8)
mask[:] = 2
src = img.copy()
h,w,_ = img.shape
drawing = src.copy()
# Mouse callback function
global k, px, py
k = 0
px, py = 0,0
def callback(event, x, y, flags, param):
global k, px, py
print(x,y, k, px, py)
if k == 115: # 's' for sure background
if px+py!=0:
cv2.line(img, (x,y), (px, py), (255,255,0), 8)
cv2.line(mask, (x,y), (px, py), 0, 8)
if k == 116: # 't' for sure foreground
if px+py!=0:
cv2.line(img, (x,y), (px, py), (0,255,255), 8)
cv2.line(mask, (x,y), (px, py), 1, 8)
else: print(px, py)
px, py = x,y
#if k != 115 or 116: px, py = 0,0
cv2.namedWindow('img')
cv2.setMouseCallback('img', callback)
while k != 27:
cv2.imshow('img', img)
k_temp = cv2.waitKey(1)
if k_temp!=-1: k = k_temp
cv2.destroyAllWindows()
After I had found the segmented image, I used the function np.nonzero() to find the tops and bottoms of the columns:
This is the code that I used to find the width:
# Initialize parameters for the GrabCut algorithm
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
# Apply GrabCut
out_mask = mask.copy()
out_mask, _, _ = cv2.grabCut(src,out_mask,None,bgdModel,fgdModel,1,cv2.GC_INIT_WITH_MASK)
out_mask = np.where((out_mask==2)|(out_mask==0),0,1).astype('uint8')
# Apply the mask to the source image
out_img = src*out_mask[:,:,np.newaxis]
flip_mask = cv2.flip(out_mask, 0)
# Find the distances
distances = []
for col_num in range(src.shape[1]-1):
col = out_mask[:, col_num:col_num+1]
flip_col = flip_mask[:, col_num:col_num+1]
top = np.nonzero(col)[0][0]
bottom = h-np.nonzero(flip_col)[0][0]
if col_num % 12 == 0:
cv2.line(drawing, (col_num, top), (col_num, bottom), (234,345,34), 4)
distances.append(bottom-top)
f, axarr = plt.subplots(2,3, sharex=True)
axarr[0,0].imshow(src)
axarr[0,1].imshow(out_mask)
axarr[0,2].imshow(drawing)
axarr[1,0].imshow(img)
axarr[1,1].imshow(out_img)
axarr[1,2].plot(distances)
axarr[0,0].set_title("Source")
axarr[0,1].set_title('Mask from GrabCut')
axarr[0,2].set_title('Widths')
axarr[1,0].set_title('Manual Annotation')
axarr[1,1].set_title('GrabCut Mask')
axarr[1,2].set_title('Graph of Width')
axarr[0,0].axis('off')
axarr[0,1].axis('off')
axarr[1,0].axis('off')
axarr[1,1].axis('off')
axarr[1,2].axis('off')
axarr[0,2].axis('off')
plt.show()
I am trying to build an OCR for recognising a seven-segment display, as shown below
Using the preprocessing tools of OpenCV I got it to here
Now I am trying to follow this tutorial - https://www.pyimagesearch.com/2017/02/13/recognizing-digits-with-opencv-and-python/
But on the part
digitCnts = contours.sort_contours(digitCnts,
method="left-to-right")[0]
digits = []
I am getting this error:
File "/Users/ms/anaconda3/lib/python3.6/site-packages/imutils/contours.py", line 25, in sort_contours
key=lambda b: b[1][i], reverse=reverse))
ValueError: not enough values to unpack (expected 2, got 0)
Edit: the error is solved by using THRESH_BINARY_INV, but the OCR is still not working; any fix would be great.
Any idea how to solve this and make my OCR a working model?
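For reference, that ValueError is raised when sort_contours receives an empty list (nothing passed the size filter), so a small guard makes the failure explicit while debugging:
if not digitCnts:
    raise SystemExit("no digit contours passed the size filter; "
                     "check the threshold output and the w/h cutoffs")
digitCnts = contours.sort_contours(digitCnts, method="left-to-right")[0]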
My whole code is :
# import the necessary packages
import numpy as np
import cv2
import imutils
from imutils.perspective import four_point_transform
from imutils import contours
# define the dictionary of digit segments so we can identify
# each digit on the thermostat
DIGITS_LOOKUP = {
(1, 1, 1, 0, 1, 1, 1): 0,
(0, 0, 1, 0, 0, 1, 0): 1,
(1, 0, 1, 1, 1, 1, 0): 2,
(1, 0, 1, 1, 0, 1, 1): 3,
(0, 1, 1, 1, 0, 1, 0): 4,
(1, 1, 0, 1, 0, 1, 1): 5,
(1, 1, 0, 1, 1, 1, 1): 6,
(1, 0, 1, 0, 0, 1, 0): 7,
(1, 1, 1, 1, 1, 1, 1): 8,
(1, 1, 1, 1, 0, 1, 1): 9
}
# load image
image = cv2.imread('d4.jpg')
# create hsv
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# set lower and upper color limits
low_val = (60,180,160)
high_val = (179,255,255)
# Threshold the HSV image
mask = cv2.inRange(hsv, low_val,high_val)
# find contours in mask
ret, cont, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# select the largest contour
largest_area = 0
for cnt in cont:
if cv2.contourArea(cnt) > largest_area:
cont = cnt
largest_area = cv2.contourArea(cnt)
# get the parameters of the boundingbox
x,y,w,h = cv2.boundingRect(cont)
# create and show subimage
roi = image[y:y+h, x:x+w]
cv2.imshow("Result", roi)
# draw box on original image and show image
cv2.rectangle(image, (x,y),(x+w,y+h), (0,0,255),2)
cv2.imshow("Image", image)
grayscaled = cv2.cvtColor(roi,cv2.COLOR_BGR2GRAY)
retval, threshold = cv2.threshold(grayscaled, 10, 255, cv2.THRESH_BINARY)
retval2,threshold2 = cv2.threshold(grayscaled,125,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imshow('threshold',threshold2)
cv2.waitKey(0)
cv2.destroyAllWindows()
# find contours in the thresholded image, then initialize the
# digit contours lists
cnts = cv2.findContours(threshold2.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
digitCnts = []
# loop over the digit area candidates
for c in cnts:
# compute the bounding box of the contour
(x, y, w, h) = cv2.boundingRect(c)
# if the contour is sufficiently large, it must be a digit
if w >= 15 and (h >= 30 and h <= 40):
digitCnts.append(c)
# sort the contours from left-to-right, then initialize the
# actual digits themselves
digitCnts = contours.sort_contours(digitCnts,
method="left-to-right")[0]
digits = []
output = image.copy()  # copy of the input to draw boxes and labels on
# loop over each of the digits
for c in digitCnts:
# extract the digit ROI
(x, y, w, h) = cv2.boundingRect(c)
roi = threshold2[y:y + h, x:x + w]
# compute the width and height of each of the 7 segments
# we are going to examine
(roiH, roiW) = roi.shape
(dW, dH) = (int(roiW * 0.25), int(roiH * 0.15))
dHC = int(roiH * 0.05)
# define the set of 7 segments
segments = [
((0, 0), (w, dH)), # top
((0, 0), (dW, h // 2)), # top-left
((w - dW, 0), (w, h // 2)), # top-right
((0, (h // 2) - dHC) , (w, (h // 2) + dHC)), # center
((0, h // 2), (dW, h)), # bottom-left
((w - dW, h // 2), (w, h)), # bottom-right
((0, h - dH), (w, h)) # bottom
]
on = [0] * len(segments)
# loop over the segments
for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
# extract the segment ROI, count the total number of
# thresholded pixels in the segment, and then compute
# the area of the segment
segROI = roi[yA:yB, xA:xB]
total = cv2.countNonZero(segROI)
area = (xB - xA) * (yB - yA)
# if the total number of non-zero pixels is greater than
# 50% of the area, mark the segment as "on"
if total / float(area) > 0.5:
on[i]= 1
# lookup the digit and draw it on the image
digit = DIGITS_LOOKUP[tuple(on)]
digits.append(digit)
cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0), 1)
cv2.putText(output, str(digit), (x - 10, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
# display the digits
print(u"{}{}.{}{}.{}{} \u00b0C".format(*digits))
cv2.imshow("Input", image)
cv2.imshow("Output", output)
cv2.waitKey(0)
A help would be great in fixing my OCR
I think the lookup table you created is for a seven-segment display, not for seven-segment OCR. As the size of the display is fixed, I think you can try to segment it into separate regions and recognise each one using template matching or k-means.
These are my preprocessing steps:
(1) Find the light green display in HSV:
mask = cv2.inRange(hsv, (50, 100, 180), (70, 255, 255))
(2) try to separate the digits by projection, and recognise standard seven-segment digits using the LUT (a sketch of the projection step follows):
(3) try it on the detected green display
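A minimal sketch of the projection split in step (2) (the noise floor is a guessed value; the file name and HSV bounds are taken from this page):
import cv2
import numpy as np
img = cv2.imread('d4.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (50, 100, 180), (70, 255, 255))
# vertical projection: columns that cross a lit segment have non-zero counts
cols = (mask > 0).sum(axis=0)
on = cols > 2  # small noise floor, a guessed value
# rising/falling edges of the projection bound the individual digits
edges = np.flatnonzero(np.diff(on.astype(np.int8)))
digit_rois = [mask[:, l:r + 1] for l, r in zip(edges[::2], edges[1::2])]
# each ROI now holds one digit, ready for the LUT or template matching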
So, as I said in the comments, there were two problems:
You were trying to find black contours on a white background, which is the opposite of what the OpenCV documentation expects (white objects on a black background). This was solved by using the THRESH_BINARY_INV flag instead of THRESH_BINARY.
Because the segments of a digit are not connected, a full contour for the digit couldn't be found, so I tried some morphological operations. Following are the steps:
2a) Opening on the above image with following code:
threshold2 = cv2.morphologyEx(threshold, cv2.MORPH_OPEN, np.ones((3,3), np.uint8))
2b) Dilation on the previous image:
threshold2 = cv2.dilate(threshold2, np.ones((5,1), np.uint8), iterations=1)
2c) Crop the top part of the image to separate numbers due to dilating into the top border:
height, width = threshold2.shape[:2]
threshold2 = threshold2[5:height,5:width]
Note: somehow the images are being displayed here without the white border that I am talking about. Try opening an image in a new window and you will see what I mean.
So, after solving these issues, the contours were pretty good and looked the way they were supposed to, as seen here:
cnts = cv2.findContours(threshold2.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
digitCnts = []
# loop over the digit area candidates
for c in cnts:
# compute the bounding box of the contour
(x, y, w, h) = cv2.boundingRect(c)
# if the contour is sufficiently large, it must be a digit
if w <= width * 0.5 and (h >= height * 0.2):
digitCnts.append(c)
# sort the contours from left-to-right, then initialize the
# actual digits themselves
cv2.drawContours(image2, digitCnts, -1, (0, 0, 255))
cv2.imwrite("cnts-sort.jpg", image2)
As you can see below, the contours are being drawn in red.
Now, for mapping each detected digit to a value: this part somehow doesn't work, and I blame the lookup table for it. As you can see from the images below, the bounding rects for all the numbers are correctly cropped, but the lookup table fails to recognize them.
# loop over each of the digits
j = 0
for c in digitCnts:
# extract the digit ROI
(x, y, w, h) = cv2.boundingRect(c)
roi = threshold2[y:y + h, x:x + w]
cv2.imwrite("roi" + str(j) + ".jpg", roi)
j += 1
# compute the width and height of each of the 7 segments
# we are going to examine
(roiH, roiW) = roi.shape
(dW, dH) = (int(roiW * 0.25), int(roiH * 0.15))
dHC = int(roiH * 0.05)
# define the set of 7 segments
segments = [
((0, 0), (w, dH)), # top
((0, 0), (dW, h // 2)), # top-left
((w - dW, 0), (w, h // 2)), # top-right
((0, (h // 2) - dHC) , (w, (h // 2) + dHC)), # center
((0, h // 2), (dW, h)), # bottom-left
((w - dW, h // 2), (w, h)), # bottom-right
((0, h - dH), (w, h)) # bottom
]
on = [0] * len(segments)
# loop over the segments
for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
# extract the segment ROI, count the total number of
# thresholded pixels in the segment, and then compute
# the area of the segment
segROI = roi[yA:yB, xA:xB]
total = cv2.countNonZero(segROI)
area = (xB - xA) * (yB - yA)
# if the total number of non-zero pixels is greater than
# 50% of the area, mark the segment as "on"
if area != 0:
if total / float(area) > 0.5:
on[i] = 1
# lookup the digit and draw it on the image
try:
digit = DIGITS_LOOKUP[tuple(on)]
digits.append(digit)
cv2.rectangle(roi, (x, y), (x + w, y + h), (0, 255, 0), 1)
cv2.putText(roi, str(digit), (x - 10, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
except KeyError:
continue
I read through the website you mentioned in the question, and from the comments it seems some of the entries in the LUT might be wrong, so I am going to leave that for you to figure out. Following are the individual digits found (but not recognised):
Alternatively, you could use tesseract instead to recognise these detected digits.
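A minimal sketch of that on one of the cropped digits written out above (--psm 10 treats the image as a single character; the padding and inversion reflect an assumption that Tesseract prefers dark glyphs on a light background with a margin):
import cv2
import pytesseract
roi = cv2.imread("roi0.jpg", cv2.IMREAD_GRAYSCALE)
# add a margin, then invert: white-on-black segment crops become dark-on-light
roi = cv2.copyMakeBorder(roi, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value=0)
roi = cv2.bitwise_not(roi)
txt = pytesseract.image_to_string(
    roi, config="--psm 10 -c tessedit_char_whitelist=0123456789")
print(txt.strip())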
Hope it helps!