Extract individual fields from a table image to Excel with OCR - Python

I have scanned images which contain tables, as shown in this image:
I am trying to extract each box separately and perform OCR, but when I try to detect horizontal and vertical lines and then detect the boxes, it returns the following image:
And when I try other transformations to detect the text (erode and dilate), some remnants of the lines still come along with the text, like below:
I cannot isolate the text to perform OCR, and proper bounding boxes aren't being generated, like below:
I cannot get cleanly separated boxes using the real lines; when I tried this on an image that was edited in Paint (as shown below) to add digits, it works.
I don't know which part I'm doing wrong, but if there's anything I should try, or maybe change/add in my question, please tell me.
# Loading all required libraries
%pylab inline
import cv2
import numpy as np
import pandas as pd
import pytesseract
import matplotlib.pyplot as plt
import statistics
from time import sleep
import random

img = cv2.imread('images/scan1.jpg', 0)

# add a border to the image
img1 = cv2.copyMakeBorder(img, 50, 50, 50, 50, cv2.BORDER_CONSTANT, value=[255, 255])

# threshold the image
(thresh, th3) = cv2.threshold(img1, 255, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

# flip the image pixel values
th3 = 255 - th3

# initialize kernels for table boundary detection
if th3.shape[0] < 1000:
    ver = np.array([[1]] * 7)
    hor = np.array([[1] * 6])
else:
    ver = np.array([[1]] * 19)
    hor = np.array([[1] * 15])

# detect vertical lines of the table borders
img_temp1 = cv2.erode(th3, ver, iterations=3)
verticle_lines_img = cv2.dilate(img_temp1, ver, iterations=3)

# detect horizontal lines of the table borders
img_hor = cv2.erode(th3, hor, iterations=3)
hor_lines_img = cv2.dilate(img_hor, hor, iterations=4)

# combine horizontal and vertical lines
hor_ver = cv2.add(hor_lines_img, verticle_lines_img)
hor_ver = 255 - hor_ver

# subtract the table borders from the image
temp = cv2.subtract(th3, hor_ver)
temp = 255 - temp

# XOR operation to erase the table boundaries
tt = cv2.bitwise_xor(img1, temp)
iii = cv2.bitwise_not(tt)
tt1 = iii.copy()

# kernel initialization
ver1 = np.array([[1, 1]] * 9)
hor1 = np.array([[1] * 10] * 2)

# morphological operations
temp1 = cv2.erode(tt1, ver1, iterations=2)
verticle_lines_img1 = cv2.dilate(temp1, ver1, iterations=1)
temp12 = cv2.erode(tt1, hor1, iterations=1)
hor_lines_img2 = cv2.dilate(temp12, hor1, iterations=1)

# combine the two results to keep only the text part and remove everything else
hor_ver = cv2.add(hor_lines_img2, verticle_lines_img1)
dim1 = (hor_ver.shape[1], hor_ver.shape[0])
dim = (hor_ver.shape[1] * 2, hor_ver.shape[0] * 2)

# resize the image to double its size to enlarge the text
resized = cv2.resize(hor_ver, dim, interpolation=cv2.INTER_AREA)

# bitwise not to flip the pixel values so morphological operations (dilate/erode) can be applied
want = cv2.bitwise_not(resized)

if want.shape[0] < 1000:
    kernel1 = np.array([[1, 1, 1]])
    kernel2 = np.array([[1, 1],
                        [1, 1]])
    kernel3 = np.array([[1, 0, 1],
                        [0, 1, 0],
                        [1, 0, 1]])
else:
    kernel1 = np.array([[1] * 6])
    kernel2 = np.array([[1] * 5] * 4)

tt1 = cv2.dilate(want, kernel1, iterations=2)

# resize the image back to its original size
resized1 = cv2.resize(tt1, dim1, interpolation=cv2.INTER_AREA)

# find contours, which will detect all the boxes
contours1, hierarchy1 = cv2.findContours(resized1, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# function to sort contours along an axis (e.g. top to bottom)
def sort_contours(cnts, method="left-to-right"):
    # initialize the reverse flag and sort index
    reverse = False
    i = 0
    # handle sorting in reverse
    if method == "right-to-left" or method == "bottom-to-top":
        reverse = True
    # handle sorting against the y-coordinate rather than
    # the x-coordinate of the bounding box
    if method == "top-to-bottom" or method == "bottom-to-top":
        i = 1
    # construct the list of bounding boxes and sort them
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                        key=lambda b: b[1][i], reverse=reverse))
    # return the list of sorted contours and bounding boxes
    return (cnts, boundingBoxes)

# sort contours by calling the function
(cnts, boundingBoxes) = sort_contours(contours1, method="top-to-bottom")

# store the height of every bounding box
heightlist = []
for i in range(len(boundingBoxes)):
    heightlist.append(boundingBoxes[i][3])

# sort the height values
heightlist.sort()

sportion = int(.5 * len(heightlist))
eportion = int(0.05 * len(heightlist))

# take the 50% to 95% range of heights and calculate their mean;
# this ignores the small bounding boxes, which are basically noise
try:
    medianheight = statistics.mean(heightlist[-sportion:-eportion])
except:
    medianheight = statistics.mean(heightlist[-sportion:-2])

# keep bounding boxes whose height is more than 70% of the mean height and drop
# those whose width-to-height ratio is less than 0.9
box = []
imag = iii.copy()
for i in range(len(cnts)):
    cnt = cnts[i]
    x, y, w, h = cv2.boundingRect(cnt)
    if h >= .7 * medianheight and w / h > 0.9:
        image = cv2.rectangle(imag, (x + 4, y - 2), (x + w - 5, y + h), (0, 255, 0), 1)
        box.append([x, y, w, h])

# to show image
### Now we have the badly detected boxes, as shown

You're on the right track. Here's a continuation of your approach with slight modifications. The idea is:
Obtain a binary image. Load the image, convert to grayscale, and apply Otsu's threshold.
Remove all character text contours. We create a rectangular kernel and perform opening to keep only the horizontal/vertical lines. This effectively turns the text into tiny noise, so we find contours and filter using contour area to remove them.
Repair horizontal/vertical lines and extract each ROI. We morph close to fix any broken lines and smooth the table. From here we sort the box field contours using imutils.sort_contours() with the top-to-bottom parameter. Next we find contours and filter using contour area, then extract each ROI.
Here's a visualization of each box field and the extracted ROI
Code
import cv2
import numpy as np
from imutils import contours

# Load image, grayscale, Otsu's threshold
image = cv2.imread('1.jpg')
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

# Remove text characters with morph open and contour filtering
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)
cnts = cv2.findContours(opening, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    area = cv2.contourArea(c)
    if area < 500:
        cv2.drawContours(opening, [c], -1, (0,0,0), -1)

# Repair table lines, sort contours, and extract ROI
close = 255 - cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel, iterations=1)
cnts = cv2.findContours(close, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
(cnts, _) = contours.sort_contours(cnts, method="top-to-bottom")
for c in cnts:
    area = cv2.contourArea(c)
    if area < 25000:
        x,y,w,h = cv2.boundingRect(c)
        cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), -1)
        ROI = original[y:y+h, x:x+w]

        # Visualization
        cv2.imshow('image', image)
        cv2.imshow('ROI', ROI)
        cv2.waitKey(20)

cv2.imshow('opening', opening)
cv2.imshow('close', close)
cv2.imshow('image', image)
cv2.waitKey()
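To then read the contents of each field, a minimal OCR sketch (not part of the original answer) can be appended; it assumes pytesseract is installed and reuses original and the filtered cnts from the code above, and the --psm value and area threshold are just illustrative choices:
import pytesseract

# OCR each extracted box field; keep (y, x, text) so the cells can be ordered later
fields = []
for c in cnts:
    if cv2.contourArea(c) < 25000:
        x, y, w, h = cv2.boundingRect(c)
        roi = original[y:y+h, x:x+w]
        # --psm 7: treat the ROI as a single line of text (an assumption to tune)
        text = pytesseract.image_to_string(roi, config='--psm 7').strip()
        fields.append((y, x, text))

# print the fields roughly top-to-bottom, left-to-right
for y, x, text in sorted(fields):
    print(x, y, text)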

nathancy's answer is also accurate. I used the following script for getting each box and sorting it by columns and rows.
Note: most of this code is from a Medium blog by Kanan Vyas here: https://medium.com/coinmonks/a-box-detection-algorithm-for-any-image-containing-boxes-756c15d7ed26
# most of this code is taken from a blog by Kanan Vyas here:
# https://medium.com/coinmonks/a-box-detection-algorithm-for-any-image-containing-boxes-756c15d7ed26
import cv2
import numpy as np

img = cv2.imread('images/scan2.jpg', 0)

# fn to show np images with cv2 and close on any key press
def imshow(img, label='default'):
    cv2.imshow(label, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()

# Thresholding the image
(thresh, img_bin) = cv2.threshold(img, 250, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# inverting the image
img_bin = 255 - img_bin

# Defining a kernel length
kernel_length = np.array(img).shape[1]//80

# A vertical kernel of (1 X kernel_length), which will detect all the vertical lines in the image.
verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_length))
# A horizontal kernel of (kernel_length X 1), which will help detect all the horizontal lines in the image.
hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))
# A kernel of (3 X 3) ones.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

# Morphological operation to detect vertical lines from an image
img_temp1 = cv2.erode(img_bin, verticle_kernel, iterations=3)
verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=3)
#cv2.imwrite("verticle_lines.jpg",verticle_lines_img)

# Morphological operation to detect horizontal lines from an image
img_temp2 = cv2.erode(img_bin, hori_kernel, iterations=3)
horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=3)
#cv2.imwrite("horizontal_lines.jpg",horizontal_lines_img)

# Weighting parameters; these decide how much of each image is added to make the new image.
alpha = 0.5
beta = 1.0 - alpha
# This function adds two images with specific weights to get a third image as the weighted sum of the two.
img_final_bin = cv2.addWeighted(verticle_lines_img, alpha, horizontal_lines_img, beta, 0.0)
img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)
(thresh, img_final_bin) = cv2.threshold(img_final_bin, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
cv2.imwrite("img_final_bin.jpg", img_final_bin)

# Find contours for the image, which will detect all the boxes
contours, hierarchy = cv2.findContours(img_final_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# bounding boxes (x, y, w, h) for every contour; the sorting functions below work on these
boundingBoxes = [cv2.boundingRect(c) for c in contours]

""" this section saves each extracted box as a separate image.
idx = 0
for c in contours:
    # Returns the location and width,height for every contour
    x, y, w, h = cv2.boundingRect(c)
    # only selecting boxes within a certain width/height range
    if (w > 10 and h > 15 and h < 50):
        idx += 1
        new_img = img[y:y+h, x:x+w]
        #cv2.imwrite("kanan/1/"+ "{}-{}-{}-{}".format(x, y, w, h) + '.jpg', new_img)
"""

# get the set of all y-coordinates to sort boxes row-wise
def getsety(boxes):
    ally = []
    for b in boxes:
        ally.append(b[1])
    ally = set(ally)
    ally = sorted(ally)
    return ally

# group boxes whose y is within a certain range, because if the image is tilted,
# boxes in the same row can have slightly different y values
def sort_boxes(boxes, y, row_column):
    l = []
    for b in boxes:
        if (b[2] > 10 and b[3] > 15 and b[3] < 50):
            if b[1] >= y - 7 and b[1] <= y + 7:
                l.append(b)
    if l in row_column:
        return row_column
    else:
        row_column.append(l)
        return row_column

# sort each row by the x of each box to order it column-wise
def sortrows(rc):
    new_rc = []
    for row in rc:
        r_new = sorted(row, key=lambda cell: cell[0])
        new_rc.append(r_new)
    return new_rc

row_column = []
for i in getsety(boundingBoxes):
    row_column = sort_boxes(boundingBoxes, i, row_column)
row_column = [i for i in row_column if i != []]

# final list with boxes sorted from top left to bottom right
row_column = sortrows(row_column)
I made this in a Jupyter notebook and copy-pasted it here; if any errors come up, let me know.
Thank you, everyone, for the answers.
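As a follow-up sketch (not part of the original notebook): once row_column holds the sorted grid of (x, y, w, h) boxes, the cells can be OCR'd and written to Excel. This assumes pytesseract and openpyxl are installed and reuses the grayscale img from the script above; the --psm value is an illustrative choice.
import pandas as pd
import pytesseract

# OCR every cell in the sorted grid and build a table, row by row
table = []
for row in row_column:
    table.append([
        pytesseract.image_to_string(img[y:y+h, x:x+w], config='--psm 7').strip()
        for (x, y, w, h) in row
    ])

# pad rows to equal length so pandas accepts them, then export to Excel
width = max(len(r) for r in table)
table = [r + [''] * (width - len(r)) for r in table]
pd.DataFrame(table).to_excel('table.xlsx', index=False, header=False)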

This is a function which uses tesseract-ocr for layout detection. You can try it with different RIL levels and PSMs. For more details, have a look here: https://github.com/sirfz/tesserocr
import os
import platform
from typing import List, Tuple
from tesserocr import PyTessBaseAPI, iterate_level, RIL

system = platform.system()
if system == 'Linux':
    tessdata_folder_default = ''
elif system == 'Windows':
    tessdata_folder_default = r'C:\Program Files (x86)\Tesseract-OCR\tessdata'
else:
    raise NotImplementedError

# this tesseract-specific env variable takes precedence for tessdata folder location selection;
# especially important on Windows, as we don't know if we're running 32 or 64 bit tesseract
tessdata_folder = os.getenv('TESSDATA_PREFIX', tessdata_folder_default)

def get_layout_boxes(input_image,  # PIL image object
                     level: RIL,
                     include_text: bool,
                     include_boxes: bool,
                     language: str,
                     psm: int,
                     tessdata_path='') -> List[Tuple]:
    """
    Get image components coordinates. It will also return text if include_text is True.
    :param input_image: input PIL image
    :param level: page iterator level, please see "RIL" enum
    :param include_text: if True return boxes texts
    :param include_boxes: if True return boxes coordinates
    :param language: language for OCR
    :param psm: page segmentation mode, by default it is PSM.AUTO which is 3
    :param tessdata_path: the path to the tessdata folder
    :return: list of tuples: [((x1, y1, x2, y2), text), ...]
    """
    assert any((include_text, include_boxes)), (
        'Both include_text and include_boxes can not be False.')
    if not tessdata_path:
        tessdata_path = tessdata_folder
    try:
        with PyTessBaseAPI(path=tessdata_path, lang=language) as api:
            api.SetImage(input_image)
            api.SetPageSegMode(psm)
            api.Recognize()
            page_iterator = api.GetIterator()
            data = []
            for pi in iterate_level(page_iterator, level):
                bounding_box = pi.BoundingBox(level)
                if bounding_box is not None:
                    text = pi.GetUTF8Text(level) if include_text else None
                    box = bounding_box if include_boxes else None
                    data.append((box, text))
            return data
    except RuntimeError:
        print('Please specify correct path to tessdata.')
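A possible way to call it (a sketch; the image path, language, iterator level, and PSM value below are assumptions to adapt): open the scan with PIL and ask for word-level boxes.
from PIL import Image
from tesserocr import RIL, PSM

# hypothetical input file; replace with your scanned table image
page = Image.open('images/scan1.jpg')

# word-level boxes with their recognized text, using automatic page segmentation
results = get_layout_boxes(page,
                           level=RIL.WORD,
                           include_text=True,
                           include_boxes=True,
                           language='eng',
                           psm=PSM.AUTO)
for (box, text) in results:
    print(box, text)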

Related

Not getting a contour box around the texts/tags on an X-ray image with OpenCV

I am trying to detect texts and remove those thick letters. The goal is the same as the one proposed in the following link: How to detect text on an X-Ray image with OpenCV ("to extract the oriented bounding boxes as a matrix").
Two sample images:
Sample Image #1
Sample Image #2
Here, the letters L, J, C, O (from the 1st image) and L, D, A, N, and the circle shape (from the 2nd image) must be detected. I tried the methods proposed in the above link (How to detect text on an X-Ray image with OpenCV); however, it fails to detect the texts in the background.
Original image --> binary image (thresholding)
Morph close --> detected text (nothing)
As you can see, it fails to detect the texts from the background. It just returns a black image. I don't know what happened.
The code:
import cv2
import numpy as np

# Load image, create mask, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread('HandImage.png')
original = image.copy()
blank = np.zeros(image.shape[:2], dtype=np.uint8)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

# Merge text into a single contour
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=3)

# Find contours
cnts = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    # Filter using contour area and aspect ratio
    x,y,w,h = cv2.boundingRect(c)
    area = cv2.contourArea(c)
    ar = w / float(h)
    if (ar > 1.4 and ar < 4) or ar < .85 and area > 10 and area < 500:
        # Find rotated bounding box
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        cv2.drawContours(image,[box],0,(36,255,12),2)
        cv2.drawContours(blank,[box],0,(255,255,255),-1)

# Bitwise operations to isolate text
extract = cv2.bitwise_and(thresh, blank)
extract = cv2.bitwise_and(original, original, mask=extract)

cv2.imshow('thresh', thresh)
cv2.imshow('image', image)
cv2.imshow('close', close)
cv2.imshow('extract', extract)
cv2.waitKey()
I am new to computer vision and image manipulation... Please help!
Update:
I have figured out the error in my code. I had to adjust and identify the right size/area of the bounding box that gets selected for the text.
The code:
# Find contours
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=lambda x: cv2.boundingRect(x)[0])
for c in cnts:
    # Filter using contour area and aspect ratio (x1 = width, y1 = height)
    x, y, x1, y1 = cv2.boundingRect(c)
    area = cv2.contourArea(c)
    if (30 < x1 < 200 or 350 < x1 < 500) and 50 < y1 < 350:
        # Use the rotated bounding box and scale it up around its center
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        translated_box = box - np.mean(box, axis=0)
        scaled_box = translated_box * 2  # 2 is the scale factor
        retranslated_box = scaled_box + np.mean(box, axis=0)
        box = np.int0(retranslated_box)
        cv2.drawContours(image, [box], 0, (36,255,12), 2)
        cv2.drawContours(blank, [box], 0, (255,255,255), -1)
But I also liked the suggestion by @Mikel B, who used a neural network method.

Not a solution in OpenCV as requested, but I leave it here in case a neural-network-based solution could be of interest to anybody.
First of all, install the package:
pip install craft-text-detector
Then copy-paste the following:
from craft_text_detector import Craft
# import craft functions
from craft_text_detector import (
    read_image,
    load_craftnet_model,
    load_refinenet_model,
    get_prediction,
    export_detected_regions,
    export_extra_results,
    empty_cuda_cache
)

# set image path and export folder directory
image = '/path/to/JP6x0.png'  # can be a filepath, PIL image or numpy array
output_dir = 'outputs/'

# read image
image = read_image(image)

# load models
refine_net = load_refinenet_model(cuda=False)
craft_net = load_craftnet_model(cuda=False)

# perform prediction
prediction_result = get_prediction(
    image=image,
    craft_net=craft_net,
    refine_net=refine_net,
    text_threshold=0.5,
    link_threshold=0.1,
    low_text=0.1,
    cuda=False,
    long_size=1280
)

# export detected text regions
exported_file_paths = export_detected_regions(
    image=image,
    regions=prediction_result["boxes"],
    output_dir=output_dir,
    rectify=True
)

# export heatmap, detection points, box visualization
export_extra_results(
    image=image,
    regions=prediction_result["boxes"],
    heatmaps=prediction_result["heatmaps"],
    output_dir=output_dir
)

# unload models from gpu
empty_cuda_cache()
My results with these parameters:
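If you want to inspect the detections inline rather than via the exported files, a small hedged sketch (assuming prediction_result["boxes"] is a list of 4-point polygons, as craft-text-detector returns, and that image is the numpy array produced by read_image):
import cv2
import numpy as np

# draw every detected text polygon on a copy of the input image
vis = image.copy()
for poly in prediction_result["boxes"]:
    pts = np.int32(poly).reshape(-1, 1, 2)
    cv2.polylines(vis, [pts], isClosed=True, color=(36, 255, 12), thickness=2)

cv2.imwrite('outputs/boxes_visualization.png', vis)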

Show only large differences between images of slightly different scale

I am trying to show the differences between 2 images, mainly those within a "cloud" outline, as shown in the new image below. The "cloud" outline represents changes that have been made to the reference image. Some info has been blanked out for privacy reasons; do let me know if you require any more information!
Reference Image: reference
New Image: new
The scale of both images is a tad bit off, so I used cv2.findHomography but was unable to get them aligned properly.
Aligned Image: aligned new
Here's the output image from my code: result
Image result I am trying to achieve: ideal result
To summarize, here are the problems I am trying to solve
To align 2 images of slightly different scale
To highlight differences, typically within a cloud outline
Merge small differences of close proximity into a larger boundary
My code is based on the response of user fmw42 in this thread with some minor changes:
Find Differences Between Images with OpenCV Python
Any help is greatly appreciated!
import cv2
import numpy as np

MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.1

def alignImages(imNew, imRef):
    # imRef is the reference; imNew is warped to match it
    # Convert images to grayscale
    imNew_gray = cv2.cvtColor(imNew, cv2.COLOR_BGR2GRAY)
    imRef_gray = cv2.cvtColor(imRef, cv2.COLOR_BGR2GRAY)

    # Detect ORB features and compute descriptors.
    orb = cv2.ORB_create(MAX_FEATURES)
    keypoints1, descriptors1 = orb.detectAndCompute(imNew_gray, None)
    keypoints2, descriptors2 = orb.detectAndCompute(imRef_gray, None)

    # Match features.
    matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING)
    matches = list(matcher.match(descriptors1, descriptors2, None))

    # Sort matches by score
    matches.sort(key=lambda x: x.distance, reverse=False)

    # Remove not-so-good matches
    numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
    matches = matches[:numGoodMatches]

    # Draw top matches
    imMatches = cv2.drawMatches(imNew, keypoints1, imRef, keypoints2, matches, None)
    cv2.imwrite("matches.png", imMatches)

    # Extract location of good matches
    points1 = np.zeros((len(matches), 2), dtype=np.float32)
    points2 = np.zeros((len(matches), 2), dtype=np.float32)
    for i, match in enumerate(matches):
        points1[i, :] = keypoints1[match.queryIdx].pt
        points2[i, :] = keypoints2[match.trainIdx].pt

    # Find homography
    h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)

    # Use homography
    height, width, channels = imRef.shape
    imReg = cv2.warpPerspective(imNew, h, (width, height))
    return imReg, h

# Read reference image
refFilename = "SD5021-.png"
print("Reading reference image : ", refFilename)
imReference = cv2.imread(refFilename)
hh, ww = imReference.shape[:2]

# Read image to be aligned
imFilename = "SD5021A.png"
print("Reading image to align : ", imFilename)
im = cv2.imread(imFilename)

# The aligned image will be stored in imReg, the estimated homography in h.
imReg, h = alignImages(im, imReference)

# Print estimated homography
print("Estimated homography : \n", h)

# Convert images to grayscale
ref_gray = cv2.cvtColor(imReference, cv2.COLOR_BGR2GRAY)
im_gray = cv2.cvtColor(imReg, cv2.COLOR_BGR2GRAY)

# Otsu threshold
refThresh = cv2.threshold(ref_gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
imThresh = cv2.threshold(im_gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]

# apply morphology open and close
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
refThresh = cv2.morphologyEx(refThresh, cv2.MORPH_OPEN, kernel, iterations=1)
refThresh = cv2.morphologyEx(refThresh, cv2.MORPH_CLOSE, kernel, iterations=1).astype(np.float64)
imThresh = cv2.morphologyEx(imThresh, cv2.MORPH_OPEN, kernel, iterations=1).astype(np.float64)
imThresh = cv2.morphologyEx(imThresh, cv2.MORPH_CLOSE, kernel, iterations=1)

# get absolute difference between the two thresholded images
diff = np.abs(cv2.add(imThresh, -refThresh))

# apply morphology open to remove small regions caused by slight misalignment of the two images
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20,20))
diff_cleaned = cv2.morphologyEx(diff, cv2.MORPH_OPEN, kernel, iterations=1).astype(np.uint8)

# Filter using contour area and draw bounding boxes that do not touch the sides of the image
cnts = cv2.findContours(diff_cleaned, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
result = im.copy()
for c in cnts:
    x,y,w,h = cv2.boundingRect(c)
    if x > 0 and y > 0 and x+w < ww-1 and y+h < hh-1:
        cv2.rectangle(result, (x, y), (x+w, y+h), (0, 0, 255), 2)

# save images
cv2.imwrite('new_aligned.jpg', imReg)
cv2.imwrite('diff.png', diff_cleaned)
cv2.imwrite('results.png', result)
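For the third problem (merging small differences in close proximity into one larger boundary), one hedged option is to dilate the cleaned difference mask before finding contours, so nearby blobs join into a single region; the kernel size below is an assumption to tune for your drawings.
# join difference blobs that are close together before extracting boxes
merge_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (51, 51))
diff_merged = cv2.dilate(diff_cleaned, merge_kernel, iterations=1)

merged_result = im.copy()
cnts = cv2.findContours(diff_merged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    # keep boxes that do not touch the image border, as before
    if x > 0 and y > 0 and x + w < ww - 1 and y + h < hh - 1:
        cv2.rectangle(merged_result, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imwrite('results_merged.png', merged_result)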

Object Detection with OpenCV-Python

I am trying to detect all of the overlapping circle/ellipse shapes in this image, all of which have digits on them. I have tried different image processing techniques using OpenCV; however, I cannot detect the shapes that overlap the tree. I have tried erosion and dilation, but it has not helped.
Any pointers on how to go about this would be great. I have attached my code below
original = frame.copy()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
canny = cv2.Canny(blurred, 120, 255, 1)
kernel = np.ones((5, 5), np.uint8)
dilate = cv2.dilate(canny, kernel, iterations=1)

# Find contours
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

image_number = 0
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(frame, (x, y), (x + w, y + h), (36, 255, 12), 2)
    ROI = original[y:y + h, x:x + w]
    cv2.imwrite("ROI_{}.png".format(image_number), ROI)
    image_number += 1

cv2.imshow('canny', canny)
cv2.imshow('image', frame)
cv2.waitKey(0)
Here's a possible solution. I'm assuming that the target blobs (the saucer-like things) are always labeled - that is, they always have a white number inside them. The idea is to create a digits mask, because their size and color seem to be constant. I use the digits as a guide to obtain sample pixels from the ellipses. Then, I convert these BGR pixels to HSV, create a binary mask and use that info to threshold and locate the ellipses. Let's check out the code:
# imports:
import cv2
import numpy as np
# image path
path = "D://opencvImages//"
fileName = "4dzfr.png"
# Reading an image in default mode:
inputImage = cv2.imread(path + fileName)
# Deep copy for results:
inputImageCopy = inputImage.copy()
# Convert RGB to grayscale:
grayscaleImage = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
# Get binary image via a fixed threshold (the digits are close to white):
binaryImage = np.where(grayscaleImage >= 200, 255, 0)
# The above operation converted the image to 32-bit float,
# convert back to 8-bit uint
binaryImage = binaryImage.astype(np.uint8)
The first step is to make a mask of the digits. I also created a deep copy of the BGR image. The digits are close to white (That is, an intensity close to 255). I use 200 as threshold and obtain this result:
Now, let's locate these contours on this binary mask. I'm filtering based on aspect ratio, as the digits have a distinct aspect ratio close to 0.70. I'm also filtering contours based on hierarchy, as I'm only interested in external contours (the ones that do not have children). That's because I really don't need contours like the "holes" inside the digit 8:
# Find the contours on the binary image:
contours, hierarchy = cv2.findContours(binaryImage, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)

# Store the sampled pixels here:
sampledPixels = []

# Look for the outer bounding boxes (no children):
for i, c in enumerate(contours):
    # Get the contour bounding rectangle:
    boundRect = cv2.boundingRect(c)

    # Get the dimensions of the bounding rect:
    rectX = boundRect[0]
    rectY = boundRect[1]
    rectWidth = boundRect[2]
    rectHeight = boundRect[3]

    # Compute the aspect ratio:
    aspectRatio = rectWidth / rectHeight

    # Create the filtering threshold value:
    delta = abs(0.7 - aspectRatio)
    epsilon = 0.1

    # Get the hierarchy:
    currentHierarchy = hierarchy[0][i][3]

    # Prepare the list of sampling points (one for the ellipse, one for the circle):
    samplingPoints = [(rectX - rectWidth, rectY), (rectX, rectY - rectHeight)]

    # Look for the target contours:
    if delta < epsilon and currentHierarchy == -1:
        # This list will hold both sampling pixels:
        pixelList = []

        # Get sampling pixels from the two locations:
        for s in range(2):
            # Get sampling point:
            sampleX = samplingPoints[s][0]
            sampleY = samplingPoints[s][1]

            # Get sample BGR pixel:
            samplePixel = inputImageCopy[sampleY, sampleX]

            # Store into temp list:
            pixelList.append(samplePixel)

        # convert list to tuple:
        pixelList = tuple(pixelList)

        # Save pixel value:
        sampledPixels.append(pixelList)
Ok, there are a couple of things happening in the last snippet of code. We want to sample pixels from both the ellipse and the circle, so we use two sampling locations that are a function of each digit's original position. These positions are defined in the samplingPoints list. For the ellipse, I'm sampling a little before the top-right corner of the digit. For the circle, I'm sampling directly above the top-right corner - we end up with two pixels for each figure.
You'll notice I'm doing a little bit of data type juggling, converting lists to tuples. I want these pixels stored as a tuple for convenience. If I draw the bounding rectangles of the digits, this is the resulting image:
Now, let's loop through the pixel list, convert the pixels to HSV and create an HSV mask over the original BGR image. The final bounding rectangles of the ellipses are stored in boundingRectangles; additionally, I draw results on the deep copy of the original input:
# Final bounding rectangles are stored here:
boundingRectangles = []

# Loop through sampled pixels:
for p in range(len(sampledPixels)):
    # Get current pixel tuple:
    currentPixelTuple = sampledPixels[p]

    # Prepare the HSV mask:
    imageHeight, imageWidth = binaryImage.shape[:2]
    hsvMask = np.zeros((imageHeight, imageWidth), np.uint8)

    # Process the two sampling pixels:
    for m in range(len(currentPixelTuple)):
        # Get current pixel in the list:
        currentPixel = currentPixelTuple[m]

        # Create BGR Mat:
        pixelMat = np.zeros([1, 1, 3], dtype=np.uint8)
        pixelMat[0, 0] = currentPixel

        # Convert the BGR pixel to HSV:
        hsvPixel = cv2.cvtColor(pixelMat, cv2.COLOR_BGR2HSV)
        H = hsvPixel[0][0][0]
        S = hsvPixel[0][0][1]
        V = hsvPixel[0][0][2]

        # Create HSV range for this pixel:
        rangeThreshold = 5
        lowerValues = np.array([H - rangeThreshold, S - rangeThreshold, V - rangeThreshold])
        upperValues = np.array([H + rangeThreshold, S + rangeThreshold, V + rangeThreshold])

        # Create HSV mask:
        hsvImage = cv2.cvtColor(inputImage.copy(), cv2.COLOR_BGR2HSV)
        tempMask = cv2.inRange(hsvImage, lowerValues, upperValues)
        hsvMask = hsvMask + tempMask
First, I create a 1 x 1 matrix (a NumPy array) holding just one BGR pixel value - the first of the two I previously sampled. This way, I can use cv2.cvtColor to get the corresponding HSV values. Then, I create lower and upper threshold values for the HSV mask. However, the image seems synthetic, so the range-based thresholding could probably be reduced to a single tuple. After that, I create the HSV mask using cv2.inRange.
This yields the HSV mask for the ellipse. After applying the method for the circle, we end up with two HSV masks. I simply add the two arrays to combine both masks. At the end you will have something like this - the "composite" HSV mask created for the first saucer-like figure:
We can apply a little bit of morphology to join both shapes, just a little closing will do:
# Set kernel (structuring element) size:
kernelSize = 3
# Set morph operation iterations:
opIterations = 2
# Get the structuring element:
morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernelSize, kernelSize))
# Perform closing:
hsvMask = cv2.morphologyEx(hsvMask, cv2.MORPH_CLOSE, morphKernel, None, None, opIterations,cv2.BORDER_REFLECT101)
This is the result:
Nice. Let's continue and get the bounding rectangles of all the shapes. I'm using the boundingRectangles list to store each bounding rectangle, like this:
# Process the current mask's contours:
currentContour, _ = cv2.findContours(hsvMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for _, c in enumerate(currentContour):
    # Get the contour's bounding rectangle:
    boundRect = cv2.boundingRect(c)

    # Get the dimensions of the bounding rect:
    rectX = boundRect[0]
    rectY = boundRect[1]
    rectWidth = boundRect[2]
    rectHeight = boundRect[3]

    # Store and draw the bounding rect:
    boundingRectangles.append(boundRect)
    color = (0, 0, 255)
    cv2.rectangle(inputImageCopy, (int(rectX), int(rectY)),
                  (int(rectX + rectWidth), int(rectY + rectHeight)), color, 2)

cv2.imshow("Objects", inputImageCopy)
cv2.waitKey(0)
This is the image of the located rectangles once every sampled pixel is processed:
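If you also want to crop each detected saucer for further processing (for example, OCR of the digit), a small hedged sketch using the stored rectangles and the variables from the code above:
# crop every detected figure from the original input using the stored rectangles
for i, (rx, ry, rw, rh) in enumerate(boundingRectangles):
    crop = inputImage[ry:ry + rh, rx:rx + rw]
    cv2.imwrite("saucer_{}.png".format(i), crop)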

Trouble extracting information from a financial statement while keeping the original spacing using Tesseract OCR [duplicate]


Extracting a rectangle from a binary image using OpenCV

I am trying to crop an image using OpenCV and Python. I am following this tutorial: https://medium.com/coinmonks/a-box-detection-algorithm-for-any-image-containing-boxes-756c15d7ed26
My code so far:
import os
import cv2
import numpy as np

os.chdir("D:\\RTK\\")

# Read the image
img = cv2.imread('try.jpg', 0)
img1 = cv2.imread('try.jpg')

# Thresholding the image
(thresh, img_bin) = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

# Invert the image
img_bin = 255 - img_bin
#cv2.imwrite("Image_bin.jpg",img_bin)

# Defining a kernel length
kernel_length = np.array(img).shape[1]//80

# A vertical kernel of (1 X kernel_length), which will detect all the vertical lines in the image.
verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_length))
# A horizontal kernel of (kernel_length X 1), which will help detect all the horizontal lines in the image.
hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))
# A kernel of (3 X 3) ones.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

img_ttt = cv2.dilate(img_bin, kernel, iterations=1)
#cv2.imwrite("Image_bin_dil.jpg",img_ttt)

# Morphological operation to detect vertical lines from an image
img_temp1 = cv2.erode(img_ttt, verticle_kernel, iterations=3)
verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=3)
cv2.imwrite("verticle_lines.jpg", verticle_lines_img)

#minLineLength = 100
#maxLineGap = 10
#lines = cv2.HoughLinesP(verticle_lines_img,1,np.pi/180,100,minLineLength,maxLineGap)
#
#for line in lines:
#    for x1,y1,x2,y2 in line:
#        cv2.line(verticle_lines_img,(x1,y1),(x2,y2),(0,0,0),1)
#
#cv2.imwrite('Written_Back_Results.jpg',verticle_lines_img)

# Morphological operation to detect horizontal lines from an image
img_temp2 = cv2.erode(img_ttt, hori_kernel, iterations=3)
horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=3)
#cv2.imwrite("horizontal_lines.jpg",horizontal_lines_img)

# Weighting parameters; these decide how much of each image is added to make the new image.
alpha = 0.5
beta = 1.0 - alpha
# This function adds two images with specific weights to get a third image as the weighted sum of the two.
img_final_bin = cv2.addWeighted(verticle_lines_img, alpha, horizontal_lines_img, beta, 0.0)
img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)
(thresh, img_final_bin) = cv2.threshold(img_final_bin, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
cv2.imwrite("img_final_bin.jpg", img_final_bin)

# Find contours for the image, which will detect all the boxes
im2, contours, hierarchy = cv2.findContours(img_final_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

def sort_contours(cnts, method="left-to-right"):
    # initialize the reverse flag and sort index
    reverse = False
    i = 0
    # handle if we need to sort in reverse
    if method == "right-to-left" or method == "bottom-to-top":
        reverse = True
    # handle if we are sorting against the y-coordinate rather than
    # the x-coordinate of the bounding box
    if method == "top-to-bottom" or method == "bottom-to-top":
        i = 1
    # construct the list of bounding boxes and sort them
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
                                        key=lambda b: b[1][i], reverse=reverse))
    # return the list of sorted contours and bounding boxes
    return (cnts, boundingBoxes)

# Sort all the contours from left to right.
(contours, boundingBoxes) = sort_contours(contours, method="left-to-right")

idx = 0
for c in contours:
    # Returns the location and width,height for every contour
    x, y, w, h = cv2.boundingRect(c)
    idx += 1
    new_img = img1[y:y+h, x:x+w]
    per_h = (new_img.shape[0]/img1.shape[0])*100.0
    per_w = (new_img.shape[1]/img1.shape[1])*100.0
    if (per_h > 80 and per_w > 80) and (per_h < 100 and per_w < 100):
        cv2.imwrite(str(idx) + '.jpg', new_img)
Now I want to extract the rectangle marked in red, but the problem is that cv2.findContours is not finding that rectangle.
Kindly help me extract the rectangle marked in red in Image 1 from Image 2.
IMAGE 1
IMAGE 2
