Contour detection for an ID card - Python

I'm trying to detect the contour of an ID card, but it never works.
I tried four_point_transform, boundingRect, boundaries, active contours, and the Hough transform, all with the same result. The contour will be used to scan just the ID card.
The ID looks like this: here
This is how the code looks:
from trans import four_point_transform
from skimage.filters import threshold_local
import numpy as np
import cv2
import imutils
def edgeDetection(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)  # blur the grayscale image, not the original
    edged = cv2.Canny(gray, 150, 200)         # lower threshold first, then upper
    return edged
def detectrectarrondi(image, edged):
    orig = image.copy()
    gray = cv2.cvtColor(orig, cv2.COLOR_BGR2GRAY)
    edged = cv2.Canny(gray, 40, 50)
    orig_edged = edged.copy()
    # OpenCV 3.x returns (image, contours, hierarchy) here
    (_, contours, _) = cv2.findContours(orig_edged, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    # keep only the largest contour and draw its bounding box
    c = contours[0]
    (x, y, w, h) = cv2.boundingRect(c)
    screen = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return screen
def scan(screen, image):
    ratio = image.shape[0] / 500.0
    warped = four_point_transform(image, screen.reshape(4, 2) * ratio)
    warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    T = threshold_local(warped, 11, offset=10, method="gaussian")
    warped = (warped > T).astype("uint8") * 255
    return warped

As I can't write a comment because I don't have 50 reputation, I will give you some steps to follow here:
1/ Convert your image to grayscale using cvtColor.
2/ Apply GaussianBlur to reduce noise.
3/ Apply the Canny edge detector; you need to play with the lower and higher threshold values to get the best result.
4/ This step is not strictly needed but can be helpful: apply a morphological operation with MORPH_CLOSE to close incomplete contours.
5/ Find your contours using findContours.
6/ Loop through the found contours and draw the bounding rectangle of the one with the biggest area.
I hope this helps you; tell me if you want to see some code.
EDIT:
Imgproc.cvtColor(origMat, mGray, Imgproc.COLOR_BGR2GRAY);
Imgproc.GaussianBlur(mGray, mGray, new Size(5, 5), 5);
Imgproc.Canny(mGray, mGray, 30, 80, 3, false);
Mat kernell = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(9,9));
Imgproc.morphologyEx(mGray, mGray, Imgproc.MORPH_CLOSE, kernell);
Imgproc.dilate(mGray, mGray, Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3)));
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(mGray, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
MatOfPoint2f approxCurve = new MatOfPoint2f();
double largest_area=0;
int largest_contour_index=0;
Rect rect = new Rect();
for (int idx = 0; idx < contours.size(); idx++) {
    double a = Imgproc.contourArea(contours.get(idx)); // find the area of the contour
    if (a > largest_area) {
        largest_area = a;
        largest_contour_index = idx;
        rect = Imgproc.boundingRect(contours.get(idx));
    }
}
Imgproc.rectangle(origMat, rect.tl(), rect.br(), new Scalar(0, 255, 0));
return origMat;
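Since the original question is in Python, here is a rough, untested Python sketch of the same pipeline (the function name and the OpenCV 4.x-style findContours return are my assumptions):

import cv2
import numpy as np

def detect_card(image):
    # grayscale, blur, Canny, then close gaps so the card outline forms one contour
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 30, 80)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 9))
    closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)
    # OpenCV 4.x returns (contours, hierarchy)
    contours, _ = cv2.findContours(closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return image
    c = max(contours, key=cv2.contourArea)  # assume the biggest contour is the card
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return image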
You can have a look at this good answer to set Canny threshold values automatically using the median value of your image:
https://stackoverflow.com/a/41895229
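In short, the trick is to derive both thresholds from the image median; a minimal sketch (the sigma value of 0.33 is a commonly used default, not taken from the linked answer verbatim):

import cv2
import numpy as np

def auto_canny(gray, sigma=0.33):
    # place the hysteresis thresholds around the median intensity of the image
    med = np.median(gray)
    lower = int(max(0, (1.0 - sigma) * med))
    upper = int(min(255, (1.0 + sigma) * med))
    return cv2.Canny(gray, lower, upper)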

Related

Finding the coordinates of the edges on a rectangular object

I am trying to build a document scanner application from scratch using OpenCV and Python. So far I have done the following:
re-scaled the image
preprocessed the image: converted it to grayscale, applied Gaussian blur, applied an adaptive threshold, and finally used Canny edge detection
found the largest contour and drew it
detected the corners of the contour and drew them
Step 4 is where the problem is: I'm getting two of the points in the correct location, but two seem to be slightly offset.
I can't seem to understand what I'm doing wrong. Could this problem potentially be due to the way I have preprocessed the image?
import cv2
import numpy as np
# Function to resize the image
def Re_scaleImg(img):
    scale_percent = 50
    width = int(img.shape[1] * scale_percent / 100)
    height = int(img.shape[0] * scale_percent / 100)
    dim = (width, height)
    # resize the image
    resized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
    return resized
# Function to process the image
def process(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (3, 3), 0)
    thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
    edged = cv2.Canny(thresh, 75, 200)
    #cv2.imshow("blur", blur)
    #cv2.imshow("edged", thresh)
    return edged
# Function to find the areas of contours
def find_contourArea(contours):
    areas = []
    for cnt in contours:
        cont_area = cv2.contourArea(cnt)
        areas.append(cont_area)
    return areas
image = cv2.imread("receipt.jpeg")
resized = Re_scaleImg(image)
processed_img = process(resized)
# finding the contours
contours, hierarchy = cv2.findContours(processed_img.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
resized_copy1 = resized.copy()
# sorting the contours
sorted_contours = sorted(contours, key=cv2.contourArea, reverse=True)
largest_contour = sorted_contours[0]
epsilon = 0.01*cv2.arcLength(largest_contour, True)
approximation = cv2.approxPolyDP(largest_contour, epsilon, True)
cv2.drawContours(resized_copy1, [approximation], -1, (0, 255, 0), 3)
# Obtaining the corners of the rectangle
rot_rect = cv2.minAreaRect(largest_contour)
box = cv2.boxPoints(rot_rect)
box = np.int0(box)
for p in box:
    pt = (p[0], p[1])
    cv2.circle(resized_copy1, pt, 10, (255, 0, 0), -1)
    print(pt)
cv2.imshow("contours", resized_copy1)
cv2.waitKey(0)
Both the images are shown below:
the original image:
output image:
Instead of finding the full contour area, you could try to find the lines along the edges of the document instead.
With the Hough Line Transform you can find the four most prominent lines (the ones with the most votes).
From these lines you can then calculate the intersection points and use the four points closest to the center of the full shape as corner points.
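A rough, untested sketch of that approach, assuming an edge image from Canny is already available (the vote threshold and taking the four strongest lines as-is are my assumptions; near-duplicate lines would need extra filtering in practice):

import cv2
import numpy as np

def corner_candidates(edges):
    # HoughLines returns lines sorted by votes; take the four strongest (rho, theta) pairs
    lines = cv2.HoughLines(edges, 1, np.pi / 180, 120)
    if lines is None or len(lines) < 4:
        return []
    strongest = [l[0] for l in lines[:4]]
    points = []
    # intersect every pair of lines given in normal form: x*cos(t) + y*sin(t) = rho
    for i in range(len(strongest)):
        for j in range(i + 1, len(strongest)):
            r1, t1 = strongest[i]
            r2, t2 = strongest[j]
            A = np.array([[np.cos(t1), np.sin(t1)],
                          [np.cos(t2), np.sin(t2)]])
            b = np.array([r1, r2])
            if abs(np.linalg.det(A)) > 1e-6:  # skip (nearly) parallel lines
                x, y = np.linalg.solve(A, b)
                points.append((int(x), int(y)))
    return points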

How to rotate an image to align the text for extraction?

I am using pytesseract to extract the text from images, but it doesn't work on images that are inclined. Consider the image given below:
Here is the code to extract text, which works fine on images that are not inclined.
img = cv2.imread(<path_to_image>)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5),0)
ret3, thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
def findSignificantContours(img, edgeImg):
    contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Find level 1 contours
    level1 = []
    for i, tupl in enumerate(hierarchy[0]):
        # Each array is in format (Next, Prev, First child, Parent)
        # Filter the ones without parent
        if tupl[3] == -1:
            tupl = np.insert(tupl, 0, [i])
            level1.append(tupl)
    significant = []
    tooSmall = edgeImg.size * 5 / 100  # If contour isn't covering 5% of total area of image then it probably is too small
    for tupl in level1:
        contour = contours[tupl[0]]
        area = cv2.contourArea(contour)
        if area > tooSmall:
            significant.append([contour, area])
            # Draw the contour on the original image
            cv2.drawContours(img, [contour], 0, (0, 255, 0), 2, cv2.LINE_AA, maxLevel=1)
    significant.sort(key=lambda x: x[1])
    #print ([x[1] for x in significant])
    mx = (0, 0, 0, 0)  # biggest bounding box so far
    mx_area = 0
    for cont in contours:
        x, y, w, h = cv2.boundingRect(cont)
        area = w * h
        if area > mx_area:
            mx = x, y, w, h
            mx_area = area
    x, y, w, h = mx
    # Output to files
    roi = img[y:y+h, x:x+w]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    ret3, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2_imshow(thresh)
    text = pytesseract.image_to_string(roi)
    print(text); print("\n"); print(pytesseract.image_to_string(thresh))
    print("\n")
    return [x[0] for x in significant]
edgeImg_8u = np.asarray(thresh, np.uint8)
# Find contours
significant = findSignificantContours(img, edgeImg_8u)
mask = thresh.copy()
mask[mask > 0] = 0
cv2.fillPoly(mask, significant, 255)
# Invert mask
mask = np.logical_not(mask)
#Finally remove the background
img[mask] = 0;
Tesseract can't extract the text from this image. Is there a way I can rotate it to align the text perfectly and then feed it to pytesseract? Please let me know if my question requires any more clarity.
Here's a simple approach:
Obtain binary image. Load the image, convert to grayscale, apply Gaussian blur, then Otsu's threshold.
Find contours and sort for the largest contour. We find contours, then filter using contour area with cv2.contourArea() to isolate the rectangular contour.
Perform perspective transform. Next we perform contour approximation with cv2.approxPolyDP() to obtain the rectangular contour. Finally we utilize imutils.perspective.four_point_transform to obtain the bird's-eye view of the image.
Binary image
Result
To actually extract the text, take a look at
Use pytesseract OCR to recognize text from an image
Cleaning image for OCR
Detect text area in an image using python and opencv
Code
from imutils.perspective import four_point_transform
import cv2
import numpy
# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread("1.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Find contours and sort for largest contour
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
displayCnt = None
for c in cnts:
    # Perform contour approximation
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        displayCnt = approx
        break
# Obtain birds' eye view of image
warped = four_point_transform(image, displayCnt.reshape(4, 2))
cv2.imshow("thresh", thresh)
cv2.imshow("warped", warped)
cv2.waitKey()
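To go one step further and read the text off the warped result, a minimal hedged sketch (assuming pytesseract is installed; the psm setting is just a common choice, not part of the original answer):

import pytesseract

# OCR the deskewed image; --psm 6 treats it as a single uniform block of text
text = pytesseract.image_to_string(warped, config="--psm 6")
print(text)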
To solve this problem you can also use the minAreaRect API in OpenCV, which will give you a minimum-area rotated rectangle with an angle of rotation. You can then get the rotation matrix and apply warpAffine to the image to straighten it. I have also attached a Colab notebook which you can play around with.
Colab notebook : https://colab.research.google.com/drive/1SKxrWJBOHhGjEgbR2ALKxl-dD1sXIf4h?usp=sharing
import cv2
from google.colab.patches import cv2_imshow
import numpy as np
def rotate_image(image, angle):
    image_center = tuple(np.array(image.shape[1::-1]) / 2)
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
    return result
img = cv2.imread("/content/sxJzw.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
mask = np.zeros((img.shape[0], img.shape[1]))
blur = cv2.GaussianBlur(gray, (5,5),0)
ret, thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2_imshow(thresh)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
largest_countour = max(contours, key = cv2.contourArea)
binary_mask = cv2.drawContours(mask, [largest_countour], 0, 1, -1)
new_img = img * np.dstack((binary_mask, binary_mask, binary_mask))
minRect = cv2.minAreaRect(largest_countour)
rotate_angle = minRect[-1] if minRect[-1] < 0 else -minRect[-1]
new_img = rotate_image(new_img, rotate_angle)
cv2_imshow(new_img)

Python number recognition (on colored screen)

I searched for image recognition using Python. It seems there is no tutorial about extracting numbers from a colored background, so I followed THIS TUTORIAL.
import cv2
import matplotlib.pyplot as plt
def detect_edge(image):
    ''' function Detecting Edges '''
    image_with_edges = cv2.Canny(image, 100, 200)
    images = [image, image_with_edges]
    location = [121, 122]
    for loc, img in zip(location, images):
        plt.subplot(loc)
        plt.imshow(img, cmap='gray')
    plt.savefig('edge.png')
    plt.show()
image = cv2.imread('myscreenshot.png', 0)
detect_edge(image)
This is my image:
This is the result:
Any solution to print out these numbers?
Here is some code for getting clean canny edges for this image.
import cv2
import numpy as np
# load image
img = cv2.imread("numbers.png");
# change to hue colorspace
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV);
h,s,v = cv2.split(hsv);
# use clahe to improve contrast
# (the contrast is pretty good already, so not much change, but good habit to have here)
clahe = cv2.createCLAHE(clipLimit = 10)
contrast = clahe.apply(v);
# use canny
canny = cv2.Canny(contrast, 20, 110);
# show
cv2.imshow('i', img);
cv2.imshow('v', v);
cv2.imshow('c', contrast);
cv2.imshow("canny", canny);
cv2.waitKey(0);
# save
cv2.imwrite("edges.png", canny);
Without using any OCR like pytesseract or something, I don't see an obvious way to consistently turn this image into "text" numbers. I'll leave that for someone else who might know how to solve it without any pattern recognition stuff, because I don't even know where to begin without that. If you're willing to forgo that restriction then pytesseract should have no problem with this, possibly even without doing processing like this.
Ok, I filled in the numbers for the image. OpenCV's findContours hierarchy wasn't cooperating for some reason, so I had to do it manually, which makes this code pretty janky. Honestly, if I were to try this again from scratch, I'd try to find colors that contribute to a small number of total pixels, threshold on each, and combine the masks (a rough sketch of that idea follows the code below).
import cv2
import numpy as np
# check if small box is in big box
def contained(big, small):
    # big corners
    x, y, w, h = big;
    big_tl = [x, y];
    big_br = [x+w, y+h];
    # small corners
    x, y, w, h = small;
    small_tl = [x, y];
    small_br = [x+w, y+h];
    # check
    if small_tl[0] > big_tl[0] and small_br[0] < big_br[0]:
        if small_tl[1] > big_tl[1] and small_br[1] < big_br[1]:
            return True;
    return False;
# load image
img = cv2.imread("numbers.png");
# change to hue colorspace
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV);
h,s,v = cv2.split(hsv);
# use clahe to improve contrast
# (the contrast is pretty good already, so not much change, but good habit to have here)
clahe = cv2.createCLAHE(clipLimit = 10)
contrast = clahe.apply(v);
# rescale
scale = 2.0;
h, w = img.shape[:2];
h = int(h * scale);
w = int(w * scale);
contrast = cv2.resize(contrast, (w,h), cv2.INTER_LINEAR);
img = cv2.resize(img, (w,h), cv2.INTER_LINEAR);
# use canny
canny = cv2.Canny(contrast, 10, 60);
# show
cv2.imshow('i', img);
cv2.imshow('v', v);
cv2.imshow('c', contrast);
cv2.imshow("canny", canny);
cv2.waitKey(0);
# try to fill in contours
# contours
_, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE);
# filter contours by size
# filter out noisy bits and the big grid boxes
filtered = [];
for contour in contours:
    perimeter = cv2.arcLength(contour, True);
    if 50 < perimeter and perimeter < 750:
        filtered.append(contour);
# draw contours again
# create a mask of the contoured image
mask = np.zeros_like(contrast);
mask = cv2.drawContours(mask, filtered, -1, 255, -1);
# close to get rid of annoying little gaps
kernel = np.ones((3,3),np.uint8)
mask = cv2.dilate(mask,kernel,iterations = 1);
mask = cv2.erode(mask,kernel, iterations = 1);
# contours
_, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE);
# alright, hierarchy is being stupid, plan B
# SUUUUUPEEERRR JAAAANK
outer_cntrs = [a for a in range(len(contours))];
children = [];
for a in range(len(contours)):
    if a in outer_cntrs:
        # get current box
        big_box = cv2.boundingRect(contours[a]);
        # check against all other boxes
        for b in range(0, len(contours)):
            if b in outer_cntrs:
                small_box = cv2.boundingRect(contours[b]);
                # remove any children
                if contained(big_box, small_box):
                    outer_cntrs.remove(b);
                    children.append(contours[b]);
# # select by hierarchy
top_cntrs = [];
for a in range(len(contours)):
    if a in outer_cntrs:
        top_cntrs.append(contours[a]);
# create a mask of the contoured image
mask = np.zeros_like(contrast);
mask = cv2.drawContours(mask, top_cntrs, -1, 255, -1);
mask = cv2.drawContours(mask, children, -1, 255, -1);
# close
kernel = np.ones((3,3),np.uint8)
mask = cv2.dilate(mask,kernel,iterations = 1);
mask = cv2.erode(mask,kernel, iterations = 1);
# do contours agains because opencv is being super difficult
# honestly, at this point, a fill method would've been better
# contours
_, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE);
# fill in
for con in contours:
    cv2.fillPoly(mask, pts=[con], color=(255));
for con in children:
    cv2.fillPoly(mask, pts=[con], color=(0));
# resize back down
h, w = mask.shape;
h = int(h / scale);
w = int(w / scale);
mask = cv2.resize(mask, (w,h));
# show
cv2.imshow("mask", mask);
cv2.waitKey(0);
# save
cv2.imwrite("filled.png", mask);
You can find the digits in three steps:
Applying an adaptive threshold
Applying erosion
Reading with pytesseract
Adaptive-threshold result:
Here we see that 9 and 0 are different from the rest of the digits. We need to remove the boundary around the 9.
Erosion result:
Pytesseract result:
8 | 1
5 9
4 #
3 | 3
6 | 1
There are multiple page segmentation modes available for pytesseract.
If you want to remove the | characters from the output you can use re.sub:
text = re.sub('[^A-Za-z0-9]+', ',', text)
Result will be:
8
1
5
9
4
3
3
6
1
Code:
import cv2
import pytesseract
import re
import numpy as np
image = cv2.imread("7UUGYHw.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 13, 2)
erode = cv2.erode(thresh, np.array((7, 7)), iterations=1)
text = pytesseract.image_to_string(erode, config="--psm 6")
text = re.sub('[^A-Za-z0-9]+', '\n', text)
print(text)

Can I distinguish between a square and a lozenge with contour in OpenCV

I want to detect shapes with OpenCV and Python,
so I chose Contour Features, but now I have a problem: how can I distinguish between a square and a lozenge using OpenCV and Python?
If there is another method, can you tell me please? My image looks like this:
Here is my code:
#-*- coding: utf-8 -*-
import cv2
import numpy as np
from pyimagesearch.shapedetector import ShapeDetector
import argparse
import imutils
from scipy import ndimage
import math
import matplotlib.pyplot as plt
from skimage import io, morphology, img_as_bool, segmentation
global limit
# cv2.threshold(src, thresh, maxval, type[, dst])
import math
def angle(pt1, pt2):
    x1, y1 = pt1
    x2, y2 = pt2
    inner_product = x1*x2 + y1*y2
    len1 = math.hypot(x1, y1)
    len2 = math.hypot(x2, y2)
    return math.acos(inner_product/(len1*len2))
def calculate(pt, ls):
    i = 2
    for x in ls:
        pt2 = (x, i)
        i = i+1
        ang = angle(pt, pt2)*180/math.pi
        ang = ang * (-1)
        print (ang)
Image = cv2.imread("114.png")
# Extraction of Blue channel
b = Image[:,:,0]
# Callback function for the trackbar (does not do any work)
def nothing(*arg):
    pass
# Generate trackbar Window Name
TrackbarName = "Trackbar"
# Make Window and Trackbar
cv2.namedWindow("window", cv2.WINDOW_NORMAL)
cv2.createTrackbar(TrackbarName, "window", 0, 250, nothing)
img_threshed = np.zeros(b.shape, np.uint8)
ret,img_threshed = cv2.threshold(b,168,255,cv2.THRESH_BINARY)
cv2.imshow("window55", img_threshed)
# Expanding borders of the objects
kernel = np.ones((9, 9),np.uint8)
img_dilated = cv2.dilate(img_threshed, kernel)
cv2.namedWindow("Dilated Blue Channel", cv2.WINDOW_NORMAL)
cv2.imshow("Dilated Blue Channel", img_dilated)
# Retrieving contours by subtraction base objects from the expanded objects
img_contours = img_dilated - img_threshed
cv2.namedWindow("Contours", cv2.WINDOW_NORMAL)
cv2.imshow("Contours", img_contours)
median = cv2.medianBlur(img_contours,3)
cv2.imshow("median img_threshed", median)
#_, contours0, hierarchy = cv2.findContours( median, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#cnts = [cv2.approxPolyDP(cnt, 2, True) for cnt in contours0]
gray = cv2.imread('114.png')
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
# apply Canny edge detection using a wide threshold, tight
# threshold, and automatically determined threshold
wide = cv2.Canny(blurred, 90, 150)
cnts = cv2.findContours(img_contours, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#----Find contour in the image----
_, contours, hierarchy = cv2.findContours(img_contours, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
# loop over the contours
for c in cnts:
    #----Draw a rectangle having minimum area around it using Contour features as you mentioned----
    rect = cv2.minAreaRect(c)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    im = cv2.drawContours(Image, [box], 0, (0,0,255), 2)
    #----Draw one diagonal ----
    #cv2.line(Image,(box[2][0],box[2][1]),(box[0][0],box[0][1]), (255,0,0),2)
    #cv2.line(Image,(0,10),(Image.shape[1], 10), (255,255,0),2)
    #calculate(cv2.line(Image,(box[2][0],box[2][1]),(box[0][0],box[0][1]), (255,0,0),2),cv2.line(Image,(0,10),(Image.shape[1], 10), (255,255,0),2))
cv2.imwrite("Final_Image.jpg", Image)
# show the output image
cv2.imshow("Image", Image)
cv2.waitKey(0)
cv2.destroyAllWindows()
As mentioned in the comments section, if you want to distinguish an apparent square from a lozenge, the only property that is distinct is the diagonals.
Using Python with OpenCV, I coded the following to obtain one diagonal for the square and the lozenge:
#----Find contour in the image----
_, contours, hierarchy = cv2.findContours(th, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#----Draw a rectangle having minimum area around it using Contour features as you mentioned----
rect = cv2.minAreaRect(contours[0]) #---I used contours[0] since there is only one contour in the image----
box = cv2.boxPoints(rect)
box = np.int0(box)
im = cv2.drawContours(im1, [box], 0, (0,0,255), 2)
#----Draw one diagonal ----
cv2.line(im1,(box[2][0],box[2][1]),(box[0][0],box[0][1]), (255,0,0),2)
cv2.imwrite("Final_Image.jpg", im1)
This is what I get:
SQUARE:
LOZENGE:
Now, since you have obtained the diagonal, you have to compare it with a reference line to find the angle, in order to determine whether the shape is a square or not.
For that, first draw a reference line (I considered a horizontal line):
cv2.line(im1,(0,10),(im1.shape[1], 10), (255,255,0),2)
You will get :
SQUARE:
LOZENGE:
Now you just have to calculate the angle between these two lines (the diagonal and the reference line):
If the angle is 90 degrees or 0 => lozenge.
Otherwise => square.
How do you calculate angles between two lines?
See THIS POST
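For completeness, a hedged sketch of that angle test using the box points from cv2.boxPoints (the tolerance value is my own assumption):

import math

def classify_box(box, tol_deg=10):
    # angle of the diagonal box[0] -> box[2] relative to the horizontal axis
    dx = box[2][0] - box[0][0]
    dy = box[2][1] - box[0][1]
    ang = abs(math.degrees(math.atan2(dy, dx))) % 180
    # diagonal roughly horizontal or vertical -> lozenge, otherwise -> square
    if ang < tol_deg or abs(ang - 90) < tol_deg or ang > 180 - tol_deg:
        return "lozenge"
    return "square"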

How to extract white region in an image

I have a sample image like this
I'm looking for a way to black out the noise from the image such that I end up with an image that just has black text on white background so that I may send it to tesseract.
I've tried morphing with
kernel = np.ones((4,4),np.uint8)
opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
cv2.imshow("opening", opening)
but it doesn't seem to work.
I've also tried to find contours
img = cv2.cvtColor(rotated, cv2.COLOR_BGR2GRAY)
(cnts, _) = cv2.findContours(img, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    roi = rotated[y:y+h, x:x+w].copy()
    cv2.imwrite("roi.png", roi)
With the above code, I get the following contours:
which leads to this image when cropped:
which is still not good enough. I want black text on a white background so that I can send it to Tesseract OCR and have a good success rate.
Is there anything else I can try?
Update
Here is an additional similar image. This one is a bit easier because it has a smooth rectangle in it
The following works for your given example, although it might need tweaking for a wider range of images.
import numpy as np
import cv2
image_src = cv2.imread("input.png")
gray = cv2.cvtColor(image_src, cv2.COLOR_BGR2GRAY)
ret, gray = cv2.threshold(gray, 250,255,0)
image, contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
largest_area = sorted(contours, key=cv2.contourArea)[-1]
mask = np.zeros(image_src.shape, np.uint8)
cv2.drawContours(mask, [largest_area], 0, (255,255,255,255), -1)
dst = cv2.bitwise_and(image_src, mask)
mask = 255 - mask
roi = cv2.add(dst, mask)
roi_gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
ret, gray = cv2.threshold(roi_gray, 250,255,0)
image, contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
max_x = 0
max_y = 0
min_x = image_src.shape[1]
min_y = image_src.shape[0]
for c in contours:
    if 150 < cv2.contourArea(c) < 100000:
        x, y, w, h = cv2.boundingRect(c)
        min_x = min(x, min_x)
        min_y = min(y, min_y)
        max_x = max(x+w, max_x)
        max_y = max(y+h, max_y)
roi = roi[min_y:max_y, min_x:max_x]
cv2.imwrite("roi.png", roi)
Giving you the following type of output images:
And...
The code works by first locating the largest contour area. From this a mask is created which is used to first select only the area inside, i.e. the text. The inverse of the mask is then added to the image to convert the area outside the mask to white.
Lastly, contours are found again for this new image. Any contour areas outside a suitable size range are discarded (this is used to ignore any small noise areas), and a bounding rect is found for each. An outer bounding rect is then calculated over all of the remaining contours, and a crop is made using these values to give the final image.
Update - To get the remainder of the image, i.e. with the above area removed, the following could be used:
image_src = cv2.imread("input.png")
gray = cv2.cvtColor(image_src, cv2.COLOR_BGR2GRAY)
ret, gray = cv2.threshold(gray, 10, 255,0)
image, contours, hierarchy = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
largest_area = sorted(contours, key=cv2.contourArea)[-1]
mask = np.zeros(image_src.shape, np.uint8)
cv2.drawContours(mask, [largest_area], 0, (255,255,255,255), -1)
image_remainder = cv2.bitwise_and(image_src, 255 - mask)
cv2.imwrite("remainder.png", image_remainder)
I get this:
Result
Source Code:
# note: this snippet uses OpenCV 2.4-era constants (cv2.cv.*, cv2.CV_LOAD_IMAGE_GRAYSCALE)
import copy
import cv2
import numpy as np

if __name__ == '__main__':
    SrcImg = cv2.imread('./Yahi9.png', cv2.CV_LOAD_IMAGE_GRAYSCALE)
    _, BinImg = cv2.threshold(SrcImg, 80, 255, cv2.THRESH_OTSU)
    Contours, Hierarchy = cv2.findContours(image=copy.deepcopy(SrcImg),
                                           mode=cv2.cv.CV_RETR_EXTERNAL,
                                           method=cv2.cv.CV_CHAIN_APPROX_NONE)
    MaxContour, _ = getMaxContour(Contours)
    Canvas = np.ones(SrcImg.shape, np.uint8)
    cv2.drawContours(image=Canvas, contours=[MaxContour], contourIdx=0, color=(255), thickness=-1)
    mask = (Canvas != 255)
    RoiImg = copy.deepcopy(BinImg)
    RoiImg[mask] = 255
    RoiImg = cv2.morphologyEx(src=RoiImg, op=cv2.MORPH_CLOSE, kernel=np.ones((3,3)), iterations=4)
    cv2.imshow('RoiImg', RoiImg)
    cv2.waitKey(0)
Function:
def getMaxContour(contours):
    MaxArea = 0
    Location = 0
    for idx in range(0, len(contours)):
        Area = cv2.contourArea(contours[idx])
        if Area > MaxArea:
            MaxArea = Area
            Location = idx
    MaxContour = np.array(contours[Location])
    return MaxContour, MaxArea
Ehh, it's Python code.
It only works when the white region is the largest contour.
The basic idea of this answer is to use the border around the text.
1) Erode horizontally with a very large kernel, say 100 px wide or about 8 times the size of a single expected character. It should be done row-wise. The extreme ordinates will give the y-locations of the boundaries around the text.
2) Process vertically the same way to get the x-locations of the boundaries around the text. Then use these locations to crop out the image you want; a rough sketch is shown after the demos below.
-- One benefit of this method is that you will get every sentence/word segmented separately, which, I presume, is good for OCR.
Happy coding :)
Edited in by Mark Setchell
Here is a demo of 1)
Here is a demo of 2)
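A minimal, untested sketch of that idea, assuming dark text on a light background (the kernel widths and the darkness cutoff are my assumptions):

import cv2
import numpy as np

def text_bounds(gray):
    # grayscale erosion takes a local minimum, so a wide kernel smears the dark
    # characters into horizontal (or vertical) bands
    horiz = cv2.erode(gray, np.ones((1, 100), np.uint8))
    vert = cv2.erode(gray, np.ones((100, 1), np.uint8))
    # rows / columns whose darkest pixel is well below the background contain text
    rows = np.where(horiz.min(axis=1) < 128)[0]
    cols = np.where(vert.min(axis=0) < 128)[0]
    if len(rows) == 0 or len(cols) == 0:
        return None
    return rows[0], rows[-1], cols[0], cols[-1]  # y0, y1, x0, x1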
