How to crop the biggest object in an image with Python OpenCV?

I want to crop the biggest object in the image (the characters). This code only works if there is no line (shown in the first image), but I need to ignore the line and produce the second image, cropping only the biggest object.
import cv2
import numpy as np
x1, y1, w1, h1 = (0,0,0,0)
points = 0
# load image
img = cv2.imread('Image.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grayscale
# threshold to get just the signature
retval, thresh_gray = cv2.threshold(gray, thresh=100, maxval=255, type=cv2.THRESH_BINARY)
# find where the signature is and make a cropped region
points = np.argwhere(thresh_gray==0) # find where the black pixels are
points = np.fliplr(points) # store them in x,y coordinates instead of row,col indices
x, y, w, h = cv2.boundingRect(points) # create a rectangle around those points
crop = img[y:y+h, x:x+w]
cv2.imshow('save.jpg', crop)
cv2.waitKey(0)
Input
Output:

You can use function findContours to do this.
For example, like this:
#!/usr/bin/env python
import cv2
import numpy as np
# load image
img = cv2.imread('Image.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grayscale
# threshold to get just the signature (INVERTED)
retval, thresh_gray = cv2.threshold(gray, thresh=100, maxval=255,
                                    type=cv2.THRESH_BINARY_INV)
image, contours, hierarchy = cv2.findContours(thresh_gray, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_SIMPLE)
# Find object with the biggest bounding box
mx = (0,0,0,0)  # biggest bounding box so far
mx_area = 0
for cont in contours:
    x, y, w, h = cv2.boundingRect(cont)
    area = w * h
    if area > mx_area:
        mx = x, y, w, h
        mx_area = area
x, y, w, h = mx
# Output to files
roi=img[y:y+h,x:x+w]
cv2.imwrite('Image_crop.jpg', roi)
cv2.rectangle(img,(x,y),(x+w,y+h),(200,0,0),2)
cv2.imwrite('Image_cont.jpg', img)
Note that I used THRESH_BINARY_INV instead of THRESH_BINARY.
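The reason is that findContours treats nonzero (white) pixels as foreground, so the dark characters have to end up white in the binary image; THRESH_BINARY_INV does that in one step. If you already have a THRESH_BINARY result, an equivalent two-step sketch is to invert it afterwards:
# Equivalent to THRESH_BINARY_INV: threshold normally, then invert
retval, thresh_gray = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)
thresh_gray = cv2.bitwise_not(thresh_gray)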
Image_cont.jpg:
Image_crop.jpg:
You can also use this with skewed rectangles, as @Jello pointed out. Unlike the simpler solution above, this will correctly filter out diagonal lines.
For example:
#!/usr/bin/env python
import cv2
import numpy as np
# load image
img = cv2.imread('Image2.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grayscale
# threshold to get just the signature (INVERTED)
retval, thresh_gray = cv2.threshold(gray, thresh=100, maxval=255,
                                    type=cv2.THRESH_BINARY_INV)
image, contours, hierarchy = cv2.findContours(thresh_gray, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_SIMPLE)
def crop_minAreaRect(img, rect):
    # Source: https://stackoverflow.com/questions/37177811/
    # rotate img
    angle = rect[2]
    rows, cols = img.shape[0], img.shape[1]
    matrix = cv2.getRotationMatrix2D((cols/2, rows/2), angle, 1)
    img_rot = cv2.warpAffine(img, matrix, (cols, rows))
    # rotate bounding box
    box = cv2.boxPoints(rect)
    pts = np.int0(cv2.transform(np.array([box]), matrix))[0]
    pts[pts < 0] = 0
    # crop and return
    return img_rot[pts[1][1]:pts[0][1], pts[1][0]:pts[2][0]]
# Find object with the biggest bounding box
mx_rect = (0,0,0,0)  # biggest skewed bounding box so far
mx_area = 0
for cont in contours:
    arect = cv2.minAreaRect(cont)
    area = arect[1][0] * arect[1][1]
    if area > mx_area:
        mx_rect, mx_area = arect, area
# Output to files
roi = crop_minAreaRect(img, mx_rect)
cv2.imwrite('Image_crop.jpg', roi)
box = cv2.boxPoints(mx_rect)
box = np.int0(box)
cv2.drawContours(img,[box],0,(200,0,0),2)
cv2.imwrite('Image_cont.jpg', img)
Image2.png (the input image):
Image_cont.jpg:
Image_crop.jpg:
If you use opencv-python 4.x, change image, contours, hierarchy to just contours, hierarchy.
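If you need one snippet that runs unchanged on both OpenCV 3.x and 4.x, a common hedge is to take the second-to-last return value, since both versions return the contours there:
# OpenCV 3.x returns (image, contours, hierarchy); 4.x returns (contours, hierarchy)
result = cv2.findContours(thresh_gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = result[-2]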

Related

How to rotate an image to align the text for extraction?

I am using pytesseract to extract the text from images, but it doesn't work on images which are inclined. Consider the image given below:
Here is the code to extract text, which is working fine on images which are not inclined.
import cv2
import numpy as np
import pytesseract
from google.colab.patches import cv2_imshow  # Colab's display helper, used below

img = cv2.imread(<path_to_image>)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
ret3, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

def findSignificantContours(img, edgeImg):
    contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Find level 1 contours
    level1 = []
    for i, tupl in enumerate(hierarchy[0]):
        # Each array is in format (Next, Prev, First child, Parent)
        # Filter the ones without parent
        if tupl[3] == -1:
            tupl = np.insert(tupl, 0, [i])
            level1.append(tupl)
    significant = []
    tooSmall = edgeImg.size * 5 / 100  # a contour covering less than 5% of the image is probably too small
    for tupl in level1:
        contour = contours[tupl[0]]
        area = cv2.contourArea(contour)
        if area > tooSmall:
            significant.append([contour, area])
            # Draw the contour on the original image
            cv2.drawContours(img, [contour], 0, (0,255,0), 2, cv2.LINE_AA, maxLevel=1)
    significant.sort(key=lambda x: x[1])
    #print([x[1] for x in significant])
    mx = (0,0,0,0)  # biggest bounding box so far
    mx_area = 0
    for cont in contours:
        x, y, w, h = cv2.boundingRect(cont)
        area = w * h
        if area > mx_area:
            mx = x, y, w, h
            mx_area = area
    x, y, w, h = mx
    # Output to files
    roi = img[y:y+h, x:x+w]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5,5), 0)
    ret3, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2_imshow(thresh)
    text = pytesseract.image_to_string(roi)
    print(text)
    print("\n")
    print(pytesseract.image_to_string(thresh))
    print("\n")
    return [x[0] for x in significant]

edgeImg_8u = np.asarray(thresh, np.uint8)
# Find contours
significant = findSignificantContours(img, edgeImg_8u)
mask = thresh.copy()
mask[mask > 0] = 0
cv2.fillPoly(mask, significant, 255)
# Invert mask
mask = np.logical_not(mask)
# Finally remove the background
img[mask] = 0
Tesseract can't extract the text from this image. Is there a way I can rotate it to align the text perfectly and then feed it to pytesseract? Please let me know if my question requires any more clarity.
Here's a simple approach:
Obtain binary image. Load the image, convert to grayscale, apply a Gaussian blur, then Otsu's threshold.
Find contours and sort for the largest contour. We find contours, then filter using contour area with cv2.contourArea() to isolate the rectangular contour.
Perform perspective transform. Next we perform contour approximation with cv2.approxPolyDP() to obtain the rectangular contour. Finally we utilize imutils.perspective.four_point_transform to obtain the bird's-eye view of the image.
Binary image
Result
To actually extract the text, take a look at
Use pytesseract OCR to recognize text from an image
Cleaning image for OCR
Detect text area in an image using python and opencv
Code
from imutils.perspective import four_point_transform
import cv2
# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread("1.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Find contours and sort for largest contour
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
displayCnt = None
for c in cnts:
    # Perform contour approximation
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        displayCnt = approx
        break
# Obtain bird's-eye view of image
warped = four_point_transform(image, displayCnt.reshape(4, 2))
cv2.imshow("thresh", thresh)
cv2.imshow("warped", warped)
cv2.waitKey()
To solve this problem you can also use the minAreaRect API in OpenCV, which will give you a minimum-area rotated rectangle together with its angle of rotation. You can then get the rotation matrix and apply warpAffine to straighten the image. I have also attached a Colab notebook which you can play around with.
Colab notebook : https://colab.research.google.com/drive/1SKxrWJBOHhGjEgbR2ALKxl-dD1sXIf4h?usp=sharing
import cv2
from google.colab.patches import cv2_imshow
import numpy as np
def rotate_image(image, angle):
    image_center = tuple(np.array(image.shape[1::-1]) / 2)
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
    return result
img = cv2.imread("/content/sxJzw.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
mask = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)
blur = cv2.GaussianBlur(gray, (5,5),0)
ret, thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2_imshow(thresh)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
largest_contour = max(contours, key=cv2.contourArea)
binary_mask = cv2.drawContours(mask, [largest_contour], 0, 1, -1)
new_img = img * np.dstack((binary_mask, binary_mask, binary_mask))
minRect = cv2.minAreaRect(largest_contour)
rotate_angle = minRect[-1] if minRect[-1] < 0 else -minRect[-1]
new_img = rotate_image(new_img, rotate_angle)
cv2_imshow(new_img)
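One caveat not covered in the notebook: the angle convention of cv2.minAreaRect changed in OpenCV 4.5, from [-90, 0) to (0, 90], so the sign flip above may behave differently across versions. A small sketch that normalizes the angle to (-45, 45], assuming OpenCV 4.5 or newer:
# Assumes OpenCV >= 4.5, where minAreaRect reports angles in (0, 90]
(w, h), angle = minRect[1], minRect[2]
if angle > 45:
    angle -= 90  # map (45, 90] to (-45, 0] so we rotate the short way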

Is there any way to crop an image inside a box?

I want to crop the image only inside the box or rectangle. I tried so many approaches but nothing worked.
import cv2
import numpy as np
img = cv2.imread("C:/Users/hp/Desktop/segmentation/add.jpeg", 0);
h, w = img.shape[:2]
# print(img.shape)
kernel = np.ones((3,3),np.uint8)
img2 = img.copy()
img2 = cv2.medianBlur(img2,5)
img2 = cv2.adaptiveThreshold(img2, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                             cv2.THRESH_BINARY, 11, 2)
img2 = 255 - img2
img2 = cv2.dilate(img2, kernel)
img2 = cv2.medianBlur(img2, 9)
img2 = cv2.medianBlur(img2, 9)
cv2.imshow('anything', img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
position = np.where(img2 !=0)
x0 = position[0].min()
x1 = position[0].max()
y0 = position[1].min()
y1 = position[1].max()
print(x0,x1,y0,y1)
result = img[x0:x1,y0:y1]
cv2.imshow('anything', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Output should be the image inside the square.
You can use contour detection for this. If your image basically contains only a hand-drawn rectangle, I think it's good enough to assume it's the largest closed contour in the image. From that contour we can figure out a polygon/quadrilateral approximation, and from that an approximate rectangle. I'll define some utilities at the beginning which I generally use to make things easier when messing around with images:
import cv2
import numpy as np
import matplotlib.pyplot as plt

def load_image(filename):
    return cv2.imread(filename)

def bnw(image):
    return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

def col(image):
    return cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)

def fixrgb(image):
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

def show_image(image, figsize=(7,7), cmap=None):
    cmap = cmap if len(image.shape)==3 else 'gray'
    plt.figure(figsize=figsize)
    plt.imshow(image, cmap=cmap)
    plt.show()

def AdaptiveThresh(gray):
    blur = cv2.medianBlur(gray, 5)
    adapt_type = cv2.ADAPTIVE_THRESH_GAUSSIAN_C
    thresh_type = cv2.THRESH_BINARY_INV
    return cv2.adaptiveThreshold(blur, 255, adapt_type, thresh_type, 11, 2)

def get_rect(pts):
    xmin = pts[:,0,1].min()
    ymin = pts[:,0,0].min()
    xmax = pts[:,0,1].max()
    ymax = pts[:,0,0].max()
    return (ymin, xmin), (ymax, xmax)
Let's load the image and convert it to grayscale:
image_name = 'test.jpg'
image_original = fixrgb(load_image(image_name))
image_gray = 255-bnw(image_original)
show_image(image_gray)
Use some morph ops to enhance the image:
kernel = np.ones((3,3),np.uint8)
d = 255-cv2.dilate(image_gray,kernel,iterations = 1)
show_image(d)
Find the edges and enhance/denoise:
e = AdaptiveThresh(d)
show_image(e)
m = cv2.dilate(e,kernel,iterations = 1)
m = cv2.medianBlur(m,11)
m = cv2.dilate(m,kernel,iterations = 1)
show_image(m)
Contour detection:
contours, hierarchy = cv2.findContours(m, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
total_area = np.prod(image_gray.shape)
max_area = 0
for cnt in contours:
    # Simplify contour
    perimeter = cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, 0.03 * perimeter, True)
    area = cv2.contourArea(approx)
    # Shape is rectangular, so approximately 4 points, and it's convex
    if (len(approx) == 4 and cv2.isContourConvex(approx) and max_area < area < total_area):
        max_area = cv2.contourArea(approx)
        quad_polygon = approx
img1 = image_original.copy()
img2 = image_original.copy()
cv2.polylines(img1,[quad_polygon],True,(0,255,0),10)
show_image(img1)
tl, br = get_rect(quad_polygon)
cv2.rectangle(img2, tl, br, (0,255,0), 10)
show_image(img2)
So you can see the approximate polygon and the corresponding rectangle, from which you can get your crop. I suggest you play around with the median blur and morphological ops like erosion, dilation, opening and closing, and see which set of operations suits your images best; I can't really say what's good from just one image (see the short sketch after the crop below). You can crop using the top-left and bottom-right coordinates:
show_image(image_original[tl[1]:br[1],tl[0]:br[0],:])
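As for experimenting with the morphological ops mentioned above, a quick sketch of opening and closing with cv2.morphologyEx, applied to the mask m from earlier (the kernel size is just a starting point to tune per image):
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
opened = cv2.morphologyEx(m, cv2.MORPH_OPEN, kernel)    # erosion then dilation: removes small specks
closed = cv2.morphologyEx(m, cv2.MORPH_CLOSE, kernel)   # dilation then erosion: fills small gaps
show_image(opened)
show_image(closed)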
Draw the square with a different color (e.g. red) so it can be distinguished from the other writing and the background. Then threshold on that color so you get a black-and-white image in which the red line appears white. Get the coordinates of the white pixels, and from this set select the two pairs (minX, minY) and (maxX, maxY). They are the top-left and bottom-right points of the box (remember that in an image the (0,0) point is at the top left), and you can use them to crop the image.
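A minimal sketch of that idea, assuming the box is drawn in red; the filename and the BGR thresholds below are made up and will need adjusting:
import cv2
import numpy as np

img = cv2.imread('boxed.png')                      # assumed filename
# "Red" in BGR: strong red channel, weak blue and green (thresholds are guesses)
red_mask = cv2.inRange(img, (0, 0, 150), (100, 100, 255))
ys, xs = np.where(red_mask > 0)                    # coordinates of red pixels
if len(xs) > 0:
    crop = img[ys.min():ys.max()+1, xs.min():xs.max()+1]
    cv2.imwrite('inside_box.png', crop)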

Find contours based on edges

I want to detect the contours of an equipment label. Although the code runs correctly, it never quite detects the contours of the label.
Original Image
Using this code:
import numpy as np
import cv2
import imutils #resizeimage
import pytesseract # convert img to string
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
# Read the image file
image = cv2.imread('Car Images/5.JPG')
# Resize the image - change width to 500
image = imutils.resize(image, width=500)
# Display the original image
cv2.imshow("Original Image", image)
cv2.waitKey(0)
# RGB to Gray scale conversion
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("1 - Grayscale Conversion", gray)
cv2.waitKey(0)
# Noise removal with iterative bilateral filter(removes noise while preserving edges)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
cv2.imshow("2 - Bilateral Filter", gray)
cv2.waitKey(0)
# Find Edges of the grayscale image
edged = cv2.Canny(gray, 170, 200)
cv2.imshow("3 - Canny Edges", edged)
cv2.waitKey(0)
# Find contours based on Edges
cnts, new = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# Create copy of original image to draw all contours
img1 = image.copy()
cv2.drawContours(img1, cnts, -1, (0,255,0), 3)
cv2.imshow("4- All Contours", img1)
cv2.waitKey(0)
#sort contours based on their area keeping minimum required area as '30' (anything smaller than this will not be considered)
cnts=sorted(cnts, key = cv2.contourArea, reverse = True)[:30]
NumberPlateCnt = None #we currently have no Number plate contour
# Top 30 Contours
img2 = image.copy()
cv2.drawContours(img2, cnts, -1, (0,255,0), 3)
cv2.imshow("5- Top 30 Contours", img2)
cv2.waitKey(0)
# loop over our contours to find the best possible approximate contour of number plate
count = 0
idx =7
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    # print("approx = ", approx)
    if len(approx) == 4:            # Select the contour with 4 corners
        NumberPlateCnt = approx     # This is our approx Number Plate Contour
        # Crop those contours and store it in Cropped Images folder
        x, y, w, h = cv2.boundingRect(c)   # This will find out co-ord for plate
        new_img = gray[y:y + h, x:x + w]   # Create new image
        cv2.imwrite('Cropped Images-Text/' + str(idx) + '.png', new_img)  # Store new image
        idx += 1
        break
# Drawing the selected contour on the original image
#print(NumberPlateCnt)
cv2.drawContours(image, [NumberPlateCnt], -1, (0,255,0), 3)
cv2.imshow("Final Image With Number Plate Detected", image)
cv2.waitKey(0)
Cropped_img_loc = 'Cropped Images-Text/7.png'
cv2.imshow("Cropped Image ", cv2.imread(Cropped_img_loc))
# Use tesseract to convert image into string
text = pytesseract.image_to_string(Cropped_img_loc, lang='eng')
print("Equipment Number is :", text)
cv2.waitKey(0) #Wait for user input before closing the images displayed
Displayed output
Is there a better way to narrow down the contour to the equipment label?
Here is the code for your reference on github:
https://github.com/AjayAndData/Licence-plate-detection-and-recognition---using-openCV-only/blob/master/Car%20Number%20Plate%20Detection.py
I think this code may help you
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('C:/Users/DELL/Desktop/download (5).png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray,60,0.001,10)
corners = np.int0(corners)
for i in corners:
    x, y = i.ravel()
    cv2.circle(img, (x, y), 0, 255, -1)
coord = np.where(np.all(img == (255, 0, 0),axis=-1))
plt.imshow(img)
plt.show()

Rotate image in python and remove the background

Is there a way to rotate these kinds of images, remove the background whitespace (or any background), and get an image like this?
If the image doesn't have any rotation, I am able to remove the background whitespace using the script below, but if the image is rotated at all it doesn't remove any space.
i followed this How to crop or remove white background from an image
import cv2
import numpy as np
img = cv2.imread(r'cheque_img\rotate.PNG')  # raw string: '\r' would otherwise be an escape
## (1) Convert to gray, and threshold
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
th, threshed = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)
## (2) Morph-op to remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11,11))
morphed = cv2.morphologyEx(threshed, cv2.MORPH_CLOSE, kernel)
## (3) Find the max-area contour
cnts = cv2.findContours(morphed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
cnt = sorted(cnts, key=cv2.contourArea)[-1]
## (4) Crop and save it
x,y,w,h = cv2.boundingRect(cnt)
dst = img[y:y+h, x:x+w]
cv2.imwrite("001.png", dst)
Please try it with any scanned (and rotated) image: the goal is to get rid of the background whitespace and rotate the document back to its original orientation for further computer-vision operations.
Using cv2.boundingRect will give you the minimum non-rotated rectangle that fits the contour. cv2.boundingRect result:
Instead of cv2.boundingRect, you will need to use cv2.minAreaRect to obtain a rectangle that fits the contour. cv2.minAreaRect result:
After obtaining the rotated rect information, you will need to find the affine transform matrix between the model points and the current points. The current points are the points found on the rotated rect, and the model points are the points of the original object: in this case, an object at initial location (0,0) with the width and height of the rotated rect.
Affine might be an overkill here but for generality affine transform is used.
Detailed explanation is located in the code.
import cv2
import numpy as np
img = cv2.imread('Bcm3h.png')
## (1) Convert to gray, and threshold
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
th, threshed = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)
## (2) Morph-op to remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11,11))
morphed = cv2.morphologyEx(threshed, cv2.MORPH_CLOSE, kernel)
## (3) Find the max-area contour
cnts = cv2.findContours(morphed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
cnt = sorted(cnts, key=cv2.contourArea)[-1]
## This will extract the rotated rect from the contour
rot_rect = cv2.minAreaRect(cnt)
# Extract useful data
cx,cy = (rot_rect[0][0], rot_rect[0][1]) # rect center
sx,sy = (rot_rect[1][0], rot_rect[1][1]) # rect size
angle = rot_rect[2] # rect angle
# Set model points : The original shape
model_pts = np.array([[0,sy],[0,0],[sx,0],[sx,sy]]).astype('int')
# Set detected points : Points on the image
current_pts = cv2.boxPoints(rot_rect).astype('int')
# sort the points to ensure match between model points and current points
ind_model = np.lexsort((model_pts[:,1],model_pts[:,0]))
ind_current = np.lexsort((current_pts[:,1],current_pts[:,0]))
model_pts = np.array([model_pts[i] for i in ind_model])
current_pts = np.array([current_pts[i] for i in ind_current])
# Estimate the transform between the point sets
# (note: cv2.estimateRigidTransform was removed in OpenCV 4.x;
#  cv2.estimateAffinePartial2D is the usual replacement there)
M = cv2.estimateRigidTransform(current_pts, model_pts, True)
# Warp the image
wrap_gray = cv2.warpAffine(gray, M, (int(sx), int(sy)))
# for display
cv2.imshow("dst",wrap_gray)
cv2.waitKey(0)
#cv2.imwrite("001.png", dst)
Result :
Considering that you don't know the angle of rotation, and that it can be different for each scanned image, you need to find it first.
Combine what you already did with the accepted answer for this question.
For the image you provided:
Angle is -25.953375702364195
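A minimal sketch of that combination, assuming the usual approach from that accepted answer: threshold, collect the foreground coordinates, and read the angle off cv2.minAreaRect (the filename is made up):
import cv2
import numpy as np

img = cv2.imread('scanned.png')                          # assumed filename
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
coords = np.column_stack(np.where(thresh > 0)).astype(np.float32)
angle = cv2.minAreaRect(coords)[-1]                      # raw minAreaRect angle
if angle < -45:                                          # normalize (pre-4.5 convention)
    angle = -(90 + angle)
else:
    angle = -angle
print("Angle is", angle)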
If the background is guaranteed to be saturated white (value 255) and the document consists mostly of unsaturated values, binarize below the threshold 255 and fit a bounding rectangle.
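A minimal sketch of that idea (the filename is assumed); only pixels strictly below 255 count as document, so the saturated-white background falls away:
import cv2
import numpy as np

img = cv2.imread('scan.png')                 # assumed filename
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ys, xs = np.where(gray < 255)                # anything not pure white is document
crop = img[ys.min():ys.max()+1, xs.min():xs.max()+1]
cv2.imwrite('scan_crop.png', crop)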
I had some problems running the crop_minAreaRect code presented earlier, so here is my slightly modified version:
import cv2
import numpy as np
def crop_minAreaRect(img, rect):
    # rotate img
    angle = rect[2]
    print("angle: " + str(angle))
    rows, cols = img.shape[0], img.shape[1]
    M = cv2.getRotationMatrix2D((cols/2, rows/2), angle, 1)
    img_rot = cv2.warpAffine(img, M, (cols, rows))
    # rotate bounding box
    rect0 = (rect[0], rect[1], angle)
    box = cv2.boxPoints(rect0)
    pts = np.int0(cv2.transform(np.array([box]), M))[0]
    pts[pts < 0] = 0
    # crop
    img_crop = img_rot[pts[1][1]:pts[0][1],
                       pts[1][0]:pts[2][0]]
    return img_crop
def ResizeWithAspectRatio(image, width=None, height=None, inter=cv2.INTER_AREA):
    dim = None
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        r = height / float(h)
        dim = (int(w * r), height)
    else:
        r = width / float(w)
        dim = (width, int(h * r))
    return cv2.resize(image, dim, interpolation=inter)
img = cv2.imread('rotatedCheque.png')
cv2.imshow("orig", img)
img_copy = img.copy()
# (1) Convert to gray, and threshold
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
th, threshed = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)
# (2) Morph-op to remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
morphed = cv2.morphologyEx(threshed, cv2.MORPH_CLOSE, kernel)
# (3) Find the max-area contour
cnts = cv2.findContours(morphed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
cnt = sorted(cnts, key=cv2.contourArea)[-1]
# This will extract the rotated rect from the contour
rot_rect = cv2.minAreaRect(cnt)
cropped_img = crop_minAreaRect(img, rot_rect)
width, height = img.shape[0], img.shape[1]
if height > width:
    cropped_img = cv2.rotate(cropped_img, cv2.ROTATE_90_CLOCKWISE)
resized_img = ResizeWithAspectRatio(cropped_img, width=800)
cv2.imshow("cropped", resized_img)
cv2.waitKey(0)

Cropping greyscale images within larger images using OpenCV and python

Hi I'm new to python and opencv. I've got this image:
I'm trying to crop the greyscale images out of the picture. At the moment the code finds the biggest bounding box, i.e. the top-right image, and crops it. What I want to do is find all the greyscale images, even if there are more than 4 in the picture, and crop all of them. I'm thinking of using a loop, but I don't want a loop that finds the largest bounding box four times and then stops, as other images I'm processing will contain more than 4 sub-images. Any help would be greatly appreciated!
import cv2
import numpy as np
# load image
img = cv2.imread('multi.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # convert to grayscale
# threshold to get just the signature (INVERTED)
retval, thresh_gray = cv2.threshold(gray, thresh=100, maxval=255,
                                    type=cv2.THRESH_BINARY_INV)
image, contours, hierarchy = cv2.findContours(thresh_gray, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_SIMPLE)
# Find object with the biggest bounding box
mx = (0,0,0,0)  # biggest bounding box so far
mx_area = 0
for cont in contours:
    x, y, w, h = cv2.boundingRect(cont)
    area = w * h
    if area > mx_area:
        mx = x, y, w, h
        mx_area = area
x, y, w, h = mx
# Output to files
roi=img[y:y+h,x:x+w]
cv2.imwrite('Image_crop.jpg', roi)
cv2.rectangle(img,(x,y),(x+w,y+h),(200,0,0),2)
cv2.imwrite('Image_cont.jpg', img)
I have elaborated on my comment.
In the code you provided, the contours are found using cv2.RETR_LIST, which retrieves every possible contour in the image, including those nested inside other contours. I have used cv2.RETR_EXTERNAL, which ignores contours located within other contours.
image = cv2.imread(r'C:\Users\Desktop\g.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
retval, thresh_gray = cv2.threshold(gray, thresh=100, maxval=255,
                                    type=cv2.THRESH_BINARY_INV)
cv2.imshow('thresh_gray.png', thresh_gray)
image, contours, hierarchy = cv2.findContours(thresh_gray,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for i, c in enumerate(contours):
    if cv2.contourArea(c) > 10000:
        x, y, w, h = cv2.boundingRect(c)
        roi = image[y:y + h, x:x + w]
        cv2.imshow('Region_{}.jpg'.format(i), roi)
cv2.waitKey(0)
cv2.destroyAllWindows()
