Problem in cropping a bounding box out of an image [duplicate] - python

This question already has answers here:
Want to find contours -> ValueError: not enough values to unpack (expected 3, got 2), this appears [duplicate]
(2 answers)
Closed last year.
My objective is to crop a bounding box out of an image using OpenCV's cv2 library. I found an interesting way of doing it here; however, I am getting this error:
ValueError: not enough values to unpack (expected 3, got 2)
This is the complete traceback:
Traceback (most recent call last):
File "text_extraction.py", line 13, in <module>
image, contours, hierarchy = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
ValueError: not enough values to unpack (expected 3, got 2)
And this is my complete code:
import cv2
import numpy as np
import pytesseract

pytesseract.pytesseract.tesseract_cmd = 'C:/Program Files/Tesseract-OCR/tesseract.exe'

im = cv2.imread('detected-image-2.png')
grayimage = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(grayimage, 170, 255, cv2.THRESH_BINARY)
cv2.imshow('mask', mask)

image, contours, hierarchy = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

for contour in contours:
    if cv2.contourArea(contour) < 200:
        continue
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    ext_left = tuple(contour[contour[:, :, 0].argmin()][0])
    ext_right = tuple(contour[contour[:, :, 0].argmax()][0])
    ext_top = tuple(contour[contour[:, :, 1].argmin()][0])
    ext_bot = tuple(contour[contour[:, :, 1].argmax()][0])
    roi_corners = np.array([box], dtype=np.int32)
    cv2.polylines(im, roi_corners, 1, (255, 0, 0), 3)

cv2.imshow('image', im)
cv2.waitKey(0)

cropped_image = grayimage[ext_top[1]:ext_bot[1], ext_left[0]:ext_right[0]]
cv2.imwrite('crop.jpg', cropped_image)
What exactly is wrong in this code?
I am relatively new to OpenCV, please help me out.
EDIT: I even tried a way by referring to this answer; however, the problem still persists.
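(For context, the ValueError comes from the OpenCV 3.x to 4.x API change: cv2.findContours returns three values in 3.x but only two in 4.x. A minimal, version-agnostic way to unpack it, shown as a sketch rather than the exact fix, assuming either version may be installed:)
import cv2

im = cv2.imread('detected-image-2.png')
grayimage = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(grayimage, 170, 255, cv2.THRESH_BINARY)

# Sketch only: cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x
# but (contours, hierarchy) in 4.x, so take the second-to-last value either way.
result = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = result[-2]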
EDIT-2: upon solving the ValueError, the script was unable to save the cropped image (it was saving the image as it is). Based on statemachine's code I changed the code to this:
import cv2
import matplotlib.pyplot as plt  # needed for plt.imshow below

im = cv2.imread('detected-image-2.png')
grayimage = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(grayimage, 170, 255, cv2.THRESH_BINARY)
cv2.imshow('mask', mask)
cv2.waitKey(0)

contours, hierarchy = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

for contour in contours:
    if cv2.contourArea(contour) < 200:
        continue
    rect = cv2.minAreaRect(contour)
    box = cv2.boxPoints(rect)
    X, Y, W, H = cv2.boundingRect(contour)
    cropped_image = im[Y:Y + H, X:X + W]
    print([X, Y, W, H])
    plt.imshow(cropped_image)
    cv2.imwrite('contour1.png', cropped_image)
However, the output is still the same (the cropping is not happening).

Solved it myself after figuring out where I was making a mistake; in case someone faces a similar issue, this piece of code should help.
def crop():
    # reading image
    image = cv2.imread("detected-image-1.png")
    # converting to gray scale
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # applying canny edge detection
    edged = cv2.Canny(gray, 10, 250)
    # finding contours
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    idx = 0
    for c in cnts:
        x, y, w, h = cv2.boundingRect(c)
        if w > 50 and h > 50:
            idx += 1
            new_img = image[y:y + h, x:x + w]
            # cropping images
            cv2.imwrite("cropped/" + str(idx) + '.png', new_img)
    print('Objects Cropped Successfully!')
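A short usage sketch for anyone reusing this (it assumes the script sits next to detected-image-1.png; note that the cropped/ folder has to exist before cv2.imwrite can write into it):
import os

# Hypothetical driver code, not part of the answer above: create the output
# folder if it is missing, then run the crop.
os.makedirs('cropped', exist_ok=True)
crop()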

Related

How to detect White region in an image with Opencv & Python?

I'm trying to extract the coordinates of a big white region in an image as follows:
Here's the original image:
Using a small square kernel, I applied a closing operation to fill small holes and help identify larger structures in the image as follows:
import cv2
import numpy as np
import imutils
original = cv2.imread("Plates\\24.png")
original = cv2.resize(original, None, fx=3, fy=3, interpolation=cv2.INTER_CUBIC)
gray = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
# next, find regions in the image that are light
squareKern = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
light = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, squareKern)
light = cv2.threshold(light, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
The resulting image is as follows:
Here's another example:
What I wish to be able to do is to detect the large white region in the plate as follows:
Keep in mind that contour-based approaches will not work well with many examples.
With the one image you provided:
I came up with two approaches to solving this problem:
Approach 1
Contour Area Comparison
As you can see, there are three large contours in the image: the top rectangle and the two rectangles below it, which you want to detect as a whole.
So I applied a threshold to your image, detected the contours of the thresholded image, and indexed the second-largest and third-largest contours (the largest is the top rectangle, which you want to ignore).
Here is the thresholded image:
I stacked the two contours together and detected the bounding box of the two contours:
import cv2
import numpy as np

img = cv2.imread("image.png")

def process(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(img_gray, 128, 255, cv2.THRESH_BINARY)
    img_blur = cv2.GaussianBlur(thresh, (5, 5), 2)
    img_canny = cv2.Canny(img_blur, 0, 0)
    return img_canny

def get_contours(img):
    contours, _ = cv2.findContours(process(img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    r1, r2 = sorted(contours, key=cv2.contourArea)[-3:-1]
    x, y, w, h = cv2.boundingRect(np.r_[r1, r2])
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)

get_contours(img)
cv2.imshow("img_processed", img)
cv2.waitKey(0)
Output:
Approach 2
Threshold Masking
As the two bottom rectangles are whiter than the top rectangle of the plate, I used a threshold to mask out the top of the plate:
I then used the Canny edge detector on the mask shown above.
import cv2

def process(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(img_gray, 163, 255, cv2.THRESH_BINARY)
    img_canny = cv2.Canny(thresh, 0, 0)
    img_dilate = cv2.dilate(img_canny, None, iterations=7)
    return cv2.erode(img_dilate, None, iterations=7)

def get_contours(img):
    contours, _ = cv2.findContours(process(img), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    x, y, w, h = cv2.boundingRect(max(contours, key=cv2.contourArea))
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)

img = cv2.imread("egypt.png")
get_contours(img)
cv2.imshow("img_processed", img)
cv2.waitKey(0)
Output:
Of course, this method may not work properly if the top of the plate isn't brighter than the bottom.
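One possible mitigation (a sketch only, not tested on other plates) is to let Otsu's method pick the threshold from the image histogram instead of hard-coding 163:
import cv2

def process_otsu(img):
    # Variant of process() above where Otsu's method chooses the threshold
    # instead of the fixed value of 163; the rest of the pipeline is unchanged.
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    img_canny = cv2.Canny(thresh, 0, 0)
    img_dilate = cv2.dilate(img_canny, None, iterations=7)
    return cv2.erode(img_dilate, None, iterations=7)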

Difficulty extracting characters using MSER in images which have borders around the text

I am trying to develop an OCR system. I am trying to use MSER to extract characters from an image and then pass those characters into a CNN to recognize them. Here is my code for character extraction:
import cv2
import numpy as np
# create MSER object
mser = cv2.MSER_create()
# read the image
img = cv2.imread('textArea01.png')
# convert to gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# store copy of the image
vis = img.copy()
# detect regions in the image
regions,_ = mser.detectRegions(gray)
# find convex hulls of the regions and draw them onto the original image
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
cv2.polylines(vis, hulls, 1, (0, 255, 0))
# create mask for the detected region
mask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)
mask = cv2.dilate(mask, np.ones((150, 150), np.uint8))
for contour in hulls:
    cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)
#this is used to find only text regions, remaining are ignored
text_only = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('img', vis)
cv2.waitKey(0)
cv2.imshow('mask', mask)
cv2.waitKey(0)
cv2.imshow('text', text_only)
cv2.waitKey(0)
This is working fine for most images, but for some images like this:
The outer border is also detected as a region, and its contour is drawn in the mask such that the whole area inside the border is treated as a text region. So the contours inside have no effect. How do I prevent this so that only the text is detected?
Hulls detected:
and the mask as a result:
My result using this code:
import cv2
import numpy as np

img = cv2.imread("img.png")

# grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray', gray)

# binary
# ret, thresh = cv2.threshold(gray, 250, 255, cv2.THRESH_BINARY_INV)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 35, 180)
cv2.imshow('threshold', thresh)

# dilation
kernel = np.ones((1, 1), np.uint8)
img_dilation = cv2.dilate(thresh, kernel, iterations=1)
cv2.imshow('dilated', img_dilation)

# find contours
# cv2.findContours() changed from OpenCV 3 to OpenCV 4: it now returns two values instead of three
cv2MajorVersion = cv2.__version__.split(".")[0]
# check for contours on thresh
if int(cv2MajorVersion) >= 4:
    ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
else:
    im2, ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# sort contours
sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])

for i, ctr in enumerate(sorted_ctrs):
    # Get bounding box
    x, y, w, h = cv2.boundingRect(ctr)
    # Getting ROI
    roi = img[y:y + h, x:x + w]
    # show ROI
    # cv2.imshow('segment no:'+str(i),roi)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)
    # if you want to save the letters without green bounding box, comment the line above
    if w > 5:
        cv2.imwrite('C:\\Users\\PC\\Desktop\\output\\{}.png'.format(i), roi)

cv2.imshow('marked areas', img)
cv2.waitKey(0)
You can have a threshold on the contour area so that it ignores all shapes that cover more than a certain area in the image.
for contour in hulls:
    # skip any hull that covers more than ThresholdArea, e.g. the outer border
    if cv2.contourArea(contour) > ThresholdArea:
        continue
    cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)

# this is used to find only text regions, remaining are ignored
text_only = cv2.bitwise_and(img, img, mask=mask)
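ThresholdArea above is a placeholder; one way to set it, as a rough sketch (the 0.8 factor is an assumption to tune per image), is as a fraction of the whole frame, so that the page border, which spans almost the entire image, is always skipped:
# Sketch: treat anything covering more than ~80% of the frame as the border
# rather than text; img is the input image loaded earlier.
ThresholdArea = 0.8 * img.shape[0] * img.shape[1]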

Split handwritten number touching each other with openCV in Python for MNIST preprocessing

I'm looking to split the numbers in an image. My main problem is that the numbers may touch each other, so none of the OpenCV contour techniques I found online work.
Here are the steps I've done so far. They work well with images where the numbers are well separated, but not when the numbers touch each other. One of the images I'm trying to solve:
I'm trying to do this as a preprocessing step so that I can then feed the numbers into my MNIST model.
My actual code:
import cv2
import numpy as np
# 3 - detect and extract ROI's
#image = cv2.imread('imgs/01.jpg')
#image = cv2.imread('imgs/03.jpg')
image = cv2.imread('imgs/01_tresh_210.jpg')
#cv2.imshow('i', image)
#cv2.waitKey(0)
# grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray', gray)
cv2.waitKey(0)
# binary
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
cv2.imshow('thresh', thresh)
cv2.waitKey(0)
# dilation
kernel = np.ones((10, 1), np.uint8) # values set for this image only - need to change for different images
img_dilation = cv2.dilate(thresh, kernel, iterations=1)
cv2.imshow('dilated', img_dilation)
cv2.waitKey(0)
# find contours
im2, ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# sort contours
sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])
for i, ctr in enumerate(sorted_ctrs):
    # Get bounding box
    x, y, w, h = cv2.boundingRect(ctr)
    # Getting ROI
    roi = image[y:y + h, x:x + w]
    # show ROI
    #cv2.imshow('segment no:'+str(i),roi)
    cv2.imwrite('imgs\\roi\\{}.png'.format(i), roi)
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 0), 1)
    #cv2.waitKey(0)
    # save only the ROI's which contain a valid information
    if h > 20 and w > 75:
        cv2.imwrite('roi\\{}.png'.format(i), roi)

cv2.imshow('marked areas', image)
cv2.waitKey(0)
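One common workaround for the touching digits, sketched below under the assumption that merged digits end up inside a single unusually wide bounding box, is to slice such a box into equal-width pieces (typical_w is a made-up heuristic, not part of the code above):
def split_wide_roi(roi, w, typical_w=40):
    # Estimate how many digits the box holds and cut it into that many slices;
    # each slice can then be resized to 28x28 before feeding the MNIST model.
    n = max(1, round(w / typical_w))
    step = w // n
    return [roi[:, i * step:(i + 1) * step] for i in range(n)]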

Python: Showing every Object of an image in its own window

I've written some code, to crop an object (in this case the Data Matrix Code) from an image:
import numpy as np
import cv2
image = cv2.imread("datamatrixc.png")
img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
img_height, img_width = image.shape[:2]
WHITE = [255, 255, 255]
# Threshold filter
ret, thresh = cv2.threshold(img_gray, 127, 255, cv2.THRESH_BINARY_INV)
# Get Contours
_, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Get Last element of the contours object
max = len(contours) - 1
cnt = contours[max]
# Get coordinates for the bounding box
x, y, w, h = cv2.boundingRect(cnt)
image_region = image[ int(((img_height / 2) - h) / 2) : int(((img_height / 2) - h) / 2 + h), int(x): int(x + w) ]
dmc = cv2.copyMakeBorder(image_region, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value = WHITE)
cv2.imshow("Test", dmc)
cv2.waitKey(0)
cv2.destroyAllWindows()
The code works fine and I received this result:
However, the next image is a little more complicated.
I receive the same result as in the previous image, but I have no idea how to detect the two other objects.
Is there an easier way to show every object in its own window?
For this specific image, take the biggest contours you have and check whether the object is a four-sided shape. If the half-point between the bounding box's corners (see pairs below) is in the contour array, then voilà, problem solved.
Pairs: TopRight-TopLeft, TopRight-BottomRight, TopLeft-BottomLeft, BottomLeft-BottomRight
Or you could check whether there are pixels inside the bounding box that are not black/white?
And for plotting them individually, just wrap a for loop around what you already have.
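A rough sketch of that midpoint check using cv2.pointPolygonTest (the function name and the tol value are assumptions, not code from this answer):
import cv2

def edge_midpoints_on_contour(cnt, tol=3.0):
    # Midpoints of the four bounding-box edges (the corner pairs listed above);
    # the shape passes when every midpoint lies within tol pixels of the contour.
    x, y, w, h = cv2.boundingRect(cnt)
    midpoints = [(x + w / 2, y), (x + w / 2, y + h),
                 (x, y + h / 2), (x + w, y + h / 2)]
    return all(abs(cv2.pointPolygonTest(cnt, (float(px), float(py)), True)) <= tol
               for px, py in midpoints)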
How about this?
import numpy as np
import cv2
image = cv2.imread("datamatrixc.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, bin_img = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
kernel = np.ones((3,3),np.uint8)
closing = cv2.morphologyEx(bin_img, cv2.MORPH_CLOSE, kernel, iterations=4)
n_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(bin_img)
size_thresh = 5000
for i in range(1, n_labels):
    if stats[i, cv2.CC_STAT_AREA] >= size_thresh:
        print(stats[i, cv2.CC_STAT_AREA])
        x = stats[i, cv2.CC_STAT_LEFT]
        y = stats[i, cv2.CC_STAT_TOP]
        w = stats[i, cv2.CC_STAT_WIDTH]
        h = stats[i, cv2.CC_STAT_HEIGHT]
        cv2.imshow('img', image[y:y+h, x:x+w])
        cv2.waitKey(0)

Python Open CV overlay image on contour of a big image

I have a big image containing a shape, and I'm looking to overlay an image on that shape based on its contour.
I have this image
I have this contour, which detects the shape and color of the image
I want to place an image on this shape
I want to custom resize the image
Desired output:
Code:
if color == "blue" and shape == "pentagon":
    A_img = cv2.imread("frame.png")
    print(A_img)
    x_offset = y_offset = 50
    B_img[y_offset:y_offset + A_img.shape[0], x_offset:x_offset + A_img.shape[1]] = A_img
That code is not working; the monkey is getting placed in the top left instead of on the shape.
I have a working solution. Hope it is what you were looking for.
Code:
import cv2
import numpy as np
image = cv2.imread('C:/Users/524316/Desktop/shapes.png', 1)
monkey = cv2.imread('C:/Users/524316/Desktop/monkey.png', 1)
image2 = image.copy()
image3 = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
#cv2.imshow('thresh', thresh)
_, cnts, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in cnts:
    #---- making sure to avoid small unwanted contours ---
    if cv2.contourArea(c) > 150:
        #--- selecting contours having 5 sides ---
        if len(cv2.approxPolyDP(c, 0.04 * cv2.arcLength(c, True), True)) == 5:
            cv2.drawContours(image2, [c], -1, (0, 255, 0), 2)
            #--- finding bounding box dimensions of the contour ---
            x, y, w, h = cv2.boundingRect(c)
            print(x, y, w, h)
            #--- overlaying the monkey in place of pentagons using the bounding box dimensions ---
            image3[y:y+h, x:x+w] = cv2.resize(monkey, (np.abs(x - (x+w)), np.abs(y - (y+h))))
cv2.imshow('image2', image2)
cv2.imshow('image3', image3)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
