Here's the sample image I am using:
I'm trying to automatically extract diagrams from similarly structured engineering drawings by removing the non-diagram contours. However, because the table doesn't contain continuously flowing data, it gets treated as another diagram and ends up inside the cropped region.
Code:
import cv2
import numpy as np
image = cv2.imread("pin1.png")
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3, 3), 0)
thresh = cv2.threshold(
    blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 10))
dilate = cv2.dilate(thresh, kernel, iterations=2)
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    area = cv2.contourArea(c)
    if w / h > 2 and area > 10000:
        cv2.drawContours(dilate, [c], -1, (0, 0, 0), -1)
boxes = []
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    boxes.append([x, y, x + w, y + h])
boxes = np.asarray(boxes)
x = np.min(boxes[:, 0])
y = np.min(boxes[:, 1])
w = np.max(boxes[:, 2]) - x
h = np.max(boxes[:, 3]) - y
cv2.rectangle(image, (x, y), (x + w, y + h), (36, 255, 12), 2)
cropped_region = original[y:y+h, x:x+w]
cv2.namedWindow("original", cv2.WINDOW_NORMAL)
cv2.namedWindow("thresh", cv2.WINDOW_NORMAL)
cv2.namedWindow("dilate", cv2.WINDOW_NORMAL)
cv2.namedWindow("cropped_region", cv2.WINDOW_NORMAL)
cv2.imshow('original', original)
cv2.imshow('thresh', thresh)
cv2.imshow('dilate', dilate)
cv2.imshow('cropped_region', cropped_region)
cv2.imwrite("Cropped1.png", cropped_region)
cv2.waitKey()
Since I'm new to this, I'm not sure how to make the contour filter search for the table rather than just the lines of text. Any help would be appreciated.
EDIT: Here's my Expected Output
I didn't find a fully automatic detection method.
However, I used template matching with an array of four templates, and it worked well enough for me.
I followed this doc
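For reference, a minimal sketch of that kind of multi-template matching with cv2.matchTemplate; the template file names and the 0.8 score threshold are illustrative placeholders, not values from the original post:
import cv2
import numpy as np

image = cv2.imread("pin1.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Hypothetical template crops of the diagram features to look for
template_files = ["template1.png", "template2.png", "template3.png", "template4.png"]

for name in template_files:
    template = cv2.imread(name, cv2.IMREAD_GRAYSCALE)
    t_h, t_w = template.shape[:2]
    scores = cv2.matchTemplate(gray, template, cv2.TM_CCOEFF_NORMED)
    # Keep every location whose normalized score clears the (illustrative) threshold
    ys, xs = np.where(scores >= 0.8)
    for x, y in zip(xs, ys):
        cv2.rectangle(image, (int(x), int(y)), (int(x) + t_w, int(y) + t_h), (0, 0, 255), 2)

cv2.imwrite("matches.png", image)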
Related
I'm trying to cut an object out of an image with OpenCV, but it doesn't cut properly. As seen in the picture below, the rectangle cuts off the edges of the object. How can I manually increase the width and height of the green rectangle, or what change do I need to make in the code to get a correct cut?
Code
image = cv2.imread("example.jpg")
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Morph open to remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=1)
# Find contours, obtain bounding box, extract and save ROI
ROI_number = 0
cnts = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    # print(f"x -> {x} y -> {y} w -> {w} h -> {h}")
    # x = x - 20
    # y = y - 20
    # w = w - 20
    # h = h - 20
    cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12))
    ROI = original[y:y+h, x:x+w]
    cv2.imwrite('ROI_{}.png'.format(ROI_number), ROI)
    ROI_number += 1
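One way to widen the crop (a sketch only, not from the original post; the 20-pixel margin is an arbitrary example) is to pad each bounding box and clamp it to the image borders before cropping, reusing the variables from the loop above:
pad = 20  # illustrative margin in pixels
img_h, img_w = original.shape[:2]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    # Expand the box by `pad` on every side, clamped to the image bounds
    x1, y1 = max(x - pad, 0), max(y - pad, 0)
    x2, y2 = min(x + w + pad, img_w), min(y + h + pad, img_h)
    cv2.rectangle(image, (x1, y1), (x2, y2), (36,255,12))
    ROI = original[y1:y2, x1:x2]
    cv2.imwrite('ROI_padded_{}.png'.format(ROI_number), ROI)
    ROI_number += 1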
I am new to computer vision, and I want to create a program that detects the boxes in an image and saves each one as a separate image, and so on.
I tried some code but did not get my desired result.
Here is my code and its output.
import cv2
# Load image, grayscale, adaptive threshold
image = cv2.imread('image.jpeg')
result = image.copy()
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,51,9)
# Fill rectangular contours
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    cv2.drawContours(thresh, [c], -1, (255,255,255), -1)
# Morph open
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9,9))
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=4)
# Draw rectangles
cnts = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(image, (x, y), (x + w, y + h), (36,255,12), 3)
cv2.imshow('thresh', thresh)
cv2.imshow('opening', opening)
cv2.imshow('image', image)
cv2.waitKey()
output:
All you need to do is first remove the outermost white area, i.e. make it black, so that the boxes can be detected without any issues using the cv2.RETR_EXTERNAL flag, since they are not touching. Then we just extract the boxes one by one.
To remove the outermost area, I used the point-polygon test on the contours: if the point (1, 1) lies inside or on a contour, that contour is not drawn, and every other contour is drawn onto a new image. From this new image, I then found the box contours and extracted them.
import cv2
import numpy as np
img = cv2.imread("2lscp.png", cv2.IMREAD_GRAYSCALE)
ret, img = cv2.threshold(img, 50, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
Contours = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
newImg = np.zeros(img.shape, dtype=np.uint8)
for Contour in Contours:
    if cv2.pointPolygonTest(Contour, (1, 1), False) == -1:
        cv2.drawContours(newImg, [Contour], -1, 255, 1)
Contours = cv2.findContours(newImg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
for Contour in Contours:
    [x, y, w, h] = cv2.boundingRect(Contour)
    cv2.imshow("box extracted", img[y:y+h, x:x+w])
    cv2.waitKey(0)
cv2.destroyAllWindows()
This case seems particularly simple because the image is quasi-binary. Detect the contours of the white regions and select those whose area is roughly 10 to 15% of the whole image; these are the desired boxes. Then fit a rectangle or rotated rectangle.
No further processing is needed.
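A rough sketch of that idea, assuming a quasi-binary input; the file name is a placeholder and the 10-15% bounds are the ones mentioned above:
import cv2

img = cv2.imread("boxes.png")  # placeholder file name
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

total_area = img.shape[0] * img.shape[1]
for c in cnts:
    area = cv2.contourArea(c)
    # Keep only white regions covering roughly 10-15% of the whole image
    if 0.10 * total_area <= area <= 0.15 * total_area:
        x, y, w, h = cv2.boundingRect(c)    # axis-aligned rectangle
        # rot_rect = cv2.minAreaRect(c)     # or fit a rotated rectangle instead
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imwrite("selected_boxes.png", img)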
Here is a solution, try this:
import cv2
import numpy as np
#Read input image
img = cv2.imread('hw_data.png')
# convert from BGR to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#apply threshold
thresh = cv2.threshold(gray, 30, 255, cv2.THRESH_BINARY)[1]
# find contours and get one with area about 180*35
# draw all contours in green and accepted ones in red
contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
#area_thresh = 0
min_area = 0.95*180*44
max_area = 1.05*180*44
print(min_area)
print(max_area)
result = img.copy()
i = 1
for c in contours:
    # print(c)
    area = cv2.contourArea(c)
    cv2.drawContours(result, [c], -1, (0, 255, 0), 1)
    x, y, w, h = cv2.boundingRect(c)
    # crop region of img using bounding box
    region = result[y:y+h, x:x+w]
    # save region to new image
    print(region.shape, ' i ', i)
    # cv2.imwrite("black_region_{0}.png".format(i), region)
    i = i + 1
    if region.shape[0] > 70 and region.shape[1] < 100:
        cv2.imwrite("black_region_{0}.png".format(i), region)
        # break
    # if area > min_area and area < max_area:
    #     cv2.drawContours(result, [c], -1, (0, 0, 255), 1)
    #     break
# save result
# cv2.imwrite("box_found.png", result)
# show images
# cv2.imshow("GRAY", gray)
# cv2.imshow("THRESH", thresh)
# cv2.imshow("RESULT", result)
# cv2.waitKey(0)
I want to detect each word without running any kind of OCR. My idea is to create a rectangle around each word in this picture. To achieve this I have written the Python script attached below, which does not match my expectation. Basically I want to:
draw a rectangle around each word (I don't actually care whether it is a real word or not). I want to draw the rectangles based on the black color: if black pixels occur very close together, I can assume they form a word. Like this:
My Python script:
import numpy as np
import cv2
image = cv2.imread('Capture.JPG')
original = image.copy()
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
lower = np.array([0,0, 0], dtype="uint8")
upper = np.array([200,200, 200], dtype="uint8")
mask = cv2.inRange(image, lower, upper)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
cnts = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
area = 0
for c in cnts:
    area += cv2.contourArea(c)
    cv2.drawContours(original, [c], 0, (0,0,0), 1)
    x, y, w, h = cv2.boundingRect(c)
    color = list(np.random.random(size=3) * 256)
    cv2.rectangle(original, (x, y), (x + w, y + h), color, 1)
    print(c)
print(area)
cv2.imshow('mask', mask)
cv2.imshow('original', original)
cv2.imshow('opening', opening)
cv2.waitKey()
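One common way to merge characters that sit close together into word-sized blobs (not part of the script above, just a sketch) is to dilate the opening mask with a wide, short kernel before finding contours; the (12, 3) kernel size is an illustrative value to tune per image:
# Continuing from the `opening` mask and `original` image computed above
word_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (12, 3))
words = cv2.dilate(opening, word_kernel, iterations=1)

cnts = cv2.findContours(words, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(original, (x, y), (x + w, y + h), (0, 255, 0), 1)

cv2.imshow('words', original)
cv2.waitKey()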
I am trying to extract handwritten numbers and letters from an image; for that I followed this Stack Overflow link,
but it is still not extracting the numbers and letters properly, and it picks up the border line as well.
You can find the result below:
Code:
import cv2
import imutils
# Load image, grayscale, Otsu's threshold
image = cv2.imread('xxx/ocr/pic_crop_2.png')
image = imutils.resize(image, width=375)
img=image.copy()
# Remove border
kernel_vertical = cv2.getStructuringElement(cv2.MORPH_RECT, (1,50))
temp1 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel_vertical)
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50,1))
temp2 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, horizontal_kernel)
temp3 = cv2.add(temp1, temp2)
result = cv2.add(temp3, image)
# Convert to grayscale and Otsu's threshold
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
_,thresh = cv2.threshold(gray, 120, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
# thresh=cv2.dilate(thresh,None,iterations=1)
# Find contours and filter using contour area
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(img, (x, y), (x + w, y + h), (36,255,12), 2)
cv2.imshow('thresh', thresh)
cv2.imshow('img', img)
cv2.waitKey()
I tried to use dilate but had no luck.
Please find the sample image below:
You can check the contour area to fix that.
import cv2
# Load image, grayscale, Otsu's threshold
image = cv2.imread('pic_crop_2.png')
#image = cv2.resize(image, width=375)
img=image.copy()
# Remove border
kernel_vertical = cv2.getStructuringElement(cv2.MORPH_RECT, (1,50))
temp1 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel_vertical)
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50,1))
temp2 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, horizontal_kernel)
temp3 = cv2.add(temp1, temp2)
result = cv2.add(temp3, image)
# Convert to grayscale and Otsu's threshold
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
_,thresh = cv2.threshold(gray, 120, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
# thresh=cv2.dilate(thresh,None,iterations=1)
# Find contours and filter using contour area
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
MIN_AREA = 200
for c in cnts:
    if cv2.contourArea(c) > MIN_AREA:
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(img, (x, y), (x + w, y + h), (36,255,12), 2)
cv2.imshow('thresh', thresh)
cv2.imshow('img', img)
cv2.waitKey()
A 3x3 closing and binarization seem to give good results (though the black area is problematic and should be erased or cropped explicitly).
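A minimal sketch of that suggestion, with a placeholder file name; the 3x3 kernel and Otsu binarization are as described above:
import cv2

img = cv2.imread("pic_crop_2.png", cv2.IMREAD_GRAYSCALE)  # placeholder file name

# 3x3 morphological closing, then Otsu binarization
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
closed = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
thresh = cv2.threshold(closed, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

cv2.imshow('closed', closed)
cv2.imshow('thresh', thresh)
cv2.waitKey()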
I'm currently working on a project with floor plan images. I'm dealing with a problem where I have a certain output, but it often needs a bit of correction. This is what I have:
The upper image is the predicted output and the bottom one is the ground truth. What would be the optimal way to correct, for example, the spots missing the black fillings?
One idea is to use morphological transformations with a clever trick. If you just use normal morph closing to fill the holes, it will not work due to the curved ends of the walls. So to get around this, we can isolate the walls into two parts by first detecting all horizontal walls and then vertical walls one at a time. Once we have isolated each orientation, we find the rectangular contour which will effectively create the corner point of the wall. Here's a visualization:
Input image
Detected horizontal and vertical walls drawn in by a rectangle fill
Combined masks
Color mask on input image to get result
Here's the results with the 2nd input image
Code
import cv2
import numpy as np
# Load image, create mask, grayscale, and Otsu's threshold
image = cv2.imread('2.png')
mask = np.zeros(image.shape, dtype=np.uint8)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Perform morph operations
open_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, open_kernel, iterations=1)
close_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9,9))
close = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, close_kernel, iterations=3)
# Find horizontal sections and draw rectangle on mask
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25,3))
detect_horizontal = cv2.morphologyEx(close, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
cnts = cv2.findContours(detect_horizontal, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(mask, (x, y), (x + w, y + h), (255,255,255), -1)
    cv2.rectangle(mask, (x, y), (x + w, y + h), (255,255,255), 2)
# Find vertical sections and draw rectangle on mask
vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,25))
detect_vertical = cv2.morphologyEx(close, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
cnts = cv2.findContours(detect_vertical, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(mask, (x, y), (x + w, y + h), (255,255,255), -1)
    cv2.rectangle(mask, (x, y), (x + w, y + h), (255,255,255), 2)
# Color mask onto original image
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
image[mask==255] = [0,0,0]
cv2.imshow('opening', opening)
cv2.imshow('close', close)
cv2.imshow('image', image)
cv2.imshow('thresh', thresh)
cv2.imshow('mask', mask)
cv2.waitKey()