How can I find these containers in a photo with OpenCV (Python)?

Here's the image.
Here's my code:
import cv2
import numpy as np

image = cv2.imread('Photos\\1photo.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

kernel_size = 5
blur = cv2.GaussianBlur(gray, (kernel_size, kernel_size), 0)

sharp_strength = 9.1
sharpen_kernel = np.array([[-1, -1, -1],
                           [-1, sharp_strength, -1],
                           [-1, -1, -1]])
sharpen = cv2.filter2D(blur, -1, sharpen_kernel)  # sharpen the blurred image
cv2.imshow('sharpen', sharpen)
cv2.waitKey()

threshold = 140
max_value = 255  # must not exceed 255 for an 8-bit image
thresh = cv2.threshold(sharpen, threshold, max_value, cv2.THRESH_BINARY_INV)[1]
thresh = cv2.bitwise_not(thresh)
cv2.imshow("ThreshInv", thresh)
cv2.waitKey()

kernel_size = 3
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size))
closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
cv2.imshow("Close", closing)
cv2.waitKey()

cnts = cv2.findContours(closing, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
min_area = 80
max_area = 150
image_number = 0
for c in cnts:
    area = cv2.contourArea(c)
    if min_area < area < max_area:
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(image, (x, y), (x + w, y + h), (36, 255, 12), 2)
cv2.imshow('image', image)
cv2.waitKey()
After the blur, threshold, inversion, and morphology steps, I get an image like this.
The carriages and containers are quite visible, but findContours can't detect them.
How can I fix this?
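For debugging, a minimal sketch (an addition, not part of the original question) that continues from the `closing` and `image` variables above: it draws every detected contour and prints the area range, which makes it easy to see whether the 80–150 px² window actually covers the container blobs.

import cv2

# Assumes `closing` and `image` come from the script above.
cnts = cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

areas = sorted(cv2.contourArea(c) for c in cnts)
print('smallest / largest contour areas:', areas[:5], areas[-5:])

debug = image.copy()
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(debug, (x, y), (x + w, y + h), (0, 0, 255), 1)

cv2.imshow('all contours', debug)
cv2.waitKey()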

Related

Trying to count buildings

I tried to count buildings in a Google Maps image using OpenCV.
Here's the code I used, taken from a Stack Overflow answer:
import cv2
import numpy as np
image = cv2.imread('converted2.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.medianBlur(gray, 5)
sharpen_kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
sharpen = cv2.filter2D(blur, -1, sharpen_kernel)
thresh = cv2.threshold(sharpen,160,255, cv2.THRESH_BINARY_INV)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)
cnts = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
min_area = 10
max_area = 1000000
image_number = 0
for c in cnts:
    area = cv2.contourArea(c)
    if area > min_area and area < max_area:
        x, y, w, h = cv2.boundingRect(c)
        ROI = image[y:y+h, x:x+w]
        cv2.imwrite('ROI_{}.png'.format(image_number), ROI)
        cv2.rectangle(image, (x, y), (x + w, y + h), (50, 255, 10), 2)
        image_number += 1
cv2.imshow('sharpen', sharpen)
cv2.imshow('close', close)
cv2.imshow('thresh', thresh)
print(image_number)
cv2.imshow('image', image)
cv2.waitKey()
The image is black and white.
I tried to detect how many buildings there are, but it gave me the wrong count.
The output was 17.
The original image:
The result:
Edit:
I tried using different code:
org=cv2.imread('maps_converted2.jpg')
cv2.imshow('sample',org)
img = cv2.imread('maps_converted2.jpg',0)
ret, thresh = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    img = cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.imshow("image", img)
cv2.waitKey()
This is the result:
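As a cross-check (a sketch added here, not from the original post), blobs on a clean binary image can also be counted directly with connected components instead of contours, filtering by area so specks are not counted as buildings. It assumes `thresh` is the binary image from the code above and that buildings appear as white blobs; the 50 px² cutoff is a placeholder to tune.

import cv2
import numpy as np

# Assumes `thresh` is a binary uint8 image with buildings as white blobs.
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(thresh)

min_area = 50  # placeholder: smallest building footprint to accept
building_count = 0
for i in range(1, num_labels):  # label 0 is the background
    if stats[i, cv2.CC_STAT_AREA] >= min_area:
        building_count += 1

print('buildings counted:', building_count)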

Draw a bounding box around a whole block of text in an image using Python

I have an image from which I have removed the noise (dots in the background), and I want to draw a bounding box around the block of text in the image. How can I do this using Python and OpenCV?
Input image:
Noise Removed Image:
Here is the code used to remove the noise in the background. Where can I change it to save images with bounding boxes around the text?
import cv2
import numpy as np
import matplotlib.pyplot as plt
import glob
import os

def remove_dots(image_path, outdir):
    image = cv2.imread(image_path)
    mask = np.zeros(image.shape, dtype=np.uint8)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (3,3), 0)
    thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 51, 9)
    # Create horizontal kernel then dilate to connect text contours
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
    dilate = cv2.dilate(thresh, kernel, iterations=2)
    # Find contours and filter out noise using contour approximation and area filtering
    cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.04 * peri, True)
        x, y, w, h = cv2.boundingRect(c)
        area = w * h
        ar = w / float(h)
        if area > 1200 and area < 50000 and ar < 8:
            cv2.drawContours(mask, [c], -1, (255,255,255), -1)
    # Bitwise-and input image and mask to get result
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    result = cv2.bitwise_and(image, image, mask=mask)
    result[mask==0] = (255,255,255)  # Color background white
    cv2.imwrite(os.path.join(outdir, os.path.basename(image_path)), result)

for jpgfile in glob.glob(r'C:\custom\TableDetectionWork\text_detection_dataset/*'):
    print(jpgfile)
    remove_dots(jpgfile, r'C:\custom\TableDetectionWork\textdetect/')
You can do that by using a horizontal morphology filter to merge the letters in a mask image. Then find the contours. Then get the bounding boxes.
Input:
import cv2
import numpy as np
img = cv2.imread("john.jpg")
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# invert
thresh = 255 - thresh
# apply horizontal morphology close
kernel = np.ones((5, 191), np.uint8)
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# get external contours
contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
# draw contours
result = img.copy()
for cntr in contours:
    # get bounding boxes
    pad = 10
    x, y, w, h = cv2.boundingRect(cntr)
    cv2.rectangle(result, (x-pad, y-pad), (x+w+pad, y+h+pad), (0, 0, 255), 4)
# save result
cv2.imwrite("john_bbox.png",result)
# display result
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Morphology Closed Image:
Bounding Boxes Image:
Here is the core of your code modified to do what you want in Python/OpenCV. It is just adding my code to the end of your code.
Input:
import cv2
import numpy as np
image = cv2.imread("john.jpg")
mask = np.zeros(image.shape, dtype=np.uint8)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3,3), 0)
thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,51,9)
# Create horizontal kernel then dilate to connect text contours
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
dilate = cv2.dilate(thresh, kernel, iterations=2)
# Find contours and filter out noise using contour approximation and area filtering
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.04 * peri, True)
    x, y, w, h = cv2.boundingRect(c)
    area = w * h
    ar = w / float(h)
    if area > 1200 and area < 50000 and ar < 8:
        cv2.drawContours(mask, [c], -1, (255,255,255), -1)
# Bitwise-and input image and mask to get result
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
result = cv2.bitwise_and(image, image, mask=mask)
result[mask==0] = (255,255,255) # Color background white
# NEW CODE HERE TO END _____________________________________________________________
gray2 = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
thresh2 = cv2.threshold(gray2, 128, 255, cv2.THRESH_BINARY)[1]
thresh2 = 255 - thresh2
kernel = np.ones((5, 191), np.uint8)
close = cv2.morphologyEx(thresh2, cv2.MORPH_CLOSE, kernel)
# get external contours
contours = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
# draw contours
result2 = result.copy()
for cntr in contours:
    # get bounding boxes
    pad = 10
    x, y, w, h = cv2.boundingRect(cntr)
    cv2.rectangle(result2, (x-pad, y-pad), (x+w+pad, y+h+pad), (0, 0, 255), 4)
cv2.imwrite("john_bboxes.jpg", result2)
cv2.imshow("mask",mask)
cv2.imshow("thresh",thresh)
cv2.imshow("dilate",dilate)
cv2.imshow("result",result)
cv2.imshow("gray2",gray2)
cv2.imshow("thresh2",thresh2)
cv2.imshow("close",close)
cv2.imshow("result2",result2)
cv2.waitKey(0)
cv2.destroyAllWindows()
Bounding Boxes on Your Result:

Unable to extract numbers using findContours in OpenCV

I am trying to extract handwritten numbers and letters from an image; for that I followed this stackoverflow link,
but it is still not extracting the numbers and letters properly, and it picks up the border line as well.
You can find the result below:
Code:
import cv2
import imutils
# Load image, grayscale, Otsu's threshold
image = cv2.imread('xxx/ocr/pic_crop_2.png')
image = imutils.resize(image, width=375)
img=image.copy()
# Remove border
kernel_vertical = cv2.getStructuringElement(cv2.MORPH_RECT, (1,50))
temp1 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel_vertical)
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50,1))
temp2 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, horizontal_kernel)
temp3 = cv2.add(temp1, temp2)
result = cv2.add(temp3, image)
# Convert to grayscale and Otsu's threshold
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
_,thresh = cv2.threshold(gray, 120, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
# thresh=cv2.dilate(thresh,None,iterations=1)
# Find contours and filter using contour area
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(img, (x, y), (x + w, y + h), (36,255,12), 2)
cv2.imshow('thresh', thresh)
cv2.imshow('img', img)
cv2.waitKey()
I tried to use dilate, but no luck.
Please find the sample image below:
You can filter by contour area to fix that.
import cv2
# Load image, grayscale, Otsu's threshold
image = cv2.imread('pic_crop_2.png')
#image = cv2.resize(image, width=375)
img=image.copy()
# Remove border
kernel_vertical = cv2.getStructuringElement(cv2.MORPH_RECT, (1,50))
temp1 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel_vertical)
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50,1))
temp2 = 255 - cv2.morphologyEx(image, cv2.MORPH_CLOSE, horizontal_kernel)
temp3 = cv2.add(temp1, temp2)
result = cv2.add(temp3, image)
# Convert to grayscale and Otsu's threshold
gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
_,thresh = cv2.threshold(gray, 120, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)
# thresh=cv2.dilate(thresh,None,iterations=1)
# Find contours and filter using contour area
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
MIN_AREA = 200
for c in cnts:
    if cv2.contourArea(c) > MIN_AREA:
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(img, (x, y), (x + w, y + h), (36,255,12), 2)
cv2.imshow('thresh', thresh)
cv2.imshow('img', img)
cv2.waitKey()
3x3 closing and binarization give good results, it seems (though the black area is problematic and should be erased or cropped explicitly).
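A minimal sketch of that suggestion (my reconstruction, not the commenter's code), assuming `image` and `gray` are the loaded image and its grayscale version from the code above; the 200 px² cutoff is a placeholder.

import cv2

# Assumes `image` and `gray` come from the script above.
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
closed = cv2.morphologyEx(gray, cv2.MORPH_CLOSE, kernel)

# Otsu picks the threshold automatically; invert so the strokes become white.
thresh = cv2.threshold(closed, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    if cv2.contourArea(c) > 200:  # placeholder area cutoff
        x, y, w, h = cv2.boundingRect(c)
        cv2.rectangle(image, (x, y), (x + w, y + h), (36, 255, 12), 2)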

How to automatically crop out diagrams from an engineering drawing image?

Here's the sample image I am using:
I'm basically trying to automatically extract diagrams from similarly structured engineering drawings by removing non-diagram contours. However, since the table doesn't have continuously flowing data, the code treats it as if it were another diagram and keeps it within the cropped region.
Code:
import cv2
import numpy as np
image = cv2.imread("pin1.png")
original = image.copy()
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3, 3), 0)
thresh = cv2.threshold(
    blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 10))
dilate = cv2.dilate(thresh, kernel, iterations=2)
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    area = cv2.contourArea(c)
    if w/h > 2 and area > 10000:
        cv2.drawContours(dilate, [c], -1, (0, 0, 0), -1)
boxes = []
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    x, y, w, h = cv2.boundingRect(c)
    boxes.append([x, y, x+w, y+h])
boxes = np.asarray(boxes)
x = np.min(boxes[:, 0])
y = np.min(boxes[:, 1])
w = np.max(boxes[:, 2]) - x
h = np.max(boxes[:, 3]) - y
cv2.rectangle(image, (x, y), (x + w, y + h), (36, 255, 12), 2)
cropped_region = original[y:y+h, x:x+w]
cv2.namedWindow("original", cv2.WINDOW_NORMAL)
cv2.namedWindow("thresh", cv2.WINDOW_NORMAL)
cv2.namedWindow("dilate", cv2.WINDOW_NORMAL)
cv2.namedWindow("cropped_region", cv2.WINDOW_NORMAL)
cv2.imshow('original', original)
cv2.imshow('thresh', thresh)
cv2.imshow('dilate', dilate)
cv2.imshow('cropped_region', cropped_region)
cv2.imwrite("Cropped1.png", cropped_region)
cv2.waitKey()
I'm not sure how to go about making the contour filter search for the table instead of lines of text since I'm new to this. Any help would be appreciated.
EDIT: Here's my Expected Output
I didn't find a method for fully automatic detection.
However, I made use of template matching, added 4 templates to an array, and it worked well enough for me.
I followed this doc.
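A rough sketch of that approach (my own reconstruction, not the poster's code): loop over a list of template crops, run cv2.matchTemplate on the grayscale drawing, and mark every location whose score clears a threshold. The template file names and the 0.8 threshold are placeholders.

import cv2
import numpy as np

image = cv2.imread("pin1.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Hypothetical template file names; use crops of the diagrams you want to find.
template_files = ["template1.png", "template2.png", "template3.png", "template4.png"]

for name in template_files:
    template = cv2.imread(name, cv2.IMREAD_GRAYSCALE)
    h, w = template.shape
    res = cv2.matchTemplate(gray, template, cv2.TM_CCOEFF_NORMED)
    # keep every location whose match score clears the (placeholder) threshold
    for y, x in zip(*np.where(res >= 0.8)):
        cv2.rectangle(image, (x, y), (x + w, y + h), (36, 255, 12), 2)

cv2.imshow("matches", image)
cv2.waitKey()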

Match ROI or shape in several images for analysis

I have a lot of images to work with, but for this question I am going to use just the first and last image.
Image 1 of 83:
Image 83 of 83:
I ran image 1 through this code:
import numpy as np
import cv2
def process(filename, key):
    gwash = cv2.imread(filename)
    gwashBW = cv2.cvtColor(gwash, cv2.COLOR_BGR2GRAY)
    ret, thresh1 = cv2.threshold(gwashBW, 179, 255, cv2.THRESH_BINARY)
    kernel = np.ones((1,1), np.uint8)
    erosion = cv2.erode(thresh1, kernel, iterations=1)
    opening = cv2.morphologyEx(erosion, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    _, contours, _ = cv2.findContours(closing, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    areas = [cv2.contourArea(c) for c in contours]
    idx = np.argmax(areas)
    cnt = contours[idx]
    mask = np.zeros_like(gwash)
    cv2.drawContours(mask, contours, idx, (255,255,255), -1)
    out = np.zeros_like(gwash)
    out[mask == 255] = gwash[mask == 255]
    cv2.imwrite('img/out{}.jpg'.format(key), out)
    print(idx)
With this one I have gotten both better and worse results.
Here is the contour code I used, and the result:
import cv2
import numpy as np
import imutils
from matplotlib import pyplot as plt
def process(filename, key):
    image = cv2.imread(filename)
    resized = imutils.resize(image, width=600)
    ratio = image.shape[0] / float(resized.shape[0])
    blurred = cv2.GaussianBlur(resized, (5, 5), 0)
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)
    lab = cv2.cvtColor(resized, cv2.COLOR_BGR2LAB)
    thresh = cv2.threshold(gray, 180, 255, cv2.THRESH_BINARY)[1]
    imagem = cv2.bitwise_not(thresh)
    th4 = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
    th3 = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    gray_2 = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    gray_blur = cv2.GaussianBlur(gray, (15, 15), 0)
    thresh_2 = cv2.adaptiveThreshold(gray_blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 1)
    kernel = np.ones((1, 1), np.uint8)
    closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=500)
    cnts = cv2.findContours(closing.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    cnts2 = cv2.findContours(closing.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]
    cnts2 = cnts2[0] if imutils.is_cv2() else cnts2[1]
    # loop over the contours
    for c in cnts:
        # compute the center of the contour
        M = cv2.moments(c)
        if M["m00"] != 0:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
        else:
            cX, cY = 0, 0
        # multiply the contour (x, y)-coordinates by the resize ratio,
        # then draw the contours and the labeled center on the image
        c = c.astype("float")
        c *= ratio
        c = c.astype("int")
        cX *= ratio
        cY *= ratio
        cv2.drawContours(image, [c], -1, (0, 255, 0), 5)
        cv2.circle(image, (int(cX), int(cY)), 5, 300, 3)
    #r = 100.0 / image.shape[1]
    #dim = (100, int(image.shape[0] *r))
    #imageresized = cv2.resize(image,(2048,2048),dim,interpolation = cv2.INTER_AREA)
    cv2.imwrite('i/image_{}.jpg'.format(key), image)  # save the annotated image
    print('image_{}.jpg'.format(key))
So my question is: what would be the best approach to accurately find the canvas shape in all of the photos using Python?
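One possible sketch (an assumption added here, not a confirmed answer): treat the canvas as the largest bright region in the photo, binarize with Otsu so the threshold adapts to each image instead of the fixed 179 cutoff, take the largest external contour, and approximate its outline to get the corners.

import cv2
import numpy as np

def find_canvas(filename):
    # Sketch: assume the canvas is the largest bright region in the photo.
    image = cv2.imread(filename)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    # Otsu adapts the threshold to each photo
    thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

    cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    canvas = max(cnts, key=cv2.contourArea)

    # approximate the outline; a canvas should reduce to roughly 4 corners
    peri = cv2.arcLength(canvas, True)
    corners = cv2.approxPolyDP(canvas, 0.02 * peri, True)
    return canvas, corners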
