I want to detect the contours of an equipment label. The code below runs without errors, but it never quite detects the label's contour.
Original Image
Using this code:
import numpy as np
import cv2
import imutils  # resize image
import pytesseract  # convert image to string
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
# Read the image file
image = cv2.imread('Car Images/5.JPG')
# Resize the image - change width to 500
image = imutils.resize(image, width=500)
# Display the original image
cv2.imshow("Original Image", image)
cv2.waitKey(0)
# BGR to grayscale conversion (OpenCV loads images as BGR)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("1 - Grayscale Conversion", gray)
cv2.waitKey(0)
# Noise removal with iterative bilateral filter (removes noise while preserving edges)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
cv2.imshow("2 - Bilateral Filter", gray)
cv2.waitKey(0)
# Find Edges of the grayscale image
edged = cv2.Canny(gray, 170, 200)
cv2.imshow("3 - Canny Edges", edged)
cv2.waitKey(0)
# Find contours based on Edges
cnts, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x returns (contours, hierarchy)
# Create copy of original image to draw all contours
img1 = image.copy()
cv2.drawContours(img1, cnts, -1, (0,255,0), 3)
cv2.imshow("4- All Contours", img1)
cv2.waitKey(0)
# Sort contours by area in descending order and keep only the 30 largest
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:30]
NumberPlateCnt = None #we currently have no Number plate contour
# Top 30 Contours
img2 = image.copy()
cv2.drawContours(img2, cnts, -1, (0,255,0), 3)
cv2.imshow("5- Top 30 Contours", img2)
cv2.waitKey(0)
# loop over our contours to find the best possible approximate contour of number plate
idx = 7  # index used when naming the cropped image files
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    # print("approx = ", approx)
    if len(approx) == 4:  # Select the contour with 4 corners
        NumberPlateCnt = approx  # This is our approximate number-plate contour
        # Crop this contour's bounding box and store it in the Cropped Images folder
        x, y, w, h = cv2.boundingRect(c)  # bounding-box coordinates of the plate
        new_img = gray[y:y + h, x:x + w]  # crop the region from the grayscale image
        cv2.imwrite('Cropped Images-Text/' + str(idx) + '.png', new_img)  # store the crop
        idx += 1
        break
# Drawing the selected contour on the original image
#print(NumberPlateCnt)
cv2.drawContours(image, [NumberPlateCnt], -1, (0,255,0), 3)
cv2.imshow("Final Image With Number Plate Detected", image)
cv2.waitKey(0)
Cropped_img_loc = 'Cropped Images-Text/7.png'
cv2.imshow("Cropped Image ", cv2.imread(Cropped_img_loc))
# Use tesseract to convert the image into a string
text = pytesseract.image_to_string(Cropped_img_loc, lang='eng')
print("Equipment Number is :", text)
cv2.waitKey(0) #Wait for user input before closing the images displayed
Displayed output
Is there a better way to narrow down the contour to the equipment label?
Here is the code for your reference on github:
https://github.com/AjayAndData/Licence-plate-detection-and-recognition---using-openCV-only/blob/master/Car%20Number%20Plate%20Detection.py
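One refinement worth trying, sketched here as an untested suggestion (the aspect-ratio and area thresholds below are guesses to tune, not values from the original code): instead of accepting the first 4-corner contour, filter the candidates by shape before cropping.
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        x, y, w, h = cv2.boundingRect(approx)
        aspect = w / float(h)
        # hypothetical filter: labels are usually wider than tall and not tiny
        if 1.5 < aspect < 6 and cv2.contourArea(c) > 1000:
            NumberPlateCnt = approx
            break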
I think this code may help you
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('C:/Users/DELL/Desktop/download (5).png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
corners = cv2.goodFeaturesToTrack(gray,60,0.001,10)
corners = np.intp(corners)  # np.int0 was removed in NumPy 2.0; np.intp is the same alias
for i in corners:
    x, y = i.ravel()
    cv2.circle(img, (x, y), 0, 255, -1)  # mark each corner with a single blue (255, 0, 0) pixel
coord = np.where(np.all(img == (255, 0, 0), axis=-1))  # coordinates of the marked pixels
plt.imshow(img)
plt.show()
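To go from the marked corners to a crop of the label, a minimal follow-up sketch (this assumes the detected corners cluster on the label rather than on the background, which may not hold in busy scenes):
xs = corners[:, 0, 0]
ys = corners[:, 0, 1]
label_crop = img[ys.min():ys.max(), xs.min():xs.max()]  # bounding box of all corner points
plt.imshow(cv2.cvtColor(label_crop, cv2.COLOR_BGR2RGB))
plt.show()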
Related
The output ranks the 8 object contours from biggest to smallest (1-8). How can I perform a function on each contour individually based on its rank, for example masking only contour 1 (the biggest contour)?
import cv2
import numpy as np
import matplotlib.pyplot as plt
#Read image
image1 = cv2.imread('C:\\Users\\marnes\\Downloads\\starch-staining-patterns-in-Honeycrisp-applesx.png')
#make a copy of the source image
image1_copy = image1.copy()
#convert image to grayscale
gray = cv2.cvtColor(image1_copy, cv2.COLOR_BGR2GRAY)
#invert the color
gray_inverted = cv2.bitwise_not(gray)
#create binary threshold image
_, binary = cv2.threshold(gray_inverted, 120, 200, cv2.THRESH_BINARY)  # note: maxval 200 (not 255); any non-zero value is fine for findContours
#find all contours in the image
contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
#sort contours in decreasing order
sorted_contours = sorted(contours, key=cv2.contourArea, reverse= True)
# Draw largest 8 contours
for i, cont in enumerate(sorted_contours[:8], 1):
    # Draw the contour (wrap it in a list so it is drawn as a polyline, not as isolated points)
    cv2.drawContours(image1_copy, [cont], -1, (0, 255, 0), 3)
    # Display the position of the contour in the sorted list
    cv2.putText(image1_copy, str(i), (cont[0, 0, 0], cont[0, 0, 1] + 40),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 4)
#Display the results
cv2.imshow('image', image1_copy)
cv2.waitKey(0)
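For the masking-by-rank part of the question, a minimal sketch reusing sorted_contours from the code above (the rank-to-index mapping is the only assumption; rank 1 is the biggest contour):
rank = 1  # 1 = biggest contour
mask = np.zeros(image1.shape[:2], dtype=np.uint8)
cv2.drawContours(mask, [sorted_contours[rank - 1]], -1, 255, thickness=cv2.FILLED)  # fill the ranked contour
masked = cv2.bitwise_and(image1, image1, mask=mask)  # keep only pixels inside it
cv2.imshow('masked contour %d' % rank, masked)
cv2.waitKey(0)
The same index arithmetic works inside the drawing loop, so any per-rank function can be dispatched on i.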
I am using pytesseract to extract the text from images, but it doesn't work on images which are inclined. Consider the image given below:
Here is the code to extract text, which works fine on images that are not inclined.
import cv2
import numpy as np
import pytesseract
from google.colab.patches import cv2_imshow  # cv2_imshow below assumes a Colab environment
img = cv2.imread(<path_to_image>)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
ret3, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
def findSignificantContours(img, edgeImg):
    contours, hierarchy = cv2.findContours(edgeImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Find level 1 contours
    level1 = []
    for i, tupl in enumerate(hierarchy[0]):
        # Each array is in format (Next, Prev, First child, Parent)
        # Filter the ones without parent
        if tupl[3] == -1:
            tupl = np.insert(tupl, 0, [i])
            level1.append(tupl)
    significant = []
    tooSmall = edgeImg.size * 5 / 100  # if a contour isn't covering 5% of the total image area, it is probably too small
    for tupl in level1:
        contour = contours[tupl[0]]
        area = cv2.contourArea(contour)
        if area > tooSmall:
            significant.append([contour, area])
            # Draw the contour on the original image
            cv2.drawContours(img, [contour], 0, (0,255,0), 2, cv2.LINE_AA, maxLevel=1)
    significant.sort(key=lambda x: x[1])
    # print([x[1] for x in significant])
    mx = (0,0,0,0)  # biggest bounding box so far
    mx_area = 0
    for cont in contours:
        x,y,w,h = cv2.boundingRect(cont)
        area = w*h
        if area > mx_area:
            mx = x,y,w,h
            mx_area = area
    x,y,w,h = mx
    # Output to files
    roi = img[y:y+h, x:x+w]
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5,5), 0)
    ret3, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    cv2_imshow(thresh)
    text = pytesseract.image_to_string(roi)
    print(text)
    print("\n")
    print(pytesseract.image_to_string(thresh))
    print("\n")
    return [x[0] for x in significant]
edgeImg_8u = np.asarray(thresh, np.uint8)
# Find contours
significant = findSignificantContours(img, edgeImg_8u)
mask = thresh.copy()
mask[mask > 0] = 0
cv2.fillPoly(mask, significant, 255)
# Invert mask
mask = np.logical_not(mask)
#Finally remove the background
img[mask] = 0
Tesseract can't extract the text from this image. Is there a way I can rotate it to align the text perfectly and then feed it to pytesseract? Please let me know if my question requires any more clarity.
Here's a simple approach:
Obtain binary image. Load the image, convert to grayscale, Gaussian blur, then Otsu's threshold.
Find contours and sort for largest contour. We find contours then filter using contour area with cv2.contourArea() to isolate the rectangular contour.
Perform perspective transform. Next we perform contour approximation with cv2.approxPolyDP() to obtain the rectangular contour. Finally we utilize imutils.perspective.four_point_transform to actually obtain the bird's-eye view of the image.
Binary image
Result
To actually extract the text, take a look at
Use pytesseract OCR to recognize text from an image
Cleaning image for OCR
Detect text area in an image using python and opencv
Code
from imutils.perspective import four_point_transform
import cv2
# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread("1.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (7,7), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Find contours and sort for largest contour
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
displayCnt = None
for c in cnts:
    # Perform contour approximation
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        displayCnt = approx
        break
# Obtain bird's-eye view of image
warped = four_point_transform(image, displayCnt.reshape(4, 2))
cv2.imshow("thresh", thresh)
cv2.imshow("warped", warped)
cv2.waitKey()
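To actually run OCR on the warped result, a minimal follow-up might be (assuming pytesseract and a local Tesseract install; the --psm 6 config is a common starting point, not part of the original answer):
import pytesseract
text = pytesseract.image_to_string(warped, config='--psm 6')  # treat the crop as one uniform text block
print(text)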
To solve this problem you can also use the minAreaRect API in OpenCV, which gives you a minimum-area rotated rectangle along with its angle of rotation. You can then build the rotation matrix and apply warpAffine to straighten the image. I have also attached a Colab notebook you can play around with.
Colab notebook : https://colab.research.google.com/drive/1SKxrWJBOHhGjEgbR2ALKxl-dD1sXIf4h?usp=sharing
import cv2
from google.colab.patches import cv2_imshow
import numpy as np
def rotate_image(image, angle):
    image_center = tuple(np.array(image.shape[1::-1]) / 2)
    rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
    result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
    return result
img = cv2.imread("/content/sxJzw.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
mask = np.zeros((img.shape[0], img.shape[1]), dtype=np.uint8)  # uint8 so the masked image stays uint8
blur = cv2.GaussianBlur(gray, (5,5),0)
ret, thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2_imshow(thresh)
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
largest_contour = max(contours, key=cv2.contourArea)
binary_mask = cv2.drawContours(mask, [largest_contour], 0, 1, -1)
new_img = img * np.dstack((binary_mask, binary_mask, binary_mask))
minRect = cv2.minAreaRect(largest_contour)
rotate_angle = minRect[-1] if minRect[-1] < 0 else -minRect[-1]
new_img = rotate_image(new_img, rotate_angle)
cv2_imshow(new_img)
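As a follow-up sketch (not part of the original notebook), the straightened image can go straight into pytesseract:
import pytesseract
text = pytesseract.image_to_string(new_img)  # OCR the rotated, background-masked image
print(text)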
I want to find the car plate number to search for it in a database. Since Saudi plates are different, I face this problem:
The result of the code
My current approach is to search for the cross in OpenCV using edge detection. How can I find the cross and take the character below it (using contours and edge detection)?
import numpy as np
import pytesseract
from PIL import Image
import cv2
import imutils
import matplotlib.pyplot as plt
img = cv2.imread('M4.png')
img = cv2.resize(img, (820,680) )
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) #convert to grey scale
gray = cv2.blur(gray, (3,3))#Blur to reduce noise
edged = cv2.Canny(gray, 10, 100) #Perform Edge detection
# find contours in the edged image, keep only the largest
# ones, and initialize our screen contour
cnts = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:10]
screenCnt = None
# loop over our contours
for c in cnts:
    # approximate the contour
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.1 * peri, True)
    # if our approximated contour has four points, then
    # we can assume that we have found our screen
    if len(approx) == 4:
        screenCnt = approx
        break
if screenCnt is None:
    detected = 0
    print("No contour detected")
else:
    detected = 1
if detected == 1:
    cv2.drawContours(img, [screenCnt], -1, (0, 255, 0), 3)
# Masking the part other than the number plate
imgs = img
mask = np.zeros(gray.shape, np.uint8)
new_image = cv2.drawContours(mask, [screenCnt], 0, 255, -1)
new_image = cv2.bitwise_and(imgs, imgs, mask=mask)
# Now crop
(x, y) = np.where(mask == 255)
(topx, topy) = (np.min(x), np.min(y))
(bottomx, bottomy) = (np.max(x), np.max(y))
Cropped = gray[topx:bottomx+1, topy:bottomy+1]
#Read the number plate
text = pytesseract.image_to_string(Cropped, config='--psm 11')
print("Detected Number is:",text)
plt.title(text)
plt.subplot(1,4,1),plt.imshow(img,cmap = 'gray')
plt.title('Original'), plt.xticks([]), plt.yticks([])
plt.subplot(1,4,2),plt.imshow(gray,cmap = 'gray')
plt.title('gray'), plt.xticks([]), plt.yticks([])
plt.subplot(1,4,3),plt.imshow(Cropped,cmap = 'gray')
plt.title('Cropped'), plt.xticks([]), plt.yticks([])
plt.subplot(1,4,4),plt.imshow(edged,cmap = 'gray')
plt.title('edged'), plt.xticks([]), plt.yticks([])
plt.show()
# check database
# record the entry
cv2.waitKey(0)
cv2.destroyAllWindows()
Thanks for your help
Here's an approach:
Convert image to grayscale and Gaussian blur
Otsu's threshold to get a binary image
Find contours and sort contours from left-to-right to maintain order
Iterate through contours and filter for the bottom two rectangles
Extract ROI and OCR
After converting to grayscale and Gaussian blurring, we apply Otsu's threshold to get a binary image. We find contours then sort them using imutils.contours.sort_contours() with the left-to-right parameter. This step keeps the contours in order. From here we iterate through the contours and filter using these three conditions:
The contour must be larger than some specified threshold area (3000)
The width must be larger than the height
The center of each ROI must be in the bottom half of the image. We find the center of each contour and compare it to where it is located on the image.
If an ROI passes these filtering conditions, we extract the ROI using NumPy slicing and then throw it into Pytesseract. Here are the detected ROIs that pass the filter, highlighted in green:
Since we already have the bounding box, we extract each ROI
We throw each individual ROI into Pytesseract one at a time to construct our license plate string. Here's the result
License plate: 430SRU
Code
import cv2
import pytesseract
from imutils import contours
pytesseract.pytesseract.tesseract_cmd = r"C:\Program Files\Tesseract-OCR\tesseract.exe"
image = cv2.imread('1.png')
height, width, _ = image.shape
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5,5), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cnts, _ = contours.sort_contours(cnts, method="left-to-right")
plate = ""
for c in cnts:
    area = cv2.contourArea(c)
    x, y, w, h = cv2.boundingRect(c)
    center_y = y + h/2
    if area > 3000 and (w > h) and center_y > height/2:
        ROI = image[y:y+h, x:x+w]
        data = pytesseract.image_to_string(ROI, lang='eng', config='--psm 6')
        plate += data
print('License plate:', plate)
So, I have already detected all the edges of an object, but the problem is that I can't find the two points of each edge, that is, the starting point and the ending point with their coordinates.
Actually I am trying to find the measurements of an object but I am stuck at this problem. The image shows the ROI of the image.
import cv2
import numpy as np
from matplotlib import pyplot as plt
#Read Image of the Object
img = cv2.imread("C:\\Users\\Desktop\\Project\\captured.jpg")
cv2.imshow('Original Image', img)
cv2.waitKey(0)
#Convert Image To GrayScale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('Gray', gray)
cv2.waitKey(0)
#Binary Thresholding
ret, thresh = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY_INV)
cv2.imshow('Binary Image', thresh)
cv2.waitKey(0)
#Crop Image
cropped = thresh[150:640, 150:500]
cv2.imshow('Cropped Image', cropped)
cv2.waitKey(0)
#Edge Detection
edges = cv2.Canny(cropped, 100, 200)
cv2.imshow('Edges', edges)
cv2.waitKey(0)
#find contours
ctrs, hier = cv2.findContours(cropped, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#Sort Contours
sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0] + cv2.boundingRect(ctr)[1] * cropped.shape[1])
#ROI
for i, ctr in enumerate(sorted_ctrs):
    # Get bounding box
    x, y, w, h = cv2.boundingRect(ctr)
    # Getting ROI
    roi = cropped[y:y + h, x:x + w]
    # show ROI
    # cv2.imshow('segment no:' + str(i), roi)
    cv2.rectangle(cropped, (x, y), (x + w, y + h), (150, 0, 255), 2)
cv2.imshow('marked areas', cropped)
cv2.waitKey(0)
Original Image
These are 5 points and the five edges that I need with coordinates so I can calculate the distance between them for the measurement.
Harris Corner Output.
Try using Harris Corner Detection instead:
import cv2
import numpy as np
def find_centroids(dst):
    ret, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)
    dst = np.uint8(dst)
    # find centroids
    ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)
    # define the criteria to stop and refine the corners
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
    # note: uses the module-level `gray` image for sub-pixel refinement
    corners = cv2.cornerSubPix(gray, np.float32(centroids), (5, 5), (-1, -1), criteria)
    return corners
image = cv2.imread("corner.jpg")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
dst = cv2.cornerHarris(gray, 2, 3, 0.04)
dst = cv2.dilate(dst, None)
# Threshold for an optimal value, it may vary depending on the image.
# image[dst > 0.01*dst.max()] = [0, 0, 255]
# Get coordinates
corners= find_centroids(dst)
# To draw the corners
for corner in corners:
    image[int(corner[1]), int(corner[0])] = [0, 0, 255]  # mark each corner with a single red pixel
cv2.imshow('dst', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
You might need to fine tune the parameters for cornerHarris.
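Once you have the corner coordinates, the distances the question asks about are straightforward. A minimal sketch in pixel units (pairing corners into the five edges is left to the specific geometry, and note the first connected-components centroid is the background):
import itertools
points = [(float(c[0]), float(c[1])) for c in corners]
for (x1, y1), (x2, y2) in itertools.combinations(points, 2):
    dist = ((x2 - x1)**2 + (y2 - y1)**2) ** 0.5  # Euclidean distance in pixels
    print(f"({x1:.0f}, {y1:.0f}) -> ({x2:.0f}, {y2:.0f}): {dist:.1f} px")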
I'm trying to use Tesseract in the code below to extract the two lines of the image. I tried to improve the image quality, but even so it didn't work.
Can anyone help me?
from PIL import Image, ImageEnhance, ImageFilter
import pytesseract
img = Image.open(r'C:\ocr\test00.jpg')
new_size = tuple(4*x for x in img.size)
img = img.resize(new_size, Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
img.save(r'C:\\test02.jpg', 'JPEG')
print( pytesseract.image_to_string( img ) )
Given the comment by @barny I don't know if this will work, but you can try the code below. I created a script that selects the display area and warps it into a straight image, then thresholds to a black-and-white mask of the characters and cleans the result up a bit.
See if it improves recognition. If it does, also look at the intermediate stages so you understand everything that happens.
Update: It seems Tesseract prefers black text on a white background, so I inverted and dilated the result.
Result:
Updated result:
Code:
import numpy as np
import cv2
# load image
image = cv2.imread('disp.jpg')
# create grayscale
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# perform threshold
retr, mask = cv2.threshold(gray_image, 190, 255, cv2.THRESH_BINARY)
# findcontours
contours, hier = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # OpenCV 4.x; OpenCV 3.x returns (image, contours, hierarchy)
# select the largest contour
largest_area = 0
for cnt in contours:
    if cv2.contourArea(cnt) > largest_area:
        cont = cnt
        largest_area = cv2.contourArea(cnt)
# find the rectangle (and the cornerpoints of that rectangle) that surrounds the contours / photo
rect = cv2.minAreaRect(cont)
box = cv2.boxPoints(rect)
box = np.intp(box)  # np.int0 was removed in NumPy 2.0; np.intp is the same alias
#### Warp image to square
# assign cornerpoints of the region of interest
pts1 = np.float32([box[2],box[3],box[1],box[0]])
# provide new coordinates of cornerpoints
pts2 = np.float32([[0,0],[500,0],[0,110],[500,110]])
# determine and apply transformationmatrix
M = cv2.getPerspectiveTransform(pts1,pts2)
tmp = cv2.warpPerspective(image,M,(500,110))
# create grayscale
gray_image2 = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
# perform threshold
retr, mask2 = cv2.threshold(gray_image2, 160, 255, cv2.THRESH_BINARY_INV)
# remove noise / close gaps
kernel = np.ones((5,5),np.uint8)
result = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernel)
#draw rectangle on original image
cv2.drawContours(image, [box], 0, (255,0,0), 2)
# dilate result to make characters more solid
kernel2 = np.ones((3,3),np.uint8)
result = cv2.dilate(result,kernel2,iterations = 1)
#invert to get black text on white background
result = cv2.bitwise_not(result)
#show image
cv2.imshow("Result", result)
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
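From there, feeding the cleaned-up result into Tesseract could look like this (a sketch, not part of the original answer; assumes pytesseract and a local Tesseract install):
import pytesseract
text = pytesseract.image_to_string(result, config='--psm 6')  # black text on white, treated as one block
print(text)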