I am interested in finding the edges of the road. The output image should have only the edges marked.
Here is one of my input images:
But the edges in the output are either distorted or have a lot of noise.
Here is its output:
I have tried applying the watershed algorithm, but it does not detect the roads properly.
Here is my code:
import cv2
import numpy as np
img = cv2.imread('road2.jpg',0)
ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
kernel = np.ones((5,5),np.uint8)
erosion = cv2.erode(thresh1,kernel,iterations = 1)
#Removing noise from image
blur = cv2.blur(img,(5,5))
#finding edges using edge detection
edges = cv2.Canny(blur, 100 ,200)
laplacian = cv2.Laplacian(edges, cv2.CV_8UC1)
sobely = cv2.Sobel(laplacian,cv2.CV_8UC1, 0, 1, ksize=5)
im2, contours, hierarchy = cv2.findContours(sobely,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
frame = cv2.drawContours(im2, contours, -1, (255,0,0), 3)
cv2.imshow('window',frame)
cv2.waitKey(0)
cv2.destroyAllWindows()
What should I do in order to mark only the edges? I want only the edges in the output because I would later need them to find the middle lane of the road.
The result can be seen here. It is not perfect, but it is the best I could do. The idea is taken from here.
The code works for this image on the assumption that:
If there are no cars on the road, then the two markings (left and right) will meet at a vanishing point on the horizon and form a triangle. So, I keep only the largest contour that can be approximated by a triangle.
import cv2
import numpy as np
img = cv2.imread('road2.jpg',0)
ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
kernel = np.ones((5,5),np.uint8)
erosion = cv2.erode(thresh1,kernel,iterations = 1)
#Removing noise from image
blur = cv2.blur(img,(5,5))
#finding edges using edge detection
edges = cv2.Canny(blur, 100 ,200)
laplacian = cv2.Laplacian(edges, cv2.CV_8UC1)
sobely = cv2.Sobel(laplacian,cv2.CV_8UC1, 0, 1, ksize=5)
# Do a dilation and erosion to accentuate the triangle shape
dilated = cv2.dilate(sobely,kernel,iterations = 1)
erosion = cv2.erode(dilated,kernel,iterations = 1)
im2, contours, hierarchy = cv2.findContours(erosion,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
#keep 10 largest contours
cnts = sorted(contours, key = cv2.contourArea, reverse = True)[:10]
screenCnt = None
for c in cnts:
    # approximate the contour
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.05 * peri, True)
    # if our approximated contour has three points, then
    # it must be the road markings
    if len(approx) == 3:
        screenCnt = approx
        break
cv2.drawContours(img, [screenCnt], -1, (0, 255, 0), 3)
cv2.imshow("Road markings", img)
cv2.waitKey(0)
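Since the stated end goal is the middle lane, here is a hypothetical follow-up (an assumption about the triangle's layout, not something the answer above claims): treat the apex of screenCnt as the vanishing point and draw a line from it to the midpoint of the base.
# hypothetical sketch: apex = vanishing point, midpoint of base = road centre
# img is grayscale here, so a scalar colour is used
if screenCnt is not None:
    pts = screenCnt.reshape(3, 2)
    apex = pts[np.argmin(pts[:, 1])]        # topmost vertex
    base = pts[np.argsort(pts[:, 1])[1:]]   # the two lower vertices
    mid_base = base.mean(axis=0).astype(int)
    cv2.line(img, tuple(int(v) for v in apex), tuple(int(v) for v in mid_base), 255, 2)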
I've been using OpenCV to approximate an abnormal shape as a polygon. It runs successfully, but it would be nice to know what it is counting as a side. I would like to plot the vertices of this polygonal approximation. How would I go about doing that?
Here's my code:
import cv2
import numpy as np
# load image
img = cv2.imread('tinytri.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# apply canny edge detection
edges = cv2.Canny(gray, 60, 160)
# apply morphology close
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
morph = cv2.morphologyEx(edges, cv2.MORPH_CLOSE, kernel)
# get contours and keep largest
contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
big_contour = max(contours, key=cv2.contourArea)
# draw contour
contour = img.copy()
cv2.drawContours(contour, [big_contour], 0, (0,0,255), 1)
# get number of vertices (sides)
peri = cv2.arcLength(big_contour, True)
approx = cv2.approxPolyDP(big_contour, 0.01 * peri, True)
print('number of sides:',len(approx))
Something like this should work, drawing a dot at each vertex that approxPolyDP kept:
# approx holds the vertices of the polygonal approximation
for vertex in approx:
    x, y = vertex[0]
    contour = cv2.circle(contour, (int(x), int(y)), 2, (255, 0, 0), 2)
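To see which vertex is which, the loop can also number them; a small extension assuming the approx and contour variables from the code above:
# label each approximated vertex with its index
for i, vertex in enumerate(approx):
    x, y = vertex[0]
    cv2.putText(contour, str(i), (int(x) + 5, int(y) - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1)
cv2.imshow("vertices", contour)
cv2.waitKey(0)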
I am trying to create a bubble sheet scanner, but unfortunately it does not detect the sheet if the picture is captured with a mobile phone, maybe due to the thresholding and edge detection, or due to the resolution of the picture. I've had little to no progress solving this issue. Any help or tips on how to solve it would be appreciated.
Here is the code:
from imutils.perspective import four_point_transform
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
                help="path to the input image")
args = vars(ap.parse_args())
ANSWER_KEY = {0: 1, 1: 4, 2: 0, 3: 3, 4: 1}
# load the image, convert it to grayscale, blur it
# slightly, then find edges
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(blurred, 75, 200)
# find contours in the edge map, then initialize
# the contour that corresponds to the document
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
docCnt = None
# ensure that at least one contour was found
if len(cnts) > 0:
    # sort the contours according to their size in
    # descending order
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
    # loop over the sorted contours
    for c in cnts:
        # approximate the contour
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        # if our approximated contour has four points,
        # then we can assume we have found the paper
        if len(approx) == 4:
            docCnt = approx
            break
# apply a four point perspective transform to both the
# original image and grayscale image to obtain a top-down
# birds eye view of the paper
paper = four_point_transform(image, docCnt.reshape(4, 2))
warped = four_point_transform(gray, docCnt.reshape(4, 2))
# apply Otsu's thresholding method to binarize the warped
# piece of paper
thresh = cv2.threshold(warped, 0, 255,
                       cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
# find contours in the thresholded image, then initialize
# the list of contours that correspond to questions
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
questionCnts = []
# loop over the contours
for c in cnts:
    # compute the bounding box of the contour, then use the
    # bounding box to derive the aspect ratio
    (x, y, w, h) = cv2.boundingRect(c)
    ar = w / float(h)
    # in order to label the contour as a question, region
    # should be sufficiently wide, sufficiently tall, and
    # have an aspect ratio approximately equal to 1
    if w >= 20 and h >= 20 and ar >= 0.9 and ar <= 1.1:
        questionCnts.append(c)
# sort the question contours top-to-bottom, then initialize
# the total number of correct answers
questionCnts = contours.sort_contours(questionCnts,
                                      method="top-to-bottom")[0]
correct = 0
# each question has 5 possible answers, so loop over the
# questions in batches of 5
for (q, i) in enumerate(np.arange(0, len(questionCnts), 5)):
    # sort the contours for the current question from
    # left to right, then initialize the index of the
    # bubbled answer
    cnts = contours.sort_contours(questionCnts[i:i + 5])[0]
    bubbled = None
    # loop over the sorted contours
    for (j, c) in enumerate(cnts):
        # construct a mask that reveals only the current
        # "bubble" for the question
        mask = np.zeros(thresh.shape, dtype="uint8")
        cv2.drawContours(mask, [c], -1, 255, -1)
        # apply the mask to the thresholded image, then
        # count the number of non-zero pixels in the
        # bubble area
        mask = cv2.bitwise_and(thresh, thresh, mask=mask)
        total = cv2.countNonZero(mask)
        # if the current total has a larger number of total
        # non-zero pixels, then we are examining the currently
        # bubbled-in answer
        if bubbled is None or total > bubbled[0]:
            bubbled = (total, j)
    # initialize the contour color and the index of the
    # *correct* answer
    color = (0, 0, 255)
    k = ANSWER_KEY[q]
    # check to see if the bubbled answer is correct
    if k == bubbled[1]:
        color = (0, 255, 0)
        correct += 1
    # draw the outline of the correct answer on the test
    cv2.drawContours(paper, [cnts[k]], -1, color, 3)
# grab the test taker's score
score = (correct / 5.0) * 100
print("[INFO] score: {:.2f}%".format(score))
cv2.putText(paper, "{:.2f}%".format(score), (10, 30),
            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 2)
cv2.imshow("Original", image)
cv2.imshow("Exam", paper)
cv2.waitKey(0)
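One direction worth trying, offered as a sketch rather than a definitive fix: phone photos are much larger than scans, so the fixed Canny thresholds and the 20-pixel bubble-size test stop matching the image. Downscaling to a known height before edge detection and closing small gaps in the edge map often makes the document contour detectable again (the 500-pixel height and the kernel size are assumptions to tune):
import cv2
import imutils
image = cv2.imread("omr_photo.jpg")  # hypothetical phone capture
# work at a fixed scale so size-based thresholds stay meaningful
small = imutils.resize(image, height=500)
gray = cv2.cvtColor(small, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
edged = cv2.Canny(blurred, 75, 200)
# close small breaks in the paper outline so it forms one contour
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)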
I am tasked with building a license plate detection system, and my code does not work if the plate has the same colour as the paint of the car (background).
Take a look at this picture below.
I have tried a variety of edge detection techniques, and my finding is that they hardly work.
Here is my image processing pipeline:
Extract the gray channel from the image.
Reduce noise with Iterative Bilateral Filtering
Detect edges with Adaptive Thresholding
Dilate the edges slightly
Locate contours based on some heuristics.
The edge detection part performed miserably around the plate region.
The pipeline works well, and I am able to detect license plates if the car has a different paint colour than the plate.
Code
def rectangleness(hull):
    rect = cv2.boundingRect(hull)
    rectPoints = np.array([[rect[0], rect[1]],
                           [rect[0] + rect[2], rect[1]],
                           [rect[0] + rect[2], rect[1] + rect[3]],
                           [rect[0], rect[1] + rect[3]]])
    intersection_area = cv2.intersectConvexConvex(np.array(rectPoints), hull)[0]
    rect_area = cv2.contourArea(rectPoints)
    rectangleness = intersection_area / rect_area
    return rectangleness
def preprocess(image):
    image = imutils.resize(image, 1000)
    # Attenuate shadows by using the V channel instead of converting to gray directly
    imgHSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    _, _, gray = cv2.split(imgHSV)
    # Reduce noise while preserving edges with Iterative Bilateral Filtering
    blur = cv2.bilateralFilter(gray, 11, 6, 6)
    # Detect edges by thresholding
    edge = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 5)
    # Dilate edges; the kernel cannot be too big, as some fonts are very close to the edge of the plates
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
    dilated = cv2.dilate(edge, kernel)
    # Detect contours (OpenCV 3.x return signature)
    edge, contours, _ = cv2.findContours(dilated, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # Loop through contours and select the most probable ones
    contours = sorted(contours, key=cv2.contourArea, reverse=True)[:10]
    for contour in contours:
        perimeter = cv2.arcLength(contour, closed=True)
        approximate = cv2.approxPolyDP(contour, 0.02 * perimeter, closed=True)
        if len(approximate) == 4:
            (x, y, w, h) = cv2.boundingRect(approximate)
            whRatio = w / h
            # Heuristics:
            # 1. Width of plate should be at least 2x greater than height
            # 2. Width of contour should be more than 5 (eliminate false positives)
            # 3. Height must not be too small
            # 4. Polygon must resemble a rectangle
            if (2.0 < whRatio < 6.0) and (w > 5.0) and (h > 20):
                hull = cv2.convexHull(approximate, returnPoints=True)
                if rectangleness(hull) > 0.75:
                    print("X Y {} {}".format(x, y))
                    print("Height: {}".format(h))
                    print("Width : {}".format(w))
                    print("Ratio : {}\n".format(w / h))
                    cv2.drawContours(image, [approximate], -1, (0, 255, 0), 2)
    cv2.imshow("Edge", edge)
    cv2.imshow("Frame", image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
You can use cv2.morphologyEx to make the plate region more visible. The next step is to find contours and set reasonable conditions to extract the contour that contains the plate. If you want, you can have a look at this github repository, where my friend and I show detailed steps for license plate detection and recognition.
import cv2
import numpy as np
img = cv2.imread("a.png")
imgBlurred = cv2.GaussianBlur(img, (7, 7), 0)
gray = cv2.cvtColor(imgBlurred, cv2.COLOR_BGR2GRAY) # convert to gray
sobelx = cv2.Sobel(gray, cv2.CV_8U, 1, 0, ksize=3) # sobelX to get the vertical edges
ret,threshold_img = cv2.threshold(sobelx, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
morph_img_threshold = threshold_img.copy()
element = cv2.getStructuringElement(shape=cv2.MORPH_RECT, ksize=(22, 3))
cv2.morphologyEx(src=threshold_img, op=cv2.MORPH_CLOSE, kernel=element,
                 dst=morph_img_threshold)
cv2.imshow("img", img)
cv2.imshow("sobelx", sobelx)
cv2.imshow("morph_img_threshold", morph_img_threshold)
cv2.waitKey()
cv2.destroyAllWindows()
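The snippet stops after the closing operation; a minimal sketch of the contour filtering described above (the size and aspect-ratio bounds are assumptions to tune, and the two-value findContours return assumes OpenCV 4.x):
# find candidate contours in the closed edge map
contours, _ = cv2.findContours(morph_img_threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in sorted(contours, key=cv2.contourArea, reverse=True)[:10]:
    x, y, w, h = cv2.boundingRect(c)
    # plates are wide, flat rectangles
    if h > 10 and 2.0 < w / float(h) < 6.0:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imshow("plate candidates", img)
cv2.waitKey()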
I've got a shape like a speech bubble, and I only want to detect the ellipse of this shape, like the green-encircled one in the image.
I tried morphological closing, but certain parts of the bubble were also removed. I used a 20x20 kernel, and the shape became more rectangular. Maybe I should change the kernel to be more like this:
0 1 0
1 1 1
0 1 0
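For reference, that cross-shaped kernel is exactly what OpenCV's built-in cross structuring element produces:
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
# array([[0, 1, 0],
#        [1, 1, 1],
#        [0, 1, 0]], dtype=uint8)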
I also tried to draw a convex hull, but it also has no effect, and an inner convex hull is not possible. Here is my code for drawing a convex hull:
for i in range(max_index):
    hull = cv2.convexHull(contours[i])
    cv2.drawContours(image, [hull], 0, (0, 255, 0), 2)
I retrieved the contours with the parameters cv2.RETR_EXTERNAL and cv2.CHAIN_APPROX_NONE
This is the best I was able to get:
It is not the most clever way to do it. What I am doing here is actually simple, despite the verbose code.
First, I take the gray image, blur it heavily, and, in the same way you tried, apply a threshold and find the contours. I then take the biggest contour and find the ellipse that fits it with fitEllipse. This is all in the getEllipse function.
In this first round, the ellipse will be skewed because the tail gets in the way. So, I use this not-so-good ellipse to process the original image and give it another try.
The grayEllipse function filters an image by an ellipse. So, I use the ellipse from the first try to process the original image and highlight the points that are inside the first ellipse. This image is the input for the second round.
By repeating the process, the final ellipse from the second pass is much less skewed.
Here is the code:
import cv2
import numpy as np
def getEllipse(imgray):
    ret, thresh = cv2.threshold(imgray, 20, 255, cv2.THRESH_BINARY|cv2.THRESH_OTSU)
    _, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    maxArea = 0
    best = None
    for contour in contours:
        area = cv2.contourArea(contour)
        if area > maxArea:
            maxArea = area
            best = contour
    ellipse = cv2.fitEllipse(best)
    el = np.zeros(imgray.shape)
    cv2.ellipse(el, ellipse, (255,255,255), -1)
    return el

def grayEllipse(el, img):
    el = np.dstack((el,el,el))
    el = el*img
    el = el/255
    el = el.astype('uint8')
    imgray = cv2.cvtColor(el, cv2.COLOR_BGR2LAB)[...,0]
    return imgray
image = cv2.imread("./baloon.png", cv2.IMREAD_COLOR)
img = image.copy()
imgray = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)[...,0]
imgray = cv2.GaussianBlur(imgray, (79,79), 0)
el = getEllipse(imgray)
imgray = grayEllipse(el, img.copy())
imgray = cv2.GaussianBlur(imgray, (11,11), 0)
el = getEllipse(imgray)
imgray = grayEllipse(el, img.copy())
ret, thresh = cv2.threshold(imgray, 20, 255, cv2.THRESH_BINARY|cv2.THRESH_OTSU)
_, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
maxArea = 0
best = None
for contour in contours:
    area = cv2.contourArea(contour)
    if area > maxArea:
        maxArea = area
        best = contour
ellipse = cv2.fitEllipse(best)
cv2.ellipse(image, ellipse, (0,255,0),3)
while True:
    cv2.imshow("result", image)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
I have the following image:
I would like to remove the distortion using Python OpenCV. Is this possible?
I am trying to follow the tutorial at Sudoku Solver, but I get a blob with array dimensions 7,1,2 instead of 4,1,2, which I assume is due to the distortion in my image. My code so far is:
import cv2
import numpy as np
def rectify(h):
    h = h.reshape((4,2))
    hnew = np.zeros((4,2), dtype=np.float32)
    add = h.sum(1)
    hnew[0] = h[np.argmin(add)]
    hnew[2] = h[np.argmax(add)]
    diff = np.diff(h, axis=1)
    hnew[1] = h[np.argmin(diff)]
    hnew[3] = h[np.argmax(diff)]
    return hnew
img = cv2.imread('blokkies.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# GaussianBlur(src, ksize, sigma1[, dst[, sigma2[, borderType]]]) -> dst
gray = cv2.GaussianBlur(gray, (5, 5), 0)
# adaptiveThreshold(src, maxValue, adaptiveMethod, thresholdType, blockSize, C[, dst]) -> dst
thresh = cv2.adaptiveThreshold(gray, 255, 1, 1, 11, 2)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Biggest blob is the square we are looking for. Results in 4x1x2 array.
# First row is the TOP-RIGHT corner. Second row is the TOP-LEFT corner. Third row is the BOTTOM-LEFT corner. Finally, fourth one is the BOTTOM-RIGHT corner.
# The problem is that, there is no guarantee that for next image, the corners found out will be in this same order.
# Change to uniform order with rectify. [TOP-LEFT, TOP-RIGHT, BOTTOM-RIGHT, BOTTOM-LEFT]
biggest = None
max_area = 0
for i in contours:
    area = cv2.contourArea(i)
    if area > 100:
        peri = cv2.arcLength(i, True)
        approx = cv2.approxPolyDP(i, 0.02 * peri, True)
        if area > max_area and len(approx) == 4:
            biggest = approx
            max_area = area
        print(approx.shape)
approx = rectify(approx)
h = np.array([ [0,0],[449,0],[449,449],[0,449] ],np.float32)
cv2.imshow('image', h)
cv2.waitKey(0)
cv2.destroyAllWindows()
Turns out the fix, as in the tutorial, was to use:
approx = rectify(biggest)
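For completeness, a sketch of how the rectified corners feed the perspective warp, following the same tutorial's pattern (assuming biggest was found by the loop above):
approx = rectify(biggest)
h = np.array([[0, 0], [449, 0], [449, 449], [0, 449]], np.float32)
# map the detected quadrilateral onto a 450x450 square
retval = cv2.getPerspectiveTransform(approx, h)
warp = cv2.warpPerspective(gray, retval, (450, 450))
cv2.imshow('warp', warp)
cv2.waitKey(0)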
Also, there is this related question:
Removing curve