I have read a few articles and watched some videos on lane detection, and decided to learn how it works.
I'm completely new to OpenCV, so kindly forgive the naive doubts.
I took the Udacity open-source lane detection project, but I'm not able to execute the code: I'm getting a ValueError that I don't understand.
Code:
import numpy as np
import cv2
import math
import matplotlib.pyplot as plt
def grayscale(img):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
# defining a blank mask to start with
mask = np.zeros_like(img)
# defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
# filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
# returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=10):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
imshape = img.shape
left_x1 = []
left_x2 = []
right_x1 = []
right_x2 = []
y_min = img.shape[0]
y_max = int(img.shape[0] * 0.611)
for line in lines:
for x1, y1, x2, y2 in line:
if ((y2 - y1) / (x2 - x1)) < 0:
mc = np.polyfit([x1, x2], [y1, y2], 1)
left_x1.append(np.int(np.float((y_min - mc[1])) / np.float(mc[0])))
left_x2.append(np.int(np.float((y_max - mc[1])) / np.float(mc[0])))
# cv2.line(img, (xone, imshape[0]), (xtwo, 330), color, thickness)
elif ((y2 - y1) / (x2 - x1)) > 0:
mc = np.polyfit([x1, x2], [y1, y2], 1)
right_x1.append(np.int(np.float((y_min - mc[1])) / np.float(mc[0])))
right_x2.append(np.int(np.float((y_max - mc[1])) / np.float(mc[0])))
# cv2.line(img, (xone, imshape[0]), (xtwo, 330), color, thickness)
l_avg_x1 = np.int(np.nanmean(left_x1))
l_avg_x2 = np.int(np.nanmean(left_x2))
r_avg_x1 = np.int(np.nanmean(right_x1))
r_avg_x2 = np.int(np.nanmean(right_x2))
# print([l_avg_x1, l_avg_x2, r_avg_x1, r_avg_x2])
cv2.line(img, (l_avg_x1, y_min), (l_avg_x2, y_max), color, thickness)
cv2.line(img, (r_avg_x1, y_min), (r_avg_x2, y_max), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len,
maxLineGap=max_line_gap)
line_img = np.zeros(img.shape, dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
def process_image(img):
img_test = grayscale(img)
img_test = gaussian_blur(img_test, 7)
img_test = canny(img_test, 50, 150)
imshape = img.shape
vertices = np.array([[(100, imshape[0]), (400, 330), (600, 330), (imshape[1], imshape[0])]], dtype=np.int32)
img_test = region_of_interest(img_test, vertices)
rho = 2 # distance resolution in pixels of the Hough grid
theta = np.pi / 180 # angular resolution in radians of the Hough grid
threshold = 55 # minimum number of votes (intersections in Hough grid cell)
min_line_length = 40 # minimum number of pixels making up a line
max_line_gap = 100 # maximum gap in pixels between connectable line segments
line_image = np.copy(img) * 0 # creating a blank to draw lines on
img_test = hough_lines(img_test, rho, theta, threshold, min_line_length, max_line_gap)
return img_test
img = cv2.imread("sy1.jpg")
res = process_image(img)
plt.imshow(res)
The Resulting Error:
/Users/ViditShah/anaconda/envs/py27/bin/python /Users/ViditShah/Downloads/untitled1/gist.py
/Users/ViditShah/Downloads/untitled1/gist.py:85: RuntimeWarning: Mean of empty slice
r_avg_x1 = np.int(np.nanmean(right_x1))
Traceback (most recent call last):
File "/Users/ViditShah/Downloads/untitled1/gist.py", line 122, in <module>
res = process_image(img)
File "/Users/ViditShah/Downloads/untitled1/gist.py", line 117, in process_image
img_test = hough_lines(img_test, rho, theta, threshold, min_line_length, max_line_gap)
File "/Users/ViditShah/Downloads/untitled1/gist.py", line 100, in hough_lines
draw_lines(line_img, lines)
File "/Users/ViditShah/Downloads/untitled1/gist.py", line 85, in draw_lines
r_avg_x1 = np.int(np.nanmean(right_x1))
ValueError: cannot convert float NaN to integer
Process finished with exit code 1
I'm using Python 2.7.
Please guide me.
Yours sincerely,
Vidit Shah
One possibility is division by zero producing NaNs when you calculate the gradient; try filtering out segments where x1 == x2. This source of error will only crop up rarely, though.
The more important issue is that the threshold in the Hough transform (55) is set too high for the structure of your code: if the Hough stage identifies no lines on one side, the corresponding list stays empty, np.nanmean returns NaN, and the conversion to int fails.
You can get around this by either lowering the threshold (losing some quality in your line detection for the cases that do work) or adjusting something else in your code, for example adding error handling (as in the sketch below) or pre-processing the image differently so that the Hough step always outputs lines.
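For illustration, here is a minimal guarded version of draw_lines. It is a sketch, not your exact pipeline: the slope buckets and the y_max factor are copied from your code, while the guards (returning early when HoughLinesP finds nothing, skipping vertical segments, and only drawing a side that actually collected segments) are the additions:
def draw_lines_guarded(img, lines, color=(255, 0, 0), thickness=10):
    y_min = img.shape[0]
    y_max = int(img.shape[0] * 0.611)
    left_x1, left_x2, right_x1, right_x2 = [], [], [], []
    if lines is None:
        return  # HoughLinesP found no lines; nothing to draw
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x1 == x2:
                continue  # vertical segment: slope undefined, would divide by zero
            m, c = np.polyfit([x1, x2], [y1, y2], 1)
            if m < 0:   # negative slope -> left lane line
                left_x1.append(int((y_min - c) / m))
                left_x2.append(int((y_max - c) / m))
            elif m > 0: # positive slope -> right lane line
                right_x1.append(int((y_min - c) / m))
                right_x2.append(int((y_max - c) / m))
    if left_x1 and left_x2:   # draw a side only if it collected segments
        cv2.line(img, (int(np.mean(left_x1)), y_min),
                 (int(np.mean(left_x2)), y_max), color, thickness)
    if right_x1 and right_x2:
        cv2.line(img, (int(np.mean(right_x1)), y_min),
                 (int(np.mean(right_x2)), y_max), color, thickness)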
I'm trying to parse an array of coordinates (which represents a closed shape) into a set of lines and arcs in Python (I'm using OpenCV for edge detection).
What I'm trying to achieve, briefly, is to turn the coordinates that draw this example image
Example shape
into this set of lines and arcs
Set of arcs
Obviously, the arcs are not as well defined as in the image; they come out as "pixelated" arcs.
Is there any utility that can help with this kind of processing?
Let's load the image as grayscale, threshold it to black and white and invert colors, erode it a little, use Canny edge detection, then Hough lines detection (mostly just following this tutorial):
import cv2
import numpy as np
import math
import random
src = cv2.imread("s34I0.png", cv2.IMREAD_GRAYSCALE)
thr, bw = cv2.threshold(src, 128, 255, cv2.THRESH_BINARY_INV)
eroded = cv2.erode(bw, np.ones((5, 5), np.uint8))
canny = cv2.Canny(src, 50, 200, None, 3)
lines = cv2.HoughLines(canny, 1, np.pi / 180, 150, None, 0, 0)
lines = [list(x[0]) for x in lines]
def draw_line(img, line, color, thickness):
rho, the = line
a = math.cos(the)
b = math.sin(the)
x0 = a * rho
y0 = b * rho
pt1 = (int(x0 + 1000 * (-b)), int(y0 + 1000 * (a)))
pt2 = (int(x0 - 1000 * (-b)), int(y0 - 1000 * (a)))
cv2.line(img, pt1, pt2, color, thickness, cv2.LINE_AA)
We have, unfortunately, two parallel lines detected for every straight segment. Let's replace each such pair of close parallel lines with their mid-line:
lines_ = []
def midline(line1, line2):
return [(x + y) / 2 for x, y in zip(line1, line2)]
used = []
for l1 in lines:
if l1 in used: continue
for l2 in lines:
if l2 in used: continue
if l1 is l2: continue
if (abs(l1[0] - l2[0]) < 20) and (abs(l1[1] - l2[1]) < 1):
lines_.append(midline(l1, l2))
used.append(l1)
used.append(l2)
            break  # this l1 has been paired; move on to the next l1
lines = lines_
Now, let's create binary masks for our straight lines. For every straight line, we create a temporary binary black image (all the pixel values are zeros), then draw the line over it as a thick white line (same or slightly thicker than the lines on the original image). Then we logical-AND the original thresholded image and the temporary line image, so we get the pixels common for both - that is the binary mask for the line.
line_masks = []
for i, line in enumerate(lines):
line_img = np.zeros(bw.shape)
draw_line(line_img, line, 255, 10) # 10 pixel thick white line
common = np.logical_and((bw != 0), (line_img != 0))
line_masks.append(common)
Remove the masked pixels from the original black and white image, so only the arcs should remain. Unfortunately, some garbage remains, because the lines in the original image aren't perfect. To get rid of that, we could've drawn our Hough lines thicker (say, 15, or 20 pixels instead of 10), but then they take too much of the arc pixels. Instead, we could erode-dilate the resulting image a little, to get rid of the junk:
for lm in line_masks:
bw[lm] = 0
bw = cv2.erode(bw, np.ones((5, 5), np.uint8))
bw = cv2.dilate(bw, np.ones((5, 5), np.uint8))
Let's create binary masks for the arcs. There's no function in OpenCV to detect arcs, but for this case we could use detection of connected components:
arc_masks = []
num, labels = cv2.connectedComponents(bw)
for i in range(1, num):
arc_masks.append(labels == i)
Now that we have the masks, let's visualize them by drawing over the original image. Lines are going to have random shades of green, arcs - of blue:
line_colors = [(0, random.randint(127, 255), 0) for _ in line_masks]
arc_colors = [(random.randint(127, 255), 0, 0) for _ in arc_masks]
dst = cv2.imread("s34I0.png")
for color, mask in zip(line_colors, line_masks):
dst[mask] = color
for color, mask in zip(arc_colors, arc_masks):
dst[mask] = color
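If you also need each arc's geometric parameters (center and radius) rather than just its mask — an assumption on my part about what you want, going beyond the visualization above — one option is an algebraic (Kåsa) least-squares circle fit over each mask's pixel coordinates, reusing the imports above:
def fit_circle(mask):
    # Fit x^2 + y^2 = a*x + b*y + c in the least-squares sense;
    # then center = (a/2, b/2) and r = sqrt(c + a^2/4 + b^2/4).
    ys, xs = np.nonzero(mask)
    A = np.column_stack([xs, ys, np.ones_like(xs)]).astype(np.float64)
    rhs = xs.astype(np.float64) ** 2 + ys.astype(np.float64) ** 2
    a, b, c = np.linalg.lstsq(A, rhs, rcond=None)[0]
    cx, cy = a / 2.0, b / 2.0
    return cx, cy, math.sqrt(c + cx ** 2 + cy ** 2)

for m in arc_masks:
    cx, cy, r = fit_circle(m)
    cv2.circle(dst, (int(round(cx)), int(round(cy))), int(round(r)), (0, 0, 255), 1)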
I am trying to detect a grainy printed line on paper with cv2, and I need the angle of the line. I don't have much knowledge of image processing and I only need to detect the line. I tried to play with the parameters, but the angle is always detected wrong. Could someone help me? This is my code:
import cv2
import numpy as np
import matplotlib.pylab as plt
from matplotlib.pyplot import figure
img = cv2.imread('CamXY1_1.bmp')
crop_img = img[100:800, 300:900]
blur = cv2.GaussianBlur(crop_img, (1,1), 0)
ret,thresh = cv2.threshold(blur,150,255,cv2.THRESH_BINARY)
gray = cv2.cvtColor(thresh,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 60, 150)
figure(figsize=(15, 15), dpi=150)
plt.imshow(edges, 'gray')
lines = cv2.HoughLines(edges,1,np.pi/180,200)
for rho,theta in lines[0]:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 3000*(-b))
y1 = int(y0 + 3000*(a))
x2 = int(x0 - 3000*(-b))
y2 = int(y0 - 3000*(a))
cv2.line(img,(x1,y1),(x2,y2),(0, 255, 0),2)
Image to be detected
Here's a possible solution to estimate the line (and its angle) without using the Hough line transform. The idea is to locate the start and end points of the line using the reduce function, which can reduce an image to a single column or row and, in SUM mode, gives the total sum of all the pixels across the reduced dimension. Using this info we can estimate the extreme points of the line and calculate its angle. These are the steps:
Resize your image because it is way too big
Get a binary image via adaptive thresholding
Define two extreme regions of the image and crop them
Reduce the ROIs to a column using the SUM mode, which is the sum of all rows
Accumulate the total values above a threshold value
Estimate the starting and ending points of the line
Get the angle of the line
Here's the code:
# imports:
import cv2
import numpy as np
import math
# image path
path = "D://opencvImages//"
fileName = "mmCAb.jpg"
# Reading an image in default mode:
inputImage = cv2.imread(path + fileName)
# Scale your BIG image into a small one:
scalePercent = 0.3
# Calculate the new dimensions
width = int(inputImage.shape[1] * scalePercent)
height = int(inputImage.shape[0] * scalePercent)
newSize = (width, height)
# Resize the image:
inputImage = cv2.resize(inputImage, newSize, None, None, None, cv2.INTER_AREA)
# Deep copy for results:
inputImageCopy = inputImage.copy()
# Convert BGR to grayscale:
grayInput = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
# Adaptive Thresholding:
windowSize = 51
windowConstant = 11
binaryImage = cv2.adaptiveThreshold(grayInput, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, windowSize, windowConstant)
The first step is to get the binary image. Note that I previously downscaled your input because it is too big and we don't need all that info. This is the binary mask:
Now, we don't need most of the image. In fact, since the line runs across the whole image, we can just "trim" the first and last columns and check where the white pixels begin. I'll crop columns a little wider, though, so we can ensure we have enough data and as little noise as possible. I'll define two Regions of Interest (ROIs) and crop them. Then, I'll reduce each ROI to a column using the SUM mode, which will give me the summation of all intensities across each row. After that, I can accumulate the locations where the sum exceeds a certain threshold and approximate the location of the line, like this:
# Define the regions that will be cropped
# from the original image:
lineWidth = 5
cropPoints = [(0, 0, lineWidth, height), (width-lineWidth, 0, lineWidth, height)]
# Store the line points here:
linePoints = []
# Loop through the crop points and
# crop the ROI:
for p in range(len(cropPoints)):
# Get the ROI:
(x,y,w,h) = cropPoints[p]
# Crop the ROI:
imageROI = binaryImage[y:y+h, x:x+w]
    # Reduce the ROI to an n rows x 1 column matrix:
reducedImg = cv2.reduce(imageROI, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)
    # Get the height (or length) of the array:
reducedHeight = reducedImg.shape[0]
# Define a threshold and accumulate
# the coordinate of the points:
threshValue = 100
pointSum = 0
pointCount = 0
for i in range(reducedHeight):
currentValue = reducedImg[i]
if currentValue > threshValue:
pointSum = pointSum + i
pointCount = pointCount + 1
# Get average coordinate of the line:
    y = int(pointSum / pointCount)
# Store in list:
linePoints.append((x, y))
The red rectangles show the regions I cropped from the input image:
Note that I've stored both points in the linePoints list. Let's check out our approximation by drawing a line that connects both points:
# Get the two points:
p0 = linePoints[0]
p1 = linePoints[1]
# Draw the line:
cv2.line(inputImageCopy, (p0[0], p0[1]), (p1[0], p1[1]), (255, 0, 0), 1)
cv2.imshow("Line", inputImageCopy)
cv2.waitKey(0)
Which yields:
Not bad, huh? Now that we have both points, we can estimate the angle of this line:
# Get angle:
adjacentSide = p1[0] - p0[0]
oppositeSide = p0[1] - p1[1]
# Compute the angle alpha:
alpha = math.degrees(math.atan(oppositeSide / adjacentSide))
print("Angle: "+str(alpha))
This prints:
Angle: 0.534210901840831
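One small robustness note: math.atan divides oppositeSide by adjacentSide, which raises ZeroDivisionError for a perfectly vertical line. If that case can occur in your images, math.atan2 handles it for free:
# atan2 takes the two sides separately, so adjacentSide == 0 is fine:
alpha = math.degrees(math.atan2(oppositeSide, adjacentSide))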
I am trying to detect a white object on a black/white road to let an autonomous RC car drive around it, but I am detecting everything except the white box on the road.
What I tried can be seen in my code example:
#input= one video stream frame 320x240
frame = copy.deepcopy(input)
grayFrame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
threshGray = cv2.adaptiveThreshold(
grayFrame,
255,
cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY,
blockSize=123,
C=-19,
)
contours,_ = cv2.findContours(threshGray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
filteredContours = []
for cnt in contours:
    #some filtering needs to be done
    #
    #after filtering append contour
    filteredContours.append(cnt)
    x, y, w, h = cv2.boundingRect(cnt)  # bounding box of the contour
    cv2.rectangle(frame, (x, y), (x + w, y + h), (3, 244, 244), 1)
cv2.drawContours(frame, filteredContours, -1, (255, 0, 255), 1)
cv2.imshow("with contours", frame)
cv2.imshow("adaptiveThreshhold", threshGray)
cv2.imshow("input", input)
I'm looking for a way to draw a bounding box around the obstacle.
The problem is I don't know how to extract this box from the rest. It is probably because the contour of the box and the lines on the right are connected, and that's why the bounding box is that big. It would be great if someone knew a way to do that.
Click here to see the Result
First: Input image
Second: after adaptiveThreshold
Third: with contours (pink) and bounding boxes (yellow)
At this point in time, you have several candidate contours of white regions.
You need to add code in the #some filtering needs to be done section to remove from the candidate list everything that is NOT the bounding box you want to find.
I suggest you compare your candidates against a square box of sufficient size, because all of the contours except the BOX (the one you want to find on the road) fail that condition. A sketch of such a filter follows.
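A minimal version of that filter; the minimum side length (20 px) and the aspect-ratio bounds here are assumptions you would tune for your frames:
filteredContours = []
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    if w < 20 or h < 20:
        continue  # too small: noise or lane-marking fragments
    aspect = w / float(h)
    if 0.5 <= aspect <= 2.0:  # roughly square, unlike long thin road lines
        filteredContours.append(cnt)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (3, 244, 244), 1)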
I think what you are looking for is triangular masking; as seen in the input image, you have lane markings as well. Try using a lane detector: with it, all the areas outside the lane can be masked out and only the space inside the lane is processed.
Below I have combined a lane detector using HoughLinesP with contour detection. Try this; I did not test the code, but I see no issues.
#!/usr/bin/env python3
"""
Lane detector using the Hough transform method
"""
import cv2 as cv
import numpy as np
# import matplotlib.pyplot as plt
import random as rng
rng.seed(369)
def do_canny(frame):
# Converts frame to grayscale because we only need the luminance channel for detecting edges - less computationally expensive
gray = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)
# Applies a 5x5 gaussian blur with deviation of 0 to frame - not mandatory since Canny will do this for us
blur = cv.GaussianBlur(gray, (5, 5), 0)
# Applies Canny edge detector with minVal of 50 and maxVal of 150
canny = cv.Canny(blur, 50, 150)
return canny
def do_segment(frame):
# Since an image is a multi-directional array containing the relative intensities of each pixel in the image, we can use frame.shape to return a tuple: [number of rows, number of columns, number of channels] of the dimensions of the frame
# frame.shape[0] give us the number of rows of pixels the frame has. Since height begins from 0 at the top, the y-coordinate of the bottom of the frame is its height
height = frame.shape[0]
# Creates a triangular polygon for the mask defined by three (x, y) coordinates
polygons = np.array([
[(0, height), (800, height), (380, 290)]
])
# Creates an image filled with zero intensities with the same dimensions as the frame
mask = np.zeros_like(frame)
# Allows the mask to be filled with values of 1 and the other areas to be filled with values of 0
cv.fillPoly(mask, polygons, 255)
# A bitwise and operation between the mask and frame keeps only the triangular area of the frame
segment = cv.bitwise_and(frame, mask)
return segment
def calculate_lines(frame, lines):
# Empty arrays to store the coordinates of the left and right lines
left = []
right = []
# Loops through every detected line
for line in lines:
# Reshapes line from 2D array to 1D array
x1, y1, x2, y2 = line.reshape(4)
# Fits a linear polynomial to the x and y coordinates and returns a vector of coefficients which describe the slope and y-intercept
parameters = np.polyfit((x1, x2), (y1, y2), 1)
slope = parameters[0]
y_intercept = parameters[1]
# If slope is negative, the line is to the left of the lane, and otherwise, the line is to the right of the lane
if slope < 0:
left.append((slope, y_intercept))
else:
right.append((slope, y_intercept))
# Averages out all the values for left and right into a single slope and y-intercept value for each line
left_avg = np.average(left, axis = 0)
right_avg = np.average(right, axis = 0)
# Calculates the x1, y1, x2, y2 coordinates for the left and right lines
left_line = calculate_coordinates(frame, left_avg)
right_line = calculate_coordinates(frame, right_avg)
return np.array([left_line, right_line])
def calculate_coordinates(frame, parameters):
slope, intercept = parameters
# Sets initial y-coordinate as height from top down (bottom of the frame)
y1 = frame.shape[0]
# Sets final y-coordinate as 150 above the bottom of the frame
y2 = int(y1 - 150)
# Sets initial x-coordinate as (y1 - b) / m since y1 = mx1 + b
x1 = int((y1 - intercept) / slope)
# Sets final x-coordinate as (y2 - b) / m since y2 = mx2 + b
x2 = int((y2 - intercept) / slope)
return np.array([x1, y1, x2, y2])
def visualize_lines(frame, lines):
# Creates an image filled with zero intensities with the same dimensions as the frame
lines_visualize = np.zeros_like(frame)
# Checks if any lines are detected
if lines is not None:
for x1, y1, x2, y2 in lines:
# Draws lines between two coordinates with green color and 5 thickness
cv.line(lines_visualize, (x1, y1), (x2, y2), (0, 255, 0), 5)
return lines_visualize
# The video feed is read in as a VideoCapture object
cap = cv.VideoCapture(1)
while (cap.isOpened()):
# ret = a boolean return value from getting the frame, frame = the current frame being projected in the video
ret, frame = cap.read()
canny = do_canny(frame)
cv.imshow("canny", canny)
# plt.imshow(frame)
# plt.show()
segment = do_segment(canny)
hough = cv.HoughLinesP(segment, 2, np.pi / 180, 100, np.array([]), minLineLength = 100, maxLineGap = 50)
# Averages multiple detected lines from hough into one line for left border of lane and one line for right border of lane
lines = calculate_lines(frame, hough)
# Visualizes the lines
lines_visualize = visualize_lines(frame, lines)
cv.imshow("hough", lines_visualize)
# Overlays lines on frame by taking their weighted sums and adding an arbitrary scalar value of 1 as the gamma argument
output = cv.addWeighted(frame, 0.9, lines_visualize, 1, 1)
    output_gray = cv.cvtColor(output, cv.COLOR_BGR2GRAY)  # findContours needs a single-channel image
    contours, _ = cv.findContours(output_gray, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
contours_poly = [None]*len(contours)
boundRect = [None]*len(contours)
centers = [None]*len(contours)
radius = [None]*len(contours)
for i, c in enumerate(contours):
contours_poly[i] = cv.approxPolyDP(c, 3, True)
boundRect[i] = cv.boundingRect(contours_poly[i])
centers[i], radius[i] = cv.minEnclosingCircle(contours_poly[i])
## [allthework]
## [zeroMat]
drawing = np.zeros((output.shape[0], output.shape[1], 3), dtype=np.uint8)
## [zeroMat]
## [forContour]
# Draw polygonal contour + bonding rects + circles
for i in range(len(contours)):
color = (rng.randint(0,256), rng.randint(0,256), rng.randint(0,256))
cv.drawContours(drawing, contours_poly, i, color)
cv.rectangle(drawing, (int(boundRect[i][0]), int(boundRect[i][1])), \
(int(boundRect[i][0]+boundRect[i][2]), int(boundRect[i][1]+boundRect[i][3])), color, 2)
# Opens a new window and displays the output frame
cv.imshow('Contours', drawing)
# Frames are read by intervals of 10 milliseconds. The programs breaks out of the while loop when the user presses the 'q' key
if cv.waitKey(10) & 0xFF == ord('q'):
break
# The following frees up resources and closes all windows
cap.release()
cv.destroyAllWindows()
Try different values for the thresholds in Canny.
First of all, I am putting values into hough_lines such as rho = 2, theta = np.pi/180, threshold = 15, min_line_len = 40, max_line_gap = 20:
lines = hough_lines(masked_edges, rho, theta, threshold, min_line_len, max_line_gap)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
#This function is used for drawing line when we give a specific criteria into the pixels we want to pick up.
#This function is used with draw_lines function to draw the lines in specific pixels
#which are drawn by region_of_interest function.
"""`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
Above is my code; I am getting the following error:
File "auto.py", line 68, in <module>
output = lane.process_image(frame)
File "/home/pi/test/lane.py", line 227, in process_image
lines = hough_lines(masked_edges, rho, theta, threshold, min_line_len, max_line_gap)
File "/home/pi/test/lane.py", line 154, in hough_lines
draw_lines(line_img, lines)
File "/home/pi/test/lane.py", line 97, in draw_lines
for line in lines:
TypeError: 'NoneType' object is not iterable
In the main function, I am trying to pass the frame taken by video capture into the process_image function in the lane file.
cam = cv2.VideoCapture(0)
while True:
print('Succeed to connect...')
data = ''
data=sys.stdin.read(1)[0]
print("data input=",data)
while True:
if not cam.isOpened():
print("Wait for the header")
else:
flag, frame = cam.read()
frame = cv2.flip(frame,1)
cv2.imshow('video', frame)
#print(type(frame))
output = lane.process_image(frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
exit()
I tried many things, such as changing the parameter values for the hough_lines function, and tried to see which function produced the NoneType value. However, up to the cv2.HoughLinesP call, every function returns its own value as an ndarray.
--edit
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import os
def grayscale(img): # It converts the original picture to the gray scale picture.
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def canny(img, low_threshold, high_threshold):
#After applied grayscale function, it converts the grayscale image to edges
#which is a binary image with white pixels.
"""Applies the Canny transform"""
return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
#Gaussian blur is applied for suppressing noise and nonlogical gradients by averaging.
"""Applies a Gaussian Noise kernel"""
return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
# This functions is used for tracing a specific line of the road
# utilizing vertices which is are 4 integer points here and ignore_mask_color which ignores
# pixels if those do not meet the criteria.
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
`vertices` should be a numpy array of integer points.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
"""
NOTE: this is the function you might want to use as a starting point once you want to
average/extrapolate the line segments you detect to map out the full
extent of the lane (going from the result shown in raw-lines-example.mp4
to that shown in P1_example.mp4).
Think about things like separating line segments by their
slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
line vs. the right line. Then, you can average the position of each of
the lines and extrapolate to the top and bottom of the lane.
This function draws `lines` with `color` and `thickness`.
Lines are drawn on the image inplace (mutates the image).
If you want to make the lines semi-transparent, think about combining
this function with the weighted_img() function below
"""
"""
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(img, (x1, y1), (x2, y2), color, thickness)
"""
right_slopes = []
right_intercepts = []
left_slopes = []
left_intercepts = []
left_points_x = []
left_points_y = []
right_points_x = []
right_points_y = []
y_max = img.shape[0]
y_min = img.shape[0]
for line in lines:
for x1,y1,x2,y2 in line:
slope = (y2-y1)/(x2-x1)
if slope < 0.0 and slope > -math.inf: # math.inf = floating point positive infinity
left_slopes.append(slope) # left line
left_points_x.append(x1)
left_points_x.append(x2)
left_points_y.append(y1)
left_points_y.append(y2)
left_intercepts.append(y1 - slope*x1)
if slope > 0.0 and slope < math.inf:
right_slopes.append(slope) # right line
right_points_x.append(x1)
right_points_x.append(x2)
right_points_y.append(y1)
right_points_y.append(y2)
right_intercepts.append(y1 - slope*x1)
y_min = min(y1,y2,y_min)
if len(left_slopes) > 0:
left_slope = np.mean(left_slopes)
left_intercept = np.mean(left_intercepts)
x_min_left = int((y_min - left_intercept)/left_slope)
x_max_left = int((y_max - left_intercept)/left_slope)
cv2.line(img, (x_min_left, y_min), (x_max_left, y_max), [255, 0, 0], 8)
if len(right_slopes) > 0:
right_slope = np.mean(right_slopes)
right_intercept = np.mean(right_intercepts)
x_min_right = int((y_min - right_intercept)/right_slope)
x_max_right = int((y_max - right_intercept)/right_slope)
cv2.line(img, (x_min_right, y_min), (x_max_right, y_max), [255, 0, 0], 8)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
#This function is used for drawing line when we give a specific criteria into the pixels we want to pick up.
#This function is used with draw_lines function to draw the lines in specific pixels
#which are drawn by region_of_interest function.
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
draw_lines(line_img, lines)
return line_img
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
# By appliying "color" binary image, it finally draws the line on the edge image.
"""
`img` is the output of the hough_lines(), An image with lines drawn on it.
Should be a blank image (all black) with lines drawn on it.
`initial_img` should be the image before any processing.
The result image is computed as follows:
initial_img * α + img * β + γ
NOTE: initial_img and img must be the same shape!
"""
return cv2.addWeighted(initial_img, α, img, β, γ)
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
# Read in and grayscale the image
gray = grayscale(image)
# Define a kernel size and apply Gaussian smoothing
kernel_size = 5
blur_gray = gaussian_blur(gray, kernel_size)
# Define our parameters for Canny and apply
low_threshold = 50
high_threshold = 150
edges = canny(blur_gray, low_threshold, high_threshold)
# Next we'll create a masked edges image using cv2.fillPoly()
imshape = image.shape
vertices = np.array([[(120,imshape[0]),(450, 320), (500, 320), (imshape[1],imshape[0])]], dtype=np.int32)
masked_edges = region_of_interest(edges, vertices)
# Define the Hough transform parameters
# Make a blank the same size as our image to draw on
rho = 2 # distance resolution in pixels of the Hough grid
theta = np.pi/180 # angular resolution in radians of the Hough grid
threshold = 15 # minimum number of votes (intersections in Hough grid cell)
min_line_len = 40 # minimum number of pixels making up a line
max_line_gap = 20 # maximum gap in pixels between connectable line segments
line_image = np.copy(image)*1 #creating a blank to draw lines on
# Run Hough on edge detected image
lines = hough_lines(masked_edges, rho, theta, threshold, min_line_len, max_line_gap)
# Create a "color" binary image to combine with line image
color_edges = np.dstack((edges, edges, edges))
# Draw the lines on the edge image
lines_edges = weighted_img(lines, line_image)
return lines_edges
I have an image in which I want to threshold part of the image within a circular region, and then the remainder of the image outside of this region.
Unfortunately, my attempts seem to threshold the image as a whole, ignoring the masks. How can this be achieved properly? See my code attempt below.
def circular_mask(h, w, centre=None, radius=None):
if centre is None: # use the middle of the image
centre = [int(w / 2), int(h / 2)]
if radius is None: # use the smallest distance between the centre and image walls
radius = min(centre[0], centre[1], w - centre[0], h - centre[1])
Y, X = np.ogrid[:h, :w]
dist_from_centre = np.sqrt((X - centre[0]) ** 2 + (Y - centre[1]) ** 2)
mask = dist_from_centre <= radius
return mask
img = cv2.imread('image.png', 0) #read image
h,w = img.shape[:2]
mask = circular_mask(h,w, centre=(135,140),radius=75) #create a boolean circle mask
mask_img = img.copy()
inside = np.ma.array(mask_img, mask=~mask)
t1 = inside < 50 #threshold part of image within the circle, ignore rest of image
plt.imshow(inside)
plt.imshow(t1, alpha=.25)
plt.show()
outside = np.ma.array(mask_img, mask=mask)
t2 = outside < 20 #threshold image outside circle region, ignoring image in circle
plt.imshow(outside)
plt.imshow(t2, alpha=.25)
plt.show()
fin = np.logical_or(t1, t2) #combine the results from both thresholds together
plt.imshow(fin)
plt.show()
Working solution:
img = cv2.imread('image.png', 0)
h,w = img.shape[:2]
mask = circular_mask(h,w, centre=(135,140),radius=75)
inside = img.copy()*mask
t1 = inside < 50#get_threshold(inside, 1)
plt.imshow(inside)
plt.show()
outside = img.copy()*~mask
t2 = outside < 70
plt.imshow(outside)
plt.show()
plt.imshow(t1)
plt.show()
plt.imshow(t2)
plt.show()
plt.imshow(np.logical_and(t1,t2))
plt.show()
I assume your image is single-channel (e.g. grayscale).
You can make two copies of the image. Multiply (or logical-AND) your mask with one of them and the inverse of that mask with the other one. Now apply your desired threshold to each of them. In the end, merge both images using a logical OR operation.
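A minimal sketch of that recipe, reusing the circular_mask helper and imports from the question (the two threshold values are placeholders to tune):
img = cv2.imread('image.png', 0)  # single-channel (grayscale) image
h, w = img.shape[:2]
mask = circular_mask(h, w, centre=(135, 140), radius=75)  # boolean circle mask
inside = img * mask      # copy 1: pixels inside the circle, zeros elsewhere
outside = img * ~mask    # copy 2: pixels outside the circle, zeros elsewhere
t1 = inside < 50         # threshold applied inside the circle
t2 = outside < 20        # a different threshold outside it
# Restrict each result to its own region before merging with logical OR:
result = np.logical_or(np.logical_and(t1, mask), np.logical_and(t2, ~mask))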