Colour Detection During Line Following - python

I am working on a line follower bot that follows a black line on a white background. The code is something like this:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
try:
    while(1):
        # Read the frame and keep only the region of interest
        ret, img = cap.read()
        img = img[160:320, :, :]
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Inverted binary threshold: the black line becomes white
        ret, thresh = cv2.threshold(gray, 170, 255, cv2.THRESH_BINARY_INV)
        # Applying dilation morphological operation to thicken the line
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        dilation = cv2.dilate(thresh, kernel, iterations=5)
        # Applying median blur to reduce the number of contours
        blur3 = cv2.medianBlur(dilation, 5)
        contours, hierarchy = cv2.findContours(blur3, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cv2.drawContours(img, contours, -1, (0, 255, 0), 3)
        cv2.imshow('image', img)
        # imshow needs a waitKey call to actually refresh the window
        if cv2.waitKey(1) & 0xFF == 27:
            break
        if not contours:
            continue
        M = cv2.moments(contours[0])
        if M['m00'] == 0:
            continue
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        if cx < 190:
            print "LEFT"
        elif cx > 450:
            print "RIGHT"
        else:
            print "FRONT"
finally:
    cap.release()
    cv2.destroyAllWindows()
I want to also detect blue and red colours while the bot is following the line, using the same camera.
I tried reading the pixel at the centroid: center_px = img[cy, cx]
But reading just one pixel doesn't prove reliable for determining the colour.
Is there any other approach to do this?
I am using Python 2.7 with cv2.
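A more robust option is to sample a small patch around the centroid and classify it in HSV space with cv2.inRange. Below is a minimal sketch; the HSV ranges and the 0.5 majority threshold are rough guesses that would need tuning for your camera and lighting:
import cv2
import numpy as np

def detect_colour(img, cx, cy, half=10):
    # Take a small patch around the centroid instead of one pixel
    patch = img[max(cy - half, 0):cy + half, max(cx - half, 0):cx + half]
    hsv = cv2.cvtColor(patch, cv2.COLOR_BGR2HSV)
    # Rough HSV ranges; tune for your camera and lighting
    blue = cv2.inRange(hsv, np.array([100, 100, 50]), np.array([130, 255, 255]))
    # Red wraps around hue 0, so two ranges are combined
    red1 = cv2.inRange(hsv, np.array([0, 100, 50]), np.array([10, 255, 255]))
    red2 = cv2.inRange(hsv, np.array([160, 100, 50]), np.array([180, 255, 255]))
    red = cv2.bitwise_or(red1, red2)
    total = float(patch.shape[0] * patch.shape[1])
    # Call it a colour only if most of the patch matches the range
    if cv2.countNonZero(blue) / total > 0.5:
        return 'BLUE'
    if cv2.countNonZero(red) / total > 0.5:
        return 'RED'
    return None
You would call it with the cropped frame and the centroid you already compute, e.g. detect_colour(img, cx, cy).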

Related

How to improve image quality to extract text from an image using Tesseract

I'm trying to use Tesseract in the code below to extract the two lines of the image. I tried to improve the image quality, but even so it didn't work.
Can anyone help me?
from PIL import Image, ImageEnhance, ImageFilter
import pytesseract

img = Image.open(r'C:\ocr\test00.jpg')
# upscale 4x before running OCR
new_size = tuple(4 * x for x in img.size)
img = img.resize(new_size, Image.ANTIALIAS)
img.save(r'C:\test02.jpg', 'JPEG')
print(pytesseract.image_to_string(img))
Given the comment by @barny I don't know if this will work, but you can try the code below. I created a script that selects the display area and warps it into a straight image, then thresholds to a black-and-white mask of the characters and cleans up the result a bit.
Try it and see if it improves recognition. If it does, also look at the intermediate stages so you understand everything that happens.
Update: it seems Tesseract prefers black text on a white background, so I inverted and dilated the result.
Result: (image omitted)
Updated result: (image omitted)
Code:
import numpy as np
import cv2
# load image
image = cv2.imread('disp.jpg')
# create grayscale
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# perform threshold
retr, mask = cv2.threshold(gray_image, 190, 255, cv2.THRESH_BINARY)
# findcontours
ret, contours, hier = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# select the largest contour
largest_area = 0
for cnt in contours:
    if cv2.contourArea(cnt) > largest_area:
        cont = cnt
        largest_area = cv2.contourArea(cnt)
# find the rectangle (and the cornerpoints of that rectangle) that surrounds the contours / photo
rect = cv2.minAreaRect(cont)
box = cv2.boxPoints(rect)
box = np.int0(box)
#### Warp image to square
# assign corner points of the region of interest
pts1 = np.float32([box[2],box[3],box[1],box[0]])
# provide new coordinates for the corner points
pts2 = np.float32([[0,0],[500,0],[0,110],[500,110]])
# determine and apply the transformation matrix
M = cv2.getPerspectiveTransform(pts1,pts2)
tmp = cv2.warpPerspective(image,M,(500,110))
# create grayscale
gray_image2 = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
# perform threshold
retr, mask2 = cv2.threshold(gray_image2, 160, 255, cv2.THRESH_BINARY_INV)
# remove noise / close gaps
kernel = np.ones((5,5),np.uint8)
result = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernel)
#draw rectangle on original image
cv2.drawContours(image, [box], 0, (255,0,0), 2)
# dilate result to make characters more solid
kernel2 = np.ones((3,3),np.uint8)
result = cv2.dilate(result,kernel2,iterations = 1)
#invert to get black text on white background
result = cv2.bitwise_not(result)
#show image
cv2.imshow("Result", result)
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
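As a follow-up, the cleaned-up result can be fed straight to Tesseract. A minimal sketch, assuming pytesseract and Pillow are installed, where result is the final mask from the code above:
from PIL import Image
import pytesseract

# Convert the OpenCV array (uint8, black text on white) to a PIL image
pil_img = Image.fromarray(result)
text = pytesseract.image_to_string(pil_img)
print(text)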

Fill area marked in black line

I am trying to find the area contained within a black line in an image.
Here is the Sample starting image "photo.jpg":
I have used OpenCV and SimpleCV for this.
Here is the code:
from SimpleCV import Camera, Display, Image, Color
import time
import cv2
import numpy as np
n_image = Image('photo.jpg')
n_image2 = n_image.crop(55, 72, 546, 276) #Crop X,Y,W,H
n_image2.save('photo_2.jpg')
imagea = Image("photo_2.jpg")
greya = imagea.stretch(50).invert() #50=Blackness level of Black
greya.show()
greya.save('photo_2-GREY.jpg')
im = cv2.imread('photo_2-GREY.jpg')
imgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
ret,thresh = cv2.threshold(imgray,220,255,0)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
largest_areas = sorted(contours, key=cv2.contourArea)
cv2.drawContours(im, [largest_areas[-2]], 0, (255,255,255,255), -1)
cv2.drawContours(im,contours,-1,(255,255,255),-1)
cv2.imshow('Image Window',im)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('photo_3.jpg',im)
n_image = Image('photo_3.jpg')
mask = n_image.colorDistance((127, 127, 127))
mask.show()
mask.save('mask.jpg')
time.sleep(3)
binarised = mask.binarize()
blobs = binarised.findBlobs()
blobs.show(width=3)
time.sleep(60)
individualareaofholes = blobs.area()
compositeareaofholes = sum(individualareaofholes)
orig_area = 132432
finalarea = (orig_area - compositeareaofholes)
# use float division; Python 2 would truncate int/int to zero
res = round((float(finalarea) / orig_area) * 100, 0)
print "Area is %d" % res
Here is the image "mask.jpg" which is used for area calculation:
Observe:
1. the black patches inside the white area in "mask.jpg"
2. the white portion in the bottom left corner with the word "TAXI"
How do I eliminate them?
I just want everything enclosed within the black line to be gobbled up and everything outside the line not to be accounted for while calculating the area.
I think you are complicating your solution (I may be wrong). I tried to modify your code to get the area within the black boundary. I am not sure the area is correct, but it gives you a way to fine-tune it.
import cv2
import numpy as np
n_image = cv2.imread('5GZ6X.jpg') # Your original image
imgray = cv2.cvtColor(n_image,cv2.COLOR_BGR2GRAY)
im_new = np.zeros_like(imgray)
ret,thresh = cv2.threshold(imgray,10,255,0)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
largest_areas = sorted(contours, key=cv2.contourArea)
cv2.drawContours(im_new, [largest_areas[-2]], 0, (255,255,255,255), -1)
image_masked = cv2.bitwise_and(imgray, imgray, mask=im_new)
area = cv2.contourArea(largest_areas[-2])
for contour in largest_areas:
    areas = cv2.contourArea(contour)
    if areas > 300:
        print areas
print 'Complete area :' + str(n_image.shape[0] * n_image.shape[1])
print 'Area of selected region : ' + str(area)
cv2.imshow('main', image_masked)
cv2.waitKey(1000)
The result I got from this is
113455.5
135587.0
303849.0
Complete area :307200
Area of selected region : 135587.0
I got this image result after masking the image with the generated (largest) contour.
Hope this helps! Good luck :)
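If the black patches inside the region still skew the area, one possible refinement (a sketch, not tested on the original image) is to retrieve the contours with cv2.RETR_CCOMP and subtract the areas of the holes, which appear as child contours in the hierarchy:
import cv2

im = cv2.imread('5GZ6X.jpg')
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 10, 255, 0)
# RETR_CCOMP gives a two-level hierarchy: outer boundaries and their holes
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
# index of the largest outer contour
outer = max(range(len(contours)), key=lambda i: cv2.contourArea(contours[i]))
area = cv2.contourArea(contours[outer])
# subtract every hole whose parent is the outer contour
for i, cnt in enumerate(contours):
    if hierarchy[0][i][3] == outer:
        area -= cv2.contourArea(cnt)
print 'Net area: ' + str(area)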

Having trouble with orientation detection in OpenCV

I am trying to make a computer vision script that detects the orientation of objects. It works most of the time, but for certain images it does not have the same success.
This script relies on blurring and Canny edge detection to find the contours.
Working example:
The part for which it fails:
For the failing part, it draws two lines for one of the shapes and completely ignores another of the shapes.
Main code:
import cv2
from imgops import imutils
import CVAlgo
z = 'am'
path = 'images/pca.jpg'
#path = 'images/pca2.jpg'
img = cv2.imread(path)
img = imutils.resize(img, height = 600)
# derive the grayscale from the resized image so it has a single channel
# (resizing the colour image into imgray leaves it with three channels)
imgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
final = img.copy()
thresh, imgray = CVAlgo.filtering(img, imgray, z)
__ , contours, hierarchy = cv2.findContours(thresh.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
# Iterate through all contours
test = CVAlgo.cnt_gui(final, contours)
#cv2.imwrite('1.jpg', final)
cv2.imshow('thresh', thresh)
cv2.imshow('contours', final)
cv2.waitKey(0)
CVAlgo.py
import cv2
from numpy import *
from pylab import *
from imgops import imutils
import math

def invert_img(img):
    img = (255 - img)
    return img

def canny(imgray):
    imgray = cv2.GaussianBlur(imgray, (11,11), 200)
    canny_low = 0
    canny_high = 100
    thresh = cv2.Canny(imgray, canny_low, canny_high)
    return thresh

def cnt_gui(img, contours):
    cnts = sorted(contours, key = cv2.contourArea, reverse = True)
    for i in range(0, len(cnts)):
        sel_cnts = sorted(contours, key = cv2.contourArea, reverse = True)[i]
        area = cv2.contourArea(sel_cnts)
        if area < 1000:
            continue
        # get orientation angle and center coord
        center, axis, angle = cv2.fitEllipse(sel_cnts)
        hyp = 100  # length of the orientation line
        # coordinates of the 2nd point, given the line length and center coord
        linex = int(center[0]) + int(math.sin(math.radians(angle)) * hyp)
        liney = int(center[1]) - int(math.cos(math.radians(angle)) * hyp)
        # draw orientation
        cv2.line(img, (int(center[0]), int(center[1])), (linex, liney), (0,0,255), 5)
        cv2.circle(img, (int(center[0]), int(center[1])), 10, (255,0,0), -1)
    return img

def filtering(img, imgray, mode):
    imgray = cv2.medianBlur(imgray, 11)
    thresh = cv2.Canny(imgray, 75, 200)
    return thresh, imgray
Does anyone know what the problem is? Anyone know how I can improve this script?
The shape that has not been detected is too close to the black background, so its contour has merged with the contour of the white object area. The second orientation you find on one of the objects is in fact the orientation of the outer contour. To circumvent some of this you can dilate or close the binary image after thresholding, using cv2.dilate or cv2.morphologyEx with cv2.MORPH_CLOSE.
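For example, a minimal sketch of such a closing step (the kernel size is a guess to tune):
import cv2
import numpy as np

def close_binary(thresh, ksize=5):
    # closing fills small gaps so merged shapes separate from the background
    kernel = np.ones((ksize, ksize), np.uint8)
    return cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)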
I have a suggestion. Since you have extracted each of the objects in the image as a contour, try fitting an ellipse to each of them. Then find the major axis of each ellipse, and the angle of orientation of that major axis.
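A sketch of that suggestion, assuming thresh is the binary image from the question's filtering step:
import cv2

# [-2] picks the contour list in both the OpenCV 2 and 3 return conventions
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
for cnt in contours:
    # fitEllipse needs at least 5 points; skip tiny contours as before
    if len(cnt) < 5 or cv2.contourArea(cnt) < 1000:
        continue
    (cx, cy), (w, h), angle = cv2.fitEllipse(cnt)
    # 'angle' is the rotation of the fitted ellipse in degrees
    print 'centre (%d, %d), orientation %.1f degrees' % (cx, cy, angle)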

Calculate angle of scissors image

I have a pair of black scissors (image below) and I want to calculate their angle, which is between 0 and 180 degrees. I used cv2.line() to get two lines for the angle calculation, but I could only detect the angle when it was 180 or 0. How can I find a ">" shape with contours? (The handle is also black.)
#Decapitary
import cv2
import numpy as np
import cv2.cv as cv
import math
import sys

cap = cv2.VideoCapture(0)
max_area = 0
yu = None
while(1):
    # Take each frame
    _, frame = cap.read()
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_black = np.array([0,0,0])
    upper_black = np.array([180,255,30])
    # mask out everything that is not black
    mask = cv2.inRange(hsv, lower_black, upper_black)
    mask = cv2.GaussianBlur(mask, (1,1), 2)
    mask = cv2.dilate(mask, np.ones((7,7), np.uint8))
    mask = cv2.erode(mask, np.ones((5,5), np.uint8))
    thresh = 18  # Canny low threshold; tune as needed
    edges = cv2.Canny(mask, thresh, thresh*5)
    # use these contours to calculate the angle
    contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        area = cv2.contourArea(c)
        if area > max_area:
            max_area = area
            yu = c
    if yu is not None:
        nss = np.asarray(yu, dtype=np.int32)
        cv2.polylines(frame, [nss], 0, (0,0,250))
    cv2.imshow('frame', frame)
    cv2.imshow('edges', edges)
    cv2.imshow('mask', mask)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
I'm open to ideas and really need help; just point me in the right direction, please.
EDIT:
Can I train a cascade to find specific points on an object (in this case, three points on the edges to calculate the angle)?
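One possible direction (a sketch, not tested on actual scissors): the ">" shape means the contour has one deep convexity defect at the pivot, so the angle between the two blades can be measured from the deepest defect returned by cv2.convexityDefects, applied to the largest contour (yu in the code above):
import cv2
import numpy as np
import math

def blade_angle(contour):
    # indices (not points) are required for convexityDefects
    hull = cv2.convexHull(contour, returnPoints=False)
    defects = cv2.convexityDefects(contour, hull)
    if defects is None:
        return None
    # the deepest defect should be the notch between the two blades
    s, e, f, d = defects[defects[:, 0, 3].argmax(), 0]
    tip1 = contour[s][0].astype(float)   # hull point on one blade
    tip2 = contour[e][0].astype(float)   # hull point on the other blade
    pivot = contour[f][0].astype(float)  # deepest point: the scissors pivot
    v1, v2 = tip1 - pivot, tip2 - pivot
    cosang = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    return math.degrees(math.acos(max(-1.0, min(1.0, cosang))))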

Find the red circle in video

I want to track the moving cap of a coke bottle in the webcam feed with OpenCV in Python (or C++). I tried to search for all the red in the frame and then used a Hough transform to search for circles.
I can't find the right radius for the circle and fix it so it doesn't change every frame. Processing time is not so important; I don't need real-time detection, but I do want precise red circle detection.
This is what I have so far:
import cv2
import numpy as np
import cv2.cv as cv

cap = cv2.VideoCapture(0)
while(1):
    # Take each frame
    _, frame = cap.read()
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of red color in HSV
    lower_red = np.array([160,140,50])
    upper_red = np.array([180,255,255])
    imgThreshHigh = cv2.inRange(hsv, lower_red, upper_red)
    imgray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    thresh = 18
    edges = cv2.Canny(imgray, thresh, thresh*3)
    # pass the Hough parameters as keywords: positionally, the fifth
    # argument is an output array, not param1
    circles = cv2.HoughCircles(imgThreshHigh, cv.CV_HOUGH_GRADIENT, 1, 500,
                               param1=25, param2=75, minRadius=5, maxRadius=15)
    maxRadius = 0
    xc = 0.00
    yc = 0.00
    found = False
    if circles is not None:
        found = True
        # keep the largest of the first few circles
        for i in circles[0,:3]:
            if i[2] < 100 and i[2] > maxRadius:
                maxRadius = i[2]
                xc = i[0]
                yc = i[1]
        if maxRadius > 1.0:
            # draw the outer circle and its center
            cv2.circle(frame, (int(xc), int(yc)), int(maxRadius), (0,0,255), 2)
            cv2.circle(frame, (int(xc), int(yc)), 1, (0,0,255), 3)
    if found:
        print "ball detected at position:", xc, ",", yc, " with radius:", maxRadius
    else:
        print "no ball"
    cv2.imshow('frame', frame)
    cv2.imshow('edges', edges)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cv2.destroyAllWindows()
I don't think the Hough transform works for this, so I want to use the edges.
How can I use an equation like (x - xc)^2 + (y - yc)^2 = r^2 together with the contours to find circles?
Also, if there is an improvement for the Hough transform approach, I would appreciate it if you shared it with me.
Contours:
contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
My example image: (this is not a frame from my videos; it is the object I want to find.)
Well, for the colour you have to find the right range. To the human eye the bottle cap is red, but to the camera it could be orange or something like that; it is also affected by the light, so the camera could pick up some white. I learned this the hard way, haha. You could write a small script with trackbars over the HSV values to find the exact range for your object, along the lines of the sketch below.
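A minimal sketch of such a trackbar tool (window and trackbar names are arbitrary):
import cv2
import numpy as np

def nothing(x):
    pass

cv2.namedWindow('range')
# one trackbar per HSV bound
for name, maximum in [('H low', 180), ('S low', 255), ('V low', 255),
                      ('H high', 180), ('S high', 255), ('V high', 255)]:
    cv2.createTrackbar(name, 'range', 0, maximum, nothing)

cap = cv2.VideoCapture(0)
while True:
    _, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower = np.array([cv2.getTrackbarPos(n, 'range') for n in ('H low', 'S low', 'V low')])
    upper = np.array([cv2.getTrackbarPos(n, 'range') for n in ('H high', 'S high', 'V high')])
    # the mask shows exactly what the current range captures
    mask = cv2.inRange(hsv, lower, upper)
    cv2.imshow('mask', mask)
    if cv2.waitKey(5) & 0xFF == 27:
        break
cap.release()
cv2.destroyAllWindows()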
Now, are you looking for the center (x, y)? You can try cv2.moments(); here is an example. For your code it would be something like this:
import cv2
import numpy as np
import cv2.cv as cv

cap = cv2.VideoCapture(0)
coords = []
while(1):
    # Take each frame
    _, frame = cap.read()
    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # define range of red color in HSV
    lower_red = np.array([160,140,50])
    upper_red = np.array([180,255,255])
    imgThreshHigh = cv2.inRange(hsv, lower_red, upper_red)
    thresh = imgThreshHigh.copy()
    contours, _ = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    max_area = 0       # reset for every frame
    best_cnt = None
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > max_area:
            max_area = area
            best_cnt = cnt
    if best_cnt is not None:
        M = cv2.moments(best_cnt)
        cx, cy = int(M['m10']/M['m00']), int(M['m01']/M['m00'])
        coord = cx, cy  # these are your coordinates for the circle
        # area = M['m00'] is the object area
        # perimeter = cv2.arcLength(best_cnt, True) is the object perimeter
        # save the coords every frame on a list; here you can add more
        # conditions if you don't want repeated coordinates
        coords.append(coord)
    cv2.imshow('frame', frame)
    cv2.imshow('Object', thresh)
    k = cv2.waitKey(5) & 0xFF
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
With moments you don't have the radius problem you mentioned with the Hough transform, because the relative center of the object is recalculated every frame.
Here is the code I use to find the exact HSV values; hope it helps you: HSV detection code
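If you also need the radius, which moments alone won't give, a possible follow-up (a sketch under the same assumptions as the code above) is cv2.minEnclosingCircle on the largest contour; it fits exactly the (x - xc)^2 + (y - yc)^2 = r^2 model from the question:
import cv2

def find_red_circle(mask):
    # [-2] picks the contour list in both the OpenCV 2 and 3 return conventions
    contours = cv2.findContours(mask.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]
    if not contours:
        return None
    best_cnt = max(contours, key=cv2.contourArea)
    # smallest circle that encloses the contour: gives centre and radius
    (xc, yc), radius = cv2.minEnclosingCircle(best_cnt)
    return (int(xc), int(yc)), int(radius)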
