How to fill a polygon in OpenCV? - python

The Python code below draws the outline of a triangle; how can I fill it with a color? Or is there an easier way to draw a filled triangle in OpenCV?
pts = np.array([[100,350],[165,350],[165,240]], np.int32)
cv2.polylines(img,[pts],True,(0,255,255),2)

You have to use cv2.fillPoly().
Illustration for a single-channel (grayscale) image
Change the second line to:
cv2.fillPoly(img, [pts], 255)
Code:
import cv2
import numpy as np

img = np.zeros([400, 400], dtype=np.uint8)
pts = np.array([[100,350],[165,350],[165,240]], np.int32)
cv2.fillPoly(img, [pts], 255)
cv2.imshow('Original', img)
cv2.waitKey(0)
Result:
Illustration for a 3-channel color image
img = cv2.imread('image_path')
pts = np.array([[170,50],[240, 40],[240, 150], [210, 100], [130, 130]], np.int32)
cv2.fillPoly(img, [pts], (255,150,255))
Result:
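If the shape is a single convex polygon like this triangle, cv2.fillConvexPoly is a slightly simpler alternative; a minimal sketch using the same points as above:
import cv2
import numpy as np

# Fill the same triangle on a 3-channel canvas with cv2.fillConvexPoly.
img = np.zeros((400, 400, 3), dtype=np.uint8)
pts = np.array([[100, 350], [165, 350], [165, 240]], np.int32)
cv2.fillConvexPoly(img, pts, (0, 255, 255))
cv2.imshow('Filled triangle', img)
cv2.waitKey(0)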

Related

Improve quality of extracted image in OpenCV

import cv2
import numpy as np

# Segmenting the red pointer
img = cv2.imread('flatmap.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_red = np.array([140, 110, 0])
upper_red = np.array([255, 255 , 255])
# Threshold with inRange() get only specific colors
mask_red = cv2.inRange(hsv, lower_red, upper_red)
# Perform bitwise operation with the masks and original image
red_pointer = cv2.bitwise_and(img,img, mask= mask_red)
# Display results
cv2.imshow('Red pointer', red_pointer)
cv2.imwrite('redpointer.jpg', red_pointer)
cv2.waitKey(0)
cv2.destroyAllWindows()
I have a map and need to extract the red arrow. The code works but the arrow has black patches in it. How would I go about altering the code to improve the output of the arrow so it's a solid shape?
You could use:
- dilate to fill up the internal noise in the shape
- external contour finding to get the outline of the triangle
- convex hull to further smooth it out
import cv2
import numpy as np
img = cv2.imread('dCkpC.jpg')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_red = np.array([140, 60, 0])
upper_red = np.array([255, 255, 255])
mask_red = cv2.inRange(hsv, lower_red, upper_red)
element = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
mask_red = cv2.dilate(mask_red, element)
contours, _ = cv2.findContours(mask_red, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
hull_list = [cv2.convexHull(contour) for contour in contours]
for hull in hull_list:
    cv2.fillConvexPoly(img, hull, (255, 0, 0))  # fill each hull with blue (BGR)
cv2.imshow('Image', img)
cv2.imwrite('out.jpg', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
out.jpg ends up looking like this, with the triangle filled in with blue.
I've looked at the channels in HSL/HSV space.
The arrows are the only stuff in the picture that has any saturation. That would be one required (but insufficient) aspect to get a lock on the desired arrow. I've picked those pixels and they appear to have a bit more than 50% saturation, so I'll use a lower bound of 25% (64).
That red arrow's hue dithers around 0 degrees (red)... that means some of its pixels are on the negative side of 0, i.e. something like 359 degrees.
You need to use two inRange calls to collect all hues from 0 up, and all hues from 359 down. Since OpenCV encodes hues in 2-degree steps, that'll be a value of 180 and down. I'll select 0 +- 20 degrees (0 .. 10 and 170 .. 180).
In summary:
import cv2 as cv
import numpy as np

im = cv.imread('flatmap.jpg')  # the map image from the question
hsv_im = cv.cvtColor(im, cv.COLOR_BGR2HSV)
mask1 = cv.inRange(hsv_im, np.array([0, 64, 0]), np.array([10, 255, 255]))
mask2 = cv.inRange(hsv_im, np.array([170, 64, 0]), np.array([180, 255, 255]))
mask = mask1 | mask2
cv.imshow("mask", mask)
cv.waitKey()
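To actually cut the arrow out of the image with this mask, a bitwise AND against the original image works (a small sketch continuing the snippet above):
# Keep only the pixels selected by the combined red mask; the rest turns black.
red_only = cv.bitwise_and(im, im, mask=mask)
cv.imshow("red arrow", red_only)
cv.waitKey()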

opencv align two images by keypoints with stretching

I have two images
image1 (object):
Original image without marked keypoints:
image2 is a white picture (500x500)
In image1 and in image2 I have marked keypoints.
I want to align image1 on image2 by keypoints. The goal is that both sets of keypoints overlap after stretching, scaling and transforming image1.
These are my keypoints (csv file). object1_x/object1_y are the coordinates in image1 and image_x/image_y are the coordinates in image2.
object1_x,object1_y,image_x,image_y
0,0,80,137
286,0,409,42
286,198,416,390
174,198,331,384
158,116,291,119
0,97,111,311
How can I do this with opencv and python?
So the result image should look like this (without the red dots; they are only there to show the keypoints):
The Concept
Extract sets of 3 indices from the first set of keypoints that will form triangles when indexed from both sets of keypoints. With the indices we can get corresponding triangles from both sets of keypoints, allowing us to build the warped image triangle by triangle (see Warp one triangle to another using OpenCV for more details):
image1.png (with added points):
image2.png (with added points):
Result (with added points)
The Code
import cv2
import numpy as np
def triangles(points):
    points = np.where(points, points, 1)
    subdiv = cv2.Subdiv2D((*points.min(0), *points.max(0)))
    subdiv.insert(list(points))
    for pts in subdiv.getTriangleList().reshape(-1, 3, 2):
        yield [np.where(np.all(points == pt, 1))[0][0] for pt in pts]

def crop(img, pts):
    x, y, w, h = cv2.boundingRect(pts)
    img_cropped = img[y: y + h, x: x + w]
    pts[:, 0] -= x
    pts[:, 1] -= y
    return img_cropped, pts

def warp(img1, img2, pts1, pts2):
    for indices in triangles(pts1):
        img1_cropped, triangle1 = crop(img1, pts1[indices])
        img2_cropped, triangle2 = crop(img2, pts2[indices])
        transform = cv2.getAffineTransform(np.float32(triangle1), np.float32(triangle2))
        img2_warped = cv2.warpAffine(img1_cropped, transform, img2_cropped.shape[:2][::-1], None, cv2.INTER_LINEAR, cv2.BORDER_REFLECT_101)
        mask = np.zeros_like(img2_cropped)
        cv2.fillConvexPoly(mask, np.int32(triangle2), (1, 1, 1), 16, 0)
        img2_cropped *= 1 - mask
        img2_cropped += img2_warped * mask
img1 = cv2.imread("image1.png")
img2 = cv2.imread("image2.png")
pts1 = np.array([[0, 0], [286, 0], [286, 198], [174, 198], [158, 116], [0, 97]])
pts2 = np.array([[80, 37], [409, 42], [416, 390], [331, 384], [291, 119], [111, 311]])
warp(img1, img2, pts1, pts2)
for pt in pts2:
    cv2.circle(img2, tuple(pt), 15, (0, 0, 255), -1)
cv2.imshow("Original", img1)
cv2.imshow("Transformed", img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
The Output
The Explanation
Import the necessary libraries:
import cv2
import numpy as np
Define a function, triangles, that will take in an array of coordinates, points, and yield lists of 3 indices of the array for triangles that will cover the area of the original array of coordinates:
def triangles(points):
    points = np.where(points, points, 1)
    subdiv = cv2.Subdiv2D((*points.min(0), *points.max(0)))
    subdiv.insert(list(points))
    for pts in subdiv.getTriangleList().reshape(-1, 3, 2):
        yield [np.where(np.all(points == pt, 1))[0][0] for pt in pts]
Define a function, crop, that will take in an image array, img, and an array of three coordinates, pts. It will return the rectangular segment of the image just large enough to fit the triangle formed by the three points, along with the three coordinates shifted to be relative to the top-left corner of the cropped image:
def crop(img, pts):
    x, y, w, h = cv2.boundingRect(pts)
    img_cropped = img[y: y + h, x: x + w]
    pts[:, 0] -= x
    pts[:, 1] -= y
    return img_cropped, pts
Define a function, warp, that will take in 2 image arrays, img1 and img2, and 2 arrays of coordinates, pts1 and pts2. It will use the triangles function defined before to iterate through the triangles from the first array of coordinates, the crop function defined before to crop both images at the coordinates corresponding to the triangle indices, and the cv2.warpAffine() method to warp the image at the current triangle of the iteration:
def warp(img1, img2, pts1, pts2):
    for indices in triangles(pts1):
        img1_cropped, triangle1 = crop(img1, pts1[indices])
        img2_cropped, triangle2 = crop(img2, pts2[indices])
        transform = cv2.getAffineTransform(np.float32(triangle1), np.float32(triangle2))
        img2_warped = cv2.warpAffine(img1_cropped, transform, img2_cropped.shape[:2][::-1], None, cv2.INTER_LINEAR, cv2.BORDER_REFLECT_101)
        mask = np.zeros_like(img2_cropped)
        cv2.fillConvexPoly(mask, np.int32(triangle2), (1, 1, 1), 16, 0)
        img2_cropped *= 1 - mask
        img2_cropped += img2_warped * mask
Read in your images. In your case, img1 is the image we want to warp, and img2 is the blank 500 x 500 image. Also, define 2 arrays of coordinates to be the keypoints of the images:
img1 = cv2.imread("image1.png")
img2 = cv2.imread("image2.png")
pts1 = np.array([[0, 0], [286, 0], [286, 198], [174, 198], [158, 116], [0, 97]])
pts2 = np.array([[80, 37], [409, 42], [416, 390], [331, 384], [291, 119], [111, 311]])
Finally, use the warp function defined before to warp img1 so that its keypoints overlap with the keypoints of img2, and show the resulting image. I drew the points from the second array of coordinates onto the resulting warped image to make the warping process easier to visualize:
warp(img1, img2, pts1, pts2)
for pt in pts2:
    cv2.circle(img2, tuple(pt), 15, (0, 0, 255), -1)
cv2.imshow("Original", img1)
cv2.imshow("Transformed", img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
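As a side note (an alternative, not the method from the answer above): if a single global transform is acceptable instead of piecewise triangle warping, cv2.findHomography plus cv2.warpPerspective can align the keypoints in a few lines. A minimal sketch reusing pts1/pts2 from above:
import cv2
import numpy as np

img1 = cv2.imread("image1.png")
img2 = cv2.imread("image2.png")
pts1 = np.array([[0, 0], [286, 0], [286, 198], [174, 198], [158, 116], [0, 97]])
pts2 = np.array([[80, 37], [409, 42], [416, 390], [331, 384], [291, 119], [111, 311]])

# Fit one homography mapping pts1 -> pts2 and warp img1 onto img2's canvas.
H, _ = cv2.findHomography(pts1.astype(np.float32), pts2.astype(np.float32), cv2.RANSAC)
warped = cv2.warpPerspective(img1, H, (img2.shape[1], img2.shape[0]))
cv2.imshow("Homography warp", warped)
cv2.waitKey(0)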

Classifying black and silver images

I am struggling to classify images as either black or silver colored.
How can I use OpenCV to classify them? (One example image is black, the other is silver.)
What is wrong with the code below?
import numpy as np
import cv2
# RGB color boundaries
black = ([0, 0, 0], [50, 50, 50])
boundaries = [black]
#
# Load a color image
img = cv2.imread('zmFf4.jpg')
print(img.shape)
breakpoint()
for (lower, upper) in boundaries:
    # create NumPy arrays from the boundaries
    lower = np.array(lower, dtype="uint8")
    upper = np.array(upper, dtype="uint8")
    # find the colors within the specified boundaries and apply
    # the mask
    mask = cv2.inRange(img, lower, upper)
    output = cv2.bitwise_and(img, img, mask=mask)
    # show the images
    cv2.imshow("images", np.hstack([img, output]))
    cv2.waitKey(0)
You are not looking at the silver range.
Do it as below:
import numpy as np
import cv2
# RGB color boundaries
black = ([0, 0, 0], [50, 50, 50])
silver = ([192, 192, 192], [212, 212, 212])
boundaries = [black, silver]
#
# Load a color image
img = cv2.imread('zmFf4.jpg')
print(img.shape)
breakpoint()
for (lower, upper) in boundaries:
    # create NumPy arrays from the boundaries
    lower = np.array(lower, dtype="uint8")
    upper = np.array(upper, dtype="uint8")
    # find the colors within the specified boundaries and apply
    # the mask
    mask = cv2.inRange(img, lower, upper)
    output = cv2.bitwise_and(img, img, mask=mask)
    # show the images
    cv2.imshow("images", np.hstack([img, output]))
    cv2.waitKey(0)
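The two masks only isolate the candidate colors; to actually classify an image as black or silver you still need a decision rule. One simple option (a sketch, not part of the original answer) is to count how many pixels each range selects and pick the larger:
import cv2
import numpy as np

# Sketch: decide 'black' vs 'silver' by whichever BGR range covers more pixels.
# The ranges are the ones from the answer above.
def classify(img):
    ranges = {'black':  ([0, 0, 0], [50, 50, 50]),
              'silver': ([192, 192, 192], [212, 212, 212])}
    counts = {}
    for name, (lower, upper) in ranges.items():
        mask = cv2.inRange(img, np.array(lower, np.uint8), np.array(upper, np.uint8))
        counts[name] = cv2.countNonZero(mask)
    return max(counts, key=counts.get)

img = cv2.imread('zmFf4.jpg')
print(classify(img))  # -> 'black' or 'silver'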

python - opencv morphologyEx remove specific color

After removing the captcha's background, the image contains only the digits and some noise.
The noise lines are all one color: RGB(127,127,127).
I then applied a morphology operation:
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2))
self.im = cv2.morphologyEx(self.im, cv2.MORPH_CLOSE, kernel)
Some parts of the digits get removed as well.
How can I use morphologyEx() to remove only the color RGB(127,127,127)?
In order to eliminate a color within a particular range you have to use the cv2.inRange() function.
Here is the code:
import cv2
import numpy as np

# img is the captcha image (BGR) loaded earlier, e.g. with cv2.imread()
lower = np.array([126,126,126]) #-- Lower range --
upper = np.array([127,127,127]) #-- Upper range --
mask = cv2.inRange(img, lower, upper)
res = cv2.bitwise_and(img, img, mask= mask) #-- Contains pixels having the gray color--
cv2.imshow('Result',res)
This is what I got for the two images you have:
Image 1:
Image 2:
You carry on from here.
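To remove the gray rather than isolate it, one option (a sketch continuing from the mask above, not part of the original answer) is to invert the mask or overwrite the selected pixels:
# Invert the mask so it selects everything EXCEPT the gray noise,
# and keep only those pixels (the noise lines turn black).
mask_inv = cv2.bitwise_not(mask)
cleaned = cv2.bitwise_and(img, img, mask=mask_inv)

# Or paint the noise pixels white so they merge with the background.
cleaned_white = img.copy()
cleaned_white[mask > 0] = (255, 255, 255)
cv2.imshow('Cleaned', cleaned_white)
cv2.waitKey(0)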
COLOR RANGE
color_dict_HSV = {'black':  [[180, 255, 30],  [0, 0, 0]],
                  'white':  [[180, 18, 255],  [0, 0, 231]],
                  'red1':   [[180, 255, 255], [159, 50, 70]],
                  'red2':   [[9, 255, 255],   [0, 50, 70]],
                  'green':  [[89, 255, 255],  [36, 50, 70]],
                  'blue':   [[128, 255, 255], [90, 50, 70]],
                  'yellow': [[35, 255, 255],  [25, 50, 70]],
                  'purple': [[158, 255, 255], [129, 50, 70]],
                  'orange': [[24, 255, 255],  [10, 50, 70]],
                  'gray':   [[180, 18, 230],  [0, 0, 40]]}
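Note that each entry lists the upper HSV bound first and the lower bound second (the blue entry is exactly the range used in the code further below). A quick usage sketch, assuming a BGR image read with cv2.imread (filename assumed):
import cv2
import numpy as np

img = cv2.imread('input.jpg')               # any BGR image (filename assumed)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

upper, lower = color_dict_HSV['blue']       # each entry is [upper, lower]
mask = cv2.inRange(hsv, np.array(lower), np.array(upper))
blue_only = cv2.bitwise_and(img, img, mask=mask)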
CREDITS:
Ali Hashemian
HOW TO REMOVE A COLOR FROM YOUR IMAGE USING OPENCV
Since many of you will want to do exactly that (in my case the task was to remove blue from the image), I used the following code to remove blue ink stamps and blue tick marks from my image so that Tesseract could OCR it properly.
[COLOR REMOVAL] CODE
import cv2
import numpy as np
# image path:
#path = "D://opencvImages//"
#fileName = "out.jpg"
# Reading an image in default mode:
inputImage = cv2.imread('0.jpg')
# Convert the BGR image to grayscale:
grayscaleImage = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)
# Convert the BGR image to HSV:
hsvImage = cv2.cvtColor(inputImage, cv2.COLOR_BGR2HSV)
# Create the HSV range for the blue ink:
# [128, 255, 255], [90, 50, 70]
lowerValues = np.array([90, 50, 70])
upperValues = np.array([128, 255, 255])
# Get binary mask of the blue ink:
bluepenMask = cv2.inRange(hsvImage, lowerValues, upperValues)
# Use a little bit of morphology to clean the mask:
# Set kernel (structuring element) size:
kernelSize = 3
# Set morph operation iterations:
opIterations = 1
# Get the structuring element:
morphKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernelSize, kernelSize))
# Perform closing:
bluepenMask = cv2.morphologyEx(bluepenMask, cv2.MORPH_CLOSE, morphKernel, None, None, opIterations, cv2.BORDER_REFLECT101)
# Add the white mask to the grayscale image:
colorMask = cv2.add(grayscaleImage, bluepenMask)
_, binaryImage = cv2.threshold(colorMask, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imwrite('bwimage.jpg',binaryImage)
thresh, im_bw = cv2.threshold(binaryImage, 210, 230, cv2.THRESH_BINARY)
kernel = np.ones((1, 1), np.uint8)
imgfinal = cv2.dilate(im_bw, kernel=kernel, iterations=1)
cv2.imshow('Final Image', imgfinal)
cv2.waitKey(0)
BEFORE [Original Image]
Blue Mark Extraction
Final Image
Here you can see that almost all of the tick marks are removed. There is always room for improvement, but this seems to be about the best we can get, and removing the few remaining marks would not have a profound effect on the OCR with Tesseract.
HOPE THAT HELPS!
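Since the cleanup exists to feed Tesseract, here is a short follow-up sketch (assuming the pytesseract wrapper is installed; not part of the original answer) to OCR the cleaned image:
import pytesseract

# imgfinal is the cleaned binary image produced above
text = pytesseract.image_to_string(imgfinal)
print(text)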
Here is my solution.
Your answer is obviously better than mine.
def mop_close(self):
    def morphological(operator=min):
        height, width, _ = self.im.shape
        # create empty image
        out_im = np.zeros((height, width, 3), np.uint8)
        out_im.fill(255)  # fill with white
        for y in range(height):
            for x in range(width):
                try:
                    if self.im[y, x][0] == 127 and self.im[y, x][1] == 127 and self.im[y, x][2] == 127:
                        nlst = neighbours(self.im, y, x)
                        out_im[y, x] = operator(nlst, key=lambda x: np.mean(x))
                    else:
                        out_im[y, x] = self.im[y, x]
                except Exception as e:
                    print(e)
        return out_im

    def neighbours(pix, y, x):
        nlst = []
        # search pixels around im[y,x] and add them to nlst
        for yy in range(y - 1, y + 1):
            for xx in range(x - 1, x + 1):
                try:
                    nlst.append(pix[yy, xx])
                except:
                    pass
        return np.array(nlst)

    def erosion(im):
        return morphological(min)

    def dilation(im):
        return morphological(max)

    self.im = dilation(self.im)
    self.im = erosion(self.im)
final result:

Fill the outside of contours OpenCV

I am trying to color the region outside of a contour black using OpenCV and Python.
Here is my code :
contours, hierarchy = cv2.findContours(copy.deepcopy(img_copy),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt=contours[max_index]
# how do I fill the outside of the contour cnt with black?
Here's how you can fill an image with black color outside of a set of contours:
import cv2
import numpy
img = cv2.imread("zebra.jpg")
stencil = numpy.zeros(img.shape).astype(img.dtype)
contours = [numpy.array([[100, 180], [200, 280], [200, 180]]), numpy.array([[280, 70], [12, 20], [80, 150]])]
color = [255, 255, 255]
cv2.fillPoly(stencil, contours, color)
result = cv2.bitwise_and(img, stencil)
cv2.imwrite("result.jpg", result)
UPD.: The code above exploits the fact that bitwise_and with 0-s produces 0-s, and won't work for fill colors other than black. To fill with an arbitrary color:
import cv2
import numpy
img = cv2.imread("zebra.jpg")
fill_color = [127, 255, 32] # any BGR color value to fill with
mask_value = 255 # 1 channel white (can be any non-zero uint8 value)
# contours to fill outside of
contours = [ numpy.array([ [100, 180], [200, 280], [200, 180] ]),
numpy.array([ [280, 70], [12, 20], [80, 150]])
]
# our stencil - some `mask_value` contours on black (zeros) background,
# the image has same height and width as `img`, but only 1 color channel
stencil = numpy.zeros(img.shape[:-1]).astype(numpy.uint8)
cv2.fillPoly(stencil, contours, mask_value)
sel = stencil != mask_value # select everything that is not mask_value
img[sel] = fill_color # and fill it with fill_color
cv2.imwrite("result.jpg", img)
You can fill with another image as well; for example, using img[sel] = ~img[sel] instead of img[sel] = fill_color would fill the outside of the contours with the inverted image:
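To connect this back to the question's code, the same stencil idea works with the largest contour cnt found by cv2.findContours. A minimal sketch (assuming img is the original image and cnt comes from the question's snippet):
import cv2
import numpy as np

# img: original image, cnt: largest contour from the question's code
stencil = np.zeros(img.shape[:2], dtype=np.uint8)
cv2.fillPoly(stencil, [cnt], 255)                  # white inside the contour
result = cv2.bitwise_and(img, img, mask=stencil)   # outside of cnt becomes black
cv2.imwrite("result.jpg", result)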
