I'm trying to set the minimum and maximum HSV values of an image in OpenCV Python, but after running the code all I can see is a blank rectangle box.
import cv2
import sys
import numpy as np
# Load in image
image = cv2.imread('power.jpg')
# Set minimum and max HSV values to display
lower = np.array([0, 209, 0])
upper = np.array([179, 255, 236])
# Create HSV Image and threshold into a range.
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower, upper)
output = cv2.bitwise_and(image,image, mask= mask)
# Display output image
cv2.imshow('image',output)
I was able to solve it.
import numpy as np
import cv2
img = cv2.imread( "power.jpg" )
## convert to hsv
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
## mask of the target HSV range: (0, 209, 0) ~ (179, 255, 236)
mask = cv2.inRange(hsv, (0, 209, 0), (179, 255, 236))

## alternative: paint the masked pixels red on a copy of the image
# bak = img.copy()
# bak[mask > 0] = (0, 0, 255)

## keep only the masked pixels; everything else stays black
imask = mask > 0
result = np.zeros_like(img, np.uint8)
result[imask] = img[imask]

## save
cv2.imwrite("image.png", result)
I am looking for a way to remove the black dots around the image border using OpenCV.
Image:
Expected solution:
import cv2

def get_img(img_name):
    lower = (0, 0, 0)
    upper = (75, 75, 75)
    img = cv2.imread(img_name)
    #print(img)
    img_rgb_inrange = cv2.inRange(img, lower, upper)
    neg_rgb_image = ~img_rgb_inrange
    w = cv2.cvtColor(neg_rgb_image, cv2.COLOR_GRAY2RGB)
    image3 = img - w
    cv2.imwrite('img.png', image3)

get_img('address of the img')
I used the above code, which I found in a linked answer. The results are below:
output mask I got after running the code:
Final Output:
I am wondering: is there any dynamic way (instead of hard-coding upper and lower bounds) to remove the noise from the image while still maintaining my foreground and background?
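One approach, shown in the code below, is to threshold a grayscale copy of the image, find the contours, and draw every contour except the largest one in white, so the small border dots are painted over while the main content is kept.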
import cv2
import matplotlib.pyplot as plt

image = cv2.imread('E3BbU.jpeg')

# Threshold a grayscale copy of the image
img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(img_gray, 150, 255, cv2.THRESH_BINARY)

# Find all contours in the thresholded image
contours, hierarchy = cv2.findContours(image=thresh, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_NONE)

# Draw every contour except the largest one in white, painting over the small dots
image2 = image.copy()
cv2.drawContours(image=image2, contours=sorted(contours, key=len)[:-1], contourIdx=-1, color=(255, 255, 255), thickness=2, lineType=cv2.LINE_AA)

# Show the original and the cleaned image (note: matplotlib receives BGR data here, so colors appear swapped)
fig, ax = plt.subplots(2, 1)
for i, img in enumerate([image, image2]):
    ax[i].imshow(img)
    ax[i].axes.get_xaxis().set_visible(False)
    ax[i].axes.get_yaxis().set_visible(False)
plt.show()
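If drawing over all but the largest contour removes too much or too little, a variation on the same idea, sketched below under the assumption that the dots show up as small contours, is to filter the contours by area and fill only the small ones with white; max_dot_area is a hypothetical threshold to tune for your image.
import cv2

image = cv2.imread('E3BbU.jpeg')
img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(img_gray, 150, 255, cv2.THRESH_BINARY)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

# fill every contour smaller than the assumed dot size with white
cleaned = image.copy()
max_dot_area = 100  # assumed size threshold, tune for your image
small = [c for c in contours if cv2.contourArea(c) < max_dot_area]
cv2.drawContours(cleaned, small, -1, (255, 255, 255), thickness=cv2.FILLED)
cv2.imwrite('cleaned.png', cleaned)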
I'm trying to detect colorful dots on a white/gray background. The dots are 3 different colors (yellow, purple, blue) of different sizes. Here is the original image:
I converted the image to HSV and found lower and upper bounds for each color, then applied contour detection to find those dots. The following code detects most of the dots:
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('image1_1.png')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_yellow = np.array([22,25,219])
upper_yellow = np.array([25,75,225])
lower_purple = np.array([141,31,223])
upper_purple = np.array([143,83,225])
lower_blue = np.array([92,32,202])
upper_blue = np.array([96,36,208])
mask_blue = cv2.inRange(hsv, lower_blue, upper_blue)
mask_purple = cv2.inRange(hsv, lower_purple, upper_purple)
mask_yellow = cv2.inRange(hsv, lower_yellow, upper_yellow)
res_blue = cv2.bitwise_and(img,img, mask=mask_blue)
res_purple = cv2.bitwise_and(img,img, mask=mask_purple)
res_yellow = cv2.bitwise_and(img,img, mask=mask_yellow)
gray_blue = cv2.cvtColor(res_blue, cv2.COLOR_BGR2GRAY)
gray_purple = cv2.cvtColor(res_purple, cv2.COLOR_BGR2GRAY)
gray_yellow = cv2.cvtColor(res_yellow, cv2.COLOR_BGR2GRAY)
_,thresh_blue = cv2.threshold(gray_blue,10,255,cv2.THRESH_BINARY)
_,thresh_purple = cv2.threshold(gray_purple,10,255,cv2.THRESH_BINARY)
_,thresh_yellow = cv2.threshold(gray_yellow,10,255,cv2.THRESH_BINARY)
contours_blue, hierarchy1 = cv2.findContours(thresh_blue,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
contours_purple, hierarchy2 = cv2.findContours(thresh_purple,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
contours_yellow, hierarchy3 = cv2.findContours(thresh_yellow,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
result = img.copy()
cv2.drawContours(result, contours_blue, -1, (0, 0, 255), 2)
cv2.drawContours(result, contours_purple, -1, (0, 0, 255), 2)
cv2.drawContours(result, contours_yellow, -1, (0, 0, 255), 2)
cv2.imwrite("_allContours.jpg", result)
Here are the detected contours:
The problem is that some of the colored dots are not detected. I understand that by fine-tuning the color ranges (lower and upper) it is possible to detect more dots, but that is very time consuming and does not generalize to similar images. For example, the following image looks similar to the first image above and has the same colorful dots, but the background is slightly different; when I ran it through the above code, it was not able to detect even one of the dots. Am I on the right track? Is there a more scalable and reliable solution, with less need to tune color parameters, to solve this problem? Here is the other image I tried:
I would suggest simply using adaptiveThreshold in Python/OpenCV:
import cv2
import numpy as np
# read image
img = cv2.imread("dots.png")
# convert img to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# do adaptive threshold on gray image
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 25, 6)
# write results to disk
cv2.imwrite("dots_thresh.jpg", thresh)
# display it
cv2.imshow("thresh", thresh)
cv2.waitKey(0)
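To go from the thresholded image to the dots themselves, one possibility (a sketch under the assumption that the dots come out as dark blobs after thresholding, with size limits that are guesses to tune) is to invert the result, find the contours, and keep only blobs within a plausible size range:
import cv2

img = cv2.imread("dots.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 25, 6)

# invert so the dark dots become white blobs, then find their outer contours
mask = 255 - thresh
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# keep only blobs in a plausible size range for dots (both limits are assumptions to tune)
dots = [c for c in contours if 10 < cv2.contourArea(c) < 2000]

result = img.copy()
cv2.drawContours(result, dots, -1, (0, 0, 255), 2)
cv2.imwrite("dots_contours.jpg", result)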
I need to remove the gray drawing from the image background and keep only the symbols drawn over it.
Here is my code to do that using morphologyEx, but it did not remove the entire gray drawing in the background.
import cv2
import numpy as np

img_path = "images/new_drawing.png"
img = cv2.imread(img_path)

kernel = np.ones((2,2), dtype=np.uint8)
result = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=1)

cv2.imshow('Without background', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
I also tried the following and got the expected result in grayscale, but I was unable to convert it to BGR.
Here is my code:
img = cv2.imread('images/new_drawing.png')
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

med_blur = cv2.medianBlur(gray_img, ksize=3)
_, thresh = cv2.threshold(med_blur, 190, 255, cv2.THRESH_BINARY)
blending = cv2.addWeighted(gray_img, 0.5, thresh, 0.9, gamma=0)

cv2.imshow("blending", blending)
cv2.waitKey(0)
I also used contours to identify the symbols and draw them onto a white image, but the problem is that this also picks up the background drawing that I don't want.
Input image
Expected output image
Also, the drawing will always be in gray, as in the image.
Please help me get a better result.
You are almost there...
Instead of using cv2.inRange to "catch" the non-gray pixels, I suggest using cv2.inRange to catch all the pixels you want to change to white:
mask = cv2.inRange(hsv, (0, 0, 100), (255, 5, 255))
The hue range is irrelevant.
The saturation is close to zero (shades of gray).
The brightness range excludes the black pixels (which you want to keep).
In order to get a nicer solution, I also used the following additional stages:
Build a mask of non-black pixels:
nzmask = cv2.inRange(hsv, (0, 0, 5), (255, 255, 255))
Erode the above mask:
nzmask = cv2.erode(nzmask, np.ones((3,3)))
Apply an AND operation between mask and nzmask:
mask = mask & nzmask
The above stages keep the gray pixels around the black text.
Without the above stages, the black text gets thinner.
The last stage is replacing mask pixels with white:
new_img = img.copy()
new_img[np.where(mask)] = 255
Here is the code:
import numpy as np
import cv2
img_path = "new_drawing.png"
img = cv2.imread(img_path)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 0, 100), (255, 5, 255))
cv2.imshow('mask before AND with nzmask', mask)

# Build mask of non-black pixels.
nzmask = cv2.inRange(hsv, (0, 0, 5), (255, 255, 255))

# Erode the mask - pixels around a black pixel should not be masked.
nzmask = cv2.erode(nzmask, np.ones((3,3)))
cv2.imshow('nzmask', nzmask)

mask = mask & nzmask

new_img = img.copy()
new_img[np.where(mask)] = 255

cv2.imshow('mask', mask)
cv2.imshow('new_img', new_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
Here is one way to do that in Python/OpenCV.
Read the input
Convert to HSV and separate channels
Threshold the saturation channel
Threshold the value channel and invert
Combine the two threshold images as a mask
Apply the mask to the input to write white where the mask is black
Save the result
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('symbols.png')
# convert image to hsv colorspace
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
# threshold saturation image
thresh1 = cv2.threshold(s, 92, 255, cv2.THRESH_BINARY)[1]
# threshold value image and invert
thresh2 = cv2.threshold(v, 128, 255, cv2.THRESH_BINARY)[1]
thresh2 = 255 - thresh2
# combine the two threshold images as a mask
mask = cv2.add(thresh1,thresh2)
# use mask to remove lines in background of input
result = img.copy()
result[mask==0] = (255,255,255)
# display IN and OUT images
cv2.imshow('IMAGE', img)
cv2.imshow('SAT', s)
cv2.imshow('VAL', v)
cv2.imshow('THRESH1', thresh1)
cv2.imshow('THRESH2', thresh2)
cv2.imshow('MASK', mask)
cv2.imshow('RESULT', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save output image
cv2.imwrite('symbols_thresh1.png', thresh1)
cv2.imwrite('symbols_thresh2.png', thresh2)
cv2.imwrite('symbols_mask.png', mask)
cv2.imwrite('symbols_cleaned.png', result)
Saturation channel thresholded:
Value channel thresholded and inverted:
Mask:
Result:
I am trying to develop a way to output the number of pixels that fall between the HSV mask limits of each frame of a given video on a Raspberry Pi camera.
The aim of this is to determine the 'Red' intensity of a red dimmer light and therefore split its intensity into different levels, and hence determine which intensity the light is switched to in each frame. How would I go about calculating said pixel count?
My progress so far is that I have a method and limits for masking a frame using OpenCV commands. I just need a way to count the remaining pixels of each frame.
Here is my current code, which I have slightly adapted from this great tutorial I found: Automatic Vision Object Tracking
import cv2
import numpy as np
img = cv2.imread('hsvmeasure.jpg', 1)
img = cv2.resize(img, (0,0), fx=0.2, fy=0.2)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_range = np.array([160,100,100], dtype=np.uint8)
upper_range = np.array([180,255,255], dtype=np.uint8)
mask = cv2.inRange(hsv, lower_range, upper_range)
cv2.imshow('mask', mask)
cv2.imshow('img', img)
while(1):
    k = cv2.waitKey(0)
    if (k == 27):
        break
cv2.destroyAllWindows()
You have already done most of the work; now you can just define non-overlapping ranges of lower and upper intensities and count how many pixels are 255 in each mask.
import cv2
import numpy as np
img = cv2.imread('test.jpg', 1)
img = cv2.resize(img, (0,0), fx=0.2, fy=0.2)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
lower_intensity_levels = [ [150,100,100], [161, 100,100], [171, 100, 100] ] # non overlapping
upper_intensity_levels = [ [160,255,255] , [170, 255, 255], [180, 255, 255] ] # make these list based on your intensity requirements
mask_on_counts = []
for i in range(len(lower_intensity_levels)):
    lower_range = np.array(lower_intensity_levels[i], dtype=np.uint8)
    upper_range = np.array(upper_intensity_levels[i], dtype=np.uint8)
    mask = cv2.inRange(hsv, lower_range, upper_range)
    mask_on_counts.append(np.sum(mask == 255))

    import matplotlib.pyplot as plt
    plt.imshow(mask)
    plt.show()

for i in range(len(mask_on_counts)):
    print(f'level {i+1} number of pixels: {mask_on_counts[i]}')
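Once the counts are available, a frame can be assigned to an intensity level by simply picking the range that captured the most pixels, for example (a small sketch building on the mask_on_counts list above):
import numpy as np

# the level whose HSV range matched the most pixels wins
level = int(np.argmax(mask_on_counts)) + 1
print(f'detected intensity level: {level}')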
How can I convert this HSV result into RGB? I would like to do it using cv2.cvtColor with cv2.COLOR_BGR2RGB, but it does not give the same result.
This is the image I've used:
Here is the result that I want, but obtained using cv2.COLOR_BGR2RGB; the code below uses cv2.COLOR_BGR2HSV.
import cv2
import numpy as np
## Read
img = cv2.imread("ni.jpg")
## convert to hsv
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
## mask of green (36,25,25) ~ (86, 255,255)
# mask = cv2.inRange(hsv, (36, 25, 25), (86, 255,255))
mask = cv2.inRange(hsv, (7, 25, 25), (70, 255,255))
## slice the green
imask = mask>0
green = np.zeros_like(img, np.uint8)
green[imask] = img[imask]
## save
cv2.imwrite("green.png", green)
To convert an image from HSV to RGB you can do:
rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
and to do HSV to BGR it is
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
You have to know that OpenCV uses BGR when reading/saving images.
You can convert between RGB and BGR with cvtColor and cv2.COLOR_RGB2BGR, or cv2.COLOR_BGR2RGB.
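For example, since imread returns BGR, a quick sketch of displaying the saved result with correct colors in matplotlib (assuming the green.png written by the code above) would be:
import cv2
import matplotlib.pyplot as plt

green = cv2.imread("green.png")                      # loaded as BGR
green_rgb = cv2.cvtColor(green, cv2.COLOR_BGR2RGB)   # matplotlib expects RGB
plt.imshow(green_rgb)
plt.show()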
EDIT:
However, if what you want is a mask of the green bananas (or the yellow bananas), the issue is the way you defined the green color: right now it includes a lot of other colors, including yellow.
What you can do with the HSV image is look only at the first channel, the hue:
Here you can see that green and yellow can be differentiated: green bananas have hue values roughly between 30 and 50, and yellow ones between 20 and 30.
You can build a mask with that. I used another library, scikit-image, to clean up the pixels we don't want. This can be done in OpenCV as well, but it takes a bit more time...
So here is my code:
import cv2
import numpy as np
import matplotlib.pyplot as plt
from skimage.morphology import remove_small_objects, remove_small_holes
## Read
img = cv2.imread("ni.jpg")
## convert to hsv
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hue = hsv[:,:,0]
# plt.imshow(hue) # this show the figure in my post
# plt.show()
# mask = np.bitwise_and(hue > 20, hue < 35) # for yellow
mask = np.bitwise_and(hue > 30, hue < 50) # for green
mask = remove_small_objects(mask, 1000)
mask = remove_small_holes(mask, 1000)
green = np.zeros_like(img, np.uint8)
green[mask] = img[mask]
## save
cv2.imwrite("green.png", green)