Count red pixel values and plot histogram in Python

I have a set of images located in 3 separate folders, based on their Type. I want to iterate through every Type and count the red pixel values of every image, where I define red as values in the range 200 to 256. I want to create histograms for each type, later cluster the histograms, and discriminate between the 3 classes. My experience with Python is very limited and I am stuck on how to isolate and count the red pixel values. I have attached my code and the resulting histogram for Type 1, which is a straight line. Could someone help with this?
import numpy as np
import cv2
import os.path
import glob
import matplotlib.pyplot as plt

## take the image, compute sum of all row colors and return the percentage

# iterate through every Type
for t in [1]:
    # load files
    files = glob.glob(os.path.join("..", "data", "train", "Type_{}".format(t), "*.jpg"))
    no_files = len(files)

    # iterate and read
    for n, file in enumerate(files):
        try:
            image = cv2.imread(file)
            hist = cv2.calcHist([img], [0], None, [56], [200, 256])
            print(file, t, "-files left", no_files - n)
        except Exception as e:
            print(e)
            print(file)

plt.plot(hist)
plt.show()

This is the solution I came up with. I have taken the liberty of refactoring and simplifying your code a bit.
import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from skimage import io

root = r'C:\Users\you\imgs'  # Change this appropriately (raw string avoids \U escape errors)
folders = ['Type_1', 'Type_2', 'Type_3']
extension = '*.bmp'  # Change if necessary
threshold = 150  # Adjust to fit your needs

n_bins = 5  # Tune these values to customize the plot
width = 2.
colors = ['cyan', 'magenta', 'yellow']
edges = np.linspace(0, 100, n_bins + 1)
centers = .5*(edges[:-1] + edges[1:])

# This is just a convenience class used to encapsulate data
class img_type(object):
    def __init__(self, folder, color):
        self.folder = folder
        self.percents = []
        self.color = color

lst = [img_type(f, c) for f, c in zip(folders, colors)]
fig, ax = plt.subplots()

for n, obj in enumerate(lst):
    filenames = glob.glob(os.path.join(root, obj.folder, extension))

    for fn in filenames:
        img = io.imread(fn)
        red = img[:, :, 0]
        obj.percents.append(100.*np.sum(red >= threshold)/red.size)

    h, _ = np.histogram(obj.percents, bins=edges)
    h = np.float64(h)
    h /= h.sum()
    h *= 100.
    ax.bar(centers + (n - .5*len(lst))*width, h, width, color=obj.color)

ax.legend(folders)
ax.set_xlabel('% of pixels whose red component is >= threshold')
ax.set_ylabel('% of images')
plt.show()
Notice that I used scikit-image rather than OpenCV to read the images (scikit-image reads RGB, so channel 0 is red). If this is not an option for you, add import cv2 and change:
img = io.imread(fn)
red = img[:, :, 0]
to:
img = cv2.imread(fn)
red = img[:, :, 2]
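As an aside on the original attempt: the posted loop passes the undefined name img to cv2.calcHist (the variable is called image) and histograms channel 0, which is blue in OpenCV's BGR ordering. A minimal corrected sketch of that calcHist call, using a placeholder file path, would be:

import cv2
import matplotlib.pyplot as plt

image = cv2.imread("some_image.jpg")  # placeholder path; OpenCV loads images as BGR
# Channel index 2 is red in BGR; 56 bins over the value range [200, 256)
hist = cv2.calcHist([image], [2], None, [56], [200, 256])

plt.plot(hist)
plt.show()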

Related

How to measure similarity and find out which image matches the corresponding image with the highest similarity?

I have two folders of images, each containing about 145 images. I want to measure the similarity between them, find out which image in one folder matches which image in the other, and identify the pairs with the highest similarity. Assume the similarity depends on the distance between the centers of the two points visible in the images, but the metric should not depend on the diameter of those two points.
This is the code I have used to compute similarity based on Euclidean distance:
from skimage.metrics import structural_similarity as ssim
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import scipy.spatial.distance as dist
import os

np.seterr(divide='ignore', invalid='ignore')

images_list = []
images_list2 = []
resultlist = []
Maxlist = []
SIZE = 512

path = r"C:\Users\fadil\Desktop\Project\Segmentation\Exp1_v1_30062021 - Copy-2\CT&DCT\CT\U_Net_Segmentation_CT\result1\*.*"
for file in glob.glob(path):
    print(file)                # just stop here to see all file names printed
    img = cv2.imread(file, 0)  # now, we can read each file since we have the full path
    images_list.append(img)
images_list = np.array(images_list)

path3 = r"C:\Users\fadil\Desktop\Project\Segmentation\Exp1_v1_30062021 - Copy-2\CT&DCT\CT\U_Net_Segmentation_CT\result2\*.*"
for file2 in glob.glob(path3):
    print(file2)                 # just stop here to see all file names printed
    img2 = cv2.imread(file2, 0)  # now, we can read each file since we have the full path
    images_list2.append(img2)
images_list2 = np.array(images_list2)

img_num: int = 1
for img2 in range(images_list2.shape[0]):
    input_img2 = images_list2[img2, :, :]
    print(img2)
    img_number = 1
    resultlist.clear()

    for image in range(images_list.shape[0]):
        input_img = images_list[image, :, :]  # Grey images. For color add another dim.
        s = dist.euclidean(input_img, input_img2)
        resultlist.append(float(s))
        img_number += 1

    print(resultlist)
    with open(r"C:\Users\fadil\Desktop\Project\Segmentation\Exp1_v1_30062021 - Copy-2\CT&DCT\CT\Similartiy Data result euclidean\file" + str(img_num) + ".txt", "w") as f:
        for M in resultlist:
            f.write(str(M) + "\n")

    #print(len(resultlist))
    ll = max(resultlist)
    Maxlist.append(float(ll))
    print(ll)
    print(Maxlist)
    MM = resultlist.index(ll)
    print(MM)
    list = images_list[MM]

    ###############################################
    from skimage import data, img_as_float

    im1 = images_list[MM]
    im2 = input_img2
    im1 = img_as_float(im1)
    rows, cols = im1.shape
    im2 = img_as_float(im2)
    rows, cols = im2.shape

    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 4),
                             sharex=True, sharey=True)
    ax = axes.ravel()

    mse_none = mean_squared_error(im1, im2)
    ssim_none = ssim(im1, im1, data_range=im1.max() - im1.min())
    mse_noise = mean_squared_error(im1, im2)
    ssim_noise = ssim(im1, im2,
                      data_range=im2.max() - im2.min())
    mse_const = mean_squared_error(im1, im2)
    ssim_const = ssim(im1, im2,
                      data_range=im2.max() - im2.min())

    label = 'MSE: {:.16f},\n SSIM: {:.16f},\n MM'
    ax[0].imshow(im1, cmap=plt.cm.gray, vmin=0, vmax=1)
    ax[0].set_xlabel(label.format(mse_none, ssim_none))
    ax[0].set_title('GT CT')
    ax[1].imshow(im2, cmap=plt.cm.gray, vmin=0, vmax=1)
    ax[1].set_xlabel(label.format(mse_noise, ssim_noise))
    ax[1].set_title('RE CT')
    ###############################################

    cv2.imwrite(r"C:\Users\fadil\Desktop\Project\Segmentation\Exp1_v1_30062021 - Copy-2\CT&DCT\CT\Similartiy Data result euclidean\image" + str(img_num) + ".jpg", list)
    cv2.imwrite(r"C:\Users\fadil\Desktop\Project\Segmentation\Exp1_v1_30062021 - Copy-2\CT&DCT\CT\Similartiy Data result euclidean\image" + str(img_num) + ".png", input_img2)
    img_num += 1

#print(Maxlist)
#plt.plot(Maxlist, Maxlist)
#plt.show()

with open(r"C:\Users\fadil\Desktop\Project\Segmentation\Exp1_v1_30062021 - Copy-2\CT&DCT\CT\Similartiy Data result euclidean\file" + str(img_num) + ".txt", "w") as f:
    for N in Maxlist:
        f.write(str(N) + "\n")

import pickle

plt.tight_layout()
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
I receive the following error:
Traceback (most recent call last):
  File "C:/Users/fadil/Desktop/Project/Segmentation/Exp1_v1_30062021 - Copy-2/CT&DCT/CT/U_Net_Segmentation_CT/Similarity euclidean.py", line 45, in <module>
    s = dist.euclidean(input_img, input_img2)
  File "C:\Users\fadil\.conda\envs\tf\lib\site-packages\scipy\spatial\distance.py", line 626, in euclidean
    return minkowski(u, v, p=2, w=w)
  File "C:\Users\fadil\.conda\envs\tf\lib\site-packages\scipy\spatial\distance.py", line 513, in minkowski
    u = _validate_vector(u)
  File "C:\Users\fadil\.conda\envs\tf\lib\site-packages\scipy\spatial\distance.py", line 340, in _validate_vector
    raise ValueError("Input vector should be 1-D.")
ValueError: Input vector should be 1-D.
Process finished with exit code 1
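The traceback pinpoints the cause: scipy.spatial.distance.euclidean accepts only 1-D vectors, while the loop passes 2-D image arrays. A minimal sketch of the usual fix, assuming two same-shape grayscale arrays, is to flatten them first:

import numpy as np
import scipy.spatial.distance as dist

# Dummy same-shape grayscale images; substitute your real arrays
input_img = np.random.randint(0, 256, (512, 512)).astype(np.float64)
input_img2 = np.random.randint(0, 256, (512, 512)).astype(np.float64)

# Flatten each 2-D array to 1-D before computing the distance
s = dist.euclidean(input_img.ravel(), input_img2.ravel())

# Equivalent, without scipy:
s2 = np.linalg.norm(input_img - input_img2)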
Image matching can be done by applying keypoint extraction algorithms. One such algorithm is ORB, which is part of the OpenCV library. You can find more information about ORB here.
A simple code to match two images using ORB is as follows. You can amend the code as per your requirements and loop through all the images to find the matching one.
import numpy as np
import cv2

query_img = cv2.imread('query.jpg')
train_img = cv2.imread('train.jpg')

# Convert images into grayscale
query_img_bw = cv2.cvtColor(query_img, cv2.COLOR_BGR2GRAY)
train_img_bw = cv2.cvtColor(train_img, cv2.COLOR_BGR2GRAY)

# Initialize the ORB detector algorithm
orb = cv2.ORB_create()

# Now detect the keypoints and compute the descriptors
queryKeypoints, queryDescriptors = orb.detectAndCompute(query_img_bw, None)
trainKeypoints, trainDescriptors = orb.detectAndCompute(train_img_bw, None)

# Match the keypoints
matcher = cv2.BFMatcher()
matches = matcher.match(queryDescriptors, trainDescriptors)

# Draw the matches onto a final image containing both input images
final_img = cv2.drawMatches(query_img, queryKeypoints,
                            train_img, trainKeypoints, matches[:20], None)
final_img = cv2.resize(final_img, (1000, 650))

# Show the final image
cv2.imshow("Matches", final_img)
cv2.waitKey(3000)
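Continuing from the snippet above, one simple way (by no means the only one) to turn the matches into a similarity score for ranking candidate images is the mean descriptor distance of the best N matches, where a lower score means a more similar pair:

import numpy as np

# Sort matches by descriptor distance and average the best 20;
# lower mean distance = more similar pair of images.
matches = sorted(matches, key=lambda m: m.distance)
score = np.mean([m.distance for m in matches[:20]]) if matches else float('inf')
print("similarity score:", score)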

Why is my code only working on part of my image?

I created code to equalize the luminosity values of pixels in an image so that when the image is further edited I do not have dark or light spots in my final image. However, the code seems to stop short and only equalize part of my image. Any ideas as to why the code is stopping early?
Here is my code:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

img = mpimg.imread('EXP_0159-2_8b.tif')
imgOut = img.copy()

for i in range(0, len(img[0, :])):
    imgLine1 = (img[:, i] < 165) * img[:, i]
    p = imgLine1.nonzero()
    if len(p[0]) < 1:
        imgOut[:, i] == 0
    else:
        imgLine2 = imgLine1[p[0]]

        def curvefitting(lineFunction):
            x = np.arange(0, len(lineFunction))
            y = lineFunction
            curve = np.polyfit(x, y, deg=2)
            a = curve[0]
            b = curve[1]
            c = curve[2]
            curveEquation = (a*(x**2)) + (b*(x**1)) + (c)
            curveCorrected = lineFunction - curveEquation + 200
            return curveCorrected

        imgLine1[p[0]] = curvefitting(imgLine2)
        imgOut[:, i] = imgLine1

plt.imshow(imgOut, cmap='gray')
The for loop takes the individual columns of pixels in my image and restricts the endpoints of that column to (0, 165), so that pixels outside of that range are turned into zero and ignored by the nonzero() function. The if condition just finalizes the conversion of values outside (0, 165) to zero. Additionally, I converted the image to gray so I would not have to deal with colors and could focus only on luminosity.
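To make that masking step concrete, here is a tiny illustrative sketch (the values are my own example, not from the image above) of what happens to one column:

import numpy as np

col = np.array([10, 200, 120, 165])
masked = (col < 165) * col      # values at or above 165 become 0
print(masked)                   # [ 10   0 120   0]
print(masked.nonzero())         # (array([0, 2]),) -- note nonzero() returns a tuple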
This is my corrected image. The program works to average the luminosity values across the entire surface. However, you can see that it stops before reaching the end. The initial image was darker on the sides and lighter in the middle, but the file is too large to upload.
Any help is greatly appreciated.
If you are not interested in color, you can convert the input image to grayscale, which simplifies the matrix operations. The simplified version would be:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

def rgb2gray(rgb):
    return np.dot(rgb[..., :3], [0.2989, 0.5870, 0.1140])

def curvefitting(lineFunction):
    x = np.arange(0, len(lineFunction))
    y = lineFunction
    curve = np.polyfit(x, y, deg=2)
    a = curve[0]
    b = curve[1]
    c = curve[2]
    curveEquation = [(a*(x_**2)) + (b*(x_**1)) + (c) for x_ in x]
    curveCorrected = lineFunction - curveEquation + 200
    return curveCorrected

img = mpimg.imread('EXP_0159-2_8b.tif')
img = rgb2gray(img)
imgOut = img.copy()

for i in range(0, len(img[0, :])):
    imgLine1 = (img[:, i] < 165) * img[:, i]
    p = imgLine1.nonzero()
    if len(p[0]) < 1:     # nonzero() returns a tuple; check the index array inside it
        imgOut[:, i] = 0  # assignment, not the no-op comparison `==` from the question
    else:
        imgLine2 = imgLine1[p]
        imgLine1[p] = curvefitting(imgLine2)
        imgOut[:, i] = imgLine1

plt.imshow(imgOut, cmap='gray')
plt.show()

Unique Color Detection and Storing images dynamically

Given an image, find the unique colors in it and write an output image for each unique color.
In each output image, all pixels that don't have that unique color should be marked white.
For example, if an image has 3 colors, the output folder should contain three images, one per color. Using OpenCV & Python.
I've created the unique color list using my own methods. What I want now is a count of all those unique colors in the sample.png image and the corresponding output images, as described above.
I believe the code below (with comments) should help you with this!
Feel free to follow up if any of the code is unclear!
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from copy import deepcopy

# Load image and convert it from BGR (opencv default) to RGB
fpath = "dog.png"  # TODO: replace with your path
IMG = cv.cvtColor(cv.imread(fpath), cv.COLOR_BGR2RGB)

# Get dimensions and reshape into (H * W, C) vector - i.e. a long vector,
# where each element is a tuple corresponding to a color!
H, W, C = IMG.shape
IMG_FLATTENED = np.vstack([IMG[:, w, :] for w in range(W)])

# Get unique colors using np.unique function, and their counts
colors, counts = np.unique(IMG_FLATTENED, axis=0, return_counts=True)

# Jointly loop through colors and counts
for color, count in zip(colors, counts):
    print("COLOR: {}, COUNT: {}".format(color, count))

    # Create placeholder image and mark all pixels as white
    SINGLE_COLOR = (255 * np.ones(IMG.shape)).astype(np.uint8)  # Make sure casted to uint8

    # Compute binary mask of pixel locations where color is, and set color in new image
    color_idx = np.all(IMG[..., :] == color, axis=-1)
    SINGLE_COLOR[color_idx, :] = color

    # Write file to output with color and counts specified
    # (convert back to BGR so cv.imwrite saves the channels in the right order)
    cv.imwrite("color={}_count={}.png".format(color, count),
               cv.cvtColor(SINGLE_COLOR, cv.COLOR_RGB2BGR))
Ack, he beat me to it. Well, here's what I've got.
Oh no, I don't think the line
blank[img == color] = img[img == color]
behaves how I think it does. I think it just coincidentally works for this case. I'll edit the code with a solution I'm more confident works for all cases.
Original Image
import cv2
import numpy as np

# load image
img = cv2.imread("circles.png");

# get uniques
unique_colors, counts = np.unique(img.reshape(-1, img.shape[-1]), axis=0, return_counts=True);

# split off each color
splits = [];
for a in range(len(unique_colors)):
    # get the color
    color = unique_colors[a];
    blank = np.zeros_like(img);
    mask = cv2.inRange(img, color, color);  # edited line 1
    blank[mask == 255] = img[mask == 255];  # edited line 2 (mask is 0/255, so index with mask == 255)

    # show
    cv2.imshow("Blank", blank);
    cv2.waitKey(0);

    # save each color with its count
    file_str = "";
    for b in range(3):
        file_str += str(color[b]) + "_";
    file_str += str(counts[a]) + ".png";
    cv2.imwrite(file_str, blank);

How to convert RGB image pixels to L*a*b*?

Well, I'm working with image processing to identify the color variation of an image and to be able to plot that data in a histogram. For this, I use images of skin spots in the RGB color space. With the code below I can get the color of each pixel and convert it to HSV using colorsys. But I want to convert to L*a*b*, because it is closer to human vision, and colorsys has no conversion to L*a*b*. Given the separated R, G and B values of each pixel, how do I transform them into LAB colors?
import numpy as np
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.pyplot as plt
import colorsys
from PIL import Image

# (1) Import the file to be analyzed!
img_file = Image.open("IMD006.png")
img = img_file.load()

# (2) Get image width & height in pixels
[xs, ys] = img_file.size
max_intensity = 100
hues = {}

# (3) Examine each pixel in the image file
for x in xrange(0, xs):
    for y in xrange(0, ys):
        # (4) Get the RGB color of the pixel
        [r, g, b] = img[x, y]

        # (5) Normalize pixel color values
        r /= 255.0
        g /= 255.0
        b /= 255.0

        # (6) Convert RGB color to HSV
        [h, s, v] = colorsys.rgb_to_hsv(r, g, b)

        # (7) Marginalize s; count how many pixels have matching (h, v)
        if h not in hues:
            hues[h] = {}
        if v not in hues[h]:
            hues[h][v] = 1
        else:
            if hues[h][v] < max_intensity:
                hues[h][v] += 1
You can do it with PIL/Pillow using the built-in Colour Management System and building a transform like this:
#!/usr/local/bin/python3
import numpy as np
from PIL import Image, ImageCms
# Open image and discard alpha channel which makes wheel round rather than square
im = Image.open('colorwheel.png').convert('RGB')
# Convert to Lab colourspace
srgb_p = ImageCms.createProfile("sRGB")
lab_p = ImageCms.createProfile("LAB")
rgb2lab = ImageCms.buildTransformFromOpenProfiles(srgb_p, lab_p, "RGB", "LAB")
Lab = ImageCms.applyTransform(im, rgb2lab)
And Lab is now your image in Lab colourspace. If you carry on and add the following lines to the end of the above code, you can split the Lab image into its constituent channels and save them each as greyscale images for checking.
# Split into constituent channels so we can save 3 separate greyscales
L, a, b = Lab.split()
L.save('L.png')
a.save('a.png')
b.save('b.png')
So, if you start with the colour wheel image above, you will get three greyscale outputs: the L channel, the a channel and the b channel (images omitted here).
Being non-scientific for a moment, the a channel should be negative/low where the image is green and should be high/positive where the image is magenta so it looks correct. And the b channel should be negative/low where the image is blue and high/positive where it is yellow, so that looks pretty good to me! As regards the L channel, the RGB to greyscale formula is (off the top of my head) something like:
L = 0.2*R + 0.7*G + 0.1*B
So you would expect the L channel to be much brighter where the image is green, and darkest where it is blue.
Alternatively, you can do it with the scikit-image module, maybe even more simply like this:
import numpy as np
from skimage import color, io
# Open image and make Numpy arrays 'rgb' and 'Lab'
rgb = io.imread('image.png')
Lab = color.rgb2lab(rgb)
I am not 100% sure of the scaling, but I suspect the L channel is a float in range 0..100, and that a and b are also floats in range -128..+128, though I may be wrong!
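That suspicion is easy to check with a couple of extreme colours; a minimal sketch:

import numpy as np
from skimage import color

# Pure white and pure green as 1x1 float "images" in the range 0..1
print(color.rgb2lab(np.array([[[1.0, 1.0, 1.0]]])))  # approx [[[100, 0, 0]]]
print(color.rgb2lab(np.array([[[0.0, 1.0, 0.0]]])))  # L around 88, a strongly negative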
With my colour wheel image above I got the following minima/maxima for each channel:
Lab[:,:,0].min() # L min
32.29567256501352
Lab[:,:,0].max() # L max
97.13950703971322
Lab[:,:,1].min() # a min
-86.18302974439501
Lab[:,:,1].max() # a max
98.23305386311316
Lab[:,:,2].min() # b min
-107.85730020669489
Lab[:,:,2].max() # b max
94.47812227647823
import numpy as np
from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color

def rgb_to_cielab(a):
    """
    a is a pixel with RGB coloring
    """
    a1, a2, a3 = a / 255

    color1_rgb = sRGBColor(a1, a2, a3)
    color1_lab = convert_color(color1_rgb, LabColor)

    return color1_lab

rgb_to_cielab(np.array([255, 0, 255]))
Output: LabColor(lab_l=60.32364943499053, lab_a=98.23532017664644, lab_b=-60.83501679458592)
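As a follow-up, to run this per-pixel converter over a whole image you can use np.apply_along_axis, though colormath is slow on large arrays, so the vectorized cv2/skimage conversions are preferable there. This sketch assumes colormath's get_value_tuple() accessor and uses a small random image purely for illustration:

import numpy as np

img = np.random.randint(0, 256, (4, 4, 3))
lab = np.apply_along_axis(
    lambda px: np.array(rgb_to_cielab(px).get_value_tuple()), 2, img)
print(lab.shape)  # (4, 4, 3): L*, a*, b* per pixel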
Using cv2 you can easily implement this conversion. RGB->LAB, LAB->RGB.
import numpy as np
import cv2
img = cv2.imread('1.jpg')
LAB = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
cv2.imwrite('L.png', LAB[:,:,0])
cv2.imwrite('a.png', LAB[:,:,1])
cv2.imwrite('b.png', LAB[:,:,2])
BGR = cv2.cvtColor(LAB, cv2.COLOR_LAB2BGR)
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
cv2.imwrite('new.png', BGR)
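One caveat worth knowing: for 8-bit images OpenCV rescales Lab to fit 0..255, multiplying L by 255/100 and offsetting a and b by 128. A small sketch to recover the conventional ranges, continuing from the LAB array above:

import numpy as np

lab = LAB.astype(np.float32)             # 8-bit Lab from the snippet above
L_true = lab[:, :, 0] * (100.0 / 255.0)  # L* back in 0..100
a_true = lab[:, :, 1] - 128.0            # a* roughly in -128..127
b_true = lab[:, :, 2] - 128.0            # b* roughly in -128..127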
I ran into this same problem about three months ago; here is my solution:
import numpy as np
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.pyplot as plt
import colorsys
from PIL import Image
from past.builtins import xrange

img_file = Image.open("F:/coding/Project/FDD/neo5.png")
img = img_file.load()
[xs, ys] = img_file.size
max_intensity = 100
hues = {}

for x in xrange(0, xs):
    for y in xrange(0, ys):
        [r, g, b] = img[x, y]
        r /= 255.0
        g /= 255.0
        b /= 255.0
        [h, s, v] = colorsys.rgb_to_hsv(r, g, b)
        if h not in hues:
            hues[h] = {}
        if v not in hues[h]:
            hues[h][v] = 1
        else:
            if hues[h][v] < max_intensity:
                hues[h][v] += 1

h_ = []
v_ = []
i = []
colours = []

for h in hues:
    for v in hues[h]:
        h_.append(h)
        v_.append(v)
        i.append(hues[h][v])
        [r, g, b] = colorsys.hsv_to_rgb(h, 1, v)
        colours.append([r, g, b])

fig = plt.figure()
ax = p3.Axes3D(fig)
ax.scatter(h_, v_, i, s=5, c=colours, lw=0)
ax.set_xlabel('Hue')
ax.set_ylabel('Value')
ax.set_zlabel('Intensity')
fig.add_axes(ax)
plt.show()

how to create LAB color chart using opencv?

I am developing a project whose starting point is to identify the colors of certain spots; for this I am plotting 3D graphs of the RGB colors of these images. With this I have identified some striking colors in these spots, as seen below.
Color is a matter of perception and subjective interpretation. The purpose of this step is to identify the colors in a way that yields a consistent pattern, free of differences in interpretation. Searching the internet for this, the recommendation is to use the L*a*b* color space.
Can someone help me obtain this graph with LAB colors, or suggest another way to better classify the colors of these spots?
Code used to plot the 3D graph:
import numpy as np
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.pyplot as plt
import colorsys
from PIL import Image

# (1) Import the file to be analyzed!
img_file = Image.open("IMD405.png")
img = img_file.load()

# (2) Get image width & height in pixels
[xs, ys] = img_file.size
max_intensity = 100
hues = {}

# (3) Examine each pixel in the image file
for x in xrange(0, xs):
    for y in xrange(0, ys):
        # (4) Get the RGB color of the pixel
        [r, g, b] = img[x, y]

        # (5) Normalize pixel color values
        r /= 255.0
        g /= 255.0
        b /= 255.0

        # (6) Convert RGB color to HSV
        [h, s, v] = colorsys.rgb_to_hsv(r, g, b)

        # (7) Marginalize s; count how many pixels have matching (h, v)
        if h not in hues:
            hues[h] = {}
        if v not in hues[h]:
            hues[h][v] = 1
        else:
            if hues[h][v] < max_intensity:
                hues[h][v] += 1

# (8) Decompose the hues object into a set of one dimensional arrays we can use with matplotlib
h_ = []
v_ = []
i = []
colours = []

for h in hues:
    for v in hues[h]:
        h_.append(h)
        v_.append(v)
        i.append(hues[h][v])
        [r, g, b] = colorsys.hsv_to_rgb(h, 1, v)
        colours.append([r, g, b])

# (9) Plot the graph!
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.scatter(h_, v_, i, s=5, c=colours, lw=0)
ax.set_xlabel('Hue')
ax.set_ylabel('Value')
ax.set_zlabel('Intensity')
fig.add_axes(ax)
plt.savefig('plot-IMD405.png')
plt.show()
Using OpenCV from Python makes this really straightforward. Here I created a function that converts an image to Lab and plots a sample. Note that for this function the input image must be RGB or BGR.
import cv2
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

image_BGR = np.uint8(np.random.rand(50, 50, 3) * 255)
# The image above is just an example. To load a real image use the line below:
# image_BGR = cv2.imread('path/to/image')

def toLAB(image, input_type='BGR'):
    conversion = cv2.COLOR_BGR2LAB if input_type == 'BGR' else cv2.COLOR_RGB2LAB
    image_LAB = cv2.cvtColor(image, conversion)

    y, x, z = image_LAB.shape
    LAB_flat = np.reshape(image_LAB, [y*x, z])

    colors = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) if input_type == 'BGR' else image
    colors = np.reshape(colors, [y*x, z]) / 255.

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Channel order after cvtColor is L, a, b: index 1 is a* and index 2 is b*
    ax.scatter(xs=LAB_flat[:, 2], ys=LAB_flat[:, 1], zs=LAB_flat[:, 0], s=10, c=colors, lw=0)
    ax.set_xlabel('b*')
    ax.set_ylabel('a*')
    ax.set_zlabel('L')

    plt.show()

    return image_LAB

lab_image = toLAB(image_BGR)
The result is a 3-D scatter plot of the image's pixels in Lab space (screenshot omitted).
I hope it helped!
(This answer included a static map and an animated gif map of the colour space; images omitted here.)
I prefer to use HSV to look up a specific color range; see, for example:
Choosing the correct upper and lower HSV boundaries for color detection with `cv::inRange` (OpenCV)
How to define a threshold value to detect only green colour objects in an image: OpenCV
How to detect two different colors using `cv2.inRange` in Python-OpenCV?
What are recommended color spaces for detecting orange color in OpenCV?
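For reference, a minimal sketch of that HSV range lookup (the file path and the orange-ish hue bounds are placeholders; tune them for your spots, remembering that OpenCV hue runs 0..179):

import cv2
import numpy as np

img = cv2.imread('spots.png')  # hypothetical input path
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

# Example range for orange-ish hues
lower = np.array([10, 100, 100])
upper = np.array([25, 255, 255])
mask = cv2.inRange(hsv, lower, upper)

count = cv2.countNonZero(mask)  # how many pixels fall in the range
print(count)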
