Creating custom colormap for OpenCV Python

I created a custom colormap in a text file and read it in with Python 3.6.
Mapping each color in a for loop takes approx. 9 seconds.
Here is the snippet:
for x in range(256):
    # z = int(rgb_c[x][0])
    # r = int(rgb_c[x][1])
    # g = int(rgb_c[x][2])
    # b = int(rgb_c[x][3])
    # Apply color to ndvi
    # ndvi_col[ndvi_g == z[x]] = [r[x], g[x], b[x]]
    ndvi_col[ndvi_g == int(rgb_c[x][0])] = [int(rgb_c[x][1]), int(rgb_c[x][2]), int(rgb_c[x][3])]
I have heard that the PyPy JIT compiler can increase speed and performance; will it help with this for loop? I even tried a separate list, but nothing changed.
I am open to any suggestions to improve speed and performance.
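One way to get rid of the 256 boolean-mask assignments entirely is a single NumPy fancy-indexing lookup. A minimal sketch, assuming rgb_c is a 256-row table of [gray_value, r, g, b] integers and ndvi_g is a 2D uint8 array (names taken from the snippet above):
import numpy as np

# build the 256x3 lookup table once; row z holds the color for gray value z
lut = np.zeros((256, 3), dtype=np.uint8)
for x in range(256):
    z, r, g, b = (int(v) for v in rgb_c[x][:4])
    lut[z] = (r, g, b)

# one vectorized lookup replaces all 256 masked assignments
ndvi_col = lut[ndvi_g]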

Posted a solution in case it helps. The original code is on GitHub.
#!/usr/bin/env python
'''
OpenCV Custom Colormap Example
Copyright 2015 by Satya Mallick <spmallick@learnopencv.com>
'''
import cv2
import numpy as np
def applyCustomColorMap(im_gray):
    lut = np.zeros((256, 1, 3), dtype=np.uint8)

    # Red
    lut[:, 0, 0] = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,253,251,249,247,245,242,241,238,237,235,233,231,229,227,225,223,221,219,217,215,213,211,209,207,205,203,201,199,197,195,193,191,189,187,185,183,181,179,177,175,173,171,169,167,165,163,161,159,157,155,153,151,149,147,145,143,141,138,136,134,132,131,129,126,125,122,121,118,116,115,113,111,109,107,105,102,100,98,97,94,93,91,89,87,84,83,81,79,77,75,73,70,68,66,64,63,61,59,57,54,52,51,49,47,44,42,40,39,37,34,33,31,29,27,25,22,20,18,17,14,13,11,9,6,4,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]

    # Green
    lut[:, 0, 1] = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,254,252,250,248,246,244,242,240,238,236,234,232,230,228,226,224,222,220,218,216,214,212,210,208,206,204,202,200,198,196,194,192,190,188,186,184,182,180,178,176,174,171,169,167,165,163,161,159,157,155,153,151,149,147,145,143,141,139,137,135,133,131,129,127,125,123,121,119,117,115,113,111,109,107,105,103,101,99,97,95,93,91,89,87,85,83,82,80,78,76,74,72,70,68,66,64,62,60,58,56,54,52,50,48,46,44,42,40,38,36,34,32,30,28,26,24,22,20,18,16,14,12,10,8,6,4,2,0]

    # Blue
    lut[:, 0, 2] = [195,194,193,191,190,189,188,187,186,185,184,183,182,181,179,178,177,176,175,174,173,172,171,170,169,167,166,165,164,163,162,161,160,159,158,157,155,154,153,152,151,150,149,148,147,146,145,143,142,141,140,139,138,137,136,135,134,133,131,130,129,128,127,126,125,125,125,125,125,125,125,125,125,125,125,125,125,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126,126]

    # Apply custom colormap through LUT
    im_color = cv2.LUT(im_gray, lut)
    return im_color

if __name__ == '__main__':
    im = cv2.imread("pluto.jpg", cv2.IMREAD_GRAYSCALE)
    im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
    im_color = applyCustomColorMap(im)
    cv2.imwrite('/tmp/colormap_algae.jpg', im_color)
    cv2.imshow("Pseudo Colored Image", im_color)
    cv2.waitKey(0)

Related

Disparity Map implementation in Python not matching in-built OpenCV function

I'm facing an issue and would like some input from the community on how to improve the disparity map. I'm following this tutorial for calculating the disparity map between two images. The code I have is as follows:
import cv2
import numpy as np
import sys
from matplotlib import pyplot as plt

num_disparities = 64  # number of disparities to check
block = 9  # block size to match

def preprocess_frame(path):
    image = cv2.imread(path, 0)
    image = cv2.equalizeHist(image)
    image = cv2.GaussianBlur(image, (5, 5), 0)
    return image

def calculate_disparity_matrix(args):
    left_image = preprocess_frame(args[1])
    right_image = preprocess_frame(args[2])
    rows, cols = left_image.shape

    kernel = np.ones([block, block]) / block

    disparity_maps = np.zeros(
        [left_image.shape[0], left_image.shape[1], num_disparities])
    for d in range(0, num_disparities):
        # shift image
        translation_matrix = np.float32([[1, 0, d], [0, 1, 0]])
        shifted_image = cv2.warpAffine(
            right_image, translation_matrix,
            (right_image.shape[1], right_image.shape[0]))
        # calculate absolute differences
        SAD = abs(np.float32(left_image) - np.float32(shifted_image))
        # convolve with kernel and find SAD at each point
        filtered_image = cv2.filter2D(SAD, -1, kernel)
        disparity_maps[:, :, d] = filtered_image

    disparity = np.argmin(disparity_maps, axis=2)
    disparity = np.uint8(disparity * 255 / num_disparities)
    disparity = cv2.equalizeHist(disparity)
    plt.imshow(disparity, cmap='gray', vmin=0, vmax=255)
    plt.show()

def calculate_disparity_inbuilt(args):
    left_image = preprocess_frame(args[1])
    right_image = preprocess_frame(args[2])
    rows, cols = left_image.shape

    stereo = cv2.StereoBM_create(numDisparities=num_disparities,
                                 blockSize=block)
    disparity = stereo.compute(left_image, right_image)
    plt.imshow(disparity, cmap='gray', vmin=0, vmax=255)
    plt.show()
The problem is that the output I get from the inbuilt OpenCV function is hardly similar to the one I've implemented. I was expecting at least a slight similarity between the two. Is this expected, or am I doing something wrong here?
(Screenshots in the original post: output of the implemented algorithm vs. the OpenCV algorithm.)
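One concrete difference worth ruling out when comparing the two outputs: cv2.StereoBM returns disparities as 16-bit fixed-point values with 4 fractional bits, so its raw output is 16 times the true disparity. A minimal normalization sketch, reusing the names from the code above, before plotting both on the same 0-255 scale:
# StereoBM output is CV_16S, scaled by 16; bring it back to pixel units
disparity = stereo.compute(left_image, right_image).astype(np.float32) / 16.0
disparity = np.uint8(np.clip(disparity * 255 / num_disparities, 0, 255))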

How do I do the equivalent of Gimp's Colors, Auto, White Balance in Python-Fu?

The only function I can find is gimp-color-balance, which takes the applicable parameters: preserve-lum(osity), cyan-red, magenta-green, and yellow-blue.
I'm not sure what values to pass for these parameters to duplicate the menu option in the title.
To complete the answer of @banderlog013, I think the GIMP doc specifies that the end pixels of each channel are first discarded, then the remaining ranges are stretched. I believe the right code is:
import cv2
import numpy as np

img = cv2.imread('test.jpg')
balanced_img = np.zeros_like(img)  # Initialize final image
for i in range(3):  # i stands for the channel index
    hist, bins = np.histogram(img[..., i].ravel(), 256, (0, 256))
    bmin = np.min(np.where(hist > (hist.sum() * 0.0005)))
    bmax = np.max(np.where(hist > (hist.sum() * 0.0005)))
    balanced_img[..., i] = np.clip(img[..., i], bmin, bmax)
    balanced_img[..., i] = (balanced_img[..., i] - bmin) / (bmax - bmin) * 255
I obtain good results with it; try it out!
According to the GIMP doc, we need to discard the pixel colors at each end of the Red, Green and Blue histograms which are used by only 0.05% of the pixels in the image, and stretch the remaining range as much as possible (Python code):
import numpy as np
import cv2  # opencv-python
import matplotlib.pyplot as plt

img = cv2.imread('test.jpg')
x = []
# get histogram for each channel
for i in cv2.split(img):
    hist, bins = np.histogram(i, 256, (0, 256))
    # discard colors at each end of the histogram which are used by only 0.05%
    tmp = np.where(hist > hist.sum() * 0.0005)[0]
    i_min = tmp.min()
    i_max = tmp.max()
    # stretch hist
    tmp = (i.astype(np.int32) - i_min) / (i_max - i_min) * 255
    tmp = np.clip(tmp, 0, 255)
    x.append(tmp.astype(np.uint8))

# combine image back and show it
s = np.dstack(x)
plt.imshow(s[::, ::, ::-1])
The result is pretty much the same as after GIMP's 'Colors -> Auto -> White Balance'.
UPD: we need np.clip() because OpenCV and NumPy cast int32 to uint8 differently:
# Numpy
np.array([-10, 260]).astype(np.uint8)
>>> array([246, 4], dtype=uint8)
# but we need just [0, 255]
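A quick self-contained demonstration of the two behaviours, wrapping versus saturating:
import numpy as np

a = np.array([-10, 260], dtype=np.int32)
print(a.astype(np.uint8))                   # wraps around: [246   4]
print(np.clip(a, 0, 255).astype(np.uint8))  # saturates:    [  0 255]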
From what I understand after a quick look at the source code (and more or less confirmed with a test image), these are unrelated. Under the hood, Colors > Auto > White Balance:
obtains the histogram for each channel
gets the values that determine the bottom and top 0.6%
stretches the range of values for that channel, using these two values as the black and white points, via an internal call that is very similar to "Levels".
Proof with a synthetic image (before/after screenshots in the original post).
All this isn't hard to do in Python.
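For instance, a minimal sketch of those three steps (my own translation of the description above, assuming an 8-bit BGR image and the 0.6% figure):
import cv2
import numpy as np

img = cv2.imread('test.jpg')
out = np.zeros_like(img)
for c in range(3):
    # black and white points at the bottom and top 0.6% of the channel
    lo, hi = np.percentile(img[..., c], (0.6, 99.4))
    # stretch between them, like a "Levels" adjustment
    stretched = (img[..., c].astype(np.float32) - lo) / (hi - lo) * 255
    out[..., c] = np.clip(stretched, 0, 255).astype(np.uint8)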
How to essentially get the equivalent of GIMP's Colors --> Auto --> White Balance feature:
Tested on Ubuntu 20.04.
Download the below code from my eRCaGuy_hello_world repo here: python/auto_white_balance_img.py.
Install dependencies:
pip3 install opencv-python # for cv2
pip3 install numpy
Now here is some fully-functional code, unlike some of the other answers here, which are snippets lacking things like import statements. I'm borrowing from @Canette Ouverture's answer here, and @banderlog013's answer here.
Create file auto_white_balance_img.py:
#!/usr/bin/python3

import cv2
import numpy as np

file_in = 'test.jpg'
file_in_base = file_in[:-4]  # strip file extension
file_in_extension = file_in[-4:]

img = cv2.imread(file_in)

# From @banderlog013's answer: https://stackoverflow.com/a/54864315/4561887
x = []
# get histogram for each channel
for i in cv2.split(img):
    hist, bins = np.histogram(i, 256, (0, 256))
    # discard colors at each end of the histogram which are used by only 0.05%
    tmp = np.where(hist > hist.sum() * 0.0005)[0]
    i_min = tmp.min()
    i_max = tmp.max()
    # stretch hist
    tmp = (i.astype(np.int32) - i_min) / (i_max - i_min) * 255
    tmp = np.clip(tmp, 0, 255)
    x.append(tmp.astype(np.uint8))
img_out1 = np.dstack(x)  # recombine the stretched channels into one image

# From @Canette Ouverture's answer: https://stackoverflow.com/a/56365560/4561887
img_out2 = np.zeros_like(img)  # Initialize final image
for channel_index in range(3):
    hist, bins = np.histogram(img[..., channel_index].ravel(), 256, (0, 256))
    bmin = np.min(np.where(hist > (hist.sum() * 0.0005)))
    bmax = np.max(np.where(hist > (hist.sum() * 0.0005)))
    img_out2[..., channel_index] = np.clip(img[..., channel_index], bmin, bmax)
    img_out2[..., channel_index] = ((img_out2[..., channel_index] - bmin) /
                                    (bmax - bmin) * 255)

# Write new files
cv2.imwrite(file_in_base + '_out1' + file_in_extension, img_out1)
cv2.imwrite(file_in_base + '_out2' + file_in_extension, img_out2)
Make auto_white_balance_img.py executable:
chmod +x auto_white_balance_img.py
Now set the file_in variable in the file above to your desired input image path, then run it with:
python3 auto_white_balance_img.py
# OR
./auto_white_balance_img.py
Assuming you have set file_in = 'test.jpg', it will produce these two files:
test_out1.jpg # The result from @banderlog013's answer here
test_out2.jpg # The result from @Canette Ouverture's answer here
I use this function to auto white balance images. Unlike the GIMP function, it does not normalize image contrast, so it is useful for low-contrast images too.
import numpy as np
from imageio import imread
import matplotlib.pyplot as plt

def auto_white_balance(im, p=.6):
    '''Stretch each channel histogram to same percentile as mean.'''
    # get percentiles over all channels together
    p0, p1 = np.percentile(im, p), np.percentile(im, 100 - p)

    for i in range(3):
        ch = im[:, :, i]
        # get channel percentiles
        pc0, pc1 = np.percentile(ch, p), np.percentile(ch, 100 - p)
        # stretch channel to same range as mean
        ch = (p1 - p0) * (ch - pc0) / (pc1 - pc0) + p0
        im[:, :, i] = ch

    return im

def test():
    im = imread('imageio:astronaut.png')
    # distort white balance
    im[:, :, 0] = im[:, :, 0] * .6
    im[:, :, 1] = im[:, :, 1] * .8
    plt.imshow(im)
    plt.show()

    im2 = auto_white_balance(im)
    im2 = np.clip(im2, 0, 255)  # or 0, 1 for float images
    plt.imshow(im2)
    plt.show()

if __name__ == "__main__":
    test()
If you want the equivalent of the GIMP function, use fixed values instead:
p0, p1 = 0, 255
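One caveat with the function above (my observation, not from the original answer): assigning the stretched float channel back into a uint8 array can wrap out-of-range values before the final clip. A safer variant works in float throughout:
import numpy as np

def auto_white_balance_float(im, p=.6):
    '''Same stretch, but in float throughout so out-of-range values
    saturate at the final clip instead of wrapping around.'''
    im = im.astype(np.float32)
    p0, p1 = np.percentile(im, p), np.percentile(im, 100 - p)
    for i in range(3):
        ch = im[:, :, i]
        pc0, pc1 = np.percentile(ch, p), np.percentile(ch, 100 - p)
        im[:, :, i] = (p1 - p0) * (ch - pc0) / (pc1 - pc0) + p0
    return np.clip(im, 0, 255).astype(np.uint8)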
K, cool. Figured out how to script one up.
Use it if you like. Does alright by me.
https://github.com/doyousketch2/eAWB

How to get grabCut to work in OpenCV Python with GC_INIT_WITH_MASK

I am trying to get the messi example to work: https://docs.opencv.org/3.1.0/d8/d83/tutorial_py_grabcut.html
In my setup, I want the entire process to be automated.
For example, I grab an image from the web:
http://wanderlustandlipstick.com/travel-tips/opting-out-full-body-scanners/
And using some opencv tools I autogenerate the following mask:
Black is supposed to be a certain background, White is supposed to be a certain foreground, and Grey is supposed to be unknown.
Following the messi tutorial (https://docs.opencv.org/3.1.0/d8/d83/tutorial_py_grabcut.html), below is my code. However, it only shows the small white circle area, as if it is treating grey like black (certain background).
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread("imagescan.png")
dimy = np.shape(img)[0] # seems to be backwards (x,y)
# https://stackoverflow.com/questions/22490721/how-can-i-get-the-x-and-y-dimensions-of-a-ndarray-numpy-python
dimx = np.shape(img)[1]
mask = np.zeros((dimy,dimx),np.uint8) # zeroes as array/matrix size of image
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64) # separate arrays: a chained assignment would alias the two models
newmask = cv2.imread('imagemask.png',0)
# informational purposes
removeBg = (newmask == 0)
removeBg = np.ravel(removeBg)
np.bincount(removeBg)
keepFg = (newmask == 255)
keepFg = np.ravel(keepFg)
np.bincount(keepFg)
#otherEl = (not (newmask == 0 or newmask == 255)) # throws error
#otherEl = np.ravel(otherEl)
#np.bincount(otherEl)
# appears at least one of each elements is required
# otherwise throws bgdSamples.empty error / fgdSamples.empty error
mask[newmask == 0] = 0
mask[newmask == 255] = 1
mask, bgdModel, fgdModel = cv2.grabCut(img,mask,None,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_MASK)
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img2 = img*mask2[:,:,np.newaxis]
plt.imshow(img2),plt.colorbar(),plt.show()
The result is just a mask of the circle, as if the grey area is being treated as black.
In the mask image, you basically have 3 colors: black, white, grey. In the following lines of code, you're setting the background and foreground, but not the probable foreground.
mask[newmask == 0] = 0
mask[newmask == 255] = 1
Try using the OpenCV-provided constants (cv2.GC_BGD etc.) to avoid confusion.
# this line sets the grey areas - meaning any color not 0 and not 255 - to probable foreground.
mask = np.where(((newmask>0) & (newmask<255)),cv2.GC_PR_FGD,0).astype('uint8')
mask[newmask == 0] = cv2.GC_BGD
mask[newmask == 255] = cv2.GC_FGD
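Putting it together with the rest of the pipeline from the question, a sketch (same file names as above):
import cv2
import numpy as np

img = cv2.imread('imagescan.png')
newmask = cv2.imread('imagemask.png', 0)

# grey -> probable foreground, black -> sure background, white -> sure foreground
mask = np.where((newmask > 0) & (newmask < 255), cv2.GC_PR_FGD, 0).astype('uint8')
mask[newmask == 0] = cv2.GC_BGD
mask[newmask == 255] = cv2.GC_FGD

bgdModel = np.zeros((1, 65), np.float64)
fgdModel = np.zeros((1, 65), np.float64)
mask, bgdModel, fgdModel = cv2.grabCut(img, mask, None, bgdModel, fgdModel, 5,
                                       cv2.GC_INIT_WITH_MASK)

# keep sure and probable foreground
mask2 = np.where((mask == cv2.GC_BGD) | (mask == cv2.GC_PR_BGD), 0, 1).astype('uint8')
img2 = img * mask2[:, :, np.newaxis]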

Image recognition from Computer Screen

I am trying to extract text from the image below. I tried OCR in Python, but it is giving me incorrect results.
I preprocessed the image, removed the underline, used a Canny edge detector, increased the contrast ratio, and then fed it to OCR. Still, I am not getting the expected output.
With limited knowledge, I tried to separate characters out of the image after increasing the contrast.
import cv2
import numpy as np
import os

image_path = os.path.join(os.path.dirname(__file__), "image.png")
im = cv2.imread(image_path)
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

# converted intermediate pixels to black and white
gray[gray < 100] = 0
gray[gray >= 100] = 255

gray = gray[~np.all(gray == 255, axis=1)]
gray = gray[:, ~np.all(gray == 255, axis=0)]
gray = gray[~np.all(gray == 0, axis=1)]

print(np.where(np.all(gray == 255, axis=0)))
print(gray[:, 20:33])

words = np.hsplit(gray, np.where(np.all(gray == 255, axis=0))[0])
i = 0
for word in words:
    word = word[:, ~np.all(word == 255, axis=0)]
    if word.size:
        print(word.shape)
        i = i + 1
        cv2.imwrite("temp" + str(i) + ".png", word)
It became like this:
And again I gave this as input to pytesseract. It gave me blank output.
Here are my doubts:
Can we have a better mechanism to separate characters on whitespace from the image? The current one seems highly breakable to me.
How can we pre-process the image so it is better detected by OCR?
Can we use neural networks or an SVM here, like we used for the MNIST digits dataset?
Short pointers are OK if this seems too broad. What is the best approach to tackle this kind of problem?
This answer implements what is said in my comment.
I changed your code a little and refrained from using OpenCV. The code is written using Python 3.5.
To extract the digits, I am summing the image column-wise and scaling the resulting array to get check. I am operating here on the gray image that you already cut, effectively getting rid of the underline.
x_sum = np.sum(gray, axis = 0)
check = ((x_sum)/np.max(x_sum)*10)
This array can now be used to compare with a threshold to identify the regions where a letter/digit is located such as:
plt.imshow(gray, cmap='gray')
x_sum = np.sum(gray, axis = 0)
check = ((x_sum)/np.max(x_sum)*10)
plt.plot((check<8).astype(int))
plt.show()
Now we will use this information to modify the image, erasing the regions where the thresholded check array is 0:
for idx, i in enumerate((check < 8).astype(int)):
    if i < 1:
        gray[:, idx] = 255
Therefore we have this image:
This can be further processed just as you are already doing. It provides separated letters/digits which can then be post-processed for learning.
The next step would be scaling/resizing the letters/images so they are described by the same number of features.
Then, finally, you can use a pretrained classifier to predict the most probable letter/digit.
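A minimal sketch of that last step (my addition, not from the original answer; sklearn's bundled 8x8 digits dataset is used purely for illustration and only covers digits 0-9):
import numpy as np
from PIL import Image
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier

# train a simple classifier on sklearn's 8x8 digits (pixel values 0-16)
digits = datasets.load_digits()
clf = KNeighborsClassifier(n_neighbors=3).fit(digits.data, digits.target)

def classify_char(char_img):
    '''char_img: 2D uint8 array, white background, dark glyph.'''
    small = np.array(Image.fromarray(char_img).resize((8, 8)))
    feat = (255 - small) / 255 * 16  # invert and rescale to the 0-16 range
    return clf.predict(feat.reshape(1, -1))[0]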
The full code is provided here:
import numpy as np
import os
import matplotlib.pyplot as plt
from scipy.stats import mstats
import scipy
from matplotlib import gridspec
from PIL import Image

image = Image.open("testl.png")
f = image.convert('I')
gray = np.array(f)
gray[gray < 200] = 0
gray[gray >= 200] = 255

gray = gray[~np.all(gray == 255, axis=1)]
gray = gray[:, ~np.all(gray == 255, axis=0)]
gray = gray[~np.all(gray == 0, axis=1)]

plt.imshow(gray, cmap='gray')
x_sum = np.sum(gray, axis=0)
check = ((x_sum) / np.max(x_sum) * 10)
plt.plot((check < 8).astype(int))
plt.show()

plt.matshow(gray)
plt.show()

for idx, i in enumerate((check < 8).astype(int)):
    if i < 1:
        gray[:, idx] = 255

plt.matshow(gray)
plt.show()

words = np.hsplit(gray, np.where(np.all(gray >= 200, axis=0))[0])

gs = gridspec.GridSpec(1, len(words))
fig = plt.figure(figsize=(len(words), 1))
i = 0
for word in words:
    word = word[:, ~np.all(word >= 230, axis=0)]
    if word.size:
        ax = fig.add_subplot(gs[i])
        print(word.shape)
        i = i + 1
        ax.matshow(word, aspect='auto')
plt.show()
This finally yields all separated letters/digits, such as:

OpenCV Python Bindings for GrabCut Algorithm

I've been trying to use the OpenCV implementation of the grab cut method via the Python bindings. I have tried using the version in both cv and cv2, but I am having trouble finding the correct parameters to get the method to run correctly. I have tried several permutations of the parameters and nothing seems to work (basically every example I've seen on GitHub). Here are a couple of examples I have tried to follow:
Example 1
Example 2
And here is the method's documentation and a known bug report:
Documentation
Known Grabcut Bug
I can get the code to execute using the example below, but it returns a blank (all black) image mask.
img = Image("pills.png")
mask = img.getEmpty(1)
bgModel = cv.CreateMat(1, 13*5, cv.CV_64FC1)
fgModel = cv.CreateMat(1, 13*5, cv.CV_64FC1)
for i in range(0, 13*5):
cv.SetReal2D(fgModel, 0, i, 0)
cv.SetReal2D(bgModel, 0, i, 0)
rect = (150,70,170,220)
tmp1 = np.zeros((1, 13 * 5))
tmp2 = np.zeros((1, 13 * 5))
cv.GrabCut(img.getBitmap(),mask,rect,tmp1,tmp2,5,cv.GC_INIT_WITH_RECT)
I am using SimpleCV to load the images. The mask type and return type from img.getBitmap() are:
iplimage(nChannels=1 width=730 height=530 widthStep=732 )
iplimage(nChannels=3 width=730 height=530 widthStep=2192 )
If someone has a working example of this code I would love to see it. For what it is worth I am running on OSX Snow Leopard, and my version of OpenCV was installed from the SVN repository (as of a few weeks ago). For reference my input image is this:
I've tried changing the result mask enum values to something more visible. It is not the return values that are the problem. This returns a completely black image. I will try a couple more values.
img = Image("pills.png")
mask = img.getEmpty(1)
bgModel = cv.CreateMat(1, 13*5, cv.CV_64FC1)
fgModel = cv.CreateMat(1, 13*5, cv.CV_64FC1)
for i in range(0, 13*5):
cv.SetReal2D(fgModel, 0, i, 0)
cv.SetReal2D(bgModel, 0, i, 0)
rect = (150,70,170,220)
tmp1 = np.zeros((1, 13 * 5))
tmp2 = np.zeros((1, 13 * 5))
cv.GrabCut(img.getBitmap(), mask, rect, tmp1, tmp2, 5, cv.GC_INIT_WITH_MASK)
mask[mask == cv.GC_BGD] = 0
mask[mask == cv.GC_PR_BGD] = 0
mask[mask == cv.GC_FGD] = 255
mask[mask == cv.GC_PR_FGD] = 255
result = Image(mask)
result.show()
result.save("result.png")
Kat, this version of your code seems to work for me.
import numpy as np
import matplotlib.pyplot as plt
import cv2
filename = "pills.png"
im = cv2.imread(filename)
h,w = im.shape[:2]
mask = np.zeros((h,w),dtype='uint8')
rect = (150,70,170,220)
tmp1 = np.zeros((1, 13 * 5))
tmp2 = np.zeros((1, 13 * 5))
cv2.grabCut(im,mask,rect,tmp1,tmp2,10,mode=cv2.GC_INIT_WITH_RECT)
plt.figure()
plt.imshow(mask)
plt.colorbar()
plt.show()
Produces a figure like this, with labels 0, 2, and 3.
Your mask is filled with the following values:
GC_BGD defines an obvious background pixel.
GC_FGD defines an obvious foreground (object) pixel.
GC_PR_BGD defines a possible background pixel.
GC_PR_FGD defines a possible foreground pixel.
Which are all part of an enum:
enum { GC_BGD    = 0,  // background
       GC_FGD    = 1,  // foreground
       GC_PR_BGD = 2,  // most probably background
       GC_PR_FGD = 3   // most probably foreground
};
Which translates to the colors: completely black, very black, dark black, and black. I think you'll find that if you add the following code (taken from your example 1 and slightly modified) your mask will look nicer:
mask[mask == cv.GC_BGD] = 0      # certain background is black
mask[mask == cv.GC_PR_BGD] = 63  # possible background is dark grey
mask[mask == cv.GC_FGD] = 255    # foreground is white
mask[mask == cv.GC_PR_FGD] = 192 # possible foreground is light grey
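To then pull out the segmented object with those labels, the usual pattern (a sketch using the cv2 names from the working example above) is:
# white where grabCut decided sure/probable foreground, black elsewhere
fg = np.where((mask == cv2.GC_FGD) | (mask == cv2.GC_PR_FGD), 255, 0).astype('uint8')
segmented = cv2.bitwise_and(im, im, mask=fg)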
