How to turn a grayscale image into RGB? - Python

I am making an application in Python that allows people to share their screens, but in order to get a decent frame rate I wanted to compress the image into a grayscale format and then turn it back into an RGB image on the client side. But when I tried that, it still showed a grayscale image.
Then I tried using an HSV color conversion, which did display color, but with a red filter for some reason.
I won't show all of the code due to the fact it is at least 2000 lines, but I will show the part of the code where I am having my problem.
Server side:
sct_img = sct.grab(bounding_box)
img_np = np.array(sct_img)
frame = img_np
frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
frame = cv2.resize(frame, (0,0), fx = 0.70, fy = 0.70)
data = pickle.dumps(frame)
message_size = struct.pack("L", len(data))
clientsocket.sendall(message_size + data)
Client side:
frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
frame = cv2.resize(frame, (x, y))
cv2.imshow('frame', frame)

When you convert an RGB image to grayscale, color data gets thrown away, so you won't be able to get the original image back. Observe the output of the code below:
import cv2
import numpy as np
# Create image
img = np.full((500, 500, 3), 255, 'uint8')
cv2.rectangle(img, (50, 100), (250, 300), (0, 0, 96), -1)
cv2.circle(img, (300, 350), 100, (0, 50, 0), -1)
cv2.drawContours(img, [np.array([(300, 50), (200, 250), (400, 250)])], 0, (255, 0, 0), -1)
# Convert to grayscale
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
print(np.unique(img_gray))
# Show images
cv2.imshow("BGR", img)
cv2.imshow("Gray", img_gray)
cv2.waitKey(0)
Output:
As you can see, with the image of a red, green and blue shape (each a specific shade of its color), converting it to grayscale turns all three colors into the same gray value, 29, so np.unique(img_gray) prints [ 29 255] (the 255 is the white background). There is no way the computer can tell that the three shapes used to be different colors.
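The shades were chosen so that all three collapse to the same luma. OpenCV's COLOR_BGR2GRAY uses the BT.601 weighted sum Y = 0.299*R + 0.587*G + 0.114*B, which you can verify by hand for the three shapes:
print(round(0.299 * 96))   # red shape,   (B, G, R) = (0, 0, 96)  -> 29
print(round(0.587 * 50))   # green shape, (B, G, R) = (0, 50, 0)  -> 29
print(round(0.114 * 255))  # blue shape,  (B, G, R) = (255, 0, 0) -> 29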

When you reduce a color image to grayscale, you're discarding information, and there's no way to get the color back. (The red tint you saw with the HSV conversion is expected: cv2.imshow always interprets a 3-channel array as BGR, and a gray pixel converted to HSV becomes (0, 0, v), which rendered as BGR is pure red.) If you want an acceptable frame rate, you're going to have to choose some other approach.
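If the real goal is frame rate rather than grayscale itself, a common alternative is to JPEG-compress each frame before sending it, which cuts the payload far more than dropping color does. A minimal sketch, reusing frame and the length-prefixed socket framing from the question:
import cv2
import numpy as np

# Server side: encode the BGR frame as JPEG instead of pickling raw pixels
ok, buf = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 70])
data = buf.tobytes()  # send with the same message_size + data framing

# Client side: decode the received bytes back into a BGR image
frame = cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)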

Related

Drawing OpenCV contours and saving as a transparent image

I'm trying to draw contours I have found using findContours.
If I draw like this, I get a black background with the contour drawn on it:
out = np.zeros_like(someimage)
cv2.drawContours(out, contours, -1, 255, 1)
cv2.imwrite('contours.png',out)
If I draw like this, I get a fully transparent image with no drawn contours:
out = np.zeros((55, 55, 4), dtype=np.uint8)
cv2.drawContours(out, contours, -1, 255, 1)
cv2.imwrite('contours.png',out)
How do I go about making an image with size (55, 55) and drawing a contour on it, while keeping a transparent background?
Thanks
To work with transparent images in OpenCV you need to utilize the fourth channel after BGR, called alpha, which controls the transparency. So instead of creating a three-channel image, create one with four channels, and while drawing make sure you set the fourth channel to 255.
mask = np.zeros((55, 55, 4), dtype=np.uint8)
cv2.drawContours(mask, cnts, -1, (255, 255, 255, 255), 1) #change first three channels to any color you want.
cv2.imwrite('res.png', mask)
Input image whose contours to draw.
Result
In Python/OpenCV, use the black-and-white contour image both as the source of the BGR channels and as the alpha channel. Note that the image you draw the contour on must be single-channel grayscale, or cvtColor with COLOR_GRAY2BGRA will fail:
cntr_img = np.zeros((55, 55), dtype=np.uint8) # single channel, not 4
cv2.drawContours(cntr_img, contours, -1, 255, 1)
out = cv2.cvtColor(cntr_img, cv2.COLOR_GRAY2BGRA)
out[:,:,3] = cntr_img # alpha: opaque on the contour, transparent elsewhere
cv2.imwrite('contours.png', out)
This works for me in Python/OpenCV. I am using a white blob on a black background as input, since I do not have a contour image available. The contour image needs to be grayscale.
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('mask.png')
# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
out = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGRA)
out[:,:,3] = gray
# write output
cv2.imwrite('mask_transp.png',out)
# display it
cv2.imshow("out", out)
cv2.waitKey(0)
Transparent result (download to see it since it is white on transparent background):

OpenCV: Remove the background noise for Tesseract OCR

I have a drone FPV video from which I need to extract GPS coordinates. The text is white, but because of the bad quality of the video it appears gray and light blue. Since the background is changing, I have some problems: in some frames the background has a totally different color from the text, and in some frames a similar one.
Here are 2 original images (frames) from the video:
Dark background
Light background
And here is the code that I've found after googling:
import numpy as np
import cv2
import pytesseract
cap = cv2.VideoCapture('v1.avi')
p = 10000
while cap.isOpened():
    ret, frame = cap.read()
    img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    img = img[380:460, 220:640]
    img = cv2.bilateralFilter(img, 9, 27, 27)
    img = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    img = cv2.GaussianBlur(img, (9, 9), 0)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    img = cv2.morphologyEx(img, cv2.MORPH_TOPHAT, kernel)
    img = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)[1]
    img = cv2.dilate(img, kernel)
    img = cv2.threshold(img, 0, 250, cv2.THRESH_BINARY_INV)[1]
    cv2.imshow('frame', img)
    cv2.imshow('or', frame)
    print('\n==============')
    print(pytesseract.image_to_string(img, config='digits'))
    if cv2.waitKey(50) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
And also the results:
Dark background
Light background
As you can see, in the second case the background isn't clear, there is some noise, and from that image Tesseract doesn't extract the text properly.
EDIT:
For various reasons I can't share the video I wrote about above, but here is a similar video from YouTube; if the text can be extracted from that video, I guess the method will also work for mine, or at least solve many of the problems:
I was able to get something working using a combination of cv2.bilateralFilter and cv2.adaptiveThreshold. Once the background is in one main blob, the numbers can be extracted based on their patch sizes.
img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Bilateral filter and adaptive thresholding to get the background into mostly one patch
img = cv2.bilateralFilter(img, 9, 29, 29)
thresh = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 13, 0)
# Add padding to join any background around edges into the same patch
pad = 2
img_pad = cv2.copyMakeBorder(thresh, pad, pad, pad, pad, cv2.BORDER_CONSTANT, value = 1)
# Label patches and remove padding
ret, markers = cv2.connectedComponents(img_pad)
markers = markers[pad:-pad,pad:-pad]
# Count pixels in each patch
counts = [(markers==i).sum() for i in range(markers.max()+1)]
# Keep patches based on pixel counts
maxCount = 200 # removes large background patches
minCount = 40 # removes specs and centres of numbering
keep = [c<maxCount and c>minCount for c in counts]
output = markers.copy()
for i, k in enumerate(keep):
    output[markers == i] = k
Here is what the images look like at each stage.
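Note that output ends up holding 0/1 values rather than a displayable image. A possible final step before OCR (assuming output is the mask built above) is to scale it to 8-bit and invert it so Tesseract sees dark digits on a white background:
import numpy as np
import pytesseract

ocr_img = 255 - np.uint8(output) * 255  # 0/1 mask -> dark digits on white
print(pytesseract.image_to_string(ocr_img, config='digits'))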

I want to mask multiple images horizontally

I have a few journal page images that contain two columns. I want to mask one column white without changing the dimensions, which means the output image should have the same dimensions as the input image even though only one column remains visible.
I was able to mask the image, but the masked part comes out black, and I want it white.
import cv2
import numpy as np
# Load the original image
image = cv2.imread(filename = "D:\output_final_word5\image1.jpg")
# Create the basic black image
mask = np.zeros(shape = image.shape, dtype = "uint8")
# Draw a white, filled rectangle on the mask image
cv2.rectangle(img = mask, pt1 = (0, 0), pt2 = (795, 3000), color = (255, 255, 255), thickness = -1)
# Apply the mask and display the result
maskedImg = cv2.bitwise_and(src1 = image, src2 = mask)
#cv2.namedWindow(winname = "masked image", flags = cv2.WINDOW_NORMAL)
cv2.imshow("masked image",maskedImg)
cv2.waitKey(delay = 0)
cv2.imwrite("D:\Test_Mask.jpg",maskedImg)
My final objective is to read a folder containing several journal pages, and save each page twice, masking first one column and then the other, without affecting the dimensions of the input image, and with the masked part white.
The input image is attached below...
And the output should be like this....
You don't need a mask to draw a rectangle; you can draw it directly on the image.
You can also use image.copy() to create a second image with the other column masked.
BTW: if 795 is the middle of the width, then you can use image.shape to get the (height, width) and use width//2 instead of 795, so it will work with images of different widths. But if 795 is not exactly the middle, then use half_width = 795.
import cv2
image_1 = cv2.imread('image.jpg')
image_2 = image_1.copy()
height, width, depth = image_1.shape # it gives `height,width`, not `width,height`
half_width = width//2
#half_width = 795
cv2.rectangle(img=image_1, pt1=(0, 0), pt2=(half_width, height), color=(255, 255, 255), thickness=-1)
cv2.rectangle(img=image_2, pt1=(half_width, 0), pt2=(width, height), color=(255, 255, 255), thickness=-1)
cv2.imwrite("image_1.jpg", image_1)
cv2.imwrite("image_2.jpg", image_2)
cv2.imshow("image 1", image_1)
cv2.imshow("image 2", image_2)
cv2.waitKey(0)
cv2.destroyAllWindows()
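For the folder part of the objective, here is a minimal sketch; the folder names are hypothetical, and it assumes every page splits down the middle:
import glob
import os
import cv2

input_dir = "pages"          # hypothetical folder with the journal page images
output_dir = "pages_masked"  # hypothetical folder for the results
os.makedirs(output_dir, exist_ok=True)

for path in glob.glob(os.path.join(input_dir, "*.jpg")):
    image = cv2.imread(path)
    height, width = image.shape[:2]
    half_width = width // 2
    left_masked = image.copy()   # left column whitened, right column kept
    right_masked = image.copy()  # right column whitened, left column kept
    cv2.rectangle(left_masked, (0, 0), (half_width, height), (255, 255, 255), -1)
    cv2.rectangle(right_masked, (half_width, 0), (width, height), (255, 255, 255), -1)
    name = os.path.splitext(os.path.basename(path))[0]
    cv2.imwrite(os.path.join(output_dir, name + "_right_col.jpg"), left_masked)
    cv2.imwrite(os.path.join(output_dir, name + "_left_col.jpg"), right_masked)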

Removing pixels less than n in size (noise) in an image - OpenCV Python

I am trying to remove noise in an image and am currently running this code:
import numpy as np
import argparse
import cv2
from skimage import morphology
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required = True, help = "Path to the image")
args = vars(ap.parse_args())
# Load the image, convert it to grayscale, and blur it slightly
image = cv2.imread(args["image"])
cv2.imshow("Image", image)
cv2.imwrite("image.jpg", image)
greenLower = np.array([50, 100, 0], dtype = "uint8")
greenUpper = np.array([120, 255, 120], dtype = "uint8")
green = cv2.inRange(image, greenLower, greenUpper)
#green = cv2.GaussianBlur(green, (3, 3), 0)
cv2.imshow("green", green)
cv2.imwrite("green.jpg", green)
cleaned = morphology.remove_small_objects(green, min_size=64, connectivity=2)
cv2.imshow("cleaned", cleaned)
cv2.imwrite("cleaned.jpg", cleaned)
cv2.waitKey(0)
However, the image does not seem to have changed from "green" to "cleaned" despite using the remove_small_objects function. Why is this, and how do I clean the image up? Ideally I would like to isolate only the image of the cabbage.
My thought process is, after thresholding, to remove pixels less than 100 in size, then smooth the image with a blur and fill up the black holes surrounded by white; that is what I did in MATLAB. If anybody could direct me to get the same results as my MATLAB implementation, that would be greatly appreciated. Thanks for your help.
Edit: I made a few mistakes when changing the code; it is updated to what it currently is now, and the 3 images are displayed.
image:
green:
clean:
My goal is to get something like the picture below from my MATLAB implementation:
Preprocessing
A good idea when you're filtering an image is to lowpass the image or blur it a bit; that way neighboring pixels become a little more uniform in color, so it will ease brighter and darker spots on the image and keep holes out of your mask.
img = cv2.imread('image.jpg')
blur = cv2.GaussianBlur(img, (15, 15), 2)
lower_green = np.array([50, 100, 0])
upper_green = np.array([120, 255, 120])
mask = cv2.inRange(blur, lower_green, upper_green)
masked_img = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('', masked_img)
cv2.waitKey()
Colorspace
Currently, you're trying to contain an image by a range of colors with different brightness---you want green pixels, regardless of whether they are dark or light. This is much more easily accomplished in the HSV colorspace. Check out my answer here going in-depth on the HSV colorspace.
img = cv2.imread('image.jpg')
blur = cv2.GaussianBlur(img, (15, 15), 2)
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
lower_green = np.array([37, 0, 0])
upper_green = np.array([179, 255, 255])
mask = cv2.inRange(hsv, lower_green, upper_green)
masked_img = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('', masked_img)
cv2.waitKey()
Removing noise in a binary image/mask
The answer provided by ngalstyan shows how to do this nicely with morphology. What you want to do is called opening, which is the combined process of eroding (which more or less just removes everything within a certain radius) and then dilating (which adds back to any remaining objects however much was removed). In OpenCV, this is accomplished with cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel). The tutorials on that page show how it works nicely.
img = cv2.imread('image.jpg')
blur = cv2.GaussianBlur(img, (15, 15), 2)
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
lower_green = np.array([37, 0, 0])
upper_green = np.array([179, 255, 255])
mask = cv2.inRange(hsv, lower_green, upper_green)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
opened_mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
masked_img = cv2.bitwise_and(img, img, mask=opened_mask)
cv2.imshow('', masked_img)
cv2.waitKey()
Filling in gaps
In the above, opening was shown as the method to remove small bits of white from your binary mask. Closing is the opposite operation---removing chunks of black from your image that are surrounded by white. You can do this with the same idea as above, but using cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel). This isn't even necessary after the above in your case, as the mask doesn't have any holes. But if it did, you could close them up with closing. You'll notice my opening step actually removed a small bit of the plant at the bottom. You could actually fill those gaps with closing first, and then opening to remove the spurious bits elsewhere, but it's probably not necessary for this image.
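If you did need it, closing is the same one-liner with MORPH_CLOSE, reusing the mask and kernel from above:
closed_mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)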
Trying out new values for thresholding
You might want to get more comfortable playing around with different colorspaces and threshold levels to get a feel for what will work best for a particular image. It's not complete yet and the interface is a bit wonky, but I have a tool you can use online to try out different thresholding values in different colorspaces; check it out here if you'd like. That's how I quickly found values for your image.
The above problem is solved using cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel), but if somebody wants to use morphology.remove_small_objects to remove areas smaller than a specified size, this answer may be helpful.
The code I used to remove noise from the above image is:
import numpy as np
import cv2
from skimage import morphology
# Load the image
image = cv2.imread('im.jpg')
cv2.imshow("Image", image)
#cv2.imwrite("image.jpg", image)
greenLower = np.array([50, 100, 0], dtype = "uint8")
greenUpper = np.array([120, 255, 120], dtype = "uint8")
green = cv2.inRange(image, greenLower, greenUpper)
#green = cv2.GaussianBlur(green, (3, 3), 0)
cv2.imshow("green", green)
cv2.imwrite("green.jpg", green)
imglab = morphology.label(green) # create labels in segmented image
cleaned = morphology.remove_small_objects(imglab, min_size=64, connectivity=2)
img3 = np.zeros((cleaned.shape)) # create array of size cleaned
img3[cleaned > 0] = 255
img3= np.uint8(img3)
cv2.imshow("cleaned", img3)
cv2.imwrite("cleaned.jpg", img3)
cv2.waitKey(0)
The cleaned image is shown below:
To use morphology.remove_small_objects, labeling the blobs first is essential. For that I use imglab = morphology.label(green). Labeling works like this: all pixels of the 1st blob are numbered 1; similarly, all pixels of the 7th blob are numbered 7, and so on. After removing the small areas, the remaining blobs' pixel values should be set to 255 so that cv2.imshow() can show them. For that I create an array img3 of the same size as the cleaned image, and use the line img3[cleaned > 0] = 255 to set all pixels with a value greater than 0 to 255.
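A tiny illustration of what morphology.label does; the array here is made up purely to show the numbering:
import numpy as np
from skimage import morphology

toy = np.array([[1, 1, 0, 0],
                [0, 0, 0, 1],
                [0, 0, 1, 1]], dtype=np.uint8)
print(morphology.label(toy))
# [[1 1 0 0]
#  [0 0 0 2]
#  [0 0 2 2]]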
It seems what you want to remove is a disconnected group of small blobs.
I think erode() will do a good job of removing them with the right kernel.
Given an nxn kernel, erode moves the kernel through the image and replaces the center pixel by the minimum pixel in the kernel.
Then you can dilate() the resulting image to restore eroded edges of the green part.
Another option would be to use fastNlMeansDenoising.
##### option 1
kernel_size = (5, 5) # should roughly match the size of the elements you want to remove
kernel_el = cv2.getStructuringElement(cv2.MORPH_RECT, kernel_size)
eroded = cv2.erode(green, kernel_el)
cleaned = cv2.dilate(eroded, kernel_el)
##### option 2 (the mask is single-channel, so use the grayscale variant)
cleaned = cv2.fastNlMeansDenoising(green, h=10)

Skin detection from hue-saturation histogram - OpenCV Python

I'm working on a little program in Python to estimate the direction of pointing gestures with a 2D picture from a monocular camera, and I'm using OpenCV 2.3.
I know it's a bit tricky but I'm motivated! :)
My approach is first to use face detection to detect an area in which I'm sure there is a lot of skin:
img = cv2.imread("/home/max/recordings/cameras/imageTEST.jpg",1)
img_hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
hc1 = cv2.CascadeClassifier("/home/max/haarcascade_frontalface_alt.xml")
faces1 = hc1.detectMultiScale(img)
for (x, y, w, h) in faces1:
    cv2.rectangle(img, (x, y), (x+w, y+h), 255)
    crop_img = img[y+2:y+w, x+2:x+h]
I really want to use this method because I want my detection to be robust to light variation. Then I compute the hue-saturation histogram of the picture of the detected face to make a back projection:
roihist = cv2.calcHist([crop_img],[0,1], None, [180, 256], [0, 180, 0, 256] )
dst = cv2.calcBackProject([img],[0,1],roihist,[0,180,0,256],1)
And finally I would be able to binarize the picture with a threshold and track the head and hands blobs to estimate the direction of pointing.
I've no problem with my code but the skin is not detected...
What am I doing wrong?
Thx for your help!
Max
Have you tried using the Cr channel from the YCbCr format? I had some luck with Cr when I had previously worked on hand detection using skin colour. Also, there is this paper, which uses a nice method for detecting hands. But keep in mind that as long as you use skin colour, the detection will not work for all hands, but can be tuned for a given user or a bunch of users.
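A rough sketch of the Cr idea; the threshold range below is just a common starting point and needs tuning per user and lighting:
import cv2

img = cv2.imread('frame.jpg')  # hypothetical input frame
ycrcb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
cr = ycrcb[:, :, 1]  # the Cr channel
skin_mask = cv2.inRange(cr, 135, 180)  # rough skin range, tune as needed
skin = cv2.bitwise_and(img, img, mask=skin_mask)
cv2.imshow('skin', skin)
cv2.waitKey(0)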
I've been working through the available OpenCV examples on the web lately (just the basic stuff for fun). I've moved on from face recognition (interesting, but too black-box for my liking) to manually selecting the ROI in HSV space and then using camshift to track. I was still getting variable results I didn't understand, so I also plot all the intermediate processing windows, such as the HSV image and the backproject image, and graph the histogram across the windows. Suddenly all is clear: you can see exactly what the computer is trying to work with.
Here is my working code for Python 3.4 and OpenCV 3. You can manually select the skin. Credit goes mostly to other examples I've found on the web.
The cv2.calcBackProject function thresholds out the skin features nicely.
import numpy as np
import cv2
roiPts = []
track_mode = False
termination = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
roiBox = None
kernel = np.ones((5, 5), np.uint8)
frame_width_in_px = 640
number_of_histogram_elements=16
def selectROI(event, x, y, flags, param):
    global track_mode, roiPts
    if (event == cv2.EVENT_LBUTTONDOWN) and (len(roiPts) == 4): # reselecting ROI points, so take out of tracking mode and empty current roiPts
        roiPts = []
        track_mode = False
    if (event == cv2.EVENT_LBUTTONDOWN) and (len(roiPts) < 4): # ROI point selection
        roiPts.append([x, y])

cap = cv2.VideoCapture(0)
cv2.namedWindow("frame")
cv2.setMouseCallback("frame", selectROI)

while True:
    ret, frame = cap.read()

    if len(roiPts) <= 4 and len(roiPts) > 0:
        for x, y in roiPts:
            cv2.circle(frame, (x, y), 4, (0, 255, 0), 1) # draw a small circle for each ROI click

    if len(roiPts) == 4 and track_mode == False: # initialize the camshift
        # convert the selected points to a box shape
        roiBox = np.array(roiPts, dtype=np.int32)
        s = roiBox.sum(axis=1)
        tl = roiBox[np.argmin(s)]
        br = roiBox[np.argmax(s)]
        # extract the ROI from the image and calculate the histogram
        roi = frame[tl[1]:br[1], tl[0]:br[0]]
        roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        roiHist = cv2.calcHist([roi], [0], None, [number_of_histogram_elements], [0, 180])
        roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)
        roiBox = (tl[0], tl[1], br[0], br[1])
        track_mode = True # ready for camshift

    if track_mode == True: # tracking mode
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        backProj = cv2.calcBackProject([hsv], [0], roiHist, [0, 180], 1)
        # perform some noise reduction and smoothing
        erosion = cv2.erode(backProj, kernel, iterations=2)
        dilate = cv2.dilate(erosion, kernel, iterations=2)
        (r, roiBox) = cv2.CamShift(dilate, roiBox, termination) # takes the previous roiBox and calculates the new roiBox
        pts = np.int0(cv2.boxPoints(r))
        cv2.polylines(frame, [pts], True, (0, 255, 0), 2) # tracking box
        cv2.polylines(backProj, [pts], True, (0, 255, 0), 2) # tracking box
        cv2.polylines(dilate, [pts], True, (0, 255, 0), 2) # tracking box
        cv2.polylines(hsv, [pts], True, (0, 255, 0), 2) # tracking box

        # plot the histogram polyline across the windows
        x = np.linspace(0, 640, number_of_histogram_elements, dtype=np.int32)
        y = roiHist.flatten().astype(np.int32, copy=False) - 255 # note: the frame height needs to be greater than 255, which is the max histogram value
        y = np.absolute(y)
        pts2 = np.stack((x, y), axis=1)
        cv2.polylines(frame, [pts2], False, (0, 255, 0), 2)
        cv2.polylines(hsv, [pts2], False, (0, 255, 0), 2)

        cv2.imshow("backproject", backProj)
        cv2.imshow("dilate", dilate)
        cv2.imshow("hsv", hsv)

    cv2.imshow("frame", frame)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
