Shadow Removal in Traffic Lane Lines - Python

I am working on lane line detection. My current strategy is:
- Defining a region of interest where lane lines could be
- Warping the image to get a bird's-eye view
- Converting the image to YUV color space
- Normalizing the Y channel
- Fitting a second-order polynomial with a sliding-window approach
Everything works fine, but where there are shadows the algorithm does not work.
I have tried adaptive thresholding and Otsu thresholding, but without success.
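For reference, those attempts looked roughly like this (a sketch; the parameters are illustrative, not the exact values I used):

import cv2

img = cv2.imread('./test_images/new_test.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Adaptive threshold: a per-neighbourhood threshold (blockSize must be odd)
adaptive = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 25, -5)

# Otsu threshold: one global value picked from the histogram
_, otsu = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)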
Source Image without Shadow:
Processed Source Image without Shadow:
Source Image with Shadow:
Processed Source Image with Shadow:
In the second image it can be seen that the shadowed area is not detected. Shadows pull the image values down, so I tried thresholding the image with new, lower values; the resulting image can be found here:
This technique does not work either, as it introduces a lot of noise.
Currently I am trying background-subtraction and shadow-removal techniques, but they are not working. I have been stuck on this problem for the last two to three weeks.
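For reference, the stock OpenCV background subtractor (designed for video sequences, which may be part of why it fails here on single frames) flags shadow pixels explicitly; a sketch:

backsub = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
fgmask = backsub.apply(frame)  # frame is one image of a sequence; shadow pixels come out as 127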
Any help will really be appreciated...
import cv2
import matplotlib.pyplot as plt
import numpy as np
from helper_functions import undistort, threshholding, unwarp, sliding_window_polyfit
from helper_functions import polyfit_using_prev_fit, calc_curv_rad_and_center_dist
# increase_brightness_img and contrast_img are used below; assuming they also live in helper_functions
from helper_functions import increase_brightness_img, contrast_img
from Lane_Lines_Finding import RoI

img = cv2.imread('./test_images/new_test.jpg')
new = undistort(img)
new = cv2.cvtColor(new, cv2.COLOR_RGB2BGR)
#new = threshholding(new)
h, w = new.shape[:2]

# define source and destination points for transform
imshape = img.shape
# this first set of vertices is immediately overwritten by the one below
vertices = np.array([[(257, 670),
                      (590, 446),
                      (722, 440),
                      (1150, 650)]], dtype=np.int32)
p1 = (170, 670)
p2 = (472, 475)
p3 = (745, 466)
p4 = (1050, 650)
vertices = np.array([[p1, p2, p3, p4]], dtype=np.int32)
masked_edges = RoI(new, vertices)
#masked_edges = cv2.cvtColor(masked_edges, cv2.COLOR_RGB2BGR)
src = np.float32([(575, 464),
                  (707, 464),
                  (258, 682),
                  (1049, 682)])
dst = np.float32([(450, 0),
                  (w - 450, 0),
                  (450, h),
                  (w - 450, h)])
warp_img, M, Minv = unwarp(masked_edges, src, dst)
warp_img = increase_brightness_img(warp_img)
warp_img = contrast_img(warp_img)

YUV = cv2.cvtColor(warp_img, cv2.COLOR_RGB2YUV)
Y, U, V = cv2.split(YUV)
Y_equalized = cv2.equalizeHist(Y)
# note: this merges the original (non-equalized) Y; the thresholding below uses Y_equalized directly
YUV = cv2.merge((Y, U, V))

thresh_min = 253
thresh_max = 255
binary = np.zeros_like(Y)
binary[(Y_equalized >= thresh_min) & (Y_equalized <= thresh_max)] = 1

kernel_opening = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(binary, cv2.MORPH_OPEN, kernel_opening)
kernel = np.ones((7, 7), np.uint8)
dilation = cv2.dilate(opening, kernel, iterations=3)
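One variant I am looking at for the shadow problem: cv2.createCLAHE equalizes contrast locally per tile instead of over the whole frame, so a shadow that darkens only part of the image gets stretched within its own tiles. A sketch reusing the variables above (clip limit and tile size are untuned guesses):

clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
Y_clahe = clahe.apply(Y)  # drop-in replacement for cv2.equalizeHist(Y)
binary_clahe = np.zeros_like(Y)
binary_clahe[(Y_clahe >= thresh_min) & (Y_clahe <= thresh_max)] = 1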

Related

How to remove CT bed/shadows in a CT image with python?

I am working with 3D CT images and trying to remove the lines from the bed.
A slice from the original image:
Following is my code to generate the mask:
from scipy import ndimage
from skimage import morphology
import numpy as np

# image_norm is the normalized CT volume (prepared earlier)
segmentation = morphology.dilation(image_norm, np.ones((1, 1, 1)))
labels, label_nb = ndimage.label(segmentation)
label_count = np.bincount(labels.ravel().astype(int))
# zero out the background label so argmax picks the largest real component
label_count[0] = 0
mask = labels == label_count.argmax()
mask = morphology.dilation(mask, np.ones((40, 40, 40)))
mask = ndimage.morphology.binary_fill_holes(mask)
mask = morphology.dilation(mask, np.ones((1, 1, 1)))
This results in the following image:
As you can see, in the above image the CT scan is distorted as well.
If I change: mask = morphology.dilation(mask, np.ones((40, 40, 40))) to mask = morphology.dilation(mask, np.ones((100, 100, 100))), the resulting image is as follows:
How can I remove only the two lines under the image without changing the image area? Any help is appreciated.
You've probably found another solution by now. Regardless, I've seen similar CT processing questions on SO, and figured it would be helpful to demonstrate a Scikit-Image solution. Here's the end result.
Here's the code to produce the above images.
from skimage import io, filters, color, morphology
import matplotlib.pyplot as plt
import numpy as np

image = color.rgba2rgb(
    io.imread("ctimage.png")[9:-23, 32:-9]
)
gray = color.rgb2gray(image)
tgray = gray > filters.threshold_otsu(gray)
keep_mask = morphology.remove_small_objects(tgray, min_size=463)
keep_mask = morphology.remove_small_holes(keep_mask)
maskedimg = np.einsum('ijk,ij->ijk', image, keep_mask)

fig, axes = plt.subplots(ncols=3)
image_list = [image, keep_mask, maskedimg]
title_list = ["Original", "Mask", "Image w/mask"]
for i, ax in enumerate(axes):
    ax.imshow(image_list[i])
    ax.set_title(title_list[i])
    ax.axis("off")
fig.tight_layout()
Notes on code
image = color.rgba2rgb(
    io.imread("ctimage.png")[9:-23, 32:-9]
)
gray = color.rgb2gray(image)
The image was saved as RGBA when I downloaded it from SO, and it needs to be grayscale for the threshold function.
Your image might already be in grayscale.
Also, the downloaded image showed axis markings; that's why I've trimmed the image.
maskedimg = np.einsum('ijk,ij->ijk',image,keep_mask)
I wanted to apply keep_mask to every channel of the RGB image. The mask is a 2D array, and the image is a 3D array. I referenced this previous question in order to apply the mask to the image.
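If the einsum reads as opaque, plain NumPy broadcasting should be equivalent here (my own rewriting, not from the original answer):

# multiply each RGB channel by the 2D mask via broadcasting
maskedimg = image * keep_mask[:, :, np.newaxis]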

Creating a new image of a given size containing a cropped image

I am currently working on the problem below and am struggling to find the best approach.
I've searched a lot but was not able to find answers matching what I am trying to do.
The problem:
- Relocating an object (e.g. a shoe) within the existing image (white background) to a certain location (e.g. moving it up)
- Inserting and positioning the object at a user-specified location within a new background (still white), with a user-specified new height/width
How far I got:
I've managed to identify the object within the picture using CV2, got the outer contours, added a little padding, and cropped the object (see below). I am happy with cropping it that way, as all my images have a single-coloured background and I will keep the background the same colour.
Where I am stuck:
My cropped object and the old image background / new background do not share the same shape, hence I am not able to overlay / concatenate / merge them.
Given that both images are stored as np arrays, I assume the answer will be to somehow place the shoe-crop np.array within the background np.array, but I have no clue how to do this.
Maybe there is an easier / different way to do this?
I would be very grateful to hear from anyone who can point me in the right direction.
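For what it's worth, the core of that idea is plain slice assignment into the larger array; a minimal sketch (the offsets are made-up numbers, and crop_img is produced by the code below):

import numpy as np

background = 255 * np.ones((1200, 1200, 3), dtype=np.uint8)  # white canvas
y, x = 100, 200  # hypothetical top-left corner for the paste
h, w = crop_img.shape[:2]
background[y:y + h, x:x + w] = crop_img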
Code
#importing dependencies
import os
import numpy as np
import cv2
from matplotlib import pyplot as plt

# Config
path = '/Users/..../Shoes/'
img_list = os.listdir(path)
img_path = path + img_list[0]

#Outline
color = (0, 255, 0)
thickness = 3
padding = 10

# read the image (this line was missing; image is used below)
image = cv2.imread(img_path)
# convert to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# create a binary thresholded image
_, binary = cv2.threshold(gray, 225, 255, cv2.THRESH_BINARY_INV)
# find the contours from the thresholded image
contours, hierarchy = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

# Identifying outer contours
x_axis = []
y_axis = []
for i in range(len(contours)):
    for y in range(len(contours[i])):
        x_axis.append(contours[i][y][0][0])
        y_axis.append(contours[i][y][0][1])
min_x = min(x_axis) - padding
min_y = min(y_axis) - padding
max_x = max(x_axis) + padding
max_y = max(y_axis) + padding

# Defining start and end point of the outline rectangle based on identified outer corners + padding
start_point = (min_x, min_y)
end_point = (max_x, max_y)
image_outline = cv2.rectangle(image, start_point, end_point, color, thickness)
plt.imshow(image_outline)
plt.show()

#Crop Image
crop_img = image[min_y:max_y, min_x:max_x]
print(crop_img.shape)
plt.imshow(crop_img)
plt.show()
I think I got to a solution; this centers the image for any given new background height/width.
Still interested in quicker / cleaner ways.
#Define the new height and width you want to have
new_height = 1200
new_width = 1200

#Check current height and width of the cropped image
crop_height = crop_img.shape[0]
crop_width = crop_img.shape[1]

#calculate how much to add to the sides and top - basically half of the remaining height/width ... currently not working correctly for odd numbers
add_sides = int((new_width - crop_width) / 2)
add_top_and_btm = int((new_height - crop_height) / 2)

# Adding background to the sides
bg_sides = np.zeros(shape=[crop_height, add_sides, 3], dtype=np.uint8)
bg_sides2 = 255 * np.ones(shape=[crop_height, add_sides, 3], dtype=np.uint8)
new_crop_img = np.insert(crop_img, [1], bg_sides2, axis=1)
new_crop_img = np.insert(new_crop_img, [-1], bg_sides2, axis=1)

# Then adding background to top and bottom
bg_top_and_btm = np.zeros(shape=[add_top_and_btm, new_width, 3], dtype=np.uint8)
bg_top_and_btm2 = 255 * np.ones(shape=[add_top_and_btm, new_width, 3], dtype=np.uint8)
new_crop_img = np.insert(new_crop_img, [1], bg_top_and_btm2, axis=0)
new_crop_img = np.insert(new_crop_img, [-1], bg_top_and_btm2, axis=0)
plt.imshow(new_crop_img)
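Since cleaner ways were asked for: cv2.copyMakeBorder pads all four sides in one call and handles the odd-pixel case by splitting the remainder explicitly. A sketch reusing the variables above (not tested on these images):

pad_x = new_width - crop_width
pad_y = new_height - crop_height
centered = cv2.copyMakeBorder(crop_img,
                              top=pad_y // 2, bottom=pad_y - pad_y // 2,
                              left=pad_x // 2, right=pad_x - pad_x // 2,
                              borderType=cv2.BORDER_CONSTANT,
                              value=(255, 255, 255))  # white background
plt.imshow(centered)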

How do I crop an image based on a custom mask in Python?

Below I have attached two images. I want the first image to be cropped in a heart shape according to the mask image (second image).
I searched for solutions but was not able to find a simple and easy way to do this. Kindly help me with the solution.
2 images:
Image to be cropped:
Mask image:
Let's start by loading the temple image from sklearn:
from sklearn.datasets import load_sample_images
import matplotlib.pyplot as plt

dataset = load_sample_images()
temple = dataset.images[0]
plt.imshow(temple)
Since we need to use the second image as a mask, we must do a binary thresholding operation. This creates a black-and-white mask, which we can then use to mask the first image.
import cv2

# using cv2.imread here: matplotlib's imread does not accept a grayscale flag
heart = cv2.imread(r'path_to_im\heart.jpg', cv2.IMREAD_GRAYSCALE)
_, mask = cv2.threshold(heart, thresh=180, maxval=255, type=cv2.THRESH_BINARY)
We can now trim the image so its dimensions are compatible with the temple image:
temple_x, temple_y, _ = temple.shape
heart_x, heart_y = mask.shape
x_heart = min(temple_x, heart_x)
x_half_heart = mask.shape[0]//2
heart_mask = mask[x_half_heart-x_heart//2 : x_half_heart+x_heart//2+1, :temple_y]
plt.imshow(heart_mask, cmap='Greys_r')
Now we have to slice the image that we want to mask to fit the dimensions of the actual mask. Another option would have been to resize the mask, which is doable, but we'd then end up with a distorted heart image. To apply the mask, we have cv2.bitwise_and:
temple_width_half = temple.shape[1]//2
temple_to_mask = temple[:,temple_width_half-x_half_heart:temple_width_half+x_half_heart]
masked = cv2.bitwise_and(temple_to_mask,temple_to_mask,mask = heart_mask)
plt.imshow(masked)
If you want to instead make the masked (black) region transparent:
tmp = cv2.cvtColor(masked, cv2.COLOR_BGR2GRAY)
_, alpha = cv2.threshold(tmp, 0, 255, cv2.THRESH_BINARY)
b, g, r = cv2.split(masked)
rgba = [b, g, r, alpha]
masked_tr = cv2.merge(rgba)
plt.axis('off')
plt.imshow(masked_tr)
Since I am on a remote server, cv2.imshow doesn't work for me, so I imported plt.
This code does what you are looking for:
import cv2
import matplotlib.pyplot as plt

img_org = cv2.imread('~/temple.jpg')
img_mask = cv2.imread('~/heart.jpg')

##Resizing images
img_org = cv2.resize(img_org, (400, 400), interpolation=cv2.INTER_AREA)
img_mask = cv2.resize(img_mask, (400, 400), interpolation=cv2.INTER_AREA)

for h in range(len(img_mask)):
    for w in range(len(img_mask)):
        if img_mask[h][w][0] == 0:
            for i in range(3):
                img_org[h][w][i] = 0
        else:
            continue
plt.imshow(img_org)
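A vectorized equivalent of the pixel loop, for speed (same logic, just NumPy boolean indexing; not from the original answer):

img_org[img_mask[:, :, 0] == 0] = 0  # zero all channels where the mask is black
plt.imshow(img_org)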

MRI (brain tumor) image processing and segmentation, skull removing

I need help with image segmentation. I have an MRI image of a brain with a tumor. I need to remove the cranium (skull) from the MRI and then segment only the tumor object. How could I do that in Python, with image processing? I have tried making contours, but I don't know how to find and remove the largest contour and get only the brain without the skull.
Thanks a lot.
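One common way to pick the largest contour, for reference, is by area; a sketch, where binary_slice stands in for a thresholded MRI slice:

import cv2

contours, _ = cv2.findContours(binary_slice, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
largest = max(contours, key=cv2.contourArea)  # typically the skull outline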
import numpy as np
from skimage import morphology
from sklearn.cluster import KMeans

def get_brain(img):
    row_size = img.shape[0]
    col_size = img.shape[1]
    mean = np.mean(img)
    std = np.std(img)
    img = img - mean
    img = img / std
    # use the middle of the image to estimate the tissue intensity clusters
    middle = img[int(col_size / 5):int(col_size / 5 * 4), int(row_size / 5):int(row_size / 5 * 4)]
    mean = np.mean(middle)
    max = np.max(img)
    min = np.min(img)
    img[img == max] = mean
    img[img == min] = mean
    # two-cluster k-means separates foreground from background intensities
    kmeans = KMeans(n_clusters=2).fit(np.reshape(middle, [np.prod(middle.shape), 1]))
    centers = sorted(kmeans.cluster_centers_.flatten())
    threshold = np.mean(centers)
    thresh_img = np.where(img < threshold, 1.0, 0.0)  # threshold the image
    eroded = morphology.erosion(thresh_img, np.ones([3, 3]))
    dilation = morphology.dilation(eroded, np.ones([5, 5]))
    return dilation  # the resulting brain mask
These images are similar to the ones I'm looking at:
Thanks for any answers.
Preliminaries
Some preliminary code:
%matplotlib inline
import numpy as np
import cv2
from matplotlib import pyplot as plt
from skimage.morphology import extrema
from skimage.morphology import watershed as skwater

def ShowImage(title, img, ctype):
    plt.figure(figsize=(10, 10))
    if ctype == 'bgr':
        b, g, r = cv2.split(img)       # get b, g, r
        rgb_img = cv2.merge([r, g, b]) # switch it to rgb
        plt.imshow(rgb_img)
    elif ctype == 'hsv':
        rgb = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)
        plt.imshow(rgb)
    elif ctype == 'gray':
        plt.imshow(img, cmap='gray')
    elif ctype == 'rgb':
        plt.imshow(img)
    else:
        raise Exception("Unknown colour type")
    plt.axis('off')
    plt.title(title)
    plt.show()
For reference, here's one of the brain+skulls you linked to:
#Read in image
img = cv2.imread('brain.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ShowImage('Brain with Skull',gray,'gray')
Extracting a Mask
If the pixels in the image can be classified into two different intensity classes, that is, if they have a bimodal histogram, then Otsu's method can be used to threshold them into a binary mask. Let's check that assumption.
#Make a histogram of the intensities in the grayscale image
plt.hist(gray.ravel(),256)
plt.show()
Okay, the data is nicely bimodal. Let's apply the threshold and see how we do.
#Threshold the image to binary using Otsu's method
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_OTSU)
ShowImage('Applying Otsu',thresh,'gray')
Things are easier to see if we overlay our mask onto the original image:
colormask = np.zeros(img.shape, dtype=np.uint8)
colormask[thresh!=0] = np.array((0,0,255))
blended = cv2.addWeighted(img,0.7,colormask,0.1,0)
ShowImage('Blended', blended, 'bgr')
Extracting the Brain
The overlap of the brain (shown in red) with the mask is so perfect that we'll stop right here. To do so, let's extract the connected components and find the largest one, which will be the brain.
ret, markers = cv2.connectedComponents(thresh)
#Get the area taken by each component. Ignore label 0 since this is the background.
marker_area = [np.sum(markers == m) for m in range(1, np.max(markers) + 1)]
#Get label of largest component by area
largest_component = np.argmax(marker_area) + 1 #Add 1 since label 0 (background) was skipped
#Get pixels which correspond to the brain
brain_mask = markers==largest_component
brain_out = img.copy()
#In a copy of the original image, clear those pixels that don't correspond to the brain
brain_out[brain_mask==False] = (0,0,0)
ShowImage('Connected Components',brain_out,'rgb')
Considering the Second Brain
Running this again with your second image produces a mask with many holes:
We can close many of these holes using a closing transformation:
brain_mask = np.uint8(brain_mask)
kernel = np.ones((8,8),np.uint8)
closing = cv2.morphologyEx(brain_mask, cv2.MORPH_CLOSE, kernel)
ShowImage('Closing', closing, 'gray')
We can now extract the brain:
brain_out = img.copy()
#In a copy of the original image, clear those pixels that don't correspond to the brain
brain_out[closing==False] = (0,0,0)
ShowImage('Connected Components',brain_out,'rgb')
If you need to cite this for some reason:
Richard Barnes. (2018). Using Otsu's method for skull-brain segmentation (v1.0.1). Zenodo. https://doi.org/10.5281/zenodo.6042312
Have you perhaps tried running python skull_stripping.py?
You can modify the parameters, but it normally works well.
There are some new studies using deep learning for skull stripping, which I found interesting:
https://github.com/mateuszbuda/brain-segmentation/tree/master/skull-stripping
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 28 17:10:56 2021
#author: K Somasundaram, ka.somasundaram#gmail.com
"""
import numpy as npy
from skimage.filters import threshold_otsu
from skimage import measure
# import image reading module image from matplotlib
import matplotlib.image as img
#import image plotting module pyplot from matplotlib
import matplotlib.pyplot as plt

inim = img.imread('015.bmp')
#Find the dimensions of the input image
dimn = inim.shape
print('dim =', dimn)
plt.figure(1)
plt.imshow(inim)
#-----------------------------------------------
# Find a threshold for the image using the Otsu method in filters
th = threshold_otsu(inim)
print('Threshold =', th)
# Binarize using threshold th
binim1 = inim > th
plt.figure(2)
plt.imshow(binim1)
#--------------------------------------------------
# Erode the binary image with a structuring element
from skimage.morphology import disk
import skimage.morphology as morph
#Erode it with a disk of radius 3
eroded_image = morph.erosion(binim1, disk(3))
plt.figure(3)
plt.imshow(eroded_image)
#---------------------------------------------
# Label the binary image
labelimg = measure.label(eroded_image, background=0)
plt.figure(4)
plt.imshow(labelimg)
#--------------------------------------------------
# Find the area of the connected regions
prop = measure.regionprops(labelimg)
# Find the number of objects in the image
ncount = len(prop)
print('Number of regions =', ncount)
#-----------------------------------------------------
# Find the LCC (largest connected component) index
argmax = 0
maxarea = 0
#Find the largest connected region
for i in range(ncount):
    if prop[i].area > maxarea:
        maxarea = prop[i].area
        argmax = i
print('max area =', maxarea, 'arg max =', argmax)
print('values =', [region.area for region in prop])
# Take only the largest connected region:
# generate a mask the size of the input image with all zeros,
bmask = npy.zeros(inim.shape, dtype=npy.uint8)
# then set all pixels belonging to the LCC to 1
bmask[labelimg == (argmax + 1)] = 1
plt.figure(5)
plt.imshow(bmask)
#------------------------------------------------
#Dilate the isolated region to recover the pixels lost in erosion
dilated_mask = morph.dilation(bmask, disk(6))
plt.figure(6)
plt.imshow(dilated_mask)
#---------------------------------------
# Extract the brain using the brain mask
brain = inim * dilated_mask
plt.figure(7)
plt.imshow(brain)
Input Image:

Using inpaint function in OpenCV via Python to interpolate broken river-data in a watershed

Background
A raster file collected via LIDAR records the topography of a watershed. To properly model the watershed, the river must appear continuous, without any breaks or interruptions. The roads in the raster file appear like dams that interrupt the river, as seen in the picture below.
Specific Area Under Consideration in the Watershed
Objective
These river breaks are the main problem, and I have been trying, so far unsuccessfully, to remove them.
Approach
Via Python, I used various tools and prebuilt functions in the OpenCV library. The primary function in this approach is cv2.inpaint, which takes an image file and a mask file and interpolates the original image wherever the mask pixels are nonzero.
The main step here is determining the mask file, which I did by detecting the corners at the breaks in the river. The mask file guides the inpaint function to fill in pixels according to the patterns in the surrounding pixels.
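In isolation the call looks like this (a minimal sketch; image_8u and mask_8u are hypothetical 8-bit single-channel arrays, the mask nonzero where pixels should be filled):

import cv2

filled = cv2.inpaint(image_8u, mask_8u, inpaintRadius=12, flags=cv2.INPAINT_TELEA)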
Problem
My issue is that this happens from all directions, whereas I require it to extrapolate pixel data only from the river itself. The image below shows the flawed result: inpaint works, but it considers data from outside the river too.
Inpainted Result
Here is my code if you are so kind as to help:
import scipy.io as sio
import numpy as np
from matplotlib import pyplot as plt
import cv2

matfile = sio.loadmat('box.mat') ## box.mat original raster file linked below
ztopo = matfile['box']

#Crop smaller section for analysis
ztopo2 = ztopo[200:500, 1400:1700]

## Step 1) Isolate river
river = ztopo2.copy()
river[ztopo2 < 217.5] = 0
#This will become problematic for the entire watershed w/o proper slicing

## Step 2) Detect corners
# note: cv2.cornerHarris expects a single-channel 8-bit or float32 image
dst = cv2.cornerHarris(river, 3, 7, 0.04)
# cornerHarris arguments adjust qualities of corner markers

# Dilate image (unnecessary)
#dst = cv2.dilate(dst,None)

# Threshold for an optimal value; it may vary depending on the image.
# This adjusts what defines a corner
river2 = river.copy()
river2[dst > 0.01 * dst.max()] = [150]

## Step 3) Remove river and keep corners
#Loop to isolate detected corners
rows, columns = river2.shape
for i in np.arange(rows):
    for j in np.arange(columns):
        if river2[i, j] != 150:
            river2[i, j] = 0

# Save corners as a new image for import during the next step.
# Import must be via cv2, as thresholding and contour detection need an image array;
# the sio.loadmat call above yields a single-channel raster rather than BGR.
cv2.imwrite('corners.png', river2)

## Step 4) Create mask image by defining and filling a contour around the previously detected corners
#Step 4 code retrieved from http://dsp.stackexchange.com/questions/2564/opencv-c-connect-nearby-contours-based-on-distance-between-them
#Article: OpenCV/C++ connect nearby contours based on distance between them

#Function deciding whether two contours are close to each other
def find_if_close(cnt1, cnt2):
    row1, row2 = cnt1.shape[0], cnt2.shape[0]
    for i in range(row1):
        for j in range(row2):
            dist = np.linalg.norm(cnt1[i] - cnt2[j])
            if abs(dist) < 50:
                return True
            elif i == row1 - 1 and j == row2 - 1:
                return False

#import image of corners created in step 3 so thresholding can function properly
img = cv2.imread('corners.png')
#Thresholding and finding contours only works on a grayscale image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
contours, hier = cv2.findContours(thresh, cv2.RETR_EXTERNAL, 2)

LENGTH = len(contours)
status = np.zeros((LENGTH, 1))
for i, cnt1 in enumerate(contours):
    x = i
    if i != LENGTH - 1:
        for j, cnt2 in enumerate(contours[i + 1:]):
            x = x + 1
            dist = find_if_close(cnt1, cnt2)
            if dist == True:
                val = min(status[i], status[x])
                status[x] = status[i] = val
            else:
                if status[x] == status[i]:
                    status[x] = i + 1

unified = []
maximum = int(status.max()) + 1
for i in range(maximum):
    pos = np.where(status == i)[0]
    if pos.size != 0:
        cont = np.vstack([contours[i] for i in pos])
        hull = cv2.convexHull(cont) # wrap each group of close contours in one convex outline
        unified.append(hull)
cv2.drawContours(img, unified, -1, (0, 255, 0), 1) #Last argument specifies contour width
cv2.drawContours(thresh, unified, -1, 255, -1)
# thresh is the filled contour, while img shows the contour itself
# The contour surrounds the corners
#cv2.imshow('pic', thresh) #Produces black and white image

## Step 5) Merge via inpaint
river = np.uint8(river)
ztopo2 = np.uint8(ztopo2)
thresh[thresh > 0] = 1
#new = river.copy()
merged = cv2.inpaint(river, thresh, 12, cv2.INPAINT_TELEA)

plt.imshow(merged)
plt.colorbar()
plt.show()
