Adding a border to an image in my code - Python

I need to put a border around the image displayed by the code below. How do I do this? The code pulls up the image; now I need to add a border to it. All of the code is Python.
# -*- coding: utf-8 -*-
'''
LoganCaroline_1_4_7: Merge images
'''
import matplotlib.pyplot as plt
import os.path
import numpy as np # “as” lets us use standard abbreviations
'''Read the image data'''
# Get the directory of this python script
directory = os.path.dirname(os.path.abspath(__file__))
# Build an absolute filename from directory + filename
filename = os.path.join(directory, 'family1.jpg')
# Read the image data into an array
img = plt.imread(filename)
'''Show the image data'''
# Create figure with 1 subplot
fig, ax = plt.subplots(1, 1)
# Show the image data in a subplot
ax.imshow(img, interpolation='none')
# Show the figure on the screen
fig.show()

Just create a slightly larger array that is black and fill the central pixels with your image.
import numpy as np
import matplotlib.pyplot as plt

def frame_image(img, frame_width):
    b = frame_width  # border size in pixels
    ny, nx = img.shape[0], img.shape[1]  # number of pixels in y and x
    if img.ndim == 3:    # RGB or RGBA array
        framed_img = np.zeros((b+ny+b, b+nx+b, img.shape[2]))
    elif img.ndim == 2:  # grayscale image
        framed_img = np.zeros((b+ny+b, b+nx+b))
    framed_img[b:-b, b:-b] = img
    return framed_img

img = np.random.rand(456, 333, 3)
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(img, interpolation='none')
ax2.imshow(frame_image(img, 20), interpolation='none')
plt.show()
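If a plain black (or any single-valued) border is enough, np.pad does the same thing in one call. A minimal sketch, assuming a float image in the 0-1 range like the one above:
import numpy as np
import matplotlib.pyplot as plt

img = np.random.rand(456, 333, 3)
# pad 20 pixels of zeros (black) on each side of the two spatial axes,
# and nothing along the channel axis
framed = np.pad(img, ((20, 20), (20, 20), (0, 0)), mode='constant', constant_values=0)

plt.imshow(framed, interpolation='none')
plt.show()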

Building on the previous answer, if you want a customizable RGB border color (for 3-channel images):
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

def frame_image(img, frame_width):
    b = frame_width  # border size in pixels
    ny, nx = img.shape[0], img.shape[1]  # number of pixels in y and x
    # PIL expects the size as (width, height); the whole canvas starts out as the border color
    framed_img = Image.new('RGB', (b+nx+b, b+ny+b), (255, 0, 0))  # RGB color tuple
    framed_img = np.array(framed_img)  # shape (height, width, 3), dtype uint8
    framed_img[b:-b, b:-b] = img
    return framed_img

img = np.random.rand(456, 333, 3)
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(img, interpolation='none')
ax2.imshow(frame_image((img*255).astype(np.uint8), 20), interpolation='none')
plt.show()
Output example (image omitted).
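The same colored frame can also be built without PIL; a sketch under the same assumptions (a uint8 RGB image), using np.full to pre-fill the larger array with the border color:
import numpy as np

def frame_image_color(img, frame_width, color=(255, 0, 0)):
    # hypothetical helper: surround a uint8 RGB image with a solid-color border
    b = frame_width
    ny, nx = img.shape[:2]
    framed = np.full((ny + 2*b, nx + 2*b, 3), color, dtype=np.uint8)
    framed[b:-b, b:-b] = img[..., :3]
    return framed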

Related

Contour filling using OpenCV or skimage?

I'm trying to find the total area of the glue regions in this digital microscopy image.
The problem is that I can't seem to fill the regions inside; I can, however, successfully get the contours of the glue regions.
Here is my code so far:
from skimage.io import imread
from scipy.ndimage import distance_transform_edt
from skimage.color import rgb2hed
from skimage.filters import threshold_otsu
from skimage.morphology import opening, closing, disk
import numpy as np
from scipy import fftpack
from matplotlib.colors import LogNorm
from scipy import ndimage
from skimage import img_as_ubyte
import cv2
import matplotlib.pyplot as plt  # needed for the plt.* calls below
# Read the image
im = imread("10C_11_lugol_smaller.jpg")
# Show the image
plt.figure()
plt.imshow(im)
# Convert image from RGB to HED
hed = rgb2hed(im)
# Remove Stripes
im_fft = fftpack.fft2(hed[:, :, 0])
plt.figure()
plt.imshow((np.abs(im_fft)).astype(np.uint8), norm=LogNorm())
plt.colorbar()
plt.title('Fourier transform')
keep_fraction = 0.1
im_fft2 = im_fft.copy()
# Set r and c to be the number of rows and columns of the array
r, c = im_fft2.shape
im_fft2[int(r * keep_fraction):int(r * (1 - keep_fraction))] = 0
im_fft2[:, int(c * keep_fraction):int(c * (1 - keep_fraction))] = 0
plt.figure()
plt.imshow((np.abs(im_fft2)).astype(np.uint8), norm=LogNorm())
plt.colorbar()
plt.title('Filtered Spectrum')
# Reconstructing
im_new = fftpack.ifft2(im_fft2).real
plt.figure()
plt.imshow(im_new)
plt.title('Reconstructed Image')
# Creating the Otsu Threshold on the first layer
t = threshold_otsu(im_new)
mask = im_new > t
mask = closing(opening(mask, disk(1)), disk(1))
# Show the result of the thresholding
fig = plt.figure()
plt.subplot(1, 2, 1)
plt.imshow(im)
plt.subplot(1, 2, 2)
plt.imshow(mask)
# Filling the contour
cv_image = img_as_ubyte(mask)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
res = cv2.morphologyEx(cv_image, cv2.MORPH_OPEN, kernel)
plt.figure()
plt.imshow(res, plt.cm.gray)
plt.title('Filled Contours')
# Apply the distance transform on the results
distance = distance_transform_edt(mask)
plt.figure()
plt.imshow(distance)
plt.colorbar()
plt.show()
Can someone please help :(
Thanks in advance for reading!
Results so far (result images omitted).
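No accepted answer is posted here, but one common approach is sketched below, assuming the binary mask cv_image produced above: either fill the enclosed holes directly with scipy.ndimage.binary_fill_holes, or find the outer contours with cv2.findContours and draw them filled.
import cv2
import numpy as np
from scipy import ndimage

# option 1: fill every hole enclosed by the foreground
filled = ndimage.binary_fill_holes(cv_image > 0)

# option 2: draw the outer contours filled
# findContours returns 2 or 3 values depending on the OpenCV version; [-2] is always the contours
contours = cv2.findContours(cv_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
filled_cv = np.zeros_like(cv_image)
cv2.drawContours(filled_cv, contours, -1, color=255, thickness=cv2.FILLED)

# the glue area is then just the number of foreground pixels
print("area (pixels):", int(filled.sum()))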

RandomWalker segmentation algorithm produces a segmentation identical to the initial seeds

I have a medical image, and I'm trying to segment a specific zone inside it.
After several steps of conventional image processing, I was able to locate the region and get the seeds for the segmentation, but when I apply the RandomWalker algorithm, I don't get a good segmentation.
Can you please tell me what the problem is here, and how to correct it?
Code:
# import math
import numpy as np
import matplotlib.pyplot as plt
import cv2 as cv
from skimage.feature import canny
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.draw import circle_perimeter
from skimage.segmentation import watershed, random_walker, active_contour
import skimage.filters as filters
# Read image
img = cv.imread("CT.png")
# Get image center coordinates
img_center = (img.shape[0]//2, img.shape[1]//2)
# Edge detector
edges = canny(img, sigma=2.0, low_threshold=19, high_threshold=57)
# Hough_circle
hough_radii = np.arange(29, 32, 1)
hough_res = hough_circle(edges, hough_radii)
accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii, total_num_peaks=4,
                                           min_xdistance=70, min_ydistance=200, threshold=0.25)
# Remove false-positive circle
sortX = np.argsort(cx)
cx = cx[sortX[:-1]]
cy = cy[sortX[:-1]]
radii = radii[sortX[:-1]]
#--------------------------------------
# get the closest circle to the centre
#--------------------------------------
dist = []
for idx in range(len(cx)):
    dist.append(abs(img_center[1]-cx[idx]) + abs(img_center[0]-cy[idx]))
sortD = np.argsort(dist)
Cx = cx[sortD[0]]
Cy = cy[sortD[0]]
radius = radii[sortD[0]]
markers = np.ones(img.shape, dtype=np.uint)
markers[img==0] = 0
markers[Cy-radius//2:Cy+radius//2, Cx-radius//2:Cx+radius//2] = 2
# markers[(Cy-radius//2)+1:(Cy+radius//2)-1, (Cx-radius//2)+1:(Cx+radius//2)-1] = 0
#---------------------------------
labels = random_walker(img, markers)
# print(labels.shape)
# Plot results
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(8, 3.2),
                                    sharex=True, sharey=True)
ax1.imshow(img, cmap='gray')
ax1.axis('off')
ax1.set_title('Noisy data')
ax2.imshow(markers, cmap='magma')
ax2.axis('off')
ax2.set_title('Markers')
ax3.imshow(labels, cmap='gray')
ax3.axis('off')
ax3.set_title('Segmentation')
fig.tight_layout()
plt.show()
#======================================
Random walker only expands labels from the markers into regions labeled 0. Here you end up with a marker image containing 1s everywhere except for the 2s in the original square, so label 2 has nowhere to expand into: it is completely surrounded by 1s.
I was able to improve the segmentation a little by using:
border = 71
# dilation is skimage.morphology.dilation; `surround` is a ring of pixels around the
# label-2 square that gets reset to 0 so that label 2 has somewhere to grow into
surround = (
    (dilation(markers, np.ones((border, border))) == 2)
    ^ (markers == 2)
)
markers[surround] = 0
labels = random_walker(img, markers) * (img != 0)
It's definitely still not perfect. Beyond this, you will need to play with the border size as well as the beta= and tol= parameters of random_walker.
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
from skimage.feature import canny
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.segmentation import watershed, random_walker, active_contour
from skimage.morphology import erosion, dilation
from skimage.restoration import denoise_bilateral
from skimage.color import rgb2gray
from skimage.filters import threshold_local
image=plt.imread('medical_image.png')
plt.imshow(image)
plt.show()
canny_edges=canny(image, sigma=1.5 )
hough_radii = np.arange(29, 32, 1)
hough_res = hough_circle(canny_edges, hough_radii)
#Identifies most prominent circles separated by certain distances in a
#Hough space.
accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii, total_num_peaks=4,
                                           min_xdistance=70, min_ydistance=200, threshold=0.25)
img_center = (image.shape[0]//2, image.shape[1]//2)
dist = []
for idx in range(len(cx)):
    dist.append(abs(img_center[1]-cx[idx]) + abs(img_center[0]-cy[idx]))
sortD = np.argsort(dist)
Cx = cx[sortD[0]]
Cy = cy[sortD[0]]
radius = radii[sortD[0]]
markers = np.ones(image.shape, dtype=np.uint)
markers[image==0] = 0
markers[Cy-radius//2:Cy+radius//2, Cx-radius//2:Cx+radius//2] = 2
border = 71
surround = (
    (dilation(markers, np.ones((border, border))) == 2)
    ^ (markers == 2)
)
markers[surround] = 0
labels = random_walker(image, markers)
block_size=35
grayscale_image=rgb2gray(image)
denoised_image=denoise_bilateral(grayscale_image,multichannel=False)
local_thresh= threshold_local(grayscale_image, block_size,offset=.01)
#apply the thresholding to the image
binary_global = grayscale_image<local_thresh
plt.clf()
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(1, 5, figsize=(8, 3.2),
                                              sharex=True, sharey=True)
ax1.imshow(canny_edges, cmap='gray')
ax1.axis('off')
ax2.imshow(markers,cmap='gray')
ax2.axis('off')
ax3.imshow(labels,cmap='gray')
ax3.axis('off')
ax4.imshow(binary_global,cmap='gray')
ax4.axis('off')
ax5.imshow(denoised_image,cmap='gray')
ax5.axis('off')
plt.show()
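As noted above, beyond the border width the main knobs are the beta= and tol= arguments of random_walker. A minimal sketch (the values here are just starting points, not tuned for this image):
# higher beta makes the walk more edge-sensitive; smaller tol tightens convergence
labels = random_walker(image, markers, beta=250, tol=1e-3)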

Too many values to unpack error in frangi filter

I receive an error "Too many values to unpack" when running the following code:
from skimage.filters import frangi, hessian
import cv2
import matplotlib.pyplot as plt
image = cv2.imread('test.png')
image= cv2.resize(image,(300,300))
cv2.imshow('im',image)
cv2.waitKey(0)
fig, ax = plt.subplots(ncols=3, subplot_kw={'adjustable': 'box-forced'})
k=frangi(image)
ax[0].imshow(k, cmap=plt.cm.gray)
ax[0].set_title('Frangi filter result')
If you take a look at the documentation, it says that the image has to be:
image: (N, M) ndarray
Array with input image data.
Basically, you can convert the image to grayscale before applying the filter; something like this:
import cv2
import matplotlib.pyplot as plt
from skimage.filters import frangi, hessian
image = cv2.imread('test.png') # <-- shape: (N, M, C)
image = cv2.resize(image, (300,300))
cv2.imshow('im', image)
cv2.waitKey(0)
fig, ax = plt.subplots(ncols=3, subplot_kw={'adjustable': 'box-forced'})
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # <-- shape: (N, M)
k = frangi(gray_image)
ax[0].imshow(k, cmap=plt.cm.gray)
ax[0].set_title('Frangi filter result')
This is an MCVE, based on the scikit-image examples:
import cv2
from skimage.filters import frangi
import matplotlib.pyplot as plt
image = cv2.imread('lenna.png')
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
frangi_result = frangi(gray_image)
fig, ax = plt.subplots(ncols=3)
ax[0].imshow(image[..., ::-1]) # BGR to RGB
ax[0].set_title('Original image')
ax[1].imshow(gray_image, cmap=plt.cm.gray)
ax[1].set_title('Grayscale image')
ax[2].imshow(frangi_result, cmap=plt.cm.gray)
ax[2].set_title('Frangi filter result')
for a in ax:
    a.axis('off')
plt.tight_layout()
plt.show()
The output is a three-panel figure: the original image, the grayscale image, and the Frangi filter result (image omitted).

Convert a plot to an RGB array

I want to convert a plot generated with matplotlib to an RGB array. In my case, I want to draw two circles. Currently, there are two problems:
You can still see the space taken up by the axes.
The circles aren't circles anymore in the resulting RGB array.
Here is the code so far:
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import numpy as np
Generate the plot:
def drawImage(color, posx, posy, radius):
    posx_left, posx_right = posx
    posy_left, posy_right = posy
    radius_left, radius_right = radius
    fig = Figure()
    canvas = FigureCanvas(fig)
    ax = fig.gca()
    ax.axis('off')
    ax.set_axis_off()
    ax = fig.add_subplot(1, 1, 1)
    circle_left = plt.Circle((posx_left, posy_left), radius=radius_left, color=color)
    ax.add_patch(circle_left)
    circle_right = plt.Circle((posx_right, posy_right), radius=radius_right, color=color)
    ax.add_patch(circle_right)
    fig.tight_layout(pad=0)
    fig.canvas.draw()
    width, height = fig.get_size_inches() * fig.get_dpi()
    img = np.fromstring(fig.canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3)
    return img
Generate plot and save as array:
color = "blue"
radius = 0.2, 0.12
posx = 0.0, 0.8
posy = 0.3, 0.7
img = drawImage(color, posx, posy, radius)
plt.imshow(img)
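There is no answer posted here, but one sketch that addresses both problems, assuming a reasonably recent matplotlib (where FigureCanvasAgg.buffer_rgba is available), is to make the axes fill the whole figure instead of adding a second subplot with its default margins, force an equal aspect ratio on a square figure so the circles stay circular, and then read the RGBA buffer:
import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from matplotlib.patches import Circle

def draw_circles_rgb(color, posx, posy, radius):
    fig = Figure(figsize=(4, 4), dpi=100)   # square figure, so equal aspect keeps the full area
    canvas = FigureCanvas(fig)
    ax = fig.add_axes([0, 0, 1, 1])         # axes spanning the entire figure: no margin space
    ax.set_axis_off()
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_aspect('equal')                  # keeps the circles circular
    for x, y, r in zip(posx, posy, radius):
        ax.add_patch(Circle((x, y), radius=r, color=color))
    canvas.draw()
    # buffer_rgba() gives an (H, W, 4) view; drop the alpha channel to get RGB
    img = np.asarray(canvas.buffer_rgba())[..., :3].copy()
    return img

img = draw_circles_rgb("blue", (0.0, 0.8), (0.3, 0.7), (0.2, 0.12))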

How to extract RGB from an image and plot only RG as a graph? R for X and G for Y

I am trying to extract the RGB components from an image and plot the 3D RGB histogram using matplotlib, but I don't know how to do it.
Here's my current code:
import cv2
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
img_file = 'Paw03.png'
img = cv2.imread(img_file, cv2.IMREAD_COLOR) # rgb
#hsv_img = cv2.cvtColor(img,cv2.COLOR_BGR2HSV) # hsv
rows, cols, ch = img.shape
for x in range(rows):
    for y in range(cols):
        if (img[x, y, 1] == img[0, 255, 0]):
            break;
        else:
            print "Pixel:", x, y
            print "R:", R
            print "G:", g
            print "B:", b
            print "\n"
plt.plot(r, 'ro', b, 'b^')
plt.xlim([0, 255])
plt.xlabel('Pixel')
plt.ylabel('Quantity')
plt.title('Distribution of RGB in the image')
plt.show()
But it doesn't work!
So I tried with three nested for loops:
import cv2
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
img_file = 'Paw03.png'
img = cv2.imread(img_file, cv2.IMREAD_COLOR) # rgb
#hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) # hsv
rows, cols, ch = img.shape
for x in range(rows):
    for y in range(cols):
        for z in range(ch):
            if (img[x, y, z] == img[0, 255, 0]):
                break;
            else:
                print "Pixel:", x, y
                print "R:", R
                print "G:", g
                print "B:", b
                print "\n"
plt.plot(r, 'ro', b, 'b^')
plt.xlim([0, 255])
plt.xlabel('Pixel')
plt.ylabel('Quantity')
plt.title('Distribution of RGB in the image')
plt.show()
It only reaches the print statements inside the loop (and prints three times for each pixel), and the matplotlib part still doesn't work.
Can anyone help me?
The following snippet displays a 3D scatter plot of the RGB colors of your image:
import numpy as np
import matplotlib.image as mpimg
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
img = mpimg.imread('Paw03.png')
pixels = img.shape[0]*img.shape[1]
channels = 3
data = np.reshape(img[:, :, :channels], (pixels, channels))
histo_rgb, _ = np.histogramdd(data, bins=256)
r, g, b = np.nonzero(histo_rgb)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter3D(r, g, b)
ax.set_xlabel('Red')
ax.set_ylabel('Green')
ax.set_zlabel('Blue')
plt.title('RGB colors')
plt.show()
This is what you get when you run the code above (the result obviously depends on the particular image used); the resulting 3D scatter plot is omitted here.
If your goal is to visualize in 3D the 2D histogram of the intensities of the red and green channels, then you may find this code useful:
import numpy as np
import matplotlib.image as mpimg
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
img = mpimg.imread('Paw03.png')
pixels = img.shape[0]*img.shape[1]
channels = 3
data = np.reshape(img[:, :, :channels], (pixels, channels))
histo_rgb, _ = np.histogramdd(data, bins=256)
histo_rg = np.sum(histo_rgb, 2)
levels = np.arange(256)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for g in levels:
    ax.bar(levels, histo_rg[:, g], zs=g, zdir='y', color='r')
ax.set_xlabel('Red')
ax.set_ylabel('Green')
ax.set_zlabel('Number of pixels')
plt.show()
And here is the corresponding output (3D bar chart omitted).
I don't see where the values R, g, and b are set in your code example. Also, it looks like plt.plot(r, 'ro', b, 'b^') would not be plotting series but a single point.
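If what the title is really after is just a 2D plot with red on X and green on Y, a minimal sketch (assuming the same Paw03.png and OpenCV's BGR channel order) would be:
import cv2
import matplotlib.pyplot as plt

img = cv2.imread('Paw03.png', cv2.IMREAD_COLOR)   # OpenCV loads channels in B, G, R order
b, g, r = cv2.split(img)                          # split into the three channels

plt.scatter(r.ravel(), g.ravel(), s=1, alpha=0.2)
plt.xlim(0, 255)
plt.ylim(0, 255)
plt.xlabel('Red')
plt.ylabel('Green')
plt.title('Red vs. green intensities')
plt.show()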
