I want to calculate the selected area. I have a formula, but I need the number of pixels inside the contour. How can I do this?
Here is my code:
import os
import cv2
import logging
import numpy as np
from imageio import imread
import matplotlib
from matplotlib import pyplot as plt
import morphsnakes as ms
def _____():
    # Load the image.
    img = imread(PATH_IMG_NODULE)[..., 0] / 255.0

    # g(I)
    gimg = ms.inverse_gaussian_gradient(img, alpha=1000, sigma=5.48)

    # Initialization of the level-set.
    init_ls = ms.circle_level_set(img.shape, (100, 126), 20)

    # Callback for visual plotting
    callback = visual_callback_2d(img)

    # MorphGAC.
    ms.morphological_geodesic_active_contour(gimg, iterations=45,
                                             init_level_set=init_ls,
                                             smoothing=1, threshold=0.31,
                                             balloon=1, iter_callback=callback)
Here is a picture of my selected area:
Thank you.
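For reference, a minimal sketch of how the pixel count could be obtained, assuming the final level set returned by ms.morphological_geodesic_active_contour is kept (it is a binary 0/1 array marking the segmented region); the names final_ls and num_pixels below are made up for illustration:
final_ls = ms.morphological_geodesic_active_contour(gimg, iterations=45,
                                                    init_level_set=init_ls,
                                                    smoothing=1, threshold=0.31,
                                                    balloon=1, iter_callback=callback)

# Number of pixels inside the contour = number of non-zero level-set entries.
num_pixels = np.count_nonzero(final_ls)
print("Pixels inside the contour:", num_pixels)

# The physical area is then num_pixels times the area covered by one pixel
# (pixel spacing squared), which comes from the image metadata.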
I am trying to open a FITS file using the Astropy package in Python. In the code I have basically opened the FITS file and displayed the corresponding FITS object. For some reason a continuous decrease in the brightness of the image is seen. When looking at a 3D graph of the pixel values in the image, an almost constant slope can be seen. Why is this the case, and how can it be changed? I can provide any additional information if needed.
Code that displays the FITS image:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator
from astropy.nddata import Cutout2D
from astropy import units
from astropy.io import fits
import glob
from astropy.visualization import astropy_mpl_style
from astropy.visualization import simple_norm
from astropy.utils.data import download_file
from matplotlib.colors import *
from scipy import interpolate
import cv2
import sys
np.set_printoptions(threshold=sys.maxsize)
X, Y = np.mgrid[-1:1:20j, -1:1:20j]
Z = (X+Y) * np.exp(-6.0*(X*X+Y*Y)) + np.random.rand(X.shape[0])
print(X.shape, Y.shape, Z.shape)
plt.style.use(astropy_mpl_style)
filename = "jw02739-o001_t001_nircam_clear-f187n_segm.fits"
# filename = download_file('https://fits.gsfc.nasa.gov/samples/WFPC2u5780205r_c0fx.fits', cache=True )
data_set = {}
image_data = []
with fits.open(filename) as hdul:
    hdul.info()
    image_data = fits.getdata(filename, ext=1)
    print(image_data.shape)
    print()
# for i in range(len(image_data)):
image = image_data
print(type(image))
image3 = np.stack((image,)*3, axis=-1)
image3 = ((image3 - image3.min()) * (1/(image3.max() - image3.min()) * 255)).astype('uint8')
# image = np.resize(image,(200,200))
# print(image)
# print(image3)
# res = cv2.resize(image3, dsize=(864, 510), interpolation=cv2.INTER_CUBIC)
cv2.imshow("Pillars of Creation in OpenCV", image3)
cv2.waitKey()
# plt.figure()
# plt.imshow(image, cmap='binary',norm = simple_norm(image, 'sqrt'))
# plt.show()
How can this continuous decrease in brightness be removed? I want it to display the decompressed version of the FITS file.
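As a side note on the display side only (an assumption on my part, since the file itself is not available here): instead of the min-max scaling to uint8 used above, the data can be shown with one of Astropy's visualization stretches, which often reveals faint structure without altering the data. A minimal sketch, reusing the filename from the question:
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.visualization import simple_norm

filename = "jw02739-o001_t001_nircam_clear-f187n_segm.fits"
image_data = fits.getdata(filename, ext=1)

# A square-root stretch; 'log' or 'asinh' are other options worth trying.
norm = simple_norm(image_data, 'sqrt')

plt.imshow(image_data, cmap='gray', origin='lower', norm=norm)
plt.colorbar()
plt.show()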
I have been trying, for months, to make an algorithm that can detect the nuclei in GFP cell scans like this one:
GFP Cell Scan
I want it to be able to output:
desired_cell_tracking
Is there an existing library for doing this, or is there a way to train my own basic ML classifier for this?
(I attached starter code that detects just the cell masks, but not the nuclei.)
###############################
# This code uses the cellpose library to create a mask for each cell in 2 image files of GFP-channel cell scans.
###############################
#~~~~~~~~~~~~~~~~~~~~~~~~~
# GFP cell detection original libraries
from skimage.io import imread
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
import matplotlib as mpl
import fnmatch
mpl.rcParams['figure.dpi'] = 300
from cellpose import utils, io
from skimage.measure import label, regionprops, regionprops_table
import pandas as pd
from PIL import Image, ImageChops
#~~~~~~~~~~~~~~~~~~~~~~~~~
# GFP cell nucleii detection libraries
from skimage import (filters, measure, morphology, segmentation)
from scipy import ndimage as ndi
from skimage import data, color
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte
import cv2
#~~~~~~~~~~~~~~~~~~~~~~~~~
# Load images
print("Libraries imported")
GFP_files = []
GFP_files.append('03152021_GFP_cells_1_lowesy_quality.tif')
GFP_files.append('03152021_GFP_cells_2_lowesy_quality.tif')
GFP_files= sorted(GFP_files) #sorting files
plt.figure(figsize=(2,2))
img = np.array(Image.open(GFP_files[0]))
img2 = img
wid = img.shape[0]
hei = img.shape[1]
#~~~~~~~~~~~~~~~~~~~~~~~~~
# Detect GFP cells
from cellpose import models, io
import random
model = models.Cellpose(gpu=True, model_type='cyto')
channels = [2,0]
#~~~~~~~~~~~~~~~~~~~~~~~~~
# LOOPING THROUGH FILES
#defining a null array for number of cells.
imgname=[0]*len(GFP_files)
n_cell= [[2, 0]]*len(GFP_files)
seg_masks=np.zeros((len(GFP_files), wid,hei)) #saving mask as 3d array for all images
i=-1 #for indexing
kernel = np.ones((5,5), np.uint8)
print('Running...')
# THE BIG LOOP
for filename in GFP_files:
    i += 1
    img = np.array(Image.open(filename))
    masks, flows, styles, diams = model.eval(img, diameter=30, channels=channels)
    n_cell[i] = [filename, np.max(masks)]
    seg_masks[i, :, :] = masks
    imgname[i] = [filename]
    im = np.copy(img[:, :, 0])
    im[masks == 0] = 0  # set the main background (non-cell pixels) to zero
    # 'im' is now a single-color-channel image that only contains the masked cells;
    # everything else is background.
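One thing that may be worth trying (my own suggestion, not part of the starter code): cellpose also ships a pretrained 'nuclei' model, so the same eval pattern can be pointed at nuclei instead of cytoplasm. A minimal sketch, assuming the same cellpose version as above and reusing GFP_files; the diameter value is a guess, and nuclei may of course be hard to see in a pure GFP channel:
from cellpose import models
import numpy as np
from PIL import Image

# Pretrained nucleus model that ships with cellpose.
nuc_model = models.Cellpose(gpu=True, model_type='nuclei')

img = np.array(Image.open(GFP_files[0]))

# channels=[0, 0] treats the input as single-channel (grayscale) data.
nuc_masks, flows, styles, diams = nuc_model.eval(img, diameter=15, channels=[0, 0])

print("Detected nuclei:", int(np.max(nuc_masks)))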
I'm currently trying to create a Minesweeper bot using computer vision. However, using scipy.signal.correlate2d only yields noise. My test code is below; why is the output just noise and not the heatmap I would expect?
from scipy import signal
import numpy as np
import cv2
from PIL import Image

image = cv2.imread('MinesweeperTest.png', 0)
template = cv2.imread('Mine2.png', 0)

corr = signal.correlate2d(image, template, mode="same")
Image.fromarray(corr).save("correlation.png")
All the images involved can be found here:
MinesweeperTest.png: https://imgur.com/PpLLOW7
Mine2.png: https://imgur.com/ApIIs1Z
Correlation.png: https://imgur.com/hkskY00
Preprocessing the images so that their mean value is 0 before invoking correlate2d should help get a more meaningful 2D cross-correlation:
image = image - image.mean()
template = template - template.mean()
A reproducible example reads:
from imageio import imread
from matplotlib import pyplot as plt
from scipy import signal
import numpy as np
image = imread('https://i.imgur.com/PpLLOW7.png', pilmode='L')
template = imread('https://i.imgur.com/ApIIs1Z.png', pilmode='L')
# fixed these
image = image - image.mean()
template = template - template.mean()
corr = signal.correlate2d(image, template, mode="same")
plt.imshow(corr, cmap='hot')
plt.show()
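As a small follow-up (not part of the original answer), the location of the strongest correlation response can then be read off the result, which is usually what a bot needs:
# Row/column of the peak of the correlation surface.
peak_row, peak_col = np.unravel_index(np.argmax(corr), corr.shape)
print(peak_row, peak_col)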
Hey, I am trying to resize an image without stretching it, adding white pixels instead. I looked around but found nothing specifying how that can be achieved from within skimage. So instead I used numpy to add the extra pixels before the resize as arrays of [float(255)].
from skimage.io import imread
from skimage.io import imsave
from skimage.transform import resize
from matplotlib import pyplot as plt
from pylab import cm
import numpy as np
from skimage import morphology
from skimage import measure
from scipy import misc
def process(file_):
    im = imread(file_, as_grey=True)
    #im = misc.imread(file_)
    #im = np.fromfile(file_, dtype=np.int64)

    # Filler to avoid stretching
    orig_rows, orig_cols = im.shape
    print(orig_rows, orig_cols)

    if orig_rows < orig_cols:
        for addition in range(0, orig_cols - orig_rows):
            # adding white rows
            lst = np.array(list(float(255) for x in range(0, orig_cols)))
            im = np.vstack((im, lst))

    if orig_rows > orig_cols:
        for addition in range(0, orig_rows - orig_cols):
            # adding white columns
            lst = np.array(list([float(255)] for x in range(0, orig_rows)))
            im = np.hstack((im, lst))

    image = resize(im, (48, 48))
    imsave('test.jpg', im)
    imsave('test1.jpg', image)

    plt.imshow(im, cmap=cm.gray)
    plt.show()
When I view the image with pyplot it looks like this:
We can see that the columns have been added, but after I save the image with
image = resize(im, (48, 48))
imsave('test.jpg',im)
imsave('test1.jpg',image)
the images look like negatives, and the resized image looks completely white (it is nearly invisible against the site's background). Any ideas?
The code below should work. Note that the padded area's color is not exactly white, so that the image boundaries remain visible in the uploaded image. For white padding, set fill_cval = np.max(img).
def resize_padded(img, new_shape, fill_cval=None, order=1):
    import numpy as np
    from skimage.transform import resize

    if fill_cval is None:
        fill_cval = np.max(img)

    # Scale so the image fits inside new_shape while preserving its aspect ratio.
    ratio = np.min([n / i for n, i in zip(new_shape, img.shape)])
    interm_shape = np.rint([s * ratio for s in img.shape]).astype(int)

    interm_img = resize(img, interm_shape, order=order, cval=fill_cval)

    # Paste the resized image into a canvas pre-filled with the padding value.
    new_img = np.empty(new_shape, dtype=interm_img.dtype)
    new_img.fill(fill_cval)

    pad = [(n - s) >> 1 for n, s in zip(new_shape, interm_shape)]
    new_img[tuple(slice(p, -p, None) if 0 != p else slice(None, None, None)
                  for p in pad)] = interm_img

    return new_img
import numpy as np
import matplotlib.pylab as plt
from skimage.data import astronaut
from skimage.color import rgb2gray # using luminance
from skimage.io import imsave
img = rgb2gray(astronaut())
# set desired image size
out_size = (30, 100) # height, width
# set the color of the padded area. Here: "95% luminance"
fill_cval = np.max(img) * 0.95
resized_img = resize_padded(img, out_size, fill_cval=fill_cval)
imsave('img.png', img)
imsave('img_res.png', resized_img)
import numpy as np
from matplotlib import cm
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw
import cv2
from scipy import ndimage
from scipy.ndimage import measurements, morphology, filters
import pylab
img = np.asarray(Image.open('test.tif').convert('L')) #read and convert image
img = 1 * (img < 127) # threshold
plt.imshow(img, cmap=cm.Greys_r) # show as black and white
plt.show()
The code above gives white pixels on a black background. How can I compute the white region of the image, then split the image into 100 rectangles and find the rectangles with the minimum, maximum, and average numbers of white pixels? Thanks.
Since your image is binary you can just sum the values to get a count of the white pixels.
from PIL import Image
import numpy as np
img = np.asarray(Image.open("foo.jpg").convert('L'))
img = 1 * (img < 127)
m,n = img.shape
# use np.sum to count white pixels
print("{} white pixels, out of {} pixels in total.".format(img.sum(), m*n))
# use slicing to count any sub part, for example from rows 300-320 and columns 400-440
print("{} white pixels in rectangle.".format(img[300:320,400:440].sum()))
Use slicing to pick out any rectangle and use sum() on that part.
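To address the "100 rectangles" part (my own sketch, assuming a 10x10 grid of roughly equal tiles is what is wanted), the same summing idea can be applied tile by tile:
import numpy as np

rows, cols = 10, 10  # 10 x 10 = 100 rectangles
h, w = img.shape

# White-pixel count per tile; integer division spreads any remainder over the tiles.
counts = np.zeros((rows, cols), dtype=int)
for r in range(rows):
    for c in range(cols):
        r0, r1 = r * h // rows, (r + 1) * h // rows
        c0, c1 = c * w // cols, (c + 1) * w // cols
        counts[r, c] = img[r0:r1, c0:c1].sum()

print("min:", counts.min(), "max:", counts.max(), "average:", counts.mean())
print("rectangle with the most white pixels:", np.unravel_index(counts.argmax(), counts.shape))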