For months I have been trying to make an algorithm that can detect the nuclei in GFP cell scans like this one:
[image: GFP cell scan]
I want it to be able to output something like this:
[image: desired cell tracking]
Is there an existing library for doing this, or is there a way to train my own basic ML classifier for it?
(I attached starter code below that detects just the cell masks, but not the nuclei.)
###############################
# This code uses the Cellpose library to create a mask for each cell in two GFP-channel cell scan images.
###############################
#~~~~~~~~~~~~~~~~~~~~~~~~~
# Original libraries for GFP cell detection
from skimage.io import imread
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
import matplotlib as mpl
import fnmatch
mpl.rcParams['figure.dpi'] = 300
from cellpose import utils, io
from skimage.measure import label, regionprops, regionprops_table
import pandas as pd
from PIL import Image, ImageChops
#~~~~~~~~~~~~~~~~~~~~~~~~~
# Libraries for GFP cell nucleus detection
from skimage import (filters, measure, morphology, segmentation)
from scipy import ndimage as ndi
from skimage import data, color
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte
import cv2
#~~~~~~~~~~~~~~~~~~~~~~~~~
# Load images
print("Libraries imported")
GFP_files = []
GFP_files.append('03152021_GFP_cells_1_lowesy_quality.tif')
GFP_files.append('03152021_GFP_cells_2_lowesy_quality.tif')
GFP_files= sorted(GFP_files) #sorting files
plt.figure(figsize=(2,2))
img = np.array(Image.open(GFP_files[0]))
img2 = img
wid = img.shape[0]  # note: shape[0] is the number of rows (image height)
hei = img.shape[1]  # shape[1] is the number of columns (image width)
#~~~~~~~~~~~~~~~~~~~~~~~~~
# Detect GFP cells
from cellpose import models, io
import random
model = models.Cellpose(gpu=True, model_type='cyto')
channels = [2, 0]  # [cytoplasm channel, nucleus channel]: green cytoplasm, no separate nucleus channel
#~~~~~~~~~~~~~~~~~~~~~~~~~
# LOOPING THROUGH FILES
# Preallocate per-file bookkeeping (placeholder values, overwritten in the loop)
imgname = [0]*len(GFP_files)
n_cell = [[2, 0]]*len(GFP_files)
seg_masks=np.zeros((len(GFP_files), wid,hei)) #saving mask as 3d array for all images
i=-1 #for indexing
kernel = np.ones((5,5), np.uint8)
print('Running...')
# THE BIG LOOP
for filename in GFP_files:
    i += 1
    img = np.array(Image.open(filename))
    masks, flows, styles, diams = model.eval(img, diameter=30, channels=channels)
    n_cell[i] = [filename, np.max(masks)]
    seg_masks[i, :, :] = masks
    imgname[i] = [filename]
    im = np.copy(img[:, :, 0])
    im[masks == 0] = 0  # zero out everything outside the detected cell masks
    # 'im' is now a single-channel image containing only the detected cells; everything else is background.
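Since Cellpose already finds the cell bodies, one option worth trying before training a custom classifier is its pretrained 'nuclei' model on the same images. A minimal sketch, assuming the nuclei are distinguishable in the GFP channel (or that a separate nuclear-stain channel is available); diameter=None lets Cellpose estimate the size:
from cellpose import models

# Hypothetical extension of the loop above: run the pretrained nucleus model
nuc_model = models.Cellpose(gpu=True, model_type='nuclei')

for filename in GFP_files:
    img = np.array(Image.open(filename))
    # channels=[0, 0] treats the input as grayscale; adjust if a nuclear channel exists
    nuc_masks, nuc_flows, nuc_styles, nuc_diams = nuc_model.eval(
        img, diameter=None, channels=[0, 0])
    print(filename, 'nuclei detected:', int(np.max(nuc_masks)))
From there, regionprops_table (already imported above) can turn the nucleus masks into per-nucleus centroids and areas for tracking.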
I am trying to apply some blur to a DICOM image using a low-pass filter, but my resulting DICOM image is not correct (see images below).
(All data below is publicly available.)
from scipy import fftpack
import numpy as np
import imageio
from PIL import Image, ImageDraw
import pydicom
def test(matrix):
    image1_np = matrix  # read_xray2("./CT000000.dcm")

    # FFT of image
    fft1 = fftpack.fftshift(fftpack.fft2(image1_np))

    # Create a low-pass filter image
    x, y = image1_np.shape[0], image1_np.shape[1]
    # size of circle
    e_x, e_y = 50, 50
    # create a box
    bbox = ((x/2)-(e_x/2), (y/2)-(e_y/2), (x/2)+(e_x/2), (y/2)+(e_y/2))
    low_pass = Image.new("L", (image1_np.shape[0], image1_np.shape[1]), color=0)
    draw1 = ImageDraw.Draw(low_pass)
    draw1.ellipse(bbox, fill=1)
    low_pass_np = np.array(low_pass)

    # multiply both the images
    filtered = np.multiply(fft1, low_pass_np)

    # inverse FFT
    ifft2 = np.real(fftpack.ifft2(fftpack.ifftshift(filtered)))
    ifft2 = np.maximum(0, np.minimum(ifft2, 255))
    return ifft2
dicom = pydicom.dcmread("./CT000000.dcm")
dicom.PixelData = test(dicom.pixel_array)
dicom.save_as(r"./result.dcm")
Original Image
Resulting Image
I fixed the code by using OpenCV's GaussianBlur instead:
import cv2

dicom = pydicom.dcmread("./CT000000.dcm")
blurred = cv2.GaussianBlur(dicom.pixel_array, (7, 7), 0)
# PixelData expects raw bytes; GaussianBlur keeps the original dtype, so the byte length still matches
dicom.PixelData = blurred.tobytes()
# save the image
dicom.save_as(r"./result.dcm")
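As a quick sanity check, the saved file can be read back to confirm the blurred pixel data round-tripped with the expected shape and dtype (a small sketch using the filenames from the question):
import pydicom
from matplotlib import pyplot as plt

# Re-read the blurred DICOM and inspect the decoded pixel array
check = pydicom.dcmread("./result.dcm")
print(check.pixel_array.dtype, check.pixel_array.shape)

plt.imshow(check.pixel_array, cmap='gray')
plt.title('Blurred result')
plt.show()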
In Python, I can use this code to determine the color distance between two images, but I have no idea how to do it with TensorFlow.
(I already have a function to convert a tensor from RGB to LAB color space.)
from skimage.color import deltaE_ciede2000
import cv2
import numpy as np
im1 = cv2.imread('image1.jpg')
im2 = cv2.imread('image2.jpg')
im1_lab = cv2.cvtColor(im1, cv2.COLOR_BGR2LAB).astype(np.float32)
im2_lab = cv2.cvtColor(im2, cv2.COLOR_BGR2LAB).astype(np.float32)
color_dist = deltaE_ciede2000(im1_lab, im2_lab).mean()
print('color distance:', color_dist)
You can wrap the deltaE_ciede2000 function in a tf.py_function:
import tensorflow as tf
from skimage.color import deltaE_ciede2000
a = tf.random.uniform((8,8,3))
b = tf.random.uniform((8,8,3))
color_dist = tf.reduce_mean(tf.py_function(deltaE_ciede2000, inp=[a,b], Tout=tf.float32))
Another solution is to re-implement the skimage function in TensorFlow; you can find the source code here.
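If a graph-friendly (and differentiable) op is preferred over py_function, a much simpler alternative is the plain Euclidean ΔE (CIE76) in LAB space, which is easy to express directly in TensorFlow. A sketch under the assumption that both tensors are already in LAB (the question mentions an existing RGB→LAB conversion); note this is not the full CIEDE2000 formula:
import tensorflow as tf

def delta_e_cie76(lab1, lab2):
    # Per-pixel Euclidean distance in LAB, averaged over the image
    return tf.reduce_mean(tf.norm(lab1 - lab2, axis=-1))

a = tf.random.uniform((8, 8, 3))
b = tf.random.uniform((8, 8, 3))
print(delta_e_cie76(a, b))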
I'm new to programming and I'd like to ask how I can write my code so that it reads all the pictures within a directory, processes them one by one, and then saves the output images in another directory.
%pylab
%matplotlib inline
import cv2
import glob
import os
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
filename = [img for img in glob.glob("outp/*.png")]
flist=sorted(filename)
images = []
for img in flist:
    n = cv2.imread(img, 0)
    images.append(n)
    blur = cv.GaussianBlur(n, (9,9), 0)
    cv.imwrite(flist, blur)  # definitely wrong!! but I don't know how to do it
You don't need to build a list of all the pixel data of all the images in memory; that just wastes memory. So remove the lines:
images = []
and
images.append(...)
Then change your imwrite() to overwrite the current image:
cv2.imwrite(img, blur)
Here's a more complete version:
#!/usr/bin/env python3
import cv2
import glob
import os
import numpy as np

# Go to where the images are
os.chdir('outp')

# Get list of filenames to convert
files = glob.glob("*.png")

for file in files:
    n = cv2.imread(file, 0)                 # read as grayscale
    blur = cv2.GaussianBlur(n, (9, 9), 0)
    cv2.imwrite('blurred_' + file, blur)
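Since the question asks for the outputs to land in a different directory, here is a small variation of the same loop (the output directory name outp_blurred is just an assumption):
import glob
import os
import cv2

in_dir = 'outp'
out_dir = 'outp_blurred'          # hypothetical output directory
os.makedirs(out_dir, exist_ok=True)

for path in sorted(glob.glob(os.path.join(in_dir, '*.png'))):
    img = cv2.imread(path, 0)                     # read as grayscale
    blur = cv2.GaussianBlur(img, (9, 9), 0)
    out_path = os.path.join(out_dir, os.path.basename(path))
    cv2.imwrite(out_path, blur)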
Try it this way (this assumes gray is your image and x, y, w, h define the region you want to save):
cv2.imwrite("mypicture.jpg", gray[y:y+h, x:x+w])
I want to calculate the area of a selected region. I have a formula, but I need to count the number of pixels inside the contour.
How can I do that?
Here is my code:
import os
import cv2
import logging
import numpy as np
from imageio import imread
import matplotlib
from matplotlib import pyplot as plt
import morphsnakes as ms
def _____():
    # Load the image.
    img = imread(PATH_IMG_NODULE)[..., 0] / 255.0

    # g(I)
    gimg = ms.inverse_gaussian_gradient(img, alpha=1000, sigma=5.48)

    # Initialization of the level-set.
    init_ls = ms.circle_level_set(img.shape, (100, 126), 20)

    # Callback for visual plotting
    callback = visual_callback_2d(img)

    # MorphGAC.
    ms.morphological_geodesic_active_contour(gimg, iterations=45,
                                             init_level_set=init_ls,
                                             smoothing=1, threshold=0.31,
                                             balloon=1, iter_callback=callback)
Here is a picture of my selected area:
Thank you.
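One way to get the pixel count is from the final level set that morphological_geodesic_active_contour returns. A minimal sketch, assuming the function returns the final level-set array (as in the morphsnakes examples) and that gimg, init_ls and callback are the variables defined above:
import numpy as np

final_ls = ms.morphological_geodesic_active_contour(
    gimg, iterations=45, init_level_set=init_ls,
    smoothing=1, threshold=0.31, balloon=1, iter_callback=callback)

# Pixels inside the contour are the non-zero entries of the level set
n_pixels = int(np.count_nonzero(final_ls))
print('pixels inside contour:', n_pixels)
# Multiply by the physical area of one pixel (if known) to get the real area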
I'm currently trying to create a bot for Minesweeper using computer vision. However, using scipy.signal.correlate2d only yields noise. My test code is below; why is the output just noise instead of the heatmap I would expect?
from scipy import signal
import numpy as np
import cv2
from PIL import Image
image = cv2.imread('MinesweeperTest.png',0)
template = cv2.imread('Mine2.png',0)
corr = signal.correlate2d(image,template,mode="same")
Image.fromarray(corr).save("correlation.png")
All the images involved can be found here:
MinesweeperTest.png: https://imgur.com/PpLLOW7
Mine2.png: https://imgur.com/ApIIs1Z
Correlation.png: https://imgur.com/hkskY00
Preprocessing the images so that their mean value is 0 before invoking correlate2d should help get a more meaningful 2D cross-correlation:
image = image - image.mean()
template = template - template.mean()
A reproducible example reads:
from imageio import imread
from matplotlib import pyplot as plt
from scipy import signal
import numpy as np
image = imread('https://i.imgur.com/PpLLOW7.png', pilmode='L')
template = imread('https://i.imgur.com/ApIIs1Z.png', pilmode='L')
# fixed these
image = image - image.mean()
template = template - template.mean()
corr = signal.correlate2d(image, template, mode="same")
plt.imshow(corr, cmap='hot')
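To turn the heatmap into an actual detection, the strongest response can be located in the correlation map (a short follow-up to the example above; for many templates, cv2.matchTemplate with a normalized method is a common, faster alternative):
import numpy as np
from matplotlib import pyplot as plt

# Continuing from the example above: find where the template matches best
peak_y, peak_x = np.unravel_index(np.argmax(corr), corr.shape)
print('strongest match near (x, y):', (int(peak_x), int(peak_y)))
plt.show()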