I have the following code:
import cv2
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
image = cv2.imread("/home/facu/holo.tif", 0)
spectrum = np.fft.fftshift(np.fft.fft2(image))
intensity = 10 * np.log(np.abs(spectrum))
mask = intensity.astype(bool)
img = intensity.astype(float)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, k=2, mode='arpack')
label_img = -np.ones(mask.shape)
label_img[mask] = labels
So I'm trying to use the spectral_clustering function, but I get this error:
AttributeError: 'numpy.ndarray' object has no attribute 'img_to_graph'
How can I convert my "intensity" numpy array into something that img_to_graph can work with?
You are overwriting the imported module sklearn.feature_extraction.image with the array returned by cv2.imread("/home/facu/holo.tif", 0), so the function img_to_graph is no longer accessible.
The solution is to rename one of them, e.g. with
raw_img = cv2.imread("/home/facu/holo.tif", 0)
and adjust the rest accordingly.
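A minimal corrected sketch of the whole script (untested against your data; note that recent scikit-learn versions spell the arguments n_clusters and eigen_solver in place of the older k and mode):
import cv2
import numpy as np
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering

# The array no longer shadows the sklearn module, so image.img_to_graph stays reachable
raw_img = cv2.imread("/home/facu/holo.tif", 0)
spectrum = np.fft.fftshift(np.fft.fft2(raw_img))
intensity = 10 * np.log(np.abs(spectrum))

mask = intensity.astype(bool)
img = intensity.astype(float)

# Build the pixel-connectivity graph from the module, not from the array
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())

labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_img = -np.ones(mask.shape)
label_img[mask] = labels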
I am trying to open a FITS file using the AstroPy package in Python. In the code I have basically opened the FITS file and displayed the corresponding FITS object. For some reason a continuous decrease in the brightness of the image is seen. When looking at the 3D graph of the pixel values in the image, we can see an almost constant slope being formed. Why is this the case, and how can it be changed? I can provide any additional information if needed.
Code that displays the FITS image:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator
from astropy.nddata import Cutout2D
from astropy import units
from astropy.io import fits
import glob
from astropy.visualization import astropy_mpl_style
from astropy.visualization import simple_norm
from astropy.utils.data import download_file
from matplotlib.colors import *
from scipy import interpolate
import cv2
import sys
np.set_printoptions(threshold=sys.maxsize)
X, Y = np.mgrid[-1:1:20j, -1:1:20j]
Z = (X+Y) * np.exp(-6.0*(X*X+Y*Y)) + np.random.rand(X.shape[0])
print(X.shape, Y.shape, Z.shape)
plt.style.use(astropy_mpl_style)
filename = "jw02739-o001_t001_nircam_clear-f187n_segm.fits"
# filename = download_file('https://fits.gsfc.nasa.gov/samples/WFPC2u5780205r_c0fx.fits', cache=True )
data_set = {}
image_data = []
with fits.open(filename) as hdul:
    hdul.info()
    image_data = fits.getdata(filename, ext=1)
    print(image_data.shape)
    print()
# for i in range(len(image_data)):
image = image_data
print(type(image))
image3 = np.stack((image,)*3, axis=-1)
image3 = ((image3 - image3.min()) * (1/(image3.max() - image3.min()) * 255)).astype('uint8')
# image = np.resize(image,(200,200))
# print(image)
# print(image3)
# res = cv2.resize(image3, dsize=(864, 510), interpolation=cv2.INTER_CUBIC)
cv2.imshow("Pillars of Creation in OpenCV", image3)
cv2.waitKey()
# plt.figure()
# plt.imshow(image, cmap='binary',norm = simple_norm(image, 'sqrt'))
# plt.show()
How can this continuous decrease in brightness be removed? I want it to display the decompressed version of the FITS file.
I have been trying for months to make an algorithm that can detect the nuclei in GFP cell scans like this one:
GFP Cell Scan
I want it to be able to output:
desired_cell_tracking
Is there an existing library for doing this, or is there a way to train my own basic ML classifier for it?
(I attached starter code that detects just the cell masks, but not the nuclei.)
###############################
# This code uses cellpose library to create a mask for each cell in 2 png files of GFP-channel cell scans.
###############################
#~~~~~~~~~~~~~~~~~~~~~~~~~
# GFP cell detection original libraries
from skimage.io import imread
import numpy as np
import time, os, sys
import matplotlib.pyplot as plt
import matplotlib as mpl
import fnmatch
mpl.rcParams['figure.dpi'] = 300
from cellpose import utils, io
from skimage.measure import label, regionprops, regionprops_table
import pandas as pd
from PIL import Image, ImageChops
#~~~~~~~~~~~~~~~~~~~~~~~~~
# GFP cell nuclei detection libraries
from skimage import (filters, measure, morphology, segmentation)
from scipy import ndimage as ndi
from skimage import data, color
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle_perimeter
from skimage.util import img_as_ubyte
import cv2
#~~~~~~~~~~~~~~~~~~~~~~~~~
# Load images
print("Libraries imported")
GFP_files = []
GFP_files.append('03152021_GFP_cells_1_lowesy_quality.tif')
GFP_files.append('03152021_GFP_cells_2_lowesy_quality.tif')
GFP_files= sorted(GFP_files) #sorting files
plt.figure(figsize=(2,2))
img = np.array(Image.open(GFP_files[0]))
img2 = img
wid = img.shape[0]
hei = img.shape[1]
#~~~~~~~~~~~~~~~~~~~~~~~~~
# Detect GFP cells
from cellpose import models, io
import random
model = models.Cellpose(gpu=True, model_type='cyto')
channels = [2,0]
#~~~~~~~~~~~~~~~~~~~~~~~~~
# LOOPING THROUGH FILES
#defining a null array for number of cells.
imgname=[0]*len(GFP_files)
n_cell= [[2, 0]]*len(GFP_files)
seg_masks=np.zeros((len(GFP_files), wid,hei)) #saving mask as 3d array for all images
i=-1 #for indexing
kernel = np.ones((5,5), np.uint8)
print('Running...')
# THE BIG LOOP
for filename in GFP_files:
    i += 1
    img = np.array(Image.open(filename))
    masks, flows, styles, diams = model.eval(img, diameter=30, channels=channels)
    n_cell[i] = [filename, np.max(masks)]
    seg_masks[i, :, :] = masks
    imgname[i] = [filename]
    im = np.copy(img[:, :, 0])
    im[masks == 0] = 0  # set main background threshold on
    # 'im' is now a single-color-channel image that contains only the detected cells; everything else is background.
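One possible direction, as a sketch rather than a tested solution: cellpose also ships a pretrained 'nuclei' model, so you could run it on the same image and keep only the nuclei that fall inside the cell masks from the loop above (the channels setting may need adjusting for your data):
# Sketch: detect nuclei with cellpose's pretrained 'nuclei' model,
# then zero out any nuclei that lie outside the cell masks found above
nuc_model = models.Cellpose(gpu=True, model_type='nuclei')
nuc_masks, nuc_flows, nuc_styles, nuc_diams = nuc_model.eval(img, diameter=None, channels=channels)
nuclei_in_cells = np.where(masks > 0, nuc_masks, 0)
print("nuclei found inside cells:", len(np.unique(nuclei_in_cells)) - 1)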
In Python, I can use this code to determine the color distance between two images, but I have no idea how to do it with TensorFlow.
(I already have a function to convert a tensor from RGB to LAB color space.)
from skimage.color import deltaE_ciede2000
import cv2
import numpy as np
im1 = cv2.imread('image1.jpg')
im2 = cv2.imread('image2.jpg')
im1_lab = cv2.cvtColor(im1, cv2.COLOR_BGR2LAB).astype(np.float32)
im2_lab = cv2.cvtColor(im2, cv2.COLOR_BGR2LAB).astype(np.float32)
color_dist = deltaE_ciede2000(im1_lab, im2_lab).mean()
print('color distance:', color_dist)
You can wrap the deltaE_ciede2000 function in a tf.py_function:
import tensorflow as tf
from skimage.color import deltaE_ciede2000
a = tf.random.uniform((8,8,3))
b = tf.random.uniform((8,8,3))
color_dist = tf.reduce_mean(tf.py_function(deltaE_ciede2000, inp=[a,b], Tout=tf.float32))
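If you need this inside a model, a minimal sketch (assuming both tensors are already in LAB space) wraps it as a metric function; note that gradients do not flow through the numpy-based skimage call, so this works for evaluation but not as a trainable loss:
import tensorflow as tf
from skimage.color import deltaE_ciede2000

def ciede2000_metric(y_true, y_pred):
    # y_true and y_pred are assumed to be LAB tensors of the same shape
    dist = tf.py_function(deltaE_ciede2000, inp=[y_true, y_pred], Tout=tf.float32)
    return tf.reduce_mean(dist)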
Another solution is to re-implement the skimage function in TensorFlow; you can find the source code here.
I'm currently trying to create a bot for Minesweeper using computer vision. However, using scipy.signal.correlate2d only yields noise. My test code is below; why is the output just noise and not the heatmap I would expect?
from scipy import signal
import numpy as np
import cv2
from PIL import Image
image = cv2.imread('MinesweeperTest.png',0)
template = cv2.imread('Mine2.png',0)
corr = signal.correlate2d(image,template,mode="same")
Image.fromarray(corr).save("correlation.png")
All the images involved can be found here:
MinesweeperTest.png: https://imgur.com/PpLLOW7
Mine2.png: https://imgur.com/ApIIs1Z
Correlation.png: https://imgur.com/hkskY00
Preprocessing the images so that their mean value is 0 before invoking correlate2d should help get a more meaningful 2D cross-correlation:
image = image - image.mean()
template = template - template.mean()
A reproducible example reads:
from imageio import imread
from matplotlib import pyplot as plt
from scipy import signal
import numpy as np
image = imread('https://i.imgur.com/PpLLOW7.png', pilmode='L')
template = imread('https://i.imgur.com/ApIIs1Z.png', pilmode='L')
# fixed these
image = image - image.mean()
template = template - template.mean()
corr = signal.correlate2d(image, template, mode="same")
plt.imshow(corr, cmap='hot')
plt.show()
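To turn the correlation map into actual click coordinates for the bot, a small follow-up sketch is to locate the peak response in the corr array computed above:
# Row and column of the strongest template response
peak_y, peak_x = np.unravel_index(np.argmax(corr), corr.shape)
print("best match centered near:", peak_x, peak_y)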
I am trying to implement a dimming method that dims an image by transforming pixel values with the equation X(new) = 0.5 * X(old)^2. I did some googling but could not succeed. Here is my code:
import math
from PIL import Image
import numpy as np
from skimage import color, io
import matplotlib.pyplot as plt
def load(image_path):
    out = plt.imread(image_path)
    out = out.astype(np.float64) / 255
    return out

def dim_image(image):
    out = image.point(lambda x: x * 0.5)
    return out

def display(img):
    # Show image
    plt.figure(figsize=(5, 5))
    plt.imshow(img)
    plt.axis('off')
    plt.show()
image1 = load(image1_path)
image2 = load(image2_path)
display(image1)
display(image2)
new_image = dim_image(image1)
display(new_image)
You are trying to use .point on a numpy array, but .point is a PIL Image method and doesn't exist on numpy arrays. I assume you meant to reduce all color values by 50% to dim the image, in which case you can use np.dot (docs):
def dim_image(image):
    out = np.dot(image, 0.5)
    return out
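If the goal is the stated equation X(new) = 0.5 * X(old)^2 rather than a plain 50% reduction, a direct numpy version (assuming the [0, 1] float array returned by load) would be:
def dim_image(image):
    # X_new = 0.5 * X_old**2, applied elementwise to a float image in [0, 1]
    return 0.5 * image ** 2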