ValueError: need at least one array to concatenate - python

I am having issues with
ValueError: need at least one array to concatenate
Below is the whole error message.
Training mode
Traceback (most recent call last):
  File "bcf.py", line 342, in <module>
    bcf.train()
  File "bcf.py", line 321, in train
    self._learn_codebook()
  File "bcf.py", line 142, in _learn_codebook
    feats_sc = np.concatenate(feats_sc, axis=1).transpose()
ValueError: need at least one array to concatenate
Below is the area of the problem.
def _learn_codebook(self):
    MAX_CFS = 800  # max number of contour fragments per image; if above, sample randomly
    CLUSTERING_CENTERS = 1500
    feats_sc = []
    for image in self.data.values():
        feats = image['cfs']
        feat_sc = feats[1]
        if feat_sc.shape[1] > MAX_CFS:
            # Sample MAX_CFS from contour fragments
            rand_indices = np.random.permutation(feat_sc.shape[1])
            feat_sc = feat_sc[:, rand_indices[:MAX_CFS]]
        feats_sc.append(feat_sc)
    feats_sc = np.concatenate(feats_sc, axis=1).transpose()
    print("Running KMeans...")
    self.kmeans = sklearn.cluster.KMeans(min(CLUSTERING_CENTERS, feats_sc.shape[0]), n_jobs=-1, algorithm='elkan').fit(feats_sc)
    print("Saving codebook...")
    self._save_kmeans(self.kmeans)
    return self.kmeans
Below is the complete CLASS
class BCF():
    def __init__(self):
        self.DATA_DIR = "/Users/minniemouse/TRAIN/bcf-master5/data/cuauv/"
        self.PERC_TRAINING_PER_CLASS = 0.5
        self.CODEBOOK_FILE = "codebook.data"
        self.CLASSIFIER_FILE = "classifier"
        self.LABEL_TO_CLASS_MAPPING_FILE = "labels_to_classes.data"
        self.classes = defaultdict(list)
        self.data = defaultdict(dict)
        self.counter = defaultdict(int)
        self.kmeans = None
        self.clf = None
        self.label_to_class_mapping = None

    def _load_classes(self):
        for dir_name, subdir_list, file_list in os.walk(self.DATA_DIR):
            if subdir_list:
                continue
            for f in sorted(file_list, key=hash):
                self.classes[dir_name.split('/')[-1]].append(os.path.join(dir_name, f))

    def _load_training(self):
        for cls in self.classes:
            images = self.classes[cls]
            for image in images[:int(len(images) * self.PERC_TRAINING_PER_CLASS)]:
                image_id = self._get_image_identifier(cls)
                self.data[image_id]['image'] = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
                if self.data[image_id]['image'] is None:
                    print("Failed to load " + image)

    def _load_testing(self):
        for cls in self.classes:
            images = self.classes[cls]
            for image in images[int(len(images) * self.PERC_TRAINING_PER_CLASS):]:
                image_id = self._get_image_identifier(cls)
                self.data[image_id]['image'] = cv2.imread(image, cv2.IMREAD_GRAYSCALE)
                if self.data[image_id]['image'] is None:
                    print("Failed to load " + image)

    def _load_single(self, image):
        # Load single image data
        self.data.clear()
        image_id = self._get_image_identifier(None)
        self.data[image_id]['image'] = image

    def _save_label_to_class_mapping(self):
        self.label_to_class_mapping = {hash(cls): cls for cls in self.classes}
        with open(self.LABEL_TO_CLASS_MAPPING_FILE, 'wb') as out_file:
            pickle.dump(self.label_to_class_mapping, out_file, -1)

    def _load_label_to_class_mapping(self):
        if self.label_to_class_mapping is None:
            with open(self.LABEL_TO_CLASS_MAPPING_FILE, 'rb') as in_file:
                self.label_to_class_mapping = pickle.load(in_file)
        return self.label_to_class_mapping

    def _normalize_shapes(self):
        for (cls, idx) in self.data.keys():
            image = self.data[(cls, idx)]['image']
            # Remove void space
            y, x = np.where(image > 50)
            max_y = y.max()
            min_y = y.min()
            max_x = x.max()
            min_x = x.min()
            trimmed = image[min_y:max_y, min_x:max_x] > 50
            trimmed = trimmed.astype('uint8')
            trimmed[trimmed > 0] = 255
            self.data[(cls, idx)]['normalized_image'] = trimmed

    def _extract_cf(self):
        for (cls, idx) in self.data.keys():
            image = self.data[(cls, idx)]['normalized_image']
            images, contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            contour = sorted(contours, key=len)[-1]
            mat = np.zeros(image.shape, np.int8)
            cv2.drawContours(mat, [contour], -1, (255, 255, 255))
            #self.show(mat)
            MAX_CURVATURE = 1.5
            N_CONTSAMP = 50
            N_PNTSAMP = 10
            C = None
            for pnt in contour:
                if C is None:
                    C = np.array([[pnt[0][0], pnt[0][1]]])
                else:
                    C = np.append(C, [[pnt[0][0], pnt[0][1]]], axis=0)
            cfs = self._extr_raw_points(C, MAX_CURVATURE, N_CONTSAMP, N_PNTSAMP)
            tmp = mat.copy()
            for cf in cfs:
                for pnt in cf:
                    cv2.circle(tmp, (pnt[0], pnt[1]), 2, (255, 0, 0))
            #self.show(tmp)
            num_cfs = len(cfs)
            print("Extracted %s points" % (num_cfs))
            feat_sc = np.zeros((300, num_cfs))
            xy = np.zeros((num_cfs, 2))
            for i in range(num_cfs):
                cf = cfs[i]
                sc, _, _, _ = shape_context(cf)
                # shape context is 60x5 (60 bins at 5 reference points)
                sc = sc.flatten(order='F')
                sc /= np.sum(sc)  # normalize
                feat_sc[:, i] = sc
                # shape context descriptor sc for each cf is 300x1
                # save a point at the midpoint of the contour fragment
                xy[i, 0:2] = cf[np.round(len(cf) / 2. - 1).astype('int32'), :]
            sz = image.shape
            self.data[(cls, idx)]['cfs'] = (cfs, feat_sc, xy, sz)

    def _learn_codebook(self):
        MAX_CFS = 800  # max number of contour fragments per image; if above, sample randomly
        CLUSTERING_CENTERS = 1500
        feats_sc = []
        for image in self.data.values():
            feats = image['cfs']
            feat_sc = feats[1]
            if feat_sc.shape[1] > MAX_CFS:
                # Sample MAX_CFS from contour fragments
                rand_indices = np.random.permutation(feat_sc.shape[1])
                feat_sc = feat_sc[:, rand_indices[:MAX_CFS]]
            feats_sc.append(feat_sc)
        feats_sc = np.concatenate(feats_sc, axis=1).transpose()
        print("Running KMeans...")
        self.kmeans = sklearn.cluster.KMeans(min(CLUSTERING_CENTERS, feats_sc.shape[0]), n_jobs=-1, algorithm='elkan').fit(feats_sc)
        print("Saving codebook...")
        self._save_kmeans(self.kmeans)
        return self.kmeans
I have read through the various posts on this ValueError, but I am not having much luck figuring it out. I have attached the complete class and full error message information above.
Please, can someone point out what I am missing?
Thank you

The problem comes from the length of your array. Check whether your array/list is empty before concatenating, e.g. print(len(feats_sc)).
Don't forget to check out the documentation: numpy.concatenate — NumPy v1.16 Manual
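If the list really is empty, a guard just before the failing line makes the cause obvious. A minimal sketch, to be placed inside the question's _learn_codebook (the message text is illustrative):
if len(feats_sc) == 0:
    raise ValueError("feats_sc is empty: self.data has %d entries; "
                     "check DATA_DIR and that contour fragments were "
                     "extracted before _learn_codebook()" % len(self.data))
feats_sc = np.concatenate(feats_sc, axis=1).transpose()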

The problem seems to be in np.concatenate, where it expects a sequence of arrays and it's not receiving that.
Refer to the NumPy docs:
numpy.concatenate((a1, a2, ...), axis=0, out=None)

Join a sequence of arrays along an existing axis.

Parameters:
  a1, a2, … : sequence of array_like
      The arrays must have the same shape, except in the dimension
      corresponding to axis (the first, by default).
  axis : int, optional
      The axis along which the arrays will be joined. If axis is None,
      arrays are flattened before use. Default is 0.
  out : ndarray, optional
      If provided, the destination to place the result. The shape must be
      correct, matching that of what concatenate would have returned if no
      out argument were specified.

Returns:
  res : ndarray
      The concatenated array.
In your case, check what feats_sc contains.
You can debug using pdb:
python -m pdb <your-code>.py
(Pdb) b fullpath/to/your-code.py:line-number-to-break
(Pdb) c
b sets a break point
c continues until a break point is encountered
n moves to the next line
q quits
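For example, to break just before the failing concatenate (a hypothetical session; adjust the path and line number to your copy of bcf.py, line 142 being the one from the traceback above):
python -m pdb bcf.py
(Pdb) b bcf.py:142
(Pdb) c
(Pdb) p len(feats_sc)    # 0 here means the loop never appended anything
(Pdb) p len(self.data)   # 0 here means no images were loaded at all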

Just to make it clearer, running the following piece of code throws the same ValueError: need at least one array to concatenate error.
import numpy as np
feats_sc = np.array([])
feats_sc = np.concatenate(feats_sc, axis=1)
whereas the following code does not.
import numpy as np
feats_sc = np.array(([[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [1 ,2 ,3]]))
feats_sc = np.concatenate(feats_sc, axis=1)
The reason is that in the former, the numpy array is empty, and in the latter, it is not.
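In the question's code that means the feats_sc list never receives an element, which in turn means self.data is empty. A quick way to confirm, using only the methods shown in the question (a sketch; adjust to your setup):
bcf = BCF()
bcf._load_classes()
print(sum(len(v) for v in bcf.classes.values()), "files found under", bcf.DATA_DIR)
bcf._load_training()
print(len(bcf.data), "training images loaded")  # 0 here explains the empty feats_sc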

Related

ValueError for reshaping array for image entropy

I have code for finding the entropy of an image by reshaping the image data and splitting it into its RGB channels.
import os
from PIL import Image
import numpy as np
from scipy.misc import imread
import cv2
import imageio
# Read an RGB image
def openRGB(image_path):
    f = open(image_path, "rb")
    data = f.read()
    f.close()
    data = [int(x) for x in data]
    data = np.array(data).reshape((256*256, 3)).astype(np.uint8)
    return data

def entropy(X):
    n = len(X)
    counts = np.bincount(X)
    probs = counts[np.nonzero(counts)] / n
    en = 0
    for i in range(len(probs)):
        en = en - probs[i] * np.log(probs[i]) / np.log(2)
    return en

def getEntropy(image_path):
    data = openRGB(image_path)
    data_B = data[:, 0]
    data_G = data[:, 1]
    data_R = data[:, 2]
    B = entropy(data_B)
    G = entropy(data_G)
    R = entropy(data_R)
    return (R+B+G)/2
However, whenever I run the getEntropy() function on a given image, it keeps giving back this error:
ValueError: cannot reshape array of size 37048 into shape (65536,3)
Any idea how I can reformat the image to fit that array shape?
There is a simple explanation: the file at image_path is only 37048 bytes long.
When using np.array(data).reshape((256*256, 3)), the length of data must be 256*256*3 = 196608 bytes.
You are getting an exception because the lengths do not match.
It's simple to reproduce the problem: create an input sample file of 196608 bytes and there is no exception; create one of 37048 bytes and you get the exception.
Here is a code sample that reproduces the problem:
import os
#from PIL import Image
import numpy as np
#from scipy.misc import imread
#import cv2
#import imageio

def openRGB(image_path):
    f = open(image_path, "rb")
    data = f.read()
    f.close()
    data = [int(x) for x in data]
    data = np.array(data).reshape((256*256, 3)).astype(np.uint8)
    return data

def entropy(X):
    n = len(X)
    counts = np.bincount(X)
    probs = counts[np.nonzero(counts)] / n
    en = 0
    for i in range(len(probs)):
        en = en - probs[i] * np.log(probs[i]) / np.log(2)
    return en

def getEntropy(image_path):
    data = openRGB(image_path)
    data_B = data[:, 0]
    data_G = data[:, 1]
    data_R = data[:, 2]
    B = entropy(data_B)
    G = entropy(data_G)
    R = entropy(data_R)
    return (R+B+G)/2

# Create a binary file with random bytes
image_path = 'tmp.bin'

# When n_bytes=196608, there is no exception.
################################################################################
n_bytes = 256*256*3
tmp = np.random.randint(0, 255, n_bytes, np.uint8)  # Build random array of n_bytes bytes
with open(image_path, 'wb') as f:
    f.write(tmp)  # Write tmp to a binary file
file_size_in_bytes = os.path.getsize(image_path)
print('file_size_in_bytes = ' + str(file_size_in_bytes))
res = getEntropy(image_path)
print(res)
################################################################################

# When n_bytes=37048, an exception is raised: ValueError: cannot reshape array of size 37048 into shape (65536,3)
################################################################################
n_bytes = 37048
tmp = np.random.randint(0, 255, n_bytes, np.uint8)  # Build random array of n_bytes bytes
with open(image_path, 'wb') as f:
    f.write(tmp)  # Write tmp to a binary file
file_size_in_bytes = os.path.getsize(image_path)
print('file_size_in_bytes = ' + str(file_size_in_bytes))
res = getEntropy(image_path)
print(res)
################################################################################
Why are you reading 37048 bytes instead of 196608 bytes?
image_path is a JPEG image file, so f = open(image_path, "rb") followed by data = f.read() reads the compressed file contents, not the decoded pixels.
You may read and decode the image as follows:
import cv2

def openRGB(image_path):
    # For example: image_path = 'img.jpg'
    img = cv2.imread(image_path)  # Read (and decode) the image in BGR color format.
    data = np.array(img).reshape(img.shape[0]*img.shape[1], 3)  # Reshape to (rows*cols, 3): blue in data[:, 0], green in data[:, 1], red in data[:, 2].
    return data

In the above example I used img.shape[0]*img.shape[1]; the image resolution is:
height = img.shape[0]
width = img.shape[1]
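With that openRGB in place, the rest of the question's pipeline runs unchanged. For example (a sketch, assuming some 'img.jpg' exists on disk):
data = openRGB('img.jpg')
print(data.shape)             # (height*width, 3), one row per pixel
print(getEntropy('img.jpg'))  # per-channel entropy combined as in the question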

could not broadcast input array from shape (20,310,310) into shape (20)

I'm trying to detect lung cancer nodules using DICOM files. The main steps in cancer detection are the following:
1) Preprocessing
   - Converting the pixel values to Hounsfield Units (HU)
   - Resampling to an isotropic resolution to remove variance in scanner resolution
   - Lung segmentation
2) Training the data set using preprocessed images in a TensorFlow CNN
3) Testing and validation
I followed a few online tutorials to do this.
I need to combine the solutions given in
1) https://www.kaggle.com/gzuidhof/full-preprocessing-tutorial
2) https://www.kaggle.com/sentdex/first-pass-through-data-w-3d-convnet
I could implement the example in link two, but since it lacks lung segmentation and a few other preprocessing steps, I need to combine the steps in link one with link two. I'm getting a number of errors while doing so. Since I'm new to Python, can someone please help me solve them?
There are 20 patient folders, and each patient folder has a number of slices, which are DICOM files.
For the process_data method, the slices_path of each patient and the patient number were passed in.
def process_data(slices, patient, labels_df, img_px_size, hm_slices):
    try:
        label = labels_df.get_value(patient, 'cancer')
        patient_pixels = get_pixels_hu(slices)
        segmented_lungs2, spacing = resample(patient_pixels, slices, [1,1,1])
        new_slices = []
        segmented_lung = segment_lung_mask(segmented_lungs2, False)
        segmented_lungs_fill = segment_lung_mask(segmented_lungs2, True)
        segmented_lungs = segmented_lungs_fill - segmented_lung
        # This method returns the smallest integer not less than x.
        chunk_sizes = math.ceil(len(segmented_lungs) / HM_SLICES)
        for slice_chunk in chunks(segmented_lungs, chunk_sizes):
            slice_chunk = list(map(mean, zip(*slice_chunk)))  # list - []
            #print (slice_chunk)
            new_slices.append(slice_chunk)
        print(len(segmented_lungs), len(new_slices))
        if len(new_slices) == HM_SLICES-1:
            new_slices.append(new_slices[-1])
        if len(new_slices) == HM_SLICES-2:
            new_slices.append(new_slices[-1])
            new_slices.append(new_slices[-1])
        if len(new_slices) == HM_SLICES+2:
            new_val = list(map(mean, zip(*[new_slices[HM_SLICES-1], new_slices[HM_SLICES],])))
            del new_slices[HM_SLICES]
            new_slices[HM_SLICES-1] = new_val
        if len(new_slices) == HM_SLICES+1:
            new_val = list(map(mean, zip(*[new_slices[HM_SLICES-1], new_slices[HM_SLICES],])))
            del new_slices[HM_SLICES]
            new_slices[HM_SLICES-1] = new_val
        print('LENGTH ', len(segmented_lungs), len(new_slices))
    except Exception as e:
        # again, some patients are not labeled, but JIC we still want the error if something
        # else is wrong with our code
        print(str(e))
    #print(len(new_slices))
    if label == 1: label = np.array([0, 1])
    elif label == 0: label = np.array([1, 0])
    return np.array(new_slices), label
Main method:
# Some constants
#data_dir = '../../CT_SCAN_IMAGE_SET/IMAGES/'
#patients = os.listdir(data_dir)
#labels_df=pd.read_csv('../../CT_SCAN_IMAGE_SET/stage1_labels.csv',index_col=0)
#patients.sort()
#print (labels_df.head())
much_data = []
much_data2 = []
for num, patient in enumerate(patients):
    if num % 100 == 0:
        print(num)
    try:
        slices = load_scan(data_dir + patients[num])
        img_data, label = process_data(slices, patients[num], labels_df, IMG_PX_SIZE, HM_SLICES)
        much_data.append([img_data, label])
        #much_data2.append([processed, label])
    except:
        print('This is unlabeled data')
np.save('muchdata-{}-{}-{}.npy'.format(IMG_PX_SIZE, IMG_PX_SIZE, HM_SLICES), much_data)
#np.save('muchdata-{}-{}-{}.npy'.format(IMG_PX_SIZE,IMG_PX_SIZE,HM_SLICES),much_data2)
The preprocessing part works fine, but when I try to feed the final output into a convolutional NN and train the data set, the following is the error I receive, including some of the comments I had put in:
0
shape hu
(113, 512, 512)
Resize factor
[ 2.49557522 0.6015625 0.6015625 ]
shape
(282, 308, 308)
chunk size
15
282 19
LENGTH 282 20
Tensor("Placeholder:0", dtype=float32)
..........1.........
..........2.........
..........3.........
..........4.........
WARNING:tensorflow:From C:\Research\Python_installation\lib\site-packages\tensorflow\python\util\tf_should_use.py:170: initialize_all_variables (from tensorflow.python.ops.variables) is deprecated and will be removed after 2017-03-02.
Instructions for updating:
Use `tf.global_variables_initializer` instead.
..........5.........
..........6.........
Epoch 1 completed out of 20 loss: 0
..........7.........
Traceback (most recent call last):
  File "C:\Research\LungCancerDetaction\sendbox2.py", line 436, in <module>
    train_neural_network(x)
  File "C:\Research\LungCancerDetaction\sendbox2.py", line 424, in train_neural_network
    print('Accuracy:',accuracy.eval({x:[i[0] for i in validation_data], y:[i[1] for i in validation_data]}))
  File "C:\Research\Python_installation\lib\site-packages\tensorflow\python\framework\ops.py", line 606, in eval
    return _eval_using_default_session(self, feed_dict, self.graph, session)
  File "C:\Research\Python_installation\lib\site-packages\tensorflow\python\framework\ops.py", line 3928, in _eval_using_default_session
    return session.run(tensors, feed_dict)
  File "C:\Research\Python_installation\lib\site-packages\tensorflow\python\client\session.py", line 789, in run
    run_metadata_ptr)
  File "C:\Research\Python_installation\lib\site-packages\tensorflow\python\client\session.py", line 968, in _run
    np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
  File "C:\Research\Python_installation\lib\site-packages\numpy\core\numeric.py", line 531, in asarray
    return array(a, dtype, copy=False, order=order)
ValueError: could not broadcast input array from shape (20,310,310) into shape (20)
I think the issue is with 'segmented_lungs=segmented_lungs_fill-segmented_lung'.
In the working example,
segmented_lungs=[cv2.resize(each_slice,(IMG_PX_SIZE,IMG_PX_SIZE)) for each_slice in patient_pixels]
Please help me solve this; I have been unable to proceed for some time. If anything is not clear, please let me know.
Following is the whole code that I had tried:
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import dicom
import os
import scipy.ndimage
import matplotlib.pyplot as plt
import cv2
import math
import tensorflow as tf
from skimage import measure, morphology
from mpl_toolkits.mplot3d.art3d import Poly3DCollection

# Some constants
data_dir = '../../CT_SCAN_IMAGE_SET/IMAGES/'
patients = os.listdir(data_dir)
labels_df = pd.read_csv('../../CT_SCAN_IMAGE_SET/stage1_labels.csv', index_col=0)
patients.sort()
print (labels_df.head())

# Image pixel array watching
for patient in patients[:10]:
    # label is to get the label of the patient. This is what is done in the .get_value method.
    label = labels_df.get_value(patient, 'cancer')
    path = data_dir + patient
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    # You have dicom files and they have attributes.
    slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))
    print (len(slices), slices[0].pixel_array.shape)

# If u need to see many slices and resize the large pixelated 2D images into 150*150 pixelated images
IMG_PX_SIZE = 50
HM_SLICES = 20
for patient in patients[:1]:
    # label is to get the label of the patient. This is what is done in the .get_value method.
    label = labels_df.get_value(patient, 'cancer')
    path = data_dir + patient
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    # You have dicom files and they have attributes.
    slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))
    # This shows the pixel arrayed image related to the second slice of each patient
    # subplot
    fig = plt.figure()
    for num, each_slice in enumerate(slices[:16]):
        print (num)
        y = fig.add_subplot(4, 4, num+1)
        # down sizing everything. Resize the image size as their pixel values are 512*512
        new_image = cv2.resize(np.array(each_slice.pixel_array), (IMG_PX_SIZE, IMG_PX_SIZE))
        y.imshow(new_image)
    plt.show()
print (len(patients))

###################################################################################
def get_pixels_hu(slices):
    image = np.array([s.pixel_array for s in slices])
    # Convert to int16 (from sometimes int16),
    # should be possible as values should always be low enough (<32k)
    image = image.astype(np.int16)
    # Set outside-of-scan pixels to 0
    # The intercept is usually -1024, so air is approximately 0
    image[image == -2000] = 0
    # Convert to Hounsfield units (HU)
    for slice_number in range(len(slices)):
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)
    return np.array(image, dtype=np.int16)

# The next problem is each patient has got a different number of slices. This is a performance issue.
# Take the slices and put that into a list of slices and chunk that list of slices into a fixed number of
# chunks of slices and average those chunks.

# yield is like 'return'. It returns a generator
def chunks(l, n):
    for i in range(0, len(l), n):
        #print ('Inside yield')
        #print (i)
        yield l[i:i+n]

def mean(l):
    return sum(l) / len(l)

def largest_label_volume(im, bg=-1):
    vals, counts = np.unique(im, return_counts=True)
    counts = counts[vals != bg]
    vals = vals[vals != bg]
    if len(counts) > 0:
        return vals[np.argmax(counts)]
    else:
        return None

def segment_lung_mask(image, fill_lung_structures=True):
    # not actually binary, but 1 and 2.
    # 0 is treated as background, which we do not want
    binary_image = np.array(image > -320, dtype=np.int8) + 1
    labels = measure.label(binary_image)
    # Pick the pixel in the very corner to determine which label is air.
    # Improvement: Pick multiple background labels from around the patient
    # More resistant to "trays" on which the patient lays cutting the air
    # around the person in half
    background_label = labels[0, 0, 0]
    # Fill the air around the person
    binary_image[background_label == labels] = 2
    # Method of filling the lung structures (that is superior to something like
    # morphological closing)
    if fill_lung_structures:
        # For every slice we determine the largest solid structure
        for i, axial_slice in enumerate(binary_image):
            axial_slice = axial_slice - 1
            labeling = measure.label(axial_slice)
            l_max = largest_label_volume(labeling, bg=0)
            if l_max is not None:  # This slice contains some lung
                binary_image[i][labeling != l_max] = 1
    binary_image -= 1  # Make the image actual binary
    binary_image = 1 - binary_image  # Invert it, lungs are now 1
    # Remove other air pockets inside body
    labels = measure.label(binary_image, background=0)
    l_max = largest_label_volume(labels, bg=0)
    if l_max is not None:  # There are air pockets
        binary_image[labels != l_max] = 0
    return binary_image

# Loading the files
# Load the scans in the given folder path
def load_scan(path):
    slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
    slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))
    try:
        slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except:
        slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
    for s in slices:
        s.SliceThickness = slice_thickness
    return slices

def resample(image, scan, new_spacing=[1,1,1]):
    # Determine current pixel spacing
    spacing = np.array([scan[0].SliceThickness] + scan[0].PixelSpacing, dtype=np.float32)
    resize_factor = spacing / new_spacing
    new_real_shape = image.shape * resize_factor
    new_shape = np.round(new_real_shape)
    real_resize_factor = new_shape / image.shape
    new_spacing = spacing / real_resize_factor
    print ('Resize factor')
    print (real_resize_factor)
    image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')
    print ('shape')
    print (image.shape)
    return image, new_spacing

'''def chunks(l,n):
    for i in range(0,len(l),n):
        #print ('Inside yield')
        #print (i)
        yield l[i:i+n]

def mean(l):
    return sum(l)/len(l)'''

# processing data
def process_data(slices, patient, labels_df, img_px_size, hm_slices):
    #for patient in patients[:10]:
    # label is to get the label of the patient. This is what is done in the .get_value method.
    try:
        label = labels_df.get_value(patient, 'cancer')
        print ('label process data')
        print (label)
        #path=data_dir+patient
        #slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
        # You have dicom files and they have attributes.
        slices.sort(key=lambda x: float(x.ImagePositionPatient[2]))
        # This shows the pixel arrayed image related to the second slice of each patient
        patient_pixels = get_pixels_hu(slices)
        print ('shape hu')
        print (patient_pixels.shape)
        segmented_lungs2, spacing = resample(patient_pixels, slices, [1,1,1])
        #print ('Pix shape')
        #print (segmented_lungs2.shape)
        #segmented_lungs=np.array(segmented_lungs2).tolist()
        new_slices = []
        segmented_lung = segment_lung_mask(segmented_lungs2, False)
        segmented_lungs_fill = segment_lung_mask(segmented_lungs2, True)
        segmented_lungs = segmented_lungs_fill - segmented_lung
        #print ('length of segmented lungs')
        #print (len(segmented_lungs))
        #print ('Shape of segmented lungs......................................')
        #print (segmented_lungs.shape)
        #print ('hiiii')
        #segmented_lungs=[cv2.resize(each_slice,(IMG_PX_SIZE,IMG_PX_SIZE)) for each_slice in segmented_lungs3]
        #print ('bye')
        #print ('length of slices')
        #print (len(slices))
        #print ('shape of slices')
        #print (slices.shape)
        #print (each_slice.pixel_array)
        # This method returns the smallest integer not less than x.
        chunk_sizes = math.ceil(len(segmented_lungs) / HM_SLICES)
        print ('chunk size ')
        print (chunk_sizes)
        for slice_chunk in chunks(segmented_lungs, chunk_sizes):
            slice_chunk = list(map(mean, zip(*slice_chunk)))  # list - []
            #print (slice_chunk)
            new_slices.append(slice_chunk)
        print(len(segmented_lungs), len(new_slices))
        if len(new_slices) == HM_SLICES-1:
            new_slices.append(new_slices[-1])
        if len(new_slices) == HM_SLICES-2:
            new_slices.append(new_slices[-1])
            new_slices.append(new_slices[-1])
        if len(new_slices) == HM_SLICES-3:
            new_slices.append(new_slices[-1])
            new_slices.append(new_slices[-1])
            new_slices.append(new_slices[-1])
        if len(new_slices) == HM_SLICES+2:
            new_val = list(map(mean, zip(*[new_slices[HM_SLICES-1], new_slices[HM_SLICES],])))
            del new_slices[HM_SLICES]
            new_slices[HM_SLICES-1] = new_val
        if len(new_slices) == HM_SLICES+1:
            new_val = list(map(mean, zip(*[new_slices[HM_SLICES-1], new_slices[HM_SLICES],])))
            del new_slices[HM_SLICES]
            new_slices[HM_SLICES-1] = new_val
        if len(new_slices) == HM_SLICES+3:
            new_val = list(map(mean, zip(*[new_slices[HM_SLICES-1], new_slices[HM_SLICES],])))
            del new_slices[HM_SLICES]
            new_slices[HM_SLICES-1] = new_val
        print('LENGTH ', len(segmented_lungs), len(new_slices))
    except Exception as e:
        # again, some patients are not labeled, but JIC we still want the error if something
        # else is wrong with our code
        print(str(e))
    #print(len(new_slices))
    if label == 1: label = np.array([0, 1])
    elif label == 0: label = np.array([1, 0])
    return np.array(new_slices), label

# Some constants
#data_dir = '../../CT_SCAN_IMAGE_SET/IMAGES/'
#patients = os.listdir(data_dir)
#labels_df=pd.read_csv('../../CT_SCAN_IMAGE_SET/stage1_labels.csv',index_col=0)
#patients.sort()
#print (labels_df.head())
much_data = []
much_data2 = []
for num, patient in enumerate(patients):
    if num % 100 == 0:
        print (num)
    try:
        slices = load_scan(data_dir + patients[num])
        img_data, label = process_data(slices, patients[num], labels_df, IMG_PX_SIZE, HM_SLICES)
        much_data.append([img_data, label])
        #much_data2.append([processed, label])
    except:
        print ('This is unlabeled data')
np.save('muchdata-{}-{}-{}.npy'.format(IMG_PX_SIZE, IMG_PX_SIZE, HM_SLICES), much_data)
#np.save('muchdata-{}-{}-{}.npy'.format(IMG_PX_SIZE,IMG_PX_SIZE,HM_SLICES),much_data2)

IMG_SIZE_PX = 50
SLICE_COUNT = 20
n_classes = 2
batch_size = 10

x = tf.placeholder('float')
y = tf.placeholder('float')
keep_rate = 0.8

def conv3d(x, W):
    return tf.nn.conv3d(x, W, strides=[1,1,1,1,1], padding='SAME')

def maxpool3d(x):
    # size of window / movement of window as you slide about
    return tf.nn.max_pool3d(x, ksize=[1,2,2,2,1], strides=[1,2,2,2,1], padding='SAME')

def convolutional_neural_network(x):
    # 5 x 5 x 5 patches, 1 channel, 32 features to compute.
    weights = {'W_conv1': tf.Variable(tf.random_normal([3,3,3,1,32])),
               # 5 x 5 x 5 patches, 32 channels, 64 features to compute.
               'W_conv2': tf.Variable(tf.random_normal([3,3,3,32,64])),
               # 64 features
               'W_fc': tf.Variable(tf.random_normal([54080,1024])),
               'out': tf.Variable(tf.random_normal([1024, n_classes]))}
    biases = {'b_conv1': tf.Variable(tf.random_normal([32])),
              'b_conv2': tf.Variable(tf.random_normal([64])),
              'b_fc': tf.Variable(tf.random_normal([1024])),
              'out': tf.Variable(tf.random_normal([n_classes]))}
    # image X, image Y, image Z
    x = tf.reshape(x, shape=[-1, IMG_SIZE_PX, IMG_SIZE_PX, SLICE_COUNT, 1])
    conv1 = tf.nn.relu(conv3d(x, weights['W_conv1']) + biases['b_conv1'])
    conv1 = maxpool3d(conv1)
    conv2 = tf.nn.relu(conv3d(conv1, weights['W_conv2']) + biases['b_conv2'])
    conv2 = maxpool3d(conv2)
    fc = tf.reshape(conv2, [-1, 54080])
    fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['b_fc'])
    fc = tf.nn.dropout(fc, keep_rate)
    output = tf.matmul(fc, weights['out']) + biases['out']
    return output

much_data = np.load('muchdata-50-50-20.npy')
# If you are working with the basic sample data, use maybe 2 instead of 100 here... you don't have enough data to really do this
train_data = much_data[:-4]
validation_data = much_data[-4:]

def train_neural_network(x):
    print ('..........1.........')
    prediction = convolutional_neural_network(x)
    print ('..........2.........')
    #cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction,y) )
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    print ('..........3.........')
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(cost)
    print ('..........4.........')
    hm_epochs = 20
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        successful_runs = 0
        total_runs = 0
        print ('..........5.........')
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for data in train_data:
                total_runs += 1
                try:
                    X = data[0]
                    Y = data[1]
                    _, c = sess.run([optimizer, cost], feed_dict={x: X, y: Y})
                    epoch_loss += c
                    successful_runs += 1
                except Exception as e:
                    # I am passing for the sake of notebook space, but we are getting 1 shaping issue from one
                    # input tensor. Not sure why, will have to look into it. Guessing it's
                    # one of the depths that doesn't come to 20.
                    pass
                    #print(str(e))
            print ('..........6.........')
            print('Epoch', epoch+1, 'completed out of', hm_epochs, 'loss:', epoch_loss)
        print ('..........7.........')
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: [i[0] for i in validation_data], y: [i[1] for i in validation_data]}))
        print('Done. Finishing accuracy:')
        print('Accuracy:', accuracy.eval({x: [i[0] for i in validation_data], y: [i[1] for i in validation_data]}))
        print('fitment percent:', successful_runs / total_runs)
        print (x)

# Run this locally:
train_neural_network(x)
P.S.: the resample() and segment_lung_mask() methods can be found in link 1.
For training you have
for data in train_data:
    total_runs += 1
    try:
        X = data[0]
        Y = data[1]
        _, c = sess.run([optimizer, cost], feed_dict={x: X, y: Y})
So x and y are, respectively, the first two elements of a single row of train_data.
However, when calculating the accuracy you have
print('Accuracy:',accuracy.eval({x:[i[0] for i in validation_data], y:[i[1] for i in validation_data]}))
So x is the first element of all rows of validation_data, which gives it dimensions of (20,310,310), which can't be broadcast to a placeholder of dimension (20). Ditto for y. (Broadcasting means that if you gave it a tensor of dimensions (20, 310) it would know to take each of the 310 columns and feed it to the placeholder separately. It can't figure out what to do with a tensor of (20, 310, 310).)
Incidentally, when you declare your placeholders it's a good idea to specify their dimensions, using None for the dimension depending on the number of separate examples. This way the program can warn you when dimensions don't match up.
The error message seems to indicate that the placeholder tensors x and y have not been defined correctly. They should have the same shape as the input values X = data[0] and Y = data[1], such as
x = tf.placeholder(shape=[20,310,310], dtype=tf.float32)
# if y is a scalar:
y = tf.placeholder(shape=[], dtype=tf.float32)
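A fuller sketch that also leaves the batch dimension open with None, as recommended above (TF 1.x style to match the question; the per-example shape comes from the error message and is illustrative):
import tensorflow as tf

# None lets the same placeholder accept any number of examples, and TensorFlow
# will now fail fast with a clear message if a fed array has the wrong shape.
x = tf.placeholder(tf.float32, shape=[None, 20, 310, 310], name='x')
y = tf.placeholder(tf.float32, shape=[None, 2], name='y')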

Python numpy ValueError when I load image data and transfer it to an array

I couldn't find the solution. My image shape is 128*128*3; it has three channels, but it still causes this error:
File "E:/ML/keras_test/vgg.py", line 30, in load_data
    data[i,:,:,:] = arr
ValueError: could not broadcast input array from shape (128,128) into shape (128,128,3)
My code is as below:
def load_data(path):
    data = np.empty((12755,128,128,3), dtype="uint8")
    label = np.empty((12755,), dtype="uint8")
    imgs = []
    imgs_name = []
    for each_person in os.listdir(path):
        temp = os.path.join(path, each_person)
        for each_image in os.listdir(temp):
            imgs.append(temp + "\\" + each_image)
            imgs_name.append(each_image)
    num = len(imgs)
    for i in range(num):
        img = Image.open(imgs[i])
        arr = np.asarray(img, dtype="uint8")
        print arr.shape
        data[i,:,:,:] = arr
        label[i] = int(imgs_name[i].split('.')[0])
    print 'load_data is ok!' + str(data.shape[0])
    return data, label
The shapes in the error message tell the story: arr has shape (128,128), so at least one of your images is single-channel (grayscale), and a (128,128) array cannot be broadcast into a (128,128,3) slot of data. Either force every image to three channels when you load it, or, if you actually want grayscale data, allocate data with shape (12755, 128, 128) (or flatten each image into a row of a 12755 x 16384 array) and convert each RGB image down to one channel.
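For example, the first option could look like this inside the loading loop (a sketch, reusing imgs, data and i from the question; PIL's convert('RGB') expands grayscale files to three channels):
img = Image.open(imgs[i]).convert('RGB')  # force 3 channels even for grayscale files
arr = np.asarray(img, dtype="uint8")      # now always (128, 128, 3)
data[i, :, :, :] = arr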

TypeError message in Python's PIL (v2.6): integer expected, got float

It's my first time in the forum; I hope I'm specific enough.
Using ImageChops inside PIL, I'm trying to multiply two images (both mode="L"), but I always get the same error message. I've looked everywhere but couldn't find anything useful. I'd greatly appreciate any helpful ideas!
The relevant part of the code is attached.
def point(self, f, searchImage, technique):  # technique - inpaint or bicubic
    dimx, dimy = searchImage.size
    reader = csv.reader(f)
    for line in reader:  # f.readlines():
        coord = line
        print coord
        if searchImage.size[0] > float(coord[0])+95.5 and searchImage.size[1]\
           > float(coord[1])+95.5:
            box = (float(coord[0])-93.5, float(coord[1])-93.5,\
                   float(coord[0])+95.5, float(coord[1])+95.5)  # left upper right
        elif searchImage.size[0] < float(coord[0])+95.5 and searchImage.size[1]\
             > float(coord[1])+95.5:
            box = (float(coord[0])-93.5, float(coord[1])-93.5,\
                   searchImage.size[0]-0.5, float(coord[1])+95.5)  # size of box
            # depends on pixel size. A pixel size of 14 micrometer results in a
            # cross size of 189 pixels
        else:
            box = (float(coord[0])-93.5, float(coord[1])-93.5,\
                   float(coord[0])+95.5, searchImage.size[1]-0.5)
        box = (math.floor(box[0]), math.floor(box[1]), math.floor(box[2]),\
               math.floor(box[3]))
        searchCrop = searchImage.crop(box)
        c_x = int(float(coord[1]))
        c_y = int(float(coord[0]))
        abst_y = c_x - int(math.floor(box[1])) - 1  # x shift
        center = num.asarray(searchImage)[c_x, c_y]
        if center == 0:
            center = center + 0.00001  # to avoid division by zero
        val = [num.asarray(searchImage)[c_x-1, c_y+1], num.asarray(searchImage)\
               [c_x-1, c_y-1], num.asarray(searchImage)[c_x+1, c_y-1],\
               num.asarray(searchImage)[c_x+1, c_y+1]]  # ERDAS upper right,
        # upper left, lower left, lower right
        val_dict = {0: 1, 1: -1, 2: -1, 3: 1}
        flag = val_dict[val.index(min(val))]
        if float(min(val))/center > 2. or min(val) > 100:
            flag = 0
        newima = num.zeros((searchCrop.size[1], searchCrop.size[0]),\
                           dtype="float")
        Ayo = num.array(int(searchCrop.size[0])*[255])
        Ay = num.array((abst_y + flag)*[255] + 3*[0] + ((int(searchCrop.size[0]\
             )-3-abst_y)-flag)*[255])
        Ax = num.array(int(searchCrop.size[0])*[0])
        Kx = num.array(3*[Ayo] + ((int(searchCrop.size[1])-9)/2+flag)*[Ay] + 3*[Ax]\
             + ((int(searchCrop.size[1])-9)/2-flag)*[Ay] + 3*[Ayo])
        Kxlist = list(itertools.chain(*Kx))
        i = 0
        for y in range(int(searchCrop.size[1])):
            for x in range(int(searchCrop.size[0])):
                newima[y, x] = Kxlist[i+y+x]
            i = i+x
        kernel = Image.fromarray(newima)
        kernel = kernel.convert(mode="L")
        # -----
        modified = ImageChops.multiply(searchCrop, kernel)  # Results in an image
        # where the pixels along the cross axes will get a value of 0
        # ---
The error message is the following:
File "D:\GIS_dbase\Data\hma_cci\hexagon\KH9_Python\interpolate_cross.py", line 58, in crossInterpolation
    filledImage = self.p_model.point(f, searchImage, method)
File "D:\GIS_dbase\Data\hma_cci\hexagon\KH9_Python\interpolate_cross.py", line 207, in point
    modified = ImageChops.multiply(searchCrop,kernel) # Results in an image where the pixels along the cross axes will get a value of 0
File "C:\Python27\lib\site-packages\PIL\ImageChops.py", line 119, in multiply
    image1.load()
File "C:\Python27\lib\site-packages\PIL\Image.py", line 1730, in load
    self.im = self.im.crop(self.__crop)
TypeError: integer argument expected, got float
The issue is that PIL's crop method takes a tuple of four integer values, but you are passing it floats. This should work:
box = tuple([int(math.floor(x)) for x in box])
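For example (a sketch with illustrative coordinates, reusing searchImage from the question):
import math
box = (6.5, 6.5, 194.5, 194.5)
box = tuple([int(math.floor(v)) for v in box])  # -> (6, 6, 194, 194)
searchCrop = searchImage.crop(box)              # crop now receives integers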

Chromatic Aberration Estimation in python

Hi, this code estimates chromatic aberration in an image by estimating the center of distortion (x, y) and the magnitude of distortion (alpha) between the red and green channels, and also between the blue and green channels. I have an error in the warpRegion function:
File "CAfeb.py", line 217, in warpRegion
reg_w = sp.interpolate.interp2d(yrampf,xrampf,Cwarp, yramp1f, xramp1f,'cubic');
File "/usr/lib/python2.7/dist-packages/scipy/interpolate/interpolate.py", line 109, in __init__
'quintic' : 5}[kind]
TypeError: unhashable type: 'numpy.ndarray'
Below is the complete code. Any help will be greatly appreciated. Thank you, Areej.
import math
from PIL import Image
import numpy as np
from decimal import Decimal
import scipy as sp
from scipy import interpolate
from scitools.std import ndgrid
from scipy import ogrid, sin, mgrid, ndimage, array

def ldimage():
    # load image
    global im
    im = Image.open("/home/areej/Desktop/mandril_color.tif")

def analyzeCA(mode, im):
    n_regions = 10;
    reg_size = [300, 300];
    overlap = 0.5;
    levels = 9;
    steps = 2;
    edge_width = 10;
    hist_sz = 128;
    # alpha_1 and alpha_2 are assumed to be between these values
    w_data = [0.9985, 1.0015];
    reg_list = []
    # creating an array of pixels so that we can access them
    pix = im.load()
    #
    # Analyze full image
    if mode == 'full':
        print "Doing a full analysis"
        # mx_shift is the third argument in 'full' mode
        mx_shift = n_regions;
        # [ydim,xdim,zdim]= size(im);
        ydim = im.size[0]
        xdim = im.size[1]
        zdim = 3
        print "Image dimensions: [ydim, xdim, zdim]= "+str([ydim,xdim,zdim])
        global alpha_mx, alpha_mn
        alpha_mx = 1 + 4*mx_shift / math.sqrt( xdim*xdim + ydim*ydim );
        alpha_mn = 1.0/alpha_mx;
        print "alpha_mx= "+str(alpha_mx)
        print "alpha_mn= "+str(alpha_mn)
        # recompute alpha_1 and alpha_2 to be between
        # these new values
        w_data = [alpha_mn, alpha_mx];
        ew = edge_width;
        # take the image minus a ew-wide edge
        roi = [ew+1, xdim-ew, ew+1, ydim-ew];
        print "edge_width= "+str(ew)
        print "roi= "+str(roi)
        # Analyze blue to green chromatic aberration
        bg_params = parameterSearch( im, [3, 2], roi, ew, hist_sz, w_data);
        # Analyze red to green chromatic aberration
        rg_params = parameterSearch( im, [1, 2], roi, ew, hist_sz, w_data );
    elif mode == 'reg':
        print "we should do a regional analysis here"
    else:
        print "unsupported call"

#def estimateCARegions( im, [3, 2], reg_list, settings ):

def parameterSearch( im, colour_space, roi, ew, hist_sz, w_data):
    # levels is number of iterations
    levels = 8;
    steps = 2;
    # [ydim,xdim,zdim] = size(im);
    ydim = im.size[0]
    xdim = im.size[1]
    zdim = 3
    x_data = [1, xdim];
    y_data = [1, ydim];
    xlim = x_data;
    ylim = y_data;
    zlim = w_data;
    # work out which of height and width is the bigger
    dim = max(xdim, ydim)
    print "The highest dimension is : "+str(dim)
    # check that roi falls within expected boundaries
    if ((roi[0] <= ew) or (roi[1] > xdim-ew) or (roi[2] <= ew) or (roi[3] > ydim-ew)):
        print "ROI is too close to image edges"
        return -1  # TODO: terminate here with an error
    # Get image regions
    source = im.split()
    Cfixed = source[2]
    Cwarp = source[1]
    # [ydim,xdim,zdim] = size(im);
    ydimCwarp = Cwarp.size[0]
    xdimCwarp = Cwarp.size[1]
    print 'xdimCwarp'+str(xdimCwarp)
    roi_pad = [roi[0]-ew, roi[1]+ew, roi[2]-ew, roi[3]+ew];
    for levels in range(1, 8):
        # Guess at a center and then compute best warp
        # user defined function linear_space used to generate linearly spaced vectors
        x_coords = np.linspace(0, 511, steps+2)
        y_coords = np.linspace(0, 511, steps+2)
        z_coords = np.linspace(alpha_mn, alpha_mx, steps+2)
        step_x = (xlim[1]-xlim[0])/(steps+1)
        start_x = xlim[0]+step_x
        end_x = xlim[1]-step_x+0.5
        step_y = (ylim[1]-ylim[0])/(steps+1)
        start_y = ylim[0]+step_y
        end_y = ylim[1]-step_y+0.5
        step_z = (zlim[1]-zlim[0])/(steps+1)
        start_z = zlim[0]+step_z
        fudge_z = step_z/2.0
        end_z = zlim[1]-step_z+fudge_z
        # Do not include end points in search;
        centers_x, centers_y, warps = np.mgrid[start_x:end_x:step_x, start_y:end_y:step_y, start_z:end_z:step_z]
        centers_x = centers_x.flatten()
        centers_y = centers_y.flatten()
        warps = warps.flatten()
        mi = np.zeros(centers_x.size)
        for k in range(0, centers_x.size):
            cx = centers_x[k]
            cy = centers_y[k]
            wz = warps[k]
            # Warp the region
            temp_im = warpRegion(Cwarp, roi_pad, [cx, cy, wz])
            # correlation
            mi[k] = np.corrcoef(Cfixed, temp_im)
        # Now pick the best quadrant
        v, max_ix = math.max(mi)
        ix, jx, kx = arrayInd(mi.size, max_ix);
        ## The coordinates of err are off by 1 from x_coords and y_coords because
        ## we did not include the end point
        xlim = x_coords([jx, jx+2]);
        ylim = y_coords([ix, ix+2]);
        zlim = z_coords([kx, kx+2]);
        cx = math.mean(xlim);
        cy = math.mean(ylim);
        wz = math.mean(zlim);
        print "x= "+str(cx)
        print "y= "+str(cy)
        print "z= "+str(wz)

def warpRegion(Cwarp, roi_pad, (cx, cy, wz)):
    # Unpack region indices
    sx, ex, sy, ey = roi_pad
    xramp, yramp = np.mgrid[sx:ex+1, sy:ey+1]
    xrampc = xramp - cx;
    yrampc = yramp - cy;
    xramp1 = 1/wz*xrampc;
    yramp1 = 1/wz*yrampc;
    xrampf = xrampc.flatten()
    yrampf = yrampc.flatten()
    xramp1f = xramp1.flatten()
    yramp1f = yramp1.flatten()
    reg_w = sp.interpolate.interp2d(yrampf, xrampf, Cwarp, yramp1f, xramp1f, 'cubic');

ldimage()
analyzeCA('full', im)
As DSM correctly states, this is not the correct calling syntax for interp2d (see scipy.interpolate.interp2d). If you read the calling syntax and then your error message again (or the module itself, whichever you prefer), you will recognize that the extra positional arguments push one of your arrays into the kind parameter, so a NumPy array ends up being used as a dictionary key, which naturally throws an exception.
I think what you are trying to do is an interpolation of the grid given by the arrays xrampf, yrampf at the new positions xramp1f, yramp1f. The SciPy documentation gives exactly this usage example, which translates to your code as follows:
interp_func = sp.interpolate.interp2d(yrampf, xrampf, Cwarp, kind='cubic')
reg_w = interp_func(yramp1f, xramp1f)
I hope that is what you intended.
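A self-contained sketch of the same build-then-evaluate pattern (assuming an older SciPy where interp2d is available; it has since been deprecated):
import numpy as np
from scipy import interpolate

x = np.arange(5)          # grid x-coordinates
y = np.arange(4)          # grid y-coordinates
z = np.random.rand(4, 5)  # values on the grid, shape (len(y), len(x))

f = interpolate.interp2d(x, y, z, kind='cubic')        # build the interpolator first
z_new = f(np.linspace(0, 4, 9), np.linspace(0, 3, 7))  # then evaluate at new points
print(z_new.shape)        # (7, 9)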
Kind regards
