Multi-Class and Multi-Label Simple Model - Python

I have been working for a few weeks to complete a project. The project seems simple, but there are a lot of moving parts. So:
- Create a simple CNN model that takes 4 input images of a tree. The images are the trunk, the branches, the leaves, and the roots of the tree.
- There are 8 different trees.
- The goal is to feed the model the four images and have it detect which tree it is.
- I must create the raw data from Google Images (search).
As I work through this problem for a school project, I am struggling with how to pre-process the images for one-hot labels and multiple classes, and then ensure my model architecture is correct.
# import the necessary packages
from keras.models import Model
from keras.layers import Input, Conv2D, Dense, concatenate
# define four inputs (different photos of the same tree)
inputA = Input(shape=(308, 308, 1))  # Trunk
inputB = Input(shape=(308, 308, 1))  # Branch
inputC = Input(shape=(308, 308, 1))  # Leaves
inputD = Input(shape=(308, 308, 1))  # Roots
# the first branch operates on the first input
a = Conv2D(16, (2, 2), activation='relu')(inputA)
a = Conv2D(16, (2, 2), activation='relu')(a)
a = Model(inputs=inputA, outputs=a)
# the second branch operates on the second input
b = Conv2D(16, (2, 2), activation='relu')(inputB)
b = Conv2D(16, (2, 2), activation='relu')(b)
b = Model(inputs=inputB, outputs=b)
# the third branch operates on the third input
c = Conv2D(16, (2, 2), activation='relu')(inputC)
c = Conv2D(16, (2, 2), activation='relu')(c)
c = Model(inputs=inputC, outputs=c)
# the fourth branch operates on the fourth input
d = Conv2D(16, (2, 2), activation='relu')(inputD)
d = Conv2D(16, (2, 2), activation='relu')(d)
d = Model(inputs=inputD, outputs=d)
# combine the output of the four branches
combined = concatenate([a.output, b.output, c.output, d.output])
# apply an FC layer and then a softmax classification on the combined outputs
z = Dense(128, activation="relu")(combined)
z = Dense(4, activation="softmax")(z)
# our model accepts the inputs of the four branches and outputs one softmax vector
model = Model(inputs=[a.input, b.input, c.input, d.input], outputs=z)
model.summary()
model.compile(loss='categorical_crossentropy', optimizer="adam", metrics=['accuracy'])
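For context, a multi-input functional model like this is trained by passing one array per input branch. A minimal sketch (the array names here are hypothetical placeholders, not from the original post):

# hypothetical arrays: one batch of images per branch, plus one-hot labels
model.fit([trunk_imgs, branch_imgs, leaf_imgs, root_imgs], labels, epochs=10, batch_size=32)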
This is how I have been trying to one-hot label the images during pre-processing.
import os
import cv2
import numpy as np
from random import shuffle
from tqdm import tqdm
import tensorflow as tf
import matplotlib.pyplot as plt
%matplotlib inline

train_data = ""  # path to the training images
test_data = ""   # path to the test images

def one_hot_label(img):
    # the label is taken from the filename, e.g. "Trunk.1.jpg" -> "Trunk"
    label = img.split(".")[0]
    if label == "Trunk":
        ohl = np.array([1, 0, 0, 0])
    elif label == "Branch":
        ohl = np.array([0, 1, 0, 0])
    elif label == "Leaf":
        ohl = np.array([0, 0, 1, 0])
    elif label == "Root":
        ohl = np.array([0, 0, 0, 1])
    return ohl

def train_data_with_label():
    train_images = []
    for i in tqdm(os.listdir(train_data)):
        path = os.path.join(train_data, i)
        # note: IMREAD_COLOR yields 3 channels, while the model above declares shape (308, 308, 1)
        img = cv2.imread(path, cv2.IMREAD_COLOR)
        img = cv2.resize(img, (308, 308))
        train_images.append([np.array(img), one_hot_label(i)])
    shuffle(train_images)
    return train_images

def test_data_with_label():
    test_images = []
    for i in tqdm(os.listdir(test_data)):
        path = os.path.join(test_data, i)
        img = cv2.imread(path, cv2.IMREAD_COLOR)
        img = cv2.resize(img, (308, 308))
        test_images.append([np.array(img), one_hot_label(i)])
    shuffle(test_images)
    return test_images
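For reference, a minimal sketch (assuming the functions above) of unpacking those [image, label] pairs into NumPy arrays that fit() accepts:

train_images = train_data_with_label()
X = np.array([pair[0] for pair in train_images]) / 255.0  # (N, 308, 308, 3), scaled to [0, 1]
y = np.array([pair[1] for pair in train_images])          # (N, 4) one-hot labels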
This is a huge post. I know I am a mess and really struggling. When I train on the data, the accuracy is so low (around 18%) that I know something is wrong.
If anyone can help, please! It doesn't have to be the exact answer; even a direction is helpful.
I am grateful for this site and the people on it.
Thanks
Noob Coder Dyl

Related

Fix long text and distorted text in Manim

I have a long text in Manim and it renders very distorted; in fact, it is not readable when it renders.
Can you fix it, please?
# fast test for manim
from manim import *
from manim_ml.neural_network.layers import FeedForwardLayer
from manim_ml.neural_network.neural_network import NeuralNetwork

config.pixel_height = 900
config.pixel_width = 1400
config.frame_height = 7.0
config.frame_width = 7.0

class test(Scene):
    def construct(self):
        # Make the text
        NN_text = """
import keras
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
n_cols = concrete_data.shape[1]
model.add(Dense(5, activation='relu', input_shape=(n_cols,)))
model.add(Dense(5, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(predictors, target)
predictions = model.predict(test_data)
"""
        desc = Text(NN_text, font_size=7,
                    t2c={"import": YELLOW, "from": RED, "add": GREEN, "model": BLUE,
                         "compile": PURPLE, "fit": YELLOW, "predict": RED},
                    disable_ligatures=True)
        desc = desc.next_to(ORIGIN)
        self.add(desc)
        self.play(Write(desc))
[Screenshot of the rendered file: the text is distorted and unreadable.]
To get proper text with nice alignment, Manim has a dedicated Code mobject for handling code text:
NN_text = """
import keras
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
n_cols = concrete_data.shape[1]
model.add(Dense(5, activation='relu', input_shape=(n_cols,)))
model.add(Dense(5, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(predictors, target)
predictions = model.predict(test_data)
"""
class code(Scene):
    def construct(self):
        codeText = Code(
            code=NN_text,
            tab_width=4,
            background_stroke_width=1,
            background_stroke_color=WHITE,
            insert_line_no=False,
            style=Code.styles_list[15],
            background="window",
            language="python",
            font="consolas",
            font_size=18,
        )
        self.play(Write(codeText), run_time=5)
        self.wait()
        for obj in codeText[2]:
            self.play(Wiggle(obj))
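The scene can then be rendered from the command line with, for example, manim -pql file.py code, where -p previews the result and -ql renders at low quality.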

InvalidArgumentError while Building a deep-CNN [duplicate]

This question already has an answer here:
InvalidArgumentError: Received a label value of 8825 which is outside the valid range of [0, 8825) SEQ2SEQ model
(1 answer)
Closed last year.
I'm new to TensorFlow and Python...
I'm trying to build a deep CNN for cell image classification on the HEp-2 dataset. The dataset consists of 13596 images, and I'm using 8701 images as my training data for the CNN. I also have a .CSV file that consists of image IDs and their cell types. I extracted the content and am using the image IDs from the .CSV file as my labels. Both the training data and the image IDs have been converted with .astype('float32'). But somehow I'm getting an InvalidArgumentError, and I have no idea what's going on in there.
I've posted my code and the error; any tips or help would be highly appreciated. Thank you in advance :)
I'm new to Stack Overflow as well. Sorry for my messy formatting.
My CODE:
from PIL import Image
import glob
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
from keras.optimizers import SGD

def extract_labels(image_names, Original_Labels):
    temp = np.array([image.split('.')[0] for image in image_names])
    temp2 = np.array([j[0] for i in temp for j in Original_Labels if(int(i) == int(j[0]))])
    return temp2

def get_Labels():
    df = pd.read_csv('gt_training.csv', sep=',')
    labels = np.asarray(df)
    path = 'path..../training/'
    image_names_train = [f for f in os.listdir(path) if os.path.splitext(f)[-1] == '.png']
    return labels, image_names_train

Train_images = glob.glob('path.../training/*.png')
train_data = np.array([np.array(Image.open(fname)) for fname in Train_images])
train_data = train_data.astype('float32')
train_data /= 255

# getting labels from the .csv file for the training data
labels, image_names_train = get_Labels()
train_labels = extract_labels(image_names_train, labels)
train_labels = train_labels.astype('float32')
print(train_labels.shape)

train_data = train_data.reshape(train_data.shape[0], 78, 78, 1)  # reshaping into 4-dim
input_shape = (78, 78, 1)  # 1 because the provided dataset is in grey scale

# Adding pooling and dense layers to a non-optimized empty CNN
model = Sequential()
model.add(Conv2D(6, kernel_size=(7, 7), activation=tf.nn.tanh, input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(16, kernel_size=(4, 4), activation=tf.nn.tanh))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Conv2D(32, kernel_size=(3, 3), activation=tf.nn.tanh))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(Flatten())
model.add(Dense(150, activation=tf.nn.tanh, kernel_regularizer=keras.regularizers.l2(0.00005)))
model.add(Dropout(0.5))
model.add(Dense(6, activation=tf.nn.softmax))

# setting an optimizer with a given loss function
opt = SGD(lr=0.01, momentum=0.9)
model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x=train_data, y=train_labels, epochs=10, batch_size=77)
The error message I got:
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
InvalidArgumentError: Received a label value of 13269 which is outside the valid range of [0, 6). Label values: 8823 3208 9410 5223 8817 3799 6588 1779 1371 5017 9788 9886 3345 1815 5943 37 675 2396 4485 9528 11082 12457 13269 5488 3250 12896 13251 1854 10942 6287 6232 2944
[[node loss_24/dense_55_loss/sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits (defined at C:\Users\vardh\Anaconda3\envs\tf\lib\site-packages\keras\backend\tensorflow_backend.py:3009) ]] [Op:__inference_keras_scratch_graph_676176]
Function call stack:
keras_scratch_graph
Somehow I realized that my question is related to this question: InvalidArgumentError: Received a label value of 8825...
Solution from that post:
@shaili posted:
In the last layer, for example, you used model.add(Dense(1, activation='softmax')). Here 1 restricts the valid label values to [0, 1); change it to the maximum number of output labels. For example, if your output labels are in [0, 7), then use model.add(Dense(7, activation='softmax')).
input_text = Input(shape=(max_len,), dtype=tf.string)
embedding = Lambda(ElmoEmbedding, output_shape=(max_len, 1024))(input_text)
x = Bidirectional(LSTM(units=512, return_sequences=True,
                       recurrent_dropout=0.2, dropout=0.2))(embedding)
x_rnn = Bidirectional(LSTM(units=512, return_sequences=True,
                           recurrent_dropout=0.2, dropout=0.2))(x)
x = add([x, x_rnn])  # residual connection to the first biLSTM
out = TimeDistributed(Dense(n_tags, activation="softmax"))(x)
Here in the TimeDistributed layer, n_tags is the number of tags from which I want to classify.
If I predict some other quantity such as q_tag whose length differs from n_tags, say 10 while n_tags is 7, and an output label of 8 is received, it will give the invalid argument error: Received a label value of 8 which is outside the valid range of [0, 7).
From my experience:
Usually, this error is generated when the number of classes to classify over is specified inaccurately. Here in my code, model.add(Dense(6, activation = tf.nn.softmax)) declares 6 output classes, while my labels were raw image IDs ranging up to 13596. This is still not fully working code, but at least it gets my code running.
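A minimal sketch of one way to repair labels like these (my assumption, not from the original post: map the cell-type column, not the image ID, onto contiguous indices in [0, num_classes), which is what sparse_categorical_crossentropy expects):

import numpy as np

# hypothetical raw labels: the cell-type column from gt_training.csv
raw_labels = np.array(['homogeneous', 'speckled', 'nucleolar', 'homogeneous'])
classes, train_labels = np.unique(raw_labels, return_inverse=True)  # indices in [0, len(classes))
num_classes = len(classes)  # this should match the units of the final Dense layer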

Keras Graph disconnected

Well, I have a problem setting up a network consisting of a CNN + autoencoder for a classification task. The main idea is to use the CNN-generated embedding as the input of an autoencoder for the embedding reconstruction process. I was able to define both architectures, but I couldn't merge them into a single graph.
def autoencoder(cnn_out):
    xreal = keras.layers.Input(tensor=cnn_out)
    (...)
    xhat = keras.layers.Dense(cnn_out.shape[1], activation='sigmoid')(dec)
    ae = keras.models.Model(inputs=xreal, outputs=xhat)
    loss_mse = mse_loss(xreal, xhat)
    ae.add_loss(loss_mse)
    return ae

def cnnae_model(input_shape):
    h1 = keras.layers.Conv2D(8, strides=(1, 1), kernel_size=kernel, kernel_regularizer=r.l2(kl), padding='same')(X)
    (...)
    h5 = keras.layers.AveragePooling2D(pool_size=(2, 2))(h5)
    xreal = keras.layers.Flatten()(h5)
    cnn = keras.models.Model(inputs=X, outputs=xreal)
    cnn_ae = keras.models.Model(inputs=cnn.input, outputs=autoencoder(cnn.output).output)
    return cnn_ae

input_shape = (128, 64, 3)
model = cnnae_model(input_shape)
# note: batch_size and callbacks belong to fit(), not compile()
model.compile(loss=contrastive_loss, batch_size=16, optimizer=rms, metrics=[accuracy], callbacks=[reduce_lr])
The following error message appears when I try to compile the model:
ValueError: Graph disconnected: cannot obtain value for tensor Tensor("flatten_11/Identity:0", shape=(None, 2048), dtype=float32) at layer "input_50". The following previous layers were accessed without issue: []
I did some modifications to your code and produced a working version (one without the error you reported). There are a few changes that have to do with how the output layers are called when connecting up the different submodels, but hopefully you can relate it back to your original model. There is some additional information here that might help clarify: https://www.tensorflow.org/guide/keras/functional#using_the_same_graph_of_layers_to_define_multiple_models. I hope this helps.
import tensorflow as tf
import numpy as np

print(tf.__version__)
tf.keras.backend.clear_session()

# Reworked version of the code that had the issue:
def autoencoder(cnn_out):
    xreal = cnn_out  # tf.keras.layers.Input(tensor=cnn_out)
    dec = xreal
    xhat = tf.keras.layers.Dense(cnn_out.shape[1], activation='sigmoid', name='AE_Dense')(dec)
    # ae = tf.keras.models.Model(inputs=xreal, outputs=xhat, name='AE_Model')
    # loss_mse = mse_loss(xreal, xhat)
    # ae.add_loss(loss_mse)
    return xhat  # return the last layer of the model

def cnnae_model(input_shape):
    # CNN model start:
    X = tf.keras.layers.Input(input_shape, name='CNN_Input')
    h1 = tf.keras.layers.Conv2D(8, kernel_size=(2, 2), padding='same', name='CNN_Conv2D')(X)
    h5 = h1
    h5 = tf.keras.layers.AveragePooling2D(pool_size=(2, 2), name='CNN_AvgPooling2D')(h5)
    xreal = tf.keras.layers.Flatten(name='CNN_myFlatten')(h5)
    cnn = tf.keras.models.Model(inputs=X, outputs=xreal, name='CNN_Model')
    # CNN model end
    ae_model = autoencoder(xreal)
    cnn_ae = tf.keras.models.Model(inputs=cnn.input, outputs=ae_model, name='cnn_ae_model')
    return cnn_ae

input_shape = (128, 64, 3)
model = cnnae_model(input_shape)
print('model.summary():')
print(model.summary())
model.compile(optimizer='rmsprop', loss='mse')

x_train = np.random.random((2, 128, 64, 3))
y_train = np.random.random((2, 16384))  # 64 * 32 * 8 = 16384 flattened features
print('x_train.shape:')
print(x_train.shape)
print('y_train.shape:')
print(y_train.shape)
model.fit(x_train, y_train, epochs=1)
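The key change: autoencoder() no longer wraps cnn_out in a fresh Input layer (which started a new, disconnected graph) but simply keeps stacking layers on the tensor produced by the CNN, so one connected graph runs from CNN_Input through to AE_Dense.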

keras model equivalent of tf.depth_to_space

I want to accomplish the equivalent of tf.depth_to_space in a Keras model. Specifically, the data in the Keras model is shaped H x W x 4 (i.e., depth of 4) and I want to permute the data so that the output is sized 2H x 2W x 1, with the mapping done by viewing the 4 input channels as 2x2 blocks; i.e.,
input location is y, x, k
output location is 2*y+(k//2), 2*x+(k%2), 0
I know that I can get the correct shape with:
outputs = keras.layers.Reshape((H*2,W*2,1), input_shape=(H,W,4))(inputs)
But I think that the mapping will be
input location is y, x, k
linear_address is y*W*4 + x*4 + k
output location is linear_address//(W*2), linear_address % (W*2), 0
which is not what I want.
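A tiny sketch that makes the difference concrete (assuming TF 2.x eager execution; a 1x2x2x4 tensor holding 0..15):

import numpy as np
import tensorflow as tf

x = np.arange(16, dtype=np.float32).reshape(1, 2, 2, 4)
print(x.reshape(1, 4, 4, 1)[0, :, :, 0])               # plain row-major reshape
print(tf.nn.depth_to_space(x, 2).numpy()[0, :, :, 0])  # 2x2 block mapping
# the two 4x4 grids differ, so Reshape alone cannot reproduce depth_to_space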
I tried directly using the
outputs = tf.depth_to_space(inputs, 2)
but that led to an error:
TypeError: Output tensors to a Model must be Keras tensors. Found Tensor("DepthToSpace:0", shape=(?, 1024, 1024, 1), dtype=float32)
the problem can be seen with this simple function:
def simple_net(H=512, W=512):
    inputs = keras.layers.Input((H, W, 4))
    # gets the correct shape but not the correct order
    outputs = keras.layers.Reshape((H*2, W*2, 1), input_shape=(H, W, 4))(inputs)
    # runtime error message
    # outputs = tf.depth_to_space(outputs, 2)
    model = keras.models.Model(inputs, outputs)
    return model
You should use a Keras Lambda layer:
from keras.layers import Lambda
import tensorflow as tf

# scale is the block size (2 for 2x2 blocks)
Subpixel_layer = Lambda(lambda x: tf.nn.depth_to_space(x, scale))
x = Subpixel_layer(x)
MINIMAL MODEL
import tensorflow as tf
from keras.layers import Input, Lambda, Conv2D
from keras.models import Model

inp = Input(shape=(32, 32, 3))  # "in" is a reserved word in Python, so use "inp"
x = Conv2D(32, (3, 3), activation='relu')(inp)
x = Conv2D(32, (3, 3), activation='relu')(x)
sub_layer = Lambda(lambda x: tf.nn.depth_to_space(x, 2))
x = sub_layer(x)
model = Model(inputs=inp, outputs=x)
# model.compile(optimizer=Adam(), loss='mean_squared_error')
model.summary()
(model.summary() output omitted.)

4-dimensional data became 2-dimensional after going through a convolutional neural network

I am training a neural network with two types of input: an image and BR (blue over red; it's a non-image feature, like height or weight). To do that, I use the fit function in Keras and convert each image to a list for input. But I don't know why the image list, which has a 4-dimensional shape, became 2-dimensional when going into fit, and I got the error below:
Error when checking input: expected dense_1_input to have 3 dimensions, but got array with shape (1630, 1)
When I converted the image list to an array, I checked the shape of image_array and it has exactly 4 dimensions (specifically, its shape is (1630, 60, 60, 3)). Even right before the fit function, it still has the same shape. So I really don't know why the shape became (1630, 1). Could anyone explain this to me?
Here is my code:
from keras.utils.np_utils import to_categorical
import pandas as pd
import numpy as np
import os
from keras.applications.vgg16 import VGG16
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential, Model
from keras.layers import Input, Activation, Dropout, Flatten, Dense, Concatenate, concatenate, Reshape, BatchNormalization, Merge
from keras import optimizers
from keras.optimizers import Adagrad
from sklearn import preprocessing
from scipy.misc import imread
import time
from PIL import Image
import cv2

img_width, img_height = 60, 60
img_list = []
BR_list = []
label_list = []
data_num = 1630
folder1 = "cut2/train/sugi/"
folder2 = "cut2/train/hinoki/"

def imgConvert(file_path):
    img = imread(file_path, flatten=True)
    # note: the two lines below overwrite the loaded image with synthetic values
    img = np.arange(1*3*60*60).reshape((60, 60, 3))
    img = np.array(img).reshape(60, 60, 3)
    img = img.astype("float32")
    return img

def B_and_R(img_path):
    # average the blue and red channels over the central 10x10 patch
    img = cv2.imread(img_path)
    B = 0
    R = 0
    for i in range(25, 35):
        for j in range(25, 35):
            B = B + img[i, j, 0]
            R = R + img[i, j, 2]
    ave_B = B/100
    ave_R = R/100
    BR = ave_B/ave_R
    return BR

def getData(path, pollen):
    for the_file in os.listdir(path):
        file_path = os.path.join(path, the_file)
        B_over_R = B_and_R(file_path)
        img_arr = imgConvert(file_path)
        img_list.append(img_arr)
        BR_list.append(B_over_R)
        lb = np.zeros(2)
        if pollen == "sugi":
            lb[0] += 1
        else:
            lb[1] += 1
        label_list.append(lb)
if __name__ == '__main__':
    getData(folder1, "sugi")
    getData(folder2, "hinoki")
    img_arr = np.array(img_list)
    print(img_arr.shape)
    img_arr.astype("float32")
    img_arr /= 255
    print(img_arr.shape)
    img_array = np.expand_dims(img_arr, axis=0)
    img_array = img_array[0, :, :, :, :]
    print(img_array.shape)
    """
    datagen = ImageDataGenerator(
        featurewise_center=True,
        featurewise_std_normalization=True,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True)
    datagen.fit(img_array)
    """
    print(img_array.shape)
    label_arr = np.array(label_list)
    print(label_arr.shape)
    BR_arr = np.array(BR_list)
    print(BR_arr.shape)

    input_tensor = Input(shape=(img_width, img_height, 3))
    vgg16 = VGG16(include_top=False, weights='imagenet', input_tensor=input_tensor)
    # build the FC layers
    top_model = Sequential()
    top_model.add(Flatten(input_shape=vgg16.output_shape[1:]))
    # join VGG16 and the FC layers into one model
    branch1 = Model(input=vgg16.input, output=top_model(vgg16.output))
    print(branch1.summary())

    branch2 = Sequential()
    branch2.add(Dense(1, input_shape=(data_num, 1), activation='sigmoid'))
    branch2.add(BatchNormalization())
    branch2.add(Flatten())
    print(branch2.summary())

    merged = Merge([branch1, branch2], mode="concat")
    model = Sequential()
    model.add(merged)
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    print(model.summary())

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.SGD(lr=1e-3, momentum=0.9),
                  metrics=['accuracy'])
    print(img_array.shape)
    model.fit([img_array, BR_arr], label_arr,
              epochs=5, batch_size=100, verbose=1)
OK, then the problem is the input shape.
While your data for branch 2 is 2D, (batch, 1), your model should also declare a 2D input: input_shape = (1,). (Batch size is ignored in input_shape.)
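A minimal sketch of that fix, keeping the rest of the poster's branch unchanged (it reuses the imports from the question's code):

branch2 = Sequential()
# each sample is a single BR scalar, so the per-sample shape is (1,)
branch2.add(Dense(1, input_shape=(1,), activation='sigmoid'))
branch2.add(BatchNormalization())
branch2.add(Flatten())
print(branch2.summary())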
