I am trying to run this part of the code and I am getting a TypeError:
data_dir = '../Cat_Dog_data/train'
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset = datasets.ImageFolder(data_dir, transform=transforms)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
# Run this to test your data loader
images, labels = next(iter(dataloader))
helper.imshow(images[0], normalize=False)
Error:
TypeError Traceback (most recent call last)
<ipython-input-21-d9e81831faed> in <module>()
1 # Run this to test your data loader
----> 2 images, labels = next(iter(dataloader))
3 helper.imshow(images[0], normalize=False)
...
TypeError: 'module' object is not callable
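The likely cause, given the traceback: the ImageFolder call passes the torchvision.transforms module itself (transform=transforms) instead of the Compose pipeline that was bound to the name transform, so the loader later tries to call the module on each image. A minimal sketch of the corrected call, reusing the names from the question:

# Pass the Compose object (transform), not the transforms module
dataset = datasets.ImageFolder(data_dir, transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)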
I'm new to CRNN models and I'm currently working on a text recognition project. However, when I call the model.fit() method I get AttributeError: 'str' object has no attribute 'shape'. As far as I know, newer versions of model.fit() support a generator as input, but I still get the error.
I'm using TensorFlow version 2.6.4.
This is my code:
class DataGenerator(keras.callbacks.Callback):
    def __init__(self, img_dirpath, img_w, img_h,
                 batch_size, n, output_labels, max_text_len=15):
        ...  # some code omitted in the question
    def build_data(self):
        ...
    def next_data(self):
        ...
    def next_batch(self):
        while True:
            X_data = np.ones([self.batch_size, self.img_w, self.img_h, 1])
            Y_data = np.ones([self.batch_size, self.max_text_len])
            input_length = np.ones((self.batch_size, 1)) * 40
            label_length = np.zeros((self.batch_size, 1))
            source_str = []
            for i in range(self.batch_size):
                img, text = self.next_data()  # get the image and text data pointed to by the current index
                img = img.T
                img = np.expand_dims(img, -1)
                X_data[i] = img
                label = encode_words_labels(text)
                lbl_len = len(label)
                Y_data[i, 0:lbl_len] = label
                label_length[i] = len(label)
                source_str.append(text)
            # prepare the input for the model
            inputs = {
                'img_input': X_data,
                'ground_truth_labels': Y_data,
                'input_length': input_length,
                'label_length': label_length,
                'source_str': source_str  # used for visualization only
            }
            # prepare the output for the model, initialized to zeros
            outputs = {'ctc': np.zeros([self.batch_size])}
            print(type(inputs))
            yield (inputs, outputs)  # return the prepared input and output to the model
This is the part where the error appears:
img_text_recog.fit(x=train_gene.next_batch(),
                   steps_per_epoch=int(train_gene.n / batch_size),
                   epochs=20,
                   callbacks=[viz_cb_train, viz_cb_val, train_gene, val_gen,
                              tensorboard_callback, early_stop, model_chk_pt],
                   validation_data=val_gen.next_batch(),
                   validation_steps=int(val_gen.n / batch_size))
The error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
/tmp/ipykernel_54/2615878982.py in <module>
4 callbacks=[viz_cb_train,viz_cb_val,train_gene,val_gen,tensorboard_callback,early_stop,model_chk_pt],
5 validation_data=val_gen.next_batch(),
----> 6 validation_steps=int(val_gen.n / batch_size))
...
AttributeError: 'str' object has no attribute 'shape'
I think the problem lies in the inputs/outputs, but I cannot figure out how to solve it.
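One plausible reading of the traceback: model.fit() tries to infer a shape for every entry in the yielded inputs dict, and 'source_str' is a list of plain Python strings, which have no .shape attribute. Since the question notes it is used for visualization only, a hedged sketch is to keep it out of what the generator yields; the key names are taken from the question, and whether the visualization callbacks can read it from the instance instead is an assumption:

def next_batch(self):
    while True:
        X_data = np.ones([self.batch_size, self.img_w, self.img_h, 1])
        Y_data = np.ones([self.batch_size, self.max_text_len])
        input_length = np.ones((self.batch_size, 1)) * 40
        label_length = np.zeros((self.batch_size, 1))
        self.source_str = []  # kept on the instance for the visualization callbacks
        for i in range(self.batch_size):
            img, text = self.next_data()
            img = np.expand_dims(img.T, -1)
            X_data[i] = img
            label = encode_words_labels(text)
            Y_data[i, 0:len(label)] = label
            label_length[i] = len(label)
            self.source_str.append(text)
        inputs = {
            'img_input': X_data,
            'ground_truth_labels': Y_data,
            'input_length': input_length,
            'label_length': label_length,
            # 'source_str' deliberately not yielded: plain strings have no
            # .shape, which is exactly what model.fit() complains about
        }
        outputs = {'ctc': np.zeros([self.batch_size])}
        yield inputs, outputs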
I want to test a federated learning model with non-IID data using the API tff.simulation.datasets.build_single_label_dataset(), following these posts:
TensorFlow Federated: How to tune non-IIDness in federated dataset?
AttributeError: 'MapDataset' object has no attribute 'preprocess' in tensorflow_federated tff
AttributeError: 'MapDataset' object has no attribute 'client_ids' in tensorflow_federated TFF
But when I train the model, I get this error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-55-c558ef10b0d6> in <module>()
10
11 train(federated_averaging_process=federated_averaging, num_rounds=10,
---> 12 num_clients_per_round= 100, summary_writer=summary_writer)
1 frames
<ipython-input-54-c93c7edf73db> in <listcomp>(.0)
19 sampled_train_data = [
20 fed_emnist_train.create_tf_dataset_for_client(client)
---> 21 for client in sampled_clients
22 ]
23
AttributeError: 'MapDataset' object has no attribute 'create_tf_dataset_for_client'
Here is the code:
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
    only_digits=False)

# for non-IID we use the API tff.simulation.datasets.build_single_label_dataset()
fed_emnist_train = tff.simulation.datasets.build_single_label_dataset(
    emnist_train.create_tf_dataset_from_all_clients(),
    label_key='label', desired_label=1)
print(fed_emnist_train.element_spec)

MAX_CLIENT_DATASET_SIZE = 418
CLIENT_EPOCHS_PER_ROUND = 1
CLIENT_BATCH_SIZE = 20
TEST_BATCH_SIZE = 500

def reshape_emnist_element(element):
    return (tf.expand_dims(element['pixels'], axis=-1), element['label'])

def preprocess_train_dataset(dataset):
    return (dataset
            .shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)
            .repeat(CLIENT_EPOCHS_PER_ROUND)
            .batch(CLIENT_BATCH_SIZE, drop_remainder=False)
            .map(reshape_emnist_element))

fed_emnist_train = preprocess_train_dataset(fed_emnist_train)

# for an unbalanced dataset
import random

NUM_CLIENTS = 100
client_datasets = [
    fed_emnist_train.take(random.randint(1, CLIENT_BATCH_SIZE))
    for _ in range(NUM_CLIENTS)
]

# defining a model
def create_original_fedavg_cnn_model(only_digits=False):
    data_format = 'channels_last'
    max_pool = functools.partial(
        tf.keras.layers.MaxPooling2D,
        pool_size=(2, 2),
        padding='same',
        data_format=data_format)
    conv2d = functools.partial(
        tf.keras.layers.Conv2D,
        kernel_size=5,
        padding='same',
        data_format=data_format,
        activation=tf.nn.relu)
    model = tf.keras.models.Sequential([
        tf.keras.layers.InputLayer(input_shape=(28, 28, 1)),
        conv2d(filters=32),
        max_pool(),
        conv2d(filters=64),
        max_pool(),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(512, activation=tf.nn.relu),
        tf.keras.layers.Dense(10 if only_digits else 62),
        tf.keras.layers.Softmax(),
    ])
    return model

input_spec = client_datasets[0].element_spec

def tff_model_fn():
    keras_model = create_original_fedavg_cnn_model()
    return tff.learning.from_keras_model(
        keras_model=keras_model,
        input_spec=input_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

# training the model
federated_averaging = tff.learning.build_federated_averaging_process(
    model_fn=tff_model_fn,
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
    server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))

# utility function
def format_size(size):
    size = float(size)
    for unit in ['bit', 'Kibit', 'Mibit', 'Gibit']:
        if size < 1024.0:
            return "{size:3.2f}{unit}".format(size=size, unit=unit)
        size /= 1024.0
    return "{size:.2f}{unit}".format(size=size, unit='TiB')

def set_sizing_environment():
    sizing_factory = tff.framework.sizing_executor_factory()
    context = tff.framework.ExecutionContext(executor_fn=sizing_factory)
    tff.framework.set_default_context(context)
    return sizing_factory

# trains the federated averaging process and outputs metrics
import tensorflow_datasets as tfds

def train(federated_averaging_process, num_rounds, num_clients_per_round, summary_writer):
    # create an environment to measure communication cost
    environment = set_sizing_environment()
    # initialize the FedAvg algorithm to get the initial server state
    state = federated_averaging_process.initialize()
    with summary_writer.as_default():
        for round_num in range(num_rounds):
            # sample the clients participating in this round
            sampled_clients = np.random.choice(
                emnist_train.client_ids,
                size=num_clients_per_round,
                replace=False)
            # create a list of `tf.Dataset` instances from the data of the sampled clients
            sampled_train_data = [
                fed_emnist_train.create_tf_dataset_for_client(client)
                for client in sampled_clients
            ]
            state, metrics = federated_averaging_process.next(state, sampled_train_data)
            size_info = environment.get_size_info()
            broadcasted_bits = size_info.broadcast_bits[-1]
            aggregated_bits = size_info.aggregate_bits[-1]
            print('round {:2d}, metrics={}, broadcasted_bits={}, aggregated_bits={}'.format(
                round_num, metrics, format_size(broadcasted_bits), format_size(aggregated_bits)))
            # add metrics to TensorBoard
            for name, value in metrics['train'].items():
                tf.summary.scalar(name, value, step=round_num)
            tf.summary.scalar('cumulative_broadcasted_bits', broadcasted_bits, step=round_num)
            tf.summary.scalar('cumulative_aggregated_bits', aggregated_bits, step=round_num)
            summary_writer.flush()

# first, clean the log directory to avoid conflicts
try:
    tf.io.gfile.rmtree('/tmp/logs/scalars')
except tf.errors.OpError as e:
    pass

# set up the log directory and writer for TensorBoard
logdir = "/tmp/logs/scalars/original/"
summary_writer = tf.summary.create_file_writer(logdir)

train(federated_averaging_process=federated_averaging, num_rounds=10,
      num_clients_per_round=100, summary_writer=summary_writer)
I don't know where my mistake is. Is the definition of input_spec the reason?
input_spec = client_datasets[0].element_spec
Or is my mistake here?
sampled_train_data = [
    fed_emnist_train.create_tf_dataset_for_client(client)
    for client in sampled_clients
]
Appreciate any help!
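For what it's worth, the traceback itself suggests the answer: build_single_label_dataset() returns a plain tf.data.Dataset (hence the MapDataset in the error), not a ClientData object, so it has no create_tf_dataset_for_client() method. A hedged sketch under that assumption is to sample from the client_datasets list that was already built by slicing the filtered dataset:

import numpy as np

# client_datasets is the list of NUM_CLIENTS dataset slices built above;
# sample whole entries from it instead of calling a ClientData method
# on a MapDataset, which does not exist
sampled_indices = np.random.choice(
    len(client_datasets), size=num_clients_per_round, replace=False)
sampled_train_data = [client_datasets[i] for i in sampled_indices]

The input_spec definition looks consistent with this, since client_datasets[0] is already batched and mapped; the failing piece is only the sampling inside train().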
mnist_train = mnist_dataset['train']
mnist_test = mnist_dataset['test']
num_validation_sample =0.1* mnist_info.splits['train'].num_example
num_validation_sample =tf.cast(num_validation_samples, tf.init64)
num_test_samples = mnist_info.splits['test'].num_example
num_test_samples =tf.cast(num_test_samples, tf.init64)
def scale(image, label):
    image = tf.cast(image, tf.float32)
    image /= 255.
    return image, label
scaled_train_and_validation_data = mnist_train.map(scale)
test_data = mnist_test.map(scale)
BUFFER_SIZE = 10000
shuffled_train_and_validation_data = scaled_train_and_validation_data(BUFFER_SIZE)
validation_data = shuffled_train_and_validation_data.take(num_validation_sample)
train_data = shuffled_train_and_validation_data.skip(num_validation_sample)
BATCH_SIZE = 100
train_data = train_data.batch(BATCH_SIZE)
validation_data = validation_data.batch(num_validation_sample)
#test_data
validation_inputs, validation_targets = next(iter(validation_data))
I got the following error when I run the code:
Traceback (most recent call last)
----> 3 num_validation_sample =0.1* mnist_info.splits['train'].num_example
AttributeError: 'dict' object has no attribute 'splits'
The error means that mnist_info is a dict, and dicts do not have a splits attribute, so there is a logic error in your code. You are probably looking for something like mnist_info.get('train').num_examples, but you should show how mnist_info is created and what you want to do, so we can be sure what's wrong with the code.
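If mnist_info is meant to be the DatasetInfo object from tensorflow_datasets, a minimal sketch of the usual loading pattern (assuming the data comes from tfds.load, which the variable names suggest but the question does not show):

import tensorflow as tf
import tensorflow_datasets as tfds

# with_info=True returns a DatasetInfo object, which does have .splits
mnist_dataset, mnist_info = tfds.load('mnist', as_supervised=True, with_info=True)

num_validation_samples = 0.1 * mnist_info.splits['train'].num_examples  # num_examples, not num_example
num_validation_samples = tf.cast(num_validation_samples, tf.int64)      # tf.int64, not tf.init64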
I'm trying to run the code below, but I'm getting a 'file not found' error.
I tried to check if I'm in right directory by using:
os.listdir('C:\\Users\\ruchi\\Desktop\\Car-Models-Classifier-master')
['.ipynb_checkpoints',
'bmw10_ims',
'cars_annos.mat',
'cars_test',
'cars_test_annos_withlabels.mat',
'cars_train',
'car_ims',
'car_models_classifier.ipynb',
'devkit',
'README.md']
This is the code I'm trying to run. I believe I gave the correct path, but it still shows a file-not-found error.
Code:
data_dir = '/Car-Models-Classifier-master'
train_dir = '/Car-Models-Classifier-master/cars_train'
valid_dir = data_dir + '/valid'
test_dir = '/Car-Models-Classifier-master/cars_test'

# Training transform includes random rotation and flip to build a more robust model
train_transforms = transforms.Compose([transforms.Resize((244, 244)),
                                       transforms.RandomRotation(30),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# The validation set will use the same transform as the test set
test_transforms = transforms.Compose([transforms.Resize((244, 244)),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

validation_transforms = transforms.Compose([transforms.Resize((244, 244)),
                                            transforms.CenterCrop(224),
                                            transforms.ToTensor(),
                                            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# Load the datasets with ImageFolder
train_data = datasets.ImageFolder(data_dir + '/Car-Models-Classifier-master/cars_train', transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/Car-Models-Classifier-master/cars_test', transform=test_transforms)
#valid_data = datasets.ImageFolder(data_dir + '/valid', transform=validation_transforms)

# Using the image datasets and the transforms, define the dataloaders.
# The trainloader will have shuffle=True so that the order of the images does not affect the model.
trainloader = torch.utils.data.DataLoader(train_data, batch_size=128, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=32, shuffle=True)
validloader = torch.utils.data.DataLoader(valid_data, batch_size=32, shuffle=True)
This is the error:
FileNotFoundError Traceback (most recent call last)
<ipython-input-16-fe5e781c7465> in <module>
20
21 # Load the datasets with ImageFolder
---> 22 train_data = datasets.ImageFolder(data_dir + '/Car-Models-Classifier-master/cars_train', transform=train_transforms)
23 test_data = datasets.ImageFolder(data_dir + '/Car-Models-Classifier-master/cars_test', transform=test_transforms)
24 #valid_data = datasets.ImageFolder(data_dir + '/valid', transform=validation_transforms)
Can someone help? I'm not able to understand where I'm going wrong.
This
data_dir + '/Car-Models-Classifier-master/cars_train'
concatenates to
/Car-Models-Classifier-master/Car-Models-Classifier-master/cars_train
so the line must be
train_data = datasets.ImageFolder(data_dir + '/cars_train', transform=train_transforms)
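Alternatively, since the os.listdir() check in the question shows where the folder actually lives, a hedged sketch is to build the paths from that absolute directory (the Windows path below is copied from the question; os.path.join keeps the joining portable):

import os

# Absolute path taken from the os.listdir() call in the question
data_dir = r'C:\Users\ruchi\Desktop\Car-Models-Classifier-master'

train_data = datasets.ImageFolder(os.path.join(data_dir, 'cars_train'),
                                  transform=train_transforms)
test_data = datasets.ImageFolder(os.path.join(data_dir, 'cars_test'),
                                 transform=test_transforms)

Note that ImageFolder also expects one subfolder per class inside cars_train; if the images sit directly in that folder, it will fail with a different error even once the path is right.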
I'm trying to build a hidden layer of a neural network with TensorFlow, but I keep getting the error message 'Operation' object has no attribute 'dtype'.
This is where the code throws the error:
codings = tf.layers.dense(X, n_hidden, name="hidden")
This is the entire script:
import numpy as np
import tensorflow as tf
from PIL import Image
data = []
test2 = Image.open("./ters/test2.jpg")
prepared_data = np.asarray(test2.resize((800, 1000), Image.ANTIALIAS))
data.append(prepared_data)
data = np.asarray(data)
saver = tf.train.import_meta_graph("./my_model.ckpt.meta")
batch_size, height, width, channels = data.shape
n_hidden = 400
X = tf.get_default_graph().get_operation_by_name("Placeholder")
training_op = tf.get_default_graph().get_operation_by_name("train/Adam")
codings = tf.layers.dense(X, n_hidden, tf.float32)
n_iterations = 5
with tf.Session() as sess:
    saver.restore(sess, "./my_model.ckpt")
    sess.run(training_op)
    test_img = codings.eval(feed_dict={X: X_test})
    print(test_img)
Note: I have already trained the model, saved it as my_model.ckpt, and here I am importing and using it.
This is the error message:
Traceback (most recent call last):
File "using_img_cleaner.py", line 36, in <module>
codings = tf.layers.dense(X, n_hidden, tf.float32)
File "/home/exceptions/env/lib/python3.5/site-packages/tensorflow/python/layers/core.py", line 250, in dense
dtype=inputs.dtype.base_dtype,
AttributeError: 'Operation' object has no attribute 'dtype'
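The name lookup is the likely culprit: get_operation_by_name() returns a tf.Operation, while tf.layers.dense() needs a Tensor (something that has a dtype). A hedged sketch of the usual fix, assuming the placeholder op really is named "Placeholder" in the saved graph:

# An Operation has no dtype; its output tensor does. Tensor names are the
# op name plus an output index, hence the ":0" suffix.
X = tf.get_default_graph().get_tensor_by_name("Placeholder:0")
codings = tf.layers.dense(X, n_hidden, name="hidden")

Separately, the script's call tf.layers.dense(X, n_hidden, tf.float32) passes tf.float32 as the third positional argument, which is activation, so that line would still need fixing even with a proper tensor.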