Unable to import TensorFlow and Keras properly - Python

I have written the code below in Python. I have a dataset in JSON and I am trying to train on it with Keras. Loading the data works fine, but after I finished the training code and ran it, the program started failing on its imports. I tried reinstalling tensorflowjs and keras with pip, but every time I install tensorflowjs it reports a compatibility error against the Keras-Applications version. I have tried every version and still get the same error. How can I get out of this trouble?
import json
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
import tensorflowjs as tfjs
with open("C:\\Users\\TechProBox\\Desktop\\Model.json") as f:
    data = json.load(f)
x1 = np.array(data['attitude.roll'])
y1 = np.array(data['attitude.pitch'])
z1 = np.array(data['attitude.yaw'])
x2 = np.array(data['gravity.x'])
y2 = np.array(data['gravity.y'])
z2 = np.array(data['gravity.z'])
x3 = np.array(data['rotationRate.x'])
y3 = np.array(data['rotationRate.y'])
z3 = np.array(data['rotationRate.z'])
x4 = np.array(data['userAcceleration.x'])
y4 = np.array(data['userAcceleration.y'])
z4 = np.array(data['userAcceleration.z'])
x1_train = x1[:-10000]
y1_train = y1[:-10000]
z1_train = z1[:-10000]
x2_train = x2[:-10000]
y2_train = y2[:-10000]
z2_train = z2[:-10000]
x3_train = x3[:-10000]
y3_train = y3[:-10000]
z3_train = z3[:-10000]
x4_train = x4[:-10000]
y4_train = y4[:-10000]
z4_train = z4[:-10000]
x1_test = x1[-10000:]
y1_test = y1[-10000:]
z1_test = z1[-10000:]
x2_test = x2[-10000:]
y2_test = y2[-10000:]
z2_test = z2[-10000:]
x3_test = x3[-10000:]
y3_test = y3[-10000:]
z3_test = z3[-10000:]
x4_test = x4[-10000:]
y4_test = y4[-10000:]
z4_test = z4[-10000:]
model = Sequential()
model.add(Dense(64, activation='relu', input_dim=6))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
adam = keras.optimizers.Adam(lr=0.0001)
model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])
model.fit(x1_train, y1_train, z1_train, x2_train, y2_train, z2_train, x3_train, y3_train, z3_train,
          x4_train, y4_train, z4_train,
          epochs=14,
          batch_size=128)
score = model.evaluate(x1_test, y1_test, z1_test, x2_test, y2_test, z2_test, x3_test, y3_test, z3_test,
                       x4_test, y4_test, z4_test, batch_size=128)
print(score)
model.save("Keras-64*2-10epoch")
tfjs.converters.save_keras_model(model,"tfjsv3")
Here are the errors:
Using TensorFlow backend.
Traceback (most recent call last):
File "C:\Users\TechProBox\Desktop\Python1.py", line 3, in <module>
import keras
File "C:\Program Files\Python36\lib\site-packages\keras\__init__.py", line 3, in <module>
from . import utils
File "C:\Program Files\Python36\lib\site-packages\keras\utils\__init__.py", line 6, in <module>
from . import conv_utils
File "C:\Program Files\Python36\lib\site-packages\keras\utils\conv_utils.py", line 9, in <module>
from .. import backend as K
File "C:\Program Files\Python36\lib\site-packages\keras\backend\__init__.py", line 89, in <module>
from .tensorflow_backend import *
File "C:\Program Files\Python36\lib\site-packages\keras\backend\tensorflow_backend.py", line 5, in <module>
import tensorflow as tf
File "C:\Program Files\Python36\lib\site-packages\tensorflow\__init__.py", line 22, in <module>
from tensorflow.python import pywrap_tensorflow # pylint: disable=unused-import
File "C:\Program Files\Python36\lib\site-packages\tensorflow\python\__init__.py", line 52, in <module>
from tensorflow.core.framework.graph_pb2 import *
File "C:\Program Files\Python36\lib\site-packages\tensorflow\core\framework\graph_pb2.py", line 6, in <module>
from google.protobuf import descriptor as _descriptor
File "C:\Program Files\Python36\lib\site-packages\google\protobuf\descriptor.py", line 47, in <module>
from google.protobuf.pyext import _message
ImportError: DLL load failed: The specified procedure could not be found.

Just needed to install protobuf and it worked:
pip install protobuf==3.6.0
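If you want to confirm the fix before rerunning the whole script, a quick sanity check (just a sketch, nothing specific to this project) is to import the same chain the traceback died in:
import google.protobuf
print(google.protobuf.__version__)  # should report 3.6.0 after the reinstall

# The original failure happened while keras pulled in tensorflow, which in turn
# imports google.protobuf, so these two imports exercise the same path.
import tensorflow as tf
import keras
print(tf.__version__, keras.__version__)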

Related

Using PyTorch to utilise DBpedia - KeyError: content-disposition

I am currently trying to download data from the torchtext.datasets module, and it is not working.
Here is the code I have written (taken from https://analyticsindiamag.com/multi-class-text-classification-in-pytorch-using-torchtext/):
import torch
import torchtext
from torchtext.datasets import text_classification
import os
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import time
from torch.utils.data.dataset import random_split
import re
from torchtext.data.utils import ngrams_iterator
from torchtext.data.utils import get_tokenizer
ngrams = 2
batch_size = 16
if not os.path.isdir('./.data'):
    os.mkdir('./.data')
train_dataset, test_dataset = text_classification.DATASETS['DBpedia'](root='./.data', ngrams=ngrams, vocab=None)
It produces the following error:
Traceback (most recent call last):
File "/Users/aidanpayne/Desktop/Scripts/Python/Neural Networks/text_classification_model.py", line 19, in <module>
train_dataset, test_dataset = text_classification.DATASETS['DBpedia'](root='./.data', ngrams=ngrams, vocab=None)
File "/Users/aidanpayne/opt/anaconda3/lib/python3.8/site-packages/torchtext/datasets/text_classification.py", line 237, in DBpedia
return _setup_datasets(*(("DBpedia",) + args), **kwargs)
File "/Users/aidanpayne/opt/anaconda3/lib/python3.8/site-packages/torchtext/datasets/text_classification.py", line 117, in _setup_datasets
dataset_tar = download_from_url(URLS[dataset_name], root=root)
File "/Users/aidanpayne/opt/anaconda3/lib/python3.8/site-packages/torchtext/utils.py", line 100, in download_from_url
return _process_response(response, root, filename)
File "/Users/aidanpayne/opt/anaconda3/lib/python3.8/site-packages/torchtext/utils.py", line 53, in _process_response
d = r.headers['content-disposition']
File "/Users/aidanpayne/opt/anaconda3/lib/python3.8/site-packages/requests/structures.py", line 54, in __getitem__
return self._store[key.lower()][1]
KeyError: 'content-disposition'
If anyone can help, that would be great!

Why can't I open a .h5 file in Python?

I am trying to open an .h5 file, but I am getting an OS error.
import sys
sys.path.append('..')
from unet3d.training import load_old_model
import tables
from train_model import config
model_file=config["model_file"] #config["model_file"] = os.path.abspath("mc_seg_model.h5")
hdf5_file=config["val_data_file"] #config['val_data_file'] = os.path.abspath("../data/val_data.h5")
model = load_old_model(model_file)
The load_old_model function is as follows:
import math
from functools import partial
import pdb
from keras import backend as K
from keras.callbacks import ModelCheckpoint, CSVLogger, LearningRateScheduler, ReduceLROnPlateau, EarlyStopping
from keras.models import load_model
import tensorflow_addons as tfa
def load_old_model(model_file):
    # pdb.set_trace()
    print("Loading pre-trained model")
    custom_objects = {'dice_coefficient_loss': dice_coefficient_loss, 'dice_coefficient': dice_coefficient,
                      'weighted_dice_coefficient': weighted_dice_coefficient,
                      'weighted_dice_coefficient_loss': weighted_dice_coefficient_loss}
    try:
        # from keras_contrib.layers import InstanceNormalization
        from tfa.layers import InstanceNormalization
        custom_objects["InstanceNormalization"] = InstanceNormalization
    except ImportError:
        pass
    try:
        return load_model(model_file, custom_objects=custom_objects)
    except ValueError as error:
        if 'InstanceNormalization' in str(error):
            raise ValueError(str(error) + "\n\nPlease install keras-contrib to use InstanceNormalization:\n"
                                          "'pip install git+https://www.github.com/keras-team/keras-contrib.git'")
        else:
            raise error
When I try to load the model, it throws the following OSError ('Input/output error').
2021-06-16 14:31:38.354199: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
Traceback (most recent call last):
File "draft.py", line 35, in <module>
model = load_old_model(model_file)
File "../unet3d/training.py", line 50, in load_old_model
return load_model(model_file, custom_objects=custom_objects)
File "/share/apps/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/saving/save.py", line 182, in load_model
return hdf5_format.load_model_from_hdf5(filepath, custom_objects, compile)
File "/share/apps/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/saving/hdf5_format.py", line 173, in load_model_from_hdf5
model_config = f.attrs.get('model_config')
File "/share/apps/anaconda3/lib/python3.7/_collections_abc.py", line 660, in get
return self[key]
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "/share/apps/anaconda3/lib/python3.7/site-packages/h5py/_hl/attrs.py", line 81, in __getitem__
attr.read(arr, mtype=htype)
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "h5py/h5a.pyx", line 355, in h5py.h5a.AttrID.read
File "h5py/_proxy.pyx", line 58, in h5py._proxy.attr_rw
OSError: Unable to read attribute (file read failed: time = Wed Jun 16 14:31:42 2021
, filename = '/data/kfernando/brats20/demo_task3_mcmc/mc_seg_model.h5', file descriptor = 4, errno = 5, error message = 'Input/output error', buf = 0x56126c096440, total read size = 30352, bytes this sub-read = 30352, bytes actually read = 18446744073709551615, offset = 16384)
Can someone please tell me what is causing this error?
Based on your comments about successful h5py open/close, it appears you have a valid HDF5 file. There are two more issues to investigate: 1) problems reading the attribute data, or 2) errors in the TensorFlow load_model() function. I can't help with TF. However, here is a bit of code to recursively descend the data hierarchy and output all attributes and values. See below:
import h5py

def get_all_attrs(name, h5_obj):
    if isinstance(h5_obj, h5py.Group):
        print('\n{} is a Group'.format(name))
    elif isinstance(h5_obj, h5py.Dataset):
        print('\n{} is a Dataset'.format(name))
    print('number of attributes:', len(h5_obj.attrs.keys()))
    for k in h5_obj.attrs.keys():
        print('{} => {}'.format(k, h5_obj.attrs[k]))

with h5py.File(file_path, 'r') as h5r:
    print('number of root level attributes:', len(h5r.attrs.keys()))
    for k in h5r.attrs.keys():
        print('{} => {}'.format(k, h5r.attrs[k]))
    h5r.visititems(get_all_attrs)
Run this with your TF file. It might find an error reading one of the attributes. Example output from my test file looks like this:
number of root level attributes: 2
OS => Windows
User => Me
Base_Group is a Group
number of attributes: 2
Date => today
Time => now
Base_Group/default is a Dataset
number of attributes: 2
attr1 => 1.0
attr2 => 22.2
Group1 is a Group
number of attributes: 0
Group1/default1 is a Dataset
number of attributes: 0
This should help determine the source of the error. If h5py can read the attributes, you need to investigate the TF load_model() function. If you get an error reading the attributes... well, that's your problem, but I don't know how to identify the root cause.
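Since your traceback fails inside load_model_from_hdf5 while it reads the model_config attribute, you can also target that one attribute directly. This is only a sketch; the path is copied from your traceback, so adjust it to your environment:
import h5py

model_path = '/data/kfernando/brats20/demo_task3_mcmc/mc_seg_model.h5'  # path taken from the traceback

with h5py.File(model_path, 'r') as f:
    # load_model() dies while reading this root-level attribute, so read it directly.
    config = f.attrs.get('model_config')
    print('model_config found' if config is not None else 'model_config attribute missing')
If this direct read raises the same errno 5 'Input/output error', the problem is with the file or the storage it lives on rather than with TensorFlow.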

tensorflow.python.framework.errors_impl.UnimplementedError: Cast string to int32 is not supported

I only wrote a simple model in model.py, and when I ran it, it gave the following error.
2021-02-08 22:20:11.872409: E tensorflow/core/common_runtime/executor.cc:641] Executor failed to create kernel. Unimplemented: Cast string to int32 is not supported
[[{{node embedding/Cast}}]]
Traceback (most recent call last):
File "C:\Users\xiaoc\Anaconda3\lib\runpy.py", line 193, in _run_module_as_main
"main", mod_spec)
File "C:\Users\xiaoc\Anaconda3\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Users\xiaoc\AppData\Local\Google\Cloud SDK\trainer\task.py", line 55, in
train_model(args)
File "C:\Users\xiaoc\AppData\Local\Google\Cloud SDK\trainer\task.py", line 43, in train_model
validation_data=(eval_data, eval_labels))
File "C:\Users\xiaoc\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py", line 780, in fit
steps_name='steps_per_epoch')
File "C:\Users\xiaoc\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training_arrays.py", line 363, in model_iteration
batch_outs = f(ins_batch)
File "C:\Users\xiaoc\Anaconda3\lib\site-packages\tensorflow\python\keras\backend.py", line 3289, in call
self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)
File "C:\Users\xiaoc\Anaconda3\lib\site-packages\tensorflow\python\keras\backend.py", line 3222, in _make_callable
callable_fn = session._make_callable_from_options(callable_opts)
File "C:\Users\xiaoc\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1489, in _make_callable_from_options
return BaseSession._Callable(self, callable_options)
File "C:\Users\xiaoc\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1446, in init
session._session, options_ptr)
tensorflow.python.framework.errors_impl.UnimplementedError: Cast string to int32 is not supported
[[{{node embedding/Cast}}]]
What is the problem? The requirement is to make changes only in model.py, not in the other files. Thanks in advance!
The three Python files are below.
model.py
import tensorflow as tf
from tensorflow.keras.layers import Dense, Embedding, LSTM, Activation, Dropout
from tensorflow.keras import Model

def get_batch_size():  # size of training set is 8056, number of batches = 8056/128
    return 128

def get_epochs():
    return 50

def solution(input_layer):
    max_len = 150
    max_words = 200
    # inputs = Input(name='inputs', shape=[max_len])
    layer = Embedding(max_words, output_dim=64, input_length=max_len)(input_layer)
    # layer = LSTM(64, return_sequences=True)(input_layer)
    layer = tf.expand_dims(layer, axis=-1)
    layer = LSTM(64, return_sequences=True)(layer)
    layer = Dense(256,)(layer)
    layer = Activation('relu')(layer)
    layer = Dropout(0.5)(layer)
    layer = Dense(5)(layer)
    # layer = Activation('softmax')(layer)
    model = Model(inputs=input_layer, outputs=layer)
    model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
    return model
data.py
import csv
import numpy as np

label_map = {
    0: 'A',
    1: 'B',
    2: 'C',
    3: 'D',
    4: 'E',
}
label_map_inv = dict(map(reversed, label_map.items()))

def load_dataset(dataset_file):
    data = []
    labels = []
    with open(dataset_file, "r", encoding="utf-8") as f:
        data_reader = csv.reader(f, delimiter=",", quotechar='"')
        next(data_reader)
        for lbl, desc in data_reader:
            data.append(desc)
            labels.append(label_map_inv[lbl])
    return np.array(data), np.array(labels)
task.py
import os
import argparse
import logging
import numpy as np
import tensorflow as tf
import tensorflow.keras
import trainer.data as data
import trainer.model as model

def train_model(params):
    (train_data, train_labels) = data.load_dataset("data/train.csv")
    (eval_data, eval_labels) = data.load_dataset("data/eval.csv")
    input_layer = tf.keras.Input(shape=(), name='input_text', dtype=tf.string)
    ml_model = model.solution(input_layer)
    if ml_model is None:
        print("No model found. You need to implement one in model.py")
    else:
        ml_model.fit(train_data, train_labels,
                     batch_size=model.get_batch_size(),
                     epochs=model.get_epochs(),
                     validation_data=(eval_data, eval_labels))
        _ = ml_model.evaluate(eval_data, eval_labels, verbose=1)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    args = parser.parse_args()
    tf_logger = logging.getLogger("tensorflow")
    tf_logger.setLevel(logging.INFO)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(tf_logger.level // 10)
    train_model(args)

ValueError: as_list() is not defined on an unknown TensorShape

I am working through the example from this web page, and here is what I got after running the following:
>>> jobs_train, jobs_test = jobs_df.randomSplit([0.6, 0.4])
>>> zuckerberg_train, zuckerberg_test = zuckerberg_df.randomSplit([0.6, 0.4])
>>> train_df = jobs_train.unionAll(zuckerberg_train)
>>> test_df = jobs_test.unionAll(zuckerberg_test)
>>> from pyspark.ml.classification import LogisticRegression
>>> from pyspark.ml import Pipeline
>>> from sparkdl import DeepImageFeaturizer
>>> featurizer = DeepImageFeaturizer(inputCol="image", outputCol="features", modelName="InceptionV3")
>>> lr = LogisticRegression(maxIter=20, regParam=0.05, elasticNetParam=0.3, labelCol="label")
>>> p = Pipeline(stages=[featurizer, lr])
>>> p_model = p.fit(train_df)
and this appeared:
2018-06-08 20:57:18.985543: I tensorflow/core/platform/cpu_feature_guard.cc:140] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
INFO:tensorflow:Froze 376 variables.
Converted 376 variables to const ops.
Using TensorFlow backend.
Using TensorFlow backend.
INFO:tensorflow:Froze 0 variables.
Converted 0 variables to const ops.
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/spark/python/pyspark/ml/base.py", line 64, in fit
return self._fit(dataset)
File "/opt/spark/python/pyspark/ml/pipeline.py", line 106, in _fit
dataset = stage.transform(dataset)
File "/opt/spark/python/pyspark/ml/base.py", line 105, in transform
return self._transform(dataset)
File "/tmp/spark-74707b69-e8c9-498b-b0f2-b38828e5ad21/userFiles-ca1eb7cf-9785-441d-a098-54b62380bcee/databricks_spark-deep-learning-0.1.0-spark2.1-s_2.11.jar/sparkdl/transformers/named_image.py", line 159, in _transform
File "/opt/spark/python/pyspark/ml/base.py", line 105, in transform
return self._transform(dataset)
File "/tmp/spark-74707b69-e8c9-498b-b0f2-b38828e5ad21/userFiles-ca1eb7cf-9785-441d-a098-54b62380bcee/databricks_spark-deep-learning-0.1.0-spark2.1-s_2.11.jar/sparkdl/transformers/named_image.py", line 222, in _transform
File "/opt/spark/python/pyspark/ml/base.py", line 105, in transform
return self._transform(dataset)
File "/tmp/spark-74707b69-e8c9-498b-b0f2-b38828e5ad21/userFiles-ca1eb7cf-9785-441d-a098-54b62380bcee/databricks_spark-deep-learning-0.1.0-spark2.1-s_2.11.jar/sparkdl/transformers/tf_image.py", line 142, in _transform
File "/tmp/spark-74707b69-e8c9-498b-b0f2-b38828e5ad21/userFiles-ca1eb7cf-9785-441d-a098-54b62380bcee/databricks_tensorframes-0.2.8-s_2.11.jar/tensorframes/core.py", line 211, in map_rows
File "/tmp/spark-74707b69-e8c9-498b-b0f2-b38828e5ad21/userFiles-ca1eb7cf-9785-441d-a098-54b62380bcee/databricks_tensorframes-0.2.8-s_2.11.jar/tensorframes/core.py", line 132, in _map
File "/tmp/spark-74707b69-e8c9-498b-b0f2-b38828e5ad21/userFiles-ca1eb7cf-9785-441d-a098-54b62380bcee/databricks_tensorframes-0.2.8-s_2.11.jar/tensorframes/core.py", line 66, in _add_shapes
File "/tmp/spark-74707b69-e8c9-498b-b0f2-b38828e5ad21/userFiles-ca1eb7cf-9785-441d-a098-54b62380bcee/databricks_tensorframes-0.2.8-s_2.11.jar/tensorframes/core.py", line 35, in _get_shape
File "/home/sulistyo/tensorflow/lib/python3.6/site-packages/tensorflow/python/framework/tensor_shape.py", line 900, in as_list
raise ValueError("as_list() is not defined on an unknown TensorShape.")
ValueError: as_list() is not defined on an unknown TensorShape.
Please kindly help, thanks.
Use the following to read images and create your training and testing sets:
from pyspark.sql.functions import lit
from sparkdl.image import imageIO
img_dir = "/PATH/TO/personalities/"
jobs_df = imageIO.readImagesWithCustomFn(img_dir + "/jobs",decode_f=imageIO.PIL_decode).withColumn("label", lit(1))
zuckerberg_df = imageIO.readImagesWithCustomFn(img_dir + "/zuckerberg", decode_f=imageIO.PIL_decode).withColumn("label", lit(0))
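Once jobs_df and zuckerberg_df are built this way, the rest of the pipeline from your question should be reusable as-is; a minimal sketch of the same steps:
from pyspark.ml.classification import LogisticRegression
from pyspark.ml import Pipeline
from sparkdl import DeepImageFeaturizer

# Same split/union and featurizer/classifier pipeline as in the question, now
# fitted on the DataFrames produced by imageIO.readImagesWithCustomFn above.
jobs_train, jobs_test = jobs_df.randomSplit([0.6, 0.4])
zuckerberg_train, zuckerberg_test = zuckerberg_df.randomSplit([0.6, 0.4])
train_df = jobs_train.unionAll(zuckerberg_train)
test_df = jobs_test.unionAll(zuckerberg_test)

featurizer = DeepImageFeaturizer(inputCol="image", outputCol="features", modelName="InceptionV3")
lr = LogisticRegression(maxIter=20, regParam=0.05, elasticNetParam=0.3, labelCol="label")
p_model = Pipeline(stages=[featurizer, lr]).fit(train_df)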

Problems with combining Keras 2.0 and pymc3

I am trying to combine Keras 2.0 with pymc3 to build a neural network. It is a modification of the code from Thomas Wiecki's Bayesian Deep Learning II.
This is the code I have:
import numpy as np
import pymc3 as pm
import theano
import theano.tensor as T
from keras.layers import Input, Dense
from keras import backend as K
from sklearn import datasets
from sklearn.preprocessing import scale
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_moons
from scipy.stats import mode

X, Y = make_moons(noise=0.2, random_state=0, n_samples=1000)
X = scale(X)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.6)

ann_input = theano.shared(X_train.astype(np.float32))
ann_output = theano.shared(Y_train.astype(np.float32))
print(X_train.shape)
print(Y_train.shape)

class GaussWeights(object):
    def __init__(self):
        self.count = 0

    def __call__(self, shape, name='w', dtype=None):
        return pm.Normal(
            name, mu=0, sd=.1,
            testval=K.random_normal(shape, dtype=dtype),
            shape=shape)

n_hidden = 16

def build_ann(x, y, init):
    b = (T.ones_like(x[:]))
    rows = b.shape.eval()[0]
    cols = b.shape.eval()[1]
    with pm.Model() as m:
        i = Input(tensor=x, shape=(rows, cols))
        layer1 = Dense(16, kernel_initializer=init, activation='tanh')(i)
        layer2 = Dense(1, kernel_initializer=init, activation='sigmoid')(layer1)
        layer2 = layer2.reshape((rows,))
        out = pm.Bernoulli('out', layer2, observed=y)
    return m, out

# m, out = build_ann(ann_input, ann_output)
m, out = build_ann(ann_input, ann_output, GaussWeights())

with m:
    # Run ADVI which returns posterior means, standard deviations, and the evidence lower bound (ELBO)
    ann_input.set_value(X_train.astype(np.float32))
    ann_output.set_value(Y_train.astype(np.float32))
    v_params = pm.variational.advi(n=50000)
    trace = pm.variational.sample_vp(v_params, draws=5000)

# Replace shared variables with testing set
ann_input.set_value(X_test.astype(np.float32))
ann_output.set_value(Y_test.astype(np.float32))

with m:
    ppc = pm.sample_ppc(trace, samples=500)

# Use probability of > 0.5 to assume prediction of class 1
pred = ppc['out'].mean(axis=0) > 0.5
pred_mode = mode(ppc['out'], axis=0).mode[0, :]

print(pred.shape)
print('Accuracy = {}%'.format((Y_test == pred).mean() * 100))
But I get the following error which I don't know how to fix:
Traceback (most recent call last):
File "keras_deep_learning.py", line 50, in <module>
m,out = build_ann(ann_input, ann_output, GaussWeights())
File "keras_deep_learning.py", line 43, in build_ann
layer1 = Dense(16,kernel_initializer=init, activation='tanh')(i)
File "/home/gbenga/.local/lib/python3.5/site-packages/keras/engine/topology.py", line 558, in __call__
self.build(input_shapes[0])
File "/home/gbenga/.local/lib/python3.5/site-packages/keras/layers/core.py", line 827, in build
constraint=self.kernel_constraint)
File "/home/gbenga/.local/lib/python3.5/site-packages/keras/legacy/interfaces.py", line 88, in wrapper
return func(*args, **kwargs)
File "/home/gbenga/.local/lib/python3.5/site-packages/keras/engine/topology.py", line 391, in add_weight
weight = K.variable(initializer(shape), dtype=dtype, name=name)
File "/home/gbenga/.local/lib/python3.5/site-packages/keras/backend/theano_backend.py", line 143, in variable
value = value.eval()
File "/home/gbenga/.local/lib/python3.5/site-packages/theano/gof/graph.py", line 516, in eval
self._fn_cache[inputs] = theano.function(inputs, self)
File "/home/gbenga/.local/lib/python3.5/site-packages/theano/compile/function.py", line 326, in function
output_keys=output_keys)
File "/home/gbenga/.local/lib/python3.5/site-packages/theano/compile/pfunc.py", line 486, in pfunc
output_keys=output_keys)
File "/home/gbenga/.local/lib/python3.5/site-packages/theano/compile/function_module.py", line 1794, in orig_function
output_keys=output_keys).create(
File "/home/gbenga/.local/lib/python3.5/site-packages/theano/compile/function_module.py", line 1446, in __init__
accept_inplace)
File "/home/gbenga/.local/lib/python3.5/site-packages/theano/compile/function_module.py", line 177, in std_fgraph
update_mapping=update_mapping)
File "/home/gbenga/.local/lib/python3.5/site-packages/theano/gof/fg.py", line 180, in __init__
self.__import_r__(output, reason="init")
File "/home/gbenga/.local/lib/python3.5/site-packages/theano/gof/fg.py", line 361, in __import_r__
raise MissingInputError("Undeclared input", variable=variable)
theano.gof.fg.MissingInputError: Undeclared input
Unfortunately, with Keras 2.0 you can no longer use a symbolic initializer for the weights. Try downgrading to Keras 1.2; it will work then.
See the following issues for reference:
https://github.com/fchollet/keras/issues/6546
https://github.com/fchollet/keras/issues/6551
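If you go the downgrade route, the command would look something like this (1.2.2 was the last release in the 1.2 line; any 1.2.x pin should behave the same for this purpose):
pip install keras==1.2.2
You can then confirm the active version with import keras; print(keras.__version__) before rerunning the script.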
