I'm trying to create an AI chatbot in Python. I tried following this tutorial: https://techwithtim.net/tutorials/ai-chatbot/part-1/ but I'm getting a lot of deprecation warnings and a traceback error.
Here's the code:
import json
import random
import tensorflow
import tflearn
import numpy
import sys
import pickle
import nltk
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
nltk.download('punkt')
with open("trainingData.json") as file:
data = json.load(file)
try:
with open("data.pickle", "rb") as f:
words, labels, training, output = pickle.load(f)
except:
words = []
labels = []
docs_x = []
docs_y = []
for intent in data["intents"]:
for pattern in intent["patterns"]:
wrds = nltk.word_tokenize(pattern)
words.extend(wrds)
docs_x.append(wrds)
docs_y.append(intent["tag"])
if intent["tag"] not in labels:
labels.append(intent["tag"])
words = [stemmer.stem(w.lower()) for w in words if w != "?"]
words = sorted(list(set(words)))
labels = sorted(labels)
training = []
output = []
out_empty = [0 for _ in range(len(labels))]
for x, doc in enumerate(docs_x):
bag = []
wrds = [stemmer.stem(w.lower()) for w in doc]
for w in words:
if w in wrds:
bag.append(1)
else:
bag.append(0)
output_row = out_empty[:]
output_row[labels.index(docs_y[x])] = 1
training.append(bag)
output.append(output_row)
training = numpy.array(training)
output = numpy.array(output)
with open("data.pickle", "wb") as f:
pickle.dump((words, labels, training, output), f)
tensorflow.reset_default_graph()
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
model = tflearn.DNN(net)
try:
model.load("model.tflearn")
except:
model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
model.save("model.tflearn")
def bag_of_words(s, words):
bag = [0 for _ in range(len(words))]
s_words = nltk.word_tokenize(s)
s_words = [stemmer.stem(word.lower()) for word in s_words]
for se in s_words:
for i, w in enumerate(words):
if w == se:
bag[i] = 1
return numpy.array(bag)
def chat():
print("Start talking with the bot (type quit to stop)!")
while True:
inp = input("You: ")
if inp.lower() == "quit":
break
results = model.predict([bag_of_words(inp, words)])
results_index = numpy.argmax(results)
tag = labels[results_index]
for tg in data["intents"]:
if tg['tag'] == tag:
responses = tg['responses']
print(random.choice(responses))
chat()
Here are the errors I'm getting. How can I fix the deprecation warnings and the traceback error?
Here's the text of the error:
Run id: VOB3W4
Log directory: /tmp/tflearn_logs/
---------------------------------
Training samples: 20
Validation samples: 0
--
--
Traceback (most recent call last):
  File "script.py", line 91, in <module>
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
  File "/usr/local/lib/python2.7/site-packages/tflearn/models/dnn.py", line 216, in fit
    callbacks=callbacks)
  File "/usr/local/lib/python2.7/site-packages/tflearn/helpers/trainer.py", line 339, in fit
    show_metric)
  File "/usr/local/lib/python2.7/site-packages/tflearn/helpers/trainer.py", line 816, in _train
    tflearn.is_training(True, session=self.session)
  File "/usr/local/lib/python2.7/site-packages/tflearn/config.py", line 95, in is_training
    tf.get_collection('is_training_ops')[0].eval(session=session)
  File "/usr/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 731, in eval
    return _eval_using_default_session(self, feed_dict, self.graph, session)
  File "/usr/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 5579, in _eval_using_default_session
    return session.run(tensors, feed_dict)
  File "/usr/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 950, in run
    run_metadata_ptr)
  File "/usr/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1096, in _run
    raise RuntimeError('Attempted to use a closed Session.')
RuntimeError: Attempted to use a closed Session.
At the start, the file "model.tflearn" doesn't exist, so the try/except should catch the error when the code tries to load this file, and then run fit() and save():
try:
    model.load("model.tflearn")
except:
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")
but it seems this error closes the tf.Session(), so it can't run fit() correctly.
If you remove the try/except with load() and keep only fit() and save(), then it has no problem creating the model and saving it to a file:
model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
model.save("model.tflearn")
After the file "model.tflearn" is created, you can use the try/except with load() again, and it should work as long as you don't delete the model file.
A better solution would be to check whether the file exists. However, tflearn saves the model across a few files ("model.tflearn.index", "model.tflearn.meta" and "model.tflearn.data-00000-of-00001"), so you should check for one of those files instead of "model.tflearn".
Use

import os

if os.path.exists("model.tflearn.meta"):
    model.load("model.tflearn")
else:
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")
instead of

try:
    model.load("model.tflearn")
except:
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")
EDIT: It seems this problem has existed for at least two years: RuntimeError: Attempted to use a closed Session in tflearn
Try doing this instead:

try:
    model.load("model3.tflearn")
except:
    model = tflearn.DNN(net)
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model3.tflearn")
I am trying to make a chatbot from this website: https://hashdork.com/create-a-deep-learning-chatbot-with-python/
However, the code is not working for some reason; it may be because of my Python formatting.
I have tried adding a model.tflearn file, but that didn't work.
I had to remove data.pickle before every single run.
I have no idea what the exception is telling me to do.
import nltk
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
import numpy
import tflearn
import tensorflow
import random
import json
import pickle
with open("intents.json") as file:
data = json.load(file)
try:
with open("data.pickle", "rb") as f:
words, labels, training, output = pickle.load(f)
except:
words = []
labels = []
docs_x = []
docs_y = []
for intent in data["intents"]:
for pattern in intent["patterns"]:
wrds = nltk.word_tokenize(pattern)
words.extend(wrds)
docs_x.append(wrds)
docs_y.append(intent["tag"])
if intent["tag"] not in labels:
labels.append(intent["tag"])
words = [stemmer.stem(w.lower()) for w in words if w != "?"]
words = sorted(list(set(words)))
labels = sorted(labels)
training = []
output = []
out_empty = [0 for _ in range(len(labels))]
for x, doc in enumerate(docs_x):
bag = []
wrds = [stemmer.stem(w.lower()) for w in doc]
for w in words:
if w in wrds:
bag.append(1)
else:
bag.append(0)
output_row = out_empty[:]
output_row[labels.index(docs_y[x])] = 1
training.append(bag)
output.append(output_row)
training = numpy.array(training)
output = numpy.array(output)
with open("data.pickle", "wb") as f:
pickle.dump((words, labels, training, output), f)
tensorflow.compat.v1.reset_default_graph()
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
model = tflearn.DNN(net)
try:
model.load("model.tflearn")
except:
model.fit(training, output, n_epoch=100, batch_size=8, show_metric=True)
model.save("model.tflearn")
def bag_of_words(s, words):
bag = [0 for _ in range(len(words))]
s_words = nltk.word_tokenize(s)
s_words = [stemmer.stem(word.lower()) for word in s_words]
for se in s_words:
for i, w in enumerate(words):
if w == se:
bag[i] = 1
return numpy.array(bag)
def chat():
print("Start talking with the bot (type quit to stop)!")
while True:
inp = input("You: ")
if inp.lower() == "quit":
break
results = model.predict([bag_of_words(inp, words)])
results_index = numpy.argmax(results)
tag = labels[results_index]
for tg in data["intents"]:
if tg['tag'] == tag:
responses = tg['responses']
print(random.choice(responses))
chat()
Terminal:
    tflearn.is_training(True, session=self.session)
  File "/usr/local/lib/python3.7/site-packages/tflearn/config.py", line 95, in is_training
    tf.get_collection('is_training_ops')[0].eval(session=session)
  File "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 913, in eval
    return _eval_using_default_session(self, feed_dict, self.graph, session)
  File "/usr/local/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 5512, in _eval_using_default_session
    return session.run(tensors, feed_dict)
  File "/usr/local/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 958, in run
    run_metadata_ptr)
  File "/usr/local/lib/python3.7/site-packages/tensorflow/python/client/session.py", line 1104, in _run
    raise RuntimeError('Attempted to use a closed Session.')
RuntimeError: Attempted to use a closed Session.
Any thoughts?
I am trying to figure out sentiment classification on movie reviews using BERT, transformers and tensorflow. This is the code I currently have:
def read_dataset(filename, model_name="bert-base-uncased"):
    """Reads a dataset from the specified path and returns sentences and labels"""
    tokenizer = BertTokenizer.from_pretrained(model_name)
    with open(filename, "r", encoding="utf-8") as f:
        lines = f.readlines()
        # preallocate memory for the data
        sents, labels = list(), np.empty((len(lines), 1), dtype=int)

        for i, line in enumerate(lines):
            text, str_label, _ = line.split("\t")
            labels[i] = int(str_label.split("=")[1] == "POS")
            sents.append(text)
    return dict(tokenizer(sents, padding=True, truncation=True, return_tensors="tf")), labels

class BertMLP(tf.keras.Model):
    def __init__(self, embed_batch_size=100, model_name="bert-base-cased"):
        super(BertMLP, self).__init__()
        self.bs = embed_batch_size
        self.model = TFBertModel.from_pretrained(model_name)
        self.classification_head = tf.keras.models.Sequential(
            layers=[
                tf.keras.Input(shape=(self.model.config.hidden_size,)),
                tf.keras.layers.Dense(350, activation="tanh"),
                tf.keras.layers.Dense(200, activation="tanh"),
                tf.keras.layers.Dense(50, activation="tanh"),
                tf.keras.layers.Dense(1, activation="sigmoid", use_bias=False)
            ]
        )

    def call(self, inputs):
        outputs = self.model(inputs)
        return outputs

def evaluate(model, inputs, labels, loss_func):
    mean_loss = tf.keras.metrics.Mean(name="train_loss")
    accuracy = tf.keras.metrics.BinaryAccuracy(name="train_accuracy")

    predictions = model(inputs)
    mean_loss(loss_func(labels, predictions))
    accuracy(labels, predictions)

    return mean_loss.result(), accuracy.result() * 100

if __name__ == "__main__":
    train = read_dataset("datasets/rt-polarity.train.vecs")
    dev = read_dataset("datasets/rt-polarity.dev.vecs")
    test = read_dataset("datasets/rt-polarity.test.vecs")

    mlp = BertMLP()
    mlp.compile(tf.keras.optimizers.SGD(learning_rate=0.01), loss='mse')
    dev_loss, dev_acc = evaluate(mlp, *dev, tf.keras.losses.MeanSquaredError())
    print("Before training:", f"Dev Loss: {dev_loss}, Dev Acc: {dev_acc}")
    mlp.fit(*train, epochs=10, batch_size=10)
    dev_loss, dev_acc = evaluate(mlp, *dev, tf.keras.losses.MeanSquaredError())
    print("After training:", f"Dev Loss: {dev_loss}, Dev Acc: {dev_acc}")
However, when I run this code, I get an error:
Traceback (most recent call last):
  File "C:\Users\home\anaconda3\lib\site-packages\spyder_kernels\py3compat.py", line 356, in compat_exec
    exec(code, globals, locals)
  File "c:\users\home\downloads\mlp.py", line 60, in <module>
    dev_loss, dev_acc = evaluate(mlp, *dev, tf.keras.losses.MeanSquaredError())
  File "c:\users\home\downloads\mlp.py", line 46, in evaluate
    predictions = model(inputs)
  File "C:\Users\home\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 67, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "c:\users\home\downloads\mlp.py", line 39, in call
    outputs = self.model(inputs)
  File "C:\Users\home\anaconda3\lib\site-packages\transformers\modeling_tf_utils.py", line 409, in run_call_with_unpacked_inputs
    return func(self, **unpacked_inputs)
  File "C:\Users\home\anaconda3\lib\site-packages\transformers\models\bert\modeling_tf_bert.py", line 1108, in call
    outputs = self.bert(
  File "C:\Users\home\anaconda3\lib\site-packages\transformers\modeling_tf_utils.py", line 409, in run_call_with_unpacked_inputs
    return func(self, **unpacked_inputs)
  File "C:\Users\home\anaconda3\lib\site-packages\transformers\models\bert\modeling_tf_bert.py", line 781, in call
    embedding_output = self.embeddings(
  File "C:\Users\home\anaconda3\lib\site-packages\transformers\models\bert\modeling_tf_bert.py", line 203, in call
    inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
InvalidArgumentError: Exception encountered when calling layer "embeddings" (type TFBertEmbeddings).

indices[1174,8] = 29550 is not in [0, 28996) [Op:ResourceGather]

Call arguments received:
  • input_ids=tf.Tensor(shape=(1599, 73), dtype=int32)
  • position_ids=None
  • token_type_ids=tf.Tensor(shape=(1599, 73), dtype=int32)
  • inputs_embeds=None
  • past_key_values_length=0
  • training=False
I googled for a while, and I can't find anything conclusive. I am pretty sure it has something to do with this part:
def call(self, inputs):
    outputs = self.model(inputs)
    return outputs
But again, I have tried a lot of different things, including limiting dataset size and installing different versions of transformers and tensorflow, but to no avail. Please let me know what I'm doing wrong. Thank you!
OP was using bert-base-cased for their model, and bert-base-uncased for their tokenizer, causing issues during training when the vocab size of the model and the tokenized data differed.
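A minimal sketch of the fix, assuming the rest of the question's code stays as above: share one model name between read_dataset (which builds the tokenizer) and BertMLP (which loads the weights), so the tokenizer's token ids stay inside the model's vocabulary.

MODEL_NAME = "bert-base-uncased"  # one name for both tokenizer and model

train = read_dataset("datasets/rt-polarity.train.vecs", model_name=MODEL_NAME)
dev = read_dataset("datasets/rt-polarity.dev.vecs", model_name=MODEL_NAME)

# BertMLP's default was "bert-base-cased" (vocab size 28996), while the
# tokenizer defaulted to "bert-base-uncased" (vocab size 30522), which is
# why ids like 29550 fell outside [0, 28996).
mlp = BertMLP(model_name=MODEL_NAME)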
I'm trying to load a saved Tensorflow ELMO model in a different function than I trained it, because I want to do multiple predictions with the model without having to train it every time. My (simplified) code is as follows:
(builder.py)
from word_classifier import train_word_classifier, predict_labels

def builder(lines):
    train_word_classifier()
    for lst in lines:
        print('PRED_LABELS: ', predict_labels(lst))
(word_classifier.py)
import pandas as pd
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.python.keras import backend as K
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model, Sequential, model_from_json
from tensorflow.keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional, Lambda, add, Input

def train_word_classifier():
    """
    Input data preparation excluded for readability
    """
    sess = tf.compat.v1.Session()
    K.set_session(sess)
    elmo_model = hub.Module("https://tfhub.dev/google/elmo/3", trainable=True)
    init = tf.compat.v1.global_variables_initializer()
    sess.run(init)

    input_text = Input(shape=(MAX_LEN,), dtype=tf.string)

    def elmo_embedding(inData):
        return \
            elmo_model(inputs={"tokens": tf.squeeze(tf.cast(inData, tf.string)),
                               "sequence_len": tf.constant(BATCH_SIZE * [MAX_LEN])},
                       signature="tokens", as_dict=True)["elmo"]

    embedding = Lambda(lambda text, : elmo_embedding(text), output_shape=(MAX_LEN, 1024))(input_text, )
    x = Bidirectional(LSTM(units=LSTM_UNITS, return_sequences=LSTM_RETURN_SEQ,
                           recurrent_dropout=LSTM_RO_DROPOUT, dropout=LSTM_DROPOUT))(embedding)
    x_rnn = Bidirectional(LSTM(units=LSTM_UNITS, return_sequences=LSTM_RETURN_SEQ,
                               recurrent_dropout=LSTM_RO_DROPOUT, dropout=LSTM_DROPOUT))(x)
    x = add([x, x_rnn])  # residual connection to the first biLSTM
    out = TimeDistributed(Dense(n_tags, activation="softmax"))(x)

    model = Model(input_text, out)
    model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])

    line_count_training_data = count_lines_in_file(CLASSIFIER_SENTENCE_FILE, 10)
    size_train, size_test = get_count_for_batch_train_test_data(line_count_training_data)
    print(size_train)
    mode_dict = {
        "train": size_train,
        "test": size_test,
    }
    x_tr, x_val = x_tr[:mode_dict["train"] * BATCH_SIZE], x_tr[-mode_dict["test"] * BATCH_SIZE:]
    y_tr, y_val = y_tr[:mode_dict["train"] * BATCH_SIZE], y_tr[-mode_dict["test"] * BATCH_SIZE:]
    y_tr = y_tr.reshape(y_tr.shape[0], y_tr.shape[1], 1)
    y_val = y_val.reshape(y_val.shape[0], y_val.shape[1], 1)

    history = model.fit(np.array(x_tr),
                        y_tr,
                        validation_data=(np.array(x_val), y_val),
                        batch_size=BATCH_SIZE,
                        epochs=NUM_EPOCHS,
                        verbose=VERBOSE_VALUE)

    model_json = model.to_json()
    with open("resources/SavedModel/word_classifier/model.json", "w") as json_file:
        json_file.write(model_json)
    # serialize weights to HDF5
    model.save_weights("resources/SavedModel/word_classifier/model.h5")

def predict_labels(input_data_list):
    with open('resources/SavedModel/word_classifier/model.json', 'r') as json_file:
        loaded_model_json = json_file.read()

    def elmo_embedding(inData):
        return \
            elmo_model(inputs={"tokens": tf.squeeze(tf.cast(inData, tf.string)),
                               "sequence_len": tf.constant(BATCH_SIZE * [MAX_LEN])},
                       signature="tokens", as_dict=True)["elmo"]

    loaded_model = tf.keras.models.model_from_json(loaded_model_json, custom_objects={'elmo_embedding': elmo_embedding})
    # load weights into new model
    loaded_model.load_weights("resources/SavedModel/word_classifier/model.h5")
    print("Loaded model from disk")
In the end, after training the model, this gives me the error "TypeError: 'str' object is not callable" with the following traceback:
Traceback (most recent call last):
  File "usc_coordinator.py", line 62, in <module>
    run_usc_coordinator(fIn, fOut, mode)
  File "usc_coordinator.py", line 32, in run_usc_coordinator
    user_story_builder(fast_mode, file_in)
  File "/home/ubuntu/PA/PA_AI4US/PythonVersion/src/builder.py", line 45, in builder
    print('PRED_LABELS: ', predict_labels(lst))
  File "/home/ubuntu/PA/PA_AI4US/PythonVersion/src/word_classifier.py", line 161, in predict_labels
    loaded_model = tf.keras.models.model_from_json(loaded_model_json, custom_objects={'elmo_embedding': elmo_embedding})
  File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/saving/model_config.py", line 122, in model_from_json
    return deserialize(config, custom_objects=custom_objects)
  File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/layers/serialization.py", line 171, in deserialize
    return generic_utils.deserialize_keras_object(
  File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/utils/generic_utils.py", line 354, in deserialize_keras_object
    return cls.from_config(
  File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/functional.py", line 616, in from_config
    input_tensors, output_tensors, created_layers = reconstruct_from_config(
  File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/functional.py", line 1214, in reconstruct_from_config
    process_node(layer, node_data)
  File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/functional.py", line 1162, in process_node
    output_tensors = layer(input_tensors, **kwargs)
  File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer_v1.py", line 776, in __call__
    outputs = call_fn(cast_inputs, *args, **kwargs)
  File "/home/ubuntu/.local/lib/python3.8/site-packages/tensorflow/python/keras/layers/core.py", line 903, in call
    result = self.function(inputs, **kwargs)
  File "/home/ubuntu/PA/PA_AI4US/PythonVersion/src/word_classifier.py", line 101, in <lambda>
    embedding = Lambda(lambda text, : elmo_embedding(text), output_shape=(MAX_LEN, 1024))(input_text, )
TypeError: 'str' object is not callable
My versions are:
Python 3.8.10
Keras 2.3.0
Tensorflow 2.3.1
Tensorflow-hub 0.10.0
My guess is that the error is caused by the variable input_text being set to dtype tf.string. However, I don't know what to do about that without breaking the training sequence.
I hope that somebody can help!
It is a bug in tensorflow v2.3.1:
Loading a model with a Lambda layer causes a 'str' object is not callable exception #46659
https://github.com/tensorflow/tensorflow/issues/46659
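Until you can upgrade to a TensorFlow release where that issue is fixed, one commonly suggested workaround (a sketch against the question's code, not a verified fix) is to avoid serializing an anonymous lambda and pass the named function to the Lambda layer directly, so the name stored in the JSON config matches the key in custom_objects:

# in train_word_classifier(), instead of wrapping elmo_embedding in an
# anonymous lambda (whose name does not round-trip through model_from_json):
embedding = Lambda(elmo_embedding, output_shape=(MAX_LEN, 1024))(input_text)

# in predict_labels(), the same name then resolves on load:
loaded_model = tf.keras.models.model_from_json(
    loaded_model_json, custom_objects={'elmo_embedding': elmo_embedding})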
You need to convert the string to something else. For example, if I have to calculate 2 + 2, this would be the wrong code:
x = "2"
print(x + 2)
This shows:

Traceback (most recent call last):
  File "main.py", line 2, in <module>
    print(x + 2)
TypeError: can only concatenate str (not "int") to str
So what do we do? Convert it to something else:

x = "2"
y = int(x)
print(y + 2)

Output: 4

(int converts to an integer, float to a floating-point number.)
Edit: You cannot convert everything to another type. For example:

x = "This is an example"
y = int(x)

This would show:

Traceback (most recent call last):
  File "main.py", line 2, in <module>
    y = int(x)
ValueError: invalid literal for int() with base 10: 'This is an example'
# For a string to a list (array)
x = "1-2"
y = x.split("-")
print(y)

y would be ["1", "2"]
(If you do not know the short forms, you can use the full forms.) In your traceback, at line 32, 'str' is not callable means a string is being called like a function: fast_mode and file_in would be strings there. The same applies to the other lines.
I'm trying to adapt the TensorFlow tutorial code to my own dataset, which is one-dimensional:

N_of_features = 3000
N_of_samples = 4500

It exists as a Matlab file. After applying the code below I got the error shown at the end. I'm looking for any suggestions to solve this problem, thanks.

The problem mainly occurs when training the classifier (the "# Train the Model." / classifier.train section).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import scipy.io
import pandas as pd
import tensorflow as tf
import numpy as np

file = "train.mat"
myfile1 = scipy.io.loadmat(file)
train = myfile1['train']

file = "test.mat"
myfile1 = scipy.io.loadmat(file)
test = myfile1['test']

train_np = np.asarray(train, np.float32)
test_np = np.asarray(test, np.float32)

n = train_np.shape[0]
p = train_np.shape[1]

train = dict(enumerate(train_np, 1))
test = dict(enumerate(test_np, 1))

train = pd.DataFrame.from_dict(train, orient='index')
test = pd.DataFrame.from_dict(test, orient='index')

def load_data(y_name=p-1):
    train_x, train_y = train, train.pop(y_name)
    test_x, test_y = test, test.pop(y_name)
    train_y = train_y.astype(np.int64)
    test_y = test_y.astype(np.int64)
    return (train_x, train_y), (test_x, test_y)

def train_input_fn(features, labels, batch_size):
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    dataset = dataset.shuffle(1000).repeat().batch(batch_size)
    return dataset

def eval_input_fn(features, labels, batch_size):
    features = dict(features)
    if labels is None:
        # No labels, use only features.
        inputs = features
    else:
        inputs = (features, labels)

    # Convert the inputs to a Dataset.
    dataset = tf.data.Dataset.from_tensor_slices(inputs)

    # Batch the examples
    assert batch_size is not None, "batch_size must not be None"
    dataset = dataset.batch(batch_size)

    # Return the dataset.
    return dataset

parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', default=100, type=int, help='batch size')
parser.add_argument('--train_steps', default=1000, type=int,
                    help='number of training steps')

def main(argv):
    args = parser.parse_args(argv[1:])
    (train_x, train_y), (test_x, test_y) = load_data()

    # Feature columns describe how to use the input.
    my_feature_columns = []
    for key in train_x.keys():
        my_feature_columns.append(tf.feature_column.numeric_column(key=key))

    # Build 2 hidden layer DNN with 10, 10 units respectively.
    classifier = tf.estimator.DNNClassifier(
        feature_columns=my_feature_columns,
        # Two hidden layers of 10 nodes each.
        hidden_units=[10, 10],
        # The model must choose between 2 classes.
        n_classes=2)

    # Train the Model.
    classifier.train(
        input_fn=lambda: train_input_fn(train_x, train_y, args.batch_size),
        steps=args.train_steps)

    # Evaluate the model.
    eval_result = classifier.evaluate(
        input_fn=lambda: eval_input_fn(test_x, test_y, args.batch_size))

    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

if __name__ == '__main__':
    tf.logging.set_verbosity(tf.logging.INFO)
    tf.app.run(main)
And I got this error
File "C:/Users/77781455/AppData/Local/Programs/Python/Python36/Scripts/models/samples /core/get_started/update_pre.py", line 138, in <module>
tf.app.run(main)
File "C:\Users\77781455\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\platform\app.py", line 124, in run
_sys.exit(main(argv))
File "C:/Users/77781455/AppData/Local/Programs/Python/Python36/Scripts/models/samples/core/get_started/update_pre.py", line 124, in main
steps=args.train_steps)
File "C:\Users\77781455\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\estimator\estimator.py", line 314, in train
loss = self._train_model(input_fn, hooks, saving_listeners)
File "C:\Users\77781455\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\estimator\estimator.py", line 743, in _train_model
features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
File "C:\Users\77781455\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\estimator\estimator.py", line 725, in _call_model_fn
model_fn_results = self._model_fn(features=features, **kwargs)
File "C:\Users\77781455\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\estimator\canned\dnn.py", line 324, in _model_fn
config=config)
File "C:\Users\77781455\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\estimator\canned\dnn.py", line 176, in _dnn_model_fn
logits = logit_fn(features=features, mode=mode)
File "C:\Users\77781455\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\estimator\canned\dnn.py", line 90, in dnn_logit_fn
features=features, feature_columns=feature_columns)
File "C:\Users\77781455\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\feature_column\feature_column.py", line 280, in input_layer
trainable, cols_to_vars)
File "C:\Users\77781455\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\feature_column\feature_column.py", line 193, in _internal_input_layer
None, default_name=column._var_scope_name): # pylint: disable=protected-access
File "C:\Users\77781455\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\ops\variable_scope.py", line 1929, in __enter__
unique_default_name = _get_unique_variable_scope(self._default_name)
File "C:\Users\77781455\AppData\Local\Programs\Python\Python36\lib\site- packages\tensorflow\python\ops\variable_scope.py", line 1656, in _get_unique_variable_scope
name = current_scope.name + "/" + prefix if current_scope.name else prefix
TypeError: must be str, not int
The answer to this problem for me was to set the column names to strings, as they were defaulting to integers:
df.columns = df.columns.astype(str)
This is discussed here: Tensorflow TypeError: Can't convert 'numpy.int64' object to str implicitly
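Applied to the question's code (a sketch; the variable names follow the snippet above), the conversion goes right after the DataFrames are built and before the feature columns are created:

train = pd.DataFrame.from_dict(train, orient='index')
test = pd.DataFrame.from_dict(test, orient='index')

# numeric_column keys must be strings; from_dict produced integer column labels
train.columns = train.columns.astype(str)
test.columns = test.columns.astype(str)

# note: the label column popped in load_data() must then also be addressed
# as a string, e.g. y_name=str(p-1)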
I'm modifying code from this GitHub repository (https://github.com/hehefan/Video-Classification) so it can accept my own input. When I run it one video at a time for 5 epochs, it runs with no problem, but when I try to run multiple videos in succession, it runs into a MemoryError at the sess.run() line.
Code:
import sys
import os
import numpy as np
import tensorflow as tf
import gzip
#import cPickle
import _pickle as cPickle
import random

from config import FLAGS
from models import DynamicRNN
from models import AveragePooling

filename = ['D2N2Sur', 'H2N2A', 'H2N2C', 'H2N2D', 'H2N2S', 'N2A', 'N2C', 'N2D', 'N2H', 'N2S', 'N2Sur', 'S2N2H']

# TRAINING LABEL
batch_label = list(range(12))

# DATA PROCESSING
data = []
batch_length = []
for name in filename:
    # READ DATA
    counter = 0
    frame = 0
    video = []
    l = ""
    f = open("Train1/" + name + ".txt", "r")
    for line in f:
        l = l + line[:-1]
        counter += 1
        if (counter == 365):
            l = list(l)
            video.append(l)
            l = ""
            counter = 0
            frame += 1

    # MAKE SURE ALL VIDEOS HAVE THE SAME LENGTH
    # PAD BY 0
    frame = FLAGS.max_video_length - frame
    for number in range(frame):
        video.append([0] * FLAGS.feature_size)

    # APPEND VIDEO TO DATA
    data.append(video)
    batch_length.append(FLAGS.max_video_length)

training_steps_per_epoch = len(data) // FLAGS.batch_size

if not os.path.exists(FLAGS.checkpoint_dir):
    os.makedirs(FLAGS.checkpoint_dir)

model = AveragePooling(feature_size=FLAGS.feature_size, max_video_length=FLAGS.max_video_length,
                       num_classes=FLAGS.num_classes, cell_size=FLAGS.size, use_lstm=FLAGS.use_lstm,
                       learning_rate=FLAGS.learning_rate, learning_rate_decay_factor=FLAGS.learning_rate_decay_factor,
                       min_learning_rate=FLAGS.min_learning_rate, training_steps_per_epoch=training_steps_per_epoch,
                       max_gradient_norm=FLAGS.max_gradient_norm, keep_prob=FLAGS.keep_prob, is_training=True)

with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
        model.saver.restore(sess, ckpt.model_checkpoint_path)
        step = int(ckpt.model_checkpoint_path.split('-')[1])
    else:
        sess.run(tf.global_variables_initializer())
        step = 0

    for epoch in range(1, FLAGS.num_epochs + 1):
        random.shuffle(data)
        batch_feature = []
        batch_feature.append(data)
        feed_dict = {model.frame_feature_ph: batch_feature, model.video_length_ph: batch_length, model.video_label_ph: batch_label}
        loss, _ = sess.run([model.loss, model.train_op], feed_dict=feed_dict)
        step += 1
        if step % FLAGS.steps_per_checkpoint == 0:
            checkpoint_path = os.path.join(FLAGS.checkpoint_dir, "ckpt")
            model.saver.save(sess, checkpoint_path, global_step=model.global_step)
            print("%5d: %3d, %.3f" % (step, epoch, loss))
            sys.stdout.flush()
Error:
Traceback (most recent call last):
  File "/root/Documents/EmotionRecognition/masstrain.py", line 114, in <module>
    loss, _ = sess.run([model.loss, model.train_op], feed_dict=feed_dict)
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py", line 895, in run
    run_metadata_ptr)
  File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py", line 1093, in _run
    np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
  File "/usr/local/lib/python3.5/dist-packages/numpy/core/numeric.py", line 482, in asarray
    return array(a, dtype, copy=False, order=order)
MemoryError
Process finished with exit code 1
Anyone have a clue about this?