I am getting "WARNING:tensorflow:From ..." in Python

I am learning ML from AI Adventures, but I have a problem with this code:
WARNING:tensorflow:From Ai1.py:12: load_csv_with_header (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.data instead.
Traceback (most recent call last):
File "Ai1.py", line 12, in <module>
target_dtype=np.int)
File "C:\Users\Ardit\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\util\deprecation.py", line 324, in new_func
return func(*args, **kwargs)
File "C:\Users\Ardit\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\contrib\learn\python\learn\datasets\base.py", line 53, in load_csv_with_header
header = next(data_file)
File "C:\Users\Ardit\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\lib\io\file_io.py", line 220, in __next__
return self.next()
File "C:\Users\Ardit\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\lib\io\file_io.py", line 214, in next
retval = self.readline()
File "C:\Users\Ardit\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\lib\io\file_io.py", line 184, in readline
return self._prepare_value(self._read_buf.ReadLineAsString())
File "C:\Users\Ardit\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\lib\io\file_io.py", line 100, in _prepare_value
return compat.as_str_any(val)
File "C:\Users\Ardit\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\util\compat.py", line 107, in as_str_any
return as_str(value)
File "C:\Users\Ardit\AppData\Local\Programs\Python\Python36\lib\site-packages\tensorflow\python\util\compat.py", line 80, in as_text
return bytes_or_text.decode(encoding)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 8: invalid start byte
I am using Python 3.6.
Can anyone help me?

Sorry, here is the code
import tensorflow as tf
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
# Data files
IRIS_TRAINING = "iris_training.csv"
IRIS_TEST = "iris_test.csv"
# Load datasets.
training_set = base.load_csv_with_header(filename=IRIS_TRAINING,
features_dtype=np.float32,
target_dtype=np.int)
test_set = base.load_csv_with_header(filename=IRIS_TEST,
features_dtype=np.float32,
target_dtype=np.int)
print(training_set.data)
print(training_set.target)

Related

anaconda navigator stuck on loading applications

(base) C:\Windows\System32>anaconda-navigator
2022-10-25 21:01:52,124 - ERROR init.global_exception_logger:19
'utf-8' codec can't decode byte 0xbb in position 0: invalid start byte
Traceback (most recent call last):
File "D:\anaconda3\lib\site-packages\anaconda_navigator\widgets\main_window_init_.py", line 497, in setup
self.post_setup(conda_data=output)
File "D:\anaconda3\lib\site-packages\anaconda_navigator\widgets\main_window_init_.py", line 525, in post_setup
self.tab_home.setup(conda_data)
File "D:\anaconda3\lib\site-packages\anaconda_navigator\widgets\tabs\home.py", line 253, in setup
self.update_applications()
File "D:\anaconda3\lib\site-packages\anaconda_navigator\widgets\tabs\home.py", line 292, in update_applications
self.api.process_apps(self._applications, prefix=self.current_prefix).values(),
File "D:\anaconda3\lib\site-packages\anaconda_navigator\api\anaconda_api.py", line 561, in process_apps
collected_applications: external_apps.ApplicationCollection = external_apps.get_applications(
File "D:\anaconda3\lib\site-packages\anaconda_navigator\api\external_apps_init.py", line 49, in get_applications
apps: typing.Sequence[typing.Union[BaseApp, AppPatch]] = config_utils.load_configuration(context=context)
File "D:\anaconda3\lib\site-packages\anaconda_navigator\api\external_apps\config_utils.py", line 217, in load_configuration
return apply_configuration(
File "D:\anaconda3\lib\site-packages\anaconda_navigator\api\external_apps\config_utils.py", line 198, in apply_configuration
addition: typing.Union[None, base.BaseApp, base.AppPatch] = base.BaseApp.parse_configuration(
File "D:\anaconda3\lib\site-packages\anaconda_navigator\api\external_apps\base.py", line 233, in parse_configuration
return target_cls._parse_configuration( # pylint: disable=protected-access
File "D:\anaconda3\lib\site-packages\anaconda_navigator\api\external_apps\base.py", line 458, in _parse_configuration
result: BaseInstallableApp = BaseInstallableApp(
File "", line 17, in init
self.attrs_post_init()
File "D:\anaconda3\lib\site-packages\anaconda_navigator\api\external_apps\base.py", line 378, in attrs_post_init
for location in self._detector(context=context):
File "D:\anaconda3\lib\site-packages\anaconda_navigator\api\external_apps\bundle\vscode_utils.py", line 58, in call
stdout, _, _ = conda_launch_utils.run_process([application.executable, '--version'])
File "D:\anaconda3\lib\site-packages\anaconda_navigator\utils\conda\launch.py", line 45, in run_process
stdout = ansi_utlils.escape_ansi(raw_stdout.decode())
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xbb in position 0: invalid start byte
Can anyone help me solve this issue?

(Casting) errors using extract_(relevant_)features from tsfresh

Trying out the Python package tsfresh, I run into issues in the first steps. Given a series, how do I (automatically) make features for it? This snippet produces different errors based on which part I try.
import tsfresh
import pandas as pd
import numpy as np
#tfX, tfy = tsfresh.utilities.dataframe_functions.make_forecasting_frame(pd.Series(np.random.randn(1000)/50), kind='float64', max_timeshift=50, rolling_direction=1)
#rf = tsfresh.extract_relevant_features(tfX, y=tfy, n_jobs=1, column_id='id')
tfX, tfy = tsfresh.utilities.dataframe_functions.make_forecasting_frame(pd.Series(np.random.randn(1000)/50), kind=1, max_timeshift=50, rolling_direction=1)
rf = tsfresh.extract_relevant_features(tfX, y=tfy, n_jobs=1, column_id='id')
The errors are in the first case
""" Traceback (most recent call last): File "C:\Users\user\Anaconda3\envs\env1\lib\multiprocessing\pool.py", line
119, in worker
result = (True, func(*args, **kwds)) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\tsfresh\utilities\distribution.py",
line 38, in _function_with_partly_reduce
results = list(itertools.chain.from_iterable(results)) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\tsfresh\utilities\distribution.py",
line 37, in
results = (map_function(chunk, **kwargs) for chunk in chunk_list) File
"C:\Users\user\Anaconda3\envs\env1\lib\site-packages\tsfresh\feature_extraction\extraction.py",
line 358, in _do_extraction_on_chunk
return list(_f()) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\tsfresh\feature_extraction\extraction.py",
line 350, in _f
result = [("", func(data))] File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\tsfresh\feature_extraction\feature_calculators.py",
line 193, in variance_larger_than_standard_deviation
y = np.var(x) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\numpy\core\fromnumeric.py",
line 3157, in var
**kwargs) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\numpy\core_methods.py",
line 110, in _var
arrmean, rcount, out=arrmean, casting='unsafe', subok=False) TypeError: unsupported operand type(s) for /: 'str' and 'int' """
and in the second case
""" Traceback (most recent call last): File
"C:\Users\user\Anaconda3\envs\env1\lib\multiprocessing\pool.py", line
119, in worker
result = (True, func(*args, **kwds)) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\tsfresh\utilities\distribution.py",
line 38, in _function_with_partly_reduce
results = list(itertools.chain.from_iterable(results)) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\tsfresh\utilities\distribution.py",
line 37, in
results = (map_function(chunk, **kwargs) for chunk in chunk_list) File
"C:\Users\user\Anaconda3\envs\env1\lib\site-packages\tsfresh\feature_extraction\extraction.py",
line 358, in _do_extraction_on_chunk
return list(_f()) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\tsfresh\feature_extraction\extraction.py",
line 345, in _f
result = func(data, param=parameter_list) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\tsfresh\feature_extraction\feature_calculators.py",
line 1752, in friedrich_coefficients
coeff = _estimate_friedrich_coefficients(x, m, r) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\tsfresh\feature_extraction\feature_calculators.py",
line 145, in _estimate_friedrich_coefficients
result.dropna(inplace=True) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\pandas\core\frame.py",
line 4598, in dropna
result = self.loc(axis=axis)[mask] File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\pandas\core\indexing.py",
line 1500, in getitem
return self._getitem_axis(maybe_callable, axis=axis) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\pandas\core\indexing.py",
line 1859, in _getitem_axis
if is_iterator(key): File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\pandas\core\dtypes\inference.py",
line 157, in is_iterator
return hasattr(obj, 'next') File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\pandas\core\generic.py",
line 5065, in getattr
if self._info_axis._can_hold_identifiers_and_holds_name(name): File
"C:\Users\user\Anaconda3\envs\env1\lib\site-packages\pandas\core\indexes\base.py",
line 3984, in _can_hold_identifiers_and_holds_name
return name in self File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\pandas\core\indexes\category.py",
line 327, in contains
return contains(self, key, container=self._engine) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\pandas\core\arrays\categorical.py",
line 188, in contains
loc = cat.categories.get_loc(key) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\pandas\core\indexes\interval.py",
line 770, in get_loc
start, stop = self._find_non_overlapping_monotonic_bounds(key) File
"C:\Users\user\Anaconda3\envs\env1\lib\site-packages\pandas\core\indexes\interval.py",
line 717, in _find_non_overlapping_monotonic_bounds
start = self._searchsorted_monotonic(key, 'left') File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\pandas\core\indexes\interval.py",
line 681, in _searchsorted_monotonic
return sub_idx._searchsorted_monotonic(label, side) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\pandas\core\indexes\base.py",
line 4755, in _searchsorted_monotonic
return self.searchsorted(label, side=side) File "C:\Users\user\Anaconda3\envs\env1\lib\site-packages\pandas\core\base.py",
line 1501, in searchsorted
return self._values.searchsorted(value, side=side, sorter=sorter) TypeError: Cannot cast array data from dtype('float64') to
dtype('
np.__version__ and tsfresh.__version__ are ('1.15.4', 'unknown'). I installed tsfresh using conda, probably from conda-forge. I am on Windows 10. Using another kernel with np.__version__, tsfresh.__version__ of ('1.15.4', '0.11.2') led to the same results.
Trying the first couple of cells from timeseries_forecasting_basic_example.ipynb yields the casting error as well.
Fixed it. Either the version on conda(-forge) or one of the dependencies was the issue. So using "conda uninstall tsfresh", "conda install patsy future six tqdm" and "pip install tsfresh" combined did the trick.

TensorBoard Metadata UnicodeDecodeError

I have a TensorBoard embedding that is created and saved like this from a gensim Doc2Vec model:
embedding = model.docvecs.vectors_docs
tf.reset_default_graph()
sess = tf.InteractiveSession()
X = tf.Variable([0.0], name='embedding')
place = tf.placeholder(tf.float32, shape=embedding.shape)
set_x = tf.assign(X, place, validate_shape=False)
sess.run(tf.global_variables_initializer())
sess.run(set_x, feed_dict={place: embedding})
summary_writer = tf.summary.FileWriter('log', sess.graph)
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = 'embedding:0'
embedding_conf.metadata_path = os.path.join('log','metadata.tsv')
projector.visualize_embeddings(summary_writer, config)
saver = tf.train.Saver([X])
saver.save(sess, os.path.join('log', 'model.ckpt'), 1)
Results in this error:
TensorBoard 1.5.1 at http://COMPUTER_NAME:6006 (Press CTRL+C to quit)
E0222 17:15:20.231085 Thread-1 _internal.py:88] Error on request:
Traceback (most recent call last):
File "c:\users\user\_installed\anaconda\envs\docsim\lib\site-packages\werkzeug
\serving.py", line 270, in run_wsgi
execute(self.server.app)
File "c:\users\user\_installed\anaconda\envs\docsim\lib\site-packages\werkzeug
\serving.py", line 258, in execute
application_iter = app(environ, start_response)
File "c:\users\user\_installed\anaconda\envs\docsim\lib\site-packages\tensorbo
ard\backend\application.py", line 271, in __call__
return self.data_applications[clean_path](environ, start_response)
File "c:\users\user\_installed\anaconda\envs\docsim\lib\site-packages\werkzeug
\wrappers.py", line 308, in application
resp = f(*args[:-2] + (request,))
File "c:\users\user\_installed\anaconda\envs\docsim\lib\site-packages\tensorbo
ard\plugins\projector\projector_plugin.py", line 514, in _serve_metadata
for line in f:
File "c:\users\user\_installed\anaconda\envs\docsim\lib\site-packages\tensorfl
ow\python\lib\io\file_io.py", line 214, in __next__
return self.next()
File "c:\users\user\_installed\anaconda\envs\docsim\lib\site-packages\tensorfl
ow\python\lib\io\file_io.py", line 208, in next
retval = self.readline()
File "c:\users\user\_installed\anaconda\envs\docsim\lib\site-packages\tensorfl
ow\python\lib\io\file_io.py", line 178, in readline
return self._prepare_value(self._read_buf.ReadLineAsString())
File "c:\users\user\_installed\anaconda\envs\docsim\lib\site-packages\tensorfl
ow\python\lib\io\file_io.py", line 94, in _prepare_value
return compat.as_str_any(val)
File "c:\users\user\_installed\anaconda\envs\docsim\lib\site-packages\tensorfl
ow\python\util\compat.py", line 106, in as_str_any
return as_str(value)
File "c:\users\user\_installed\anaconda\envs\docsim\lib\site-packages\tensorfl
ow\python\util\compat.py", line 84, in as_text
return bytes_or_text.decode(encoding)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x92 in position 84: invalid
start byte
When I remove the embedding_conf.metadata_path = os.path.join('log','metadata.tsv') line, no error occurs. In addition, I can then select the very same metadata file that TensorFlow was attempting to bind to the embedding when the error occurred.
Why is this error occurring?
Is it possible that the data inside your metadata.tsv file isn't UTF-8 encoded?

utf-8 encoding error in seq2seq model

Hi, I'm working on language translation using Keras. I have a text file with English text and a file with Hindi text.
I'm facing a "UnicodeDecodeError", and I believe it may be because it is unable to convert non-Unicode text to Unicode.
Please let me know how to go about it. The GitHub link is below:
https://github.com/shashankg7/Seq2Seq/tree/master/seq2seq
Code Snippet:
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import codecs
import pdb
import numpy as np
#from utils import preprocess_text, text2seq_generator
def preprocess_text(file_path_src, file_path_tar, max_feats):
f_src = open(file_path_src)
f_tar = open(file_path_tar)
vocab = defaultdict(int)
freq_src = defaultdict(int)
freq_tar = defaultdict(int)
sents_src = [line.rstrip() for line in f_src.readlines()]
sents_tar = [line.rstrip() for line in f_tar.readlines()]
def preprocess(self):
# Preprocessing source and target text sequence files
self.vocab_src, self.vocab_tar, self.sents_src, self.sents_tar =
preprocess_text(self.path_src, self.path_tar, self.max_feat)
if __name__ == "__main__":
pre = preprocess('C:\\Users\\anagha\\Desktop\\Language-Translation\\Seq2Seq-master\\Seq2Seq-master\\seq2seq\\training.hi-en.hi', 'C:\\Users\\anagha\\Desktop\\Language-Translation\\Seq2Seq-master\\Seq2Seq-master\\seq2seq\\training.hi-en.en', 5500, 15)
pre.preprocess()
for e in range(1):
print("epoch no %d"%e)
for X,Y in pre.gen_batch():
print(X)
Error :
Using TensorFlow backend.
Traceback (most recent call last):
File "C:\Users\anagha\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2898, in run_code
self.showtraceback()
File "C:\Users\anagha\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 1807, in showtraceback
self.showsyntaxerror(filename)
File "C:\Users\anagha\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 1864, in showsyntaxerror
stb = self.SyntaxTB.structured_traceback(etype, value, [])
File "C:\Users\anagha\Anaconda3\lib\site-packages\IPython\core\ultratb.py", line 1441, in structured_traceback
newtext = ulinecache.getline(value.filename, value.lineno)
File "C:\Users\anagha\Anaconda3\lib\linecache.py", line 16, in getline
lines = getlines(filename, module_globals)
File "C:\Users\anagha\Anaconda3\lib\linecache.py", line 47, in getlines
return updatecache(filename, module_globals)
File "C:\Users\anagha\Anaconda3\lib\linecache.py", line 137, in updatecache
lines = fp.readlines()
File "C:\Users\anagha\Anaconda3\lib\codecs.py", line 321, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa1 in position 7588: invalid start byte

When I use to_categorical, I get a MemoryError

When I use an LSTM for multi-class labeling, I use y_train as my label input.
The code is below:
y_train = yuantrain['LOCF']
labels = to_categorical(np.array(y_train),286)
The error message is:
Traceback (most recent call last):
File "<ipython-input-55-4ae3f21f520f>", line 1, in <module>
labels = to_categorical(np.array(y_train))
File "C:\ProgramData\Anaconda2\lib\site-packages\keras\utils\np_utils.py",
line 24, in to_categorical
categorical = np.zeros((n, num_classes))
MemoryError
Below are the first five values of y_train:
In [65]:
y_train[0:5]
Out[65]:
0 620245
1 282
2 500004
3 620193
4 60119
Name: LOCF, dtype: int64
What I have tried:
# biao qian chu li
# 处理标签为二进制,以及思考二进制的解码
labels = yuantrain["LOCF"].drop_duplicates()
#labels is the y_train's unique label
num_labels = len(labels) # (all of the unique labels)
one_hot = np.zeros((num_labels, num_labels), int)
np.fill_diagonal(one_hot, 1)
label_dict = dict(zip(labels, one_hot))
y_train = yuantrain['LOCF']
y_train = y_train.apply(lambda y: label_dict[y])
But I feel this is not easy for me, since I would then have to decode the labels in my own way.
When I use my one-hot approach, Keras also raises an error, shown below:
Train...
ERROR (theano.gof.opt): SeqOptimizer apply <theano.tensor.opt.FusionOptimizer object at 0x000000000D87B0F0>
ERROR (theano.gof.opt): Traceback:
ERROR (theano.gof.opt): Traceback (most recent call last):
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\gof\opt.py", line 235, in apply
sub_prof = optimizer.optimize(fgraph)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\gof\opt.py", line 87, in optimize
ret = self.apply(fgraph, *args, **kwargs)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\tensor\opt.py", line 7289, in apply
new_outputs = self.optimizer(node)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\tensor\opt.py", line 7122, in local_fuse
tv = gof.op.get_test_value(ii)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\gof\op.py", line 987, in get_test_value
return PureOp._get_test_value(v_var)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\gof\op.py", line 580, in _get_test_value
detailed_err_msg = utils.get_variable_trace_string(v)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\gof\utils.py", line 134, in get_variable_trace_string
return sio.getvalue()
File "C:\ProgramData\Anaconda2\lib\StringIO.py", line 271, in getvalue
self.buf += ''.join(self.buflist)
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe8 in position 27: ordinal not in range(128)
ERROR (theano.gof.opt): SeqOptimizer apply <theano.tensor.opt.FusionOptimizer object at 0x000000000D87B0F0>
ERROR (theano.gof.opt): Traceback:
ERROR (theano.gof.opt): Traceback (most recent call last):
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\gof\opt.py", line 235, in apply
sub_prof = optimizer.optimize(fgraph)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\gof\opt.py", line 87, in optimize
ret = self.apply(fgraph, *args, **kwargs)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\tensor\opt.py", line 7289, in apply
new_outputs = self.optimizer(node)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\tensor\opt.py", line 7122, in local_fuse
tv = gof.op.get_test_value(ii)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\gof\op.py", line 987, in get_test_value
return PureOp._get_test_value(v_var)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\gof\op.py", line 580, in _get_test_value
detailed_err_msg = utils.get_variable_trace_string(v)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\gof\utils.py", line 134, in get_variable_trace_string
return sio.getvalue()
File "C:\ProgramData\Anaconda2\lib\StringIO.py", line 271, in getvalue
self.buf += ''.join(self.buflist)
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe8 in position 27: ordinal not in range(128)
Train on 100000 samples, validate on 77963 samples
Epoch 1/5
Traceback (most recent call last):
File "<ipython-input-67-5ce4b6739b03>", line 1, in <module>
runfile('E:/XIAMENproject/Prediction_Guo/count.py', wdir='E:/XIAMENproject/Prediction_Guo')
File "C:\ProgramData\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 866, in runfile
execfile(filename, namespace)
File "C:\ProgramData\Anaconda2\lib\site-packages\spyder\utils\site\sitecustomize.py", line 87, in execfile
exec(compile(scripttext, filename, 'exec'), glob, loc)
File "E:/XIAMENproject/Prediction_Guo/count.py", line 150, in <module>
validation_data=(x_val, y_val))
File "C:\ProgramData\Anaconda2\lib\site-packages\keras\models.py", line 870, in fit
initial_epoch=initial_epoch)
File "C:\ProgramData\Anaconda2\lib\site-packages\keras\engine\training.py", line 1507, in fit
initial_epoch=initial_epoch)
File "C:\ProgramData\Anaconda2\lib\site-packages\keras\engine\training.py", line 1156, in _fit_loop
outs = f(ins_batch)
File "C:\ProgramData\Anaconda2\lib\site-packages\keras\backend\theano_backend.py", line 1196, in __call__
return self.function(*inputs)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\compile\function_module.py", line 805, in __call__
self.maker.inputs[i].variable)
File "C:\ProgramData\Anaconda2\lib\site-packages\theano\gof\utils.py", line 134, in get_variable_trace_string
return sio.getvalue()
File "C:\ProgramData\Anaconda2\lib\StringIO.py", line 271, in getvalue
self.buf += ''.join(self.buflist)
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe8 in position 27: ordinal not in range(128)
I reset the tag IDs to continuous IDs, and then used to_categorical.

Categories