I am a beginner in machine learning and I am trying to train a ViT model on categorical classes with my own dataset. I am following this code: https://keras.io/examples/vision/image_classification_with_vision_transformer/
It works fine when I use the accuracy metric, but I want to use recall and precision as well, and I keep getting this error when I add them:
/usr/local/lib/python3.8/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
52 try:
53 ctx.ensure_initialized()
---> 54 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
55 inputs, attrs, num_outputs)
56 except core._NotOkStatusException as e:
InvalidArgumentError: Graph execution error:
Detected at node 'assert_greater_equal/Assert/AssertGuard/Assert' defined at (most recent call last):
File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.8/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.8/dist-packages/traitlets/config/application.py", line 992, in launch_instance
app.start()
File "/usr/local/lib/python3.8/dist-packages/ipykernel/kernelapp.py", line 612, in start
self.io_loop.start()
File "/usr/local/lib/python3.8/dist-packages/tornado/platform/asyncio.py", line 149, in start
self.asyncio_loop.run_forever()
...
(1) INVALID_ARGUMENT: assertion failed: [predictions must be >= 0] [Condition x >= y did not hold element-wise:] [x (model_3/dense_79/BiasAdd:0) = ] [[251.636795 -233.491394 322.750397...]...] [y (Cast_4/x:0) = ] [0]
[[{{node assert_greater_equal/Assert/AssertGuard/Assert}}]]
0 successful operations.
0 derived errors ignored. [Op:__inference_train_function_348766]
I also one-hot encoded my y_train so I could use the CategoricalCrossentropy loss instead of SparseCategoricalCrossentropy.
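(For reference, a minimal sketch of that encoding step, assuming integer class labels 0-8:
import tensorflow as tf

# one-hot encode integer labels so CategoricalCrossentropy can be used
y_train = tf.keras.utils.to_categorical(y_train, num_classes=9)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=9)
)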
Here are the shapes of the arrays now:
x_train shape: (6179, 336, 336, 3) - y_train shape: (6179, 9)
x_test shape: (2060, 336, 336, 3) - y_test shape: (2060, 9)
I only changed a few things in the compile step compared to the original code:
optimizer = tfa.optimizers.AdamW(
    learning_rate=learning_rate, weight_decay=weight_decay
)
model.compile(
    optimizer=optimizer,
    loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
    metrics=['accuracy', tf.keras.metrics.Recall(name='recall')],
)
checkpoint_filepath = "/content/drive/MyDrive/DATASET/checkpoints/checkpoint.h5"
checkpoint_callback = keras.callbacks.ModelCheckpoint(
    checkpoint_filepath,
    monitor="val_accuracy",
    save_best_only=True,
    save_weights_only=True,
)
r = model.fit(
    x=x_train,
    y=y_train,
    batch_size=batch_size,
    epochs=num_epochs,
    validation_split=0.25,
    callbacks=[checkpoint_callback],  # without this, the checkpoint file is never written
)
model.load_weights(checkpoint_filepath)
_, accuracy = model.evaluate(x_test, y_test)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
I'm using Colab.
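For context, tf.keras.metrics.Recall and tf.keras.metrics.Precision assert that predictions lie in [0, 1], while the ViT example's head emits raw logits (which is why the loss uses from_logits=True); that is exactly what the "predictions must be >= 0" assertion is reporting. A minimal sketch of one workaround, assuming model is the functional Keras ViT from the linked example whose last Dense layer outputs logits:

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# wrap the logits model so the metrics see probabilities instead of raw logits
probs_model = keras.Model(model.input, layers.Softmax()(model.output))
probs_model.compile(
    optimizer=optimizer,
    # outputs are now probabilities, so from_logits must be False
    loss=keras.losses.CategoricalCrossentropy(from_logits=False),
    metrics=['accuracy',
             tf.keras.metrics.Recall(name='recall'),
             tf.keras.metrics.Precision(name='precision')],
)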
#x_train format = [samples, timesteps, features]
#y_train format = [samples, timesteps]
num_timesteps = len(x_train[0])
num_features = len(x_train[0, 0])
num_classes = 3
model = Sequential()
model.add(Dense(50, input_shape = (num_timesteps, num_features)))
model.add(Dense(50))
model.add(Dense(50))
model.add(LSTM(300, return_sequences=True))
model.add(Dense(50))
model.add(Dense(50))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'],
sample_weight_mode='temporal')
model.fit(x_train, y_train, epochs=1, batch_size=500, class_weight = class_weights)
I get an error that seems to indicate something is wrong with indices. The code works if I remove class_weights.
Does anyone know what I am doing wrong?
Error:
Traceback (most recent call last):
File "C:/Users/Documents/Neural Network for probabilities/keras_test_real_data_non_stateful_return_sequences_with_class_weights.py", line 129, in
model.fit(x_train, y_train, epochs=1, batch_size=500, class_weight = class_weights)
File "C:\Users\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\engine\training.py", line 66, in _method_wrapper
return method(self, *args, **kwargs)
File "C:\Users\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\engine\training.py", line 848, in fit
tmp_logs = train_function(iterator)
File "C:\Users\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\def_function.py", line 580, in call
result = self._call(*args, **kwds)
File "C:\Users\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\def_function.py", line 644, in _call
return self._stateless_fn(*args, **kwds)
File "C:\Users\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\function.py", line 2420, in call
return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
File "C:\Users\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\function.py", line 1665, in _filtered_call
self.captured_inputs)
File "C:\Users\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\function.py", line 1746, in _call_flat
ctx, args, cancellation_manager=cancellation_manager))
File "C:\Users\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\function.py", line 598, in call
ctx=ctx)
File "C:\Users\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\eager\execute.py", line 60, in quick_execute
inputs, attrs, num_outputs)
tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[1] = 4 is not in [0, 3)
[[{{node GatherV2}}]]
[[IteratorGetNext]] [Op:__inference_train_function_4037]
Function call stack:
train_function
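For what it's worth, "indices[1] = 4 is not in [0, 3)" at a GatherV2 node most likely means the per-class weight table built from class_weights (one entry per class) was indexed with a label of 4, i.e. y_train contains class ids outside [0, num_classes). A quick sanity check (a sketch, assuming y_train holds integer class ids):

import numpy as np

# labels must lie in [0, num_classes), and class_weights needs a key for each id
print(np.unique(y_train))   # expect [0 1 2] when num_classes = 3
assert y_train.max() < num_classes, "y_train contains labels >= num_classes"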
from transformers import BertTokenizer, TFBertModel, InputExample, InputFeatures
from tqdm import tqdm
import tensorflow as tf
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = TFBertModel.from_pretrained("bert-base-uncased")
...
train = df2[:5000]
test = df2[5000:]
def convert_data_to_examples(train, test, text, Airline_Cat):
    train_InputExamples = train.apply(lambda x: InputExample(guid=None,
                                                             text_a=x[text],
                                                             label=x[Airline_Cat]), axis=1)
    validation_InputExamples = test.apply(lambda x: InputExample(guid=None,
                                                                 text_a=x[text],
                                                                 label=x[Airline_Cat]), axis=1)
    return train_InputExamples, validation_InputExamples
train_InputExamples, validation_InputExamples = convert_data_to_examples(train, test, 'text', 'Airline_Cat')
def convert_examples_to_tf_dataset(examples, tokenizer, max_length=128):
    features = []
    for e in tqdm(examples):
        input_dict = tokenizer.encode_plus(
            e.text_a,
            add_special_tokens=True,
            max_length=max_length,
            return_token_type_ids=True,
            return_attention_mask=True,
            pad_to_max_length=True,
            truncation=True
        )
        input_ids, token_type_ids, attention_mask = (input_dict["input_ids"],
                                                     input_dict["token_type_ids"],
                                                     input_dict["attention_mask"])
        features.append(InputFeatures(input_ids=input_ids,
                                      attention_mask=attention_mask,
                                      token_type_ids=token_type_ids,
                                      label=e.label))

    def gen():
        for f in features:
            yield (
                {
                    "input_ids": f.input_ids,
                    "attention_mask": f.attention_mask,
                    "token_type_ids": f.token_type_ids,
                },
                f.label,
            )

    return tf.data.Dataset.from_generator(
        gen,
        ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
        (
            {
                "input_ids": tf.TensorShape([None]),
                "attention_mask": tf.TensorShape([None]),
                "token_type_ids": tf.TensorShape([None]),
            },
            tf.TensorShape([]),
        ),
    )
DATA_COLUMN = 'text'
LABEL_COLUMN = 'Airline_Cat'
train_data = convert_examples_to_tf_dataset(list(train_InputExamples), tokenizer)
train_data = train_data.shuffle(100).batch(32).repeat(2)
validation_data = convert_examples_to_tf_dataset(list(validation_InputExamples), tokenizer)
validation_data = validation_data.batch(32)
This is my code. I've used a BERT model for sentiment analysis, and the next cell keeps throwing the error below.
code:
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.CategoricalAccuracy('accuracy')
model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
model.fit(train_data, epochs=2, validation_data=validation_data)
output:
Epoch 1/2
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Input In [41], in <cell line: 6>()
3 metric = tf.keras.metrics.CategoricalAccuracy('accuracy')
4 model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
----> 6 model.fit(train_data, epochs=2, validation_data=validation_data)
File ~\anaconda3\lib\site-packages\keras\utils\traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
File C:\Users\JAYALA~1\AppData\Local\Temp\__autograph_generated_fileb_1mantj.py:15, in outer_factory.<locals>.inner_factory.<locals>.tf__train_function(iterator)
13 try:
14 do_return = True
---> 15 retval_ = ag__.converted_call(ag__.ld(step_function), (ag__.ld(self), ag__.ld(iterator)), None, fscope)
16 except:
17 do_return = False
File ~\anaconda3\lib\site-packages\transformers\modeling_tf_utils.py:1437, in TFPreTrainedModel.train_step(self, data)
1434 y_pred = y_pred[0]
1436 if loss is None:
-> 1437 loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)
1439 # Run backwards pass.
1440 self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
ValueError: in user code:
File "C:\Users\Jayalakshmi\anaconda3\lib\site-packages\keras\engine\training.py", line 1160, in train_function *
return step_function(self, iterator)
File "C:\Users\Jayalakshmi\anaconda3\lib\site-packages\keras\engine\training.py", line 1146, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\Jayalakshmi\anaconda3\lib\site-packages\keras\engine\training.py", line 1135, in run_step **
outputs = model.train_step(data)
File "C:\Users\Jayalakshmi\anaconda3\lib\site-packages\transformers\modeling_tf_utils.py", line 1437, in train_step
loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)
File "C:\Users\Jayalakshmi\anaconda3\lib\site-packages\keras\engine\compile_utils.py", line 265, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "C:\Users\Jayalakshmi\anaconda3\lib\site-packages\keras\losses.py", line 152, in __call__
losses = call_fn(y_true, y_pred)
File "C:\Users\Jayalakshmi\anaconda3\lib\site-packages\keras\losses.py", line 272, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "C:\Users\Jayalakshmi\anaconda3\lib\site-packages\keras\losses.py", line 1990, in categorical_crossentropy
return backend.categorical_crossentropy(
File "C:\Users\Jayalakshmi\anaconda3\lib\site-packages\keras\backend.py", line 5529, in categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
ValueError: Shapes (None, 1) and (None, None, 768) are incompatible
The airline sentiment column has three values (positive, negative, and neutral), which I converted to the numerics 0, 1, and 2. I tried many things, like changing the loss type and reducing the sentiments to only positive and negative by dropping neutral, but it wouldn't work. I also tried reshaping, but couldn't find a way to reshape a BERT model. I'm a complete beginner in this domain and would like some help. Thanks in advance.
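For context, the (None, None, 768) side of the mismatch is TFBertModel's per-token hidden states: the bare encoder has no classification head, so the loss is being computed against 768-wide features rather than class logits, and integer labels additionally call for a sparse loss. A minimal sketch of the usual setup, assuming three sentiment classes encoded as 0, 1, 2 (the learning rate is just a typical fine-tuning value, not from the original post):

import tensorflow as tf
from transformers import TFBertForSequenceClassification

# a BERT variant with a classification head that outputs (batch, num_labels) logits
model = TFBertForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=3)
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=3e-5),  # typical fine-tuning LR (assumption)
    # integer labels -> sparse loss; the head emits logits
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy('accuracy')],
)
model.fit(train_data, epochs=2, validation_data=validation_data)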
I am using keras.utils.text_dataset_from_directory (see code). When I reach model.fit, I get a warning about input dimensions and an error (which I think has something to do with the output).
code:
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Rescaling
MAX_VAL = 0.5
num_classes = 2
train_ds = keras.utils.text_dataset_from_directory(
directory='.../training_data/',
labels='inferred',
label_mode='categorical',
class_names=None,
batch_size=32,
max_length=None,
shuffle=True,
seed=None,
validation_split=None,
subset=None,
follow_links=False)
validation_ds = keras.utils.text_dataset_from_directory(
directory='.../validation_data/',
labels='inferred',
label_mode='categorical',
class_names=None,
batch_size=32,
max_length=None,
shuffle=True,
seed=None,
validation_split=None,
subset=None,
follow_links=False)
inputs = keras.Input(shape=(None,))
x = layers.Reshape((-1, 1))(inputs)
x = Rescaling(scale=1.0 / MAX_VAL)(x)
x = layers.Dense(32, activation="softmax")(x)
outputs = layers.Dense(num_classes, activation="softmax")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.summary()
keras.utils.plot_model(model, "my_first_model.png")
model.compile(optimizer='Adam', loss='categorical_crossentropy')
history = model.fit(train_ds, epochs=10, validation_data=validation_ds)
output:
Epoch 1/10
WARNING:tensorflow:Model was constructed with shape (None, None) for input KerasTensor(type_spec=TensorSpec(shape=(None, None), dtype=tf.float32, name='input_1'), name='input_1', description="created by layer 'input_1'"), but it was called on an input with incompatible shape (None,).
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-6-3656f32a72a9> in <module>
1 model.compile(optimizer='Adam', loss='categorical_crossentropy')
----> 2 history = model.fit(train_ds, epochs=10, validation_data=validation_ds)
~\AppData\Roaming\Python\Python38\site-packages\keras\utils\traceback_utils.py in error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\framework\func_graph.py in autograph_handler(*args, **kwargs)
1145 except Exception as e: # pylint:disable=broad-except
1146 if hasattr(e, "ag_error_metadata"):
-> 1147 raise e.ag_error_metadata.to_exception(e)
1148 else:
1149 raise
ValueError: in user code:
File "...\Python\Python38\site-packages\keras\engine\training.py", line 1021, in train_function *
return step_function(self, iterator)
File "...\Python\Python38\site-packages\keras\engine\training.py", line 1010, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "...\Python\Python38\site-packages\keras\engine\training.py", line 1000, in run_step **
outputs = model.train_step(data)
File "...\Python\Python38\site-packages\keras\engine\training.py", line 860, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "...\Python\Python38\site-packages\keras\engine\training.py", line 918, in compute_loss
return self.compiled_loss(
File "...\Python\Python38\site-packages\keras\engine\compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "...\Python\Python38\site-packages\keras\losses.py", line 141, in __call__
losses = call_fn(y_true, y_pred)
File "...\Python\Python38\site-packages\keras\losses.py", line 245, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "...\Python\Python38\site-packages\keras\losses.py", line 1789, in categorical_crossentropy
return backend.categorical_crossentropy(
File "...\Python\Python38\site-packages\keras\backend.py", line 5083, in categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
ValueError: Shapes (None, 2) and (None, 1, 2) are incompatible
The inputs to the model are 1d vectors (of length ~27000) saved in .txt files:
0.101471743,0.099917953,0.103334975,0.099364908,0.099035715,...,0.097369999,0.099680934
I read the dataset from directories formatted as:
/training_data/
...class_a/
......a_text_1.txt
......a_text_2.txt
...class_b/
......b_text_1.txt
......b_text_2.txt
/validation_data/
...class_a/
......a_text_1.txt
......a_text_2.txt
...class_b/
......b_text_1.txt
......b_text_2.txt
How can I get the dimensions right?
EDIT:
I saved the data as .jpg files and loaded them using image_dataset_from_directory, which fixed the issue, but I would still like to understand why I can't get the data from the .txt files properly. (I lose a lot of precision converting from float data to 8-bit ints in .jpg, and image_dataset_from_directory requires all images to be the same size, whereas I want my samples to have different lengths.)
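For context, text_dataset_from_directory yields each .txt file as a single scalar string tensor, not a float vector, which is why the model is called on inputs of shape (None,). A minimal sketch of parsing the comma-separated text into floats, assuming the files have no trailing comma:

import tensorflow as tf

def parse_file(text, label):
    values = tf.strings.split(text, sep=',')             # 1-D tensor of number strings
    return tf.strings.to_number(values, tf.float32), label

# unbatch so each element is one file's scalar string, then pad so
# different-length vectors can share a batch (pad value is zero)
train_ds = train_ds.unbatch().map(parse_file).padded_batch(32)
validation_ds = validation_ds.unbatch().map(parse_file).padded_batch(32)

Note that the model would then also need to collapse the length axis (e.g. Flatten or GlobalAveragePooling1D) before the final Dense, since Dense applies position-wise; that extra axis is where the (None, 1, 2) vs (None, 2) mismatch came from.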
I have a question regarding CNNs in Keras; if you would like to help me, I would really appreciate it.
Disclaimer: I'm a noob at CNNs and Keras; I'm just learning them right now.
My Data:
2 Classes (dogs and cats)
Training: 30 pics each category
Test: 14 pics each category
Valid: 30 pics each category
My code:
data_path = Path("../data")
train_path = data_path / "train"
test_path = data_path / "test"
valid_path = data_path / "valid"
train_batch = ImageDataGenerator().flow_from_directory(directory=train_path,
                                                       target_size=(200, 200),
                                                       classes=animals,
                                                       batch_size=10)
valid_batch = ImageDataGenerator().flow_from_directory(directory=valid_path,
                                                       target_size=(200, 200),
                                                       classes=animals,
                                                       batch_size=10)
test_batch = ImageDataGenerator().flow_from_directory(directory=test_path,  # new name, so test_path (the directory) isn't overwritten
                                                      target_size=(200, 200),
                                                      classes=animals,
                                                      batch_size=4)
imgs, labels = next(train_batch)
model = Sequential(
    [Conv2D(32, (3, 3), activation="relu", input_shape=(200, 200, 3)), Flatten(),
     Dense(len(animals), activation='softmax')])
model.compile(Adam(lr=.0001), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit_generator(train_path, steps_per_epoch=4, validation_data=valid_batch, validation_steps=3, epochs=5, verbose=2)
Here is my error message (I've replaced the paths with ""):
Traceback (most recent call last):
File "", line 191, in <module>
model.fit_generator(train_path, steps_per_epoch=4, validation_data=valid_batch, validation_steps=3, epochs=5, verbose=2)
File "y", line 91, in wrapper
return func(*args, **kwargs)
File "", line 1732, in fit_generator
initial_epoch=initial_epoch)
File "", line 185, in fit_generator
generator_output = next(output_generator)
File "", line 742, in get
six.reraise(*sys.exc_info())
File "", line 693, in reraise
raise value
File "", line 711, in get
inputs = future.get(timeout=30)
File "", line 657, in get
raise self._value
File "", line 121, in worker
result = (True, func(*args, **kwds))
File "", line 650, in next_sample
return six.next(_SHARED_SEQUENCES[uid])
TypeError: 'PosixPath' object is not an iterator
Could anyone explain what I'm doing wrong, please? Also, if this question is off-topic, just let me know where I can ask it.
The issue is that you are NOT passing the generator for training, but the path to the files (you are using train_path instead of train_batch). You need to pass a generator object when using .fit_generator():
model.fit_generator(train_batch, steps_per_epoch=4, validation_data=valid_batch, validation_steps=3, epochs=5, verbose=2)
By the way, this line isn't necessary:
imgs, labels = next(train_batch)
From the docs, fit_generator's first argument must be a generator object, not a path as you have supplied. Like this:
model.fit_generator(train_batch, steps_per_epoch=4, validation_data=valid_batch, validation_steps=3, epochs=5, verbose=2)
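For what it's worth, fit_generator is deprecated on recent Keras versions; fit accepts generators directly (a sketch, same arguments otherwise):

# on recent Keras, fit() handles generators, so fit_generator() isn't needed
model.fit(train_batch,
          steps_per_epoch=4,
          validation_data=valid_batch,
          validation_steps=3,
          epochs=5,
          verbose=2)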
I wanted to test my network on a toy data set - a few examples with two imbalanced classes (0 and 1). Unfortunately, there are problems when using the class_weight parameter to improve the balance. It looks like I'm forgetting something.
import tensorflow as tf
from tensorflow.python.keras.layers import Dense, Dropout
from tensorflow.python.keras.applications.xception import Xception, preprocess_input
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.optimizers import Adam
# parsing images from TFRecords
def parse_function(proto):
    example = {'image_raw': tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.int64)}
    parsed_example = tf.parse_single_example(proto, example)
    image = tf.decode_raw(parsed_example['image_raw'], tf.uint8)
    image = tf.reshape(image, [HEIGHT, WIDTH, DEPTH])
    image = preprocess_input(tf.cast(image, tf.float32))
    return image, parsed_example['label']

def get_data(filepath, shuffle_size=32, batch_size=8, prefetch=1, repeat=None, num_parallel_calls=1):
    dataset = tf.data.TFRecordDataset(filepath)
    if shuffle_size != 0:
        dataset = dataset.shuffle(shuffle_size)
    dataset = dataset.repeat(repeat)
    dataset = dataset.map(parse_function, num_parallel_calls=num_parallel_calls)
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(prefetch)
    iterator = dataset.make_one_shot_iterator()
    return iterator
def build_model(number_of_neurons_in_dense_layer, dropout, learning_rate):
    base_model = Xception(weights='imagenet', include_top=False, pooling='avg', input_shape=(HEIGHT, WIDTH, 3))
    for layer in base_model.layers:
        layer.trainable = True
    x = base_model.output
    x = Dropout(dropout)(x)
    x = Dense(number_of_neurons_in_dense_layer, activation='relu')(x)
    x = Dropout(dropout)(x)
    logits = Dense(NUMBER_OF_CLASSES, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=logits)
    model.compile(optimizer=Adam(lr=learning_rate), loss='sparse_categorical_crossentropy', metrics=['categorical_accuracy'])
    return model
global NUMBER_OF_CLASSES, HEIGHT, WIDTH, DEPTH
NUMBER_OF_CLASSES = 2
...
CLASS_WEIGHTS = {
    0: 1,
    1: 7
}
model = build_model(64, 0.4, 0.001)
train = get_data(..., 8, 2, num_parallel_calls=8)
val = get_data(...., 0, 4, num_parallel_calls=8)
model.fit(train, validation_data=val, epochs=3, steps_per_epoch=8 // 2,
          validation_steps=8 // 4, shuffle=False,
          class_weight=CLASS_WEIGHTS)
I am getting the following error:
Original exception was:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py", line 51, in _wrapfunc
return getattr(obj, method)(*args, **kwds)
AttributeError: 'Tensor' object has no attribute 'reshape'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/usr/model.py", line 147, in main
class_weight=CLASS_WEIGHTS)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py", line 776, in fit
shuffle=shuffle)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py", line 2432, in _standardize_user_data
feed_sample_weight_modes)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py", line 2431, in <listcomp>
for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training_utils.py", line 758, in standardize_weights
y_classes = np.reshape(y, y.shape[0])
File "/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py", line 279, in reshape
return _wrapfunc(a, 'reshape', newshape, order=order)
File "/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py", line 61, in _wrapfunc
return _wrapit(obj, method, *args, **kwds)
File "/usr/local/lib/python3.6/dist-packages/numpy/core/fromnumeric.py", line 41, in _wrapit
result = getattr(asarray(obj), method)(*args, **kwds)
TypeError: __index__ returned non-int (type NoneType)
Without the class_weight parameter, the fit function works correctly.
Just for future reference:
I ran into this error too and was able to resolve it by passing an array instead of a dictionary,
e.g.
CLASS_WEIGHTS = np.array([1, 7])
instead of:
CLASS_WEIGHTS = {
    0: 1,
    1: 7
}
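One way to build that array, assuming class ids 0..K-1 with known per-class counts (the counts below are made-up example values):

import numpy as np

counts = np.array([700, 100])                           # samples per class (assumption)
CLASS_WEIGHTS = counts.sum() / (len(counts) * counts)   # inverse-frequency weights
# -> index i holds the weight applied to samples of class i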