I wrote a program using Keras that distinguishes real text from fake (I used 5,000 training samples and 10,000 test samples), using a Transformer with the 'distilbert-base-uncased' model for detection. Now I want to tune the hyperparameters using grid search, but I ran into the following error:
TuneError Traceback (most recent call last)
<ipython-input-15-c4a44a2180d8> in <module>()
156 tune_iris,
157 verbose=1,
--> 158 config=hyperparameter_space,
159 )
160
/usr/local/lib/python3.6/dist-packages/ray/tune/tune.py in run(run_or_experiment, name, stop, config, resources_per_trial, num_samples, local_dir, upload_dir, trial_name_creator, loggers, sync_to_cloud, sync_to_driver, checkpoint_freq, checkpoint_at_end, sync_on_checkpoint, keep_checkpoints_num, checkpoint_score_attr, global_checkpoint_period, export_formats, max_failures, fail_fast, restore, search_alg, scheduler, with_server, server_port, verbose, progress_reporter, resume, queue_trials, reuse_actors, trial_executor, raise_on_failed_trial, return_trials, ray_auto_init)
354 if incomplete_trials:
355 if raise_on_failed_trial:
--> 356 raise TuneError("Trials did not complete", incomplete_trials)
357 else:
358 logger.error("Trials did not complete: %s", incomplete_trials)
TuneError: ('Trials did not complete', [tune_iris_83131_00000, tune_iris_83131_00001, tune_iris_83131_00002, tune_iris_83131_00003, tune_iris_83131_00004, tune_iris_83131_00005, tune_iris_83131_00006, tune_iris_83131_00007, tune_iris_83131_00008, tune_iris_83131_00009, tune_iris_83131_00010, tune_iris_83131_00011, tune_iris_83131_00012, tune_iris_83131_00013, tune_iris_83131_00014, tune_iris_83131_00015, tune_iris_83131_00016, tune_iris_83131_00017])
The program I wrote is as follows:
data = pd.concat([train_webtext, train_gen, valid_webtext, valid_gen])
sentences = data['text']
labels = labels1 + labels2
len(sentences), len(labels)

DistilBertTokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-cased", do_lower_case=False)

input_ids = []
attention_masks = []
for sent in sentences:
    bert_inp = DistilBertTokenizer.encode_plus(sent, add_special_tokens=True, max_length=64,
                                               pad_to_max_length=True, return_attention_mask=True)
    input_ids.append(bert_inp['input_ids'])
    attention_masks.append(bert_inp['attention_mask'])
input_ids = np.asarray(input_ids)
attention_masks = np.array(attention_masks)
labels = np.array(labels)
class TuneReporterCallback(keras.callbacks.Callback):
    """Tune Callback for Keras. The callback is invoked every epoch."""

    def __init__(self, logs={}):
        self.iteration = 0
        super(TuneReporterCallback, self).__init__()

    def on_epoch_end(self, batch, logs={}):
        self.iteration += 1
        tune.report(keras_info=logs, mean_accuracy=logs.get("accuracy"), mean_loss=logs.get("loss"))
def tune_gpt(config):
    train_inp, val_inp, train_label, val_label, train_mask, val_mask = train_test_split(
        input_ids, labels, attention_masks, test_size=0.6666666666666666)
    DistilBert_model = TFDistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased', num_labels=2)
    loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy')
    optimizer = tf.keras.optimizers.Adam(learning_rate=config["learning_rate"], epsilon=1e-08)
    DistilBert_model.compile(loss=loss, optimizer=optimizer, metrics=[metric])
    checkpoint_callback = [tf.keras.callbacks.ModelCheckpoint("DistilBert_model.h5", monitor='val_loss',
                                                              mode='min', save_best_only=True)]
    callbacks = [checkpoint_callback, TuneReporterCallback()]
    history = DistilBert_model.fit([train_inp, train_mask], train_label, batch_size=config["batch_size"],
                                   epochs=config["epochs"], validation_data=([val_inp, val_mask], val_label),
                                   callbacks=callbacks)
assert len(inspect.getargspec(tune_gpt).args) == 1, "The `tune_gpt` function needs to take in the arg `config`."
hyperparameter_space = {
    "batch_size": tune.grid_search([16, 32]),
    "learning_rate": tune.grid_search([2e-5, 3e-5, 5e-5]),
    "epochs": tune.grid_search([2, 3, 4])
}
analysis = tune.run(
    tune_gpt,
    verbose=1,
    config=hyperparameter_space,
)
It seems your code has some errors, but the detailed error messages do not appear because of the verbose option. Change the verbose option from
verbose=1
to
verbose=3
to see the detailed per-trial errors.
(Verbosity mode: 0 = silent, 1 = only status updates, 2 = status and brief trial results, 3 = status and detailed trial results. Defaults to 3.)
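For example, the tune.run call from the question becomes (a minimal sketch; only the verbosity changes):

analysis = tune.run(
    tune_gpt,
    verbose=3,  # 3 = status and detailed trial results, so failing trials print their tracebacks
    config=hyperparameter_space,
)

With the tracebacks visible, you can then see exactly why each trial failed.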
Currently, I'm exploring machine learning interpretability tools for one of my projects. I found Shapash, quite a new tool, and many people suggest using it to create a few easily interpretable charts for an ML model. When I tried it with RandomForestClassifier it worked fine and generated a webpage full of different charts, but I cannot achieve the same while using an SVM (I'm just exploring this library, not focusing on the perfect ML model for the problem).
Note: I'm using Shapash (link here).
#Fit blackbox model
svc = svm.SVC()
svc.fit(X_train_smote, y_train_smote)
y_pred = svc.predict(X_test)
print(f"F1 Score {f1_score(y_test, y_pred, average='macro')}")
print(f"Accuracy {accuracy_score(y_test, y_pred)}")
from shapash import SmartExplainer
xpl = SmartExplainer(model=svc)
The error I'm getting:
---------------------------------------------------------------------------
Exception Traceback (most recent call last)
/tmp/ipykernel_13648/1233939729.py in <module>
----> 1 xpl = SmartExplainer(model=svc)
~/Python_AI/ai_env/lib/python3.8/site-packages/shapash/explainer/smart_explainer.py in __init__(self, model, backend, preprocessing, postprocessing, features_groups, features_dict, label_dict, title_story, palette_name, colors_dict, **kwargs)
194 if isinstance(backend, str):
195 backend_cls = get_backend_cls_from_name(backend)
--> 196 self.backend = backend_cls(
197 model=self.model, preprocessing=preprocessing, **kwargs)
198 elif isinstance(backend, BaseBackend):
~/Python_AI/ai_env/lib/python3.8/site-packages/shapash/backend/shap_backend.py in __init__(self, model, preprocessing, explainer_args, explainer_compute_args)
16 self.explainer_args = explainer_args if explainer_args else {}
17 self.explainer_compute_args = explainer_compute_args if explainer_compute_args else {}
---> 18 self.explainer = shap.Explainer(model=model, **self.explainer_args)
19
20 def run_explainer(self, x: pd.DataFrame) -> dict:
~/Python_AI/ai_env/lib/python3.8/site-packages/shap/explainers/_explainer.py in __init__(self, model, masker, link, algorithm, output_names, feature_names, **kwargs)
166 # if we get here then we don't know how to handle what was given to us
167 else:
--> 168 raise Exception("The passed model is not callable and cannot be analyzed directly with the given masker! Model: " + str(model))
169
170 # build the right subclass
Exception: The passed model is not callable and cannot be analyzed directly with the given masker! Model: SVC()
I am new to PyTorch and trying to implement ViT on spectrograms of raw audio. My training input consists of tensors of shape [1, 80, 128] (almost 1M of them), and I am exploring AMP to speed up my training on a V100 (16 GB).
My training loop is below:
scaler = torch.cuda.amp.GradScaler(enabled=True)
for e in range(config_pytorch.epochs):
    for idx, train_bat in enumerate(train_dl):
        with autocast(enabled=True):
            y_pred = model(x).float()
            loss = criterion(y_pred, y.float())
        scaler.scale(loss).backward()
        train_loss += loss.detach().item()
        scaler.step(optimiser)
        scaler.update()
        optimiser.zero_grad()
I print out the losses at each step just to check their values; they are very small (~1e-5), and after a few steps the loss becomes 0.
The code then errors out with the following AssertionError: No inf checks were recorded prior to update.
The entire stack trace is below.
AssertionError Traceback (most recent call last)
/tmp/ipykernel_972350/3829185638.py in <module>
----> 1 model = train_model_ast(train_dl , val_dl )
/tmp/ipykernel_972350/3546603516.py in train_model_ast(train_dl, val_dl, model)
130 bat_duration = bat_finish_time - start_time
131 print("&&&& BATCH TRAIN DURATION = " + str(bat_duration/60))
--> 132 scaler.update()
133 #removing all instances of 999
134
/opt/conda/lib/python3.8/site-packages/torch/cuda/amp/grad_scaler.py in update(self, new_scale)
384 for found_inf in state["found_inf_per_device"].values()]
385
--> 386 assert len(found_infs) > 0, "No inf checks were recorded prior to update."
387
388 found_inf_combined = found_infs[0]
AssertionError: No inf checks were recorded prior to update.
The code runs without any issues, however, if I don't use AMP. I'd appreciate it if anyone could provide any pointers.
Thanks in advance.
I'm trying to export a PyTorch model to TorchScript via scripting and I am stuck. I've created a toy class to showcase the issue:
import torch
from torch import nn
class SadModule(nn.Module):
    """Takes a (*, 2) input and runs it through a linear layer. Can optionally
    use a skip connection. The usage of the skip connection or not is an
    architectural choice.
    """
    def __init__(self, use_skip: bool):
        nn.Module.__init__(self)
        self.use_skip = use_skip
        self.layer = nn.Linear(2, 2)

    def forward(self, x):
        if self.use_skip:
            x_input = x
        x = self.layer(x)
        if self.use_skip:
            x = x + x_input
        return x
It basically consists of only a linear layer and an optional skip connection. If I try to script the model using
mod1 = SadModule(False)
scripted_mod1 = torch.jit.script(mod)
I get the following error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-10-a7ebc7af32c7> in <module>
----> 1 scripted_mod1 = torch.jit.script(mod)
~/Software/miniconda3/envs/pytorch3d/lib/python3.8/site-packages/torch/jit/_script.py in script(obj, optimize, _frames_up, _rcb)
895
896 if isinstance(obj, torch.nn.Module):
--> 897 return torch.jit._recursive.create_script_module(
898 obj, torch.jit._recursive.infer_methods_to_compile
899 )
~/Software/miniconda3/envs/pytorch3d/lib/python3.8/site-packages/torch/jit/_recursive.py in create_script_module(nn_module, stubs_fn, share_types)
350 check_module_initialized(nn_module)
351 concrete_type = get_module_concrete_type(nn_module, share_types)
--> 352 return create_script_module_impl(nn_module, concrete_type, stubs_fn)
353
354 def create_script_module_impl(nn_module, concrete_type, stubs_fn):
~/Software/miniconda3/envs/pytorch3d/lib/python3.8/site-packages/torch/jit/_recursive.py in create_script_module_impl(nn_module, concrete_type, stubs_fn)
408 # Compile methods if necessary
409 if concrete_type not in concrete_type_store.methods_compiled:
--> 410 create_methods_and_properties_from_stubs(concrete_type, method_stubs, property_stubs)
411 torch._C._run_emit_module_hook(cpp_module)
412 concrete_type_store.methods_compiled.add(concrete_type)
~/Software/miniconda3/envs/pytorch3d/lib/python3.8/site-packages/torch/jit/_recursive.py in create_methods_and_properties_from_stubs(concrete_type, method_stubs, property_stubs)
302 property_rcbs = [p.resolution_callback for p in property_stubs]
303
--> 304 concrete_type._create_methods_and_properties(property_defs, property_rcbs, method_defs, method_rcbs, method_defaults)
305
306
RuntimeError:
x_input is not defined in the false branch:
File "<ipython-input-7-d08ed7ff42ec>", line 12
def forward(self, x):
if self.use_skip:
~~~~~~~~~~~~~~~~~
x_input = x
~~~~~~~~~~~ <--- HERE
x = self.layer(x)
if self.use_skip:
and was used here:
File "<ipython-input-7-d08ed7ff42ec>", line 16
x = self.layer(x)
if self.use_skip:
x = x + x_input
~~~~~~~ <--- HERE
return x
So, basically TorchScript isn't able to recognise that for mod1 the True branch of either if statement won't ever be used. Moreover, if we create an instance that actually uses the skip connection,
mod2 = SadModule(True)
scripted_mod2 = torch.jit.script(mod2)
we will get another error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-21-b5ca61d8aa73> in <module>
----> 1 scripted_mod2 = torch.jit.script(mod2)
~/Software/miniconda3/envs/pytorch3d/lib/python3.8/site-packages/torch/jit/_script.py in script(obj, optimize, _frames_up, _rcb)
895
896 if isinstance(obj, torch.nn.Module):
--> 897 return torch.jit._recursive.create_script_module(
898 obj, torch.jit._recursive.infer_methods_to_compile
899 )
~/Software/miniconda3/envs/pytorch3d/lib/python3.8/site-packages/torch/jit/_recursive.py in create_script_module(nn_module, stubs_fn, share_types)
350 check_module_initialized(nn_module)
351 concrete_type = get_module_concrete_type(nn_module, share_types)
--> 352 return create_script_module_impl(nn_module, concrete_type, stubs_fn)
353
354 def create_script_module_impl(nn_module, concrete_type, stubs_fn):
~/Software/miniconda3/envs/pytorch3d/lib/python3.8/site-packages/torch/jit/_recursive.py in create_script_module_impl(nn_module, concrete_type, stubs_fn)
408 # Compile methods if necessary
409 if concrete_type not in concrete_type_store.methods_compiled:
--> 410 create_methods_and_properties_from_stubs(concrete_type, method_stubs, property_stubs)
411 torch._C._run_emit_module_hook(cpp_module)
412 concrete_type_store.methods_compiled.add(concrete_type)
~/Software/miniconda3/envs/pytorch3d/lib/python3.8/site-packages/torch/jit/_recursive.py in create_methods_and_properties_from_stubs(concrete_type, method_stubs, property_stubs)
302 property_rcbs = [p.resolution_callback for p in property_stubs]
303
--> 304 concrete_type._create_methods_and_properties(property_defs, property_rcbs, method_defs, method_rcbs, method_defaults)
305
306
RuntimeError:
x_input is not defined in the false branch:
File "<ipython-input-18-ac8b9713c789>", line 17
def forward(self, x):
if self.use_skip:
~~~~~~~~~~~~~~~~~
x_input = x
~~~~~~~~~~~ <--- HERE
x = self.layer(x)
if self.use_skip:
and was used here:
File "<ipython-input-18-ac8b9713c789>", line 21
x = self.layer(x)
if self.use_skip:
x = x + x_input
~~~~~~~ <--- HERE
return x
So in this case TorchScript doesn't understand that both ifs will always be true and that in fact x_input is well defined.
To avoid the issue, I could split the class into two subclasses, as in:
class SadModuleNoSkip(nn.Module):
    """Takes a (*, 2) input and runs it through a linear layer,
    without a skip connection.
    """
    def __init__(self):
        nn.Module.__init__(self)
        self.layer = nn.Linear(2, 2)

    def forward(self, x):
        x = self.layer(x)
        return x


class SadModuleSkip(nn.Module):
    """Takes a (*, 2) input and runs it through a linear layer,
    with a skip connection.
    """
    def __init__(self):
        nn.Module.__init__(self)
        self.layer = nn.Linear(2, 2)

    def forward(self, x):
        x_input = x
        x = self.layer(x)
        x = x + x_input
        return x
However, I am working on a huge code base and I would have to repeat the process for many classes, which is time consuming and could introduce bugs. Moreover, the modules I'm working on are often huge convolutional nets, and the ifs just control the presence of an additional batch normalization. It seems undesirable to have two classes that are identical in 99% of their blocks, save for a single batch norm layer.
Is there a way in which I can help TorchScript with its handling of branches?
Edit: added a minimum viable example.
Update: it doesn't work even if I type-hint use_skip as a constant:
from typing import Final

class SadModule(nn.Module):
    use_skip: Final[bool]
    ...
I've opened an issue on GitHub. The project maintainers explained that using Final is the way to go. Be careful though, because as of today (May 7, 2021) this feature is still in development (albeit in its final stages; see here for the feature tracker).
Even though it's not yet available in the official releases, it is present in the nightly versions of PyTorch, so you can either install the pytorch-nightly builds as explained on the website (scroll down to Install PyTorch, then choose Preview (Nightly)), or wait for the next release.
For anybody reading this answer a few months from now, this feature should already be integrated into the main releases of PyTorch.
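Putting it together, the fix on a build that supports it looks roughly like this (a minimal sketch of the toy class above with the Final annotation applied; both instances should then script cleanly):

from typing import Final

import torch
from torch import nn


class SadModule(nn.Module):
    # Declaring the flag as Final makes TorchScript treat it as a
    # per-instance compile-time constant, so the dead branch is pruned
    # instead of being type-checked.
    use_skip: Final[bool]

    def __init__(self, use_skip: bool):
        super().__init__()
        self.use_skip = use_skip
        self.layer = nn.Linear(2, 2)

    def forward(self, x):
        if self.use_skip:
            x_input = x
        x = self.layer(x)
        if self.use_skip:
            x = x + x_input
        return x


scripted_mod1 = torch.jit.script(SadModule(False))  # the x_input branches are dropped
scripted_mod2 = torch.jit.script(SadModule(True))   # x_input is always defined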
OS Platform and Distribution: Ubuntu 20.04
TensorFlow version: 2.1.0
Python version: 3.7.6
I wanted to write a simple layer that would work on the output of tf.data.experimental.make_csv_dataset, and that I could use to impute missing values in numeric columns with the batch mean (maintaining a moving mean to be used at test time), create embeddings for categorical columns, and keep the dimensions dependent on a predefined list of unique values.
Below is the code I wrote:
import tensorflow as tf
import pandas as pd
import numpy as np
from tensorflow.keras import layers
from tensorflow import feature_column


class NUM_TO_DENSE(layers.Layer):
    def __init__(self, num_cols):
        super().__init__()
        self.keys = num_cols
        self.keys_all = self.keys + [str(i) + '__nullcol' for i in self.keys]

    def build(self, input_shape):
        def create_moving_mean_vars():
            return tf.Variable(initial_value=0., shape=(), dtype=tf.float32, trainable=False)
        self.moving_means_total = {t: create_moving_mean_vars() for t in self.keys}
        self.layer_global_counter = tf.Variable(initial_value=0., shape=(), dtype=tf.float32, trainable=False)

    def call(self, inputs, training=True):
        null_cols = {k: tf.math.is_finite(inputs[k]) for k in self.keys}
        current_means = {}

        def compute_update_current_means(t):
            current_mean = tf.math.divide_no_nan(
                tf.reduce_sum(tf.where(null_cols[t], inputs[t], 0.), axis=0),
                tf.reduce_sum(tf.cast(tf.math.is_finite(inputs[t]), tf.float32), axis=0))
            self.moving_means_total[t].assign_add(current_mean)
            return current_mean

        if training:
            current_means = {t: compute_update_current_means(t) for t in self.keys}
            outputs = {t: tf.where(null_cols[t], inputs[t], current_means[t]) for t in self.keys}
            outputs.update({str(k) + '__nullcol': tf.cast(null_cols[k], tf.float32) for k in self.keys})
            self.layer_global_counter.assign_add(1.)
        else:
            outputs = {t: tf.where(null_cols[t], inputs[t], (self.moving_means_total[t] / self.layer_global_counter))
                       for t in self.keys}
            outputs.update({str(k) + '__nullcol': tf.cast(null_cols[k], tf.float32) for k in self.keys})
        return outputs
class PREPROCESS_MONSOON(layers.Layer):
    def __init__(self, cat_cols_with_unique_values, num_cols):
        '''cat_cols_with_unique_values: (dict) {'col_cat': [unique_values_list]}
        num_cols: (list) [num_cols_name_list]'''
        super().__init__()
        self.cat_cols = cat_cols_with_unique_values
        self.num_cols = num_cols

    def build(self, input_shape):
        self.ntd = NUM_TO_DENSE(self.num_cols)
        self.num_colnames = self.ntd.keys_all
        self.ctd = {k: layers.DenseFeatures(
                        feature_column.embedding_column(
                            feature_column.categorical_column_with_vocabulary_list(k, v),
                            tf.cast(tf.math.ceil(tf.math.log(tf.cast(len(self.cat_cols[k]), tf.float32))), tf.int32).numpy()))
                    for k, v in self.cat_cols.items()}
        self.cat_colnames = [i for i in self.cat_cols]
        self.dense_colnames = self.num_colnames + self.cat_colnames

    def call(self, inputs, training=True):
        dense_num_d = self.ntd(inputs, training=training)
        dense_cat_d = {k: self.ctd[k](inputs) for k in self.cat_colnames}
        dense_num = tf.stack([dense_num_d[k] for k in self.num_colnames], axis=1)
        dense_cat = tf.concat([dense_cat_d[k] for k in self.cat_colnames], axis=1)
        dense_all = tf.concat([dense_num, dense_cat], axis=1)
        return dense_all
Creating data to test this:
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
x_train_ = pd.DataFrame(x_train.reshape(60000,-1),columns = ['col_'+str(i) for i in range(28*28)])
x_test_ = pd.DataFrame(x_test.reshape(10000,-1),columns = ['col_'+str(i) for i in range(28*28)])
x_train_['col_cat1'] = [np.random.choice(['a','b','c','d','e','f','g','h','i']) for i in range(x_train_.shape[0])]
x_test_['col_cat1'] = [np.random.choice(['a','b','c','d','e','f','g','h','i','j']) for i in range(x_test_.shape[0])]
x_train_['col_cat2'] = [np.random.choice(['a','b','c','d','e','f','g','h','i']) for i in range(x_train_.shape[0])]
x_test_['col_cat2'] = [np.random.choice(['a','b','c','d','e','f','g','h','i','j']) for i in range(x_test_.shape[0])]
x_train_[np.random.choice([True,False],size = x_train_.shape,p=[0.05,0.95]).reshape(x_train_.shape)] = np.nan
x_test_[np.random.choice([True,False],size = x_test_.shape,p=[0.05,0.95]).reshape(x_test_.shape)] = np.nan
x_train_.to_csv('data/x_train.csv',index=False)
x_test_.to_csv('data/x_test.csv',index=False)
Getting one batch of the created data into RAM:
cdtypes = pd.read_csv('data/x_train.csv', nrows=2).dtypes
xtb = tf.data.experimental.make_csv_dataset('data/x_train.csv', 32, header=True, prefetch_buffer_size=1,
                                            column_defaults=[np.nan if i == (float or int) else '__missing__' for i in cdtypes])
for i in xtb:
    break
dd = pd.read_csv('data/x_train.csv',nrows=2).head()
num_cols = [i for i in dd.columns if i not in ['col_cat1','col_cat2']]
cat_cols = [i for i in dd.columns if i in ['col_cat1','col_cat2']]
col_cat1_unique = ['a','b','c','d','e','f','g','h','i']
col_cat2_unique = ['a','b','c','d','e','f','g','h','i']
col_cat_unique = [col_cat1_unique,col_cat2_unique]
catcoldict = {k:v for k,v in zip(cat_cols,col_cat_unique)}
Testing it. This works:
pm = PREPROCESS_MONSOON(catcoldict,num_cols)
pm(i)
This works, but emits a warning asking to file a bug report:
pm = PREPROCESS_MONSOON(catcoldict, num_cols)
#tf.function
def p(i):
    return pm(i)
p(i)
output: (along with the expected preprocessed batch)
WARNING:tensorflow:AutoGraph could not transform <bound method NUM_TO_DENSE.call of <__main__.NUM_TO_DENSE object at 0x7f6458a0ec50>> and will run it as-is.
Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output.
Cause: unexpected indent (<unknown>, line 10)
WARNING: AutoGraph could not transform <bound method NUM_TO_DENSE.call of <__main__.NUM_TO_DENSE object at 0x7f6458a0ec50>> and will run it as-is.
Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output.
Cause: unexpected indent (<unknown>, line 10)
WARNING:tensorflow:AutoGraph could not transform <bound method NUM_TO_DENSE.call of <__main__.NUM_TO_DENSE object at 0x7f6458a0ec50>> and will run it as-is.
Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output.
Cause: unexpected indent (<unknown>, line 10)
WARNING: AutoGraph could not transform <bound method NUM_TO_DENSE.call of <__main__.NUM_TO_DENSE object at 0x7f6458a0ec50>> and will run it as-is.
Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output.
Cause: unexpected indent (<unknown>, line 10)
This fails:
pm = PREPROCESS_MONSOON(catcoldict,num_cols)
inputs = tf.keras.Input(shape=(None,786))
x = pm(inputs)
output:
WARNING:tensorflow:AutoGraph could not transform <bound method NUM_TO_DENSE.call of <__main__.NUM_TO_DENSE object at 0x7f6458aa3a90>> and will run it as-is.
Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output.
Cause: unexpected indent (<unknown>, line 10)
WARNING: AutoGraph could not transform <bound method NUM_TO_DENSE.call of <__main__.NUM_TO_DENSE object at 0x7f6458aa3a90>> and will run it as-is.
Please report this to the TensorFlow team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output.
Cause: unexpected indent (<unknown>, line 10)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-78-64c553138beb> in <module>
2
3 inputs = tf.keras.Input(shape=(None,786))
----> 4 x = pm(inputs)
5 # x = tf.keras.layers.Dense(500,tf.keras.layers.ReLU(100.,0.01,0.))
6 # output = tf.keras.layers.Dense(10,tf.keras.layers.Softmax())
~/anaconda3/envs/tensorflow/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
771 not base_layer_utils.is_in_eager_or_tf_function()):
772 with auto_control_deps.AutomaticControlDependencies() as acd:
--> 773 outputs = call_fn(cast_inputs, *args, **kwargs)
774 # Wrap Tensors in `outputs` in `tf.identity` to avoid
775 # circular dependencies.
~/anaconda3/envs/tensorflow/lib/python3.7/site-packages/tensorflow_core/python/autograph/impl/api.py in wrapper(*args, **kwargs)
235 except Exception as e: # pylint:disable=broad-except
236 if hasattr(e, 'ag_error_metadata'):
--> 237 raise e.ag_error_metadata.to_exception(e)
238 else:
239 raise
TypeError: in converted code:
<ipython-input-66-936477fe8a70>:62 call *
dense_num_d = self.ntd(inputs,training=training)
/home/nitin/anaconda3/envs/tensorflow/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer.py:773 __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
<ipython-input-66-936477fe8a70>:20 call
null_cols = {k:tf.math.is_finite(inputs[k]) for k in self.keys}
<ipython-input-66-936477fe8a70>:20 <dictcomp>
null_cols = {k:tf.math.is_finite(inputs[k]) for k in self.keys}
/home/nitin/anaconda3/envs/tensorflow/lib/python3.7/site-packages/tensorflow_core/python/ops/array_ops.py:862 _slice_helper
_check_index(s)
/home/nitin/anaconda3/envs/tensorflow/lib/python3.7/site-packages/tensorflow_core/python/ops/array_ops.py:752 _check_index
raise TypeError(_SLICE_TYPE_ERROR + ", got {!r}".format(idx))
TypeError: Only integers, slices (`:`), ellipsis (`...`), tf.newaxis (`None`) and scalar tf.int32/tf.int64 tensors are valid indices, got 'col_0'
Could someone help me understand what is happening here and how to go about achieving the intended behavior?
I don't think this problem is related to AutoGraph. As visible from the stack trace, you are trying to slice your layer inputs using a string key in the call method of NUM_TO_DENSE:
null_cols = {k: tf.math.is_finite(inputs[k]) for k in self.keys}
However, slicing a tensor in TensorFlow can only be done with int32 or int64 indices.
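In other words, the batch yielded by make_csv_dataset is a dict of per-column tensors, so inputs[k] is an ordinary dictionary lookup and the eager call works; tf.keras.Input(shape=(None, 786)) is a single symbolic tensor, so inputs['col_0'] becomes a tensor slice with a string index and fails. A minimal sketch of one way around this (an untested assumption on my part, reusing num_cols and catcoldict from the question) is to feed the layer a dict of named Input tensors instead:

# Hypothetical: mirror the dict structure of the CSV batches with one
# named Input per column, so inputs[k] stays a dict lookup.
inputs = {name: tf.keras.Input(shape=(), name=name, dtype=tf.float32)
          for name in num_cols}
inputs.update({name: tf.keras.Input(shape=(), name=name, dtype=tf.string)
               for name in catcoldict})
x = pm(inputs)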
I am running the following code without problem:
churn_dmatrix = xgb.DMatrix(data = class_data.iloc[:, :-1], label = class_data.Churn)
params = {'objective' : 'binary:logistic' , 'max_depth' : 4}
cv_results = xgb.cv(dtrain = churn_dmatrix, params = params, nfold = 4, num_boost_round = 1, metrics = 'error', \
as_pandas = True)
print(cv_results)
   train-error-mean  train-error-std  test-error-mean  test-error-std
0          0.395833         0.108253            0.375        0.414578
However, when I change the metric to 'auc' I get an error message:
cv_results = xgb.cv(dtrain = churn_dmatrix, params = params, nfold = 4, num_boost_round = 5, metrics = 'auc', \
as_pandas = True)
---------------------------------------------------------------------------
XGBoostError Traceback (most recent call last)
<ipython-input-102-ea99ef0705b5> in <module>()
----> 1 cv_results = xgb.cv(dtrain = churn_dmatrix, params = params, nfold = 4, num_boost_round = 5, metrics = 'auc', as_pandas = True)
C:\ProgramData\Anaconda3\lib\site-packages\xgboost\training.py in cv(params, dtrain, num_boost_round, nfold, stratified, folds, metrics, obj, feval, maximize, early_stopping_rounds, fpreproc, as_pandas, verbose_eval, show_stdv, seed, callbacks, shuffle)
405 for fold in cvfolds:
406 fold.update(i, obj)
--> 407 res = aggcv([f.eval(i, feval) for f in cvfolds])
408
409 for key, mean, std in res:
C:\ProgramData\Anaconda3\lib\site-packages\xgboost\training.py in <listcomp>(.0)
405 for fold in cvfolds:
406 fold.update(i, obj)
--> 407 res = aggcv([f.eval(i, feval) for f in cvfolds])
408
409 for key, mean, std in res:
C:\ProgramData\Anaconda3\lib\site-packages\xgboost\training.py in eval(self, iteration, feval)
220 def eval(self, iteration, feval):
221 """"Evaluate the CVPack for one iteration."""
--> 222 return self.bst.eval_set(self.watchlist, iteration, feval)
223
224
C:\ProgramData\Anaconda3\lib\site-packages\xgboost\core.py in eval_set(self, evals, iteration, feval)
953 dmats, evnames,
954 c_bst_ulong(len(evals)),
--> 955 ctypes.byref(msg)))
956 res = msg.value.decode()
957 if feval is not None:
C:\ProgramData\Anaconda3\lib\site-packages\xgboost\core.py in _check_call(ret)
128 """
129 if ret != 0:
--> 130 raise XGBoostError(_LIB.XGBGetLastError())
131
132
XGBoostError: b'[14:27:23] src/metric/rank_metric.cc:135: Check failed: !auc_error AUC: the dataset only contains pos or neg samples'
It seems that all the predictions are positive or negative. Am I correct? Is there something I can do about it?
The problem arises when XGBoost splits the data into train/validation folds and one of the splits ends up with no negative or no positive examples (either in the train set or the validation set).
I see two quick approaches you can take:
1. Check how many positive and negative examples you have, and get more examples of whichever class you lack. It is even easier and faster to duplicate the examples you lack: for example, if you have 99% negative examples and 1% positive examples, you might duplicate each positive example 99 times (the ratio 99/1).
2. Create the cross-validation folds yourself, so you gain control over the split and can force negative and positive examples into each fold, as sketched below.
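A minimal sketch of the second approach (the stratified and folds parameters appear in the xgb.cv signature shown in the traceback above; treat the exact behavior as version-dependent):

from sklearn.model_selection import StratifiedKFold

# Option A: let xgboost stratify the folds itself.
cv_results = xgb.cv(dtrain=churn_dmatrix, params=params, nfold=4,
                    num_boost_round=5, metrics='auc', as_pandas=True,
                    stratified=True)

# Option B: build the folds yourself so every fold keeps both classes.
skf = StratifiedKFold(n_splits=4, shuffle=True, random_state=0)
folds = list(skf.split(class_data.iloc[:, :-1], class_data.Churn))
cv_results = xgb.cv(dtrain=churn_dmatrix, params=params,
                    num_boost_round=5, metrics='auc', as_pandas=True,
                    folds=folds)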