I want to run torchinfo on my BertClassifier, but I can't do it without errors:
class BertClassifier(nn.Module):
    def __init__(self, dropout=0.5):
        super(BertClassifier, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-cased')
        self.dropout = nn.Dropout(dropout)
        self.linear = nn.Linear(768, 3)
        self.relu = nn.ReLU()

    def forward(self, input_id, mask):
        vEmbeddingToken, pooled_output = self.bert(input_ids=input_id, attention_mask=mask, return_dict=False)
        dropout_output = self.dropout(pooled_output)
        linear_output = self.linear(dropout_output)
        final_layer = self.relu(linear_output)
        return final_layer

torchinfo.summary(BertClassifier(), ((4, 512), (4, 1, 512)))
I get this error:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
File ~/abWrk/abWrkVenv/lib/python3.8/site-packages/torchinfo/torchinfo.py:272, in forward_pass(model, x, batch_dim, cache_forward_pass, device, **kwargs)
271 if isinstance(x, (list, tuple)):
--> 272 _ = model.to(device)(*x, **kwargs)
273 elif isinstance(x, dict):
File ~/abWrk/abWrkVenv/lib/python3.8/site-packages/torch/nn/modules/module.py:1110, in Module._call_impl(self, *input, **kwargs)
1108 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1109 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1110 return forward_call(*input, **kwargs)
1111 # Do not call functions when jit is used
Input In [24], in BertClassifier.forward(self, input_id, mask)
12 def forward(self, input_id, mask):
13
14 #
15 # pooled_output - embedding vector of [CLS] token
16 #
---> 17 vEmbeddingToken, pooled_output = self.bert(input_ids= input_id, attention_mask=mask,return_dict=False)
18 dropout_output = self.dropout(pooled_output)
File ~/abWrk/abWrkVenv/lib/python3.8/site-packages/torch/nn/modules/module.py:1128, in Module._call_impl(self, *input, **kwargs)
1126 input = bw_hook.setup_input_hook(input)
-> 1128 result = forward_call(*input, **kwargs)
1129 if _global_forward_hooks or self._forward_hooks:
File ~/abWrk/abWrkVenv/lib/python3.8/site-packages/transformers/models/bert/modeling_bert.py:1010, in BertModel.forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, encoder_hidden_states, encoder_attention_mask, past_key_values, use_cache, output_attentions, output_hidden_states, return_dict)
1008 head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
-> 1010 embedding_output = self.embeddings(
1011 input_ids=input_ids,
1012 position_ids=position_ids,
1013 token_type_ids=token_type_ids,
1014 inputs_embeds=inputs_embeds,
1015 past_key_values_length=past_key_values_length,
1016 )
1017 encoder_outputs = self.encoder(
1018 embedding_output,
1019 attention_mask=extended_attention_mask,
(...)
1027 return_dict=return_dict,
1028 )
File ~/abWrk/abWrkVenv/lib/python3.8/site-packages/torch/nn/modules/module.py:1128, in Module._call_impl(self, *input, **kwargs)
1126 input = bw_hook.setup_input_hook(input)
-> 1128 result = forward_call(*input, **kwargs)
1129 if _global_forward_hooks or self._forward_hooks:
File ~/abWrk/abWrkVenv/lib/python3.8/site-packages/transformers/models/bert/modeling_bert.py:235, in BertEmbeddings.forward(self, input_ids, token_type_ids, position_ids, inputs_embeds, past_key_values_length)
234 if inputs_embeds is None:
--> 235 inputs_embeds = self.word_embeddings(input_ids)
236 token_type_embeddings = self.token_type_embeddings(token_type_ids)
File ~/abWrk/abWrkVenv/lib/python3.8/site-packages/torch/nn/modules/module.py:1128, in Module._call_impl(self, *input, **kwargs)
1126 input = bw_hook.setup_input_hook(input)
-> 1128 result = forward_call(*input, **kwargs)
1129 if _global_forward_hooks or self._forward_hooks:
File ~/abWrk/abWrkVenv/lib/python3.8/site-packages/torch/nn/modules/sparse.py:158, in Embedding.forward(self, input)
157 def forward(self, input: Tensor) -> Tensor:
--> 158 return F.embedding(
159 input, self.weight, self.padding_idx, self.max_norm,
160 self.norm_type, self.scale_grad_by_freq, self.sparse)
File ~/abWrk/abWrkVenv/lib/python3.8/site-packages/torch/nn/functional.py:2183, in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
2182 _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 2183 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
RuntimeError: Expected tensor for argument #1 'indices' to have one of the following scalar types: Long, Int; but got torch.cuda.FloatTensor instead (while checking arguments for embedding)
The above exception was the direct cause of the following exception:
RuntimeError Traceback (most recent call last)
Input In [25], in <cell line: 1>()
----> 1 torchinfo.summary(BertClassifier(), ((4, 512),(4, 1, 512)))
File ~/abWrk/abWrkVenv/lib/python3.8/site-packages/torchinfo/torchinfo.py:201, in summary(model, input_size, input_data, batch_dim, cache_forward_pass, col_names, col_width, depth, device, dtypes, row_settings, verbose, **kwargs)
196 validate_user_params(input_data, input_size, columns, col_width, verbose)
198 x, correct_input_size = process_input(
199 input_data, input_size, batch_dim, device, dtypes
200 )
--> 201 summary_list = forward_pass(
202 model, x, batch_dim, cache_forward_pass, device, **kwargs
203 )
204 formatting = FormattingOptions(depth, verbose, columns, col_width, rows)
205 results = ModelStatistics(
206 summary_list, correct_input_size, get_total_memory_used(x), formatting
207 )
File ~/abWrk/abWrkVenv/lib/python3.8/site-packages/torchinfo/torchinfo.py:281, in forward_pass(model, x, batch_dim, cache_forward_pass, device, **kwargs)
279 except Exception as e:
280 executed_layers = [layer for layer in summary_list if layer.executed]
--> 281 raise RuntimeError(
282 "Failed to run torchinfo. See above stack traces for more details. "
283 f"Executed layers up to: {executed_layers}"
284 ) from e
285 finally:
286 if hooks is not None:
RuntimeError: Failed to run torchinfo. See above stack traces for more details. Executed layers up to: []
How can I use torchinfo on BertClassifier?
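The last RuntimeError in the inner traceback is the real cause: torchinfo builds FloatTensor inputs by default, while BERT's word embedding lookup requires Long/Int token ids. A minimal sketch of one way around this, assuming a torchinfo version whose summary() accepts the dtypes argument (it appears in the summary() signature shown in the traceback above):

import torch
import torchinfo

model = BertClassifier()
torchinfo.summary(
    model,
    input_size=((4, 512), (4, 1, 512)),  # (input_id, mask), shapes from the question
    dtypes=[torch.long, torch.long],     # token ids must be Long/Int, not Float
)

Alternatively, summary() also takes input_data=[input_ids, mask] with real tensors you construct yourself, which avoids dtype guessing entirely.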
I'm trying to perform image semantic segmentation (segmenting mining fields) using lightning-flash. My images are all RGB/uint8/512x512 and the masks are L/uint8/512x512.
When I run the code, I get an error when fitting.
My code is this one:
import torch
import flash
from flash.image import SemanticSegmentation, SemanticSegmentationData
import os
from google.colab import drive
import ssl
drive.mount("/content/drive")
DATA_DIR = '/content/drive/MyDrive/data/'
x_train_dir = os.path.join(DATA_DIR, 'train_images')
y_train_dir = os.path.join(DATA_DIR, 'train_masks')
x_valid_dir = os.path.join(DATA_DIR, 'val_images')
y_valid_dir = os.path.join(DATA_DIR, 'val_masks')
x_test_dir = os.path.join(DATA_DIR, 'test_images')
y_test_dir = os.path.join(DATA_DIR, 'test_masks')
datamodule = SemanticSegmentationData.from_folders(
    train_folder=x_train_dir,
    train_target_folder=y_train_dir,
    val_folder=x_valid_dir,
    val_target_folder=y_valid_dir,
    test_folder=x_test_dir,
    test_target_folder=y_test_dir,
    transform_kwargs=dict(image_size=(256, 256)),
    num_classes=1,
    batch_size=16,
)

# avoid ssl error
ssl._create_default_https_context = ssl._create_unverified_context

model = SemanticSegmentation(
    head="unetplusplus",
    backbone="densenet169",
    pretrained="imagenet",
    num_classes=datamodule.num_classes,
)

GPUS = torch.cuda.device_count()
if GPUS > 0:
    trainer = flash.Trainer(max_epochs=2, gpus=torch.cuda.device_count())
else:
    trainer = flash.Trainer(max_epochs=2)

trainer.finetune(model, datamodule=datamodule, strategy="freeze")
trainer.save_checkpoint("semantic_segmentation_model.pt")
When I run the code, I get this error:
IndexError Traceback (most recent call last)
<ipython-input-7-11e2ce087ca0> in <module>
6
7 #trainer.fit(model, datamodule=datamodule)
----> 8 trainer.finetune(model, datamodule=datamodule, strategy="freeze")
9 trainer.save_checkpoint("semantic_segmentation_model.pt")
19 frames
/usr/local/lib/python3.7/dist-packages/flash/core/trainer.py in finetune(self, model, train_dataloader, val_dataloaders, datamodule, strategy, train_bn)
162 """
163 self._resolve_callbacks(model, strategy, train_bn=train_bn)
--> 164 return super().fit(model, train_dataloader, val_dataloaders, datamodule)
165
166 def predict(
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
695 self.strategy.model = model
696 self._call_and_handle_interrupt(
--> 697 self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
698 )
699
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _call_and_handle_interrupt(self, trainer_fn, *args, **kwargs)
648 return self.strategy.launcher.launch(trainer_fn, *args, trainer=self, **kwargs)
649 else:
--> 650 return trainer_fn(*args, **kwargs)
651 # TODO(awaelchli): Unify both exceptions below, where `KeyboardError` doesn't re-raise
652 except KeyboardInterrupt as exception:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
735 ckpt_path, model_provided=True, model_connected=self.lightning_module is not None
736 )
--> 737 results = self._run(model, ckpt_path=self.ckpt_path)
738
739 assert self.state.stopped
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run(self, model, ckpt_path)
1166 self._checkpoint_connector.resume_end()
1167
-> 1168 results = self._run_stage()
1169
1170 log.detail(f"{self.__class__.__name__}: trainer tearing down")
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_stage(self)
1252 if self.predicting:
1253 return self._run_predict()
-> 1254 return self._run_train()
1255
1256 def _pre_training_routine(self):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_train(self)
1274
1275 with isolate_rng():
-> 1276 self._run_sanity_check()
1277
1278 # enable train mode
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_sanity_check(self)
1343 # run eval step
1344 with torch.no_grad():
-> 1345 val_loop.run()
1346
1347 self._call_callback_hooks("on_sanity_check_end")
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/loop.py in run(self, *args, **kwargs)
198 try:
199 self.on_advance_start(*args, **kwargs)
--> 200 self.advance(*args, **kwargs)
201 self.on_advance_end()
202 self._restarting = False
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/dataloader/evaluation_loop.py in advance(self, *args, **kwargs)
153 if self.num_dataloaders > 1:
154 kwargs["dataloader_idx"] = dataloader_idx
--> 155 dl_outputs = self.epoch_loop.run(self._data_fetcher, dl_max_batches, kwargs)
156
157 # store batch level output per dataloader
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/loop.py in run(self, *args, **kwargs)
198 try:
199 self.on_advance_start(*args, **kwargs)
--> 200 self.advance(*args, **kwargs)
201 self.on_advance_end()
202 self._restarting = False
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py in advance(self, data_fetcher, dl_max_batches, kwargs)
141
142 # lightning module methods
--> 143 output = self._evaluation_step(**kwargs)
144 output = self._evaluation_step_end(output)
145
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py in _evaluation_step(self, **kwargs)
238 """
239 hook_name = "test_step" if self.trainer.testing else "validation_step"
--> 240 output = self.trainer._call_strategy_hook(hook_name, *kwargs.values())
241
242 return output
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _call_strategy_hook(self, hook_name, *args, **kwargs)
1704
1705 with self.profiler.profile(f"[Strategy]{self.strategy.__class__.__name__}.{hook_name}"):
-> 1706 output = fn(*args, **kwargs)
1707
1708 # restore current_fx when nested context
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/strategies/strategy.py in validation_step(self, *args, **kwargs)
368 with self.precision_plugin.val_step_context():
369 assert isinstance(self.model, ValidationStep)
--> 370 return self.model.validation_step(*args, **kwargs)
371
372 def test_step(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]:
/usr/local/lib/python3.7/dist-packages/flash/image/segmentation/model.py in validation_step(self, batch, batch_idx)
151 def validation_step(self, batch: Any, batch_idx: int) -> Any:
152 batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])
--> 153 return super().validation_step(batch, batch_idx)
154
155 def test_step(self, batch: Any, batch_idx: int) -> Any:
/usr/local/lib/python3.7/dist-packages/flash/core/model.py in validation_step(self, batch, batch_idx)
423
424 def validation_step(self, batch: Any, batch_idx: int) -> None:
--> 425 output = self.step(batch, batch_idx, self.val_metrics)
426 log_kwargs = {"batch_size": output.get(OutputKeys.BATCH_SIZE, None)} if _PL_GREATER_EQUAL_1_5_0 else {}
427 self.log_dict(
/usr/local/lib/python3.7/dist-packages/flash/core/model.py in step(self, batch, batch_idx, metrics)
360 output = {OutputKeys.OUTPUT: y_hat}
361 y_hat = self.to_loss_format(output[OutputKeys.OUTPUT])
--> 362 losses = {name: l_fn(y_hat, y) for name, l_fn in self.loss_fn.items()}
363
364 y_hat = self.to_metrics_format(output[OutputKeys.OUTPUT])
/usr/local/lib/python3.7/dist-packages/flash/core/model.py in <dictcomp>(.0)
360 output = {OutputKeys.OUTPUT: y_hat}
361 y_hat = self.to_loss_format(output[OutputKeys.OUTPUT])
--> 362 losses = {name: l_fn(y_hat, y) for name, l_fn in self.loss_fn.items()}
363
364 y_hat = self.to_metrics_format(output[OutputKeys.OUTPUT])
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
3012 if size_average is not None or reduce is not None:
3013 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 3014 return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
3015
3016
IndexError: Target 255 is out of bounds.
How can I solve this problem? I researched other issues on Stack Overflow and they were all related to the number of classes, but in my case I only want to segment mining fields.
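The IndexError comes from the cross-entropy loss: the target masks contain the pixel value 255, but with num_classes=1 the only valid class index is 0. Binary masks are often saved with values {0, 255}, so one hedged fix is to remap 255 to 1 on disk and train with num_classes=2 (background + mining field). A sketch under that assumption; remap_masks is a hypothetical helper, not part of flash, and it overwrites the mask files in place, so back them up first:

import os
import numpy as np
from PIL import Image

def remap_masks(mask_dir):
    # Rewrite each L-mode mask so foreground pixels hold class index 1 instead of 255.
    for name in os.listdir(mask_dir):
        path = os.path.join(mask_dir, name)
        mask = np.array(Image.open(path))
        mask[mask == 255] = 1
        Image.fromarray(mask.astype(np.uint8)).save(path)

for mask_dir in (y_train_dir, y_valid_dir, y_test_dir):
    remap_masks(mask_dir)

After remapping, pass num_classes=2 to SemanticSegmentationData.from_folders and keep num_classes=datamodule.num_classes in the model as before.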
I'm trying to rerun code from this GitHub link, and when it gets to the chunk of code below:
tf.keras.backend.clear_session()

# SET SEED FOR REPRODUCIBILITY
np.random.seed(seed)

n_past = 30
batch_size = 64
n_dims = input_df.shape[1]

mat_X_train, mat_y_train = windowed_dataset(X_train, y_train, n_past)

# CONSTRUCTING MULTIVARIATE BIDIRECTIONAL LSTM NN
lstm_5 = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=[n_past, n_dims]),
    # BATCH NORMALIZATION
    tf.keras.layers.BatchNormalization(),
    # ADDING 1st LSTM LAYER
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),
    tf.keras.layers.Dropout(0.1),
    # ADDING 2nd LSTM LAYER
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(16)),
    tf.keras.layers.Dropout(0.1),
    # DENSE OUTPUT LAYER
    tf.keras.layers.Dense(1)
])

lstm_5.compile(loss='mse',
               optimizer="adam",
               metrics=[rmspe])

checkpoint_cb = ModelCheckpoint('lstm_5.h5',
                                save_best_only=True,
                                monitor='val_rmspe')

# STOPPING THE TRAINING IF VALIDATION RMSPE IS NOT IMPROVING
early_stopping_cb = EarlyStopping(patience=30,
                                  restore_best_weights=True,
                                  monitor='val_rmspe')

print(lstm_5.summary())
It raises the following TypeError:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-345-9065b1b40a1c> in <module>
11
12 # CONSTRUCTING MULTIVARIATE BIDIRECTIONAL LSTM NN
---> 13 lstm_5 = tf.keras.models.Sequential([
14 tf.keras.layers.InputLayer(input_shape=[n_past, n_dims]),
15 # BATCH NORMALIZATION
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\training\tracking\base.py in _method_wrapper(self, *args, **kwargs)
585 with no_automatic_dependency_tracking_scope(model):
586 model.arr2 = [] # Creates a regular, untracked python list
--> 587 ```
588
589 Args:
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\sequential.py in __init__(self, layers, name)
132 layers = [layers]
133 for layer in layers:
--> 134 self.add(layer)
135
136 @property
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\training\tracking\base.py in _method_wrapper(self, *args, **kwargs)
585 with no_automatic_dependency_tracking_scope(model):
586 model.arr2 = [] # Creates a regular, untracked python list
--> 587 ```
588
589 Args:
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\sequential.py in add(self, layer)
215 # If the model is being built continuously on top of an input layer:
216 # refresh its output.
--> 217 output_tensor = layer(self.outputs[0])
218 if len(tf.nest.flatten(output_tensor)) != 1:
219 raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
~\AppData\Roaming\Python\Python38\site-packages\keras\layers\wrappers.py in __call__(self, inputs, initial_state, constants, **kwargs)
581
582 if initial_state is None and constants is None:
--> 583 return super(Bidirectional, self).__call__(inputs, **kwargs)
584
585 # Applies the same workaround as in `RNN.__call__`
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
974 # >> model = tf.keras.Model(inputs, outputs)
975 if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
--> 976 return self._functional_construction_call(inputs, args, kwargs,
977 input_list)
978
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\base_layer.py in _functional_construction_call(self, inputs, args, kwargs, input_list)
1112 layer=self, inputs=inputs, build_graph=True, training=training_value):
1113 # Check input assumptions set after layer building, e.g. input shape.
-> 1114 outputs = self._keras_tensor_symbolic_call(
1115 inputs, input_masks, args, kwargs)
1116
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\base_layer.py in _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs)
846 return tf.nest.map_structure(keras_tensor.KerasTensor, output_signature)
847 else:
--> 848 return self._infer_output_signature(inputs, args, kwargs, input_masks)
849
850 def _infer_output_signature(self, inputs, args, kwargs, input_masks):
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\base_layer.py in _infer_output_signature(self, inputs, args, kwargs, input_masks)
886 self._maybe_build(inputs)
887 inputs = self._maybe_cast_inputs(inputs)
--> 888 outputs = call_fn(inputs, *args, **kwargs)
889
890 self._handle_activity_regularization(inputs, outputs)
~\AppData\Roaming\Python\Python38\site-packages\keras\layers\wrappers.py in call(self, inputs, training, mask, initial_state, constants)
696 forward_state, backward_state = None, None
697
--> 698 y = self.forward_layer(forward_inputs,
699 initial_state=forward_state, **kwargs)
700 y_rev = self.backward_layer(backward_inputs,
~\AppData\Roaming\Python\Python38\site-packages\keras\layers\recurrent.py in __call__(self, inputs, initial_state, constants, **kwargs)
657
658 if initial_state is None and constants is None:
--> 659 return super(RNN, self).__call__(inputs, **kwargs)
660
661 # If any of `initial_state` or `constants` are specified and are Keras
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
1035 with autocast_variable.enable_auto_cast_variables(
1036 self._compute_dtype_object):
-> 1037 outputs = call_fn(inputs, *args, **kwargs)
1038
1039 if self._activity_regularizer:
~\AppData\Roaming\Python\Python38\site-packages\keras\layers\recurrent_v2.py in call(self, inputs, mask, training, initial_state)
1263
1264 if self.return_sequences:
-> 1265 output = backend.maybe_convert_to_ragged(
1266 is_ragged_input, outputs, row_lengths, go_backwards=self.go_backwards)
1267 else:
TypeError: maybe_convert_to_ragged() got an unexpected keyword argument 'go_backwards'
Could someone help me deal with this issue? I think it may be related to the TensorFlow version (I installed tensorflow==2.6.0 and keras==2.6.0). Thanks a lot.
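The traceback mixes files from the tensorflow package (tensorflow\python\training\...) with files from the standalone keras package (keras\engine\..., keras\layers\...), which is the usual signature of mismatched wheels: keras's recurrent_v2.py passes go_backwards to backend.maybe_convert_to_ragged(), and an older TensorFlow backend in the same environment doesn't accept that keyword. A hedged first step is to confirm that exactly one matching pair is installed:

import numpy as np
import tensorflow as tf
import keras

# If these two disagree, reinstall a matching pair into a clean environment,
# e.g. pip install "tensorflow==2.6.0" "keras==2.6.0" (the versions named in
# the question; the linked repo may pin something else).
print("tensorflow:", tf.__version__)
print("keras:", keras.__version__)
print("numpy:", np.__version__)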
I am new to using PyTorch. I am using Google Colaboratory, Accelerator is GPU.
Attempted Solutions (same error):
torch.cuda.empty_cache(), suggested here.
torch.cuda.memory_summary(device=None, abbreviated=False), suggested here.
batch_size=1 for dm instance.
Attempted Solutions (different error):
torch.cuda.clear_memory_allocated(), suggested here.
AttributeError: module 'torch.cuda' has no attribute 'clear_memory_allocated'
Code:
trainer = pl.Trainer.from_argparse_args(args, callbacks=[checkpoint_callback], logger=loggers)
#torch.cuda.empty_cache()
#torch.cuda.memory_summary(device=None, abbreviated=False)
#torch.cuda.clear_memory_allocated()
trainer.fit(model, dm) # ERROR
model_file = os.path.join(args.modeldir, 'last.ckpt')
trainer.save_checkpoint(model_file, weights_only=True)
Error:
RuntimeError: CUDA out of memory. Tried to allocate 240.00 MiB (GPU 0; 11.17 GiB total capacity; 10.58 GiB already allocated; 63.81 MiB free; 10.66 GiB reserved in total by PyTorch)
Output and Traceback:
Some weights of the model checkpoint at bert-base-cased were not used when initializing BertForMulticlassSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.dense.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight']
- This IS expected if you are initializing BertForMulticlassSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing BertForMulticlassSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Some weights of BertForMulticlassSequenceClassification were not initialized from the model checkpoint at bert-base-cased and are newly initialized: ['classifiers.1.bias', 'classifiers.0.weight', 'classifiers.1.weight', 'classifiers.0.bias']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/utilities/distributed.py:52: UserWarning: ModelCheckpoint(save_last=True, monitor=None) is a redundant configuration. You can save the last checkpoint with ModelCheckpoint(save_top_k=None, monitor=None).
warnings.warn(*args, **kwargs)
GPU available: True, used: True
TPU available: False, using: 0 TPU cores
LOCAL_RANK: 0 - CUDA_VISIBLE_DEVICES: [0]
| Name | Type | Params
----------------------------------------------------------------------------
0 | model | BertForMulticlassSequenceClassification | 108 M
1 | valid_acc | Accuracy | 0
2 | valid_f1 | F1 | 0
3 | valid_acc_multi | ModuleList | 0
----------------------------------------------------------------------------
108 M Trainable params
0 Non-trainable params
108 M Total params
433.413 Total estimated model params size (MB)
###score: val_score### 0.0625
/usr/local/lib/python3.7/dist-packages/torchmetrics/utilities/prints.py:36: UserWarning: The ``compute`` method of metric Accuracy was called before the ``update`` method which may lead to errors, as metric states have not yet been updated.
warnings.warn(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/torchmetrics/utilities/prints.py:36: UserWarning: The ``compute`` method of metric F1 was called before the ``update`` method which may lead to errors, as metric states have not yet been updated.
warnings.warn(*args, **kwargs)
/usr/local/lib/python3.7/dist-packages/torchmetrics/utilities/prints.py:36: UserWarning: The ``compute`` method of metric CompositionalMetric was called before the ``update`` method which may lead to errors, as metric states have not yet been updated.
warnings.warn(*args, **kwargs)
Epoch 0: 0% 0/3715 [00:00<?, ?it/s]
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-13-89b1728bd5a6> in <module>()
7 --gpus 1
8 """.split()
----> 9 run_training(args)
37 frames
<ipython-input-5-7f8e9eed480d> in run_training(input)
68
69 trainer = pl.Trainer.from_argparse_args(args, callbacks=[checkpoint_callback], logger=loggers)
---> 70 trainer.fit(model, dm)
71 model_file = os.path.join(args.modeldir, 'last.ckpt')
72 trainer.save_checkpoint(model_file, weights_only=True)
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloader, val_dataloaders, datamodule)
497
498 # dispath `start_training` or `start_testing` or `start_predicting`
--> 499 self.dispatch()
500
501 # plugin will finalized fitting (e.g. ddp_spawn will load trained model)
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in dispatch(self)
544
545 else:
--> 546 self.accelerator.start_training(self)
547
548 def train_or_test_or_predict(self):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/accelerators/accelerator.py in start_training(self, trainer)
71
72 def start_training(self, trainer):
---> 73 self.training_type_plugin.start_training(trainer)
74
75 def start_testing(self, trainer):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in start_training(self, trainer)
112 def start_training(self, trainer: 'Trainer') -> None:
113 # double dispatch to initiate the training loop
--> 114 self._results = trainer.run_train()
115
116 def start_testing(self, trainer: 'Trainer') -> None:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in run_train(self)
635 with self.profiler.profile("run_training_epoch"):
636 # run train epoch
--> 637 self.train_loop.run_training_epoch()
638
639 if self.max_steps and self.max_steps <= self.global_step:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/training_loop.py in run_training_epoch(self)
490 # ------------------------------------
491 with self.trainer.profiler.profile("run_training_batch"):
--> 492 batch_output = self.run_training_batch(batch, batch_idx, dataloader_idx)
493
494 # when returning -1 from train_step, we end epoch early
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/training_loop.py in run_training_batch(self, batch, batch_idx, dataloader_idx)
652
653 # optimizer step
--> 654 self.optimizer_step(optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
655
656 else:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/training_loop.py in optimizer_step(self, optimizer, opt_idx, batch_idx, train_step_and_backward_closure)
431 on_tpu=self.trainer._device_type == DeviceType.TPU and _TPU_AVAILABLE,
432 using_native_amp=using_native_amp,
--> 433 using_lbfgs=is_lbfgs,
434 )
435
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/core/lightning.py in optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx, optimizer_closure, on_tpu, using_native_amp, using_lbfgs)
1388 # wraps into LightingOptimizer only for running step
1389 optimizer = LightningOptimizer._to_lightning_optimizer(optimizer, self.trainer, optimizer_idx)
-> 1390 optimizer.step(closure=optimizer_closure)
1391
1392 def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/core/optimizer.py in step(self, closure, *args, **kwargs)
212 profiler_name = f"optimizer_step_and_closure_{self._optimizer_idx}"
213
--> 214 self.__optimizer_step(*args, closure=closure, profiler_name=profiler_name, **kwargs)
215 self._total_optimizer_step_calls += 1
216
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/core/optimizer.py in __optimizer_step(self, closure, profiler_name, **kwargs)
132
133 with trainer.profiler.profile(profiler_name):
--> 134 trainer.accelerator.optimizer_step(optimizer, self._optimizer_idx, lambda_closure=closure, **kwargs)
135
136 def step(self, *args, closure: Optional[Callable] = None, **kwargs):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/accelerators/accelerator.py in optimizer_step(self, optimizer, opt_idx, lambda_closure, **kwargs)
275 )
276 if make_optimizer_step:
--> 277 self.run_optimizer_step(optimizer, opt_idx, lambda_closure, **kwargs)
278 self.precision_plugin.post_optimizer_step(optimizer, opt_idx)
279 self.training_type_plugin.post_optimizer_step(optimizer, opt_idx, **kwargs)
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/accelerators/accelerator.py in run_optimizer_step(self, optimizer, optimizer_idx, lambda_closure, **kwargs)
280
281 def run_optimizer_step(self, optimizer: Optimizer, optimizer_idx: int, lambda_closure: Callable, **kwargs):
--> 282 self.training_type_plugin.optimizer_step(optimizer, lambda_closure=lambda_closure, **kwargs)
283
284 def optimizer_zero_grad(self, current_epoch: int, batch_idx: int, optimizer: Optimizer, opt_idx: int) -> None:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in optimizer_step(self, optimizer, lambda_closure, **kwargs)
161
162 def optimizer_step(self, optimizer: torch.optim.Optimizer, lambda_closure: Callable, **kwargs):
--> 163 optimizer.step(closure=lambda_closure, **kwargs)
/usr/local/lib/python3.7/dist-packages/torch/optim/optimizer.py in wrapper(*args, **kwargs)
86 profile_name = "Optimizer.step#{}.step".format(obj.__class__.__name__)
87 with torch.autograd.profiler.record_function(profile_name):
---> 88 return func(*args, **kwargs)
89 return wrapper
90
/usr/local/lib/python3.7/dist-packages/torch/autograd/grad_mode.py in decorate_context(*args, **kwargs)
26 def decorate_context(*args, **kwargs):
27 with self.__class__():
---> 28 return func(*args, **kwargs)
29 return cast(F, decorate_context)
30
/usr/local/lib/python3.7/dist-packages/torch/optim/adam.py in step(self, closure)
64 if closure is not None:
65 with torch.enable_grad():
---> 66 loss = closure()
67
68 for group in self.param_groups:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/training_loop.py in train_step_and_backward_closure()
647 def train_step_and_backward_closure():
648 result = self.training_step_and_backward(
--> 649 split_batch, batch_idx, opt_idx, optimizer, self.trainer.hiddens
650 )
651 return None if result is None else result.loss
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/training_loop.py in training_step_and_backward(self, split_batch, batch_idx, opt_idx, optimizer, hiddens)
740 with self.trainer.profiler.profile("training_step_and_backward"):
741 # lightning module hook
--> 742 result = self.training_step(split_batch, batch_idx, opt_idx, hiddens)
743 self._curr_step_result = result
744
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/training_loop.py in training_step(self, split_batch, batch_idx, opt_idx, hiddens)
291 model_ref._results = Result()
292 with self.trainer.profiler.profile("training_step"):
--> 293 training_step_output = self.trainer.accelerator.training_step(args)
294 self.trainer.accelerator.post_training_step()
295
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/accelerators/accelerator.py in training_step(self, args)
154
155 with self.precision_plugin.train_step_context(), self.training_type_plugin.train_step_context():
--> 156 return self.training_type_plugin.training_step(*args)
157
158 def post_training_step(self):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/plugins/training_type/training_type_plugin.py in training_step(self, *args, **kwargs)
123
124 def training_step(self, *args, **kwargs):
--> 125 return self.lightning_module.training_step(*args, **kwargs)
126
127 def post_training_step(self):
<ipython-input-4-a6cb4f83dcb2> in training_step(self, batch, batch_idx)
104 def training_step(self, batch, batch_idx):
105 x, y_true = batch
--> 106 loss, _ = self(x, labels=y_true)
107 self.log('train_loss', loss)
108 return loss
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
<ipython-input-4-a6cb4f83dcb2> in forward(self, *input, **kwargs)
100
101 def forward(self, *input, **kwargs):
--> 102 return self.model(*input, **kwargs)
103
104 def training_step(self, batch, batch_idx):
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
<ipython-input-4-a6cb4f83dcb2> in forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, labels, output_attentions, output_hidden_states, return_dict)
50 output_attentions=output_attentions,
51 output_hidden_states=output_hidden_states,
---> 52 return_dict=return_dict,
53 )
54
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.7/dist-packages/transformers/models/bert/modeling_bert.py in forward(self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, encoder_hidden_states, encoder_attention_mask, past_key_values, use_cache, output_attentions, output_hidden_states, return_dict)
999 output_attentions=output_attentions,
1000 output_hidden_states=output_hidden_states,
-> 1001 return_dict=return_dict,
1002 )
1003 sequence_output = encoder_outputs[0]
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.7/dist-packages/transformers/models/bert/modeling_bert.py in forward(self, hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values, use_cache, output_attentions, output_hidden_states, return_dict)
587 encoder_attention_mask,
588 past_key_value,
--> 589 output_attentions,
590 )
591
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.7/dist-packages/transformers/models/bert/modeling_bert.py in forward(self, hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions)
473 head_mask,
474 output_attentions=output_attentions,
--> 475 past_key_value=self_attn_past_key_value,
476 )
477 attention_output = self_attention_outputs[0]
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.7/dist-packages/transformers/models/bert/modeling_bert.py in forward(self, hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions)
406 encoder_attention_mask,
407 past_key_value,
--> 408 output_attentions,
409 )
410 attention_output = self.output(self_outputs[0], hidden_states)
/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
/usr/local/lib/python3.7/dist-packages/transformers/models/bert/modeling_bert.py in forward(self, hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions)
303
304 # Take the dot product between "query" and "key" to get the raw attention scores.
--> 305 attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
306
307 if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
RuntimeError: CUDA out of memory. Tried to allocate 240.00 MiB (GPU 0; 11.17 GiB total capacity; 10.58 GiB already allocated; 63.81 MiB free; 10.66 GiB reserved in total by PyTorch)
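torch.cuda.empty_cache() and memory_summary() only release or report cached blocks; they cannot help when the working set genuinely exceeds the card. Since batch_size=1 still fails, the usual levers for a BERT-sized model on an 11 GiB GPU are shorter sequences, mixed precision, and gradient accumulation. A sketch under those assumptions (model and dm are the objects from the question; the flags are standard PyTorch Lightning Trainer arguments):

import pytorch_lightning as pl

trainer = pl.Trainer(
    gpus=1,
    max_epochs=3,
    precision=16,               # half precision roughly halves activation memory
    accumulate_grad_batches=8,  # keep the effective batch size with micro-batches
)
trainer.fit(model, dm)

Attention memory also grows quadratically with sequence length, so tokenizing with a smaller max_length (e.g. 128 instead of 512) is often the single biggest saving.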
from torch import nn
from transformers.models.bert.modeling_bert import BertConfig

config = BertConfig.from_pretrained("bert-base-uncased")

self.x_head = nn.Linear(in_features=config.hidden_size, out_features=28)

word_embeddings = nn.Embedding(num_embeddings=979, embedding_dim=config.hidden_size, padding_idx=0)(ids_1).to(self.device)
vis_embeddings = nn.Embedding(num_embeddings=127, embedding_dim=config.hidden_size, padding_idx=0)(ids_2).to(self.device)
input_embeds = torch.cat([word_embeddings, vis_embeddings], dim=1).to(self.device)
In the forward() method I have the following code:
x_scores = self.x_head(input_embeds)
I get the error below at x_scores (the last line above):
~/pytorch-env_py3.7/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051 return forward_call(*input, **kwargs)
1052 # Do not call functions when jit is used
1053 full_backward_hooks, non_full_backward_hooks = [], []
~/pytorch-env_py3.7/lib/python3.7/site-packages/torch/nn/parallel/data_parallel.py in forward(self, *inputs, **kwargs)
166 return self.module(*inputs[0], **kwargs[0])
167 replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
--> 168 outputs = self.parallel_apply(replicas, inputs, kwargs)
169 return self.gather(outputs, self.output_device)
170
~/pytorch-env_py3.7/lib/python3.7/site-packages/torch/nn/parallel/data_parallel.py in parallel_apply(self, replicas, inputs, kwargs)
176
177 def parallel_apply(self, replicas, inputs, kwargs):
--> 178 return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
179
180 def gather(self, outputs, output_device):
~/pytorch-env_py3.7/lib/python3.7/site-packages/torch/nn/parallel/parallel_apply.py in parallel_apply(modules, inputs, kwargs_tup, devices)
84 output = results[i]
85 if isinstance(output, ExceptionWrapper):
---> 86 output.reraise()
87 outputs.append(output)
88 return outputs
~/pytorch-env_py3.7/lib/python3.7/site-packages/torch/_utils.py in reraise(self)
423 # have message field
424 raise self.exc_type(message=msg)
--> 425 raise self.exc_type(msg)
426
427
RuntimeError: Caught RuntimeError in replica 0 on device 0.
Original Traceback (most recent call last):
File "/home/gems/pytorch-env_py3.7/lib/python3.7/site-packages/torch/nn/parallel/parallel_apply.py", line 61, in _worker
output = module(*input, **kwargs)
File "/home/gems/pytorch-env_py3.7/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "", line 56, in forward
x_scores = self.x_head(input_embeds)
File "/home/gems/pytorch-env_py3.7/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1051, in _call_impl
return forward_call(*input, **kwargs)
File "/home/gems/pytorch-env_py3.7/lib/python3.7/site-packages/torch/nn/modules/linear.py", line 96, in forward
return F.linear(input, self.weight, self.bias)
File "/home/gems/pytorch-env_py3.7/lib/python3.7/site-packages/torch/nn/functional.py", line 1847, in linear
return torch._C._nn.linear(input, weight, bias)
RuntimeError: CUDA error: CUBLAS_STATUS_NOT_INITIALIZED when calling cublasCreate(handle)
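A CUBLAS_STATUS_NOT_INITIALIZED from F.linear under DataParallel is usually a symptom rather than the bug itself. Two likely causes in the snippet above: ids outside [0, num_embeddings) corrupting the embedding lookup, and the nn.Embedding layers being constructed fresh inside forward(), so they are never registered on the module, never moved by .to(device), and never replicated across GPUs. A sketch of the register-in-__init__ pattern; MyHead is a hypothetical stand-in for the question's model:

import torch
from torch import nn

class MyHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        # Registered submodules move with .to(device) and replicate under DataParallel.
        self.word_embeddings = nn.Embedding(979, config.hidden_size, padding_idx=0)
        self.vis_embeddings = nn.Embedding(127, config.hidden_size, padding_idx=0)
        self.x_head = nn.Linear(config.hidden_size, 28)

    def forward(self, ids_1, ids_2):
        # Out-of-range ids surface as opaque CUDA/cuBLAS errors, so check early.
        assert int(ids_1.max()) < 979 and int(ids_2.max()) < 127, "id out of range"
        input_embeds = torch.cat(
            [self.word_embeddings(ids_1), self.vis_embeddings(ids_2)], dim=1
        )
        return self.x_head(input_embeds)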
I am running the new Apple-native TensorFlow package (2.4) and ran into a problem I did not have before. This Jupyter notebook code works in an old Intel-based environment with an older TensorFlow version, but with the M1 Apple ML Compute TensorFlow 2.4 it is not compatible with NumPy 1.20 or 1.18 (I downgraded NumPy to try). The error log:
NotImplementedError: Cannot convert a symbolic Tensor (lstm_1/strided_slice:0) to a numpy array. This error may indicate that you're trying to pass a Tensor to a NumPy call, which is not supported
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
<ipython-input-20-73358e637fe3> in <module>
4 model = Sequential()
5 model.add(Embedding(vocab_size+1, W2V_SIZE, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, trainable=False))
----> 6 model.add(LSTM(500, dropout=0.2, recurrent_dropout=0.2))
7 model.add(Dense(units = 10000, kernel_initializer = 'glorot_uniform', activation = 'relu'))
8 model.add(Dropout(0.35))
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
515 self._self_setattr_tracking = False # pylint: disable=protected-access
516 try:
--> 517 result = method(self, *args, **kwargs)
518 finally:
519 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/sequential.py in add(self, layer)
221 # If the model is being built continuously on top of an input layer:
222 # refresh its output.
--> 223 output_tensor = layer(self.outputs[0])
224 if len(nest.flatten(output_tensor)) != 1:
225 raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in __call__(self, inputs, initial_state, constants, **kwargs)
658
659 if initial_state is None and constants is None:
--> 660 return super(RNN, self).__call__(inputs, **kwargs)
661
662 # If any of `initial_state` or `constants` are specified and are Keras
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
944 # >> model = tf.keras.Model(inputs, outputs)
945 if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
--> 946 return self._functional_construction_call(inputs, args, kwargs,
947 input_list)
948
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in _functional_construction_call(self, inputs, args, kwargs, input_list)
1083 layer=self, inputs=inputs, build_graph=True, training=training_value):
1084 # Check input assumptions set after layer building, e.g. input shape.
-> 1085 outputs = self._keras_tensor_symbolic_call(
1086 inputs, input_masks, args, kwargs)
1087
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs)
815 return nest.map_structure(keras_tensor.KerasTensor, output_signature)
816 else:
--> 817 return self._infer_output_signature(inputs, args, kwargs, input_masks)
818
819 def _infer_output_signature(self, inputs, args, kwargs, input_masks):
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in _infer_output_signature(self, inputs, args, kwargs, input_masks)
856 # TODO(kaftan): do we maybe_build here, or have we already done it?
857 self._maybe_build(inputs)
--> 858 outputs = call_fn(inputs, *args, **kwargs)
859
860 self._handle_activity_regularization(inputs, outputs)
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent_v2.py in call(self, inputs, mask, training, initial_state)
1161 # LSTM does not support constants. Ignore it during process.
1162 orig_initial_state = initial_state
-> 1163 inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)
1164
1165 if isinstance(mask, list):
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in _process_inputs(self, inputs, initial_state, constants)
857 initial_state = self.states
858 elif initial_state is None:
--> 859 initial_state = self.get_initial_state(inputs)
860
861 if len(initial_state) != len(self.states):
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in get_initial_state(self, inputs)
640 dtype = inputs.dtype
641 if get_initial_state_fn:
--> 642 init_state = get_initial_state_fn(
643 inputs=None, batch_size=batch_size, dtype=dtype)
644 else:
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in get_initial_state(self, inputs, batch_size, dtype)
2504
2505 def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
-> 2506 return list(_generate_zero_filled_state_for_cell(
2507 self, inputs, batch_size, dtype))
2508
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype)
2985 batch_size = array_ops.shape(inputs)[0]
2986 dtype = inputs.dtype
-> 2987 return _generate_zero_filled_state(batch_size, cell.state_size, dtype)
2988
2989
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in _generate_zero_filled_state(batch_size_tensor, state_size, dtype)
3001
3002 if nest.is_nested(state_size):
-> 3003 return nest.map_structure(create_zeros, state_size)
3004 else:
3005 return create_zeros(state_size)
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/util/nest.py in map_structure(func, *structure, **kwargs)
657
658 return pack_sequence_as(
--> 659 structure[0], [func(*x) for x in entries],
660 expand_composites=expand_composites)
661
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/util/nest.py in <listcomp>(.0)
657
658 return pack_sequence_as(
--> 659 structure[0], [func(*x) for x in entries],
660 expand_composites=expand_composites)
661
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in create_zeros(unnested_state_size)
2998 flat_dims = tensor_shape.TensorShape(unnested_state_size).as_list()
2999 init_state_size = [batch_size_tensor] + flat_dims
-> 3000 return array_ops.zeros(init_state_size, dtype=dtype)
3001
3002 if nest.is_nested(state_size):
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
199 """Call target, and fall back on dispatchers if there is a TypeError."""
200 try:
--> 201 return target(*args, **kwargs)
202 except (TypeError, ValueError):
203 # Note: convert_to_eager_tensor currently raises a ValueError, not a
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in wrapped(*args, **kwargs)
2817
2818 def wrapped(*args, **kwargs):
-> 2819 tensor = fun(*args, **kwargs)
2820 tensor._is_zeros_tensor = True
2821 return tensor
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in zeros(shape, dtype, name)
2866 # Create a constant if it won't be very big. Otherwise create a fill
2867 # op to prevent serialized GraphDefs from becoming too large.
-> 2868 output = _constant_if_small(zero, shape, dtype, name)
2869 if output is not None:
2870 return output
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in _constant_if_small(value, shape, dtype, name)
2802 def _constant_if_small(value, shape, dtype, name):
2803 try:
-> 2804 if np.prod(shape) < 1000:
2805 return constant(value, shape=shape, dtype=dtype, name=name)
2806 except TypeError:
<__array_function__ internals> in prod(*args, **kwargs)
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/numpy/core/fromnumeric.py in prod(a, axis, dtype, out, keepdims, initial, where)
3028 10
3029 """
-> 3030 return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
3031 keepdims=keepdims, initial=initial, where=where)
3032
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/numpy/core/fromnumeric.py in _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs)
85 return reduction(axis=axis, out=out, **passkwargs)
86
---> 87 return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
88
89
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/framework/ops.py in __array__(self)
850
851 def __array__(self):
--> 852 raise NotImplementedError(
853 "Cannot convert a symbolic Tensor ({}) to a numpy array."
854 " This error may indicate that you're trying to pass a Tensor to"
NotImplementedError: Cannot convert a symbolic Tensor (lstm_1/strided_slice:0) to a numpy array. This error may indicate that you're trying to pass a Tensor to a NumPy call, which is not supported
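This is the known TensorFlow 2.4 / NumPy 1.20 incompatibility: _constant_if_small() in array_ops.py calls np.prod() on a symbolic shape, and NumPy 1.20's reduction machinery tries to convert the tensor to an array. Pinning NumPy to a 1.19.x release (between the two versions already tried) usually resolves it; a sketch for the miniforge environment named in the traceback:

# In the environment from the traceback (~/miniforge3/envs/tf2.4):
#   pip install "numpy>=1.19.2,<1.20"   # e.g. numpy==1.19.5
import numpy as np
assert np.__version__.startswith("1.19"), f"still on numpy {np.__version__}"

If the Apple fork pins its own NumPy, reinstalling the tensorflow_macos package after the downgrade keeps the environment consistent.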