Getting AttributeError while training DNN classifier - string

Here is the code:
continous=['age', 'education_num', 'capital_gain', 'capital_loss', 'hours_per_week']
Creating feat_cols: I automated the process with a for loop.
feat_cols = []
for col in census.columns:
    ## Continuous - unchanged
    if col in continous:
        feat_cols.append(tf.feature_column.numeric_column(col))
    ## Categorical - trick, no need to count
    elif col != 'income_bracket':
        print('Embedded {}'.format(col))
        feat_cols.append(tf.feature_column.embedding_column(categorical_column=col, dimension=X_train[col].nunique()))
Creating the model: after importing TensorFlow, I created this model.
dnnmodel = tf.estimator.DNNClassifier(hidden_units=[7,7,7], feature_columns=feat_cols, n_classes=2)
ip_dnn = tf.estimator.inputs.pandas_input_fn(X_train, y_train, num_epochs=None, shuffle=True)
dnnmodel.train(input_fn=ip_dnn, steps=5000)
Error (earlier, with LinearClassifier, everything worked fine):
INFO:tensorflow:Calling model_fn.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-67-751f0a94d0d2> in <module>()
1 # ip_func created earliar
2
----> 3 dnnmodel.train(input_fn=ip_dnn, steps=5000)
~\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py in train(self, input_fn, hooks, steps, max_steps, saving_listeners)
356
357 saving_listeners = _check_listeners_type(saving_listeners)
--> 358 loss = self._train_model(input_fn, hooks, saving_listeners)
359 logging.info('Loss for final step: %s.', loss)
360 return self
~\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py in _train_model(self, input_fn, hooks, saving_listeners)
1122 return self._train_model_distributed(input_fn, hooks, saving_listeners)
1123 else:
-> 1124 return self._train_model_default(input_fn, hooks, saving_listeners)
1125
1126 def _train_model_default(self, input_fn, hooks, saving_listeners):
~\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py in _train_model_default(self, input_fn, hooks, saving_listeners)
1152 worker_hooks.extend(input_hooks)
1153 estimator_spec = self._call_model_fn(
-> 1154 features, labels, model_fn_lib.ModeKeys.TRAIN, self.config)
1155 global_step_tensor = training_util.get_global_step(g)
1156 return self._train_with_estimator_spec(estimator_spec, worker_hooks,
~\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\estimator.py in _call_model_fn(self, features, labels, mode, config)
1110
1111 logging.info('Calling model_fn.')
-> 1112 model_fn_results = self._model_fn(features=features, **kwargs)
1113 logging.info('Done calling model_fn.')
1114
~\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\canned\dnn.py in _model_fn(features, labels, mode, config)
520 input_layer_partitioner=input_layer_partitioner,
521 config=config,
--> 522 batch_norm=batch_norm)
523
524 super(DNNClassifier, self).__init__(
~\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\canned\dnn.py in _dnn_model_fn(features, labels, mode, head, hidden_units, feature_columns, optimizer, activation_fn, dropout, input_layer_partitioner, config, use_tpu, batch_norm)
285 input_layer_partitioner=input_layer_partitioner,
286 batch_norm=batch_norm)
--> 287 logits = logit_fn(features=features, mode=mode)
288
289 if use_tpu:
~\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\canned\dnn.py in dnn_logit_fn(features, mode)
101 batch_norm,
102 name='dnn')
--> 103 return dnn_model(features, mode)
104
105 return dnn_logit_fn
~\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\base_layer.py in __call__(self, inputs, *args, **kwargs)
552 # In graph mode, failure to build the layer's graph
553 # implies a user-side bug. We don't catch exceptions.
--> 554 outputs = self.call(inputs, *args, **kwargs)
555 else:
556 try:
~\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\canned\dnn.py in call(self, features, mode)
193 'input_from_feature_columns',
194 partitioner=self._input_layer_partitioner):
--> 195 net = self._input_layer(features)
196 for i in range(len(self._hidden_layers)):
197 net = self._hidden_layers[i](net)
~\Anaconda3\lib\site-packages\tensorflow\python\feature_column\feature_column.py in __call__(self, features)
335 trainable=self._trainable,
336 cols_to_vars=None,
--> 337 from_template=True)
338
339 @property
~\Anaconda3\lib\site-packages\tensorflow\python\ops\template.py in __call__(self, *args, **kwargs)
366 custom_getter=self._custom_getter) as vs:
367 self._variable_scope = vs
--> 368 return self._call_func(args, kwargs)
369
370 @property
~\Anaconda3\lib\site-packages\tensorflow\python\ops\template.py in _call_func(self, args, kwargs)
309 # Checkpointable).
310 with checkpointable_util.capture_dependencies(template=self):
--> 311 result = self._func(*args, **kwargs)
312
313 if self._variables_created:
~\Anaconda3\lib\site-packages\tensorflow\python\feature_column\feature_column.py in _internal_input_layer(features, feature_columns, weight_collections, trainable, cols_to_vars, scope, cols_to_output_tensors, from_template)
179 """See input_layer. `scope` is a name or variable scope to use."""
180
--> 181 feature_columns = _normalize_feature_columns(feature_columns)
182 for column in feature_columns:
183 if not isinstance(column, _DenseColumn):
~\Anaconda3\lib\site-packages\tensorflow\python\feature_column\feature_column.py in _normalize_feature_columns(feature_columns)
2266 name_to_column = dict()
2267 for column in feature_columns:
-> 2268 if column.name in name_to_column:
2269 raise ValueError('Duplicate feature column name found for columns: {} '
2270 'and {}. This usually means that these columns refer to '
~\Anaconda3\lib\site-packages\tensorflow\python\feature_column\feature_column_v2.py in name(self)
2960 def name(self):
2961 """See `FeatureColumn` base class."""
-> 2962 return '{}_embedding'.format(self.categorical_column.name)
2963
2964 @property
AttributeError: 'str' object has no attribute 'name'
originally defined at:
File "C:\Users\Subham\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\canned\dnn.py", line 102, in dnn_logit_fn
name='dnn')
File "C:\Users\Subham\Anaconda3\lib\site-packages\tensorflow_estimator\python\estimator\canned\dnn.py", line 134, in __init__
create_scope_now=False)
File "C:\Users\Subham\Anaconda3\lib\site-packages\tensorflow\python\feature_column\feature_column.py", line 327, in __init__
self._name, _internal_input_layer, create_scope_now_=create_scope_now)
File "C:\Users\Subham\Anaconda3\lib\site-packages\tensorflow\python\ops\template.py", line 154, in make_template
**kwargs)
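The last frame shows the cause: tf.feature_column.embedding_column received the raw column name (a string), and only a categorical-column object has a .name attribute. Below is a minimal sketch of a likely fix, wrapping each name in a categorical column first (the hash_bucket_size choice is an assumption; categorical_column_with_vocabulary_list would also work):
feat_cols = []
for col in census.columns:
    if col in continous:
        feat_cols.append(tf.feature_column.numeric_column(col))
    elif col != 'income_bracket':
        # embedding_column needs a categorical column object, not a str
        cat_col = tf.feature_column.categorical_column_with_hash_bucket(
            col, hash_bucket_size=X_train[col].nunique() + 1)
        feat_cols.append(tf.feature_column.embedding_column(
            categorical_column=cat_col,
            dimension=X_train[col].nunique()))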

Related

Colab IndexError: Target 255 is out of bounds

I'm trying to perform image semantic segmentation (segmenting mining fields) using lightning-flash. My images are all RGB/uint8/512x512 and the masks are L/uint8/512x512.
When I run the code, I get an error when fitting.
My code is this one:
import torch
import flash
from flash.image import SemanticSegmentation, SemanticSegmentationData
import os
from google.colab import drive
import ssl
drive.mount("/content/drive")
DATA_DIR = '/content/drive/MyDrive/data/'
x_train_dir = os.path.join(DATA_DIR, 'train_images')
y_train_dir = os.path.join(DATA_DIR, 'train_masks')
x_valid_dir = os.path.join(DATA_DIR, 'val_images')
y_valid_dir = os.path.join(DATA_DIR, 'val_masks')
x_test_dir = os.path.join(DATA_DIR, 'test_images')
y_test_dir = os.path.join(DATA_DIR, 'test_masks')
datamodule = SemanticSegmentationData.from_folders(
    train_folder=x_train_dir,
    train_target_folder=y_train_dir,
    val_folder=x_valid_dir,
    val_target_folder=y_valid_dir,
    test_folder=x_test_dir,
    test_target_folder=y_test_dir,
    transform_kwargs=dict(image_size=(256, 256)),
    num_classes=1,
    batch_size=16,
)
#avoid ssl error
ssl._create_default_https_context = ssl._create_unverified_context
model = SemanticSegmentation(
    head="unetplusplus",
    backbone="densenet169",
    pretrained="imagenet",
    num_classes=datamodule.num_classes,
)
GPUS = torch.cuda.device_count()
if GPUS > 0:
    trainer = flash.Trainer(max_epochs=2, gpus=torch.cuda.device_count())
else:
    trainer = flash.Trainer(max_epochs=2)
trainer.finetune(model, datamodule=datamodule, strategy="freeze")
trainer.save_checkpoint("semantic_segmentation_model.pt")
When I run the code, I get this error:
IndexError Traceback (most recent call last)
<ipython-input-7-11e2ce087ca0> in <module>
6
7 #trainer.fit(model, datamodule=datamodule)
----> 8 trainer.finetune(model, datamodule=datamodule, strategy="freeze")
9 trainer.save_checkpoint("semantic_segmentation_model.pt")
19 frames
/usr/local/lib/python3.7/dist-packages/flash/core/trainer.py in finetune(self, model, train_dataloader, val_dataloaders, datamodule, strategy, train_bn)
162 """
163 self._resolve_callbacks(model, strategy, train_bn=train_bn)
--> 164 return super().fit(model, train_dataloader, val_dataloaders, datamodule)
165
166 def predict(
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in fit(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
695 self.strategy.model = model
696 self._call_and_handle_interrupt(
--> 697 self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
698 )
699
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _call_and_handle_interrupt(self, trainer_fn, *args, **kwargs)
648 return self.strategy.launcher.launch(trainer_fn, *args, trainer=self, **kwargs)
649 else:
--> 650 return trainer_fn(*args, **kwargs)
651 # TODO(awaelchli): Unify both exceptions below, where `KeyboardError` doesn't re-raise
652 except KeyboardInterrupt as exception:
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
735 ckpt_path, model_provided=True, model_connected=self.lightning_module is not None
736 )
--> 737 results = self._run(model, ckpt_path=self.ckpt_path)
738
739 assert self.state.stopped
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run(self, model, ckpt_path)
1166 self._checkpoint_connector.resume_end()
1167
-> 1168 results = self._run_stage()
1169
1170 log.detail(f"{self.__class__.__name__}: trainer tearing down")
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_stage(self)
1252 if self.predicting:
1253 return self._run_predict()
-> 1254 return self._run_train()
1255
1256 def _pre_training_routine(self):
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_train(self)
1274
1275 with isolate_rng():
-> 1276 self._run_sanity_check()
1277
1278 # enable train mode
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _run_sanity_check(self)
1343 # run eval step
1344 with torch.no_grad():
-> 1345 val_loop.run()
1346
1347 self._call_callback_hooks("on_sanity_check_end")
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/loop.py in run(self, *args, **kwargs)
198 try:
199 self.on_advance_start(*args, **kwargs)
--> 200 self.advance(*args, **kwargs)
201 self.on_advance_end()
202 self._restarting = False
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/dataloader/evaluation_loop.py in advance(self, *args, **kwargs)
153 if self.num_dataloaders > 1:
154 kwargs["dataloader_idx"] = dataloader_idx
--> 155 dl_outputs = self.epoch_loop.run(self._data_fetcher, dl_max_batches, kwargs)
156
157 # store batch level output per dataloader
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/loop.py in run(self, *args, **kwargs)
198 try:
199 self.on_advance_start(*args, **kwargs)
--> 200 self.advance(*args, **kwargs)
201 self.on_advance_end()
202 self._restarting = False
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py in advance(self, data_fetcher, dl_max_batches, kwargs)
141
142 # lightning module methods
--> 143 output = self._evaluation_step(**kwargs)
144 output = self._evaluation_step_end(output)
145
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/loops/epoch/evaluation_epoch_loop.py in _evaluation_step(self, **kwargs)
238 """
239 hook_name = "test_step" if self.trainer.testing else "validation_step"
--> 240 output = self.trainer._call_strategy_hook(hook_name, *kwargs.values())
241
242 return output
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/trainer/trainer.py in _call_strategy_hook(self, hook_name, *args, **kwargs)
1704
1705 with self.profiler.profile(f"[Strategy]{self.strategy.__class__.__name__}.{hook_name}"):
-> 1706 output = fn(*args, **kwargs)
1707
1708 # restore current_fx when nested context
/usr/local/lib/python3.7/dist-packages/pytorch_lightning/strategies/strategy.py in validation_step(self, *args, **kwargs)
368 with self.precision_plugin.val_step_context():
369 assert isinstance(self.model, ValidationStep)
--> 370 return self.model.validation_step(*args, **kwargs)
371
372 def test_step(self, *args: Any, **kwargs: Any) -> Optional[STEP_OUTPUT]:
/usr/local/lib/python3.7/dist-packages/flash/image/segmentation/model.py in validation_step(self, batch, batch_idx)
151 def validation_step(self, batch: Any, batch_idx: int) -> Any:
152 batch = (batch[DataKeys.INPUT], batch[DataKeys.TARGET])
--> 153 return super().validation_step(batch, batch_idx)
154
155 def test_step(self, batch: Any, batch_idx: int) -> Any:
/usr/local/lib/python3.7/dist-packages/flash/core/model.py in validation_step(self, batch, batch_idx)
423
424 def validation_step(self, batch: Any, batch_idx: int) -> None:
--> 425 output = self.step(batch, batch_idx, self.val_metrics)
426 log_kwargs = {"batch_size": output.get(OutputKeys.BATCH_SIZE, None)} if _PL_GREATER_EQUAL_1_5_0 else {}
427 self.log_dict(
/usr/local/lib/python3.7/dist-packages/flash/core/model.py in step(self, batch, batch_idx, metrics)
360 output = {OutputKeys.OUTPUT: y_hat}
361 y_hat = self.to_loss_format(output[OutputKeys.OUTPUT])
--> 362 losses = {name: l_fn(y_hat, y) for name, l_fn in self.loss_fn.items()}
363
364 y_hat = self.to_metrics_format(output[OutputKeys.OUTPUT])
/usr/local/lib/python3.7/dist-packages/flash/core/model.py in <dictcomp>(.0)
360 output = {OutputKeys.OUTPUT: y_hat}
361 y_hat = self.to_loss_format(output[OutputKeys.OUTPUT])
--> 362 losses = {name: l_fn(y_hat, y) for name, l_fn in self.loss_fn.items()}
363
364 y_hat = self.to_metrics_format(output[OutputKeys.OUTPUT])
/usr/local/lib/python3.7/dist-packages/torch/nn/functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
3012 if size_average is not None or reduce is not None:
3013 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 3014 return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
3015
3016
IndexError: Target 255 is out of bounds.
How can I solve this problem? I researched other issues on Stack Overflow and they were all related to the number of classes, but in my case I only want to segment mining fields.
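For context, torch's cross_entropy requires every target pixel to be a class index in [0, num_classes), so a mask value of 255 with num_classes=1 is out of bounds. If the masks are binary images saved as 0/255 (an assumption worth checking), one sketch of a fix is to remap them to class indices {0, 1} and train with num_classes=2 (background plus mining field):
import os
import numpy as np
from PIL import Image

def remap_mask(path):
    mask = np.array(Image.open(path))  # L/uint8, presumably values {0, 255}
    mask[mask == 255] = 1              # 1 = mining field, 0 = background
    Image.fromarray(mask).save(path)

for mask_dir in (y_train_dir, y_valid_dir, y_test_dir):
    for fname in os.listdir(mask_dir):
        remap_mask(os.path.join(mask_dir, fname))
Then rebuild the datamodule with num_classes=2 instead of 1.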

TypeError: maybe_convert_to_ragged() got an unexpected keyword argument 'go_backwards'

I'm trying to rerun code from this GitHub link. It fails at the chunk of code below:
tf.keras.backend.clear_session()

# SET SEED FOR REPRODUCIBILITY
np.random.seed(seed)

n_past = 30
batch_size = 64
n_dims = input_df.shape[1]

mat_X_train, mat_y_train = windowed_dataset(X_train, y_train, n_past)

# CONSTRUCTING MULTIVARIATE BIDIRECTIONAL LSTM NN
lstm_5 = tf.keras.models.Sequential([
    tf.keras.layers.InputLayer(input_shape=[n_past, n_dims]),
    # BATCH NORMALIZATION
    tf.keras.layers.BatchNormalization(),
    # ADDING 1st LSTM LAYER
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(32, return_sequences=True)),
    tf.keras.layers.Dropout(0.1),
    # ADDING 2nd LSTM LAYER
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(16)),
    tf.keras.layers.Dropout(0.1),
    # DENSE OUTPUT LAYER
    tf.keras.layers.Dense(1)
])

lstm_5.compile(loss='mse',
               optimizer="adam",
               metrics=[rmspe])

checkpoint_cb = ModelCheckpoint('lstm_5.h5',
                                save_best_only=True,
                                monitor='val_rmspe')

# STOPPING THE TRAINING IF VALIDATION RMSPE IS NOT IMPROVING
early_stopping_cb = EarlyStopping(patience=30,
                                  restore_best_weights=True,
                                  monitor='val_rmspe')

print(lstm_5.summary())
It raises the following type error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-345-9065b1b40a1c> in <module>
11
12 # CONSTRUCTING MULTIVARIATE BIDIRECTIONAL LSTM NN
---> 13 lstm_5 = tf.keras.models.Sequential([
14 tf.keras.layers.InputLayer(input_shape=[n_past, n_dims]),
15 # BATCH NORMALIZATION
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\training\tracking\base.py in _method_wrapper(self, *args, **kwargs)
585 with no_automatic_dependency_tracking_scope(model):
586 model.arr2 = [] # Creates a regular, untracked python list
--> 587 ```
588
589 Args:
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\sequential.py in __init__(self, layers, name)
132 layers = [layers]
133 for layer in layers:
--> 134 self.add(layer)
135
136 @property
~\AppData\Roaming\Python\Python38\site-packages\tensorflow\python\training\tracking\base.py in _method_wrapper(self, *args, **kwargs)
585 with no_automatic_dependency_tracking_scope(model):
586 model.arr2 = [] # Creates a regular, untracked python list
--> 587 ```
588
589 Args:
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\sequential.py in add(self, layer)
215 # If the model is being built continuously on top of an input layer:
216 # refresh its output.
--> 217 output_tensor = layer(self.outputs[0])
218 if len(tf.nest.flatten(output_tensor)) != 1:
219 raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
~\AppData\Roaming\Python\Python38\site-packages\keras\layers\wrappers.py in __call__(self, inputs, initial_state, constants, **kwargs)
581
582 if initial_state is None and constants is None:
--> 583 return super(Bidirectional, self).__call__(inputs, **kwargs)
584
585 # Applies the same workaround as in `RNN.__call__`
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
974 # >> model = tf.keras.Model(inputs, outputs)
975 if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
--> 976 return self._functional_construction_call(inputs, args, kwargs,
977 input_list)
978
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\base_layer.py in _functional_construction_call(self, inputs, args, kwargs, input_list)
1112 layer=self, inputs=inputs, build_graph=True, training=training_value):
1113 # Check input assumptions set after layer building, e.g. input shape.
-> 1114 outputs = self._keras_tensor_symbolic_call(
1115 inputs, input_masks, args, kwargs)
1116
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\base_layer.py in _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs)
846 return tf.nest.map_structure(keras_tensor.KerasTensor, output_signature)
847 else:
--> 848 return self._infer_output_signature(inputs, args, kwargs, input_masks)
849
850 def _infer_output_signature(self, inputs, args, kwargs, input_masks):
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\base_layer.py in _infer_output_signature(self, inputs, args, kwargs, input_masks)
886 self._maybe_build(inputs)
887 inputs = self._maybe_cast_inputs(inputs)
--> 888 outputs = call_fn(inputs, *args, **kwargs)
889
890 self._handle_activity_regularization(inputs, outputs)
~\AppData\Roaming\Python\Python38\site-packages\keras\layers\wrappers.py in call(self, inputs, training, mask, initial_state, constants)
696 forward_state, backward_state = None, None
697
--> 698 y = self.forward_layer(forward_inputs,
699 initial_state=forward_state, **kwargs)
700 y_rev = self.backward_layer(backward_inputs,
~\AppData\Roaming\Python\Python38\site-packages\keras\layers\recurrent.py in __call__(self, inputs, initial_state, constants, **kwargs)
657
658 if initial_state is None and constants is None:
--> 659 return super(RNN, self).__call__(inputs, **kwargs)
660
661 # If any of `initial_state` or `constants` are specified and are Keras
~\AppData\Roaming\Python\Python38\site-packages\keras\engine\base_layer.py in __call__(self, *args, **kwargs)
1035 with autocast_variable.enable_auto_cast_variables(
1036 self._compute_dtype_object):
-> 1037 outputs = call_fn(inputs, *args, **kwargs)
1038
1039 if self._activity_regularizer:
~\AppData\Roaming\Python\Python38\site-packages\keras\layers\recurrent_v2.py in call(self, inputs, mask, training, initial_state)
1263
1264 if self.return_sequences:
-> 1265 output = backend.maybe_convert_to_ragged(
1266 is_ragged_input, outputs, row_lengths, go_backwards=self.go_backwards)
1267 else:
TypeError: maybe_convert_to_ragged() got an unexpected keyword argument 'go_backwards'
Could someone help me deal with this issue? I think it may be related to the TensorFlow version (I installed tensorflow==2.6.0 and keras==2.6.0). Thanks a lot.
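One clue worth noting: the traceback alternates between tensorflow\python\... and a separate keras\... package under AppData\Roaming, which usually means the standalone keras install does not match the TensorFlow it is calling into. A quick sanity check (a sketch; the pip commands in the comments are assumptions about your environment):
import tensorflow as tf
import keras

# If the versions or install locations disagree, the two packages are mismatched.
print(tf.__version__, tf.__file__)
print(keras.__version__, keras.__file__)

# One likely fix is to reinstall so only one matching pair remains, e.g.:
#   pip uninstall -y keras tensorflow
#   pip install "tensorflow==2.6.0"   # bundles a matching Keras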

'Line2D' object has no property 'ylabel' error with pd.plot()

I am trying to plot using df.plot from the pandas plotting library, and was using the following code:
df_mean.plot(kind='line', subplots=True, layout=(1,8), figsize=(40,8),
             sharey=True, ylabel="Percent Change", title="Average movement")
I thought it might have something to do with using np.transpose() since it would convert it into a numpy array, but after conversion back to a pd.DataFrame(), the error still persists.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-269-85f6c80ca026> in <module>
1 df_mean = pd.DataFrame(df_mean)
2
----> 3 df_mean.plot(kind='line', subplots=True, layout=(1,8), figsize=(40,8),
4 title="Average movement",
5 sharey=True, ylabel = "Percent Change")
~\anaconda3\lib\site-packages\pandas\plotting\_core.py in __call__(self, *args, **kwargs)
    845         keyword_args = ", ".join(
    846             f"{name}={repr(value)}" for (name, default), value in zip(arg_def, args)
--> 847         )
    848         msg = (
    849             "`Series.plot()` should not be called with positional "
~\anaconda3\lib\site-packages\pandas\plotting\_matplotlib\__init__.py in plot(data, kind, **kwargs)
     59         kwargs["ax"] = getattr(ax, "left_ax", ax)
     60     plot_obj = PLOT_CLASSES[kind](data, **kwargs)
---> 61     plot_obj.generate()
     62     plot_obj.draw()
     63     return plot_obj.result
~\anaconda3\lib\site-packages\pandas\plotting\_matplotlib\core.py in generate(self)
    261         else:
    262             return self.data.shape[1]
--> 263
    264     def draw(self):
    265         self.plt.draw_if_interactive()
~\anaconda3\lib\site-packages\pandas\plotting\_matplotlib\core.py in _make_plot(self)
   1075         self.data = self.data.fillna(value=0)
   1076         self.x_compat = plot_params["x_compat"]
-> 1077         if "x_compat" in self.kwds:
   1078             self.x_compat = bool(self.kwds.pop("x_compat"))
   1079
~\anaconda3\lib\site-packages\pandas\plotting\_matplotlib\core.py in _plot(cls, ax, x, y, style, column_num, stacking_id, **kwds)
   1102
   1103         stacking_id = self._get_stacking_id()
-> 1104         is_errorbar = com.any_not_none(*self.errors.values())
   1105
   1106         colors = self._get_colors()
~\anaconda3\lib\site-packages\pandas\plotting\_matplotlib\converter.py in wrapper(*args, **kwargs)
     64         with pandas_converters():
     65             return func(*args, **kwargs)
---> 66
     67     return wrapper
     68
~\anaconda3\lib\site-packages\pandas\plotting\_matplotlib\core.py in _plot(cls, ax, x, y, style, is_errorbar, **kwds)
    654
    655         if is_errorbar:
--> 656             if "xerr" in kwds:
    657                 kwds["xerr"] = np.array(kwds.get("xerr"))
    658             if "yerr" in kwds:
~\anaconda3\lib\site-packages\matplotlib\axes\_axes.py in plot(self, scalex, scaley, data, *args, **kwargs)
   1741
   1742         kwargs = cbook.normalize_kwargs(kwargs, mlines.Line2D)
-> 1743         lines = [*self._get_lines(*args, data=data, **kwargs)]
   1744         for line in lines:
   1745             self.add_line(line)
~\anaconda3\lib\site-packages\matplotlib\axes\_base.py in __call__(self, data, *args, **kwargs)
    271             this += args[0],
    272             args = args[1:]
--> 273             yield from self._plot_args(this, kwargs)
    274
    275     def get_next_color(self):
~\anaconda3\lib\site-packages\matplotlib\axes\_base.py in _plot_args(self, tup, kwargs)
    416         if ncx > 1 and ncy > 1 and ncx != ncy:
    417             raise ValueError(f"x has {ncx} columns but y has {ncy} columns")
--> 418         return [func(x[:, j % ncx], y[:, j % ncy], kw, kwargs)
    419                 for j in range(max(ncx, ncy))]
    420
~\anaconda3\lib\site-packages\matplotlib\axes\_base.py in <listcomp>(.0)
    416         if ncx > 1 and ncy > 1 and ncx != ncy:
    417             raise ValueError(f"x has {ncx} columns but y has {ncy} columns")
--> 418         return [func(x[:, j % ncx], y[:, j % ncy], kw, kwargs)
    419                 for j in range(max(ncx, ncy))]
    420
~\anaconda3\lib\site-packages\matplotlib\axes\_base.py in _makeline(self, x, y, kw, kwargs)
    310         default_dict = self._getdefaults(set(), kw)
    311         self._setdefaults(default_dict, kw)
--> 312         seg = mlines.Line2D(x, y, **kw)
    313         return seg
    314
~\anaconda3\lib\site-packages\matplotlib\lines.py in __init__(self, xdata, ydata, linewidth, linestyle, color, marker, markersize, markeredgewidth, markeredgecolor, markerfacecolor, markerfacecoloralt, fillstyle, antialiased, dash_capstyle, solid_capstyle, dash_joinstyle, solid_joinstyle, pickradius, drawstyle, markevery, **kwargs)
    388         # update kwargs before updating data to give the caller a
    389         # chance to init axes (and hence unit support)
--> 390         self.update(kwargs)
    391         self.pickradius = pickradius
    392         self.ind_offset = 0
~\anaconda3\lib\site-packages\matplotlib\artist.py in update(self, props)
    994             func = getattr(self, f"set_{k}", None)
    995             if not callable(func):
--> 996                 raise AttributeError(f"{type(self).__name__!r} object "
    997                                      f"has no property {k!r}")
    998             ret.append(func(v))

AttributeError: 'Line2D' object has no property 'ylabel'
I was able to run this code fine on my Mac, but when I transferred it over to my desktop I get this error, and I'm not sure why. I thought it could be a version problem, but updating pandas didn't fix anything.
Does anybody have an idea what could be causing something like this?
You can try this trick: if the ylabel parameter is the problem, remove it and set the label directly on the axes.
import matplotlib.pyplot as plt

# With subplots=True, df.plot returns an array of Axes, not a single Axes.
axes = df_mean.plot(kind='line', subplots=True, layout=(1,8), figsize=(40,8),
                    sharey=True, title="Average movement")
axes[0][0].set_ylabel('Percent Change')  # sharey=True, so labeling the first axis is enough
plt.show()
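As a cross-check: to the best of my knowledge the ylabel/xlabel keywords for DataFrame.plot() were only added in pandas 1.1, so an older pandas on the desktop would pass ylabel straight through to matplotlib's Line2D and raise exactly this error. Worth verifying on both machines:
import pandas as pd

# DataFrame.plot(ylabel=...) needs pandas >= 1.1 (assumption worth verifying);
# on older versions the kwarg falls through to Line2D and fails.
print(pd.__version__)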

M1 MacBook Apple ML compute Tensorflow2.4 compatibility issue with Numpy

I am running the new Apple-native TensorFlow 2.4 package and ran into a problem I did not have before. This Jupyter notebook code works in an old Intel-based environment with an older TensorFlow version, but with Apple ML Compute TensorFlow 2.4 on the M1 it is not compatible with NumPy 1.20 or 1.18 (I downgraded NumPy to try). The error log:
NotImplementedError: Cannot convert a symbolic Tensor (lstm_1/strided_slice:0) to a numpy array. This error may indicate that you're trying to pass a Tensor to a NumPy call, which is not supported
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
<ipython-input-20-73358e637fe3> in <module>
4 model = Sequential()
5 model.add(Embedding(vocab_size+1, W2V_SIZE, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, trainable=False))
----> 6 model.add(LSTM(500, dropout=0.2, recurrent_dropout=0.2))
7 model.add(Dense(units = 10000, kernel_initializer = 'glorot_uniform', activation = 'relu'))
8 model.add(Dropout(0.35))
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/training/tracking/base.py in _method_wrapper(self, *args, **kwargs)
515 self._self_setattr_tracking = False # pylint: disable=protected-access
516 try:
--> 517 result = method(self, *args, **kwargs)
518 finally:
519 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/sequential.py in add(self, layer)
221 # If the model is being built continuously on top of an input layer:
222 # refresh its output.
--> 223 output_tensor = layer(self.outputs[0])
224 if len(nest.flatten(output_tensor)) != 1:
225 raise ValueError(SINGLE_LAYER_OUTPUT_ERROR_MSG)
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in __call__(self, inputs, initial_state, constants, **kwargs)
658
659 if initial_state is None and constants is None:
--> 660 return super(RNN, self).__call__(inputs, **kwargs)
661
662 # If any of `initial_state` or `constants` are specified and are Keras
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, *args, **kwargs)
944 # >> model = tf.keras.Model(inputs, outputs)
945 if _in_functional_construction_mode(self, inputs, args, kwargs, input_list):
--> 946 return self._functional_construction_call(inputs, args, kwargs,
947 input_list)
948
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in _functional_construction_call(self, inputs, args, kwargs, input_list)
1083 layer=self, inputs=inputs, build_graph=True, training=training_value):
1084 # Check input assumptions set after layer building, e.g. input shape.
-> 1085 outputs = self._keras_tensor_symbolic_call(
1086 inputs, input_masks, args, kwargs)
1087
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in _keras_tensor_symbolic_call(self, inputs, input_masks, args, kwargs)
815 return nest.map_structure(keras_tensor.KerasTensor, output_signature)
816 else:
--> 817 return self._infer_output_signature(inputs, args, kwargs, input_masks)
818
819 def _infer_output_signature(self, inputs, args, kwargs, input_masks):
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in _infer_output_signature(self, inputs, args, kwargs, input_masks)
856 # TODO(kaftan): do we maybe_build here, or have we already done it?
857 self._maybe_build(inputs)
--> 858 outputs = call_fn(inputs, *args, **kwargs)
859
860 self._handle_activity_regularization(inputs, outputs)
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent_v2.py in call(self, inputs, mask, training, initial_state)
1161 # LSTM does not support constants. Ignore it during process.
1162 orig_initial_state = initial_state
-> 1163 inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)
1164
1165 if isinstance(mask, list):
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in _process_inputs(self, inputs, initial_state, constants)
857 initial_state = self.states
858 elif initial_state is None:
--> 859 initial_state = self.get_initial_state(inputs)
860
861 if len(initial_state) != len(self.states):
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in get_initial_state(self, inputs)
640 dtype = inputs.dtype
641 if get_initial_state_fn:
--> 642 init_state = get_initial_state_fn(
643 inputs=None, batch_size=batch_size, dtype=dtype)
644 else:
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in get_initial_state(self, inputs, batch_size, dtype)
2504
2505 def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
-> 2506 return list(_generate_zero_filled_state_for_cell(
2507 self, inputs, batch_size, dtype))
2508
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in _generate_zero_filled_state_for_cell(cell, inputs, batch_size, dtype)
2985 batch_size = array_ops.shape(inputs)[0]
2986 dtype = inputs.dtype
-> 2987 return _generate_zero_filled_state(batch_size, cell.state_size, dtype)
2988
2989
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in _generate_zero_filled_state(batch_size_tensor, state_size, dtype)
3001
3002 if nest.is_nested(state_size):
-> 3003 return nest.map_structure(create_zeros, state_size)
3004 else:
3005 return create_zeros(state_size)
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/util/nest.py in map_structure(func, *structure, **kwargs)
657
658 return pack_sequence_as(
--> 659 structure[0], [func(*x) for x in entries],
660 expand_composites=expand_composites)
661
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/util/nest.py in <listcomp>(.0)
657
658 return pack_sequence_as(
--> 659 structure[0], [func(*x) for x in entries],
660 expand_composites=expand_composites)
661
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/keras/layers/recurrent.py in create_zeros(unnested_state_size)
2998 flat_dims = tensor_shape.TensorShape(unnested_state_size).as_list()
2999 init_state_size = [batch_size_tensor] + flat_dims
-> 3000 return array_ops.zeros(init_state_size, dtype=dtype)
3001
3002 if nest.is_nested(state_size):
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/util/dispatch.py in wrapper(*args, **kwargs)
199 """Call target, and fall back on dispatchers if there is a TypeError."""
200 try:
--> 201 return target(*args, **kwargs)
202 except (TypeError, ValueError):
203 # Note: convert_to_eager_tensor currently raises a ValueError, not a
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in wrapped(*args, **kwargs)
2817
2818 def wrapped(*args, **kwargs):
-> 2819 tensor = fun(*args, **kwargs)
2820 tensor._is_zeros_tensor = True
2821 return tensor
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in zeros(shape, dtype, name)
2866 # Create a constant if it won't be very big. Otherwise create a fill
2867 # op to prevent serialized GraphDefs from becoming too large.
-> 2868 output = _constant_if_small(zero, shape, dtype, name)
2869 if output is not None:
2870 return output
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in _constant_if_small(value, shape, dtype, name)
2802 def _constant_if_small(value, shape, dtype, name):
2803 try:
-> 2804 if np.prod(shape) < 1000:
2805 return constant(value, shape=shape, dtype=dtype, name=name)
2806 except TypeError:
<__array_function__ internals> in prod(*args, **kwargs)
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/numpy/core/fromnumeric.py in prod(a, axis, dtype, out, keepdims, initial, where)
3028 10
3029 """
-> 3030 return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
3031 keepdims=keepdims, initial=initial, where=where)
3032
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/numpy/core/fromnumeric.py in _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs)
85 return reduction(axis=axis, out=out, **passkwargs)
86
---> 87 return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
88
89
~/miniforge3/envs/tf2.4/lib/python3.8/site-packages/tensorflow/python/framework/ops.py in __array__(self)
850
851 def __array__(self):
--> 852 raise NotImplementedError(
853 "Cannot convert a symbolic Tensor ({}) to a numpy array."
854 " This error may indicate that you're trying to pass a Tensor to"
NotImplementedError: Cannot convert a symbolic Tensor (lstm_1/strided_slice:0) to a numpy array. This error may indicate that you're trying to pass a Tensor to a NumPy call, which is not supported
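The failing frame is np.prod(shape) on a symbolic tensor shape, a known incompatibility between TF 2.4 and NumPy 1.20+, since TF 2.4 was built against the NumPy 1.19 series. A sketch of the usual mitigation, assuming your environment allows pinning (1.19.5 is the commonly suggested version; worth verifying against the tensorflow_macos release notes):
# Inside the tf2.4 environment, then restart the kernel:
#   pip install numpy==1.19.5
import numpy as np
import tensorflow as tf

print(np.__version__)  # expect 1.19.x
print(tf.__version__)  # 2.4.x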

keras.backend is not defined for variational autoencoder model

I created a Variational Autoencoder model. To do the sampling, I created the following method:
from keras import backend as k

def sampling(args):
    z_mean, z_log_var = args
    batch = k.shape(z_mean)[0]
    dim = k.int_shape(z_mean)[1]
    epsilon = k.random_normal(shape=(batch, dim))
    return z_mean + k.exp(0.5 * z_log_var) * epsilon
This is the model architecture:
def create_variationalModel(original_dim):
    input_shape = (original_dim,)
    intermidiate_dim = 58
    batch_size = 10
    latent_dim = 3
    epochs = 100

    inputs = Input(shape=input_shape, name="encoder_input")
    x = Dense(units=original_dim, activation="tanh")(inputs)
    x = Dense(units=int(original_dim/2), activation="tanh")(x)
    x1 = Dense(units=int(original_dim/4), activation="tanh")(x)
    x2 = Dense(units=int(original_dim/8), activation="tanh")(x1)
    x3 = Dense(units=10, activation="tanh")(x2)
    z_mean = Dense(latent_dim, name="z_mean")(x3)
    z_log_var = Dense(latent_dim, name="z_log_var")(x3)
    z = Lambda(sampling, output_shape=(latent_dim,), name="z")([z_mean, z_log_var])
    encoder = Model(inputs, [z_mean, z_log_var, z], name="encoder")
    encoder.summary()

    latent_inputs = Input(shape=(latent_dim,), name="z_sampling")
    x = Dense(units=10, activation="tanh")(latent_inputs)
    x1 = Dense(units=int(original_dim/8), activation="tanh")(x)
    x2 = Dense(units=int(original_dim/4), activation="tanh")(x1)
    x3 = Dense(units=int(original_dim/2), activation="tanh")(x2)
    x3 = Dense(units=original_dim, activation="tanh")(x3)
    outputs = Dense(units=original_dim, activation="sigmoid")(x3)
    decoder = Model(latent_inputs, outputs, name="decoder")
    decoder.summary()

    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name="vae_mlp")

    reconstruction_loss = mse(inputs, outputs)
    reconstruction_loss *= original_dim
    kl_loss = 1 + z_log_var - k.square(z_mean) - k.exp(z_log_var)
    kl_loss = k.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = k.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)

    plot_model(vae, to_file='vae.png', show_shapes=True)
    vae.compile(optimizer=RMSprop(), loss="mean_squared_error", metrics=["mae"])
    return vae
After training and testing the model, I decided to save it like this:
vae.save("./models/vae.h5")
But when I tried to load the model like this:
model = load_model("./models/vae.h5")
I have this issue:
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
<ipython-input-...> in <module>
      1 # load model
----> 2 model = load_model("./models/vae.h5")
      3 # summarize model.
      4 model.summary()
      5 with open("./models/LabelEncoders_dic.pickle","rb") as f:
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/engine/saving.py in load_wrapper(*args, **kwargs)
    490                 os.remove(tmp_filepath)
    491             return res
--> 492         return load_function(*args, **kwargs)
    493
    494     return load_wrapper
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/engine/saving.py in load_model(filepath, custom_objects, compile)
    582     if H5Dict.is_supported_type(filepath):
    583         with H5Dict(filepath, mode='r') as h5dict:
--> 584             model = _deserialize_model(h5dict, custom_objects, compile)
    585     elif hasattr(filepath, 'write') and callable(filepath.write):
    586         def load_function(h5file):
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/engine/saving.py in _deserialize_model(h5dict, custom_objects, compile)
    272         raise ValueError('No model found in config.')
    273     model_config = json.loads(model_config.decode('utf-8'))
--> 274     model = model_from_config(model_config, custom_objects=custom_objects)
    275     model_weights_group = h5dict['model_weights']
    276
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/engine/saving.py in model_from_config(config, custom_objects)
    625                         'Sequential.from_config(config)?')
    626     from ..layers import deserialize
--> 627     return deserialize(config, custom_objects=custom_objects)
    628
    629
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/layers/__init__.py in deserialize(config, custom_objects)
    163     globs['Model'] = models.Model
    164     globs['Sequential'] = models.Sequential
--> 165     return deserialize_keras_object(config,
    166                                     module_objects=globs,
    167                                     custom_objects=custom_objects,
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/utils/generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
    142             custom_objects = custom_objects or {}
    143             if has_arg(cls.from_config, 'custom_objects'):
--> 144                 return cls.from_config(
    145                     config['config'],
    146                     custom_objects=dict(list(_GLOBAL_CUSTOM_OBJECTS.items()) +
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/engine/network.py in from_config(cls, config, custom_objects)
   1054         # First, we create all layers and enqueue nodes to be processed
   1055         for layer_data in config['layers']:
-> 1056             process_layer(layer_data)
   1057
   1058         # Then we process nodes in order of layer depth.
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/engine/network.py in process_layer(layer_data)
   1039             from ..layers import deserialize as deserialize_layer
   1040
-> 1041             layer = deserialize_layer(layer_data,
   1042                                       custom_objects=custom_objects)
   1043             created_layers[layer_name] = layer
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/layers/__init__.py in deserialize(config, custom_objects)
    163     globs['Model'] = models.Model
    164     globs['Sequential'] = models.Sequential
--> 165     return deserialize_keras_object(config,
    166                                     module_objects=globs,
    167                                     custom_objects=custom_objects,
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/utils/generic_utils.py in deserialize_keras_object(identifier, module_objects, custom_objects, printable_module_name)
    142             custom_objects = custom_objects or {}
    143             if has_arg(cls.from_config, 'custom_objects'):
--> 144                 return cls.from_config(
    145                     config['config'],
    146                     custom_objects=dict(list(_GLOBAL_CUSTOM_OBJECTS.items()) +
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/engine/network.py in from_config(cls, config, custom_objects)
   1073                 node_data = node_data_list[node_index]
   1074                 try:
-> 1075                     process_node(layer, node_data)
   1076
   1077                 # If the node does not have all inbound layers
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/engine/network.py in process_node(layer, node_data)
   1023             # and building the layer if needed.
   1024             if input_tensors:
-> 1025                 layer(unpack_singleton(input_tensors), **kwargs)
   1026
   1027     def process_layer(layer_data):
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/backend/tensorflow_backend.py in symbolic_fn_wrapper(*args, **kwargs)
     73     if _SYMBOLIC_SCOPE.value:
     74         with get_graph().as_default():
---> 75             return func(*args, **kwargs)
     76     else:
     77         return func(*args, **kwargs)
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/engine/base_layer.py in __call__(self, inputs, **kwargs)
    487             # Actually call the layer,
    488             # collecting output(s), mask(s), and shape(s).
--> 489             output = self.call(inputs, **kwargs)
    490             output_mask = self.compute_mask(inputs, previous_mask)
    491
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/layers/core.py in call(self, inputs, mask)
    714         else:
    715             self._input_dtypes = K.dtype(inputs)
--> 716         return self.function(inputs, **arguments)
    717
    718     def compute_mask(self, inputs, mask=None):
~/anaconda3/envs/myenv/lib/python3.8/site-packages/keras/layers/core.py in sampling(args)

NameError: name 'k' is not defined
k comes from "from keras import backend as k". Even after adding this import, I get the same error. Does anyone know how to fix this?
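What is happening: the Lambda layer serialized the sampling function into the h5 file, and when load_model deserializes it, the function body runs in a fresh namespace where k is no longer bound, so re-importing it in the notebook does not help. One common workaround, sketched under the assumption that you can rebuild the architecture in code, is to recreate the model and load only the weights (an h5 written by model.save also contains them):
from keras import backend as k  # must be imported before sampling/create_variationalModel are defined

vae = create_variationalModel(original_dim)   # rebuild the exact architecture
vae.load_weights("./models/vae.h5")           # sidesteps deserializing the Lambda
Alternatively, moving the import inside the body of sampling before saving makes the function self-contained, so a later load_model can resolve k at call time.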
