How to manage a multilabel segmentation map for Active Contour loss? - keras

I am using the Active Contour loss (https://github.com/xuuuuuuchen/Active-Contour-Loss/blob/master/Active-Contour-Loss.py), which is as follows:
from keras import backend as K
import numpy as np

def Active_Contour_Loss(y_true, y_pred):
    # y_pred = K.cast(y_pred, dtype='float64')

    """
    length term
    """
    x = y_pred[:, :, 1:, :] - y_pred[:, :, :-1, :]  # horizontal and vertical directions
    y = y_pred[:, :, :, 1:] - y_pred[:, :, :, :-1]

    delta_x = x[:, :, 1:, :-2]**2
    delta_y = y[:, :, :-2, 1:]**2
    delta_u = K.abs(delta_x + delta_y)

    epsilon = 0.00000001  # small parameter to keep the square root away from zero in practice
    w = 1
    lenth = w * K.sum(K.sqrt(delta_u + epsilon))  # equ. (11) in the paper

    """
    region term
    """
    C_1 = np.ones((256, 256))
    C_2 = np.zeros((256, 256))

    region_in = K.abs(K.sum(y_pred[:, 0, :, :] * ((y_true[:, 0, :, :] - C_1)**2)))       # equ. (12) in the paper
    region_out = K.abs(K.sum((1 - y_pred[:, 0, :, :]) * ((y_true[:, 0, :, :] - C_2)**2)))  # equ. (12) in the paper

    lambdaP = 1  # the lambda parameter can be varied
    loss = lenth + lambdaP * (region_in + region_out)
    return loss
However, when I use it with my U-Net model, I get the error below while compiling.
InvalidArgumentError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py in _create_c_op(graph, node_def, inputs, control_inputs)
1606 try:
-> 1607 c_op = c_api.TF_FinishOperation(op_desc)
1608 except errors.InvalidArgumentError as e:
InvalidArgumentError: Dimensions must be equal, but are 4 and 256 for 'loss_2/activation_57_loss/mul_1' (op: 'Mul') with input shapes: [?,256,4], [?,256,256].
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-33-b98b233ef3b2> in <module>()
50 # model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
51
---> 52 model.compile(optimizer='adam', loss=Active_Contour_Loss, metrics=['accuracy'])
/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, weighted_metrics, target_tensors, **kwargs)
343 with K.name_scope(self.output_names[i] + '_loss'):
344 output_loss = weighted_loss(y_true, y_pred,
--> 345 sample_weight, mask)
346 if len(self.outputs) > 1:
347 self.metrics_tensors.append(output_loss)
/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py in weighted(y_true, y_pred, weights, mask)
426 """
427 # score_array has ndim >= 2
--> 428 score_array = fn(y_true, y_pred)
429 if mask is not None:
430 # Cast the mask to floatX to avoid float64 upcasting in Theano
<ipython-input-32-b273672af934> in Active_Contour_Loss(y_true, y_pred)
28 C_2 = np.zeros((256, 256))
29
---> 30 region_in = K.abs(K.sum( y_pred[:,0,:,:] * ((y_true[:,0,:,:] - C_1)**2) ) ) # equ.(12) in the paper
31 region_out = K.abs(K.sum( (1-y_pred[:,0,:,:]) * ((y_true[:,0,:,:] - C_2)**2) )) # equ.(12) in the paper
32
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/math_ops.py in binary_op_wrapper(x, y)
897 with ops.name_scope(None, op_name, [x, y]) as name:
898 if isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor):
--> 899 return func(x, y, name=name)
900 elif not isinstance(y, sparse_tensor.SparseTensor):
901 try:
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/math_ops.py in _mul_dispatch(x, y, name)
1204 is_tensor_y = isinstance(y, ops.Tensor)
1205 if is_tensor_y:
-> 1206 return gen_math_ops.mul(x, y, name=name)
1207 else:
1208 assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse.
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/gen_math_ops.py in mul(x, y, name)
6699 # Add nodes to the TensorFlow graph.
6700 _, _, _op = _op_def_lib._apply_op_helper(
-> 6701 "Mul", x=x, y=y, name=name)
6702 _result = _op.outputs[:]
6703 _inputs_flat = _op.inputs
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
792 op = g.create_op(op_type_name, inputs, dtypes=None, name=scope,
793 input_types=input_types, attrs=attr_protos,
--> 794 op_def=op_def)
795
796 # Conditionally invoke tfdbg v2's op callback(s).
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/util/deprecation.py in new_func(*args, **kwargs)
505 'in a future version' if date is None else ('after %s' % date),
506 instructions)
--> 507 return func(*args, **kwargs)
508
509 doc = _add_deprecated_arg_notice_to_docstring(
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py in create_op(***failed resolving arguments***)
3355 raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
3356 return self._create_op_internal(op_type, inputs, dtypes, input_types, name,
-> 3357 attrs, op_def, compute_device)
3358
3359 def _create_op_internal(
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py in _create_op_internal(self, op_type, inputs, dtypes, input_types, name, attrs, op_def, compute_device)
3424 input_types=input_types,
3425 original_op=self._default_original_op,
-> 3426 op_def=op_def)
3427 self._create_op_helper(ret, compute_device=compute_device)
3428 return ret
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py in __init__(self, node_def, g, inputs, output_types, control_inputs, input_types, original_op, op_def)
1768 op_def, inputs, node_def.attr)
1769 self._c_op = _create_c_op(self._graph, node_def, grouped_inputs,
-> 1770 control_input_ops)
1771 # pylint: enable=protected-access
1772
/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py in _create_c_op(graph, node_def, inputs, control_inputs)
1608 except errors.InvalidArgumentError as e:
1609 # Convert to ValueError for backwards compatibility.
-> 1610 raise ValueError(str(e))
1611
1612 return c_op
ValueError: Dimensions must be equal, but are 4 and 256 for 'loss_2/activation_57_loss/mul_1' (op: 'Mul') with input shapes: [?,256,4], [?,256,256].
I tried assigning C_1 and C_2 the shape (256, 256, 4), but then the segmentation mask is not generated. Am I missing something?

The source code you shared for the Active Contour loss seems fine. You are probably feeding the loss function inputs of the wrong size.
Since the code works on tensors, you should feed the function 4-D arrays. Try something like this:
# C_1 and C_2 are the (256, 256) arrays from the loss above
a0 = np.zeros([1, 256, 256, 1], dtype='float64')
a0[0, ...] = np.expand_dims(C_1, -1).astype(np.float64)

a1 = np.zeros([1, 256, 256, 1], dtype='float64')
a1[0, ...] = np.expand_dims(C_2, -1).astype(np.float64)

los_ac = Active_Contour_Loss(a0, a1)
print(K.eval(los_ac))  # K.eval instead of sess.run, so no manual session is needed
I wrote this code quickly but I suppose it'll work.
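If your masks are multilabel and channels-last (so y_pred has shape (batch, 256, 256, 4), which matches the [?,256,4] vs [?,256,256] shapes in your error), note that the loss indexes y_pred[:, 0, :, :] as if the data were channels-first. A minimal channels-last sketch of the region term (my adaptation, not the original repo code): since C_1 is all ones and C_2 all zeros, you can drop the explicit matrices and let broadcasting cover every channel at once.

from keras import backend as K

def region_term_multilabel(y_true, y_pred):
    # y_true, y_pred: (batch, H, W, n_classes), channels-last one-hot masks.
    # (y_true - C_1)**2 with C_1 = ones reduces to (y_true - 1)**2,
    # (y_true - C_2)**2 with C_2 = zeros reduces to y_true**2.
    region_in = K.abs(K.sum(y_pred * K.square(y_true - 1.0)))
    region_out = K.abs(K.sum((1.0 - y_pred) * K.square(y_true)))
    return region_in + region_out

The length term would then need its slicing applied along the spatial axes 1 and 2 instead of 2 and 3.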

Related

XGBoost: using `eval_set` in .fit causes an error

I'm trying to train a model using XGBoost. The code splits the data with GroupKFold, and for each fold it fits an XGBoost model. Within the fit call, I evaluate both the train and validation data to track the errors, and then predict on the test set.
I'm running the following code:
kf = GroupKFold(n_splits=4)

for trn_idx, test_idx in kf.split(X, groups=X.year):
    x_train, x_valid = X.iloc[trn_idx], X.iloc[test_idx]
    y_train, y_valid = y.iloc[trn_idx], y.iloc[test_idx]

    xgb_model = xgb.XGBRegressor(
        booster='dart',
        eta=0.1,
        gamma=0,
        colsample_bytree=0.7,
        n_estimators=1200,
        max_depth=1,
        reg_alpha=1.1,
        reg_lambda=1.1,
        subsample=0.03,
        eval_metric=my_smape)

    xgb_model.fit(x_train, y_train,
                  eval_set=[(x_train, y_train), (x_valid, y_valid)],
                  early_stopping_rounds=20,
                  verbose=True)
But I'm getting the following error. I checked this doc, and my code follows it. Can someone please help me find the solution?
AttributeError Traceback (most recent call last)
<ipython-input-38-81b11a21472c> in <module>
23 eval_metric=my_smape)
24
---> 25 xgb_model.fit(x_train, y_train,
26 eval_set=[(x_valid,y_valid)], early_stopping_rounds=20,
27 verbose=True)
D:\Anaconda\lib\site-packages\xgboost\core.py in inner_f(*args, **kwargs)
573 for k, arg in zip(sig.parameters, args):
574 kwargs[k] = arg
--> 575 return f(**kwargs)
576
577 return inner_f
D:\Anaconda\lib\site-packages\xgboost\sklearn.py in fit(self, X, y, sample_weight, base_margin, eval_set, eval_metric, early_stopping_rounds, verbose, xgb_model, sample_weight_eval_set, base_margin_eval_set, feature_weights, callbacks)
959 xgb_model, eval_metric, params, early_stopping_rounds, callbacks
960 )
--> 961 self._Booster = train(
962 params,
963 train_dmatrix,
D:\Anaconda\lib\site-packages\xgboost\core.py in inner_f(*args, **kwargs)
573 for k, arg in zip(sig.parameters, args):
574 kwargs[k] = arg
--> 575 return f(**kwargs)
576
577 return inner_f
D:\Anaconda\lib\site-packages\xgboost\training.py in train(params, dtrain, num_boost_round, evals, obj, feval, maximize, early_stopping_rounds, evals_result, verbose_eval, xgb_model, callbacks, custom_metric)
180 break
181 bst.update(dtrain, i, obj)
--> 182 if cb_container.after_iteration(bst, i, dtrain, evals):
183 break
184
D:\Anaconda\lib\site-packages\xgboost\callback.py in after_iteration(self, model, epoch, dtrain, evals)
237 for _, name in evals:
238 assert name.find('-') == -1, 'Dataset name should not contain `-`'
--> 239 score: str = model.eval_set(evals, epoch, self.metric, self._output_margin)
240 splited = score.split()[1:] # into datasets
241 # split up `test-error:0.1234`
D:\Anaconda\lib\site-packages\xgboost\core.py in eval_set(self, evals, iteration, feval, output_margin)
1860 if feval is not None:
1861 for dmat, evname in evals:
-> 1862 feval_ret = feval(
1863 self.predict(dmat, training=False, output_margin=output_margin), dmat
1864 )
D:\Anaconda\lib\site-packages\xgboost\sklearn.py in inner(y_score, dmatrix)
99 def inner(y_score: np.ndarray, dmatrix: DMatrix) -> Tuple[str, float]:
100 y_true = dmatrix.get_label()
--> 101 return func.__name__, func(y_true, y_score)
102 return inner
103
AttributeError: '_PredictScorer' object has no attribute '__name__'
It looks like you've run make_scorer() on your custom metric. Try supplying the original function as eval_metric instead; that should fix the issue.
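For example (a minimal sketch — the body of my_smape here is my assumption of a standard SMAPE, keep your own definition; the point is that the callable is passed raw, with a (y_true, y_pred) signature, so xgboost can read its __name__):

import numpy as np
import xgboost as xgb

def my_smape(y_true, y_pred):
    # plain function, NOT wrapped in make_scorer(): xgboost calls it directly
    return 100.0 * np.mean(2.0 * np.abs(y_pred - y_true)
                           / (np.abs(y_true) + np.abs(y_pred) + 1e-9))

xgb_model = xgb.XGBRegressor(n_estimators=1200, eval_metric=my_smape)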

FGSM attack in Foolbox

I am using Foolbox 3.3.1 to perform some adversarial attacks on a ResNet-50 network. The code is as follows:
import torch
from torchvision import models
import foolbox as fb  # missing import added

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = models.resnet50(pretrained=True).to(device)
model.eval()

mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
preprocessing = dict(mean=mean, std=std, axis=-3)
bounds = (0, 1)
fmodel = fb.models.PyTorchModel(model, bounds=bounds, preprocessing=preprocessing)

images, labels = fb.utils.samples(fmodel, dataset='imagenet', batchsize=8)
labels_float = labels.to(torch.float32)

# lab_dict, imshow and predicted_labels_before_attack are defined elsewhere in the notebook
def perform_attack(attack, fmodel, images, labels, predicted_labels_before_attack):
    print(f'Performing attack with {type(attack).__name__}...', end='')
    raw, clipped, is_adv = attack(fmodel, images, labels, epsilons=0.03)
    print('done')
    logits_after_attacks = fmodel(clipped)
    labels_after_attack = logits_after_attacks.max(dim=1)[1].cpu().numpy()
    for image, predicted_label_before_attack, label, label_after_attack in zip(
            images, predicted_labels_before_attack, labels.cpu().numpy(), labels_after_attack):
        label_imshow = type(attack).__name__
        if predicted_label_before_attack == label and label != label_after_attack:
            label_imshow += '; successful attack'
        label_imshow += f'\nTrue class: {lab_dict[label]}\nClassified before attack as: {lab_dict[predicted_label_before_attack]}\nClassified after attack as: {lab_dict[label_after_attack]}'
        imshow(image, label_imshow)

for attack in (
        fb.attacks.FGSM(),  # "nll_loss_forward_no_reduce_cuda_kernel_index" not implemented for 'Int'
):
    perform_attack(attack, fmodel, images, labels, predicted_labels_before_attack)
I get the error:
RuntimeError: "nll_loss_forward_no_reduce_cuda_kernel_index" not implemented for 'Int'
with full stack:
Performing attack with LinfFastGradientAttack...
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_1736/3238714708.py in <module>
28 # fb.attacks.BoundaryAttack(), # very slow
29 ):
---> 30 perform_attack(attack, fmodel, images, labels, predicted_labels_before_attack)
~\AppData\Local\Temp/ipykernel_1736/3978727835.py in perform_attack(attack, fmodel, images, labels, predicted_labels_before_attack)
1 def perform_attack(attack, fmodel, images, labels, predicted_labels_before_attack):
2 print(f'Performing attack with {type(attack).__name__}...', end='')
----> 3 raw, clipped, is_adv = attack(fmodel, images, labels, epsilons=0.03)
4 print('done')
5 logits_after_attacks = fmodel(clipped)
~\anaconda3\envs\adversarial\lib\site-packages\foolbox\attacks\base.py in __call__(***failed resolving arguments***)
277 success = []
278 for epsilon in real_epsilons:
--> 279 xp = self.run(model, x, criterion, epsilon=epsilon, **kwargs)
280
281 # clip to epsilon because we don't really know what the attack returns;
~\anaconda3\envs\adversarial\lib\site-packages\foolbox\attacks\fast_gradient_method.py in run(self, model, inputs, criterion, epsilon, **kwargs)
90 raise ValueError("unsupported criterion")
91
---> 92 return super().run(
93 model=model, inputs=inputs, criterion=criterion, epsilon=epsilon, **kwargs
94 )
~\anaconda3\envs\adversarial\lib\site-packages\foolbox\attacks\gradient_descent_base.py in run(***failed resolving arguments***)
90
91 for _ in range(self.steps):
---> 92 _, gradients = self.value_and_grad(loss_fn, x)
93 gradients = self.normalize(gradients, x=x, bounds=model.bounds)
94 x = x + gradient_step_sign * stepsize * gradients
~\anaconda3\envs\adversarial\lib\site-packages\foolbox\attacks\gradient_descent_base.py in value_and_grad(self, loss_fn, x)
50 x: ep.Tensor,
51 ) -> Tuple[ep.Tensor, ep.Tensor]:
---> 52 return ep.value_and_grad(loss_fn, x)
53
54 def run(
~\anaconda3\envs\adversarial\lib\site-packages\eagerpy\framework.py in value_and_grad(f, t, *args, **kwargs)
350 f: Callable[..., TensorType], t: TensorType, *args: Any, **kwargs: Any
351 ) -> Tuple[TensorType, TensorType]:
--> 352 return t.value_and_grad(f, *args, **kwargs)
353
354
~\anaconda3\envs\adversarial\lib\site-packages\eagerpy\tensor\tensor.py in value_and_grad(self, f, *args, **kwargs)
541 self: TensorType, f: Callable[..., TensorType], *args: Any, **kwargs: Any
542 ) -> Tuple[TensorType, TensorType]:
--> 543 return self._value_and_grad_fn(f, has_aux=False)(self, *args, **kwargs)
544
545 @final
~\anaconda3\envs\adversarial\lib\site-packages\eagerpy\tensor\pytorch.py in value_and_grad(x, *args, **kwargs)
493 loss, aux = f(x, *args, **kwargs)
494 else:
--> 495 loss = f(x, *args, **kwargs)
496 loss = loss.raw
497 loss.backward()
~\anaconda3\envs\adversarial\lib\site-packages\foolbox\attacks\gradient_descent_base.py in loss_fn(inputs)
40 def loss_fn(inputs: ep.Tensor) -> ep.Tensor:
41 logits = model(inputs)
---> 42 return ep.crossentropy(logits, labels).sum()
43
44 return loss_fn
~\anaconda3\envs\adversarial\lib\site-packages\eagerpy\framework.py in crossentropy(logits, labels)
319
320 def crossentropy(logits: TensorType, labels: TensorType) -> TensorType:
--> 321 return logits.crossentropy(labels)
322
323
~\anaconda3\envs\adversarial\lib\site-packages\eagerpy\tensor\pytorch.py in crossentropy(self, labels)
462 raise ValueError("labels must be 1D and must match the length of logits")
463 return type(self)(
--> 464 torch.nn.functional.cross_entropy(self.raw, labels.raw, reduction="none")
465 )
466
~\anaconda3\envs\adversarial\lib\site-packages\torch\nn\functional.py in cross_entropy(input, target, weight, size_average, ignore_index, reduce, reduction, label_smoothing)
2844 if size_average is not None or reduce is not None:
2845 reduction = _Reduction.legacy_get_string(size_average, reduce)
-> 2846 return torch._C._nn.cross_entropy_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index, label_smoothing)
2847
2848
RuntimeError: "nll_loss_forward_no_reduce_cuda_kernel_index" not implemented for 'Int'
Any clue?
I think the problem is in perform_attack (the 3978727835.py frame of the trace): PyTorch's cross-entropy needs integer (Long) targets, so try changing the labels argument to labels.to(device).long() if you use CUDA, otherwise to labels.long().
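In other words (a sketch against the code above):

# cast the labels to int64 before running the attack:
# torch.nn.functional.cross_entropy requires Long (int64) targets
labels = labels.to(device).long()   # on CPU, labels.long() is enough
raw, clipped, is_adv = attack(fmodel, images, labels, epsilons=0.03)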

How to reconstruct the decoder from an LSTM-AE?

I have a trained LSTM-AE with the following architecture:
In brief, I have an LSTM-AE of depth 3; the number of cells in the LSTM layers on the encoder side is [120, 80, 50] (and symmetric for the decoder). I built the model using the code shown on this page. For information, because I want to train the LSTM-AE directly on variable-length time series, I didn't specify the timesteps in the input layer, which means the model is trained on batches of size 1 (one time series per batch).
I can extract the encoder just fine, but I cannot do the same for the decoder :-(... My goal is to check whether, given a vector of 50 features (extracted by the encoder), the decoder can reconstruct the input series.
Here's my attempt so far:
# load the full autoencoder
model = load_model(path_to_model)
# reconstruct the decoder
in_layer = Input(shape=(None, 50))
time_dist = model.layers[-1]
dec_1 = model.layers[-2]
dec_2 = model.layers[-3]
dec_3 = model.layers[-4]
rep_vec = model.layers[-5]
out_layer = time_dist(dec_1(dec_2(dec_3(rep_vec(in_layer)))))
decoder = Model(in_layer, out_layer, name='decoder')
res = decoder(input_feature) # input_feature has shape (50,)
I obtained this error:
InvalidArgumentError: slice index 1 of dimension 0 out of bounds. [Op:StridedSlice] name: decoder/repeat/strided_slice/
If you are interested in the full error log...
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
Input In [86], in <module>
13 out_layer = time_dist(dec_1(dec_2(dec_3(rep_vec(in_layer)))))
14 decoder = Model(in_layer, out_layer, name='decoder')
---> 15 res = decoder(input_feature)
File ~/venv/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:1030, in Layer.__call__(self, *args, **kwargs)
1026 inputs = self._maybe_cast_inputs(inputs, input_list)
1028 with autocast_variable.enable_auto_cast_variables(
1029 self._compute_dtype_object):
-> 1030 outputs = call_fn(inputs, *args, **kwargs)
1032 if self._activity_regularizer:
1033 self._handle_activity_regularization(inputs, outputs)
File ~/venv/lib/python3.8/site-packages/tensorflow/python/keras/engine/functional.py:420, in Functional.call(self, inputs, training, mask)
401 @doc_controls.do_not_doc_inheritable
402 def call(self, inputs, training=None, mask=None):
403 """Calls the model on new inputs.
404
405 In this case `call` just reapplies
(...)
418 a list of tensors if there are more than one outputs.
419 """
--> 420 return self._run_internal_graph(
421 inputs, training=training, mask=mask)
File ~/venv/lib/python3.8/site-packages/tensorflow/python/keras/engine/functional.py:556, in Functional._run_internal_graph(self, inputs, training, mask)
553 continue # Node is not computable, try skipping.
555 args, kwargs = node.map_arguments(tensor_dict)
--> 556 outputs = node.layer(*args, **kwargs)
558 # Update tensor_dict.
559 for x_id, y in zip(node.flat_output_ids, nest.flatten(outputs)):
File ~/venv/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py:1030, in Layer.__call__(self, *args, **kwargs)
1026 inputs = self._maybe_cast_inputs(inputs, input_list)
1028 with autocast_variable.enable_auto_cast_variables(
1029 self._compute_dtype_object):
-> 1030 outputs = call_fn(inputs, *args, **kwargs)
1032 if self._activity_regularizer:
1033 self._handle_activity_regularization(inputs, outputs)
File ~/venv/lib/python3.8/site-packages/tensorflow/python/keras/layers/core.py:919, in Lambda.call(self, inputs, mask, training)
915 return var
917 with backprop.GradientTape(watch_accessed_variables=True) as tape,\
918 variable_scope.variable_creator_scope(_variable_creator):
--> 919 result = self.function(inputs, **kwargs)
920 self._check_variables(created_variables, tape.watched_variables())
921 return result
File D:/PhD/Code/feature_learning/train_models/train_lstmae.py:30, in repeat_vector(args)
File ~/venv/lib/python3.8/site-packages/tensorflow/python/util/dispatch.py:206, in add_dispatch_support.<locals>.wrapper(*args, **kwargs)
204 """Call target, and fall back on dispatchers if there is a TypeError."""
205 try:
--> 206 return target(*args, **kwargs)
207 except (TypeError, ValueError):
208 # Note: convert_to_eager_tensor currently raises a ValueError, not a
209 # TypeError, when given unexpected types. So we need to catch both.
210 result = dispatch(wrapper, args, kwargs)
File ~/venv/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py:1040, in _slice_helper(tensor, slice_spec, var)
1038 var_empty = constant([], dtype=dtypes.int32)
1039 packed_begin = packed_end = packed_strides = var_empty
-> 1040 return strided_slice(
1041 tensor,
1042 packed_begin,
1043 packed_end,
1044 packed_strides,
1045 begin_mask=begin_mask,
1046 end_mask=end_mask,
1047 shrink_axis_mask=shrink_axis_mask,
1048 new_axis_mask=new_axis_mask,
1049 ellipsis_mask=ellipsis_mask,
1050 var=var,
1051 name=name)
File ~/venv/lib/python3.8/site-packages/tensorflow/python/util/dispatch.py:206, in add_dispatch_support.<locals>.wrapper(*args, **kwargs)
204 """Call target, and fall back on dispatchers if there is a TypeError."""
205 try:
--> 206 return target(*args, **kwargs)
207 except (TypeError, ValueError):
208 # Note: convert_to_eager_tensor currently raises a ValueError, not a
209 # TypeError, when given unexpected types. So we need to catch both.
210 result = dispatch(wrapper, args, kwargs)
File ~/venv/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py:1213, in strided_slice(input_, begin, end, strides, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, var, name)
1210 if strides is None:
1211 strides = ones_like(begin)
-> 1213 op = gen_array_ops.strided_slice(
1214 input=input_,
1215 begin=begin,
1216 end=end,
1217 strides=strides,
1218 name=name,
1219 begin_mask=begin_mask,
1220 end_mask=end_mask,
1221 ellipsis_mask=ellipsis_mask,
1222 new_axis_mask=new_axis_mask,
1223 shrink_axis_mask=shrink_axis_mask)
1225 parent_name = name
1227 if var is not None:
File ~/venv/lib/python3.8/site-packages/tensorflow/python/ops/gen_array_ops.py:10505, in strided_slice(input, begin, end, strides, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, name)
10503 return _result
10504 except _core._NotOkStatusException as e:
> 10505 _ops.raise_from_not_ok_status(e, name)
10506 except _core._FallbackException:
10507 pass
File ~/venv/lib/python3.8/site-packages/tensorflow/python/framework/ops.py:6897, in raise_from_not_ok_status(e, name)
6895 message = e.message + (" name: " + name if name is not None else "")
6896 # pylint: disable=protected-access
-> 6897 six.raise_from(core._status_to_exception(e.code, message), None)
File <string>:3, in raise_from(value, from_value)
InvalidArgumentError: slice index 1 of dimension 0 out of bounds. [Op:StridedSlice] name: decoder/repeat/strided_slice/
I appreciate very much any advice you would give me!
Edit
Here is the code I used to build the model:
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.initializers import GlorotUniform
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.backend import shape

def repeat_vector(args):
    """Builds the repeat vector layer dynamically by the size of the input series"""
    layer_to_repeat = args[0]
    sequence_layer = args[1]
    return RepeatVector(shape(sequence_layer)[1])(layer_to_repeat)

n_atts = 3  # time series of 3 measurements
n_units = [120, 80, 50]  # encoder - 1st layer: 120, 2nd layer: 80, 3rd layer: 50 (and symmetric for the decoder)
n_layers = len(n_units)

init = GlorotUniform(seed=420)
reg = None
optimizer = Adam(learning_rate=0.0001)
activ = 'tanh'
loss_metric = 'mse'

inputs = Input(shape=(None, n_atts), name='input_layer')

# the encoder
encoded = LSTM(n_units[0], name='encoder_1', return_sequences=(n_layers != 1), kernel_initializer=init,
               kernel_regularizer=reg, activation=activ)(inputs)
for i in range(1, n_layers):
    if i != n_layers - 1:
        encoded = LSTM(n_units[i], name='encoder_{}'.format(i + 1), return_sequences=(n_layers != 1),
                       kernel_initializer=init, kernel_regularizer=reg, activation=activ)(encoded)
    else:
        encoded = LSTM(n_units[i], name='encoder_{}'.format(i + 1), return_sequences=False,
                       kernel_initializer=init, kernel_regularizer=reg, activation=activ)(encoded)

# repeat the vector (plug the encoder into the decoder)
repeated = Lambda(repeat_vector, output_shape=(None, n_units[-1]), name='repeat')([encoded, inputs])

# the decoder
decoded = LSTM(n_units[n_layers - 1], return_sequences=True, name='decoder_1',
               kernel_initializer=init, kernel_regularizer=reg, activation=activ)(repeated)  # first layer
for i in range(1, n_layers):
    decoded = LSTM(n_units[n_layers - 1 - i], return_sequences=True, name='decoder_{}'.format(i + 1),
                   kernel_initializer=init, kernel_regularizer=reg, activation=activ)(decoded)

# last layer
tdist = TimeDistributed(Dense(n_atts))(decoded)

# compile the model
model = Model(inputs, tdist, name='lstm-ae')
model.compile(optimizer=optimizer, loss=loss_metric)
For information, I use tensorflow 2.5.
Because the number of units is read from a config file, I wrote the code this way to add the layers programmatically.
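One detail worth noting for anyone reading: repeat_vector expects a list of two tensors (args[0] to repeat, args[1] to read the timestep count from), so rep_vec(in_layer) hands it a single tensor and makes it slice along the batch axis — which would produce exactly a "slice index 1 of dimension 0 out of bounds". A hypothetical reconstruction sketch (untested; the names dec_in_vec and dec_in_seq are mine) that feeds the Lambda both tensors it expects:

# give the repeat Lambda both inputs: the feature vector and a length reference
in_vec = Input(shape=(50,), name='dec_in_vec')           # encoder output
in_seq = Input(shape=(None, n_atts), name='dec_in_seq')  # only its timestep count is used
repeated = model.get_layer('repeat')([in_vec, in_seq])

x = repeated
for name in ('decoder_1', 'decoder_2', 'decoder_3'):
    x = model.get_layer(name)(x)
out = model.layers[-1](x)  # the TimeDistributed head

decoder = Model([in_vec, in_seq], out, name='decoder')
# usage: res = decoder.predict([feature.reshape(1, 50), np.zeros((1, target_len, n_atts))])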

How to use SHAP with a linear SVC model from sklearn using Pipeline?

I am doing text classification using a linear SVC model from sklearn. Now I want to visualize which words/tokens have the highest impact on the classification decision by using SHAP (https://github.com/slundberg/shap).
Right now this does not work, because I am getting an error that seems to originate from the vectorizer step in the pipeline I have defined - what's wrong here?
Is my general approach on how to use SHAP in this case correct?
x_Train, x_Test, y_Train, y_Test = train_test_split(df_all['PDFText'], df_all['class'],
                                                    test_size=0.2, random_state=1234)

pipeline = Pipeline([
    (
        'tfidv',
        TfidfVectorizer(
            ngram_range=(1, 3),
            analyzer='word',
            strip_accents='ascii',  # the string 'ascii'; the bare builtin ascii was a bug
            use_idf=True,
            sublinear_tf=True,
            max_features=6000,
            min_df=2,
            max_df=1.0
        )
    ),
    (
        'lin_svc',
        svm.SVC(
            C=1.0,
            probability=True,
            kernel='linear'
        )
    )
])
pipeline.fit(x_Train, y_Train)

shap.initjs()
explainer = shap.KernelExplainer(pipeline.predict_proba, x_Train)
shap_values = explainer.shap_values(x_Test, nsamples=100)
shap.force_plot(explainer.expected_value[0], shap_values[0][0, :], x_Test.iloc[0, :])
This is the error message I get:
Provided model function fails when applied to the provided data set.
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-81-4bca63616b3b> in <module>
3
4 # use Kernel SHAP to explain test set predictions
----> 5 explainer = shap.KernelExplainer(pipeline.predict_proba, x_Train)
6 shap_values = explainer.shap_values(x_Test, nsamples=100)
7
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\shap\explainers\kernel.py in __init__(self, model, data, link, **kwargs)
95 self.keep_index_ordered = kwargs.get("keep_index_ordered", False)
96 self.data = convert_to_data(data, keep_index=self.keep_index)
---> 97 model_null = match_model_to_data(self.model, self.data)
98
99 # enforce our current input type limitations
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\shap\common.py in match_model_to_data(model, data)
80 out_val = model.f(data.convert_to_df())
81 else:
---> 82 out_val = model.f(data.data)
83 except:
84 print("Provided model function fails when applied to the provided data set.")
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\utils\metaestimators.py in <lambda>(*args, **kwargs)
116
117 # lambda, but not partial, allows help() to work with update_wrapper
--> 118 out = lambda *args, **kwargs: self.fn(obj, *args, **kwargs)
119 # update the docstring of the returned function
120 update_wrapper(out, self.fn)
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\pipeline.py in predict_proba(self, X)
379 for name, transform in self.steps[:-1]:
380 if transform is not None:
--> 381 Xt = transform.transform(Xt)
382 return self.steps[-1][-1].predict_proba(Xt)
383
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\feature_extraction\text.py in transform(self, raw_documents, copy)
1631 check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
1632
-> 1633 X = super(TfidfVectorizer, self).transform(raw_documents)
1634 return self._tfidf.transform(X, copy=False)
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\feature_extraction\text.py in transform(self, raw_documents)
1084
1085 # use the same matrix-building strategy as fit_transform
-> 1086 _, X = self._count_vocab(raw_documents, fixed_vocab=True)
1087 if self.binary:
1088 X.data.fill(1)
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\feature_extraction\text.py in _count_vocab(self, raw_documents, fixed_vocab)
940 for doc in raw_documents:
941 feature_counter = {}
--> 942 for feature in analyze(doc):
943 try:
944 feature_idx = vocabulary[feature]
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\feature_extraction\text.py in <lambda>(doc)
326 tokenize)
327 return lambda doc: self._word_ngrams(
--> 328 tokenize(preprocess(self.decode(doc))), stop_words)
329
330 else:
c:\users\s.p\appdata\local\programs\python\python37\lib\site-packages\sklearn\feature_extraction\text.py in <lambda>(x)
254
255 if self.lowercase:
--> 256 return lambda x: strip_accents(x.lower())
257 else:
258 return strip_accents
AttributeError: 'numpy.ndarray' object has no attribute 'lower'
KernelExplainer expects to receive a classification model's prediction function applied to already-vectorized data as the first argument; passing the whole Pipeline means raw text and then TF-IDF output get mixed up. Please check how to use Pipeline with SHAP in the SHAP documentation.
In your case, you can use the Pipeline as follows:
x_Train = pipeline.named_steps['tfidv'].fit_transform(x_Train)
explainer = shap.KernelExplainer(pipeline.named_steps['lin_svc'].predict_proba, x_Train)
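Note that x_Test then has to go through the same fitted vectorizer before you ask for SHAP values (a sketch continuing the snippet above):

x_Test_tfidf = pipeline.named_steps['tfidv'].transform(x_Test)  # transform only, no refit
shap_values = explainer.shap_values(x_Test_tfidf, nsamples=100)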

Tensorflow seq2seq Decoder problems?

I am trying to write a seq2seq decoder with the tensorflow tf.contrib.seq2seq package.
I am wondering whether my code is correct and whether there is a better way to write it. The documentation is not easy to read.
Or my question can be: how can I easily debug this kind of code? How can I inspect intermediate results in tensorflow?
class Decoder:
    def __init__(self, embedding, hidden_size, num_layers=1, max_length=15):
        self.embedding = embedding
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.cell = tf.nn.rnn_cell.GRUCell(hidden_size)
        self.linear = tf.Variable(tf.random_normal(shape=(self.hidden_size, cn_total_words)) * 0.1)

    def __call__(self, inputs, state, encoder_outputs, encoder_state, decoder_length, mode="train"):
        with tf.variable_scope("decoder") as scope:
            inputs = tf.nn.embedding_lookup(self.embedding, inputs)
            encoder_state = tf.tile(tf.expand_dims(encoder_state, 1), (1, tf.shape(inputs)[1], 1))

            attention_mechanism = tf.contrib.seq2seq.LuongAttention(self.hidden_size, encoder_outputs)
            attn_cell = tf.contrib.seq2seq.AttentionWrapper(self.cell, attention_mechanism, self.hidden_size)

            if mode == "train":
                helper = tf.contrib.seq2seq.TrainingHelper(inputs=inputs, sequence_length=decoder_length)
            elif mode == "infer":
                helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
                    embedding=self.embedding,
                    start_tokens=tf.tile([en_dict["BOS"]], [tf.shape(inputs)[0]]),
                    end_token=en_dict["EOS"])

            decoder = tf.contrib.seq2seq.BasicDecoder(
                cell=attn_cell, helper=helper,
                initial_state=attn_cell.zero_state(tf.shape(inputs)[0], tf.float32))

            outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder=decoder)
            outputs = tf.concat([tf.expand_dims(out, 1) for out in outputs], 1)
            outputs = tf.tensordot(outputs, self.linear, axes=[[2], [0]])

            return outputs, state
I got the following error when running the code:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/envs/py36/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py in apply_op(self, op_type_name, name, **keywords)
434 preferred_dtype=default_dtype,
--> 435 as_ref=input_arg.is_ref)
436 if input_arg.number_attr and len(
~/anaconda3/envs/py36/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in internal_convert_n_to_tensor(values, dtype, name, as_ref, preferred_dtype)
736 as_ref=as_ref,
--> 737 preferred_dtype=preferred_dtype))
738 return ret
~/anaconda3/envs/py36/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype)
675 if ret is None:
--> 676 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
677
~/anaconda3/envs/py36/lib/python3.6/site-packages/tensorflow/python/framework/ops.py in _TensorTensorConversionFunction(t, dtype, name, as_ref)
548 "Tensor conversion requested dtype %s for Tensor with dtype %s: %r"
--> 549 % (dtype.name, t.dtype.name, str(t)))
550 return t
ValueError: Tensor conversion requested dtype float32 for Tensor with dtype int32: 'Tensor("seq2seq-train/decoder/ExpandDims_2:0", shape=(?, 1, ?), dtype=int32)'
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input> in <module>()
4 emb_en = np.random.uniform(low=-0.1, high=0.1, size=(en_total_words, hidden_size))
5 emb_cn = np.random.uniform(low=-0.1, high=0.1, size=(cn_total_words, hidden_size))
----> 6 model = Seq2Seq(hidden_size, num_layers, emb_en, emb_cn)
7 sess = tf.Session()
8 init = tf.global_variables_initializer()
<ipython-input> in __init__(self, hidden_size, num_layers, embed_words_en, embed_words_cn)
81 encoder_outputs, encoder_state = self.encoder(self.encoder_inputs, self.encoder_length)
82 decoder_length = tf.cast(tf.reduce_sum(self.decoder_mask, 1), tf.int32)
---> 83 decoder_outputs, decoder_state = self.decoder(self.decoder_inputs, encoder_state, encoder_outputs, encoder_state, decoder_length)
84
85 # decoder_outputs.append(decoder_out)
<ipython-input> in __call__(self, inputs, state, encoder_outputs, encoder_state, decoder_length, mode)
50
51 outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder=decoder)
---> 52 outputs = tf.concat([tf.expand_dims(out, 1) for out in outputs], 1)
53
54 outputs = tf.tensordot(outputs, self.linear, axes=[[2], [0]])
~/anaconda3/envs/py36/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py in concat(values, axis, name)
1064 return gen_array_ops._concat_v2(values=values,
1065 axis=axis,
-> 1066 name=name)
1067
1068
~/anaconda3/envs/py36/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py in _concat_v2(values, axis, name)
491 """
492 result = _op_def_lib.apply_op("ConcatV2", values=values, axis=axis,
--> 493 name=name)
494 return result
495
~/anaconda3/envs/py36/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py in apply_op(self, op_type_name, name, **keywords)
461 (prefix, dtype.name))
462 else:
--> 463 raise TypeError("%s that don't all match." % prefix)
464 else:
465 raise TypeError("%s that are invalid." % prefix)
TypeError: Tensors in list passed to 'values' of 'ConcatV2' Op have types [float32, int32] that don't all match.
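One likely reading of the error (a sketch, not a verified fix): tf.contrib.seq2seq.dynamic_decode returns a BasicDecoderOutput namedtuple, so iterating over outputs yields both rnn_output (float32) and sample_id (int32), and concatenating those two tensors is exactly what trips the ConcatV2 type check. Taking the rnn_output field directly avoids the mixed-type concat:

outputs, final_state, _ = tf.contrib.seq2seq.dynamic_decode(decoder=decoder)
# outputs is BasicDecoderOutput(rnn_output=float32, sample_id=int32);
# project the float logits instead of concatenating the namedtuple's fields
logits = tf.tensordot(outputs.rnn_output, self.linear, axes=[[2], [0]])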
