import torch
from models.Unets import Unet_dict, NestedUNet, U_Net, R2U_Net, AttU_Net, R2AttU_Net
from copy import deepcopy

model_Inputs = [U_Net, R2U_Net, AttU_Net, R2AttU_Net, NestedUNet]

def model_unet(model_input, in_channel=3, out_channel=1):
    model_test = model_input(in_channel, out_channel)
    return model_test

input_ = torch.randn(32, 3, 128, 128).cuda()
model = model_unet(model_Inputs[3], 3, 156).cuda()
model_ = deepcopy(model)
model_.eval()

with torch.no_grad():
    print(torch.sum(torch.abs(model_(input_) - model(input_))))
Output:
tensor(25810780., device='cuda:0')
Why is there such a difference between model and model_ for the same input data?
And when I want to run validation/testing on the model, what should I do?
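A likely explanation, offered as a sketch rather than a verified diagnosis of these exact models: deepcopy(model) copies the weights exactly, but model_.eval() puts only the copy into inference mode while model stays in training mode, so layers such as BatchNorm (and Dropout, if present) behave differently between the two. Putting both copies into the same mode should make the outputs agree; for validation, switch to eval mode and disable gradients, then switch back to train mode afterwards:

# put both copies into inference mode before comparing
model.eval()
model_.eval()
with torch.no_grad():
    diff = torch.sum(torch.abs(model_(input_) - model(input_)))
print(diff)  # with identical weights and identical modes, this should be ~0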
I'm trying to use a list as a value in a pandas.DataFrame, but I'm getting an exception when I call the adapt function of the Normalization layer on the NumPy array.
This is the error:
ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type list).
And this is the code:
import pandas as pd
import numpy as np

# Make NumPy printouts easier to read.
np.set_printoptions(precision=3, suppress=True)

import tensorflow as tf
from tensorflow.keras import layers

data = [[45.975, 45.81, 45.715, 45.52, 45.62, 45.65, 4],
        [55.67, 55.975, 55.97, 56.27, 56.23, 56.275, 5],
        [86.87, 86.925, 86.85, 85.78, 86.165, 86.165, 3],
        [64.3, 64.27, 64.285, 64.29, 64.325, 64.245, 6],
        [35.655, 35.735, 35.66, 35.69, 35.665, 35.63, 5]]
lables = [0, 1, 0, 1, 1]
def do():
    d_1 = None
    for l, d in zip(lables, data):
        if d_1 is None:
            d_1 = pd.DataFrame({'lable': l, 'close_price': [d]})
        else:
            d_1 = d_1.append({'lable': l, 'close_price': d}, ignore_index=True)

    dataset = d_1.copy()
    print(dataset.isna().sum())
    dataset = dataset.dropna()
    print(dataset.keys())

    train_dataset = dataset.sample(frac=0.8, random_state=0)
    test_dataset = dataset.drop(train_dataset.index)
    print(train_dataset.describe().transpose())

    train_features = train_dataset.copy()
    test_features = test_dataset.copy()
    train_labels = train_features.pop('lable')
    test_labels = test_features.pop('lable')
    print(train_dataset.describe().transpose()[['mean', 'std']])

    normalizer = tf.keras.layers.Normalization(axis=-1)
    ar = np.array(train_features)
    normalizer.adapt(ar)
    print(normalizer.mean.numpy())

    first = np.array(train_features[:1])
    with np.printoptions(precision=2, suppress=True):
        print('First example:', first)
        print()
        print('Normalized:', normalizer(first).numpy())

    diraction = np.array(train_features)
    diraction_normalizer = layers.Normalization(input_shape=[1, ], axis=None)
    diraction_normalizer.adapt(diraction)

    diraction_model = tf.keras.Sequential([
        diraction_normalizer,
        layers.Dense(units=1)
    ])
    print(diraction_model.summary())
    print(diraction_model.predict(diraction[:10]))

    diraction_model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=0.1),
        loss='mean_absolute_error')

    print(train_features['close_price'])
    history = diraction_model.fit(
        train_features['close_price'],
        train_labels,
        epochs=100,
        # Suppress logging.
        verbose=0,
        # Calculate validation results on 20% of the training data.
        validation_split=0.2)

    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch
    print(hist.tail())

    test_results = {}
    test_results['diraction_model'] = diraction_model.evaluate(
        test_features,
        test_labels, verbose=0)

    x = tf.linspace(0.0, 250, 251)
    y = diraction_model.predict(x)
    print("end")

def main():
    do()

if __name__ == "__main__":
    main()
I think it is not the usual practice to shrink all of your features into one column.
A quick fix is to put the following line
train_features = np.array(train_features['close_price'].to_list())
before
normalizer = tf.keras.layers.Normalization(axis=-1)
to get rid of the error. But note that train_features then changes from a DataFrame into a np.array, so the subsequent code that still treats it as a DataFrame will need to be adjusted too.
If I were you, however, I would construct the DataFrame this way:
df = pd.DataFrame(data)
df['label'] = lables
Please consider it.
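For illustration, a minimal sketch of how that layout works with the Normalization layer; the float32 cast and the column selection are my assumptions, so adjust them to your real feature set:

df = pd.DataFrame(data)  # columns 0..6, one scalar value per cell
df['label'] = lables
features = df.drop(columns='label').to_numpy(dtype='float32')  # shape (5, 7)

normalizer = tf.keras.layers.Normalization(axis=-1)
normalizer.adapt(features)  # works now: no list-valued cells to convert
print(normalizer(features[:1]).numpy())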
I got an error when implementing a Residual Network in Keras. Below is the code that gives me the error (the error comes from the first line of the final step in the function definition):
Load packages:
import numpy as np
from keras import layers
from keras.layers import Input, Add, Concatenate, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from resnets_utils import *
from keras.initializers import glorot_uniform
import scipy.misc
from matplotlib.pyplot import imshow
%matplotlib inline
import keras.backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
Define the function (the first line of the "final step" is what raises the error):
def identity_block(X, f, filters, stage, block):
    """
    Implementation of the identity block as defined in Figure 4

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, specifying the shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network

    Returns:
    X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
    """
    # defining name basis
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Save the input value. You'll need this later to add back to the main path.
    X_shortcut = X

    # First component of main path
    X = Conv2D(filters=F1, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2a', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)

    # Second component of main path
    X = Conv2D(filters=F2, kernel_size=(f, f), strides=(1, 1), padding='same', name=conv_name_base + '2b', kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)

    # Third component of main path
    X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid', name=conv_name_base + '2c', kernel_initializer=glorot_uniform(seed=0))(X)
    print(f'before BatchNormalization: X={X}')
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')
    print(f'after BatchNormalization: X={X}')

    # Final step: Add shortcut value to main path, and pass it through a RELU activation
    X = Add()([X_shortcut, X])
    X = Activation('relu')(X)
    ### END CODE HERE ###

    return X
Call/test the above function:
tf.reset_default_graph()

with tf.Session() as test:
    np.random.seed(1)
    A_prev = tf.placeholder("float", [3, 4, 4, 6])
    X = np.random.randn(3, 4, 4, 6)
    A = identity_block(A_prev, f=2, filters=[2, 4, 6], stage=1, block='a')
    test.run(tf.global_variables_initializer())
    out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
    print("out = " + str(out[0][1][1][0]))
Below are the print output and the error message:
before BatchNormalization: X=Tensor("res1a_branch2c/BiasAdd:0", shape=(3, 4, 4, 6), dtype=float32)
after BatchNormalization: X= <keras.layers.normalization.BatchNormalization object at 0x7f169c6d9668>
ValueError: Unexpectedly found an instance of type `<class 'keras.layers.normalization.BatchNormalization'>`. Expected a symbolic tensor instance.
Below is the complete log (in case you need it):
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/opt/conda/lib/python3.6/site-packages/keras/engine/topology.py in assert_input_compatibility(self, inputs)
424 try:
--> 425 K.is_keras_tensor(x)
426 except ValueError:
/opt/conda/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in is_keras_tensor(x)
399 tf.SparseTensor)):
--> 400 raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) + '`. '
401 'Expected a symbolic tensor instance.')
ValueError: Unexpectedly found an instance of type `<class 'keras.layers.normalization.BatchNormalization'>`. Expected a symbolic tensor instance.
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-6-b3d1050f50dc> in <module>()
5 A_prev = tf.placeholder("float", [3, 4, 4, 6])
6 X = np.random.randn(3, 4, 4, 6)
----> 7 A = identity_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')
8 test.run(tf.global_variables_initializer())
9 out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
<ipython-input-5-013941ce79d6> in identity_block(X, f, filters, stage, block)
43
44 # Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines)
---> 45 X = Add()([X_shortcut,X])
46 X = Activation('relu')(X)
47
/opt/conda/lib/python3.6/site-packages/keras/engine/topology.py in __call__(self, inputs, **kwargs)
556 # Raise exceptions in case the input is not compatible
557 # with the input_spec specified in the layer constructor.
--> 558 self.assert_input_compatibility(inputs)
559
560 # Collect input shapes to build layer.
/opt/conda/lib/python3.6/site-packages/keras/engine/topology.py in assert_input_compatibility(self, inputs)
429 'Received type: ' +
430 str(type(x)) + '. Full input: ' +
--> 431 str(inputs) + '. All inputs to the layer '
432 'should be tensors.')
433
ValueError: Layer add_1 was called with an input that isn't a symbolic tensor. Received type: <class 'keras.layers.normalization.BatchNormalization'>. Full input: [<tf.Tensor 'Placeholder:0' shape=(3, 4, 4, 6) dtype=float32>, <keras.layers.normalization.BatchNormalization object at 0x7f169c6d9668>]. All inputs to the layer should be tensors.
I guess that I missed something in the final step of the function definition, but I have no idea why I got the error. Could any Keras expert here help me out?
Always remember to pass tensors into layers:
print(f'before BatchNormalization: X={X}');
#X = BatchNormalization(axis=3,name=bn_name_base+'2c') # <--- INCORRECT
X = BatchNormalization(axis=3,name=bn_name_base+'2c')(X) # <--- CORRECT
print(f'after BatchNormalization: X={X}');
The difference between 'CORRECT' and 'INCORRECT' is that the incorrect version yields a layer object, whereas the correct version calls that layer on X and evaluates it into a tensor.
Furthermore, make sure your identity_block() ends with return X; without it the function returns None, which will throw another error. Lastly, F1, F2, F3 are neither defined within the function nor passed as arguments; you may not see this as an error if they happen to be defined outside the function, e.g. in your local namespace.
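Putting those fixes together, a sketch of the corrected parts of the function; unpacking filters into F1, F2, F3 is how such blocks are usually written, so treat it as an assumption if your setup defines them differently:

# inside identity_block(), near the top:
F1, F2, F3 = filters  # unpack the filter counts used by the conv layers

# ...

# third component of the main path: note the trailing (X), which applies
# each layer to the tensor instead of leaving a bare layer object
X = Conv2D(filters=F3, kernel_size=(1, 1), strides=(1, 1), padding='valid',
           name=conv_name_base + '2c',
           kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

# final step: add the shortcut back, apply ReLU, and return the tensor
X = Add()([X_shortcut, X])
X = Activation('relu')(X)
return X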
I have a multilabel dataset that I would like to classify with a wide-and-deep neural network.
This is a very small example just to test:
import numpy as np
import pandas as pd
import tensorflow as tf

tf.enable_eager_execution()

training_df: pd.DataFrame = pd.DataFrame(
    data={
        'feature1': np.random.rand(10),
        'feature2': np.random.rand(10),
        'feature3': np.random.rand(10),
        'feature4': np.random.randint(0, 3, 10),
        'feature5': np.random.randint(0, 3, 10),
        'feature6': np.random.randint(0, 3, 10),
        'target1': np.random.randint(0, 2, 10),
        'target2': np.random.randint(0, 2, 10),
        'target3': np.random.randint(0, 2, 10)
    }
)

features = ['feature1', 'feature2', 'feature3', 'feature4', 'feature5', 'feature6']
targets = ['target1', 'target2', 'target3']
Categorical_Cols = ['feature4', 'feature5', 'feature6']
Numerical_Cols = ['feature1', 'feature2', 'feature3']

wide_columns = [tf.feature_column.categorical_column_with_vocabulary_list(key=x, vocabulary_list=[0, 1, -1])
                for x in Categorical_Cols]
deep_columns = [tf.feature_column.numeric_column(x) for x in Numerical_Cols]

def wrap_dataset(df, features, labels):
    dataset = (
        tf.data.Dataset.from_tensor_slices(
            (
                tf.cast(df[features].values, tf.float32),
                tf.cast(df[labels].values, tf.int32),
            )
        )
    )
    return dataset

input_fn_train = wrap_dataset(training_df, features, targets)

m = tf.contrib.estimator.DNNLinearCombinedEstimator(
    head=tf.contrib.estimator.multi_label_head(n_classes=2),
    # wide settings
    linear_feature_columns=wide_columns,
    # linear_optimizer=tf.train.FtrlOptimizer(...),
    # deep settings
    dnn_feature_columns=deep_columns,
    # dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),
    dnn_hidden_units=[10, 30, 10])

m.train(input_fn=input_fn_train)
In this example, we have 6 features:
3 numerical features: feature1, feature2, and feature3
3 categorical features: feature4, feature5, and feature6
Each sample has three labels, and each label takes a binary value: 0 or 1.
The error is about the input function, and I cannot figure out how to define the input function correctly. Any help to correct the code is appreciated.
UPDATE: The error is:
TypeError: <TensorSliceDataset shapes: ((6,), (3,)), types: (tf.float32, tf.int32)> is not a callable object
Since it says the dataset is not a callable object, you can simply wrap it in a lambda and it should work:
input_fn_train = lambda: wrap_dataset(training_df, features, targets)
Also, I think you need to sort out how you pass your data to the Estimator. It most likely expects dictionaries, since you are using feature columns; right now you are passing plain tensors, not a dictionary of tensors. Check out this useful post.
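A minimal sketch of both fixes together, reusing the training_df, features, and targets defined above (whether this exact shape satisfies the contrib estimator in your TF version is an assumption on my part):

def wrap_dataset(df, features, labels):
    # Return a dict of per-column tensors so the feature columns can look
    # features up by name, plus a (batch, 3) tensor of labels.
    dataset = tf.data.Dataset.from_tensor_slices(
        (dict(df[features]), tf.cast(df[labels].values, tf.int32)))
    return dataset.batch(len(df))

# input_fn must be a callable that builds the dataset, hence the lambda
input_fn_train = lambda: wrap_dataset(training_df, features, targets)
m.train(input_fn=input_fn_train)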
Finally, I figured out how to make the code work. I post it here to help people who would like to do multi-label classification using the built-in DNNLinearCombinedEstimator from TensorFlow, version 1.13. Compared with my first attempt, the input function now returns a dictionary of tensors keyed by feature name, and the head is multi_label_head(n_classes=3), matching the three labels.
import numpy as np
import pandas as pd
import tensorflow as tf
# from tensorflow import contrib

tf.enable_eager_execution()

training_df: pd.DataFrame = pd.DataFrame(
    data={
        'feature1': np.random.rand(10),
        'feature2': np.random.rand(10),
        'feature3': np.random.rand(10),
        'feature4': np.random.randint(0, 3, 10),
        'feature5': np.random.randint(0, 3, 10),
        'feature6': np.random.randint(0, 3, 10),
        'target1': np.random.randint(0, 2, 10),
        'target2': np.random.randint(0, 2, 10),
        'target3': np.random.randint(0, 2, 10)
    }
)

features = ['feature1', 'feature2', 'feature3', 'feature4', 'feature5', 'feature6']
targets = ['target1', 'target2', 'target3']
Categorical_Cols = ['feature4', 'feature5', 'feature6']
Numerical_Cols = ['feature1', 'feature2', 'feature3']

wide_columns = [tf.feature_column.categorical_column_with_vocabulary_list(key=x, vocabulary_list=[0, 1, -1])
                for x in Categorical_Cols]
deep_columns = [tf.feature_column.numeric_column(x) for x in Numerical_Cols]

def input_fn(df):
    # Creates a dictionary mapping from each continuous feature column name (k) to
    # the values of that column stored in a constant Tensor.
    continuous_cols = {k: tf.constant(df[k].values)
                       for k in Numerical_Cols}

    # Creates a dictionary mapping from each categorical feature column name (k)
    # to the values of that column stored in a tf.SparseTensor.
    categorical_cols = {k: tf.SparseTensor(
        indices=[[i, 0] for i in range(df[k].size)],
        values=df[k].values,
        dense_shape=[df[k].size, 1])
        for k in Categorical_Cols}

    # Merges the two dictionaries into one.
    feature_cols = continuous_cols.copy()
    feature_cols.update(categorical_cols)

    labels = tf.convert_to_tensor(training_df.as_matrix(training_df[targets].columns.tolist()), dtype=tf.int32)

    return feature_cols, labels

def train_input_fn():
    return input_fn(training_df)

def eval_input_fn():
    return input_fn(training_df)

m = tf.contrib.learn.DNNLinearCombinedEstimator(
    head=tf.contrib.learn.multi_label_head(n_classes=3),
    # wide settings
    linear_feature_columns=wide_columns,
    # linear_optimizer=tf.train.FtrlOptimizer(...),
    # deep settings
    dnn_feature_columns=deep_columns,
    # dnn_optimizer=tf.train.ProximalAdagradOptimizer(...),
    dnn_hidden_units=[10, 10])

m.train(input_fn=train_input_fn, steps=20)

results = m.evaluate(input_fn=eval_input_fn, steps=1)
print("#########################################################")
for key in sorted(results):
    print("%s: %s" % (key, results[key]))
I am trying to train a TensorFlow-based random forest regression on numerical and continuous data.
When I try to fit my estimator, it begins with the message below:
INFO:tensorflow:Constructing forest with params =
INFO:tensorflow:{'num_trees': 10, 'max_nodes': 1000, 'bagging_fraction': 1.0, 'feature_bagging_fraction': 1.0, 'num_splits_to_consider': 10, 'max_fertile_nodes': 0, 'split_after_samples': 250, 'valid_leaf_threshold': 1, 'dominate_method': 'bootstrap', 'dominate_fraction': 0.99, 'model_name': 'all_dense', 'split_finish_name': 'basic', 'split_pruning_name': 'none', 'collate_examples': False, 'checkpoint_stats': False, 'use_running_stats_method': False, 'initialize_average_splits': False, 'inference_tree_paths': False, 'param_file': None, 'split_name': 'less_or_equal', 'early_finish_check_every_samples': 0, 'prune_every_samples': 0, 'feature_columns': [_NumericColumn(key='Average_Score', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), _NumericColumn(key='lat', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None), _NumericColumn(key='lng', shape=(1,), default_value=None, dtype=tf.float32, normalizer_fn=None)], 'num_classes': 1, 'num_features': 2, 'regression': True, 'bagged_num_features': 2, 'bagged_features': None, 'num_outputs': 1, 'num_output_columns': 2, 'base_random_seed': 0, 'leaf_model_type': 2, 'stats_model_type': 2, 'finish_type': 0, 'pruning_type': 0, 'split_type': 0}
Then the process breaks down and I get the ValueError below:
ValueError: Shape must be at least rank 2 but is rank 1 for 'concat' (op: 'ConcatV2') with input shapes: [?], [?], [?], [] and with computed input tensors: input[3] = <1>.
This is the code I am using:
import tensorflow as tf
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.ops import resources
import pandas as pd
from tensorflow.contrib.tensor_forest.client import random_forest
from tensorflow.python.estimator.inputs import numpy_io
import numpy as np

def getFeatures():
    Average_Score = tf.feature_column.numeric_column('Average_Score')
    lat = tf.feature_column.numeric_column('lat')
    lng = tf.feature_column.numeric_column('lng')
    return [Average_Score, lat, lng]

# Import hotel data
Hotel_Reviews = pd.read_csv("./DataMining/Hotel_Reviews.csv")

Hotel_Reviews_Filtered = Hotel_Reviews[(Hotel_Reviews.lat.notnull() |
                                        Hotel_Reviews.lng.notnull())]
Hotel_Reviews_Filtered_Target = Hotel_Reviews_Filtered[["Reviewer_Score"]]
Hotel_Reviews_Filtered_Features = Hotel_Reviews_Filtered[["Average_Score", "lat", "lng"]]

# Preprocess the data
x = Hotel_Reviews_Filtered_Features.to_dict('list')
for key in x:
    x[key] = np.array(x[key])
y = Hotel_Reviews_Filtered_Target.values

# Specify params
params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(
    feature_colums=getFeatures(),
    num_classes=1,
    num_features=2,
    regression=True,
    num_trees=10,
    max_nodes=1000)

# Build the graph
graph_builder_class = tensor_forest.RandomForestGraphs
est = random_forest.TensorForestEstimator(
    params, graph_builder_class=graph_builder_class)

# Define input function
train_input_fn = numpy_io.numpy_input_fn(
    x=x,
    y=y,
    batch_size=1000,
    num_epochs=1,
    shuffle=True)

est.fit(input_fn=train_input_fn, steps=500)
The variable x is a dict of NumPy arrays, each of shape (512470,):
{'Average_Score': array([ 7.7, 7.7, 7.7, ..., 8.1, 8.1, 8.1]),
'lat': array([ 52.3605759, 52.3605759, 52.3605759, ..., 48.2037451,
48.2037451, 48.2037451]),
'lng': array([ 4.9159683, 4.9159683, 4.9159683, ..., 16.3356767,
16.3356767, 16.3356767])}
The variable y is a NumPy array of shape (512470, 1):
array([[ 2.9],
[ 7.5],
[ 7.1],
...,
[ 2.5],
[ 8.8],
[ 8.3]])
Force each array in x to be 2-dimensional, e.g. using ndmin=2. Then the shapes should match and concat should be able to operate.
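A minimal sketch of one way to apply this, placed before train_input_fn is built; the transpose is my addition so that each feature becomes a (512470, 1) column batched along the first axis rather than a (1, 512470) row:

for key in x:
    # ndmin=2 promotes the (512470,) vector to rank 2; .T makes it a column
    x[key] = np.array(x[key], ndmin=2).T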