I get a bug when running the inference code below. In the function recognize(), prediction finishes in 0.4 s, but it takes another 3 s for the result preds_str to reach the caller. I found that if I set gpu_id=0 in the config file, it returns instantly. How can I fix this bug? Thanks in advance.
def recognize(imgs, model, demo_loader):
    t = time()
    model.eval()
    with torch.no_grad():
        for image_tensors, image_path_list in demo_loader:
            batch_size = image_tensors.size(0)
            image = image_tensors.to(config.device)
            # For max length prediction
            length_for_pred = torch.IntTensor([config.batch_max_length] * batch_size).to(config.device)
            text_for_pred = torch.LongTensor(batch_size, config.batch_max_length + 1).fill_(0).to(config.device)
            preds = model(image, text_for_pred, is_train=False)
            _, preds_index = preds.max(2)
            preds_str = converter.decode(preds_index, length_for_pred)
            print('time elapsed before return:', time() - t)  # 0.4s
            return preds_str
def main():
    model = Model()
    model.cuda(config.device)
    model = torch.nn.DataParallel(model, device_ids=[config.device], output_device=[config.device]).to(config.device)
    model.load_state_dict(torch.load(config.saved_model, map_location=config.device))
    AlignCollate_demo = AlignCollate(imgH=config.imgH, imgW=config.imgW, keep_ratio_with_pad=config.PAD)
    # imgs = [img1, img2, ....]
    imgs_dataset = ImageDataset(imgs)
    demo_loader = torch.utils.data.DataLoader(imgs_dataset, batch_size=config.batch_size, shuffle=False,
                                              num_workers=int(config.workers), collate_fn=AlignCollate_demo,
                                              pin_memory=True)
    start_time = time()
    preds_str = recognize(imgs, model, demo_loader)
    print('time elapsed after return', time() - start_time)  # 3.4s
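A note on measuring: CUDA kernels launch asynchronously, so time() around GPU code can attribute cost to whichever later line first forces a synchronization. A minimal sketch of timing with an explicit sync, assuming a CUDA device is available:

import torch
from time import time

x = torch.randn(4096, 4096, device='cuda')
torch.cuda.synchronize()  # finish any pending work first
t = time()
y = x @ x                 # the matmul kernel is launched asynchronously
torch.cuda.synchronize()  # wait until it has actually finished
print('elapsed:', time() - t)

Without the second synchronize(), the cost of the matmul would show up in whatever later call happens to block.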
Config file:
class ConfigWordRecognizer:
    gpu_id = 1  # troublesome line here
    device = torch.device('cuda:{}'.format(gpu_id) if torch.cuda.is_available() else 'cpu')
    imgH = 32
    imgW = 100
    batch_size = 80
    workers = 8
    batch_max_length = 25
I found the solution from this post.
I set CUDA_VISIBLE_DEVICES=1 and gpu_id=0. Then I removed
model = torch.nn.DataParallel(model, device_ids=[config.device], output_device=[config.device]).to(config.device)
and changed
model.load_state_dict(torch.load(config.saved_model, map_location=config.device))
to
model.load_state_dict(self.copyStateDict(torch.load(self.config.saved_model, map_location=self.config.device)))
The copyStateDict function:
from collections import OrderedDict

def copyStateDict(self, state_dict):
    # strip the 'module.' prefix that DataParallel adds to parameter names
    if list(state_dict.keys())[0].startswith("module"):
        start_idx = 1
    else:
        start_idx = 0
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = ".".join(k.split(".")[start_idx:])
        new_state_dict[name] = v
    return new_state_dict
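For background: DataParallel stores the wrapped model under a module attribute, so a checkpoint saved from a DataParallel model prefixes every key with module., which a bare model's load_state_dict rejects. A quick illustration with a throwaway layer:

import torch.nn as nn

net = nn.Linear(2, 2)
print(list(net.state_dict().keys()))      # ['weight', 'bias']

wrapped = nn.DataParallel(net)
print(list(wrapped.state_dict().keys()))  # ['module.weight', 'module.bias']

This is why copyStateDict strips the first path component when the keys start with "module".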
The model now works well on gpu 1. But I still don't understand why, when I set gpu_id=0, it works well on gpu 0 without copyStateDict.
I'm trying to implement a Pong game with a DQN model in torch. However, I ran into two problems during execution. First, the game never reaches done. Second, the loss function does not change at all during training. My code is below:
I defined a CNN network with input of size (batch=32, channels=4, height=84, width=84). Nothing goes wrong up to this step:
import random
from collections import deque

import gym
import matplotlib.pyplot as plt
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from pyvirtualdisplay import Display

class CNN(nn.Module):
    def __init__(self, s_channels, a_space):
        super(CNN, self).__init__()
        self.pool = nn.MaxPool2d(kernel_size=2, stride=1)
        self.conv1 = nn.Conv2d(s_channels, out_channels=32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, 4, 2)
        self.conv3 = nn.Conv2d(64, 64, 3, 1)
        self.fc1 = nn.Linear(64*4*4, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, a_space)

    def forward(self, input):
        output = self.pool(F.relu(self.conv1(input)))
        output = self.pool(F.relu(self.conv2(output)))
        output = self.pool(F.relu(self.conv3(output)))
        output = output.view(-1, 64*4*4)
        output = F.relu(self.fc1(output))
        output = F.relu(self.fc2(output))
        output = F.relu(self.fc3(output))
        return output
For the agent class, I defined a backpropagation function that updates the weights of the CNN from replayed experience, plus the data pre-processing functions:
# Agent
class Agent():
    def __init__(self, s_space, a_space) -> None:
        # define parameters
        self.epsilon = 1.0
        self.min_epsilon = 0.01
        self.dr = 0.995
        self.lr = 0.001
        self.gamma = 0.9
        # define models
        self.evl_net = CNN(s_space, a_space)
        self.tgt_net = CNN(s_space, a_space)
        self.cert = nn.SmoothL1Loss()
        self.optimal = th.optim.Adam(self.evl_net.parameters(), lr=self.lr)
        # define memory store
        self.memory = deque(maxlen=2000)
        # self.img_stack = deque(maxlen=4)

    # pre-processing frame images: transform the images into tensors
    # def bsl_image_pre_process(self, env):
    #     env = aw.AtariWrapper(env, noop_max=30, frame_skip=4, screen_size=84,
    #                           terminal_on_life_loss=True, clip_reward=True)
    #     return env

    def gym_image_pre_process(self, env):
        # Atari preprocessing
        env = gym.wrappers.AtariPreprocessing(env, noop_max=30, frame_skip=4, screen_size=84,
                                              terminal_on_life_loss=False, grayscale_obs=True,
                                              grayscale_newaxis=False, scale_obs=False)
        # create frame stack
        env = gym.wrappers.FrameStack(env, 4)
        channels = env.observation_space.shape[0]
        return env, channels

    def data_pre_process(self, batch_size):
        s_v = []
        a_v = []
        next_s_v = []
        r_v = []
        dones = []
        materials = random.sample(self.memory, batch_size)
        for t in materials:
            s_v.append(t[0])
            a_v.append(t[1])
            next_s_v.append(t[2])
            r_v.append(t[3])
            dones.append(t[4])
        s_v = th.Tensor(s_v)                   # size: [32, 4, 84, 84]
        a_v = th.LongTensor(a_v).unsqueeze(1)  # size: [32, 1]
        next_s_v = th.Tensor(next_s_v)         # size: [32, 4, 84, 84]
        r_v = th.FloatTensor(r_v)              # size: [32]
        return s_v, a_v, next_s_v, r_v, dones

    # remember the transformed images
    def record(self, tpl):
        self.memory.append(tpl)

    # select actions according to the states (input images with 4 channels)
    def select(self, state, a_space):
        actions = self.evl_net(state).data.tolist()
        if random.random() <= self.epsilon:
            action = random.randint(0, a_space - 1)
        else:
            action = actions.index(max(actions))
        return action

    # DQN training progression
    def train(self, state, batch_size):
        s_v, a_v, next_s_v, r_v, dones = self.data_pre_process(batch_size)
        self.tgt_net.load_state_dict(self.evl_net.state_dict())
        evl_Q_value = self.evl_net(s_v).gather(0, a_v)   # size: [32,6].gather() -> [32,1]
        tgt = self.tgt_net(next_s_v).max(1)[0].detach()  # size: [32]
        tgt_Q_value = (r_v + self.gamma * tgt)
        for index in range(len(dones)):
            if dones[index] == True:
                tgt[index][0] = -1
        tgt_Q_value = tgt_Q_value.reshape(batch_size, 1)  # size: [32, 1] cannot be back propagated
        self.optimal.zero_grad()
        loss = self.cert(evl_Q_value, tgt_Q_value)
        print(loss)
        loss.backward()
        for pr in self.evl_net.parameters():
            pr.grad.data.clamp_(-1, 1)
        self.optimal.step()
        if self.epsilon > self.min_epsilon:
            self.epsilon *= self.dr
At the training stage I hit the first problem: the done condition is always false in every episode. With gym.wrappers I've pre-processed the observations into 4×84×84 tensors and set the environment to a single life, but done still never fires:
# main test
_display = Display(visible=0, size=(900, 1400))
_display.start()

# set episode step and batch_size
episodes = 5000
batch_size = 32

env = gym.make("PongNoFrameskip-v4")
env = gym.wrappers.AtariPreprocessing(env, noop_max=30, frame_skip=4, screen_size=84,
                                      terminal_on_life_loss=False, grayscale_obs=True,
                                      grayscale_newaxis=False, scale_obs=False)
# create frame stack for the input image data (size: (4, 84, 84))
env = gym.wrappers.FrameStack(env, 4)
channels = env.observation_space.shape[0]
a_space = env.action_space.n
agent = Agent(channels, a_space)

# testing:
for e in range(episodes):
    # step 1: reset the agent at the beginning
    s = np.array(env.reset())
    for run in range(100):
        score = 0
        img = plt.imshow(env.render('rgb_array'))
        # step 2: create state space tensor
        # step 3: iterate actions
        a = agent.select(th.Tensor(s).unsqueeze(0), a_space)
        next_s, reward, done, _ = env.step(a)
        if done == True:
            next_s = None
        next_s = np.array(next_s)  # done is never true. Why?
        # step 4: record the data into buffer
        dataset = (s, a, next_s, reward, done)
        agent.record(dataset)
        # step 5: update state steps
        s = next_s
        score += reward
        if done == True or run == 99:
            print("episodes:", e, "score:", score, "epsilon: {:.2}".format(agent.epsilon))
            break
        # step 6: training and update CNN
        if len(agent.memory) > batch_size:
            agent.train(channels, batch_size)
While trying to track down this problem, I also noticed that the loss never even roughly decreases (it mostly fluctuates around 1.2). I rechecked the input and output tensors but found nothing else. I'd appreciate help with fixing these two problems. Many thanks!
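One way to sanity-check the done flag is to drive the same wrapped environment with random actions and count how many steps a full episode actually takes; a minimal probe, assuming the same gym version and wrappers as above:

import gym

env = gym.make("PongNoFrameskip-v4")
env = gym.wrappers.AtariPreprocessing(env, noop_max=30, frame_skip=4, screen_size=84)
env = gym.wrappers.FrameStack(env, 4)

env.reset()
steps, done = 0, False
while not done:
    _, _, done, _ = env.step(env.action_space.sample())
    steps += 1
print("steps until done:", steps)  # a full Pong game runs to 21 points, far more than 100 steps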
I am creating my first multivariate multistep encoder-decoder LSTM to forecast revenues.
As you can see in the plots below, the predicted values move towards one value and then stay at that value. The aim is to create a forecast for a longer period, but there is no deviation at all from this constant value after the first week.
What is wrong, and what can I do? To me it doesn't look like it is working at all.
code:
import numpy as np
import pandas as pd
from datetime import timedelta
from sklearn import preprocessing
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout, RepeatVector, TimeDistributed
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.metrics import MeanAbsoluteError
from tensorflow.keras.optimizers import Adam

class ModelTrainer:
    def __init__(self, prediction_length=30, offset=1):
        self.prediction_length = prediction_length
        self.offset = offset
        self._setup_values()
        self.use_scaling = True
        self.__prepare_data()

    def _setup_values(self):
        # Model configuration
        self.additional_metrics = ['accuracy']
        self.embedding_output_dims = 15
        self.max_sequence_length = 300
        self.num_distinct_words = 5000
        self.verbosity_mode = 1
        # DATA
        self.WINDOW_LENGTH = 70  # ! SHOULD BE ADJUSTED TO THE AMOUNT OF FORECASTING DAYS
        self.SAMPLING_RATE = 1
        self.BATCH_SIZE = 128
        # MODEL
        self.DROPOUT = 0.3
        self.NODES_PER_LAYER = 256
        self.NUMBER_OF_LAYERS = 3
        # TRAINING
        self.LEARNING_RATE = 0.001
        self.OPTIMIZER = Adam(learning_rate=self.LEARNING_RATE)
        self.VALIDATION_SPLIT = 0.20
        self.NUMBER_OF_EPOCHS = 10
        self.TEST_SIZE = 0.1
        self.RANDOM_STATE = 123
        self.LOSS_FUNCTION = MeanSquaredError()

    def __import_data(self):
        self.series = DataOrganizer().df

    def __prepare_data(self):
        self.__import_data()
        self.scaler = preprocessing.MinMaxScaler()
        data_scaled = self.scaler.fit_transform(self.series)
        self.features, self.target = self._create_feature_target_values_window(data_scaled)

    def _create_feature_target_values_window(self, data):
        self.number_of_output_columns = 4
        feature_data = data
        target_data = data[:, :self.number_of_output_columns]
        features, target = list(), list()
        in_start = 0
        for _ in range(len(data)):
            in_end = in_start + self.WINDOW_LENGTH
            out_end = in_end + self.prediction_length
            if out_end <= len(data):
                features.append(feature_data[in_start:in_end, :])
                target.append(target_data[in_end:out_end, 0:self.number_of_output_columns])
            in_start += 1
        return np.array(features), np.array(target)

    def __create_LSTM_model(self):
        num_feature_columns = self.features.shape[2]
        num_output_columns = self.target.shape[2]
        model = Sequential()
        model.add(LSTM(self.NODES_PER_LAYER, input_shape=(self.WINDOW_LENGTH, num_feature_columns)))
        model.add(Dropout(self.DROPOUT))
        model.add(RepeatVector(self.prediction_length))
        model.add(LSTM(self.NODES_PER_LAYER, return_sequences=True))
        model.add(Dropout(self.DROPOUT))
        model.add(TimeDistributed(Dense(self.NODES_PER_LAYER)))
        model.add(Dropout(self.DROPOUT))
        model.add(TimeDistributed(Dense(num_output_columns)))
        model.summary()
        return model

    def train_model(self, callbacks=[]):
        model = self.__create_LSTM_model()
        model.compile(loss=self.LOSS_FUNCTION,
                      optimizer=self.OPTIMIZER,
                      metrics=['accuracy', MeanAbsoluteError()])
        model.fit(
            x=self.features,
            y=self.target,
            epochs=self.NUMBER_OF_EPOCHS,
            validation_split=self.TEST_SIZE,
            shuffle=False,
            callbacks=callbacks
        )
        self.model = model

    def create_forecast(self):
        prediction = self.model.predict(self.features[-1:])
        # prediction = self.model.predict(self.features[-30:-29])  # Show forecast from a month old
        test_X = self.features.copy()
        test_X = test_X[:self.prediction_length, :1, self.number_of_output_columns:]
        test_X = test_X.reshape(self.prediction_length,
                                self.series.shape[1] - self.number_of_output_columns)
        prediction = prediction.reshape(self.prediction_length, self.number_of_output_columns)
        inv_yhat = np.concatenate((prediction, test_X), axis=1)
        inv_yhat = self.scaler.inverse_transform(inv_yhat)
        prediction_df = pd.DataFrame(inv_yhat, columns=self.scaler.feature_names_in_)
        first_date = self.series.last_valid_index() + timedelta(days=1)
        last_date = first_date + timedelta(days=self.prediction_length - 1)
        days = pd.date_range(first_date, last_date, freq='D')
        prediction_df.set_index(days, inplace=True)
        prediction_df = prediction_df[self.series.columns[0:4]]
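As an aside on the inverse-scaling trick in create_forecast: MinMaxScaler scales each column independently, so padding the predicted columns with arbitrary filler for the remaining feature columns, calling inverse_transform, and reading back only the first columns recovers the predictions in original units. A self-contained sketch with made-up column counts:

import numpy as np
from sklearn.preprocessing import MinMaxScaler

data = np.random.rand(100, 7) * 50   # e.g. 4 target columns + 3 extra feature columns
scaler = MinMaxScaler()
scaler.fit(data)

pred_scaled = np.random.rand(30, 4)  # stand-in for a model prediction in scaled units
filler = np.zeros((30, 3))           # values here don't affect the first 4 columns
padded = np.concatenate([pred_scaled, filler], axis=1)
pred_original = scaler.inverse_transform(padded)[:, :4]
print(pred_original.shape)           # (30, 4)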
[Figure: Actual revenues]
[Figure: Forecast — flattens to a constant value after the first week] (I know the x-axis description is incorrect. Don't worry about it.)
I have two networks. The output of the first network is the input to the second. To calculate the loss for the second network, I use vanilla policy gradient. I want to backpropagate this loss into the first network, but after checking whether the gradients have changed, I see that they are all None.
I first load the first network (a pre-trained autoencoder) this way:
def load_checkpoint(filepath, model):
    checkpoint = torch.load(filepath)
    model.load_state_dict(checkpoint['state_dict'])
    for parameter in model.parameters():
        parameter.requires_grad = True
    model.train()
    return model
Then I define the optimizers for both networks this way:
class MultipleOptimizer(object):
    def __init__(self, *op):
        self.optimizers = op

    def zero_grad(self):
        for op in self.optimizers:
            op.zero_grad()

    def step(self):
        for op in self.optimizers:
            op.step()

opt = MultipleOptimizer(SGD(model.parameters(), lr=1, momentum=0.9),
                        Adam(logits_net.parameters(), lr=lr))
The reward function is:
# Reward function (sil is presumably sklearn.metrics.silhouette_score)
def reward(x, act):
    km = KMeans(act, n_init=20, n_jobs=4)
    # it seems we can only get a centre from the batch
    y_pred = km.fit_predict(x.detach().cpu().numpy())
    sil_score = sil(x.detach().cpu().numpy(), y_pred)
    return sil_score
The architecture of the second neural net, and an alternative (mlp2) meant to avoid logits = logits.mean(0):
def mlp(sizes, activation=nn.Tanh, output_activation=nn.Identity):
    # Build a feedforward neural network; outputs are the logits
    layers = []
    for j in range(len(sizes)-1):
        act = activation if j < len(sizes)-2 else output_activation
        layers += [nn.Linear(sizes[j], sizes[j+1]), act()]
    return nn.Sequential(*layers)
class mlp2(torch.nn.Module):
    def __init__(self):
        super(mlp2, self).__init__()
        self.linear1 = nn.Linear(10, 100)
        self.relu1 = nn.ReLU(inplace=True)
        self.linear2 = torch.nn.Linear(100, 100)
        self.linear3 = torch.nn.Linear(100, 20)
        self.linear4 = torch.nn.Linear(2000, 100)
        self.ident = nn.Identity()

    def forward(self, x):
        a = self.linear1(x)
        a = self.relu1(a)
        a = self.linear2(a)
        a = self.relu1(a)
        a = self.linear3(a)
        a = torch.flatten(a)
        a = self.linear4(a)
        a = self.relu1(a)
        a = self.linear3(a)
        out = self.ident(a)
        return out
The loss is calculated in the following order:
def get_policy(obs):
    logits = logits_net(obs)
    return Categorical(logits=logits.mean(0))

def get_action(obs):
    return get_policy(obs).sample().item()

def Logp(obs, act):
    logp = get_policy(obs).log_prob(act.cuda())
    return logp

def compute_loss(logp, weights):
    return -(logp * weights).mean()

def train_one_epoch():
    # make some empty lists for logging.
    batch_obs = []      # for observations
    batch_acts = []     # for actions
    batch_weights = []  # for R(tau) weighting in policy gradient
    batch_logp = []

    # reset episode-specific variables
    j = 1         # signal from environment that episode is over
    ep_rews = []  # list for rewards accrued throughout ep

    for i, data in enumerate(train_loader):
        # Create the mean image out of those 100 images
        x, label = data
        x = model(x.cuda())         # torch.Size([100, 10])
        obs = x.data.cpu().numpy()  # [100, 10] - a trajectory with only one state
        # save obs
        batch_obs.append(obs.copy())

        # act in the environment
        # act = get_action(torch.as_tensor(obs, dtype=torch.float32))
        act = get_action(x)
        print('action type', type(act))

        # log probability
        # logp = Logp(torch.as_tensor(obs, dtype=torch.float32), act=torch.as_tensor(act, dtype=torch.int32))
        logp = Logp(x, act=torch.as_tensor(act, dtype=torch.int32))

        # rew = reward(obs, act+2)
        rew = reward(x, act+2)

        # save action, reward
        batch_acts.append(act)
        batch_weights.append(rew)  # episode rewards
        batch_logp.append(logp)

    opt.zero_grad()
    batch_logp = torch.stack(batch_logp, dim=0)
    batch_loss = compute_loss(logp=torch.as_tensor(batch_logp, dtype=torch.float32),
                              weights=torch.as_tensor(batch_weights, dtype=torch.float32))
    batch_loss.backward()  # does it return anything? gradients? print them!
    opt.step()

    for name, param in logits_net.named_parameters():
        print(name, param.grad)
I applied some changes on the assumption that recreating some of the tensors might be the issue. Originally, the output of the first network, obs, was converted with obs = x.data.cpu().numpy() and then sent to the get_action function as act = get_action(torch.as_tensor(obs, dtype=torch.float32)). I changed this to act = get_action(x), so x is sent to the function directly, and likewise changed the arguments of Logp to logp = Logp(x, act=torch.as_tensor(act, dtype=torch.int32)).
After these changes, I still get None for the gradients. Is there any way to backpropagate the gradient when the loss is calculated this way? Any changes I can apply?
Any help is appreciated.
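For context on why recreating tensors matters here: converting a tensor to numpy (or going through .data/.detach()) and rebuilding it with torch.as_tensor produces a fresh leaf tensor with no autograd history, so nothing upstream of that point can receive gradients. A minimal illustration:

import torch

a = torch.randn(3, requires_grad=True)
b = (a * 2.0).sum()

c = torch.as_tensor(b.detach().numpy(), dtype=torch.float32)  # graph is severed here
c.requires_grad_()  # c is a new leaf; backward from c never reaches a
c.backward()
print(a.grad)       # None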
I have code that trains on object coordinates for object detection.
I use a CNN whose output layer is a regression layer (called bound_box_output) that returns (x0, y0, height, width) for an object in an image. After this layer, I try to save the cropped image directly, before the loss step.
i = 0
image_decoded = tf.image.decode_jpeg(tf.read_file('3.jpg'), channels=3)
cropped = tf.image.crop_to_bounding_box(image=image_decoded,
                                        offset_height=tf.cast(bound_box_output[i, 0], tf.int32),
                                        offset_width=tf.cast(bound_box_output[i, 1], tf.int32),
                                        target_height=tf.cast(bound_box_output[i, 2], tf.int32),
                                        target_width=tf.cast(bound_box_output[i, 3], tf.int32))
enc = tf.image.encode_jpeg(cropped)
fname = tf.constant('4.jpeg')
fwrite = tf.write_file(fname, enc)
and in a tf.train.SessionRunHook I run it:
def begin(self):
    self._step = -1
    self._start_time = time.time()

def before_run(self, run_context):
    self._step += 1
    return tf.train.SessionRunArgs(loss)

def after_run(self, run_context, run_values):
    if self._step % LOG_FREQUENCY == 0:
        current_time = time.time()
        duration = current_time - self._start_time
        self._start_time = current_time
        loss_value = run_values.results
        examples_per_sec = LOG_FREQUENCY * BATCH_SIZE / duration
        sec_per_batch = float(duration / LOG_FREQUENCY)
        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)')
        print(format_str % (datetime.now(), self._step, loss_value,
                            examples_per_sec, sec_per_batch))
    if self._step == MAX_STEPS - 1:
        loss_value = run_values.results
        print("The final value of loss is:: ")
        print(loss_value)
        print(fwrite)
        tf.train.SessionRunArgs(fwrite)
The problem is that it does not save the '4.jpeg' image to the specified folder.
Note: I use TensorFlow 1.13 and Python 3.5.
TL;DR: Substitute tf.train.SessionRunArgs(fwrite) with run_context.session.run(fwrite).
SessionRunArgs doesn't actually run the supplied operation. SessionRunArgs objects are returned from the before_run() call; their role is to add arguments to the next session.run() call.
if self._step == MAX_STEPS - 1:
    loss_value = run_values.results
    print("The final value of loss is:: ")
    print(loss_value)
    print(fwrite)
    tf.train.SessionRunArgs(fwrite)  # problematic line
You are attempting to run the fwrite operation at the end of after_run(). However, this line merely instantiates a SessionRunArgs object.
One option to achieve the desired behavior is to take advantage of the run_context argument supplied to after_run(). run_context is of type SessionRunContext, which contains a session reference.
run_context.session.run(fwrite) should do the trick for you.
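Putting it together, the tail of after_run() would look something like this (a sketch under the same assumptions as the question's code):

def after_run(self, run_context, run_values):
    ...
    if self._step == MAX_STEPS - 1:
        loss_value = run_values.results
        print("The final value of loss is:: ")
        print(loss_value)
        # actually execute the write op through the hook's session
        run_context.session.run(fwrite)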
I am trying to build an ANN model using TensorFlow. At the moment I am able to run the program as one long string of code. Now, however, I would like to convert it into something easier to use, so I converted the code into a class (basically copying the entire set of code into a class). Here is what I did:
import os
import numpy as np
import tensorflow as tf

class NNmodel:
    def __init__(self,
                 layers, inpShape, outShape,
                 features,
                 learning_rate=0.1, nSteps=100,
                 saveFolder='models'):
        self.layers = layers
        self.features = features
        self.learning_rate = learning_rate
        self.saveFolder = saveFolder
        self.nSteps = 100
        self.d = tf.placeholder(shape=inpShape, dtype=tf.float32, name='d')        # input layer
        self.dOut = tf.placeholder(shape=outShape, dtype=tf.float32, name='dOut')  # output layer
        self.weights = []
        self.biases = []
        self.compute = [self.d]
        layerSizes = [self.features] + [l['size'] for l in self.layers]
        for i, (v1, v2) in enumerate(zip(layerSizes, layerSizes[1:])):
            self.weights.append(
                tf.Variable(np.random.randn(v1, v2)*0.1, dtype=tf.float32, name='W{}'.format(i)))
            self.biases.append(
                tf.Variable(np.zeros((1, 1)), dtype=tf.float32, name='b{}'.format(i)))
            self.compute.append(tf.matmul(
                self.compute[-1], self.weights[i]) + self.biases[i])
            if self.layers[i]['activation'] == 'tanh':
                self.compute.append(tf.tanh(self.compute[-1]))
            if self.layers[i]['activation'] == 'relu':
                self.compute.append(tf.nn.relu(self.compute[-1]))
            if self.layers[i]['activation'] == 'sigmoid':
                self.compute.append(tf.sigmoid(self.compute[-1]))
        self.result = self.compute[-1]
        self.delta = self.dOut - self.result
        self.cost = tf.reduce_mean(self.delta**2)
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate).minimize(self.cost)
        return

    def findVal(self, func, inpDict, restorePt=None):
        saver = tf.train.Saver()
        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)
        if restorePt is not None:
            try:
                saver.restore(sess, tf.train.latest_checkpoint(restorePt))
                print('Session restored')
            except Exception as e:
                print('Unable to restore the session ...')
                return None
        else:
            print('Warning, no restore point selected ...')
        result = sess.run(func, feed_dict=inpDict)
        sess.close()
        return result

    def optTF(self, inpDict, printSteps=50, modelFile=None):
        cost = []
        saver = tf.train.Saver()
        sess = tf.Session()
        init = tf.global_variables_initializer()
        sess.run(init)
        print('x'*100)
        for i in range(self.nSteps):
            # First run the optimizer ...
            sess.run(self.optimizer, feed_dict=inpDict)
            # Save all the data you want to save
            c = sess.run(self.cost, feed_dict=inpDict)
            cost.append(c)
            if (i % printSteps) == 0:
                print('{:5d}'.format(i))
        result = sess.run(self.result, feed_dict=inpDict)
        if modelFile is not None:
            path = saver.save(sess, os.path.join(
                self.saveFolder, modelFile))
            print('Model saved in: {}'.format(path))
        else:
            print('Warning! model not saved')
        sess.close()
        return cost, result
When I use this model, I see that there seems to be a problem:
N = 500
features = 2
nSteps = 1000
X = [(np.random.random(N))*np.random.randint(1000, 2000) for i in range(features)]
X = np.array([np.random.random(N), np.random.random(N)])
data = [X.T, X[0].reshape(-1, 1)]
layers = [
    {'name': '6', 'size': 10, 'activation': 'tanh'},
    {'name': '7', 'size': 1, 'activation': 'linear'},
]
m1 = NNmodel(layers, inpShape=np.shape(data[0]), outShape=np.shape(data[1]),
             features=features,
             learning_rate=0.1, nSteps=100,
             saveFolder='models1')

d = tf.placeholder(shape=np.shape(data[0]), dtype=tf.float32, name='d_4')
dOut = tf.placeholder(shape=np.shape(data[1]), dtype=tf.float32, name='dOut')

m1.findVal(m1.result, {d: data[0], dOut: data[1]})
Now it appears that there is a mismatch between the placeholders d and dOut that I provide from outside and the ones already present within the model, self.d and self.dOut. How do I solve this problem?
Why not just use the placeholders declared within the model?
m1.findVal(m1.result, {m1.d: data[0], m1.dOut:data[1]})
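The reason the external placeholders fail: creating d and dOut outside the class adds two brand-new nodes to the default graph, while m1.result still depends on m1.d and m1.dOut. Feeding the new nodes leaves the model's own placeholders unfed, so TensorFlow complains about a placeholder with no value. A minimal TF1-style sketch of the pattern, with hypothetical shapes:

import numpy as np
import tensorflow as tf

x_in = tf.placeholder(shape=(None, 2), dtype=tf.float32, name='x_in')
y = x_in * 2.0

# a second placeholder, even with a similar name, is a different graph node
x_other = tf.placeholder(shape=(None, 2), dtype=tf.float32, name='x_other')

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x_in: np.ones((3, 2))}))  # works
    # sess.run(y, feed_dict={x_other: np.ones((3, 2))})    # fails: x_in is unfed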