I am trying to build a segmentation model using PyTorch with a custom IoULoss:
def IoULoss(inputs, targets, smooth=1e-6):
    inputs = (inputs.view(inputs.size(0), -1) > 0.5)
    targets = targets.view(targets.size(0), -1)
    intersection = (inputs & targets).float().sum(1)
    union = (inputs | targets).float().sum(1)
    IoU = (intersection + smooth) / (union + smooth)
    return 1 - IoU.mean()
But when I train the model, I get this error:
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
Is there any good way to cast my predictions to labels?
Full error traceback:
RuntimeError Traceback (most recent call last)
<ipython-input-53-3bfc1b43c8ba> in <module>()
----> 1 my_train(model, 30, torch.optim.Adam(model.parameters(), lr=0.01), IoULoss, train_loader)
<ipython-input-41-ebe9c66b1806> in my_train(clf, epochs, optimizer, criterion, train_data, test_data)
22 epoch_loss += loss.item()
23
---> 24 loss.backward()
25 optimizer.step()
26
/usr/local/lib/python3.7/dist-packages/torch/_tensor.py in backward(self, gradient, retain_graph, create_graph, inputs)
253 create_graph=create_graph,
254 inputs=inputs)
--> 255 torch.autograd.backward(self, gradient, retain_graph, create_graph, inputs=inputs)
256
257 def register_hook(self, hook):
/usr/local/lib/python3.7/dist-packages/torch/autograd/__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)
147 Variable._execution_engine.run_backward(
148 tensors, grad_tensors_, retain_graph, create_graph, inputs,
--> 149 allow_unreachable=True, accumulate_grad=True) # allow_unreachable flag
150
151
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
Training loop:
def my_train(clf, epochs, optimizer, criterion, train_data, test_data=None):
    cur_min_loss = 10e8
    train_losses = []
    for epoch_step in range(epochs):
        epoch_loss = 0.0
        for i, batch in enumerate(train_data):
            X, y = batch
            optimizer.zero_grad()
            prediction = clf(X)
            loss = criterion(prediction, y)
            epoch_loss += loss.item()
            loss.backward()
            optimizer.step()
            del prediction
            del X
            del y
            torch.cuda.empty_cache()
        train_losses.append(epoch_loss / (i + 1))
The criterion is IoULoss, the final activation of clf is Sigmoid, the optimizer is Adam, and train_data is a custom dataset inherited from the PyTorch Dataset class.
The first expression in your loss function:
inputs.view(inputs.size(0), -1) > 0.5
is not differentiable, so the gradient cannot propagate through that operation.
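One common workaround (a minimal sketch; the name SoftIoULoss and the particular soft-union formula are just one frequently used variant, not something from the original post) is to drop the hard threshold inside the loss and compute a "soft" IoU directly on the sigmoid probabilities, so that every operation stays differentiable:

import torch

def SoftIoULoss(inputs, targets, smooth=1e-6):
    # inputs are sigmoid probabilities in [0, 1]; no thresholding here,
    # so autograd can propagate gradients through the whole loss
    inputs = inputs.view(inputs.size(0), -1)
    targets = targets.view(targets.size(0), -1).float()
    intersection = (inputs * targets).sum(1)
    union = (inputs + targets - inputs * targets).sum(1)
    IoU = (intersection + smooth) / (union + smooth)
    return 1 - IoU.mean()

The hard > 0.5 threshold can still be applied at evaluation time to report the actual IoU metric; it just cannot sit inside the loss that loss.backward() runs through.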
I am using TensorFlow 2.0 beta with Python 3.6.6 on macOS (also the nightly build tf-nightly-2.0-preview 2.0.0.dev20190721, although I never managed to get that one working with the compat module in TensorFlow 2.0).
I am trying to migrate a tf.estimator model from TensorFlow 1.12 (fully working) to TensorFlow 2.0. Here is the code:
# estimator model
def baseline_estimator_model(features, labels, mode, params):
    """
    Model function for Estimator
    """
    print('model based on keras layer but return an estimator model')

    # getting the building blocks
    model = keras_building_blocks(params['dim_input'], params['num_classes'])

    dense_inpout = features['dense_input']

    # Logits layer
    if mode == tf.estimator.ModeKeys.TRAIN:
        logits = model(dense_inpout, training=True)
    else:
        logits = model(dense_inpout, training=False)

    # Compute predictions
    probabilities = tf.nn.softmax(logits)
    classes = tf.argmax(input=probabilities, axis=1)

    # make predictions
    predictions = {
        'classes': classes,
        'probabilities': probabilities,
    }

    # to be tested
    predictions_output = tf.estimator.export.PredictOutput(predictions)

    # Provide an estimator spec for `ModeKeys.PREDICT`
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            export_outputs={tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY: predictions_output})

    # Compute loss for both TRAIN and EVAL modes
    # old -> loss = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
    loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)(labels, logits)

    # Generate necessary evaluation metrics
    # old -> accuracy = tf.compat.v1.metrics.accuracy(labels=tf.argmax(input=labels, axis=1), predictions=classes, name='accuracy')
    accuracy = tf.keras.metrics.CategoricalAccuracy()
    accuracy.update_state(labels, logits)
    eval_metrics = {'accuracy': accuracy}
    tf.summary.scalar('accuracy', accuracy.result())

    # Provide an estimator spec for `ModeKeys.EVAL`
    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            eval_metric_ops=eval_metrics)

    # Provide an estimator spec for `ModeKeys.TRAIN`
    if mode == tf.estimator.ModeKeys.TRAIN:
        # old but working -> optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001, beta1=0.9)
        # crashing
        optimizer = tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.9, epsilon=1e-07)
        # old -> train_op = optimizer.minimize(loss, tf.compat.v1.train.get_or_create_global_step())
        train_op = optimizer.minimize(loss, var_list=model.weights)
        return tf.estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=train_op)
        # predictions=predictions, loss=loss, train_op=train_op, export_outputs=predictions_output)
If I keep the compat.v1 optimizer, it works:
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.001, beta1=0.9)
If I try to use the equivalent without compat.v1, it crashes:
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.9,epsilon=1e-07)
with the following error (I am running the code locally for the moment, not on GCP):
I0721 17:33:04.812453 4526515648 estimator.py:209] Using config: {'_model_dir': 'results/Models/Mnist/tf_1_12/estimator/v3/ckpt/', '_tf_random_seed': None, '_save_summary_steps': 10, '_save_checkpoints_steps': 10, '_save_checkpoints_secs': None, '_session_config': allow_soft_placement: true
graph_options {
rewrite_options {
meta_optimizer_iterations: ONE
}
}
, '_keep_checkpoint_max': 3, '_keep_checkpoint_every_n_hours': 10000, '_log_step_count_steps': 50, '_train_distribute': None, '_device_fn': None, '_protocol': None, '_eval_distribute': None, '_experimental_distribute': None, '_experimental_max_worker_delay_secs': None, '_service': None, '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x1c37b11b70>, '_task_type': 'worker', '_task_id': 0, '_global_id_in_cluster': 0, '_master': '', '_evaluation_master': '', '_is_chief': True, '_num_ps_replicas': 0, '_num_worker_replicas': 1}
I0721 17:33:04.815697 4526515648 estimator_training.py:186] Not using Distribute Coordinator.
I0721 17:33:04.817899 4526515648 training.py:612] Running training and evaluation locally (non-distributed).
I0721 17:33:04.818665 4526515648 training.py:700] Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 10 or save_checkpoints_secs None.
I0721 17:33:04.834385 4526515648 model.py:211] input_dataset_fn: TRAIN, train
using keras layer and estimator (recommended way)
exporter <tensorflow_estimator.python.estimator.exporter.LatestExporter object at 0x1c37b115f8>
I0721 17:33:05.117963 4526515648 estimator.py:1145] Calling model_fn.
model based on keras layer but return an estimator model
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<timed exec> in <module>
~/Desktop/Work/Data_Science/Tutorials_Codes/Python/proj_DL_models_and_pipelines_with_GCP/src/model_mnist_2_0_v1/trainer/model.py in train_and_evaluate(FLAGS, use_keras)
589 exporters=exporter)
590
--> 591 tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
592
593 def train_and_evaluate_old(FLAGS, use_keras):
~/anaconda-release/conda-env/env_gcp_dl_2_0_nightly/lib/python3.6/site-packages/tensorflow_estimator/python/estimator/training.py in train_and_evaluate(estimator, train_spec, eval_spec)
471 '(with task id 0). Given task id {}'.format(config.task_id))
472
--> 473 return executor.run()
474
475
~/anaconda-release/conda-env/env_gcp_dl_2_0_nightly/lib/python3.6/site-packages/tensorflow_estimator/python/estimator/training.py in run(self)
611 config.task_type != run_config_lib.TaskType.EVALUATOR):
612 logging.info('Running training and evaluation locally (non-distributed).')
--> 613 return self.run_local()
614
615 # Distributed case.
~/anaconda-release/conda-env/env_gcp_dl_2_0_nightly/lib/python3.6/site-packages/tensorflow_estimator/python/estimator/training.py in run_local(self)
712 max_steps=self._train_spec.max_steps,
713 hooks=train_hooks,
--> 714 saving_listeners=saving_listeners)
715
716 eval_result = listener_for_eval.eval_result or _EvalResult(
~/anaconda-release/conda-env/env_gcp_dl_2_0_nightly/lib/python3.6/site-packages/tensorflow_estimator/python/estimator/estimator.py in train(self, input_fn, hooks, steps, max_steps, saving_listeners)
365
366 saving_listeners = _check_listeners_type(saving_listeners)
--> 367 loss = self._train_model(input_fn, hooks, saving_listeners)
368 logging.info('Loss for final step: %s.', loss)
369 return self
~/anaconda-release/conda-env/env_gcp_dl_2_0_nightly/lib/python3.6/site-packages/tensorflow_estimator/python/estimator/estimator.py in _train_model(self, input_fn, hooks, saving_listeners)
1156 return self._train_model_distributed(input_fn, hooks, saving_listeners)
1157 else:
-> 1158 return self._train_model_default(input_fn, hooks, saving_listeners)
1159
1160 def _train_model_default(self, input_fn, hooks, saving_listeners):
~/anaconda-release/conda-env/env_gcp_dl_2_0_nightly/lib/python3.6/site-packages/tensorflow_estimator/python/estimator/estimator.py in _train_model_default(self, input_fn, hooks, saving_listeners)
1186 worker_hooks.extend(input_hooks)
1187 estimator_spec = self._call_model_fn(
-> 1188 features, labels, ModeKeys.TRAIN, self.config)
1189 global_step_tensor = training_util.get_global_step(g)
1190 return self._train_with_estimator_spec(estimator_spec, worker_hooks,
~/anaconda-release/conda-env/env_gcp_dl_2_0_nightly/lib/python3.6/site-packages/tensorflow_estimator/python/estimator/estimator.py in _call_model_fn(self, features, labels, mode, config)
1144
1145 logging.info('Calling model_fn.')
-> 1146 model_fn_results = self._model_fn(features=features, **kwargs)
1147 logging.info('Done calling model_fn.')
1148
~/Desktop/Work/Data_Science/Tutorials_Codes/Python/proj_DL_models_and_pipelines_with_GCP/src/model_mnist_2_0_v1/trainer/model.py in baseline_estimator_model(features, labels, mode, params)
442 #train_op = optimizer.minimize(loss, tf.compat.v1.train.get_or_create_global_step())
443 #train_op = optimizer.minimize(loss, tf.train.get_or_create_global_step())
--> 444 train_op = optimizer.minimize(loss,var_list=model.weights)
445
446 print('step 8')
~/anaconda-release/conda-env/env_gcp_dl_2_0_nightly/lib/python3.6/site-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py in minimize(self, loss, var_list, grad_loss, name)
315 """
316 grads_and_vars = self._compute_gradients(
--> 317 loss, var_list=var_list, grad_loss=grad_loss)
318
319 return self.apply_gradients(grads_and_vars, name=name)
~/anaconda-release/conda-env/env_gcp_dl_2_0_nightly/lib/python3.6/site-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py in _compute_gradients(self, loss, var_list, grad_loss)
349 if not callable(var_list):
350 tape.watch(var_list)
--> 351 loss_value = loss()
352 if callable(var_list):
353 var_list = var_list()
TypeError: 'Tensor' object is not callable
Any idea how to fix this? The error message has changed over time since the TensorFlow 2.0 alpha.
I am also looking for a full working example of tf.estimator with TensorFlow 2.0. I have issues exporting the model as well. The official TensorFlow 2.0 documentation only uses compat.v1 in its examples and doesn't export the model. All the online courses on tf.estimator from GCP use older versions of TensorFlow (1.12 - 1.14).
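As a side note on the traceback above: in TF 2.x, tf.keras.optimizers.Adam (OptimizerV2) expects the first argument of minimize to be a zero-argument callable that returns the loss, which is why passing a loss tensor ends in loss_value = loss() and TypeError: 'Tensor' object is not callable. Inside a graph-mode model_fn, the pattern shown in the TensorFlow 1-to-2 migration material is to wire the optimizer's iteration counter to the Estimator's global step and build the train op with get_updates. This is an untested sketch adapted to the code above, and it still touches compat.v1 for the global step because the Estimator API itself is v1-based:

if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.9, epsilon=1e-07)
    # reuse the Estimator's global step as the Keras optimizer's iteration counter
    optimizer.iterations = tf.compat.v1.train.get_or_create_global_step()
    # get_updates accepts a loss tensor (unlike minimize, which wants a callable)
    train_op = optimizer.get_updates(loss, model.trainable_variables)[0]
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)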
I am trying to fine-tune the VGG16 neural network; here is the code:
vgg16_model = VGG16(weights="imagenet", include_top="false", input_shape=(224,224,3))
model = Sequential()
model.add(vgg16_model)
#add fully connected layer:
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))
I am getting this error:
ValueError Traceback (most recent call last)
in
2 model.add(vgg16_model)
3 #add fully connected layer:
----> 4 model.add(Flatten())
5 model.add(Dense(256, activation='relu'))
6 model.add(Dropout(0.5))
/usr/local/anaconda/lib/python3.6/site-packages/keras/engine/sequential.py in add(self, layer)
179 self.inputs = network.get_source_inputs(self.outputs[0])
180 elif self.outputs:
--> 181 output_tensor = layer(self.outputs[0])
182 if isinstance(output_tensor, list):
183 raise TypeError('All layers in a Sequential model '
/usr/local/anaconda/lib/python3.6/site-packages/keras/engine/base_layer.py in call(self, inputs, **kwargs)
412 # Raise exceptions in case the input is not compatible
413 # with the input_spec specified in the layer constructor.
--> 414 self.assert_input_compatibility(inputs)
415
416 # Collect input shapes to build layer.
/usr/local/anaconda/lib/python3.6/site-packages/keras/engine/base_layer.py in assert_input_compatibility(self, inputs)
325 self.name + ': expected min_ndim=' +
326 str(spec.min_ndim) + ', found ndim=' +
--> 327 str(K.ndim(x)))
328 # Check dtype.
329 if spec.dtype is not None:
ValueError: Input 0 is incompatible with layer flatten_5: expected min_ndim=3, found ndim=2
I tried many suggested solutions but none of them could solve my problem. How can I solve this?
The official Keras documentation, in the section "Fine-tune InceptionV3 on a new set of classes", does this with the functional API (adapted here to VGG16):
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Flatten, Dense, Dropout

vgg16_model = VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3))

x = vgg16_model.output
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(3, activation='softmax')(x)

model = Model(inputs=vgg16_model.input, outputs=predictions)
You also have an error in include_top="false": the string "false" is truthy, so the full classifier head is kept and the model's output is already 2-D, which is exactly why Flatten complains about expected min_ndim=3, found ndim=2. Pass the boolean instead:
vgg16_model = VGG16(weights="imagenet", include_top=False, input_shape=(224,224,3))
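With the boolean include_top=False, the VGG16 base outputs a 4-D feature map (for a 224x224 input, (batch, 7, 7, 512)), which is what Flatten expects. The original Sequential version should then work as well; a hedged sketch:

from keras.applications.vgg16 import VGG16
from keras.models import Sequential
from keras.layers import Flatten, Dense, Dropout

vgg16_model = VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3))

model = Sequential()
model.add(vgg16_model)                      # outputs a (7, 7, 512) feature map
model.add(Flatten())                        # 4-D input, so the ndim check passes
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(3, activation='softmax'))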
I'm trying to learn deep reinforcement learning through OpenAI Spinning Up. To do this, I want to rewrite some of their code using PyTorch instead of TensorFlow.
Currently I'm trying to convert the code for the basic policy gradient (link with explanations), and this is my code so far:
import torch
import torch.nn as nn
from torch.nn.functional import log_softmax
from torch.distributions import Categorical
import torch.optim as optim
import numpy as np
import gym
from gym.spaces import Discrete, Box

class Policy(nn.Module):
    def __init__(self, sizes, activation=nn.Tanh(), output_activation=None):
        # Build a feedforward neural network.
        super(Policy, self).__init__()
        self.layers = nn.ModuleList([nn.Linear(sizes[i], sizes[i+1]) for i in
                                     range(len(sizes)-1)])
        self.activation = activation
        self.output_activation = output_activation
        self.returns = []  # for R(tau) weighting in policy gradient
        self.rewards = []  # list for rewards accrued throughout ep
        self.logits = []   # for measuring episode logits

    def forward(self, x):
        for layer in self.layers[:-1]:
            x = self.activation(layer(x))
        x = self.layers[-1](x)
        if not self.output_activation == None:
            x = self.output_activation(self.layers[-1](x))
        return x

# make action selection op (outputs int actions, sampled from policy)
def select_action(logits):
    return Categorical(logits=logits).sample()

# make loss function whose gradient, for the right data, is policy gradient
def loss(action_logits, tau_rets):
    return torch.sum(torch.dot(log_softmax(action_logits), tau_rets))

def train(env_name='CartPole-v0', hidden_sizes=[32], lr=1e-2,
          epochs=50, batch_size=5000, render=False):

    # make environment, check spaces, get obs / act dims
    env = gym.make(env_name)
    assert isinstance(env.observation_space, Box), \
        "This example only works for envs with continuous state spaces."
    assert isinstance(env.action_space, Discrete), \
        "This example only works for envs with discrete action spaces."

    obs_dim = env.observation_space.shape[0]
    n_acts = env.action_space.n

    # make core of policy network
    policy = Policy(sizes=[obs_dim]+hidden_sizes+[n_acts])

    # make train op
    train_op = optim.Adam(policy.parameters(), lr=lr)

    # for training policy
    def train_one_epoch():
        # make some empty lists for logging.
        batch_returns = []  # for measuring episode returns
        batch_lens = []     # for measuring episode lengths

        # reset episode-specific variables
        obs = torch.from_numpy(env.reset()).type(torch.FloatTensor)  # first obs comes from starting distribution
        done = False  # signal from environment that episode is over
        num_obs = 0   # to measure the number of observations

        # render first episode of each epoch
        finished_rendering_this_epoch = False

        # collect experience by acting in the environment with current policy
        while True:
            # rendering
            if (not finished_rendering_this_epoch) and render:
                env.render()

            # act in the environment
            act_logit = policy.forward(obs)
            act = select_action(act_logit)
            tmp, reward, done, _ = env.step(act.numpy())
            obs = torch.from_numpy(tmp).type(torch.FloatTensor)
            num_obs += 1

            # save logit, reward
            policy.rewards.append(reward)
            policy.logits.append(act_logit[act].item())

            if done:
                # if episode is over, record info about episode
                ep_ret, ep_len = sum(policy.rewards), len(policy.rewards)
                batch_returns.append(ep_ret)
                batch_lens.append(ep_len)

                # the weight for each logprob(a|s) is R(tau)
                policy.returns += [ep_ret] * ep_len

                # reset episode-specific variables
                tmp, done, policy.rewards = env.reset(), False, []
                obs = torch.from_numpy(tmp).type(torch.FloatTensor)

                # won't render again this epoch
                finished_rendering_this_epoch = True

                # end experience loop if we have enough of it
                if num_obs > batch_size:
                    break

        # take a single policy gradient update step
        print(len(policy.returns), len(policy.rewards), len(policy.logits))
        batch_loss = loss(torch.tensor(policy.logits), torch.tensor(policy.returns))
        batch_loss.backward()
        return batch_loss, batch_returns, batch_lens

    # training loop
    for i in range(epochs):
        batch_loss, batch_rets, batch_lens = train_one_epoch()
        print('epoch: %3d \t loss: %.3f \t return: %.3f \t ep_len: %.3f' %
              (i, batch_loss, np.mean(batch_rets), np.mean(batch_lens)))
When I run train(), I get the following error:
RuntimeError Traceback (most recent call last)
<ipython-input-163-2da0ffaf5447> in <module>()
----> 1 train()
<ipython-input-162-560e772be08b> in train(env_name, hidden_sizes, lr, epochs,
batch_size, render)
114 # training loop
115 for i in range(epochs):
--> 116 batch_loss, batch_rets, batch_lens = train_one_epoch()
117 print('epoch: %3d \t loss: %.3f \t return: %.3f \t ep_len: %.3f'%
118 (i, batch_loss, np.mean(batch_rets), np.mean(batch_lens)))
<ipython-input-162-560e772be08b> in train_one_epoch()
109 print (len(policy.returns),len(policy.rewards),len(policy.logits))
110 batch_loss = loss(torch.tensor(policy.logits),torch.tensor(policy.returns))
--> 111 batch_loss.backward()
112 return batch_loss, batch_returns, batch_lens
113
~\Anaconda3\lib\site-packages\torch\tensor.py in backward(self, gradient,
retain_graph, create_graph)
91 products. Defaults to ``False``.
92 """
---> 93 torch.autograd.backward(self, gradient, retain_graph, create_graph)
94
95 def register_hook(self, hook):
~\Anaconda3\lib\site-packages\torch\autograd\__init__.py in backward(tensors, grad_tensors, retain_graph, create_graph, grad_variables)
88 Variable._execution_engine.run_backward(
89 tensors, grad_tensors, retain_graph, create_graph,
---> 90 allow_unreachable=True) # allow_unreachable flag
91
92
RuntimeError: element 0 of tensors does not require grad and does not have a grad_fn
I don't understand why this happens, since my code is similar to other RL PyTorch code such as this.
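A note that is not part of the original post: the symptom looks like the IoU question above. act_logit[act].item() turns the logit into a plain Python float, and torch.tensor(policy.logits) then builds a brand-new tensor with no grad_fn, so the loss is disconnected from the policy parameters and backward() has nothing to differentiate through. A minimal, hedged sketch of the usual REINFORCE-style bookkeeping follows; it reuses policy, act_logit and train_op from the code above, and the loss form is the standard log-prob-times-return objective rather than the exact Spinning Up expression:

import torch
from torch.distributions import Categorical

# inside the experience loop: keep log-probabilities as tensors, no .item()
dist = Categorical(logits=act_logit)
act = dist.sample()
policy.logits.append(dist.log_prob(act))      # tensor with grad_fn

# at the update step: stack instead of rebuilding a tensor from floats
log_probs = torch.stack(policy.logits)                         # still in the graph
returns = torch.tensor(policy.returns, dtype=torch.float32)    # constants, no grad needed
batch_loss = -(log_probs * returns).mean()
train_op.zero_grad()
batch_loss.backward()
train_op.step()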
I am confronted with the following issue, and I do not really know what could be causing it.
Please check the following attachment
When I try to run the server using ng serve --open, the server starts and compiles everything. Immediately after the page opens, I get a blank window that seems to be waiting for something. By opening the devtools I noticed that the browser holds onto the selected file for a very long time (more than 1 min, as you can see in the pic). If I click the browser button that kills the current script, it says that the selected file was successfully returned with a 200 status code, and immediately after that the WebSocket appears with a 101 status. I do not know which of the two it is hanging on, or why, but it seems to be trying to switch protocols.
If I manage to open the console of the browser fast enough, it prints messages like this platform-browser.js:1770 Throttling history state changes to prevent the browser from hanging. without stopping.
Maybe this will be helpful
platform-browser.js:1770 Throttling history state changes to prevent the browser from hanging.
push../node_modules/@angular/platform-browser/esm5/platform-browser.js.BrowserPlatformLocation.replaceState @ platform-browser.js:1770
push../node_modules/@angular/common/esm5/common.js.PathLocationStrategy.replaceState @ common.js:731
push../node_modules/@angular/common/esm5/common.js.Location.replaceState @ common.js:307
push../node_modules/@angular/router/esm5/router.js.Router.resetUrlToCurrentUrlTree @ router.js:5625
(anonymous) @ router.js:5574
push../node_modules/zone.js/dist/zone.js.ZoneDelegate.invoke @ zone.js:388
onInvoke @ core.js:4760
push../node_modules/zone.js/dist/zone.js.ZoneDelegate.invoke @ zone.js:387
push../node_modules/zone.js/dist/zone.js.Zone.run @ zone.js:138
(anonymous) @ zone.js:872
push../node_modules/zone.js/dist/zone.js.ZoneDelegate.invokeTask @ zone.js:421
onInvokeTask @ core.js:4751
push../node_modules/zone.js/dist/zone.js.ZoneDelegate.invokeTask @ zone.js:420
push../node_modules/zone.js/dist/zone.js.Zone.runTask @ zone.js:188
drainMicroTaskQueue @ zone.js:595
Promise.then (async)
scheduleMicroTask @ zone.js:578
push../node_modules/zone.js/dist/zone.js.ZoneDelegate.scheduleTask @ zone.js:410
onScheduleTask @ zone.js:297
push../node_modules/zone.js/dist/zone.js.ZoneDelegate.scheduleTask @ zone.js:401
push../node_modules/zone.js/dist/zone.js.Zone.scheduleTask @ zone.js:232
push../node_modules/zone.js/dist/zone.js.Zone.scheduleMicroTask @ zone.js:252
scheduleResolveOrReject @ zone.js:862
resolvePromise @ zone.js:808
(anonymous) @ zone.js:724
webpackJsonpCallback @ bootstrap:23
(anonymous) @ dashboard-dashboard-module.js:1
platform-browser.js:1770 Throttling history state changes to prevent the browser from hanging.
...
The port is not occupied.
I am using:
Angular CLI 6.0.8
Windows 7 x64
Node 8.11.2
Any ideas why this is hanging?