Azure App Service: Container (container name) didn't respond to HTTP pings on port: 8080

I've created a very simple Discord bot and am trying to host it on Azure.
When I run it locally, it works fine.
When I deploy it, it works fine for a while and then it crashes and restarts.
I get the following logs:
2020-03-23T21:41:50.738748919Z [ASCII-art banner: A P P   S E R V I C E   O N   L I N U X]
2020-03-23T21:41:50.738843319Z
2020-03-23T21:41:50.738846719Z Documentation: http://aka.ms/webapp-linux
2020-03-23T21:41:50.738850119Z NodeJS quickstart: https://aka.ms/node-qs
2020-03-23T21:41:50.738853619Z NodeJS Version : v12.13.0
2020-03-23T21:41:50.738857019Z Note: Any data outside '/home' is not persisted
2020-03-23T21:41:50.738873019Z
2020-03-23T21:41:51.030040223Z Oryx Version: 0.2.20191105.2, Commit: 67e159d71419415435cb5d10c05a0f0758ee8809, ReleaseTagName: 20191105.2
2020-03-23T21:41:51.030978923Z Found build manifest file at '/home/site/wwwroot/oryx-manifest.toml'. Deserializing it...
2020-03-23T21:41:51.038540817Z Build Operation ID: |Nl1fjNBjXG0=.86fc023f_
2020-03-23T21:41:52.164947961Z Environment Variables for Application Insight's Codeless Configuration exists..
2020-03-23T21:41:52.165846260Z Writing output script to '/home/site/wwwroot/oryx-appinsightsloader.js'
2020-03-23T21:41:52.523126320Z Writing output script to '/opt/startup/startup.sh'
2020-03-23T21:41:52.729416182Z Running #!/bin/sh
2020-03-23T21:41:52.729893482Z
2020-03-23T21:41:52.729904682Z # Enter the source directory to make sure the script runs where the user expects
2020-03-23T21:41:52.729909382Z cd "/home/site/wwwroot"
2020-03-23T21:41:52.729913282Z
2020-03-23T21:41:52.729916782Z export NODE_PATH=$(npm root --quiet -g):$NODE_PATH
2020-03-23T21:41:52.729920482Z if [ -z "$PORT" ]; then
2020-03-23T21:41:52.730906381Z export PORT=8080
2020-03-23T21:41:52.730917481Z fi
2020-03-23T21:41:52.730921481Z
2020-03-23T21:41:52.730925081Z echo Found tar.gz based node_modules.
2020-03-23T21:41:52.730928781Z extractionCommand="tar -xzf node_modules.tar.gz -C /node_modules"
2020-03-23T21:41:52.731720380Z echo "Removing existing modules directory from root..."
2020-03-23T21:41:52.731731380Z rm -fr /node_modules
2020-03-23T21:41:52.731735380Z mkdir -p /node_modules
2020-03-23T21:41:52.731738880Z echo Extracting modules...
2020-03-23T21:41:52.737901476Z $extractionCommand
2020-03-23T21:41:52.737914876Z export NODE_PATH="/node_modules":$NODE_PATH
2020-03-23T21:41:52.737919576Z export PATH=/node_modules/.bin:$PATH
2020-03-23T21:41:52.737931076Z if [ -d node_modules ] || [ -L node_modules ]; then
2020-03-23T21:41:52.737935076Z mv -f node_modules _del_node_modules || true
2020-03-23T21:41:52.737938676Z fi
2020-03-23T21:41:52.738335276Z
2020-03-23T21:41:52.738348276Z if [ -d /node_modules ]; then
2020-03-23T21:41:52.738352876Z ln -s /node_modules ./node_modules
2020-03-23T21:41:52.738356776Z fi
2020-03-23T21:41:52.738635876Z
2020-03-23T21:41:52.738700276Z echo "Done."
2020-03-23T21:41:52.738743776Z export NODE_OPTIONS='--require ./oryx-appinsightsloader.js ' $NODE_OPTIONS
2020-03-23T21:41:52.738885175Z npm start
2020-03-23T21:41:53.504769761Z Found tar.gz based node_modules.
2020-03-23T21:41:53.505139861Z Removing existing modules directory from root...
2020-03-23T21:41:53.515730254Z Extracting modules...
2020-03-23T21:41:53.800945462Z Done.
2020-03-23T21:41:55.412480180Z
2020-03-23T21:41:55.412504880Z > my-bot@1.0.0 start /home/site/wwwroot
2020-03-23T21:41:55.412521380Z > node stonksbot.js
2020-03-23T21:41:55.412525880Z
2020-03-23T21:41:56.623960067Z Logged in as Mr.Stonks#0654!
2020-03-23 21:45:42.618 ERROR - Container stonksbot1_0_0cecfd85 for site stonksbot1 did not start within expected time limit. Elapsed time = 231.774951 sec
2020-03-23 21:45:42.620 ERROR - Container stonksbot1_0_0cecfd85 didn't respond to HTTP pings on port: 8080, failing site start. See container logs for debugging.
2020-03-23 21:45:42.642 INFO - Stoping site stonksbot1 because it failed during startup.
I didn't set up Docker, so I'm guessing Azure is doing it for me.
My package.json looks like this:
{
  "name": "my-bot",
  "version": "1.0.0",
  "main": "index.js",
  "scripts": {
    "start": "node stonksbot.js"
  },
  "dependencies": {
    "discord.js": "^11.6.3"
  },
  "devDependencies": {}
}
And my code looks like this:
const Discord = require('discord.js');
const client = new Discord.Client();
client.login(my token);
client.on('ready', () => {
  console.log(`Logged in as ${client.user.tag}!`);
});
client.on('message', msg => {
  // do something
});
Anyone know the solution?

You need to let your app respond to a ping from Azure App Service on the port specified.
Example here: https://github.com/Azure-Samples/nodejs-docs-hello-world/blob/master/index.js
You also need to set the environment variable WEBSITES_PORT to the value of the port too.
If you don't respond to the ping Azure will think the app is down and try to restart it.
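For a bot that serves no real HTTP traffic, a minimal sketch of such a ping responder using Node's built-in http module (relying on the PORT variable that App Service injects, as seen in the startup script above):
const http = require('http');
// App Service probes this port; WEBSITES_PORT should be set to the same value.
const port = process.env.PORT || 8080;
http.createServer((req, res) => {
  res.writeHead(200, { 'Content-Type': 'text/plain' });
  res.end('Bot is alive');
}).listen(port);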

Related

Check file exists before launching it in webpack npm scripts

I have a package.json like this:
...
"scripts": {
"dev": "webpack --config webpack.dev.config.js --mode development --progress --colors",
"postdev": "if (Test-Path \"./postdev.sh\" ) { echo \"file exists\"; ./postdev.sh }"
},
...
How can I check whether the file "postdev.sh" exists and then launch it from the npm scripts section?
I run that command in the terminal and it works correctly, but if I try to launch that npm script it says "Unexpected appearance: "./postdev.sh"."
On macOS or Linux, try this one for postdev:
"postdev": "test -f ./postdev.sh && echo 'file exists' && ./postdev.sh",
Finally found a solution (it relies on cmd.exe's if exist, so it probably works only on Windows, but that is enough for me):
"postdev": "if exist postdev.sh ( postdev.sh )",
You can use the path-exists-cli package, a cross-platform tool, to check whether a file/directory exists, and chain && or || after it to run the next command when it exists or doesn't, respectively:
{
  "scripts": {
    // other scripts...
    "postdev": "path-exists ./postdev.sh && echo 'Exists' || echo 'Does not exist'"
  }
}
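If you would rather avoid the extra dependency, the same check can be written with Node itself; a sketch (untested, and it assumes an sh-compatible shell is available to run the script):
"postdev": "node -e \"if (require('fs').existsSync('./postdev.sh')) require('child_process').execSync('sh ./postdev.sh', { stdio: 'inherit' })\""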

Can't start laravel-echo-server with supervisor

I have already referenced all of the other suggestions that people have made on other posts; nothing has worked.
Paths to Relevant Files
The root directory of my project is /var/www/html and that is where I have .env and laravel-echo-server.json.
I have laravel-echo-server installed globally. I can run it successfully from the command line with laravel-echo-server start --dir=/path/to/project/root
When I run which laravel-echo-server, it shows its path is ~/.nvm/versions/node/v13.5.0/bin/laravel-echo-server.
Likewise, the path for node is ~/.nvm/versions/node/v13.5.0/bin/node
My conf file for the supervisor worker is at /etc/supervisor/conf.d/laravel-echo-server.conf.
Supervisor runs the other workers successfully, such as Horizon, so it is not a problem with the supervisor configuration.
Conf File
[program:laravel-echo-server]
process_name=%(program_name)s_%(process_num)02d
command=laravel-echo-server start --dir=/var/www/html
autostart=true
numprocs=1
user=root
autorestart=true
stdout_logfile=/var/log/workers/laravel-echo-server.log
I have also tried the following variations for the command line:
command=/usr/bin/laravel-echo-server start --dir=/var/www/html
command=~/.nvm/versions/node/v13.5.0/bin/laravel-echo-server --dir=/var/www/html
All of these attempts and variations return ERROR (no such file).
I also tried making duplicate copies of laravel-echo-server.json to place in locations like /usr/bin and /etc/supervisor/conf.d but that didn't help.
I also tried changing the user from root to ec2-user (which is my username with which I can successfully initialize laravel-echo-server from the command line).
I have also tried adding another line: directory=/var/www/html but that doesn't help.
Shell Executable Attempt
I tried to make a shell executable file that supervisor could call. Here is the file:
#!/bin/bash
exec laravel-echo-server start --dir=../../../var/www/html
I called the executable with supervisor like this:
command=bash -c laravel-echo-server.sh
But it returned ERROR (spawn error).
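One guess at why this fails: bash -c treats laravel-echo-server.sh as a command name to be looked up on PATH, not as a file in the current directory. A sketch of an alternative (untested; paths taken from the question) would be to call the script by absolute path:
command=/bin/bash /var/www/html/laravel-echo-server.sh
and have the script put the nvm bin directory on PATH before exec'ing:
#!/bin/bash
export PATH="/home/ec2-user/.nvm/versions/node/v13.5.0/bin:$PATH"
exec laravel-echo-server start --dir=/var/www/html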
Additional Info
supervisord.conf
[inet_http_server]
port=*:9001
[unix_http_server]
file=/var/run/supervisor.sock ; (the path to the socket file)
chmod=0700 ; sockef file mode (default 0700)
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisord]
;http_port=/var/tmp/supervisor.sock ; (default is to run a UNIX domain socket server)
;http_port=127.0.0.1:9001 ; (alternately, ip_address:port specifies AF_INET)
;sockchmod=0700 ; AF_UNIX socketmode (AF_INET ignore, default 0700)
;sockchown=nobody.nogroup ; AF_UNIX socket uid.gid owner (AF_INET ignores)
;umask=022 ; (process file creation umask;default 022)
logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log)
logfile_maxbytes=50MB ; (max main logfile bytes b4 rotation;default 50MB)
logfile_backups=10 ; (num of main logfile rotation backups;default 10)
loglevel=info ; (logging level;default info; others: debug,warn)
pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
nodaemon=false ; (start in foreground if true;default false)
minfds=1024 ; (min. avail startup file descriptors;default 1024)
minprocs=200 ; (min. avail process descriptors;default 200)
;nocleanup=true ; (don't clean up tempfiles at start;default false)
;http_username=user ; (default is no username (open system))
;http_password=123 ; (default is no password (open system))
;childlogdir=/tmp ; ('AUTO' child log dir, default $TEMP)
;user=chrism ; (default is current user, required if root)
;directory=/tmp ; (default is not to cd during start)
;environment=KEY=value ; (key value pairs to add to environment)
[supervisorctl]
serverurl=unix:///var/run/supervisor.sock
;serverurl=http://127.0.0.1:9001 ; use an http:// url to specify an inet socket
;username=chris ; should be same as http_username if set
;password=123 ; should be same as http_password if set
;prompt=mysupervisor ; cmd line prompt (default "supervisor")
[include]
files = /etc/supervisor/conf.d/*.conf
laravel-echo-server.json
{
  "authHost": "http://mywebsite.com",
  "authEndpoint": "/broadcasting/auth",
  "clients": [],
  "database": "redis",
  "databaseConfig": {
    "redis": {[my redis credentials]},
    "sqlite": {
      "databasePath": "/database/laravel-echo-server.sqlite"
    }
  },
  "devMode": true,
  "host": null,
  "port": "6001",
  "protocol": "http",
  "socketio": {},
  "sslCertPath": "",
  "sslKeyPath": "",
  "sslCertChainPath": "",
  "sslPassphrase": "",
  "subscribers": {
    "http": true,
    "redis": true
  },
  "apiOriginAllow": {
    "allowCors": false,
    "allowOrigin": "",
    "allowMethods": "",
    "allowHeaders": ""
  }
}
UPDATE
Now I have tried:
command=/home/ec2-user/.nvm/versions/node/v13.5.0/bin/laravel-echo-server start --dir=/var/www/html
per the suggestion in the post comments. However, that is returning ERROR (spawn error).
When I check the supervisord.log, it shows the following:
2019-12-31 07:27:05,869 INFO exited: laravel-echo-server_00 (exit status 127; not expected)
Exit status code 127 apparently means "command not found". A likely reason: the laravel-echo-server launcher starts with a #!/usr/bin/env node shebang, and supervisor does not load a login shell environment, so the nvm-managed node is not on its PATH.
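If that is the cause, pointing supervisor at the node binary explicitly (so the shebang is never consulted), or extending its PATH, might work; a sketch, untested:
command=/home/ec2-user/.nvm/versions/node/v13.5.0/bin/node /home/ec2-user/.nvm/versions/node/v13.5.0/bin/laravel-echo-server start --dir=/var/www/html
environment=PATH="/home/ec2-user/.nvm/versions/node/v13.5.0/bin:%(ENV_PATH)s"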
So after giving up on running it with supervisor, it became easiest to run it with pm2.
Here are my .ebextensions commands:
sudo yum install -y gcc-c++ make
curl -sL https://rpm.nodesource.com/setup_13.x | sudo -E bash -
sudo yum install -y nodejs
npm config set scripts-prepend-node-path true
npm install -g laravel-echo-server
npm install -g pm2@latest
pm2 start laravel-echo-server-pm2.json
And my pm2 json:
{
  "name": "laravel-echo-server",
  "script": "laravel-echo-server",
  "args": "start"
}
I also added a few more commands to .ebextensions that allow me to modify my .env file. The changes overwrite the values written into laravel-echo-server.json. This way, I don't have to change them every time I switch from dev to prod:
echo "LARAVEL_ECHO_SERVER_REDIS_HOST=production-redis-host.com" >> .env
echo "LARAVEL_ECHO_SERVER_REDIS_PORT=6379" >> .env
echo "LARAVEL_ECHO_SERVER_DEBUG=false" >> .env

Deploying a single bash script with nixops

I'm just starting to learn Nix / NixOS / NixOps. I needed to install a simple bash script onto a remote host with nixops, and I cannot figure out how to do it. I have two files:
just-deploy-bash-script.nix
{
  resources.sshKeyPairs.ssh-key = {};

  test-host = { config, lib, pkgs, ... }: {
    deployment.targetEnv = "digitalOcean";
    deployment.digitalOcean.region = "sgp1";
    deployment.digitalOcean.size = "s-2vcpu-4gb";

    environment.systemPackages =
      let
        my-package = pkgs.callPackage ./my-package.nix { inherit pkgs; };
      in [
        pkgs.tmux
        my-package
      ];
  };
}
my-package.nix
{ pkgs ? import <nixpkgs> {}, ... }:
let
  pname = "my-package";
  version = "1.0.0";
  stdenv = pkgs.stdenv;
in
stdenv.mkDerivation {
  inherit pname version;
  src = ./.;
  installPhase =
    let
      script = pkgs.writeShellScriptBin "my-test" ''
        echo This is my test script
      '';
    in
    ''
      mkdir $out;
      cp -r ${script} $out/
    '';
}
I deploy as follows. I go to the directory in which these two files are located and then sequentially execute two commands:
nixops create -d test just-deploy-bash-script.nix
nixops deploy -d test
Deployment passes without errors and completes successfully. But when I log in to the newly created remote host, I find that the tmux package from the standard set is present in the system, and my-package is absent:
nixops ssh -d test test-host
[root@test-host:~]# which tmux
/run/current-system/sw/bin/tmux
[root@test-host:~]# find /nix/store/ -iname tmux
/nix/store/hd1sgvb4pcllxj69gy3qa9qsns68arda-nixpkgs-20.03pre206749.5a3c1eda46e/nixpkgs/pkgs/tools/misc/tmux
/nix/store/609zdpfi5kpz2c7mbjcqjmpb4sd2y3j4-ncurses-6.0-20170902/share/terminfo/t/tmux
/nix/store/4cxkil2r3dzcf5x2phgwzbxwyvlk6i9k-system-path/share/bash-completion/completions/tmux
/nix/store/4cxkil2r3dzcf5x2phgwzbxwyvlk6i9k-system-path/bin/tmux
/nix/store/606ni2d9614sxkhnnnhr71zqphdam6jc-system-path/share/bash-completion/completions/tmux
/nix/store/606ni2d9614sxkhnnnhr71zqphdam6jc-system-path/bin/tmux
/nix/store/ddlx3x8xhaaj78xr0zasxhiy2m564m2s-nixos-17.09.3269.14f9ee66e63/nixos/pkgs/tools/misc/tmux
/nix/store/kvia4rwy9y4wis4v2kb9y758gj071p5v-ncurses-6.1-20190112/share/terminfo/t/tmux
/nix/store/c3m8qvmn2yxkgpfajjxbcnsgfrcinppl-tmux-2.9a/share/bash-completion/completions/tmux
/nix/store/c3m8qvmn2yxkgpfajjxbcnsgfrcinppl-tmux-2.9a/bin/tmux
[root@test-host:~]# which my-test
which: no my-test in (/root/bin:/run/wrappers/bin:/root/.nix-profile/bin:/etc/profiles/per-user/root/bin:/nix/var/nix/profiles/default/bin:/run/current-system/sw/bin)
[root@test-host:~]# find /nix/store/ -iname *my-test*
[root@test-host:~]#
Help me figure out what's wrong with my scripts. Any links to documentation or examples of the implementation of such a task are welcome.
The shell cannot find your script because it is copied into the wrong directory.
This becomes apparent after building my-package.nix:
$ nix-build my-package.nix
$ ls result/
zh5bxljvpmda4mi4x0fviyavsa3r12cx-my-test
Here you see the basename of a storepath inside a store path. This is caused by the line:
cp -r ${script} $out/
Changing it to something like this should fix that problem:
cp -r ${script}/* $out/
Since pkgs.writeShellScriptBin places the script at bin/my-test inside its own store path, copying that store path's contents gives you $out/bin/my-test, which environment.systemPackages then puts on the PATH.

How to fix Node.js Sails error: Failed to lift app

I’m trying to deploy a Node.js app using the Sails framework in production mode.
I’m using the command npm start which runs NODE_ENV=production node app.js. In package.json I have the following content:
{
"name": "myapp",
"private": true,
"version": "0.0.0",
"description": "a Sails application",
"keywords": [],
"dependencies": {
"sails": "^1.2.3",
"grunt": "1.0.4",
"sails-hook-apianalytics": "^2.0.3",
"sails-hook-grunt": "^3.0.2",
"sails-hook-organics": "^0.16.0",
"sails-hook-orm": "^2.1.1",
"sails-hook-sockets": "^2.0.0",
"#sailshq/connect-redis": "^3.2.1",
"#sailshq/socket.io-redis": "^5.2.0",
"#sailshq/lodash": "^3.10.3"
},
"devDependencies": {
"eslint": "5.16.0",
"htmlhint": "0.11.0",
"lesshint": "6.3.6",
"#sailshq/eslint": "^4.19.3",
"sails-hook-grunt": "^4.0.0"
},
"scripts": {
"start": "NODE_ENV=production node app.js",
"test": "npm run lint && npm run custom-tests && echo 'Done.'",
"lint": "./node_modules/eslint/bin/eslint.js . --max-warnings=0 --report-unused-disable-directives && echo '✔ Your .js files look so good.' && ./node_modules/htmlhint/bin/htmlhint -c ./.htmlhintrc views/*.ejs && ./node_modules/htmlhint/bin/htmlhint -c ./.htmlhintrc views/**/*.ejs && ./node_modules/htmlhint/bin/htmlhint -c ./.htmlhintrc views/**/**/*.ejs && ./node_modules/htmlhint/bin/htmlhint -c ./.htmlhintrc views/**/**/**/*.ejs && ./node_modules/htmlhint/bin/htmlhint -c ./.htmlhintrc views/**/**/**/**/*.ejs && ./node_modules/htmlhint/bin/htmlhint -c ./.htmlhintrc views/**/**/**/**/**/*.ejs && ./node_modules/htmlhint/bin/htmlhint -c ./.htmlhintrc views/**/**/**/**/**/**/*.ejs && echo '✔ So do your .ejs files.' && ./node_modules/lesshint/bin/lesshint assets/styles/ --max-warnings=0 && echo '✔ Your .less files look good, too.'",
"custom-tests": "echo \"(No other custom tests yet.)\" && echo",
"deploy": "echo 'This script assumes a dead-simple, opinionated setup on Heroku.' && echo 'But, of course, you can deploy your app anywhere you like.' && echo '(Node.js/Sails.js apps are supported on all modern hosting platforms.)' && echo && echo 'Warning: Specifically, this script assumes you are on the master branch, and that your app can be deployed simply by force-pushing on top of the *deploy* branch. It will also temporarily use a local *predeploy* branch for preparing assets, that it will delete after it finishes. Please make sure there is nothing you care about on either of these two branches!!!' && echo '' && echo '' && echo 'Preparing to deploy...' && echo '--' && git status && echo '' && echo '--' && echo 'I hope you are on the master branch and have everything committed/pulled/pushed and are completely up to date and stuff.' && echo '********************************************' && echo '** IF NOT THEN PLEASE PRESS <CTRL+C> NOW! **' && echo '********************************************' && echo 'Press CTRL+C to cancel.' && echo '(you have five seconds)' && sleep 1 && echo '...4' && sleep 1 && echo '...3' && sleep 1 && echo '...2' && sleep 1 && echo '...1' && sleep 1 && echo '' && echo 'Alright, here we go. No turning back now!' && echo 'Trying to switch to master branch...' && git checkout master && echo && echo 'OK. Now wiping node_modules/ and running npm install...' && rm -rf node_modules && rm -rf package-lock.json && npm install && (git add package-lock.json && git commit -am 'AUTOMATED COMMIT: Did fresh npm install before deploying, and it caused something relevant (probably the package-lock.json file) to change! This commit tracks that change.' || true) && echo 'Deploying as version:' && npm version patch && echo '' && git push origin master && git push --tags && (git branch -D predeploy > /dev/null 2>&1 || true) && git checkout -b predeploy && (echo 'Now building+minifying assets for production...' && echo '(Hang tight, this could take a while.)' && echo && node node_modules/grunt/bin/grunt buildProd || (echo && echo '------------------------------------------' && echo 'IMPORTANT! IMPORTANT! IMPORTANT!' && echo 'ERROR: Could not compile assets for production!' && echo && echo 'Attempting to recover automatically by stashing, ' && echo 'switching back to the master branch, and then ' && echo 'deleting the predeploy branch... ' && echo && echo 'After this, please fix the issues logged above' && echo 'and push that up. Then, try deploying again.' && echo '------------------------------------------' && echo && echo 'Staging, deleting the predeploy branch, and switching back to master...' && git stash && git checkout master && git branch -D predeploy && false)) && mv www .www && git add .www && node -e 'sailsrc = JSON.parse(require(\"fs\").readFileSync(\"./.sailsrc\", \"utf8\")); if (sailsrc.paths&&sailsrc.paths.public !== undefined || sailsrc.hooks&&sailsrc.hooks.grunt !== undefined) { throw new Error(\"Cannot complete deployment script: .sailsrc file has conflicting contents! 
Please throw away this midway-complete deployment, switch back to your original branch (master), remove the conflicting stuff from .sailsrc, then commit and push that up.\"); } sailsrc.paths = sailsrc.paths || {}; sailsrc.paths.public = \"./.www\"; sailsrc.hooks = sailsrc.hooks || {}; sailsrc.hooks.grunt = false; require(\"fs\").writeFileSync(\"./.sailsrc\", JSON.stringify(sailsrc))' && git commit -am 'AUTOMATED COMMIT: Automatically bundling compiled assets as part of deploy, updating the EJS layout and .sailsrc file accordingly.' && git push origin predeploy && git checkout master && git push origin +predeploy:deploy && git push --tags && git branch -D predeploy && git push origin :predeploy && echo '' && echo '--' && echo 'OK, done. It should be live momentarily on your staging environment.' && echo '(if you get impatient, check the Heroku dashboard for status)' && echo && echo 'Staging environment:' && echo ' 🌐–• https://staging.example.com' && echo ' (hold ⌘ and click to open links in the terminal)' && echo && echo 'Please review that to make sure it looks good.' && echo 'When you are ready to go to production, visit your pipeline on Heroku and press the PROMOTE TO PRODUCTION button.'"
},
"main": "app.js",
"repository": {
"type": "git",
"url": "git://github.com/xxxxxx.git"
},
"author": "me",
"license": "",
"engines": {
"node": "^10.16"
}
}
If I run sails lift, everything goes right.
But when I run npm start I get the following error:
error: Failed to lift app: Error: Should not specify a trailing slash, but instead got: https://myapp.herokuapp.com/
at checkOriginUrl (/home/me/Documents/prog/myapp/node_modules/sails-hook-sockets/lib/util/check-origin-url.js:57:40)
at /home/me/Documents/prog/myapp/node_modules/sails-hook-sockets/lib/configure.js:86:9
at arrayEach (/home/me/Documents/prog/myapp/node_modules/@sailshq/lodash/lib/index.js:1470:13)
at Function.<anonymous> (/home/me/Documents/prog/myapp/node_modules/@sailshq/lodash/lib/index.js:3532:13)
at Hook.configure (/home/me/Documents/prog/myapp/node_modules/sails-hook-sockets/lib/configure.js:85:9)
at Hook.wrapper [as configure] (/home/me/Documents/prog/myapp/node_modules/@sailshq/lodash/lib/index.js:3282:19)
at /home/me/Documents/prog/myapp/node_modules/sails/lib/app/private/loadHooks.js:331:20
at /home/me/Documents/prog/myapp/node_modules/sails/node_modules/async/dist/async.js:3083:16
at eachOfArrayLike (/home/me/Documents/prog/myapp/node_modules/sails/node_modules/async/dist/async.js:1003:9)
at eachOf (/home/me/Documents/prog/myapp/node_modules/sails/node_modules/async/dist/async.js:1051:5)
at Object.eachLimit (/home/me/Documents/prog/myapp/node_modules/sails/node_modules/async/dist/async.js:3145:5)
at configure (/home/me/Documents/prog/myapp/node_modules/sails/lib/app/private/loadHooks.js:328:17)
at /home/me/Documents/prog/myapp/node_modules/sails/node_modules/async/dist/async.js:3853:24
at replenish (/home/me/Documents/prog/myapp/node_modules/sails/node_modules/async/dist/async.js:946:17)
at iterateeCallback (/home/me/Documents/prog/myapp/node_modules/sails/node_modules/async/dist/async.js:931:17)
at /home/me/Documents/prog/myapp/node_modules/sails/node_modules/async/dist/async.js:906:16
I’m trying to use Heroku to deploy the app. For this reason I added this in production.js inside the sockets object:
onlyAllowOrigins: [
  'https://myapp.herokuapp.com/',
],
I think this problem could be caused by some missed configuration in production.js, but I couldn't identify what.
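Judging by the error message alone, removing the trailing slash from the origin should let the app lift; a sketch of the corrected sockets config in production.js:
onlyAllowOrigins: [
  'https://myapp.herokuapp.com',
],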

Yarn install production dependencies of a single package in workspace

I'm trying to install the production dependencies only for a single package in my workspace. Is that possible?
I've already tried this:
yarn workspace my-package-in-workspace install -- --prod
But it is installing all production dependencies of all my packages.
Yarn 1 doesn't support it, as far as I know.
If you are trying to install a specific package in a Dockerfile, then there is a workaround:
copy the yarn.lock file and the root package.json
copy only the package.json files you need: your package's and those of any other packages in the monorepo that it depends on locally.
in the Dockerfile, manually remove the devDependencies from every package.json you copied.
run yarn install on the root package.json.
Note:
Deterministic installation - it is recommended in monorepos to force a deterministic install: https://stackoverflow.com/a/64503207/806963
Full Dockerfile example:
FROM node:12
WORKDIR /usr/project
COPY yarn.lock package.json remove-all-dev-deps-from-all-package-jsons.js change-version.js ./
ARG package_path=packages/dancer-placing-manager
COPY ${package_path}/package.json ./${package_path}/package.json
RUN node remove-all-dev-deps-from-all-package-jsons.js && rm remove-all-dev-deps-from-all-package-jsons.js
RUN yarn install --frozen-lockfile --production
COPY ${package_path}/dist/src ./${package_path}/dist/src
COPY ${package_path}/src ./${package_path}/src
CMD node --unhandled-rejections=strict ./packages/dancer-placing-manager/dist/src/index.js
remove-all-dev-deps-from-all-package-jsons.js:
const fs = require('fs')
const path = require('path')
const { execSync } = require('child_process')

// Delete the devDependencies key from the package.json at the given path.
async function deleteDevDeps(packageJsonPath) {
  const packageJson = require(packageJsonPath)
  delete packageJson.devDependencies
  await new Promise((res, rej) =>
    fs.writeFile(packageJsonPath, JSON.stringify(packageJson, null, 2), 'utf-8', error => (error ? rej(error) : res())),
  )
}

// Ask yarn for every workspace's location and build the absolute path to its package.json.
function getSubPackagesPaths(repoPath) {
  const result = execSync(`yarn workspaces --json info`).toString()
  const workspacesInfo = JSON.parse(JSON.parse(result).data)
  return Object.values(workspacesInfo)
    .map(workspaceInfo => workspaceInfo.location)
    .map(packagePath => path.join(repoPath, packagePath, 'package.json'))
}

// Strip devDependencies from the root package.json and from every workspace package.json.
async function main() {
  const repoPath = __dirname
  const packageJsonPath = path.join(repoPath, 'package.json')
  await deleteDevDeps(packageJsonPath)
  await Promise.all(getSubPackagesPaths(repoPath).map(packageJsonPath => deleteDevDeps(packageJsonPath)))
}

if (require.main === module) {
  main()
}
It looks like this is easily possible now with Yarn 2 via yarn workspaces focus: https://yarnpkg.com/cli/workspaces/focus
But I haven't tried it myself.
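For reference, the focused, production-only install would presumably be run from the target workspace's directory like this (untested; the command comes from Yarn's workspace-tools plugin):
yarn workspaces focus --production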
Here is my solution for Yarn 1:
# Install dependencies for the whole monorepo because
# 1. The --ignore-workspaces flag is not implemented https://github.com/yarnpkg/yarn/issues/4099
# 2. The --focus flag is broken https://github.com/yarnpkg/yarn/issues/6715
# Avoid the target workspace dependencies to land in the root node_modules.
sed -i 's|"dependencies":|"workspaces": { "nohoist": ["**"] }, "dependencies":|g' apps/target-app/package.json
# Run `yarn install` twice to workaround https://github.com/yarnpkg/yarn/issues/6988
yarn || yarn
# Find all linked node_modules and dereference them so that there are no broken
# symlinks if the target-app is copied somewhere. (Don't use
# `cp -rL apps/target-app some/destination` because then it also dereferences
# node_modules/.bin/* and thus breaks them.)
cd apps/target-app/node_modules
for f in $(find . -maxdepth 1 -type l)
do
l=$(readlink -f $f) && rm $f && cp -rf $l $f
done
Now apps/target-app can be copied and used as a standalone app.
I would not recommend it for production. It is slow (because it installs dependencies for the whole monorepo) and not really reliable (because there may be additional issues with symlinks).
You may try
yarn workspace @my-monorepo/my-package-in-workspace install -- --prod
