I am trying to dockerize my backend server.
My stack is Node.js/NestJS with Redis and Postgres.
Here is my Dockerfile:
FROM node:15
WORKDIR /usr/src/app
COPY package*.json ./
COPY tsconfig.json ./
COPY wait-for-it.sh ./
COPY . .
RUN npm install -g npm@7.22.0
RUN npm install
RUN npm run build
RUN chmod +x ./wait-for-it.sh .
EXPOSE 3333
CMD [ "sh", "-c", "npm run start:prod"]
And here is my docker-compose file:
version: '3.2'
services:
  redis-service:
    image: "redis:alpine"
    container_name: redis-container
    ports:
      - 127.0.0.1:6379:6379
    expose:
      - 6379
  postgres:
    image: postgres:14.1-alpine
    container_name: postgres-container
    restart: always
    environment:
      - POSTGRES_USER=root
      - POSTGRES_PASSWORD=1234
      - DB_NAME=db
    ports:
      - 127.0.0.1:5432:5432
    expose:
      - 5432
    volumes:
      - db:/var/lib/postgresql/data
  oms-be:
    build: .
    ports:
      - 3333:3333
    links:
      - postgres
      - redis-service
    depends_on:
      - postgres
      - redis-service
    environment:
      - DB_HOST=postgres
      - POSTGRES_PASSWORD=1234
      - POSTGRES_USER=root
      - AUTH_REDIS_HOST=redis-service
      - DB_NAME=db
    command: ["./wait-for-it.sh", "postgres:5432", "--", "sh", "-c", "npm run start:prod"]
volumes:
  db:
    driver: local
However, when I run docker-compose up, I get this error:
Attaching to oms-be-oms-be-1, postgres-container, redis-container
redis-container | 1:C 05 Jun 2022 00:35:16.730 # oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0Oo
redis-container | 1:C 05 Jun 2022 00:35:16.730 # Redis version=7.0.0, bits=64, commit=00000000, modified=0, pid=1, just started
redis-container | 1:C 05 Jun 2022 00:35:16.730 # Warning: no config file specified, using the default config. In order to specify a config file use redis-server /path/to/redis.conf
redis-container | 1:M 05 Jun 2022 00:35:16.731 * monotonic clock: POSIX clock_gettime
redis-container | 1:M 05 Jun 2022 00:35:16.731 * Running mode=standalone, port=6379.
redis-container | 1:M 05 Jun 2022 00:35:16.731 # Server initialized
redis-container | 1:M 05 Jun 2022 00:35:16.731 # WARNING overcommit_memory is set to 0! Background save may fail under low memory condition. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect.
redis-container | 1:M 05 Jun 2022 00:35:16.732 * The AOF directory appendonlydir doesn't exist
redis-container | 1:M 05 Jun 2022 00:35:16.732 * Ready to accept connections
postgres-container |
postgres-container | PostgreSQL Database directory appears to contain a database; Skipping initialization
postgres-container |
postgres-container | 2022-06-05 00:35:16.824 UTC [1] LOG: starting PostgreSQL 14.1 on x86_64-pc-linux-musl, compiled by gcc (Alpine 10.3.1_git20211027) 10.3.1 20211027, 64-bit
postgres-container | 2022-06-05 00:35:16.824 UTC [1] LOG: listening on IPv4 address "0.0.0.0", port 5432
postgres-container | 2022-06-05 00:35:16.824 UTC [1] LOG: listening on IPv6 address "::", port 5432
postgres-container | 2022-06-05 00:35:16.827 UTC [1] LOG: listening on Unix socket "/var/run/postgresql/.s.PGSQL.5432"
postgres-container | 2022-06-05 00:35:16.833 UTC [21] LOG: database system was shut down at 2022-06-05 00:34:36 UTC
postgres-container | 2022-06-05 00:35:16.836 UTC [1] LOG: database system is ready to accept connections
oms-be-oms-be-1 | internal/modules/cjs/loader.js:905
oms-be-oms-be-1 | throw err;
oms-be-oms-be-1 | ^
oms-be-oms-be-1 |
oms-be-oms-be-1 | Error: Cannot find module '/app/wait-for-it.sh"'
oms-be-oms-be-1 | at Function.Module._resolveFilename (internal/modules/cjs/loader.js:902:15)
oms-be-oms-be-1 | at Function.Module._load (internal/modules/cjs/loader.js:746:27)
oms-be-oms-be-1 | at Function.executeUserEntryPoint [as runMain] (internal/modules/run_main.js:75:12)
oms-be-oms-be-1 | at internal/main/run_main_module.js:17:47 {
oms-be-oms-be-1 | code: 'MODULE_NOT_FOUND',
oms-be-oms-be-1 | requireStack: []
oms-be-oms-be-1 | }
oms-be-oms-be-1 exited with code 1
I tried to build it without wait-for-it.sh, and it complained that the server could not connect to the Postgres DB and Redis, so I added the wait-for-it.sh file to make the server wait until Redis and Postgres are up. But then I got the error above.
Can anyone tell me what I am doing wrong?
I've simplified your Dockerfile and docker-compose.yaml in order to test things out on my system. I have this package.json:
{
  "name": "example",
  "version": "1.0.0",
  "description": "",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1",
    "build": "echo \"Example build command\"",
    "start:prod": "sleep inf"
  },
  "author": "",
  "license": "ISC"
}
And this Dockerfile:
FROM node:15
WORKDIR /usr/src/app
COPY package*.json ./
COPY wait-for-it.sh ./
RUN chmod +x ./wait-for-it.sh .
RUN npm install
RUN npm run build
EXPOSE 3333
CMD [ "sh", "-c", "npm run start:prod"]
And this docker-compose.yaml:
version: '3.2'
services:
  postgres:
    image: docker.io/postgres:14
    environment:
      POSTGRES_PASSWORD: secret
  oms-be:
    build: .
    ports:
      - 3333:3333
    command: [./wait-for-it.sh", "postgres:5432", "--", "sh", "-c", "npm run start:prod"]
Note that the command: on the final line is missing its opening quote. If I try to bring this up using docker-compose up, I see:
oms-be_1 | node:internal/modules/cjs/loader:927
oms-be_1 | throw err;
oms-be_1 | ^
oms-be_1 |
oms-be_1 | Error: Cannot find module '/usr/src/app/wait-for-it.sh"'
oms-be_1 | at Function.Module._resolveFilename (node:internal/modules/cjs/loader:924:15)
oms-be_1 | at Function.Module._load (node:internal/modules/cjs/loader:769:27)
oms-be_1 | at Function.executeUserEntryPoint [as runMain] (node:internal/modules/run_main:76:12)
oms-be_1 | at node:internal/main/run_main_module:17:47 {
oms-be_1 | code: 'MODULE_NOT_FOUND',
oms-be_1 | requireStack: []
oms-be_1 | }
If I correct the syntax so that we have:
version: '3.2'
services:
  postgres:
    image: docker.io/postgres:14
    environment:
      POSTGRES_PASSWORD: secret
  oms-be:
    build: .
    ports:
      - 3333:3333
    command: ["./wait-for-it.sh", "postgres:5432", "--", "sh", "-c", "npm run start:prod"]
Then it runs successfully:
oms-be_1 | wait-for-it.sh: waiting 15 seconds for postgres:5432
oms-be_1 | wait-for-it.sh: postgres:5432 is available after 0 seconds
oms-be_1 |
oms-be_1  | > example@1.0.0 start:prod
oms-be_1 | > sleep inf
oms-be_1 |
The difference in behavior is due to the ENTRYPOINT script in the underlying node:15 image, which includes this logic:
if [ "${1#-}" != "${1}" ] || [ -z "$(command -v "${1}")" ]; then
set -- node "$#"
fi
That says, essentially:
IF the first parameter starts with -
OR there is no command matching $1
THEN prepend node to the arguments and run that instead
With the missing ", you end up with a first argument (./wait-for-it.sh") that doesn't match any valid command, so the entrypoint falls back to node, which is why you see node trying to load the wait-for-it.sh script as a module.
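You can reproduce that fallback directly; here is a minimal sketch, assuming only a local Docker daemon (the broken script name is hypothetical):
docker run --rm node:15 ls /usr/src          # 'ls' is a real command, so it runs unchanged
docker run --rm node:15 './nope.sh"'         # not a command, so the entrypoint runs
                                             # node './nope.sh"' and node fails with
                                             # MODULE_NOT_FOUND, just like the error above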
Related
This issue has to do with the fact that the file exists on the backend container but not the postgres container. How could I transfer the file between containers automatically?
I am currently trying to execute the following script:
COPY climates(
  station_id,
  date,
  element,
  data_value,
  m_flag,
  q_flag,
  s_flag,
  obs_time
)
FROM '/usr/api/2017.csv'
DELIMITER ','
CSV HEADER;
within a docker container running a sequelize backend connecting to a postgres:14.1-alpine container.
The following error is returned:
db_1 | 2022-08-30 04:23:58.358 UTC [29] ERROR: could not open file "/usr/api/2017.csv" for reading: No such file or directory
db_1 | 2022-08-30 04:23:58.358 UTC [29] HINT: COPY FROM instructs the PostgreSQL server process to read a file. You may want a client-side facility such as psql's \copy.
db_1 | 2022-08-30 04:23:58.358 UTC [29] STATEMENT: COPY climates(
db_1 | station_id,
db_1 | date,
db_1 | element,
db_1 | data_value,
db_1 | m_flag,
db_1 | q_flag,
db_1 | s_flag,
db_1 | obs_time
db_1 | )
db_1 | FROM '/usr/api/2017.csv'
db_1 | DELIMITER ','
db_1 | CSV HEADER;
ebapi | Unable to connect to the database: MigrationError: Migration 20220829_02_populate_table.js (up) failed: Original error: could not open file "/usr/api/2017.csv" for reading: No such file or directory
ebapi | at /usr/api/node_modules/umzug/lib/umzug.js:151:27
ebapi | at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
ebapi | at async Umzug.runCommand (/usr/api/node_modules/umzug/lib/umzug.js:107:20)
ebapi | ... 2 lines matching cause stack trace ...
ebapi | at async start (/usr/api/index.js:14:3) {
ebapi | cause: Error
ebapi | at Query.run (/usr/api/node_modules/sequelize/lib/dialects/postgres/query.js:50:25)
ebapi | at /usr/api/node_modules/sequelize/lib/sequelize.js:311:28
ebapi | at process.processTicksAndRejections (node:internal/process/task_queues:95:5)
ebapi | at async Object.up (/usr/api/migrations/20220829_02_populate_table.js:10:5)
ebapi | at async /usr/api/node_modules/umzug/lib/umzug.js:148:21
ebapi | at async Umzug.runCommand (/usr/api/node_modules/umzug/lib/umzug.js:107:20)
ebapi | at async runMigrations (/usr/api/util/db.js:52:22)
ebapi | at async connectToDatabase (/usr/api/util/db.js:32:5)
ebapi | at async start (/usr/api/index.js:14:3) {
ebapi | name: 'SequelizeDatabaseError',
...
Here is my docker-compose.yml
# set up a postgres database
version: "3.8"
services:
  db:
    image: postgres:14.1-alpine
    restart: always
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
    ports:
      - "5432:5432"
    volumes:
      - db:/var/lib/postgresql/data
      - ./db/init.sql:/docker-entrypoint-initdb.d/create_tables.sql
  api:
    container_name: ebapi
    build:
      context: ./energybot
    depends_on:
      - db
    ports:
      - 3001:3001
    environment:
      DATABASE_URL: postgres://postgres:postgres@db:5432/postgres
      DB_HOST: db
      DB_PORT: 5432
      DB_USER: postgres
      DB_PASSWORD: postgres
      DB_NAME: postgres
    links:
      - db
    volumes:
      - "./energybot:/usr/api"
volumes:
  db:
    driver: local
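Per the HINT in the error output, COPY ... FROM is executed by the PostgreSQL server process, so /usr/api/2017.csv must exist inside the db container; mounting it only into the api container (via ./energybot:/usr/api) is not enough. A minimal sketch of one fix, assuming the CSV lives at ./energybot/2017.csv on the host (that path is an assumption), is to bind-mount the file into the db service as well:
  db:
    image: postgres:14.1-alpine
    volumes:
      - db:/var/lib/postgresql/data
      - ./db/init.sql:/docker-entrypoint-initdb.d/create_tables.sql
      # hypothetical host path, mounted where the server-side COPY looks for it
      - ./energybot/2017.csv:/usr/api/2017.csv
Alternatively, psql's \copy (as the HINT suggests) reads the file client-side, so it would not require sharing the file with the db container.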
I am trying to build a NestJS app with Prisma and PostgreSQL. I want to use Docker; however, I get an error when I send a request to the backend.
Here is my Dockerfile:
FROM node:14 AS builder
WORKDIR /app
COPY package*.json ./
COPY prisma ./prisma/
RUN npm install
RUN npx prisma generate
COPY . .
RUN npm run build
FROM node:14
COPY --from=builder /app/node_modules ./node_modules
COPY --from=builder /app/package*.json ./
COPY --from=builder /app/dist ./dist
EXPOSE 3000
CMD [ "npm", "run", "start:prod" ]
Here is my docker-compose.yml
version: '3.8'
services:
  nest-api:
    container_name: nest-api
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - 3000:3000
    depends_on:
      - postgres
    env_file:
      - .env
  postgres:
    image: postgres:13
    container_name: postgres
    restart: always
    ports:
      - 5432:5432
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: task-management
    env_file:
      - .env
Here is my schema.prisma
// This is your Prisma schema file,
// learn more about it in the docs: https://pris.ly/d/prisma-schema
generator client {
  provider = "prisma-client-js"
}

datasource db {
  provider = "postgresql"
  url      = env("DATABASE_URL")
  // url   = "postgresql://postgres:postgres@localhost:5432/task-management?schema=public"
}

model Task {
  id          Int        @id @default(autoincrement())
  title       String
  description String
  status      TaskStatus @default(OPEN)
}

enum TaskStatus {
  OPEN
  IN_PRO
  DOooNE
}
Here is the .env
# Environment variables declared in this file are automatically made available to Prisma.
# See the documentation for more detail: https://pris.ly/d/prisma-schema#using-environment-variables
# Prisma supports the native connection string format for PostgreSQL, MySQL, SQLite, SQL Server and MongoDB (Preview).
# See the documentation for all the connection string options: https://pris.ly/d/connection-strings
DATABASE_URL=postgresql://postgres:postgres@postgres:5432/task-management?schema=public
After I run the command docker-compose up, everything is fine. However, when I send a request to the app, I get the following error:
nest-api | [Nest] 19 - 11/02/2021, 5:52:43 AM ERROR [ExceptionsHandler]
nest-api | Invalid `this.prisma.task.create()` invocation in
nest-api | /dist/tasks/tasks.service.js:29:33
nest-api |
nest-api | 26 return found;
nest-api | 27 }
nest-api | 28 async creatTask(data) {
nest-api | → 29 return this.prisma.task.create(
nest-api | The table `public.Task` does not exist in the current database.
nest-api | Error:
nest-api | Invalid `this.prisma.task.create()` invocation in
nest-api | /dist/tasks/tasks.service.js:29:33
nest-api |
nest-api | 26 return found;
nest-api | 27 }
nest-api | 28 async creatTask(data) {
nest-api | → 29 return this.prisma.task.create(
nest-api | The table `public.Task` does not exist in the current database.
nest-api | at cb (/node_modules/@prisma/client/runtime/index.js:38537:17)
nest-api | at async /node_modules/@nestjs/core/router/router-execution-context.js:46:28
nest-api | at async /node_modules/@nestjs/core/router/router-proxy.js:9:17
What changes should I make in the docker file to solve the problem?
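"The table public.Task does not exist" usually means no Prisma migrations were ever applied to the database the container talks to. A sketch of one common fix, assuming your migrations are committed under prisma/migrations, is to ship the prisma folder in the final stage and run prisma migrate deploy before starting the app:
FROM node:14
COPY --from=builder /app/node_modules ./node_modules
COPY --from=builder /app/package*.json ./
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/prisma ./prisma
EXPOSE 3000
CMD [ "sh", "-c", "npx prisma migrate deploy && npm run start:prod" ]
If there are no migrations yet, running npx prisma migrate dev --name init once against the dockerized database would create both the migration files and the Task table.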
Kubernetes newbie here.
I have some strange Skaffold/Kubernetes behavior. I'm working in Google Cloud, but I've switched to a local environment just to test, and it's the same, so it's probably me who's doing something wrong. The problem is that although I see Skaffold syncing changes, those changes aren't reflected: all the files inside the pods are the old ones.
Skaffold.yaml:
apiVersion: skaffold/v2alpha3
kind: Config
deploy:
  kubectl:
    manifests:
      - ./infra/k8s/*
build:
  # local:
  #   push: false
  googleCloudBuild:
    projectId: ts-maps-286111
  artifacts:
    - image: us.gcr.io/ts-maps-286111/auth
      context: auth
      docker:
        dockerfile: Dockerfile
      sync:
        manual:
          - src: 'src/**/*.ts'
            dest: .
    - image: us.gcr.io/ts-maps-286111/client
      context: client
      docker:
        dockerfile: Dockerfile
      sync:
        manual:
          - src: '**/*.js'
            dest: .
    - image: us.gcr.io/ts-maps-286111/tickets
      context: tickets
      docker:
        dockerfile: Dockerfile
      sync:
        manual:
          - src: 'src/**/*.ts'
            dest: .
    - image: us.gcr.io/ts-maps-286111/orders
      context: orders
      docker:
        dockerfile: Dockerfile
      sync:
        manual:
          - src: 'src/**/*.ts'
            dest: .
    - image: us.gcr.io/ts-maps-286111/expiration
      context: expiration
      docker:
        dockerfile: Dockerfile
      sync:
        manual:
          - src: 'src/**/*.ts'
            dest: .
When a file inside one of the directories is changed, I see the following logs:
time="2020-09-05T01:24:06+03:00" level=debug msg="Change detected notify.Write: \"F:\\projects\\lrn_microservices\\complex\\expiration\\src\\index.ts\""
time="2020-09-05T01:24:06+03:00" level=debug msg="Change detected notify.Write: \"F:\\projects\\lrn_microservices\\complex\\expiration\\src\\index.ts\""
time="2020-09-05T01:24:06+03:00" level=debug msg="Change detected notify.Write: \"F:\\projects\\lrn_microservices\\complex\\expiration\\src\\index.ts\""
time="2020-09-05T01:24:06+03:00" level=debug msg="Change detected notify.Write: \"F:\\projects\\lrn_microservices\\complex\\expiration\\src\\index.ts\""
time="2020-09-05T01:24:06+03:00" level=debug msg="Change detected notify.Write: \"F:\\projects\\lrn_microservices\\complex\\expiration\\src\\index.ts\""
time="2020-09-05T01:24:06+03:00" level=debug msg="Change detected notify.Write: \"F:\\projects\\lrn_microservices\\complex\\expiration\\src\\index.ts\""
time="2020-09-05T01:24:07+03:00" level=debug msg="Found dependencies for dockerfile: [{package.json /app true} {. /app true}]"
time="2020-09-05T01:24:07+03:00" level=debug msg="Skipping excluded path: node_modules"
time="2020-09-05T01:24:07+03:00" level=debug msg="Found dependencies for dockerfile: [{package.json /app true} {. /app true}]"
time="2020-09-05T01:24:07+03:00" level=debug msg="Skipping excluded path: .next"
time="2020-09-05T01:24:07+03:00" level=debug msg="Skipping excluded path: node_modules"
time="2020-09-05T01:24:07+03:00" level=debug msg="Found dependencies for dockerfile: [{package.json /app true} {. /app true}]"
time="2020-09-05T01:24:07+03:00" level=debug msg="Skipping excluded path: node_modules"
time="2020-09-05T01:24:07+03:00" level=debug msg="Found dependencies for dockerfile: [{package.json /app true} {. /app true}]"
time="2020-09-05T01:24:07+03:00" level=debug msg="Skipping excluded path: node_modules"
time="2020-09-05T01:24:07+03:00" level=debug msg="Found dependencies for dockerfile: [{package.json /app true} {. /app true}]"
time="2020-09-05T01:24:07+03:00" level=debug msg="Skipping excluded path: node_modules"
time="2020-09-05T01:24:07+03:00" level=info msg="files modified: [expiration\\src\\index.ts]"
Syncing 1 files for us.gcr.io/ts-maps-286111/expiration:2aae7ff-dirty#sha256:2e31caedf3d9b2bcb2ea5693f8e22478a9d6caa21d1a478df5ff8ebcf562573e
time="2020-09-05T01:24:07+03:00" level=info msg="Copying files: map[expiration\\src\\index.ts:[/app/src/index.ts]] to us.gcr.io/ts-maps-286111/expiration:2aae7ff-dirty#sha256:2e31caedf3d9b2bcb2ea5693f8e22478a9d6caa21d1a478df5ff8ebcf562573e"
time="2020-09-05T01:24:07+03:00" level=debug msg="getting client config for kubeContext: ``"
time="2020-09-05T01:24:07+03:00" level=debug msg="Running command: [kubectl --context gke_ts-maps-286111_europe-west3-a_ticketing-dev exec expiration-depl-5cb997d597-p49lv --namespace default -c expiration -i -- tar xmf - -C / --no-same-owner]"
time="2020-09-05T01:24:09+03:00" level=debug msg="Command output: [], stderr: tar: removing leading '/' from member names\n"
Watching for changes...
time="2020-09-05T01:24:11+03:00" level=info msg="Streaming logs from pod: expiration-depl-5cb997d597-p49lv container: expiration"
time="2020-09-05T01:24:11+03:00" level=debug msg="Running command: [kubectl --context gke_ts-maps-286111_europe-west3-a_ticketing-dev logs --since=114s -f expiration-depl-5cb997d597-p49lv -c expiration --namespace default]"
[expiration]
[expiration] > expiration#1.0.0 start /app
[expiration] > ts-node-dev --watch src src/index.ts
[expiration]
[expiration] ts-node-dev ver. 1.0.0-pre.62 (using ts-node ver. 8.10.2, typescript ver. 3.9.7)
[expiration] starting expiration!kdd
[expiration] Connected to NATS!
The Node.js server inside the pod restarts. Sometimes I see this line, sometimes not; the overall result is the same:
[expiration] [INFO] 22:23:42 Restarting: src/index.ts has been modified
But no changes are applied. If I cat the changed file inside a pod, it's the old version; if I delete a pod, it starts again with the old version.
My folder structure:
+---auth
| \---src
| +---models
| +---routes
| | \---__test__
| +---services
| \---test
+---client
| +---.next
| | +---cache
| | | \---next-babel-loader
| | +---server
| | | \---pages
| | | +---auth
| | | \---next
| | | \---dist
| | | \---pages
| | \---static
| | +---chunks
| | | \---pages
| | | +---auth
| | | \---next
| | | \---dist
| | | \---pages
| | +---development
| | \---webpack
| | \---pages
| | \---auth
| +---api
| +---components
| +---hooks
| \---pages
| \---auth
+---common
| +---build
| | +---errors
| | +---events
| | | \---types
| | \---middlewares
| \---src
| +---errors
| +---events
| | \---types
| \---middlewares
+---config
+---expiration
| \---src
| +---events
| | +---listeners
| | \---publishers
| +---queue
| \---__mocks__
+---infra
| \---k8s
+---orders
| \---src
| +---events
| | +---listeners
| | | \---__test__
| | \---publishers
| +---models
| +---routes
| | \---__test__
| +---test
| \---__mocks__
+---payment
\---tickets
\---src
+---events
| +---listeners
| | \---__test__
| \---publishers
+---models
| \---__test__
+---routes
| \---__test__
+---test
\---__mocks__
Would be grateful for any help!
What worked for me was using the --poll flag with ts-node-dev.
My script looks like this:
"start": "ts-node-dev --respawn --poll --inspect --exit-child src/index.ts"
For your start script, try adding a polling flag. For example, if your start script is "start": "nodemon src/index.js", change it to "nodemon --legacy-watch src/index.js" (nodemon's polling flag is --legacy-watch / -L; it has no --poll option).
It looks like there's no major error in the logs; my guess is that the files are actually being put in another directory. You can try running, inside the container:
find / -name "index.ts"
to see whether the file landed somewhere else.
Another thing to check is the WORKDIR value in your container(s). Check what directory you land in when you run:
kubectl exec -it <pod-name> -c <container-name> -- sh
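For example, with the pod name from the logs above (the /app/src/index.ts path comes from the sync output, which copied the file there):
kubectl exec -it expiration-depl-5cb997d597-p49lv -c expiration -- pwd
kubectl exec -it expiration-depl-5cb997d597-p49lv -c expiration -- cat /app/src/index.ts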
✌️
This is the error I get when I run the command docker-compose up. This is my Node.js application, and I'm using MongoDB. My goal is to containerize this application and publish it on Docker Hub.
Creating mongo ... done
Creating app ... done
Attaching to mongo, app
mongo | 2020-07-13T05:02:33.356+0000 I CONTROL [main] Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'
mongo | 2020-07-13T05:02:33.360+0000 W ASIO [main] No TransportLayer configured during NetworkInterface startup
mongo | 2020-07-13T05:02:33.360+0000 I CONTROL [initandlisten] MongoDB starting : pid=1 port=27017 dbpath=/data/db 64-bit host=03ce29ac0ecc
mongo | 2020-07-13T05:02:33.360+0000 I CONTROL [initandlisten] db version v4.2.8
mongo | 2020-07-13T05:02:33.360+0000 I CONTROL [initandlisten] git version: 43d25964249164d76d5e04dd6cf38f6111e21f5f
mongo | 2020-07-13T05:02:33.360+0000 I CONTROL [initandlisten] OpenSSL version: OpenSSL 1.1.1 11 Sep 2018
mongo | 2020-07-13T05:02:33.360+0000 I CONTROL [initandlisten] allocator: tcmalloc
mongo | 2020-07-13T05:02:33.360+0000 I CONTROL [initandlisten] modules: none
mongo | 2020-07-13T05:02:33.360+0000 I CONTROL [initandlisten] build environment:
mongo | 2020-07-13T05:02:33.360+0000 I CONTROL [initandlisten] distmod: ubuntu1804
mongo | 2020-07-13T05:02:33.360+0000 I CONTROL [initandlisten] distarch: x86_64
mongo | 2020-07-13T05:02:33.360+0000 I CONTROL [initandlisten] target_arch: x86_64
mongo | 2020-07-13T05:02:33.360+0000 I CONTROL [initandlisten] options: { net: { bindIp: "*" } }
mongo | 2020-07-13T05:02:33.361+0000 I STORAGE [initandlisten]
mongo | 2020-07-13T05:02:33.361+0000 I STORAGE [initandlisten] ** WARNING: Using the XFS filesystem is strongly recommended with the WiredTiger storage engine
mongo | 2020-07-13T05:02:33.361+0000 I STORAGE [initandlisten] ** See http://dochub.mongodb.org/core/prodnotes-filesystem
mongo | 2020-07-13T05:02:33.361+0000 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=471M,cache_overflow=(file_max=0M),session_max=33000,eviction=(threads_min=4,threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000,close_scan_interval=10,close_handle_minimum=250),statistics_log=(wait=0),verbose=[recovery_progress,checkpoint_progress],
mongo | 2020-07-13T05:02:33.863+0000 I STORAGE [initandlisten] WiredTiger message [1594616553:863974][1:0x7fa8ae84db00], txn-recover: Set global recovery timestamp: (0, 0)
mongo | 2020-07-13T05:02:33.885+0000 I RECOVERY [initandlisten] WiredTiger recoveryTimestamp. Ts: Timestamp(0, 0)
mongo | 2020-07-13T05:02:33.902+0000 I STORAGE [initandlisten] Timestamp monitor starting
mongo | 2020-07-13T05:02:33.910+0000 I CONTROL [initandlisten]
mongo | 2020-07-13T05:02:33.910+0000 I CONTROL [initandlisten] ** WARNING: Access control is not enabled for the database.
mongo | 2020-07-13T05:02:33.910+0000 I CONTROL [initandlisten] ** Read and write access to data and configuration is unrestricted.
mongo | 2020-07-13T05:02:33.910+0000 I CONTROL [initandlisten]
mongo | 2020-07-13T05:02:33.911+0000 I STORAGE [initandlisten] createCollection: admin.system.version with provided UUID: 1c3e3ef6-c303-4517-9613-82f840f58488 and options: { uuid: UUID("1c3e3ef6-c303-4517-9613-82f840f58488") }
mongo | 2020-07-13T05:02:33.935+0000 I INDEX [initandlisten] index build: done building index _id_ on ns admin.system.version
mongo | 2020-07-13T05:02:33.935+0000 I SHARDING [initandlisten] Marking collection admin.system.version as collection version: <unsharded>
mongo | 2020-07-13T05:02:33.935+0000 I COMMAND [initandlisten] setting featureCompatibilityVersion to 4.2
mongo | 2020-07-13T05:02:33.935+0000 I SHARDING [initandlisten] Marking collection local.system.replset as collection version: <unsharded>
mongo | 2020-07-13T05:02:33.935+0000 I STORAGE [initandlisten] Flow Control is enabled on this deployment.
mongo | 2020-07-13T05:02:33.935+0000 I SHARDING [initandlisten] Marking collection admin.system.roles as collection version: <unsharded>
mongo | 2020-07-13T05:02:33.935+0000 I STORAGE [initandlisten] createCollection: local.startup_log with generated UUID: 03ec4702-b65a-4f88-8080-09ab0b26a7a4 and options: { capped: true, size: 10485760 }
mongo | 2020-07-13T05:02:33.954+0000 I INDEX [initandlisten] index build: done building index _id_ on ns local.startup_log
mongo | 2020-07-13T05:02:33.955+0000 I SHARDING [initandlisten] Marking collection local.startup_log as collection version: <unsharded>
mongo | 2020-07-13T05:02:33.955+0000 I FTDC [initandlisten] Initializing full-time diagnostic data capture with directory '/data/db/diagnostic.data'
mongo | 2020-07-13T05:02:33.957+0000 I SHARDING [LogicalSessionCacheReap] Marking collection config.system.sessions as collection version: <unsharded>
mongo | 2020-07-13T05:02:33.957+0000 I NETWORK [listener] Listening on /tmp/mongodb-27017.sock
mongo | 2020-07-13T05:02:33.957+0000 I NETWORK [listener] Listening on 0.0.0.0
mongo | 2020-07-13T05:02:33.957+0000 I NETWORK [listener] waiting for connections on port 27017
mongo | 2020-07-13T05:02:33.964+0000 I CONTROL [LogicalSessionCacheReap] Sessions collection is not set up; waiting until next sessions reap interval: config.system.sessions does not exist
mongo | 2020-07-13T05:02:33.964+0000 I STORAGE [LogicalSessionCacheRefresh] createCollection: config.system.sessions with provided UUID: 4c715ea5-9f5f-41b3-9101-fd44ce5455a4 and options: { uuid: UUID("4c715ea5-9f5f-41b3-9101-fd44ce5455a4") }
mongo | 2020-07-13T05:02:33.980+0000 I INDEX [LogicalSessionCacheRefresh] index build: done building index _id_ on ns config.system.sessions
mongo | 2020-07-13T05:02:33.997+0000 I INDEX [LogicalSessionCacheRefresh] index build: starting on config.system.sessions properties: { v: 2, key: { lastUse: 1 }, name: "lsidTTLIndex", ns: "config.system.sessions", expireAfterSeconds: 1800 } using method: Hybrid
mongo | 2020-07-13T05:02:33.997+0000 I INDEX [LogicalSessionCacheRefresh] build may temporarily use up to 200 megabytes of RAM
mongo | 2020-07-13T05:02:33.997+0000 I INDEX [LogicalSessionCacheRefresh] index build: collection scan done. scanned 0 total records in 0 seconds
mongo | 2020-07-13T05:02:33.997+0000 I INDEX [LogicalSessionCacheRefresh] index build: inserted 0 keys from external sorter into index in 0 seconds
mongo | 2020-07-13T05:02:34.000+0000 I SHARDING [ftdc] Marking collection local.oplog.rs as collection version: <unsharded>
mongo | 2020-07-13T05:02:34.005+0000 I INDEX [LogicalSessionCacheRefresh] index build: done building index lsidTTLIndex on ns config.system.sessions
app |
app | > main-application@1.0.0 start /usr/src/app
app | > concurrently "npm run server" "npm run client"
app |
app | [1]
app | [1] > main-application@1.0.0 client /usr/src/app
app | [1] > npm start --prefix view
app | [1]
app | [0]
app | [0] > main-application@1.0.0 server /usr/src/app
app | [0] > nodemon mainserver.js
app | [0]
app | [0] [nodemon] 2.0.4
app | [0] [nodemon] to restart at any time, enter `rs`
app | [0] [nodemon] watching path(s): *.*
app | [0] [nodemon] watching extensions: js,mjs,json
app | [0] [nodemon] starting `node mainserver.js`
app | [1]
app | [1] > main@0.1.0 start /usr/src/app/view
app | [1] > react-scripts start
app | [1]
app | [1] sh: 1: react-scripts: not found
app | [1] npm ERR! code ELIFECYCLE
app | [1] npm ERR! syscall spawn
app | [1] npm ERR! file sh
app | [1] npm ERR! errno ENOENT
app | [1] npm ERR! main@0.1.0 start: `react-scripts start`
app | [1] npm ERR! spawn ENOENT
app | [1] npm ERR!
app | [1] npm ERR! Failed at the main@0.1.0 start script.
app | [1] npm ERR! This is probably not a problem with npm. There is likely additional logging output above.
app | [1] npm WARN Local package.json exists, but node_modules missing, did you mean to install?
app | [1]
app | [1] npm ERR! A complete log of this run can be found in:
app | [1] npm ERR! /root/.npm/_logs/2020-07-13T05_02_35_310Z-debug.log
app | [1] npm ERR! code ELIFECYCLE
app | [1] npm ERR! errno 1
app | [1] npm ERR! main-application@1.0.0 client: `npm start --prefix view`
app | [1] npm ERR! Exit status 1
app | [1] npm ERR!
app | npm ERR! Failed at the main-application@1.0.0 client script.
app | [1] npm ERR! This is probably not a problem with npm. There is likely additional logging output above.
app | [1]
app | [1] npm ERR! A complete log of this run can be found in:
app | [1] npm ERR! /root/.npm/_logs/2020-07-13T05_02_35_348Z-debug.log
app | [1] npm run client exited with code 1
app | [0] (node:98) DeprecationWarning: current Server Discovery and Monitoring engine is deprecated, and will be removed in a future version. To use the new Server Discover and Monitoring engine, pass option { useUnifiedTopology: true } to the MongoClient constructor.
app | [0] Server is running on port: 5000
mongo | 2020-07-13T05:02:36.010+0000 I NETWORK [listener] connection accepted from 172.19.0.3:45254 #1 (1 connection now open)
mongo | 2020-07-13T05:02:36.015+0000 I NETWORK [conn1] received client metadata from 172.19.0.3:45254 conn1: { driver: { name: "nodejs", version: "3.5.9" }, os: { type: "Linux", name: "linux", architecture: "x64", version: "4.19.76-linuxkit" }, platform: "'Node.js v10.21.0, LE (legacy)" }
app | [0] MongoDB Connected
Dockerfile
FROM node:10
WORKDIR /usr/src/app
COPY package*.json ./
RUN npm install
COPY . .
EXPOSE 3000
CMD ["npm", "start"]
docker-compose.yml
version: '3'
services:
  app:
    container_name: app
    restart: always
    build: .
    ports:
      - '80:3000'
    links:
      - mongo
  mongo:
    container_name: mongo
    image: mongo
    ports:
      - '27017:27017'
.dockerignore
node_modules
npm-debug.log
mainserver.js
var express = require('express')
var cors = require('cors')
var bodyParser = require('body-parser')
var app = express()
const mongoose = require('mongoose')
var port = process.env.PORT || 5000

app.use(bodyParser.json())
app.use(cors())
app.use(
  bodyParser.urlencoded({
    extended: false
  })
)

//copy and paste below into mongodb
const mongoURI = 'mongodb://mongo:27017/MainData'

mongoose
  .connect(
    mongoURI,
    { useNewUrlParser: true }
  )
  .then(() => console.log('MongoDB Connected'))
  .catch(err => console.log(err))

var Users = require('./controller/Users')
var Users2 = require('./controller/Users2')

app.use('/users', Users)
app.use('/users', Users2)

app.listen(port, function() {
  console.log('Server is running on port: ' + port)
})
package.json
{
  "name": "main-application",
  "version": "1.0.0",
  "description": "",
  "scripts": {
    "server": "nodemon mainserver.js",
    "client": "npm start --prefix view",
    "start": "concurrently \"npm run server\" \"npm run client\""
  },
  "keywords": [
    "nodejs",
    "jwt",
    "passport",
    "express"
  ],
  "author": "",
  "license": "ISC",
  "dependencies": {
    "alert": "^4.1.1",
    "bcrypt-nodejs": "0.0.3",
    "bcryptjs": "^2.4.3",
    "body-parser": "1.19.0",
    "compare": "^2.0.0",
    "concurrently": "^5.1.0",
    "cors": "^2.8.4",
    "dotenv": "^8.2.0",
    "express": "^4.16.3",
    "express-session": "^1.17.1",
    "express-validator": "^6.6.0",
    "generate-password": "^1.5.1",
    "jsonwebtoken": "^8.5.1",
    "latest-version": "^5.1.0",
    "mongodb": "^3.1.6",
    "mongoose": "^5.2.15",
    "nodemailer": "^6.4.8",
    "nodemon": "^2.0.3",
    "truffle": "^5.1.10"
  }
}
You have to install create-react-app globally in the Docker container, i.e., npm install -g create-react-app:
FROM node:10
RUN npm install -g create-react-app
WORKDIR /usr/src/app
COPY package*.json ./
RUN npm install
COPY . .
EXPOSE 3000
CMD ["npm", "start"]
I found the solution to this. In the root folder, run npm i react-scripts.
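Both answers work around the same underlying cause: react-scripts is a dependency of the React app in ./view, and the image only runs npm install against the root package.json (the npm WARN about a missing node_modules in the log points at this). A sketch of a more direct fix, assuming the client manifest is at view/package.json, is to install the client's dependencies in the image as well:
FROM node:10
WORKDIR /usr/src/app
COPY package*.json ./
RUN npm install
# install the client app's own dependencies, including react-scripts
COPY view/package*.json ./view/
RUN npm install --prefix view
COPY . .
EXPOSE 3000
CMD ["npm", "start"]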
I did find a few people with a slightly different setup but with the same issue, so I hope this doesn't feel like a duplicate question.
My setup is pretty simple and straightforward. I have a container for my node app and a container for my Postgres database. When I run docker-compose up, I can see in the logs that both containers are up and running. The problem is that my node app is not connecting to the database.
I can connect to the database using Postbird, and it works as it should.
If I create a docker container only for the database and run the node app directly on my machine, everything works fine. So it's not an issue with the DB or the app, but with the setup.
Here's some useful information.
Running a container just for the DB (it connects and works perfectly):
> vigna-backend@1.0.0 dev /Users/lucasbittar/Dropbox/Code/vigna/backend
> nodemon src/server.js
[nodemon] 2.0.2
[nodemon] to restart at any time, enter `rs`
[nodemon] watching dir(s): *.*
[nodemon] watching extensions: js,mjs,json
[nodemon] starting `node -r sucrase/register src/server.js`
Initializing database...
Connecting to DB -> vignadb | PORT: 5432
Executing (default): SELECT 1+1 AS result
Connection has been established successfully -> vignadb
Running a container for each using docker-compose:
Creating network "backend_default" with the default driver
Creating backend_db_1 ... done
Creating backend_app_1 ... done
Attaching to backend_db_1, backend_app_1
db_1 |
db_1 | PostgreSQL Database directory appears to contain a database; Skipping initialization
db_1 |
db_1 | 2020-07-24 13:23:32.875 UTC [1] LOG: starting PostgreSQL 12.1 (Debian 12.1-1.pgdg100+1) on x86_64-pc-linux-gnu, compiled by gcc (Debian 8.3.0-6) 8.3.0, 64-bit
db_1 | 2020-07-24 13:23:32.876 UTC [1] LOG: listening on IPv4 address "0.0.0.0", port 5432
db_1 | 2020-07-24 13:23:32.876 UTC [1] LOG: listening on IPv6 address "::", port 5432
db_1 | 2020-07-24 13:23:32.881 UTC [1] LOG: listening on Unix socket "/var/run/postgresql/.s.PGSQL.5432"
db_1 | 2020-07-24 13:23:32.955 UTC [27] LOG: database system was shut down at 2020-07-23 13:21:09 UTC
db_1 | 2020-07-24 13:23:32.999 UTC [1] LOG: database system is ready to accept connections
app_1 |
app_1 | > vigna-backend@1.0.0 dev /usr/app
app_1 | > npx sequelize db:migrate && npx sequelize db:seed:all && nodemon src/server.js
app_1 |
app_1 |
app_1 | Sequelize CLI [Node: 14.5.0, CLI: 5.5.1, ORM: 5.21.3]
app_1 |
app_1 | Loaded configuration file "src/config/database.js".
app_1 |
app_1 | Sequelize CLI [Node: 14.5.0, CLI: 5.5.1, ORM: 5.21.3]
app_1 |
app_1 | Loaded configuration file "src/config/database.js".
app_1 | [nodemon] 2.0.2
app_1 | [nodemon] to restart at any time, enter `rs`
app_1 | [nodemon] watching dir(s): *.*
app_1 | [nodemon] watching extensions: js,mjs,json
app_1 | [nodemon] starting `node -r sucrase/register src/server.js`
app_1 | Initializing database...
app_1 | Connecting to DB -> vignadb | PORT: 5432
My database class:
class Database {
  constructor() {
    console.log('Initializing database...');
    this.init();
  }

  async init() {
    let retries = 5;
    while (retries) {
      console.log(`Connecting to DB -> ${databaseConfig.database} | PORT: ${databaseConfig.port}`);
      const sequelize = new Sequelize(databaseConfig);
      try {
        await sequelize.authenticate();
        console.log(`Connection has been established successfully -> ${databaseConfig.database}`);
        models
          .map(model => model.init(sequelize))
          .map(model => model.associate && model.associate(sequelize.models));
        break;
      } catch (err) {
        console.log(`Error: ${err.message}`);
        retries -= 1;
        console.log(`Retries left: ${retries}`);
        // Wait 5 seconds before trying again
        await new Promise(res => setTimeout(res, 5000));
      }
    }
  }
}
Dockerfile:
FROM node:alpine
WORKDIR /usr/app
COPY package*.json ./
RUN npm install
COPY . .
EXPOSE 3333
CMD ["npm", "start"]
docker-compose.yml:
version: "3"
services:
db:
image: postgres
restart: always
environment:
POSTGRES_PASSWORD: postgres
POSTGRES_USER: postgres
POSTGRES_DB: vignadb
volumes:
- ./pgdata:/var/lib/postgresql/data
ports:
- "5432:5432"
app:
build: .
depends_on:
- db
ports:
- "3333:3333"
volumes:
- .:/usr/app
command: npm run dev
package.json (scripts only):
"scripts": {
  "dev-old": "nodemon src/server.js",
  "dev": "npx sequelize db:migrate && npx sequelize db:seed:all && nodemon src/server.js",
  "build": "sucrase ./src -d ./dist --transforms imports",
  "start": "node dist/server.js"
},
.env:
# Database
DB_HOST=db
DB_USER=postgres
DB_PASS=postgres
DB_NAME=vignadb
DB_PORT=5432
database config:
require('dotenv/config');

module.exports = {
  dialect: 'postgres',
  host: process.env.DB_HOST,
  username: process.env.DB_USER,
  password: process.env.DB_PASS,
  database: process.env.DB_NAME,
  port: process.env.DB_PORT,
  define: {
    timestamp: true,
    underscored: true,
    underscoredAll: true,
  },
};
I know I'm messing something up; I just don't know where.
Let me know if I can provide more information.
Thanks!
You should put your two containers in the same network (https://docs.docker.com/compose/networking/) and use the db service name in your Node.js connection string.
Something like: postgres://db:5432/vignadb
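For what it's worth, Compose already attaches all services in one file to a shared default network, so db should be resolvable from the app container by service name. A sketch of making the network explicit (the name backend-net is hypothetical) would be:
services:
  db:
    networks:
      - backend-net
  app:
    networks:
      - backend-net
networks:
  backend-net:
    driver: bridge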