Docker cannot run Angular container on a different port than Node Express starts on - node.js

I'm trying to run an Angular app with SSR on Docker.
My Dockerfile is:
FROM node:10-alpine as build-stage
ENV PROD true
WORKDIR /app
COPY ./package.json ./package-lock.json /app/
RUN npm install
COPY . /app
RUN npm run build:ssr
# stage 2
FROM node:10-alpine
WORKDIR /app
# Copy dependency definitions
COPY --from=build-stage /app/package.json /app
# Get all the code needed to run the app
COPY --from=build-stage /app/dist /app/dist
ADD ./build.js /app
EXPOSE 4200
CMD ["/bin/sh", "-c", "node build.js && npm run serve:ssr"]
My docker-compose file:
version: "3.8"
services:
  frontend:
    build:
      context: ../../frontend/
      dockerfile: Dockerfile
    container_name: "frontend"
    ports:
      - 4500:4200
    networks:
      - external-network
networks:
  external-network:
    external: true
OK, now when I try to open localhost:4500, I'm getting this error:
frontend |
frontend | > frontend@0.0.3 serve:ssr /app
frontend | > node --max_old_space_size=4096 dist/apps/frontend/server/server
frontend |
frontend | Node Express server listening on http://localhost:4200
frontend | (node:26) [DEP0005] DeprecationWarning: Buffer() is deprecated due to security and usability issues. Please use the Buffer.alloc(), Buffer.allocUnsafe(), or Buffer.from() methods instead.
frontend | ERROR Failed to load the config file
frontend | ERROR { Error: Uncaught (in promise): Failed to load the config file
frontend | at resolvePromise (/app/dist/apps/frontend/server/server.js:1028:31)
frontend | at resolvePromise (/app/dist/apps/frontend/server/server.js:985:17)
frontend | at /app/dist/apps/frontend/server/server.js:1089:17
frontend | at ZoneDelegate.invokeTask (/app/dist/apps/frontend/server/server.js:599:31)
frontend | at Object.onInvokeTask (/app/dist/apps/frontend/server/server.js:192931:33)
frontend | at ZoneDelegate.invokeTask (/app/dist/apps/frontend/server/server.js:598:60)
frontend | at Zone.runTask (/app/dist/apps/frontend/server/server.js:371:47)
frontend | at drainMicroTaskQueue (/app/dist/apps/frontend/server/server.js:777:35)
frontend | at ZoneTask.invokeTask (/app/dist/apps/frontend/server/server.js:678:21)
frontend | at ZoneTask.invoke (/app/dist/apps/frontend/server/server.js:663:48)
frontend | rejection: 'Failed to load the config file',
frontend | promise:
frontend | ZoneAwarePromise [Promise] {
frontend | __zone_symbol__state: 0,
frontend | __zone_symbol__value: 'Failed to load the config file' },
frontend | zone:
frontend | Zone {
frontend | _parent:
frontend | Zone {
frontend | _parent: null,
frontend | _name: '<root>',
frontend | _properties: {},
frontend | _zoneDelegate: [ZoneDelegate] },
frontend | _name: 'angular',
frontend | _properties: { isAngularZone: true },
frontend | _zoneDelegate:
frontend | ZoneDelegate {
frontend | _taskCounts: [Object],
frontend | zone: [Circular],
frontend | _parentDelegate: [ZoneDelegate],
frontend | _forkZS: null,
frontend | _forkDlgt: null,
frontend | _forkCurrZone: [Zone],
frontend | _interceptZS: null,
frontend | _interceptDlgt: null,
frontend | _interceptCurrZone: [Zone],
frontend | _invokeZS: [Object],
frontend | _invokeDlgt: [ZoneDelegate],
frontend | _invokeCurrZone: [Circular],
frontend | _handleErrorZS: [Object],
frontend | _handleErrorDlgt: [ZoneDelegate],
frontend | _handleErrorCurrZone: [Circular],
frontend | _scheduleTaskZS: [Object],
frontend | _scheduleTaskDlgt: [ZoneDelegate],
frontend | _scheduleTaskCurrZone: [Circular],
frontend | _invokeTaskZS: [Object],
frontend | _invokeTaskDlgt: [ZoneDelegate],
frontend | _invokeTaskCurrZone: [Circular],
frontend | _cancelTaskZS: [Object],
frontend | _cancelTaskDlgt: [ZoneDelegate],
frontend | _cancelTaskCurrZone: [Circular],
frontend | _hasTaskZS: [Object],
frontend | _hasTaskDlgt: [ZoneDelegate],
frontend | _hasTaskDlgtOwner: [Circular],
frontend | _hasTaskCurrZone: [Circular] } },
frontend | task:
frontend | ZoneTask {
frontend | _zone:
frontend | Zone {
frontend | _parent: [Zone],
frontend | _name: 'angular',
frontend | _properties: [Object],
frontend | _zoneDelegate: [ZoneDelegate] },
frontend | runCount: 0,
frontend | _zoneDelegates: null,
frontend | _state: 'notScheduled',
frontend | type: 'microTask',
frontend | source: 'Promise.then',
frontend | data:
frontend | ZoneAwarePromise [Promise] {
frontend | __zone_symbol__state: 0,
frontend | __zone_symbol__value: 'Failed to load the config file' },
frontend | scheduleFn: undefined,
frontend | cancelFn: undefined,
frontend | callback: [Function],
frontend | invoke: [Function] } }
frontend | Failed to load the config file
But why can't I run it on another port?
When I run my docker-compose like this:
version: "3.8"
services:
  frontend:
    build:
      context: ../../frontend/
      dockerfile: Dockerfile
    container_name: "frontend"
    ports:
      - 4200:4200
    networks:
      - external-network
networks:
  external-network:
    external: true
everything runs perfectly.
Why can I only run the app on the default Node Express port?
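For context, the two numbers in ports: "4500:4200" are independent: the left side is what the host sees, the right side is where Express listens inside the container, and the server itself starts fine either way (the "listening on http://localhost:4200" line appears in both runs). Since the "Failed to load the config file" error shows up in the container's own logs during SSR, a plausible culprit is the config file being requested from an absolute URL that contains the public port: inside the container nothing listens on 4500, because that port only exists on the host. A minimal sketch of making the server's own port explicit and env-driven, assuming a standard Angular Universal Express entry point (PORT is a hypothetical variable you would wire through the Dockerfile or compose file):

// server.js (sketch) -- standard Angular Universal Express bootstrap, trimmed.
const express = require('express');

const app = express();
// ... Universal rendering engine setup elided ...

// Read the port from the environment instead of hard-coding 4200, so the
// container behaves the same no matter which host port it is mapped to.
const port = process.env.PORT || 4200;
app.listen(port, () => {
  console.log(`Node Express server listening on http://localhost:${port}`);
});

If the config URL is the issue, pointing it at a relative path (or at the container-internal port) rather than the public host port would avoid the mismatch entirely.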

Related

Differences in code between local project and Dockerized project break the app

I'm trying to dockerize my current pet project, in which I use Node.js (Express.js) as the backend, React as the frontend, and PostgreSQL as the database. On both the backend and the frontend I use TypeScript instead of JavaScript. I'm also using Prisma as the ORM for my database. I decided on a standard three-container architecture: one for the backend, one for the database, and one for the frontend app. My Dockerfiles are as follows:
Frontend's Dockerfile
FROM node:alpine
WORKDIR /usr/src/frontend
COPY package*.json ./
RUN npm install
COPY . .
EXPOSE 3000
CMD ["npm", "run", "start"]
Backend's Dockerfile
FROM node:lts
WORKDIR /usr/src/backend
COPY package*.json ./
RUN npm install
COPY . .
EXPOSE 8000
RUN npx prisma generate
CMD ["npm", "run", "dev"]
There's also a .dockerignore file in the backend folder:
node_modules/
and my docker-compose.yml looks like this:
version: '3.9'
services:
  db:
    image: 'postgres'
    ports:
      - '5432:5432'
    environment:
      POSTGRES_USER: 'postgres'
      POSTGRES_PASSWORD: 'postgres'
      POSTGRES_DB: 'hucuplant'
  server:
    build:
      context: ./backend_express
    ports:
      - "8000:8000"
    environment:
      DATABASE_URL: 'postgresql://postgres:postgres@localhost:5432/hucuplant?schema=public'
  client:
    build:
      context: ./frontend
    ports:
      - "3000:3000"
After doing docker-compose up --build everything starts well, but when I try to register a new user on my site I get the following error:
Error:
hucuplant-server-1 | Invalid `prisma.user.findUnique()` invocation in
hucuplant-server-1 | /usr/src/backend/src/routes/Auth.ts:44:57
hucuplant-server-1 |
hucuplant-server-1 | 41 auth.post("/register", async (req: Request, res: Response) => {
hucuplant-server-1 | 42 const { email, username, password } = req.body;
hucuplant-server-1 | 43
hucuplant-server-1 | → 44 const usernameResult: User | null = await prisma.user.findUnique({
hucuplant-server-1 | where: {
hucuplant-server-1 | ? username?: String,
hucuplant-server-1 | ? id?: Int,
hucuplant-server-1 | ? email?: String
hucuplant-server-1 | }
hucuplant-server-1 | })
However, the existing code in my Auth.ts file on line 44 looks like this:
auth.post("/register", async (req: Request, res: Response) => {
  const { email, username, password } = req.body;
  const usernameResult: User | null = await prisma.user.findUnique({
    where: {
      username: username,
    },
  });
When I run my project locally everything works just fine, but when I run the containerized app things break and behave quite differently. What is causing this, and how do I fix it?
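A note on that error shape: when Prisma prints the invocation with fields prefixed by ?, it is listing the unique fields the where argument could take, because the value it actually received was effectively empty, i.e. username was most likely undefined at runtime inside the container. A hedged sanity check just before the query (assuming Express's JSON body parsing is what should populate req.body):

// Sketch: confirm what actually reaches the handler in the container.
// If this prints undefined, the request body isn't being parsed there --
// for example app.use(express.json()) not running, or the frontend posting
// to a different host/port -- rather than anything in the Prisma query itself.
console.log('register body:', req.body);

If the body does arrive intact, the next suspect is a stale generated client; rebuilding with docker-compose build --no-cache rules that out.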

Problem about dockerizing a NestJS app with Prisma and PostgreSQL

I am trying to build a NestJS app with Prisma and PostgreSQL. I want to use Docker; however, I get an error when I send a request to the backend.
Here is my Dockerfile:
FROM node:14 AS builder
WORKDIR /app
COPY package*.json ./
COPY prisma ./prisma/
RUN npm install
RUN npx prisma generate
COPY . .
RUN npm run build
FROM node:14
COPY --from=builder /app/node_modules ./node_modules
COPY --from=builder /app/package*.json ./
COPY --from=builder /app/dist ./dist
EXPOSE 3000
CMD [ "npm", "run", "start:prod" ]
Here is my docker-compose.yml
version: '3.8'
services:
  nest-api:
    container_name: nest-api
    build:
      context: .
      dockerfile: Dockerfile
    ports:
      - 3000:3000
    depends_on:
      - postgres
    env_file:
      - .env
  postgres:
    image: postgres:13
    container_name: postgres
    restart: always
    ports:
      - 5432:5432
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: postgres
      POSTGRES_DB: task-management
    env_file:
      - .env
Here is my schema.prisma
// This is your Prisma schema file,
// learn more about it in the docs: https://pris.ly/d/prisma-schema

generator client {
  provider = "prisma-client-js"
}

datasource db {
  provider = "postgresql"
  url      = env("DATABASE_URL")
  // url   = "postgresql://postgres:postgres@localhost:5432/task-management?schema=public"
}

model Task {
  id          Int        @id @default(autoincrement())
  title       String
  description String
  status      TaskStatus @default(OPEN)
}

enum TaskStatus {
  OPEN
  IN_PRO
  DONE
}
Here is the .env
# Environment variables declared in this file are automatically made available to Prisma.
# See the documentation for more detail: https://pris.ly/d/prisma-schema#using-environment-variables
# Prisma supports the native connection string format for PostgreSQL, MySQL, SQLite, SQL Server and MongoDB (Preview).
# See the documentation for all the connection string options: https://pris.ly/d/connection-strings
DATABASE_URL=postgresql://postgres:postgres@postgres:5432/task-management?schema=public
After I run the command docker-compose up, everything is fine. However, when I send a request to the app, I get the following error:
nest-api | [Nest] 19 - 11/02/2021, 5:52:43 AM ERROR [ExceptionsHandler]
nest-api | Invalid `this.prisma.task.create()` invocation in
nest-api | /dist/tasks/tasks.service.js:29:33
nest-api |
nest-api | 26 return found;
nest-api | 27 }
nest-api | 28 async creatTask(data) {
nest-api | → 29 return this.prisma.task.create(
nest-api | The table `public.Task` does not exist in the current database.
nest-api | Error:
nest-api | Invalid `this.prisma.task.create()` invocation in
nest-api | /dist/tasks/tasks.service.js:29:33
nest-api |
nest-api | 26 return found;
nest-api | 27 }
nest-api | 28 async creatTask(data) {
nest-api | → 29 return this.prisma.task.create(
nest-api | The table `public.Task` does not exist in the current database.
nest-api | at cb (/node_modules/@prisma/client/runtime/index.js:38537:17)
nest-api | at async /node_modules/@nestjs/core/router/router-execution-context.js:46:28
nest-api | at async /node_modules/@nestjs/core/router/router-proxy.js:9:17
What changes should I make to the Dockerfile to solve the problem?
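The error itself says public.Task is missing, which suggests the schema was never applied to the Postgres container: npx prisma generate only builds the client, it does not create tables. One option, sketched below, is to apply committed migrations when the container starts; this assumes a prisma/migrations folder exists and the prisma CLI is available in the final image's node_modules (npx prisma db push is the migration-less alternative):

# Sketch: additions to the final stage. Copy the prisma folder so the CLI
# can see the schema and migrations, then migrate before booting the app.
COPY --from=builder /app/prisma ./prisma
CMD ["sh", "-c", "npx prisma migrate deploy && npm run start:prod"]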

Can't authenticate with mongoDB from docker-compose service

What I'm trying to do
I'm trying to set up a docker-compose definition, where I have a mongoDB container, and a nodeJS container that connects to it.
version: "3.9"
services:
  events-db:
    image: mongo
    volumes:
      - db-volume:/data/db
    environment:
      MONGO_INITDB_ROOT_USERNAME: $SANDBOX_DB_USER
      MONGO_INITDB_ROOT_PASSWORD: $SANDBOX_DB_PASS
      MONGO_INITDB_DATABASE: sandboxdb
  app:
    image: node:15.12.0
    user: node
    working_dir: /home/node/app
    volumes:
      - ./:/home/node/app:ro
    environment:
      MDB_CONNECTION: mongodb://$SANDBOX_DB_USER:$SANDBOX_DB_PASS@events-db:27017/sandboxdb
    command: node myapp
    depends_on:
      - events-db
volumes:
  db-volume:
Along with a .env file that declares the credentials (planning to use proper env variables when I deploy this to a production environment):
SANDBOX_DB_USER=myuser
SANDBOX_DB_PASS=myp4ss
Finally, my Node.js script, myapp.js, simply tries to connect, grab a reference to a collection, and insert a document:
require('dotenv').config()
const { MongoClient } = require('mongodb')

async function main () {
  console.log('Connecting')
  const client = new MongoClient(process.env.MDB_CONNECTION, {
    connectTimeoutMS: 10000,
    useUnifiedTopology: true,
  })
  await client.connect()
  const db = client.db()
  const events = db.collection('events')
  console.log('Inserting an event')
  await events.insertOne({
    type: 'foo',
    timestamp: new Date(),
  })
  console.log('Done.')
  process.exit(0)
}

if (require.main === module) {
  main()
}
Result
When I run docker-compose config I see the following output, so I would expect it to work:
$ docker-compose config
services:
  app:
    command: node myapp
    depends_on:
      events-db:
        condition: service_started
    environment:
      MDB_CONNECTION: mongodb://myuser:myp4ss@events-db:27017/sandboxdb
    image: node:15.12.0
    user: node
    volumes:
      - C:\workspace\dcsandbox:/home/node/app:ro
    working_dir: /home/node/app
  events-db:
    environment:
      MONGO_INITDB_DATABASE: sandboxdb
      MONGO_INITDB_ROOT_PASSWORD: myp4ss
      MONGO_INITDB_ROOT_USERNAME: myuser
    image: mongo
    volumes:
      - db-volume:/data/db:rw
version: '3.9'
volumes:
  db-volume: {}
However, when I run docker-compose up I see that my Node container is unable to connect to MongoDB to insert an event:
events-db_1 | {"t":{"$date":"2021-04-07T13:57:36.793+00:00"},"s":"I", "c":"NETWORK", "id":23016, "ctx":"listener","msg":"Waiting for connections","attr":{"port":27017,"ssl":"off"}}
app_1 | Connecting
events-db_1 | {"t":{"$date":"2021-04-07T13:57:38.811+00:00"},"s":"I", "c":"NETWORK", "id":22943, "ctx":"listener","msg":"Connection accepted","attr":{"remote":"172.27.0.3:34164","connectionId":1,"connectionCount":1}}
events-db_1 | {"t":{"$date":"2021-04-07T13:57:38.816+00:00"},"s":"I", "c":"NETWORK", "id":51800, "ctx":"conn1","msg":"client metadata","attr":{"remote":"172.27.0.3:34164","client":"conn1","doc":{"driver":{"name":"nodejs","version":"3.6.6"},"os":{"type":"Linux","name":"linux","architecture":"x64","version":"4.19.128-microsoft-standard"},"platform":"'Node.js v15.12.0, LE (unified)"}}}
events-db_1 | {"t":{"$date":"2021-04-07T13:57:38.820+00:00"},"s":"I", "c":"NETWORK", "id":22943, "ctx":"listener","msg":"Connection accepted","attr":{"remote":"172.27.0.3:34166","connectionId":2,"connectionCount":2}}
events-db_1 | {"t":{"$date":"2021-04-07T13:57:38.822+00:00"},"s":"I", "c":"NETWORK", "id":51800, "ctx":"conn2","msg":"client metadata","attr":{"remote":"172.27.0.3:34166","client":"conn2","doc":{"driver":{"name":"nodejs","version":"3.6.6"},"os":{"type":"Linux","name":"linux","architecture":"x64","version":"4.19.128-microsoft-standard"},"platform":"'Node.js v15.12.0, LE (unified)"}}}
events-db_1 | {"t":{"$date":"2021-04-07T13:57:38.822+00:00"},"s":"I", "c":"ACCESS", "id":20251, "ctx":"conn2","msg":"Supported SASL mechanisms requested for unknown user","attr":{"user":"myuser@sandboxdb"}}
events-db_1 | {"t":{"$date":"2021-04-07T13:57:38.823+00:00"},"s":"I", "c":"ACCESS", "id":20249, "ctx":"conn2","msg":"Authentication failed","attr":{"mechanism":"SCRAM-SHA-256","principalName":"myuser","authenticationDatabase":"sandboxdb","client":"172.27.0.3:34166","result":"UserNotFound: Could not find user \"myuser\" for db \"sandboxdb\""}}
events-db_1 | {"t":{"$date":"2021-04-07T13:57:38.824+00:00"},"s":"I", "c":"ACCESS", "id":20249, "ctx":"conn2","msg":"Authentication failed","attr":{"mechanism":"SCRAM-SHA-1","principalName":"myuser","authenticationDatabase":"sandboxdb","client":"172.27.0.3:34166","result":"UserNotFound: Could not find user \"myuser\" for db \"sandboxdb\""}}
events-db_1 | {"t":{"$date":"2021-04-07T13:57:38.826+00:00"},"s":"I", "c":"NETWORK", "id":22944, "ctx":"conn1","msg":"Connection ended","attr":{"remote":"172.27.0.3:34164","connectionId":1,"connectionCount":1}}
app_1 | /home/node/app/node_modules/mongodb/lib/cmap/connection.js:268
app_1 | callback(new MongoError(document));
app_1 | ^
app_1 |
app_1 | MongoError: Authentication failed.
app_1 | at MessageStream.messageHandler (/home/node/app/node_modules/mongodb/lib/cmap/connection.js:268:20)
app_1 | at MessageStream.emit (node:events:369:20)
app_1 | at processIncomingData (/home/node/app/node_modules/mongodb/lib/cmap/message_stream.js:144:12)
app_1 | at MessageStream._write (/home/node/app/node_modules/mongodb/lib/cmap/message_stream.js:42:5)
app_1 | at writeOrBuffer (node:internal/streams/writable:395:12)
app_1 | at MessageStream.Writable.write (node:internal/streams/writable:340:10)
app_1 | at Socket.ondata (node:internal/streams/readable:750:22)
app_1 | at Socket.emit (node:events:369:20)
app_1 | at addChunk (node:internal/streams/readable:313:12)
app_1 | at readableAddChunk (node:internal/streams/readable:288:9) {
app_1 | ok: 0,
app_1 | code: 18,
app_1 | codeName: 'AuthenticationFailed'
app_1 | }
events-db_1 | {"t":{"$date":"2021-04-07T13:57:38.832+00:00"},"s":"I", "c":"NETWORK", "id":22944, "ctx":"conn2","msg":"Connection ended","attr":{"remote":"172.27.0.3:34166","connectionId":2,"connectionCount":0}}
dcsandbox_app_1 exited with code 1
I've put the full output at https://pastebin.com/uNyJ6tiy
and the example code at this repo: https://github.com/akatechis/example-docker-compose-mongo-node-auth
After some more digging, I managed to figure it out. The issue is that the MONGO_INITDB_ROOT_USERNAME and MONGO_INITDB_ROOT_PASSWORD variables simply set the root user's credentials, and the MONGO_INITDB_DATABASE simply sets the initial database for scripts in /docker-entrypoint-initdb.d.
By default, the root user is added to the admin database, so by removing the /sandboxdb part of the connection string, I was able to have my node app authenticate against the admin DB as the root user.
While this doesn't quite accomplish what I wanted initially (to create a separate, non-root user for my database, and use that to authenticate), I think this puts me on the right path to using an init script to set up the user accounts I want to have.
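For reference, the official mongo image executes any *.js or *.sh files found in /docker-entrypoint-initdb.d on first initialization, with MONGO_INITDB_DATABASE preselected, so that's where the non-root user could be created. A minimal sketch, assuming a file named init-mongo.js mounted into that directory (the user name and password here are placeholders):

// init-mongo.js (sketch) -- runs once, on first container start, against
// the MONGO_INITDB_DATABASE (sandboxdb in this setup).
db.createUser({
  user: 'appuser',   // hypothetical non-root application user
  pwd: 'appp4ss',    // inject real values via the environment in practice
  roles: [{ role: 'readWrite', db: 'sandboxdb' }],
});

and in the compose file, under events-db:

volumes:
  - db-volume:/data/db
  - ./init-mongo.js:/docker-entrypoint-initdb.d/init-mongo.js:ro

Note that init scripts only run when the data directory is empty, so an existing db-volume would need to be removed first.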

Skaffold syncs files but pod doesn't refresh

Kubernetes newbie here.
I have some strange Skaffold/Kubernetes behavior. I'm working in Google Cloud, but I've switched to the local environment just for testing and it's the same, so it's probably me doing something wrong. The problem is that although I see Skaffold syncing changes, those changes aren't reflected: all the files inside the pods are the old ones.
Skaffold.yaml:
apiVersion: skaffold/v2alpha3
kind: Config
deploy:
  kubectl:
    manifests:
      - ./infra/k8s/*
build:
  # local:
  #   push: false
  googleCloudBuild:
    projectId: ts-maps-286111
  artifacts:
    - image: us.gcr.io/ts-maps-286111/auth
      context: auth
      docker:
        dockerfile: Dockerfile
      sync:
        manual:
          - src: 'src/**/*.ts'
            dest: .
    - image: us.gcr.io/ts-maps-286111/client
      context: client
      docker:
        dockerfile: Dockerfile
      sync:
        manual:
          - src: '**/*.js'
            dest: .
    - image: us.gcr.io/ts-maps-286111/tickets
      context: tickets
      docker:
        dockerfile: Dockerfile
      sync:
        manual:
          - src: 'src/**/*.ts'
            dest: .
    - image: us.gcr.io/ts-maps-286111/orders
      context: orders
      docker:
        dockerfile: Dockerfile
      sync:
        manual:
          - src: 'src/**/*.ts'
            dest: .
    - image: us.gcr.io/ts-maps-286111/expiration
      context: expiration
      docker:
        dockerfile: Dockerfile
      sync:
        manual:
          - src: 'src/**/*.ts'
            dest: .
When a file inside one of the directories is changed, I see the following logs:
time="2020-09-05T01:24:06+03:00" level=debug msg="Change detected notify.Write: \"F:\\projects\\lrn_microservices\\complex\\expiration\\src\\index.ts\""
time="2020-09-05T01:24:06+03:00" level=debug msg="Change detected notify.Write: \"F:\\projects\\lrn_microservices\\complex\\expiration\\src\\index.ts\""
time="2020-09-05T01:24:06+03:00" level=debug msg="Change detected notify.Write: \"F:\\projects\\lrn_microservices\\complex\\expiration\\src\\index.ts\""
time="2020-09-05T01:24:06+03:00" level=debug msg="Change detected notify.Write: \"F:\\projects\\lrn_microservices\\complex\\expiration\\src\\index.ts\""
time="2020-09-05T01:24:06+03:00" level=debug msg="Change detected notify.Write: \"F:\\projects\\lrn_microservices\\complex\\expiration\\src\\index.ts\""
time="2020-09-05T01:24:06+03:00" level=debug msg="Change detected notify.Write: \"F:\\projects\\lrn_microservices\\complex\\expiration\\src\\index.ts\""
time="2020-09-05T01:24:07+03:00" level=debug msg="Found dependencies for dockerfile: [{package.json /app true} {. /app true}]"
time="2020-09-05T01:24:07+03:00" level=debug msg="Skipping excluded path: node_modules"
time="2020-09-05T01:24:07+03:00" level=debug msg="Found dependencies for dockerfile: [{package.json /app true} {. /app true}]"
time="2020-09-05T01:24:07+03:00" level=debug msg="Skipping excluded path: .next"
time="2020-09-05T01:24:07+03:00" level=debug msg="Skipping excluded path: node_modules"
time="2020-09-05T01:24:07+03:00" level=debug msg="Found dependencies for dockerfile: [{package.json /app true} {. /app true}]"
time="2020-09-05T01:24:07+03:00" level=debug msg="Skipping excluded path: node_modules"
time="2020-09-05T01:24:07+03:00" level=debug msg="Found dependencies for dockerfile: [{package.json /app true} {. /app true}]"
time="2020-09-05T01:24:07+03:00" level=debug msg="Skipping excluded path: node_modules"
time="2020-09-05T01:24:07+03:00" level=debug msg="Found dependencies for dockerfile: [{package.json /app true} {. /app true}]"
time="2020-09-05T01:24:07+03:00" level=debug msg="Skipping excluded path: node_modules"
time="2020-09-05T01:24:07+03:00" level=info msg="files modified: [expiration\\src\\index.ts]"
Syncing 1 files for us.gcr.io/ts-maps-286111/expiration:2aae7ff-dirty@sha256:2e31caedf3d9b2bcb2ea5693f8e22478a9d6caa21d1a478df5ff8ebcf562573e
time="2020-09-05T01:24:07+03:00" level=info msg="Copying files: map[expiration\\src\\index.ts:[/app/src/index.ts]] to us.gcr.io/ts-maps-286111/expiration:2aae7ff-dirty@sha256:2e31caedf3d9b2bcb2ea5693f8e22478a9d6caa21d1a478df5ff8ebcf562573e"
time="2020-09-05T01:24:07+03:00" level=debug msg="getting client config for kubeContext: ``"
time="2020-09-05T01:24:07+03:00" level=debug msg="Running command: [kubectl --context gke_ts-maps-286111_europe-west3-a_ticketing-dev exec expiration-depl-5cb997d597-p49lv --namespace default -c expiration -i -- tar xmf - -C / --no-same-owner]"
time="2020-09-05T01:24:09+03:00" level=debug msg="Command output: [], stderr: tar: removing leading '/' from member names\n"
Watching for changes...
time="2020-09-05T01:24:11+03:00" level=info msg="Streaming logs from pod: expiration-depl-5cb997d597-p49lv container: expiration"
time="2020-09-05T01:24:11+03:00" level=debug msg="Running command: [kubectl --context gke_ts-maps-286111_europe-west3-a_ticketing-dev logs --since=114s -f expiration-depl-5cb997d597-p49lv -c expiration --namespace default]"
[expiration]
[expiration] > expiration@1.0.0 start /app
[expiration] > ts-node-dev --watch src src/index.ts
[expiration]
[expiration] ts-node-dev ver. 1.0.0-pre.62 (using ts-node ver. 8.10.2, typescript ver. 3.9.7)
[expiration] starting expiration!kdd
[expiration] Connected to NATS!
The Node.js server inside the pod restarts. Sometimes I see this line, sometimes not; the overall result is the same:
[expiration] [INFO] 22:23:42 Restarting: src/index.ts has been modified
But no changes are applied. If I cat the changed file inside a pod, it's the old version; if I delete a pod, it starts again with the old version.
My folder structure:
+---auth
| \---src
| +---models
| +---routes
| | \---__test__
| +---services
| \---test
+---client
| +---.next
| | +---cache
| | | \---next-babel-loader
| | +---server
| | | \---pages
| | | +---auth
| | | \---next
| | | \---dist
| | | \---pages
| | \---static
| | +---chunks
| | | \---pages
| | | +---auth
| | | \---next
| | | \---dist
| | | \---pages
| | +---development
| | \---webpack
| | \---pages
| | \---auth
| +---api
| +---components
| +---hooks
| \---pages
| \---auth
+---common
| +---build
| | +---errors
| | +---events
| | | \---types
| | \---middlewares
| \---src
| +---errors
| +---events
| | \---types
| \---middlewares
+---config
+---expiration
| \---src
| +---events
| | +---listeners
| | \---publishers
| +---queue
| \---__mocks__
+---infra
| \---k8s
+---orders
| \---src
| +---events
| | +---listeners
| | | \---__test__
| | \---publishers
| +---models
| +---routes
| | \---__test__
| +---test
| \---__mocks__
+---payment
\---tickets
\---src
+---events
| +---listeners
| | \---__test__
| \---publishers
+---models
| \---__test__
+---routes
| \---__test__
+---test
\---__mocks__
Would be grateful for any help!
What worked for me was using the --poll flag with ts-node-dev.
My script looks like this:
"start": "ts-node-dev --respawn --poll --inspect --exit-child src/index.ts"
For your start script, try adding --poll. For example, if your start script is "start": "nodemon src/index.js", change it to "start": "nodemon --poll src/index.js".
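Applied to the expiration service's start script from the logs above, that would look something like this (a sketch; --poll makes ts-node-dev stat files on an interval instead of relying on filesystem events, which tends to be more robust when files arrive via Skaffold's tar copy):

"scripts": {
  "start": "ts-node-dev --poll --watch src src/index.ts"
}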
It looks like there's no major error in the logs; my guess is that the files are actually being put in another directory. You can try running this in the container:
find / -name "index.ts"
to see if they landed somewhere else.
Another thing to check is the WORKDIR value in your container(s). Check what directory you land on when you run:
kubectl exec -it <pod-name> -c <container-name> -- sh
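A quick way to verify both points at once, using the pod and container names from the logs above (a sketch; adjust names to your deployment):

# Print the working directory, then the synced file's timestamp, inside the container.
kubectl exec -it expiration-depl-5cb997d597-p49lv -c expiration -- sh -c 'pwd && ls -l /app/src/index.ts'

If the timestamp updates on each sync but the process still serves stale code, the watcher is the problem (see the --poll suggestion above); if it doesn't update, the files are landing somewhere else.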
✌️

docker-compose: nodejs container not communicating with postgres container

I did find a few people with a slightly different setup but with the same issue, so I hope this doesn't feel like a duplicate question.
My setup is pretty simple and straightforward. I have a container for my Node app and a container for my Postgres database. When I run docker-compose up, the log shows both containers up and running. The problem is that my Node app is not connecting to the database.
I can connect to the database using Postbird and it works as it should.
If I create a Docker container only for the database and run the Node app directly on my machine, everything works fine. So it's not an issue with the DB or the app but with the setup.
Here's some useful information:
Running a Docker container just for the DB (connects and works perfectly):
> vigna-backend@1.0.0 dev /Users/lucasbittar/Dropbox/Code/vigna/backend
> nodemon src/server.js
[nodemon] 2.0.2
[nodemon] to restart at any time, enter `rs`
[nodemon] watching dir(s): *.*
[nodemon] watching extensions: js,mjs,json
[nodemon] starting `node -r sucrase/register src/server.js`
Initializing database...
Connecting to DB -> vignadb | PORT: 5432
Executing (default): SELECT 1+1 AS result
Connection has been established successfully -> vignadb
Running a container for each using docker-compose:
Creating network "backend_default" with the default driver
Creating backend_db_1 ... done
Creating backend_app_1 ... done
Attaching to backend_db_1, backend_app_1
db_1 |
db_1 | PostgreSQL Database directory appears to contain a database; Skipping initialization
db_1 |
db_1 | 2020-07-24 13:23:32.875 UTC [1] LOG: starting PostgreSQL 12.1 (Debian 12.1-1.pgdg100+1) on x86_64-pc-linux-gnu, compiled by gcc (Debian 8.3.0-6) 8.3.0, 64-bit
db_1 | 2020-07-24 13:23:32.876 UTC [1] LOG: listening on IPv4 address "0.0.0.0", port 5432
db_1 | 2020-07-24 13:23:32.876 UTC [1] LOG: listening on IPv6 address "::", port 5432
db_1 | 2020-07-24 13:23:32.881 UTC [1] LOG: listening on Unix socket "/var/run/postgresql/.s.PGSQL.5432"
db_1 | 2020-07-24 13:23:32.955 UTC [27] LOG: database system was shut down at 2020-07-23 13:21:09 UTC
db_1 | 2020-07-24 13:23:32.999 UTC [1] LOG: database system is ready to accept connections
app_1 |
app_1 | > vigna-backend@1.0.0 dev /usr/app
app_1 | > npx sequelize db:migrate && npx sequelize db:seed:all && nodemon src/server.js
app_1 |
app_1 |
app_1 | Sequelize CLI [Node: 14.5.0, CLI: 5.5.1, ORM: 5.21.3]
app_1 |
app_1 | Loaded configuration file "src/config/database.js".
app_1 |
app_1 | Sequelize CLI [Node: 14.5.0, CLI: 5.5.1, ORM: 5.21.3]
app_1 |
app_1 | Loaded configuration file "src/config/database.js".
app_1 | [nodemon] 2.0.2
app_1 | [nodemon] to restart at any time, enter `rs`
app_1 | [nodemon] watching dir(s): *.*
app_1 | [nodemon] watching extensions: js,mjs,json
app_1 | [nodemon] starting `node -r sucrase/register src/server.js`
app_1 | Initializing database...
app_1 | Connecting to DB -> vignadb | PORT: 5432
My database class:
class Database {
  constructor() {
    console.log('Initializing database...');
    this.init();
  }

  async init() {
    let retries = 5;
    while (retries) {
      console.log(`Connecting to DB -> ${databaseConfig.database} | PORT: ${databaseConfig.port}`);
      const sequelize = new Sequelize(databaseConfig);
      try {
        await sequelize.authenticate();
        console.log(`Connection has been established successfully -> ${databaseConfig.database}`);
        models
          .map(model => model.init(sequelize))
          .map(model => model.associate && model.associate(sequelize.models));
        break;
      } catch (err) {
        console.log(`Error: ${err.message}`);
        retries -= 1;
        console.log(`Retries left: ${retries}`);
        // Wait 5 seconds before trying again
        await new Promise(res => setTimeout(res, 5000));
      }
    }
  }
}
Dockerfile:
FROM node:alpine
WORKDIR /usr/app
COPY package*.json ./
RUN npm install
COPY . .
EXPOSE 3333
CMD ["npm", "start"]
docker-compose.yml:
version: "3"
services:
  db:
    image: postgres
    restart: always
    environment:
      POSTGRES_PASSWORD: postgres
      POSTGRES_USER: postgres
      POSTGRES_DB: vignadb
    volumes:
      - ./pgdata:/var/lib/postgresql/data
    ports:
      - "5432:5432"
  app:
    build: .
    depends_on:
      - db
    ports:
      - "3333:3333"
    volumes:
      - .:/usr/app
    command: npm run dev
package.json (scripts only):
"scripts": {
  "dev-old": "nodemon src/server.js",
  "dev": "npx sequelize db:migrate && npx sequelize db:seed:all && nodemon src/server.js",
  "build": "sucrase ./src -d ./dist --transforms imports",
  "start": "node dist/server.js"
},
.env:
# Database
DB_HOST=db
DB_USER=postgres
DB_PASS=postgres
DB_NAME=vignadb
DB_PORT=5432
database config:
require('dotenv/config');

module.exports = {
  dialect: 'postgres',
  host: process.env.DB_HOST,
  username: process.env.DB_USER,
  password: process.env.DB_PASS,
  database: process.env.DB_NAME,
  port: process.env.DB_PORT,
  define: {
    timestamp: true,
    underscored: true,
    underscoredAll: true,
  },
};
I know I'm messing something up, I just don't know where.
Let me know if I can provide more information.
Thanks!
You should put your two containers in the same network (https://docs.docker.com/compose/networking/) and use the db service name in your Node.js connection string.
Something like: postgres://db:5432/vignadb
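For what it's worth, services declared in the same docker-compose.yml already share the compose-created default network (backend_default in the logs above), so the usual missing piece is just the host and credentials in the connection string. A tiny sanity check, built only from the .env values shown in the question:

// check-conn.js (sketch) -- print the exact string the app should be using.
// Inside the compose network the host must be the service name "db", not localhost.
require('dotenv/config');
const { DB_USER, DB_PASS, DB_HOST, DB_PORT, DB_NAME } = process.env;
console.log(`postgres://${DB_USER}:${DB_PASS}@${DB_HOST}:${DB_PORT}/${DB_NAME}`);
// expected: postgres://postgres:postgres@db:5432/vignadb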
