Configuring firewall settings for Docker Swarm on Digital Ocean - linux

How can I configure my digital ocean boxes to have the correct firewall settings?
I've followed the official guide for getting Digital Ocean and Docker containers working together.
I have 3 docker nodes that I can see when I docker-machine ls. I have created a master docker node and have joined the other docker nodes as workers. However, if I attempt to visit the url of the node, the connection hangs. This setup works locally.
Here is my docker-compose that I am using for production.
version: "3"
services:
api:
image: "api"
command: rails server -b "0.0.0.0" -e production
depends_on:
- db
- redis
deploy:
replicas: 3
resources:
cpus: "0.1"
memory: 50M
restart_policy:
condition: on-failure
env_file:
- .env-prod
networks:
- apinet
ports:
- "3000:3000"
client:
image: "client"
depends_on:
- api
deploy:
restart_policy:
condition: on-failure
env_file:
- .env-prod
networks:
- apinet
- clientnet
ports:
- "4200:4200"
- "35730:35730"
db:
deploy:
placement:
constaints: [node.role == manager]
restart_policy:
condition: on-failure
env_file: .env-prod
image: mysql
ports:
- "3306:3306"
volumes:
- ~/.docker-volumes/app/mysql/data:/var/lib/mysql/data
redis:
deploy:
placement:
constaints: [node.role == manager]
restart_policy:
condition: on-failure
image: redis:alpine
ports:
- "6379:6379"
volumes:
- ~/.docker-volumes/app/redis/data:/var/lib/redis/data
nginx:
image: app_nginx
deploy:
restart_policy:
condition: on-failure
env_file: .env-prod
depends_on:
- client
- api
networks:
- apinet
- clientnet
ports:
- "80:80"
networks:
apinet:
driver: overlay
clientnet:
driver: overlay
I'm pretty confident that the problem is with the firewall settings. However, I'm not sure which ports need to be open. I've consulted this guide.

Related

How to docker compose to run azure container instance

I have a docker-compose file with multiple services like prometheus, grafana, spring boot ms and elk. I am able to start containers on my local machine. But after modifying the file to deploy to azure container instance its failing with errors like:
service "prometheus" refers to undefined volume fsefileshare: invalid compose project
error looking up volume plugin azure_file: plugin "azure_file" not found
Sample docker compose to run in local
version: '3.9'
services:
setup:
build:
context: ./config/setup/
dockerfile: Dockerfile
args:
ELASTIC_VERSION: ${ELASTIC_VERSION}
init: true
volumes:
- setup:/state:Z
environment:
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
networks:
- fse_net
database_mysql:
image: mysql:8.0
restart: always
volumes:
- mysql_data:/var/lib/mysql
- ./fse_auth.sql:/docker-entrypoint-initdb.d/fse_auth.sql:ro
environment:
MYSQL_ROOT_PASSWORD: root
networks:
- fse_net
#************Mongo DB - 1***************
database_mongo:
restart: always
container_name: database_mongo
image: mongo:latest
volumes:
- mongo_data:/data/db
- ./mongo-init.js:/docker-entrypoint-initdb.d/mongo-init.js:ro
ports:
- 27017:27017
networks:
- fse_net
#************prometheus***************
prometheus:
image: prom/prometheus
container_name: prometheus
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
ports:
- 9090:9090
depends_on:
- database_mongo
- registery
- company
- stock
- gateway
networks:
- fse_net
#************company***************
company:
container_name: company
restart: always
environment:
- EUREKA_REGISTERY=registery
- DATABASE_HOST=database_mongo
build:
context: ./company
dockerfile: Dockerfile
ports:
- 8086:8086
depends_on:
- database_mongo
- database_mysql
networks:
- fse_net
#************stock***************
stock:
container_name: stock
environment:
- EUREKA_REGISTERY=registery
- DATABASE_HOST=database_mongo
build:
context: ./stock
dockerfile: Dockerfile
ports:
- 8081:8081
depends_on:
- database_mongo
networks:
- fse_net
volumes:
setup:
mysql_data:
mongo_data:
grafana-storage:
zookeeper_data:
zookeeper_log:
kafka_data:
elasticsearch:
networks:
fse_net:
driver: bridge
Docker compose after modification for azure
version: '3.9'
services:
setup:
build:
context: ./config/setup/
dockerfile: Dockerfile
args:
ELASTIC_VERSION: ${ELASTIC_VERSION}
image: myazureacr.azurecr.io/setup
init: true
volumes:
- setup:/state:Z
environment:
ELASTIC_PASSWORD: ${ELASTIC_PASSWORD:-}
LOGSTASH_INTERNAL_PASSWORD: ${LOGSTASH_INTERNAL_PASSWORD:-}
KIBANA_SYSTEM_PASSWORD: ${KIBANA_SYSTEM_PASSWORD:-}
networks:
- fse_net
database_mysql:
image: mysql:8.0
restart: always
volumes:
- fse_data:/var/lib/mysql
- fsefileshare/fse_auth.sql:/docker-entrypoint-initdb.d/fse_auth.sql:ro
environment:
MYSQL_ROOT_PASSWORD: root
networks:
- fse_net
#************Mongo DB - 1***************
database_mongo:
restart: always
container_name: database_mongo
image: mongo:latest
volumes:
- fse_data:/data/db
- fsefileshare/mongo-init.js:/docker-entrypoint-initdb.d/mongo-init.js:ro
ports:
- 27017:27017
networks:
- fse_net
#************prometheus***************
prometheus:
image: prom/prometheus
container_name: prometheus
# environment:
# - APP_GATEWAY=gateway
# - REGISTERY_APP=registery
volumes:
- fsefileshare/prometheus.yml:/etc/prometheus/prometheus.yml
ports:
- 9090:9090
depends_on:
- database_mongo
- company
- stock
networks:
- fse_net
#************company***************
company:
container_name: company
restart: always
environment:
- EUREKA_REGISTERY=registery
- DATABASE_HOST=database_mongo
build:
context: ./company
dockerfile: Dockerfile
image: myazureacr.azurecr.io/company
ports:
- 8086:8086
depends_on:
- database_mongo
- database_mysql
networks:
- fse_net
#************stock***************
stock:
container_name: stock
environment:
- EUREKA_REGISTERY=registery
- DATABASE_HOST=database_mongo
build:
context: ./stock
dockerfile: Dockerfile
image: myazureacr.azurecr.io/stock
ports:
- 8081:8081
depends_on:
- database_mongo
networks:
- fse_net
volumes:
fse_data:
driver: azure_file
driver_opts:
share_name: fsefileshare
storage_account_name: fsestorageaccount
networks:
fse_net:
driver: bridge
Your YAML looks wrong. According to the documentation, this is how you should define the volume:
volumes: # Array of volumes available to the instances
- name: string
azureFile:
shareName: string
readOnly: boolean
storageAccountName: string
storageAccountKey: string

Docker - Elasticsearch Container Unhealthy (even with OFFICIAL tuto)

I'm trying to make a docker compose file running Elasticsearch and my app.
The docker-compose.yml :
version: "3.7"
networks:
net:
driver: bridge
services:
elasticsearch:
image: docker.elastic.co/elasticsearch/elasticsearch:7.17.1
container_name: elasticsearch_dev
environment:
- xpack.security.enabled=false
- discovery.type=single-node
ulimits:
memlock:
soft: -1
hard: -1
nofile:
soft: 65536
hard: 65536
cap_add:
- IPC_LOCK
healthcheck:
test: ["CMD-SHELL", "curl", "elasticsearch:9211/_cluster/health?wait_for_status=green&timeout=1s || exit 1"]
interval: 5s
timeout: 10s
retries: 2
volumes:
- elasticsearch-data:/usr/share/elasticsearch/data
ports:
- 9211:9200
networks:
- net
#kibana:
# container_name: kibana_dev
# image: docker.elastic.co/kibana/kibana:7.4.0
# environment:
# - ELASTICSEARCH_URL=http://elasticsearch:9211
# ports:
# - 5601:5601
# depends_on:
# - elasticsearch
# networks:
# - net
engine:
container_name: engine
build: .
volumes:
- .:/usr/app/
- /usr/app/node_modules
env_file:
- .env
ports:
- 3010:3010
depends_on:
elasticsearch:
condition: service_healthy
networks:
- net
volumes:
elasticsearch-data:
driver: local
My problem :
ERROR: for engine Container "49da392d9992" is unhealthy.
I'm running out of ideas to understand this problem...
I'm quite surprised because, when I'm trying to run the "tuto" on the official Elasticsearch page:
tuto
it gives me an unhealthy error too...
If someone has a clue, I will owe him a debt...

Docker Cassandra Access in Client Running Under Same Docker

My docker-compose file as below,
cassandra-db:
container_name: cassandra-db
image: cassandra:4.0-beta1
ports:
- "9042:9042"
restart: on-failure
volumes:
- ./out/cassandra_data:/var/lib/cassandra
environment:
- CASSANDRA_CLUSTER_NAME='cassandra-cluster'
- CASSANDRA_NUM_TOKENS=256
- CASSANDRA_RPC_ADDRESS=0.0.0.0
networks:
- my-network
client-service:
container_name: client-service
image: client-service
environment:
- SPRING_PROFILES_ACTIVE=dev
ports:
- 8087:8087
links:
- cassandra-db
networks:
- my-network
networks:
my-network:
I use Datastax Java driver to connect cassandra in client service, which also runs inside docker.
CqlSession.builder()
.addContactEndPoint(new DefaultEndPoint(
InetSocketAddress.createUnresolved("cassandra-db",9042)))
.withKeyspace(CassandraConstant.KEY_SPACE_NAME.getValue())
.build()
I use the DNS name to connect but it does not connect; I also tried with the Docker IP of the cassandra container, and with depends_on.
Any issue with docker-compose file?

Keycloak, Nodejs API and Traefik all in Docker -> only 403

maybe someone can help me.
I have keycloak, my nodejs-server, and traefik all installed with docker-compose. Everything seemed to be fine until I called a route from my frontend to the nodejs API. No matter what I tried I get a 403 all the time. When the nodejs server is running not in a docker it works. Strange in my opinion.
Here my Docker Compose if it helps:
version: '3.8'
services:
mariadb:
image: mariadb:latest
container_name: mariadb
labels:
- "traefik.enable=false"
networks:
- keycloak-network
environment:
- MYSQL_ROOT_PASSWORD=
- MYSQL_DATABASE=
- MYSQL_USER=
- MYSQL_PASSWORD=
command: mysqld --lower_case_table_names=1
volumes:
- ./:/docker-entrypoint-initdb.d
keycloak:
image: jboss/keycloak
container_name: keycloak
labels:
- "traefik.http.routers.keycloak.rule=Host(`keycloak.localhost`)"
- "traefik.http.routers.keycloak.tls=true"
networks:
- keycloak-network
environment:
- DB_DATABASE=
- DB_USER=
- DB_PASSWORD=
- KEYCLOAK_USER=
- KEYCLOAK_PASSWORD=
- KEYCLOAK_IMPORT=/tmp/example-realm.json
- PROXY_ADDRESS_FORWARDING=true
ports:
- 8443:8443
volumes:
- ./realm-export.json:/tmp/example-realm.json
depends_on:
- mariadb
phpmyadmin:
image: phpmyadmin/phpmyadmin
container_name: phpmyadmin
labels:
- "traefik.http.routers.phpmyadmin.rule=Host(`phpmyadmin.localhost`)"
networks:
- keycloak-network
links:
- mariadb:db
ports:
- 8081:80
depends_on:
- mariadb
spectory-backend:
image: spectory-backend
container_name: spectory-backend
labels:
- "traefik.http.routers.spectory-backend.rule=Host(`api.localhost`)"
- "traefik.port=4000"
ports:
- 4000:4000
networks:
- keycloak-network
depends_on:
- mariadb
- keycloak
spectory-frontend:
image: spectory-frontend
container_name: spectory-frontend
labels:
- "traefik.http.routers.spectory-frontend.rule=Host(`spectory.localhost`)"
ports:
- 4200:80
depends_on:
- mariadb
- keycloak
- spectory-backend
traefik-reverse-proxy:
image: traefik:v2.2
command:
- --api.insecure=true
- --providers.docker
- --entrypoints.web-secure.address=:443
- --entrypoints.web.address=:80
- --providers.file.directory=/configuration/
- --providers.file.watch=true
labels:
- "traefik.http.routers.traefik-reverse-proxy.rule=Host(`traefik.localhost`)"
ports:
- "80:80"
- "443:443"
- "8082:8080"
networks:
- keycloak-network
volumes:
- ./traefik.toml:/configuration/traefik.toml
- /var/run/docker.sock:/var/run/docker.sock
- ./ssl/tls.key:/etc/https/tls.key
- ./ssl/tls.crt:/etc/https/tls.crt
networks:
keycloak-network:
name: keycloak-network
I also tried static ip addresses for nodejs and keycloak -> didn't work.
Here on StackOverflow someone mentioned using https would help -> didn't work
Pretty much my situation: Link . The goal for me is that even the API is reachable through traefik
Btw my angular frontend can communicate with keycloak. Also in a docker. I can also ping the keycloak docker from the nodejs docker. The Nodejs configuration parameters come directly from keycloak.
I really don't know what to do next.
Did someone try something similar?

Accessing environment variables from a linked container

I would like to find out how to access the environment variables from a linked docker container. I would like to access the host/port in my node app from a linked rethinkdb container. Using docker compose (bluemixservice and rethinkdb):
version: '2'
services:
twitterservice:
build: ./workerTwitter
links:
- mongodb:mongolink
- rabbitmq:rabbitlink
ports:
- "8082:8082"
depends_on:
- mongodb
- rabbitmq
bluemixservice:
build: ./workerBluemix
links:
- rabbitmq:rabbitlink
- rethinkdb:rethinkdb
ports:
- "8083:8083"
depends_on:
- rabbitmq
- rethinkdb
mongodb:
image: mongo:latest
ports:
- "27017:27017"
volumes:
- mongo-data:/var/lib/mongo
command: mongod
rabbitmq:
image: rabbitmq:management
ports:
- '15672:15672'
- '5672:5672'
rethinkdb:
image: rethinkdb:latest
ports:
- "8080:8080"
- "28015:28015"
volumes:
mongo-data:
driver: local
rethink-data:
driver: local
I would like to access them in my pm2 processes.json:
{
"apps": [
{
"name": "sentiment-service",
"script": "./src",
"merge_logs": true,
"max_restarts": 40,
"restart_delay": 10000,
"instances": 1,
"max_memory_restart": "200M",
"env": {
"PORT": 8080,
"NODE_ENV": "production",
"RABBIT_MQ": "amqp://rabbitlink:5672/",
"ALCHEMY_KEY": "xxxxxxx",
"RETHINK_DB_HOST": "Rethink DB Container Hostname?",
"RETHINK_DB_PORT": "Rethink DB Container Port?",
"RETHINK_DB_AUTHKEY": ""
}
}
]
}
This used to be possible (see here), but now the suggestion is to just use the linked service name as hostname, as you are already doing in your example with rabbitmq. Regarding port numbers, I don't think it adds much to use variables for that; I'd just go with the plain number. You can however parameterize the whole docker-compose.yml using variables in case you want to be able to quickly change a value from outside.
Note that there is no need to alias links, I find it much clearer to just use the service name.
Also, a link already implies depends_on.
I solved it by using consul and registrator to detect all my containers.
version: '2'
services:
consul:
command: -server -bootstrap -advertise 192.168.99.101
image: progrium/consul:latest
ports:
- 8300:8300
- 8400:8400 # rpc/rest
- 8500:8500 # ui
- 8600:53/udp # dns
registrator:
command: -ip=192.168.99.101 consul://consul:8500
image: gliderlabs/registrator:latest
volumes:
- "/var/run/docker.sock:/tmp/docker.sock"
links:
- consul
twitterservice:
build: ./workerTwitter
container_name: twitterservice
links:
- mongodb:mongolink
- rabbitmq:rabbitlink
- consul
ports:
- "8082:8082"
depends_on:
- mongodb
- rabbitmq
- consul
bluemixservice:
build: ./workerBluemix
container_name: bluemixservice
links:
- rabbitmq:rabbitlink
- rethinkdb:rethinkdb
- consul
ports:
- "8083:8083"
depends_on:
- rabbitmq
- rethinkdb
- consul
mongodb:
image: mongo:latest
container_name: mongo
ports:
- "27017:27017"
links:
- consul
volumes:
- mongo-data:/var/lib/mongo
command: mongod
rabbitmq:
image: rabbitmq:management
container_name: rabbitmq
ports:
- '15672:15672'
- '5672:5672'
links:
- consul
depends_on:
- consul
rethinkdb:
image: rethinkdb:latest
container_name: rethinkdb
ports:
- "8080:8080"
- "28015:28015"
links:
- consul
depends_on:
- consul
volumes:
mongo-data:
driver: local
rethink-data:
driver: local

Resources