Instantiate the chaincode - hyperledger-fabric
Error: could not assemble transaction, err proposal response was not successful, error code 500, msg chaincode registration failed: container exited with 254
this is peer-base.yaml
version: '2'
services:
peer-base:
image: hyperledger/fabric-peer:latest
environment:
- CORE_VM_ENDPOINT=unix:///host/var/run/docker.sock
# the following setting starts chaincode containers on the same
# bridge network as the peers
# https://docs.docker.com/compose/networking/
#- CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=composer_default
- FABRIC_LOGGING_SPEC=INFO
#- FABRIC_LOGGING_SPEC=DEBUG
- CORE_PEER_TLS_ENABLED=true
- CORE_PEER_GOSSIP_USELEADERELECTION=true
- CORE_PEER_GOSSIP_ORGLEADER=false
- CORE_PEER_PROFILE_ENABLED=true
- CORE_PEER_TLS_CERT_FILE=/etc/hyperledger/fabric/tls/server.crt
- CORE_PEER_TLS_KEY_FILE=/etc/hyperledger/fabric/tls/server.key
- CORE_PEER_TLS_ROOTCERT_FILE=/etc/hyperledger/fabric/tls/ca.crt
working_dir: /opt/gopath/src/github.com/hyperledger/fabric/peer
command: peer node start
orderer-base:
image: hyperledger/fabric-orderer:latest
environment:
- FABRIC_LOGGING_SPEC=INFO
- ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
- ORDERER_GENERAL_GENESISMETHOD=file
- ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer1.genesis.block
- ORDERER_GENERAL_LOCALMSPID=OrdererMSP
- ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp
# enabled TLS
- ORDERER_GENERAL_TLS_ENABLED=true
- ORDERER_GENERAL_TLS_PRIVATEKEY=/var/hyperledger/orderer/tls/server.key
- ORDERER_GENERAL_TLS_CERTIFICATE=/var/hyperledger/orderer/tls/server.crt
- ORDERER_GENERAL_TLS_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
- ORDERER_KAFKA_TOPIC_REPLICATIONFACTOR=1
- ORDERER_KAFKA_VERBOSE=true
- ORDERER_GENERAL_CLUSTER_CLIENTCERTIFICATE=/var/hyperledger/orderer/tls/server.crt
- ORDERER_GENERAL_CLUSTER_CLIENTPRIVATEKEY=/var/hyperledger/orderer/tls/server.key
- ORDERER_GENERAL_CLUSTER_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
working_dir: /opt/gopath/src/github.com/hyperledger/fabric
command: orderer
this is docker-compose-base.yaml
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
version: '2'
services:
orderer1.example.com:
container_name: orderer1.example.com
extends:
file: peer-base.yaml
service: orderer-base
volumes:
- ../channel-artifacts/genesis.block:/var/hyperledger/orderer/orderer1.genesis.block
- ../crypto-config/ordererOrganizations/example.com/orderers/orderer1.example.com/msp:/var/hyperledger/orderer/msp
- ../crypto-config/ordererOrganizations/example.com/orderers/orderer1.example.com/tls/:/var/hyperledger/orderer/tls
- orderer1.example.com:/var/hyperledger/production/orderer
ports:
- 7050:7050
peer0.org1.example.com:
container_name: peer0.org1.example.com
extends:
file: peer-base.yaml
service: peer-base
environment:
- CORE_PEER_ID=peer0.org1.example.com
- CORE_PEER_ADDRESS=peer0.org1.example.com:7051
- CORE_PEER_LISTENADDRESS=0.0.0.0:7051
- CORE_PEER_CHAINCODEADDRESS=peer0.org1.example.com:7052
- CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:7052
- CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org1.example.com:8051
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org1.example.com:7051
- CORE_PEER_LOCALMSPID=Org1MSP
volumes:
- /var/run/:/host/var/run/
- ../crypto-config/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/msp:/etc/hyperledger/fabric/msp
- ../crypto-config/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls:/etc/hyperledger/fabric/tls
- peer0.org1.example.com:/var/hyperledger/production
ports:
- 7051:7051
peer1.org1.example.com:
container_name: peer1.org1.example.com
extends:
file: peer-base.yaml
service: peer-base
environment:
- CORE_PEER_ID=peer1.org1.example.com
- CORE_PEER_ADDRESS=peer1.org1.example.com:8051
- CORE_PEER_LISTENADDRESS=0.0.0.0:8051
- CORE_PEER_CHAINCODEADDRESS=peer1.org1.example.com:8052
- CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:8052
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer1.org1.example.com:8051
- CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org1.example.com:7051
- CORE_PEER_LOCALMSPID=Org1MSP
volumes:
- /var/run/:/host/var/run/
- ../crypto-config/peerOrganizations/org1.example.com/peers/peer1.org1.example.com/msp:/etc/hyperledger/fabric/msp
- ../crypto-config/peerOrganizations/org1.example.com/peers/peer1.org1.example.com/tls:/etc/hyperledger/fabric/tls
- peer1.org1.example.com:/var/hyperledger/production
ports:
- 8051:8051
peer0.org2.example.com:
container_name: peer0.org2.example.com
extends:
file: peer-base.yaml
service: peer-base
environment:
- CORE_PEER_ID=peer0.org2.example.com
- CORE_PEER_ADDRESS=peer0.org2.example.com:9051
- CORE_PEER_LISTENADDRESS=0.0.0.0:9051
- CORE_PEER_CHAINCODEADDRESS=peer0.org2.example.com:9052
- CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:9052
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer0.org2.example.com:9051
- CORE_PEER_GOSSIP_BOOTSTRAP=peer1.org2.example.com:10051
- CORE_PEER_LOCALMSPID=Org2MSP
volumes:
- /var/run/:/host/var/run/
- ../crypto-config/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/msp:/etc/hyperledger/fabric/msp
- ../crypto-config/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls:/etc/hyperledger/fabric/tls
- peer0.org2.example.com:/var/hyperledger/production
ports:
- 9051:9051
peer1.org2.example.com:
container_name: peer1.org2.example.com
extends:
file: peer-base.yaml
service: peer-base
environment:
- CORE_PEER_ID=peer1.org2.example.com
- CORE_PEER_ADDRESS=peer1.org2.example.com:10051
- CORE_PEER_LISTENADDRESS=0.0.0.0:10051
- CORE_PEER_CHAINCODEADDRESS=peer1.org2.example.com:10052
- CORE_PEER_CHAINCODELISTENADDRESS=0.0.0.0:10052
- CORE_PEER_GOSSIP_EXTERNALENDPOINT=peer1.org2.example.com:10051
- CORE_PEER_GOSSIP_BOOTSTRAP=peer0.org2.example.com:9051
- CORE_PEER_LOCALMSPID=Org2MSP
volumes:
- /var/run/:/host/var/run/
- ../crypto-config/peerOrganizations/org2.example.com/peers/peer1.org2.example.com/msp:/etc/hyperledger/fabric/msp
- ../crypto-config/peerOrganizations/org2.example.com/peers/peer1.org2.example.com/tls:/etc/hyperledger/fabric/tls
- peer1.org2.example.com:/var/hyperledger/production
ports:
- 10051:10051
Now i am not able to instantiate the code on the channel
chaincode instantiate -o orderer1.example.com:7050 --tls --cafile /opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/ordererOrganizations/example.com/orderers/orderer1.example.com/msp/tlscacerts/tlsca.example.com-cert.pem -C $CHANNEL_NAME -n mycc -v 1.0 -c '{"Args":["init","a", "100", "b","200"]}' -P "AND ('Org1MSP.peer','Org2MSP.peer')"
## the error is
Error: could not assemble transaction, err proposal response was not successful, error code 500, msg chaincode registration failed: container exited with 254
CORE_VM_DOCKER_HOSTCONFIG_NETWORKMODE=composer_default
I have commented the above environment variable, but after uncommenting the above environment variable the error still persists
the error is :
error: could not assemble transaction, err proposal response was not successful, error code 500, msg chaincode registration failed: container exited with 254
Related
could not translate host name to address (Data lineage- tokern)
version: '3.6' services: tokern-demo-catalog: image: tokern/demo-catalog:latest container_name: tokern-demo-catalog restart: unless-stopped networks: - tokern-internal volumes: - tokern_demo_catalog_data:/var/lib/postgresql/data environment: POSTGRES_PASSWORD: xxx POSTGRES_USER: xxx POSTGRES_DB: table1 tokern-api: image: tokern/data-lineage:latest container_name: tokern-data-lineage restart: unless-stopped networks: - tokern-internal environment: CATALOG_PASSWORD: xxx CATALOG_USER: xxx CATALOG_DB: table1 CATALOG_HOST: "xxxxxxxx.amazon.com" GUNICORN_CMD_ARGS: "--bind 0.0.0.0:4142" toker-viz: image: tokern/data-lineage-viz:latest container_name: tokern-data-lineage-visualizer restart: unless-stopped networks: - tokern-internal - tokern-net ports: - "39284:80" networks: tokern-net: # Exposed by your host. # external: true name: "tokern-net" driver: bridge ipam: driver: default config: - subnet: 10.10.0.0/24 tokern-internal: name: "tokern-internal" driver: bridge internal: true ipam: driver: default config: - subnet: 10.11.0.0/24 volumes: tokern_demo_catalog_data: trying to implement data lineage into my database i have followed according to this documentation "https://pypi.org/project/data-lineage/" and https://tokern.io/docs/data-lineage/installation/ not able to solve this error sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) could not translate host name "xxx.amazonaws.com" to address: Temporary failure in name resolution
Taigaio when i create a project the page doesn't refresh automatically
Hello i try to configure a linux server for use Taigaio project and when i try to create, delete or save a modification the page doesn't auto refresh (he turn in void) but when i do by myself (CTRL + Maj + R or F5) i can see the data. did i forget to configure something ? i have install rabbitMq and configure like the doc. exemple : exemple this is my docker-compose.yml : version: "3.5" x-environment: &default-back-environment # Database settings POSTGRES_DB: taiga POSTGRES_USER: taiga POSTGRES_PASSWORD: taiga POSTGRES_HOST: taiga-db # Taiga settings TAIGA_SECRET_KEY: "taiga-back-secret-key" TAIGA_SITES_DOMAIN: "192.168.60.25:9000" TAIGA_SITES_SCHEME: "http" # Email settings. Uncomment following lines and configure your SMTP server # EMAIL_BACKEND: "django.core.mail.backends.smtp.EmailBackend" # DEFAULT_FROM_EMAIL: "no-reply#example.com" # EMAIL_USE_TLS: "False" # EMAIL_USE_SSL: "False" # EMAIL_HOST: "smtp.host.example.com" # EMAIL_PORT: 587 # EMAIL_HOST_USER: "user" # EMAIL_HOST_PASSWORD: "password" # Rabbitmq settings # Should be the same as in taiga-async-rabbitmq and taiga-events-rabbitmq RABBITMQ_USER: taiga RABBITMQ_PASS: taiga # Telemetry settings ENABLE_TELEMETRY: "True" x-volumes: &default-back-volumes - taiga-static-data:/taiga-back/static - taiga-media-data:/taiga-back/media # - ./config.py:/taiga-back/settings/config.py services: taiga-db: image: postgres:12.3 environment: POSTGRES_DB: taiga POSTGRES_USER: taiga POSTGRES_PASSWORD: taiga volumes: - taiga-db-data:/var/lib/postgresql/data networks: - taiga taiga-back: image: taigaio/taiga-back:latest environment: *default-back-environment volumes: *default-back-volumes networks: - taiga depends_on: - taiga-db - taiga-events-rabbitmq - taiga-async-rabbitmq taiga-async: image: taigaio/taiga-back:latest entrypoint: ["/taiga-back/docker/async_entrypoint.sh"] environment: *default-back-environment volumes: *default-back-volumes networks: - taiga depends_on: - taiga-db - taiga-back - taiga-async-rabbitmq 
taiga-async-rabbitmq: image: rabbitmq:3-management-alpine environment: RABBITMQ_ERLANG_COOKIE: secret-erlang-cookie RABBITMQ_DEFAULT_USER: taiga RABBITMQ_DEFAULT_PASS: taiga RABBITMQ_DEFAULT_VHOST: taiga volumes: - taiga-async-rabbitmq-data:/var/lib/rabbitmq networks: - taiga taiga-front: image: taigaio/taiga-front:latest environment: TAIGA_URL: "http://192.168.60.25:9000" TAIGA_WEBSOCKETS_URL: "ws://192.168.60.25:9000" networks: - taiga # volumes: # - ./conf.json:/usr/share/nginx/html/conf.json taiga-events: image: taigaio/taiga-events:latest environment: RABBITMQ_USER: taiga RABBITMQ_PASS: taiga TAIGA_SECRET_KEY: "taiga-back-secret-key" networks: - taiga depends_on: - taiga-events-rabbitmq taiga-events-rabbitmq: image: rabbitmq:3-management-alpine environment: RABBITMQ_ERLANG_COOKIE: secret-erlang-cookie RABBITMQ_DEFAULT_USER: taiga RABBITMQ_DEFAULT_PASS: taiga RABBITMQ_DEFAULT_VHOST: taiga volumes: - taiga-events-rabbitmq-data:/var/lib/rabbitmq networks: - taiga taiga-protected: image: taigaio/taiga-protected:latest environment: MAX_AGE: 360 SECRET_KEY: "taiga-back-secret-key" networks: - taiga taiga-gateway: image: nginx:1.19-alpine ports: - "9000:80" volumes: - ./taiga-gateway/taiga.conf:/etc/nginx/conf.d/default.conf - taiga-static-data:/taiga/static - taiga-media-data:/taiga/media networks: - taiga depends_on: - taiga-front - taiga-back - taiga-events volumes: taiga-static-data: taiga-media-data: taiga-db-data: taiga-async-rabbitmq-data: taiga-events-rabbitmq-data: networks: taiga:
Airflow triggering Spark application results in error "Too large frame"
I have a Docker-compose pipeline with containers for Airflow and for Spark. I want to schedule a SparkSubmitOperator job, but it fails with the error java.lang.IllegalArgumentException: Too large frame: 5211883372140375593. The Spark application consists only of creating a Spark session (I already commented out all other stuff). When I manually run the Spark app (by going to the bash of the Spark container and executing a spark-submit), everything works fine! Also, when I don't create a Spark session but just a SparkContext, it works! Here is my docker-compose.yml: version: '3' x-airflow-common: &airflow-common build: ./airflow/ image: ${AIRFLOW_IMAGE_NAME:-apache/airflow:2.0.2} environment: &airflow-common-env AIRFLOW__CORE__EXECUTOR: CeleryExecutor AIRFLOW__CORE__SQL_ALCHEMY_CONN: postgresql+psycopg2://airflow:airflow#postgres/airflow AIRFLOW__CELERY__RESULT_BACKEND: db+postgresql://airflow:airflow#postgres/airflow AIRFLOW__CELERY__BROKER_URL: redis://:#redis:6379/0 AIRFLOW__CORE__FERNET_KEY: '' AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION: 'false' AIRFLOW__CORE__LOAD_EXAMPLES: 'false' AIRFLOW__API__AUTH_BACKEND: 'airflow.api.auth.backend.basic_auth' AIRFLOW__CORE__DEFAULT_TIMEZONE: 'Europe/Berlin' volumes: - ./airflow/dags:/opt/airflow/dags - ./airflow/logs:/opt/airflow/logs - ./airflow/plugins:/opt/airflow/plugins user: "${AIRFLOW_UID:-50000}:${AIRFLOW_GID:-50000}" networks: - app-tier depends_on: redis: condition: service_healthy postgres: condition: service_healthy services: postgres: container_name: airflowPostgres image: postgres:13 environment: POSTGRES_USER: airflow POSTGRES_PASSWORD: airflow POSTGRES_DB: airflow volumes: - postgres-db-volume:/var/lib/postgresql/data healthcheck: test: ["CMD", "pg_isready", "-U", "airflow"] interval: 5s retries: 5 restart: always networks: - app-tier redis: container_name: airflowRedis image: redis:latest ports: - 6380:6379 healthcheck: test: ["CMD", "redis-cli", "ping"] interval: 5s timeout: 30s retries: 50 restart: always 
networks: - app-tier airflow-webserver: <<: *airflow-common container_name: airflowWebserver command: webserver ports: - 8081:8080 healthcheck: test: ["CMD", "curl", "--fail", "http://localhost:8080/health"] interval: 10s timeout: 10s retries: 5 restart: always airflow-scheduler: <<: *airflow-common container_name: airflowScheduler command: scheduler restart: always airflow-worker: <<: *airflow-common container_name: airflowWorker command: celery worker restart: always airflow-init: <<: *airflow-common container_name: airflowInit command: version environment: <<: *airflow-common-env _AIRFLOW_DB_UPGRADE: 'true' _AIRFLOW_WWW_USER_CREATE: 'true' _AIRFLOW_WWW_USER_USERNAME: ${_AIRFLOW_WWW_USER_USERNAME:-airflow} _AIRFLOW_WWW_USER_PASSWORD: ${_AIRFLOW_WWW_USER_PASSWORD:-airflow} spark: image: docker.io/bitnami/spark:3 user: root environment: - SPARK_MODE=master - SPARK_RPC_AUTHENTICATION_ENABLED=no - SPARK_RPC_ENCRYPTION_ENABLED=no - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no - SPARK_SSL_ENABLED=no ports: - '8080:8080' volumes: - ./:/app networks: - app-tier spark-worker-1: image: docker.io/bitnami/spark:3 environment: - SPARK_MODE=worker - SPARK_MASTER_URL=spark://spark:7077 - SPARK_WORKER_MEMORY=1G - SPARK_WORKER_CORES=1 - SPARK_RPC_AUTHENTICATION_ENABLED=no - SPARK_RPC_ENCRYPTION_ENABLED=no - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no - SPARK_SSL_ENABLED=no networks: - app-tier spark-worker-2: image: docker.io/bitnami/spark:3 environment: - SPARK_MODE=worker - SPARK_MASTER_URL=spark://spark:7077 - SPARK_WORKER_MEMORY=1G - SPARK_WORKER_CORES=1 - SPARK_RPC_AUTHENTICATION_ENABLED=no - SPARK_RPC_ENCRYPTION_ENABLED=no - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no - SPARK_SSL_ENABLED=no networks: - app-tier volumes: postgres-db-volume: networks: app-tier: driver: bridge name: app-tier My Airflow DAG: from datetime import datetime, timedelta from airflow import DAG from airflow.operators.python import PythonOperator from functions import send_to_kafka, send_to_mongo # * AIRFLOW 
################################ # default arguments default_args = { 'owner': 'daniel', 'start_date': datetime(2021, 5, 9), 'email': [''], 'email_on_failure': False, 'email_on_retry': False, "retries": 3, "retry_delay": timedelta(minutes = 1) } # * spark DAG from airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator dag_spark = DAG('spark', description = '', catchup = False, schedule_interval = "#once", default_args = default_args) s1 = SparkSubmitOperator( task_id = "spark-job", application = "/opt/airflow/dags/application.py", conn_id = "spark_default", # defined under Admin/Connections in Airflow webserver packages = "org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.2,postgresql:postgresql:9.1-901-1.jdbc4", dag = dag_spark ) My application (application.py) which does NOT work: from pyspark.sql import SparkSession spark = SparkSession \ .builder \ .appName("myApp") \ .getOrCreate() The application which DOES work: from pyspark import SparkContext sc = SparkContext("local", "First App") The connection defined in the Admin menu of Airflow: And here is the log created by the DAG: https://pastebin.com/FMW3kJ9g Any ideas why this fails?
Problem was solved by adding a .master("local") to the SparkSession.
how to create keyspace for cassandra using docker compose v3
I am trying to create keyspace using docker-compose v3, but it is not working out, my docker-compose.yaml looks like following : version: '3' services: cassandra: image: cassandra:latest networks: - default ports: - "9042:9042" volumes: - ../compi${COMPI}/data/cassandra:/var/lib/cassandra - ../../sql:/compi/sql - ../compi${COMPI}/docker-entrypoint-initdb.d:/compi/docker-entrypoint-initdb.d:ro healthcheck: test: ["CMD-SHELL", "[ $$(nodetool statusgossip) = running ]"] interval: 30s timeout: 10s retries: 5 compi: environment: - DOCKER=true depends_on: - cassandra links: - cassandra build: context: ../.. dockerfile: ./docker.local/compi/Dockerfile volumes: - ../config:/compi/config - ../compi${COMPI}/log:/compi/log - ../compi${COMPI}/data:/compi/data ports: - "717${compi}:717${compi}" volumes: data: config: my docker-entrypoint-initdb.d/init.cql looks like following: CREATE KEYSPACE IF NOT EXISTS sample WITH REPLICATION = { 'class''SimpleStrategy', 'replication_factor' : 1 } AND DURABLE_WRITES = true;
Gitlab 'Gateway Timeout' behind traefik proxy
So I'm trying to set up a gitlab-ce instance on docker swarm using traefik as reverse proxy. This is my proxy stack; version: '3' services: traefik: image: traefik:alpine command: --entryPoints="Name:http Address::80 Redirect.EntryPoint:https" --entryPoints="Name:https Address::443 TLS" --defaultentrypoints="http,https" --acme --acme.acmelogging="true" --acme.email="freelyformd#gmail.com" --acme.entrypoint="https" --acme.storage="acme.json" --acme.onhostrule="true" --docker --docker.swarmmode --docker.domain="mydomain.com" --docker.watch --web ports: - 80:80 - 443:443 - 8080:8080 networks: - traefik-net volumes: - /var/run/docker.sock:/var/run/docker.sock deploy: placement: constraints: - node.role == manager networks: traefik-net: external: true And my gitlab stack version: '3' services: omnibus: image: 'gitlab/gitlab-ce:latest' hostname: 'lab.mydomain.com' environment: GITLAB_OMNIBUS_CONFIG: | external_url 'https://lab.mydomain.com' nginx['listen_port'] = 80 nginx['listen_https'] = false registry_external_url 'https://registry.mydomain.com' registry_nginx['listen_port'] = 80 registry_nginx['listen_https'] = false gitlab_rails['gitlab_shell_ssh_port'] = 2222 gitlab_rails['gitlab_email_from'] = 'lab#mydomain.com' gitlab_rails['gitlab_email_reply_to'] = 'lab#mydomain.com' ports: - 2222:22 volumes: - gitlab_config:/etc/gitlab - gitlab_logs:/var/log/gitlab - gitlab_data:/var/opt/gitlab networks: - traefik-net deploy: labels: traefik.enable: "port" traefik.frontend.rule: 'Host: lab.mydomain.com, Host: registry.mydomain.com' traefik.port: 80 placement: constraints: - node.role == manager runner: image: 'gitlab/gitlab-runner:v1.11.4' volumes: - gitlab_runner_config:/etc/gitlab-runner - /var/run/docker.sock:/var/run/docker.sock volumes: gitlab_config: gitlab_logs: gitlab_data: gitlab_runner_config: networks: traefik-net: external: true traefik-net is an overlay network So when I deploy using docker stack deploy and visit lab.mydomain.com, i get the Gateway Timeout error. 
When I execute curl localhost within the gitlab container, it seems to work fine. Not sure what the problem is, any pointers would be appreciated
Turns out all I had to do was set the traefik label, traefik.docker.network to traefik-net, see https://github.com/containous/traefik/issues/1254