I currently try to deploy an image build on a gitlab ci/cd to registry.
runner and gitlab containers are setup into the gitlab network.
However, I couldn't make it work.
Here are my configs :
# gitlab-runner.toml
concurrent = 1
check_interval = 0
[[runners]]
name = "runner1"
url = "http://gitlab"
token = "t4ihZ8Tc4Kxy5i5EgHYt"
executor = "docker"
[runners.docker]
host = ""
tls_verify = false
image = "ruby:2.1"
privileged = false
disable_cache = false
shm_size = 0
network_mode = "gitlab"
# gitlab.rb
external_url 'https://gitlab.domain.com/'
gitlab_rails['initial_root_password'] = File.read('/run/secrets/gitlab_root_password')
nginx['listen_https'] = false
nginx['listen_port'] = 80
nginx['redirect_http_to_https'] = false
letsencrypt['enable'] = false
gitlab_rails['smtp_enable'] = true
gitlab_rails['smtp_address'] ="smtp.domain.com"
gitlab_rails['smtp_port'] = 587
gitlab_rails['smtp_user_name'] = "gitlab"
gitlab_rails['smtp_password'] = "password"
gitlab_rails['smtp_domain'] = "domain.com"
gitlab_rails['smtp_authentication'] = "login"
gitlab_rails['smtp_enable_starttls_auto'] = true
gitlab_rails['smtp_openssl_verify_mode'] = 'peer'
gitlab_rails['gitlab_email_from'] = 'gitlab@domain.com'
gitlab_rails['gitlab_email_reply_to'] = 'noreply@domain.com'
# gitlab-compose.yml
version: "3.6"
services:
gitlab:
image: gitlab/gitlab-ce:latest
volumes:
- gitlab_data:/var/opt/gitlab
- gitlab_logs:/var/log/gitlab
- gitlab_config:/etc/gitlab
shm_size: '256m'
environment:
GITLAB_OMNIBUS_CONFIG: "from_file('/omnibus_config.rb')"
GITLAB_ROOT_EMAIL: "contact@domain.com"
GITLAB_ROOT_PASSWORD: "password"
configs:
- source: gitlab
target: /omnibus_config.rb
secrets:
- gitlab_root_password
deploy:
placement:
constraints:
- node.labels.role == compute
labels:
- "traefik.enable=true"
- "traefik.docker.network=traefik-public"
- traefik.constraint-label=traefik-public
- "traefik.http.services.gitlab.loadbalancer.server.port=80"
- "traefik.http.routers.gitlab.rule=Host(`gitlab.domain.com`)"
- "traefik.http.routers.gitlab.entrypoints=websecure"
- "traefik.http.routers.gitlab.tls.certresolver=lets-encrypt"
networks:
- gitlab
- traefik-public
configs:
gitlab:
file: ./gitlab.rb
secrets:
gitlab_root_password:
file: ./root_password.txt
volumes:
gitlab_data:
driver: local
gitlab_logs:
driver: local
gitlab_config:
driver: local
networks:
gitlab:
external: true
traefik-public:
external: true
#gitlab-ci
stages:
- gulp_build
- docker_build_deploy
cache:
paths:
- node_modules/
variables:
DEPLOY_USER: $DEPLOY_USER
DEPLOY_TOKEN: $DEPLOY_TOKEN
build app:
stage: gulp_build
image: node:14.17
before_script:
- npm install
script:
- ./node_modules/.bin/gulp build -production
artifacts:
paths:
- public
docker deploy:
stage: docker_build_deploy
image: docker:latest
services:
- name: docker:dind
command: ["--insecure-registry=gitlab"]
before_script:
- docker login -u $DEPLOY_USER -p $DEPLOY_TOKEN gitlab
script:
- echo $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG
- docker build -t gitlab/laurene/domain.com:$CI_COMMIT_REF_SLUG -t gitlab/laurene/domain.com:latest .
- docker push gitlab/laurene/domain.com:$CI_COMMIT_REF_SLUG
- docker push gitlab/laurene/domain.com:latest
Deployment logs :
[0KRunning with gitlab-runner 14.9.1 (bd40e3da)[0;m
[0K on runner1 t4ihZ8Tc[0;m
section_start:1650242087:prepare_executor
[0K[0K[36;1mPreparing the "docker" executor[0;m[0;m
[0KUsing Docker executor with image docker:latest ...[0;m
[0KStarting service docker:dind ...[0;m
[0KPulling docker image docker:dind ...[0;m
[0KUsing docker image sha256:a072474332af3e4cf06e349685c4cea8f9e631f0c5cab5b582f3a3ab4cff9b6a for docker:dind with digest docker#sha256:210076c7772f47831afaf7ff200cf431c6cd191f0d0cb0805b1d9a996e99fb5e ...[0;m
[0KWaiting for services to be up and running...[0;m
[0;33m*** WARNING:[0;m Service runner-t4ihz8tc-project-2-concurrent-0-2cd68d823b0d9914-docker-0 probably didn't start properly.
Health check error:
service "runner-t4ihz8tc-project-2-concurrent-0-2cd68d823b0d9914-docker-0-wait-for-service" timeout
Health check container logs:
Service container logs:
2022-04-18T00:34:50.436194142Z Generating RSA private key, 4096 bit long modulus (2 primes)
2022-04-18T00:34:50.490663718Z ............++++
2022-04-18T00:34:50.549517108Z ...............++++
2022-04-18T00:34:50.549802329Z e is 65537 (0x010001)
2022-04-18T00:34:50.562099799Z Generating RSA private key, 4096 bit long modulus (2 primes)
2022-04-18T00:34:50.965975282Z .....................................................................................................................++++
2022-04-18T00:34:51.033998142Z ..................++++
2022-04-18T00:34:51.034281623Z e is 65537 (0x010001)
2022-04-18T00:34:51.056355164Z Signature ok
2022-04-18T00:34:51.056369034Z subject=CN = docker:dind server
2022-04-18T00:34:51.056460584Z Getting CA Private Key
2022-04-18T00:34:51.065394153Z /certs/server/cert.pem: OK
2022-04-18T00:34:51.067347859Z Generating RSA private key, 4096 bit long modulus (2 primes)
2022-04-18T00:34:51.210090561Z ........................................++++
2022-04-18T00:34:51.491331619Z .................................................................................++++
2022-04-18T00:34:51.491620790Z e is 65537 (0x010001)
2022-04-18T00:34:51.509644008Z Signature ok
2022-04-18T00:34:51.509666918Z subject=CN = docker:dind client
2022-04-18T00:34:51.509757628Z Getting CA Private Key
2022-04-18T00:34:51.519103998Z /certs/client/cert.pem: OK
2022-04-18T00:34:51.594873133Z ip: can't find device 'ip_tables'
2022-04-18T00:34:51.595519686Z ip_tables 32768 3 iptable_mangle,iptable_filter,iptable_nat
2022-04-18T00:34:51.595526296Z x_tables 40960 14 xt_REDIRECT,xt_ipvs,xt_state,xt_policy,iptable_mangle,xt_mark,xt_u32,xt_nat,xt_tcpudp,xt_conntrack,xt_MASQUERADE,xt_addrtype,iptable_filter,ip_tables
2022-04-18T00:34:51.595866717Z modprobe: can't change directory to '/lib/modules': No such file or directory
2022-04-18T00:34:51.597027030Z mount: permission denied (are you root?)
2022-04-18T00:34:51.597064490Z Could not mount /sys/kernel/security.
2022-04-18T00:34:51.597067880Z AppArmor detection and --privileged mode might break.
2022-04-18T00:34:51.597608422Z mount: permission denied (are you root?)
[0;33m*********[0;m
[0KPulling docker image docker:latest ...[0;m
[0KUsing docker image sha256:7417809fdb730b60c1b903077030aacc708677cdf02f2416ce413f38e81ec7e0 for docker:latest with digest docker#sha256:41978d1974f05f80e1aef23ac03040491a7e28bd4551d4b469b43e558341864e ...[0;m
section_end:1650242124:prepare_executor
[0Ksection_start:1650242124:prepare_script
[0K[0K[36;1mPreparing environment[0;m[0;m
Running on runner-t4ihz8tc-project-2-concurrent-0 via fed5cebcc8e6...
section_end:1650242125:prepare_script
[0Ksection_start:1650242125:get_sources
[0K[0K[36;1mGetting source from Git repository[0;m[0;m
[32;1mFetching changes with git depth set to 20...[0;m
Reinitialized existing Git repository in /builds/laurene/oelabs.co/.git/
[32;1mChecking out a63a1f2a as master...[0;m
Removing node_modules/
[32;1mSkipping Git submodules setup[0;m
section_end:1650242128:get_sources
[0Ksection_start:1650242128:restore_cache
[0K[0K[36;1mRestoring cache[0;m[0;m
[32;1mChecking cache for default...[0;m
No URL provided, cache will not be downloaded from shared cache server. Instead a local version of cache will be extracted.[0;m
[32;1mSuccessfully extracted cache[0;m
section_end:1650242129:restore_cache
[0Ksection_start:1650242129:download_artifacts
[0K[0K[36;1mDownloading artifacts[0;m[0;m
[32;1mDownloading artifacts for build app (97)...[0;m
Downloading artifacts from coordinator... ok [0;m id[0;m=97 responseStatus[0;m=200 OK token[0;m=Uvp--J3i
section_end:1650242131:download_artifacts
[0Ksection_start:1650242131:step_script
[0K[0K[36;1mExecuting "step_script" stage of the job script[0;m[0;m
[0KUsing docker image sha256:7417809fdb730b60c1b903077030aacc708677cdf02f2416ce413f38e81ec7e0 for docker:latest with digest docker#sha256:41978d1974f05f80e1aef23ac03040491a7e28bd4551d4b469b43e558341864e ...[0;m
[32;1m$ docker login -u $DEPLOY_USER -p $DEPLOY_TOKEN gitlab[0;m
WARNING! Using --password via the CLI is insecure. Use --password-stdin.
time="2022-04-18T00:35:33Z" level=info msg="Error logging in to endpoint, trying next endpoint" error="Get \"https://gitlab/v2/\": dial tcp 10.0.18.4:443: connect: connection refused"
Get "https://gitlab/v2/": dial tcp 10.0.18.4:443: connect: connection refused
section_end:1650242133:step_script
[0K[31;1mERROR: Job failed: exit code 1
[0;m
Related
There are a lot of answers about this topic, but I cannot find a solution to my problem. Here is my log:
Waiting for services to be up and running...
*** WARNING: Service runner-hgz7smm8-project-3-concurrent-0-c2b622f72cceadc3-docker-0 probably didn't start properly.
Health check error:
service "runner-hgz7smm8-project-3-concurrent-0-c2b622f72cceadc3-docker-0-wait-for-service" timeout
Health check container logs:
Service container logs:
2021-12-07T16:13:47.326235886Z mount: permission denied (are you root?)
2021-12-07T16:13:47.326275450Z Could not mount /sys/kernel/security.
2021-12-07T16:13:47.326284427Z AppArmor detection and --privileged mode might break.
My docker version inside the runner:
root@gitlab-runner-2:~# docker -v
Docker version 20.10.7, build 20.10.7-0ubuntu5.1
Gitlab-runner:
root@gitlab-runner-2:~# gitlab-runner -v
Version: 14.5.1
Git revision: de104fcd
Git branch: 14-5-stable
GO version: go1.13.8
Built: 2021-12-01T15:41:35+0000
OS/Arch: linux/amd64
Runner is an LXD container running inside PROXMOX and is configured like this with "docker" executor:
concurrent = 1
check_interval = 0
[session_server]
session_timeout = 1800
[[runners]]
name = "gitlab-runner-2"
url = "http://gitlab.XXXXXX.com"
token = "XXXXXXXXXX"
executor = "docker"
pre_build_script = "export DOCKER_HOST=tcp://docker:2375"
[runners.custom_build_dir]
[runners.cache]
[runners.cache.s3]
[runners.cache.gcs]
[runners.cache.azure]
[runners.docker]
tls_verify = false
image = "alpine:latest"
privileged = true
disable_entrypoint_overwrite = false
oom_kill_disable = false
disable_cache = false
volumes = ["/cache"]
shm_size = 0
Any advice?
The solution that I've achieved, for GitLab 14.10, to solve those Warnings/Errors was to perform the following changes.
On gitlab-runner config.toml:
concurrent = 1
check_interval = 0
[session_server]
session_timeout = 1800
[[runners]]
name = "runnertothehills"
url = "https://someexample.com/"
token = "aRunnerToken"
executor = "docker"
[runners.docker]
image = "docker:20.10.14"
privileged = true
disable_cache = false
volumes = ["/cache:/cache", "/var/run/docker.sock:/var/run/docker.sock", "/builds:/builds"]
group = 1000
environment = ["DOCKER_AUTH_CONFIG={\"auths\":{\"some.docker.registry.com:12345\":{\"auth\":\"AdockerLoginToken=\"}}}"]
extra_hosts = ["one.extra.host.com:100.111.120.231"]
The main configuration here is the docker executor and the volumes mount point "/var/run/docker.sock:/var/run/docker.sock".
In .gitlab-ci.yml instead of using
service: docker:dind
Use docker commands directly.
Example:
deploy:
stage: deploy
script:
- docker login -u myuser -p my_password
This solved the following problems:
** WARNING: Service runner-3tm987o3-project-131-concurrent-0-ce49f8u8c582bf56-docker-0
probably didn't start properly.
The problem of docker group not found
2022-05-23T14:24:57.167991289Z time="2022-05-23T14:24:57.167893989Z"
level=warning msg="could not change group /var/run/docker.sock to
docker: group docker not found"
and
2022-05-23T14:24:57.168164288Z failed to load listeners: can't create
unix socket /var/run/docker.sock: device or resource busy
I am running a python script to connect to AWS SSM.
My docker-compose has this volume set up:
- ~/.aws/:/home/airflow/.aws
Boto3 Code:
LOCALHOST = 1
SERVICE = 'ssm'
PROFILE = 'profile3'
#File path
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
def get_aws_client(localhost=None):
    """
    Creates a boto3 AWS client for any service.

    :param localhost: Parameter that enables use of roles in localhost.
        When given, it overwrites the module-level LOCALHOST flag.
    :return: AWS client object
    """
    if localhost is not None:
        # NOTE(review): mutating module globals is fragile; passing the flag
        # through explicitly would be safer, kept as-is to preserve behavior.
        globals().update(LOCALHOST=localhost)
    # PROFILE/SERVICE are module-level constants ('profile3' / 'ssm' above).
    boto_object = Boto3AwsClient(localhost=LOCALHOST, profile=PROFILE)
    aws_client = boto_object.aws_client_connect(service=SERVICE)
    return aws_client
It returns:
botocore.exceptions.ProfileNotFound: The config profile (profile3) could not be found
If I run:
docker exec -it webserver bash
And print
cat /home/airflow/.aws/credentials
cat /home/airflow/.aws/config
I see for credentials:
[default]
aws_access_key_id = XXXXXXXXXXXXXX
aws_secret_access_key = XXXXXXXXXXXXXXXXxxxxxxxxxxxxxXXXXXXXXXXX
For config:
[default]
region=eu-west-1
output=json
[profile profile3]
region=eu-west-1
role_arn=arn:aws:iam::333333333333:role/AllowBlablahblah
source_profile=default
[profile profile2]
region=eu-west-1
role_arn=arn:aws:iam::22222222222:role/AllowBliblihblih
source_profile=default
[profile profile1]
region=eu-west-1
role_arn=arn:aws:iam::1111111111111:role/AllowBlubluhbluh
source_profile=default
And I can even run without problems:
aws s3 ls
aws s3 ls --profile profile3
So I guess config and credentials are not really missing, and no format issue as aws cli is working.
I don't know what's going on here. Any idea?
Dockerfile:
FROM apache/airflow:2.1.2-python3.8
ARG AIRFLOW_USER_HOME=/opt/airflow
ENV PYTHONPATH "${PYTHONPATH}:/"
ADD ./environtment_config/docker_src ./environtment_config/docker_src
RUN pip install -r environtment_config/docker_src/requirements.pip
Full docker-compose:
version: '3'
services:
webserver:
image: own-airflow2
command: webserver
ports:
- 8080:8080
healthcheck:
test: [ "CMD", "curl", "--fail", "http://localhost:8080/health" ]
interval: 10s
timeout: 10s
retries: 5
restart: always
build:
context: .
dockerfile: Dockerfile3
env_file:
- ./airflow.env
container_name: webserver
volumes:
- ./database_utils:/database_utils
- ./maintenance:/maintenance
- ./utils:/utils
- ./dags:/opt/airflow/dags
- ./logs:/opt/airflow/logs
- ./airflow_sqlite:/opt/airflow
- ~/.aws/:/home/airflow/.aws
scheduler:
image: own-airflow2
command: scheduler
healthcheck:
test: [ "CMD-SHELL", 'airflow jobs check --job-type SchedulerJob --hostname "$${HOSTNAME}"' ]
interval: 10s
timeout: 10s
retries: 5
restart: always
container_name: scheduler
build:
context: .
dockerfile: Dockerfile3
env_file:
- ./airflow.env
volumes:
- ./database_utils:/database_utils
- ./maintenance:/maintenance
- ./utils:/utils
- ./dags:/opt/airflow/dags
- ./logs:/opt/airflow/logs
- ./airflow_sqlite:/opt/airflow
- ~/.aws/:/home/airflow/.aws
depends_on:
- webserver
EDIT:
I forgot to say that I added env vars such as:
#Boto3
AWS_CONFIG_FILE=/home/airflow/.aws/config
AWS_SHARED_CREDENTIALS_FILE=/home/airflow/.aws/credentials
To specify clearly which one is the correct path of the file.
So, I am trying to mount a working directory with project files into a child instance on a gitlab runner in sort of a DinD setup. I want to be able to mount a volume in a docker instance, which would allow me to muck around and test stuff. Like e2e testing and such… without compiling a new container to inject the files I need… Ideally, so I can share data in a DinD environment without having to build a new container for each job that runs…
I tried following (Docker volumes not mounted when using docker:dind (#41227) · Issues · GitLab.org / GitLab FOSS · GitLab) and I have some directories being mounted, but it is not the project data I am looking for.
So, the test jobs, I created a dummy file, and I wish to mount the directory in a container and view the files…
I have a test CI yml, which sort of does what I am looking for. I make test files in the volume I wish to mount, which I would like to see in a directory listing, but sadly do not. In my second attempt at this, I couldn't get the container ID because the labels don't exist on the runner and it always comes up blank… However, the first stages show promise, as it works perfectly on a "shell" runner outside of k8s. But, as soon as I change the tag to use a k8s runner it craps out. I can see old directory files /web and the directory I am mounting, but not the files within it. Weird?
ci.yml
image: docker:stable
services:
- docker:dind
stages:
- compile
variables:
SHARED_PATH: /builds/$CI_PROJECT_PATH/shared/
DOCKER_DRIVER: overlay2
.test: &test
stage: compile
tags:
- k8s-vols
script:
- docker version
- 'export TESTED_IMAGE=$(echo ${CI_JOB_NAME} | sed "s/test //")'
- docker pull ${TESTED_IMAGE}
- 'export SHARED_PATH="$(dirname ${CI_PROJECT_DIR})/shared"'
- echo ${SHARED_PATH}
- echo ${CI_PROJECT_DIR}
- mkdir -p ${SHARED_PATH}
- touch ${SHARED_PATH}/test_file
- touch ${CI_PROJECT_DIR}/test_file2
- find ${SHARED_PATH}
#- find ${CI_PROJECT_DIR}
- docker run --rm -v ${CI_PROJECT_DIR}:/mnt ${TESTED_IMAGE} find /mnt
- docker run --rm -v ${CI_PROJECT_DIR}:/mnt ${TESTED_IMAGE} ls -lR /mnt
- docker run --rm -v ${SHARED_PATH}:/mnt ${TESTED_IMAGE} find /mnt
- docker run --rm -v ${SHARED_PATH}:/mnt ${TESTED_IMAGE} ls -lR /mnt
test alpine: *test
test ubuntu: *test
test centos: *test
testing:
stage: compile
tags:
- k8s-vols
image:
name: docker:stable
entrypoint: ["/bin/sh", "-c"]
script:
# get id of container
- export CONTAINER_ID=$(docker ps -q -f "label=com.gitlab.gitlab-runner.job.id=$CI_JOB_ID" -f "label=com.gitlab.gitlab-runner.type=build")
# get mount name
- export MOUNT_NAME=$(docker inspect $CONTAINER_ID -f "{{ range .Mounts }}{{ if eq .Destination \"/builds/${CI_PROJECT_NAMESPACE}\" }}{{ .Source }}{{end}}{{end}}" | cut -d "/" -f 6)
# run container
- docker run -v $MOUNT_NAME:/builds -w /builds/$CI_PROJECT_NAME --entrypoint=/bin/sh busybox -c "ls -la"
This is the values files I am working with…
image: docker-registry.corp.com/base-images/gitlab-runner:alpine-v13.3.1
imagePullPolicy: IfNotPresent
gitlabUrl: http://gitlab.corp.com
runnerRegistrationToken: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
runnerToken: ""
unregisterRunners: true
terminationGracePeriodSeconds: 3600
concurrent: 5
checkInterval: 10
rbac:
create: true
resources: ["pods", "pods/exec", "secrets"]
verbs: ["get", "list", "watch","update", "create", "delete"]
clusterWideAccess: false
metrics:
enabled: true
runners:
image: docker-registry.corp.com/base-images/docker-dind:v1
imagePullPolicy: "if-not-present"
requestConcurrency: 5
locked: true
tags: "k8s-vols"
privileged: true
secret: gitlab-runner-vols
namespace: gitlab-runner-k8s-vols
pollTimeout: 180
outputLimit: 4096
kubernetes:
volumes:
- type: host_path
volume:
name: docker
host_path: /var/run/docker.sock
mount_path: /var/run/docker.sock
read_only: false
cache: {}
builds: {}
services: {}
helpers:
cpuLimit: 200m
memoryLimit: 256Mi
cpuRequests: 100m
memoryRequests: 128Mi
image: docker-registry.corp.com/base-images/gitlab-runner-helper:x86_64-latest
env:
NAME: VALUE
CI_SERVER_URL: http://gitlab.corp.com
CLONE_URL:
RUNNER_REQUEST_CONCURRENCY: '1'
RUNNER_EXECUTOR: kubernetes
REGISTER_LOCKED: 'true'
RUNNER_TAG_LIST: k8s-vols
RUNNER_OUTPUT_LIMIT: '4096'
KUBERNETES_IMAGE: ubuntu:18.04
KUBERNETES_PRIVILEGED: 'true'
KUBERNETES_NAMESPACE: gitlab-runners-k8s-vols
KUBERNETES_POLL_TIMEOUT: '180'
KUBERNETES_CPU_LIMIT:
KUBERNETES_MEMORY_LIMIT:
KUBERNETES_CPU_REQUEST:
KUBERNETES_MEMORY_REQUEST:
KUBERNETES_SERVICE_ACCOUNT:
KUBERNETES_SERVICE_CPU_LIMIT:
KUBERNETES_SERVICE_MEMORY_LIMIT:
KUBERNETES_SERVICE_CPU_REQUEST:
KUBERNETES_SERVICE_MEMORY_REQUEST:
KUBERNETES_HELPER_CPU_LIMIT:
KUBERNETES_HELPER_MEMORY_LIMIT:
KUBERNETES_HELPER_CPU_REQUEST:
KUBERNETES_HELPER_MEMORY_REQUEST:
KUBERNETES_HELPER_IMAGE:
KUBERNETES_PULL_POLICY:
securityContext:
fsGroup: 65533
runAsUser: 100
resources: {}
affinity: {}
nodeSelector: {}
tolerations: []
envVars:
- name: CI_SERVER_URL
value: http://gitlab.corp.com
- name: CLONE_URL
- name: RUNNER_REQUEST_CONCURRENCY
value: '1'
- name: RUNNER_EXECUTOR
value: kubernetes
- name: REGISTER_LOCKED
value: 'true'
- name: RUNNER_TAG_LIST
value: k8s-vols
- name: RUNNER_OUTPUT_LIMIT
value: '4096'
- name: KUBERNETES_IMAGE
value: ubuntu:18.04
- name: KUBERNETES_PRIVILEGED
value: 'true'
- name: KUBERNETES_NAMESPACE
value: gitlab-runner-k8s-vols
- name: KUBERNETES_POLL_TIMEOUT
value: '180'
- name: KUBERNETES_CPU_LIMIT
- name: KUBERNETES_MEMORY_LIMIT
- name: KUBERNETES_CPU_REQUEST
- name: KUBERNETES_MEMORY_REQUEST
- name: KUBERNETES_SERVICE_ACCOUNT
- name: KUBERNETES_SERVICE_CPU_LIMIT
- name: KUBERNETES_SERVICE_MEMORY_LIMIT
- name: KUBERNETES_SERVICE_CPU_REQUEST
- name: KUBERNETES_SERVICE_MEMORY_REQUEST
- name: KUBERNETES_HELPER_CPU_LIMIT
- name: KUBERNETES_HELPER_MEMORY_LIMIT
- name: KUBERNETES_HELPER_CPU_REQUEST
- name: KUBERNETES_HELPER_MEMORY_REQUEST
- name: KUBERNETES_HELPER_IMAGE
- name: KUBERNETES_PULL_POLICY
hostAliases:
- ip: "10.10.x.x"
hostnames:
- "ch01"
podAnnotations:
prometheus.io/path: "/metrics"
prometheus.io/scrape: "true"
prometheus.io/port: "9252"
podLabels: {}
So, I have made a couple of tweaks to the helm chart. I have added a a volumes section in the config map…
config.toml: |
concurrent = {{ .Values.concurrent }}
check_interval = {{ .Values.checkInterval }}
log_level = {{ default “info” .Values.logLevel | quote }}
{{- if .Values.metrics.enabled }}
listen_address = ‘[::]:9252’
{{- end }}
volumes = ["/builds:/builds"]
#volumes = ["/var/run/docker.sock:/var/run/docker.sock", “/cache”, “/builds:/builds”]
I tried using the last line, which includes the docker sock mount, but when it ran, it complained that it could not find the docker.sock mount (file not found), so I used the builds directory only in this section and, in the values file, added the docker.sock mount. And it seems to work fine for everything else but this mounting thing…
I also saw examples of setting the runner to privileged, but that didn’t seem to do much for me…
when I run the pipeline, this is the output…
So as you can see no files…
Thanks for taking the time to be thorough in your request, it really helps!
I have a selenium grid running under docker-compose on a Jenkins machine. My docker-compose includes a simple web server that serves up a single page application, and a test-runner container that orchestrates tests.
version: "3"
services:
hub:
image: selenium/hub
networks:
- selenium
privileged: true
restart: unless-stopped
container_name: hub
ports:
- "4444:4444"
environment:
- SE_OPTS=-browserTimeout 10 -timeout 20
chrome:
image: selenium/node-chrome-debug
networks:
- selenium
privileged: true
restart: unless-stopped
volumes:
- /dev/shm:/dev/shm
depends_on:
- hub
environment:
- HUB_HOST=hub
- HUB_PORT=4444
- SE_OPTS=-browserTimeout 10 -timeout 20
ports:
- "5900:5900"
firefox:
image: selenium/node-firefox-debug
networks:
- selenium
privileged: true
restart: unless-stopped
volumes:
- /dev/shm:/dev/shm
depends_on:
- hub
environment:
- HUB_HOST=hub
- HUB_PORT=4444
- SE_OPTS=-browserTimeout 10 -timeout 20
ports:
- "5901:5900"
runner:
build:
context: ./
dockerfile: ./python.dockerfile
security_opt:
- seccomp=unconfined
cap_add:
- SYS_PTRACE
command: sleep infinity
networks:
- selenium
volumes:
- ./:/app
depends_on:
- hub
- app
- chrome
- firefox
environment:
HUB_CONNECTION_STRING: http://hub:4444/wd/hub
TEST_DOMAIN: "app"
app:
image: nginx:alpine
networks:
- selenium
volumes:
- ../dist:/usr/share/nginx/html
ports:
- "8081:80"
networks:
selenium:
When my tests run (in the runner container above) I can load the home page as long as I use an ip address -
def test_home_page_loads(self):
    # Resolve the service hostname first and browse by IP; this is the
    # TEST_DOMAIN env var set in docker-compose.  (Was a `//` comment in the
    # paste, which is not valid Python — replaced with `#`.)
    host = socket.gethostbyname(self.test_domain)
    self.driver.get(f"http://{host}")
    # Wait up to 40s for the SPA to render its welcome header.
    header = WebDriverWait(self.driver, 40).until(
        EC.presence_of_element_located((By.ID, 'welcome-message')))
    assert(self.driver.title == "My Page Title")
    assert(header.text == "My Header")
But I can't use the host name app. The following times out -
def test_home_page_with_hostname(self):
    # Browse by compose service name instead of IP; this is the case that
    # times out when nginx has no matching server_name for "app".
    self.driver.get("http://app/")
    email = WebDriverWait(self.driver, 10).until(
        EC.presence_of_element_located((By.ID, 'email')))
The problem I'm facing is that I can't do all this using IP addresses because the web app is connecting to an external IP and I need to configure the API for CORS requests.
I'd assumed the problem was that the chrome container couldn't reach the app container - the issue was that the web server on the app container wasn't serving pages for the hostname I was using. Updating the Nginx conf to include the correct server has fixed the issue.
I can now add the hostname to the access-control-allow-origin settings on the api's that the webpage is using.
I'm attaching a basic working config here for anyone else looking to do something similar.
docker-compose.yml
version: "3"
services:
hub:
image: selenium/hub
networks:
- selenium
privileged: true
restart: unless-stopped
container_name: hub
ports:
- "4444:4444"
environment:
- SE_OPTS=-browserTimeout 10 -timeout 20
chrome:
image: selenium/node-chrome-debug
networks:
- selenium
privileged: true
restart: unless-stopped
volumes:
- /dev/shm:/dev/shm
depends_on:
- hub
environment:
- HUB_HOST=hub
- HUB_PORT=4444
- SE_OPTS=-browserTimeout 10 -timeout 20
ports:
- "5900:5900"
firefox:
image: selenium/node-firefox-debug
networks:
- selenium
privileged: true
restart: unless-stopped
volumes:
- /dev/shm:/dev/shm
depends_on:
- hub
environment:
- HUB_HOST=hub
- HUB_PORT=4444
- SE_OPTS=-browserTimeout 10 -timeout 20
ports:
- "5901:5900"
runner:
build:
context: ./
dockerfile: ./python.dockerfile
security_opt:
- seccomp=unconfined
cap_add:
- SYS_PTRACE
command: sleep infinity
networks:
- selenium
volumes:
- ./:/app
depends_on:
- hub
- webserver
- chrome
- firefox
environment:
HUB_CONNECTION_STRING: http://hub:4444/wd/hub
TEST_DOMAIN: "webserver"
webserver:
image: nginx:alpine
networks:
- selenium
volumes:
- ../dist:/usr/share/nginx/html
- ./nginx_conf:/etc/nginx/conf.d
ports:
- "8081:80"
networks:
selenium:
default.conf
server {
listen 80;
server_name webserver;
location / {
root /usr/share/nginx/html;
index index.html index.htm;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root /usr/share/nginx/html;
}
}
The 'runner' container is based on the docker image from python:3 and includes pytest. A simple working test looks like -
test.py
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
import pytest
import socket
# Fixture for Chrome
@pytest.fixture(scope="class")
def chrome_driver_init(request):
    """Create a remote Chrome driver against the grid hub and attach it,
    plus the test domain, to the requesting test class via request.cls."""
    hub_connection_string = os.getenv('HUB_CONNECTION_STRING')
    test_domain = os.getenv('TEST_DOMAIN')
    chrome_driver = webdriver.Remote(
        command_executor=hub_connection_string,
        desired_capabilities={
            'browserName': 'chrome',
            'version': '',
            "chrome.switches": ["disable-web-security"],
            'platform': 'ANY'})
    request.cls.driver = chrome_driver
    request.cls.test_domain = test_domain
    yield
    # Teardown after all tests in the class have run.
    chrome_driver.close()
@pytest.mark.usefixtures("chrome_driver_init")
class Basic_Chrome_Test:
    """Base class for Chrome tests; the chrome_driver_init fixture
    populates these class attributes via request.cls."""
    driver = None
    test_domain = None
class Test_Atlas(Basic_Chrome_Test):
    """Smoke tests for the SPA served by the webserver container."""

    def test_home_page_loads(self):
        # Browse by the compose service hostname (TEST_DOMAIN env var).
        self.driver.get(f"http://{self.test_domain}")
        # Wait up to 40s for the SPA to render its welcome header.
        header = WebDriverWait(self.driver, 40).until(
            EC.presence_of_element_located((By.ID, 'welcome-message')))
        assert(self.driver.title == "My Page Title")
        assert(header.text == "My Header")
This can be run with something like docker exec -it $(docker-compose ps -q runner) pytest test.py (exec into the runner container and run the tests using pytest).
This framework can then be added to a Jenkins step -
Jenkinsfile
stage('Run Functional Tests') {
steps {
echo 'Running Selenium Grid'
dir("${env.WORKSPACE}/functional_testing") {
sh "/usr/local/bin/docker-compose -f ${env.WORKSPACE}/functional_testing/docker-compose.yml -p ${currentBuild.displayName} run runner ./wait-for-webserver.sh pytest tests/atlas_test.py"
}
}
}
wait-for-webserver.sh
#!/bin/bash
# wait-for-webserver.sh
# Block until the Selenium grid and the nginx webserver are both ready,
# then exec the command passed as arguments.
set -e
# Was cmd="$#" in the paste — "$#" is the argument COUNT, so exec would have
# run e.g. "2" instead of the command; "$@" is the actual argument list.
cmd="$@"

# Wait until the grid status endpoint reports ready=true.
while ! curl -sSL "http://hub:4444/wd/hub/status" 2>&1 \
    | jq -r '.value.ready' 2>&1 | grep "true" >/dev/null; do
  echo 'Waiting for the Grid'
  sleep 1
done

# Wait until the webserver answers with HTTP 200.
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' http://webserver)" != "200" ]]; do
  echo 'Waiting for Webserver'
  sleep 1
done

>&2 echo "Grid & Webserver are ready - executing tests"
exec $cmd
Hope this is useful for someone.
So I'm trying to set up a gitlab-ce instance on docker swarm using traefik as reverse proxy.
This is my proxy stack;
version: '3'
services:
traefik:
image: traefik:alpine
command: --entryPoints="Name:http Address::80 Redirect.EntryPoint:https" --entryPoints="Name:https Address::443 TLS" --defaultentrypoints="http,https" --acme --acme.acmelogging="true" --acme.email="freelyformd@gmail.com" --acme.entrypoint="https" --acme.storage="acme.json" --acme.onhostrule="true" --docker --docker.swarmmode --docker.domain="mydomain.com" --docker.watch --web
ports:
- 80:80
- 443:443
- 8080:8080
networks:
- traefik-net
volumes:
- /var/run/docker.sock:/var/run/docker.sock
deploy:
placement:
constraints:
- node.role == manager
networks:
traefik-net:
external: true
And my gitlab stack
version: '3'
services:
omnibus:
image: 'gitlab/gitlab-ce:latest'
hostname: 'lab.mydomain.com'
environment:
GITLAB_OMNIBUS_CONFIG: |
external_url 'https://lab.mydomain.com'
nginx['listen_port'] = 80
nginx['listen_https'] = false
registry_external_url 'https://registry.mydomain.com'
registry_nginx['listen_port'] = 80
registry_nginx['listen_https'] = false
gitlab_rails['gitlab_shell_ssh_port'] = 2222
gitlab_rails['gitlab_email_from'] = 'lab@mydomain.com'
gitlab_rails['gitlab_email_reply_to'] = 'lab@mydomain.com'
ports:
- 2222:22
volumes:
- gitlab_config:/etc/gitlab
- gitlab_logs:/var/log/gitlab
- gitlab_data:/var/opt/gitlab
networks:
- traefik-net
deploy:
labels:
traefik.enable: "port"
traefik.frontend.rule: 'Host: lab.mydomain.com, Host: registry.mydomain.com'
traefik.port: 80
placement:
constraints:
- node.role == manager
runner:
image: 'gitlab/gitlab-runner:v1.11.4'
volumes:
- gitlab_runner_config:/etc/gitlab-runner
- /var/run/docker.sock:/var/run/docker.sock
volumes:
gitlab_config:
gitlab_logs:
gitlab_data:
gitlab_runner_config:
networks:
traefik-net:
external: true
traefik-net is an overlay network
So when I deploy using docker stack deploy and visit lab.mydomain.com, i get the Gateway Timeout error. When I execute curl localhost within the gitlab container, it seems to work fine. Not sure what the problem is, any pointers would be appreciated
Turns out all I had to do was set the traefik label, traefik.docker.network to traefik-net, see https://github.com/containous/traefik/issues/1254