I have 3 projects in Gitlab.
Frontend
Backend
Deployment
Each project has a separate pipeline definition containing its CI pipeline; using the multi-project pipeline concept, each one invokes the deployment project's pipeline to deploy its module.
Frontend
image: frontend:runner1.1

# Cache npm modules between jobs, keyed per branch.
cache:
  key: ${CI_COMMIT_REF_SLUG}
  paths:
    - .npm/

variables:
  GIT_SUBMODULE_STRATEGY: recursive
  # Log4Shell (CVE-2021-44228) mitigation.
  # BUG FIX: the original value was "Dlog4j.formatMsgNoLookups=true" — without
  # the leading "-D" the JVM treats it as a main-class argument, not a system
  # property, so the mitigation never took effect. The property log4j2 honors
  # is "log4j2.formatMsgNoLookups".
  JAVA_OPTS: "-Dlog4j2.formatMsgNoLookups=true"
  # Environment-variable form of the same mitigation (log4j >= 2.10).
  LOG4J_FORMAT_MSG_NO_LOOKUPS: "true"

stages:
  - VersionCheck
  - Static Analysis
  - Test
  - SonarQube
  - Tag
  - Version
  - Build
  - Deploy
# Hidden base job: builds the frontend Docker image and pushes it to the
# GitLab registry. Consumers supply IMAGE_TAG (and environment) via `extends`.
.build:
  stage: Build
  image: google/cloud-sdk
  services:
    - docker:dind
  before_script:
    # Provision registry pull credentials for docker.
    - mkdir -p $HOME/.docker && echo $DOCKER_AUTH_CONFIG > $HOME/.docker/config.json
  script:
    - echo $CI_REGISTRY_USER
    - echo $CI_REGISTRY
    - echo ${IMAGE_TAG}
    # SECURITY FIX: feed the password on stdin instead of `-p` so the secret
    # is not visible in the process list or shell trace output.
    - echo "$CI_REGISTRY_PASSWORD" | docker login -u "$CI_REGISTRY_USER" --password-stdin "$CI_REGISTRY"
    - docker build -t $IMAGE_TAG . --build-arg REACT_APP_ENV=${CI_COMMIT_BRANCH} --build-arg REACT_APP_BACKEND_API=${REACT_APP_BACKEND_API} --build-arg REACT_APP_GOOGLE_CLIENT_ID=${REACT_APP_GOOGLE_CLIENT_ID}
    - docker push $IMAGE_TAG
# Fails a merge request into sandbox unless package.json's version on the
# source branch is >= the version already on sandbox.
VersionCheck:
  stage: VersionCheck
  allow_failure: false
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event" && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == "sandbox"'
  before_script:
    # Make both branch heads available locally for `git show`.
    - git fetch origin $CI_MERGE_REQUEST_TARGET_BRANCH_NAME:$CI_MERGE_REQUEST_TARGET_BRANCH_NAME
    - git fetch origin $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:$CI_MERGE_REQUEST_SOURCE_BRANCH_NAME
  script:
    # Extract the "version" field from package.json on each branch.
    - deployed_version=`git show $CI_MERGE_REQUEST_TARGET_BRANCH_NAME:package.json | sed -nE 's/^\\s*\"version\"\:\ \"(.*?)\",$/\\1/p'`
    - new_version=`git show $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:package.json | sed -nE 's/^\\s*\"version\"\:\ \"(.*?)\",$/\\1/p'`
    # BUG FIX: this block used a folded scalar (`- >`), which joins the
    # same-indented lines with spaces, collapsing the if/else/fi onto one
    # line of invalid shell. A literal scalar (`- |`) keeps the newlines.
    - |
      echo "sandbox version: $deployed_version"
      echo "feature version: $new_version"
      # NOTE(review): this check also passes when the versions are equal
      # (sort -V puts the deployed version first) — confirm that is intended.
      if [ "$(printf '%s\n' "$deployed_version" "$new_version" | sort -V | head -n1)" = "$deployed_version" ]; then
        echo "version is incremented"
      else
        echo "Version need to be incremented on the feature branch. See the README.md file"
        exit 1
      fi
# ESLint static analysis, publishing a JUnit report for the MR widget.
eslint:
  stage: Static Analysis
  allow_failure: false
  before_script:
    # Reproducible install from the lockfile, using the shared .npm cache.
    - npm ci --cache .npm --prefer-offline
  script:
    - echo "Start building App"
    # BUG FIX: the redundant `npm install` that ran here was removed —
    # `npm ci` above has already installed dependencies, and a subsequent
    # `npm install` can silently rewrite package-lock.json.
    - npm run eslint-report
    - echo "Build successfully!"
  artifacts:
    reports:
      junit: coverage/eslint-report.xml
    paths:
      - coverage/eslint-report.json
# Unit tests, publishing a JUnit report and coverage artifacts.
test:
  stage: Test
  allow_failure: false
  rules:
    # BUG FIX: the original rules were a merge_request_event rule followed by
    # an unconditional `- when: always`, which made the first rule redundant
    # and produced duplicate branch + merge-request pipelines for the same
    # commit. This is the standard de-duplication recipe: run MR pipelines,
    # skip branch pipelines for branches that have an open MR.
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
    - if: '$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS'
      when: never
    - when: always
  before_script:
    - npm ci --cache .npm --prefer-offline
  script:
    - echo "Testing App"
    # BUG FIX: `npm ci` above already installed dependencies; the redundant
    # `npm install` (which can mutate the lockfile) was removed.
    - npm run generate-tests-report
    - echo "Test successfully!"
  artifacts:
    reports:
      junit: junit.xml
    paths:
      - test-report.xml
      - coverage/lcov.info
# SonarQube scan; allow_failure keeps scanner/server outages from blocking
# the pipeline.
sonarqube-check:
stage: SonarQube
allow_failure: true
image:
name: sonarsource/sonar-scanner-cli:4.6
entrypoint: [""]
variables:
SONAR_USER_HOME: "${CI_PROJECT_DIR}/.sonar" # Defines the location of the analysis task cache
GIT_DEPTH: "0" # Tells git to fetch all the branches of the project, required by the analysis task
cache:
key: "${CI_JOB_NAME}"
paths:
- .sonar/cache
script:
# wait for the quality results, true/false
- sonar-scanner -X -Dsonar.qualitygate.wait=false -Dsonar.branch.name=$CI_COMMIT_BRANCH -Dsonar.login=$SONAR_TOKEN -Dsonar.projectVersion=$(npm run print-version --silent)
# NOTE(review): `only` is the legacy keyword; other jobs in this file use
# `rules` — consider migrating for consistency.
only:
- merge_requests
- release
- master
- develop
- sandbox
# Writes VERSION (ref slug + package.json version) into a dotenv artifact so
# later jobs can consume it as a CI variable.
Version:
stage: Version
allow_failure: false
only:
- sandbox
- develop
- release
- master
script:
# Extract the "version" field from package.json.
- VERSION=`sed -nE 's/^\\s*\"version\"\:\ \"(.*?)\",$/\\1/p' package.json`
- echo "VERSION=$CI_COMMIT_REF_SLUG$VERSION" >> build.env
artifacts:
reports:
# dotenv report exposes VERSION to jobs that depend on this one.
dotenv: build.env
# Environment-specific build jobs. Each pins IMAGE_TAG using the VERSION
# variable exported by the Version job (dotenv artifact pulled in through
# `dependencies`) and reuses the hidden .build template.
# sandbox branch -> sb environment image.
build:sb:
stage: Build
allow_failure: false
environment:
name: sb
variables:
IMAGE_TAG: $CI_REGISTRY_IMAGE:$VERSION
TF_ENV: "sb"
extends:
- .build
only:
- sandbox
dependencies:
- Version
# develop branch -> dev environment image.
build:dev:
stage: Build
allow_failure: false
environment:
name: dev
variables:
IMAGE_TAG: $CI_REGISTRY_IMAGE:$VERSION
TF_ENV: "dev"
extends:
- .build
only:
- develop
dependencies:
- Version
# release branch -> qa environment image.
build:qa:
stage: Build
allow_failure: false
environment:
name: qa
variables:
IMAGE_TAG: $CI_REGISTRY_IMAGE:$VERSION
TF_ENV: "qa"
extends:
- .build
only:
- release
dependencies:
- Version
# master branch -> prod environment image.
build:prod:
stage: Build
allow_failure: false
environment:
name: prod
variables:
IMAGE_TAG: $CI_REGISTRY_IMAGE:$VERSION
TF_ENV: "prod"
extends:
- .build
only:
- master
dependencies:
- Version
# Multi-project trigger jobs: each deploys one environment by triggering the
# downstream deployment project and waiting for its result (strategy: depend).
deployment:sandbox:
rules:
# Never deploy from feature branches.
- if: "$CI_COMMIT_BRANCH =~ /^feature/"
when: never
- if: $CI_COMMIT_BRANCH == "sandbox"
variables:
TF_ENV: "sb"
MODULE: "frontend"
# Forward the dotenv VERSION (from the Version job) to the downstream pipeline.
VERSION: $VERSION
stage: Deploy
allow_failure: false
trigger:
# NOTE(review): this job triggers the full project path while the dev/qa/prod
# jobs below trigger `deployment` — presumably all four should point at the
# same downstream project; confirm which path is correct.
project: in-silico-prediction/isp/isp-deployment
strategy: depend
needs:
- job: Version
artifacts: true
- job: build:sb
artifacts: false
deployment:dev:
rules:
- if: "$CI_COMMIT_BRANCH =~ /^feature/"
when: never
- if: $CI_COMMIT_BRANCH == "develop"
variables:
TF_ENV: "dev"
MODULE: "frontend"
VERSION: $VERSION
stage: Deploy
allow_failure: false
trigger:
project: deployment
strategy: depend
needs:
- job: Version
artifacts: true
- job: build:dev
artifacts: false
deployment:qa:
rules:
- if: "$CI_COMMIT_BRANCH =~ /^feature/"
when: never
- if: $CI_COMMIT_BRANCH == "release"
variables:
TF_ENV: "qa"
MODULE: "frontend"
VERSION: $VERSION
stage: Deploy
allow_failure: false
trigger:
project: deployment
strategy: depend
needs:
- job: Version
artifacts: true
- job: build:qa
artifacts: false
deployment:prod:
rules:
- if: "$CI_COMMIT_BRANCH =~ /^feature/"
when: never
- if: $CI_COMMIT_BRANCH == "master"
variables:
TF_ENV: "prod"
MODULE: "frontend"
VERSION: $VERSION
stage: Deploy
allow_failure: false
trigger:
project: deployment
strategy: depend
needs:
- job: Version
artifacts: true
- job: build:prod
artifacts: false
The deployment stage will invoke the downstream project. The backend project also has the same pipeline definition. Now both the Frontend and Backend projects will trigger the deployment project independently.
The deployment project should wait for the trigger from both project and run only 1 time which deploys both frontend and backend in single run into the environment.
For Merge train, is it possible to configure 2 different project merge request
As long as those MRs are from the same project, yes — the whole idea of a merge train is, as in this example, to list multiple MRs and combine them.
However, that would trigger the FE (FrontEnd) deployment, then it would trigger FE and BE deployments, which is not what you want.
I would rather use a scheduled cron job which detects when both FE and BE have been deployed, for instance by querying the date of their respective published images (latest FE and BE in the registry): if that date is more recent than the latest completed deployment, for both the FE and BE builds, then the deployment cron job would trigger an actual and full deployment.
Related
I have this gitlab-ci.yml:
# Rebuild the image on the default branch when Docker-related inputs change;
# otherwise available as a manual job.
build-docker:
stage: build-docker
rules:
- if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
changes:
- app/Dockerfile
- app/requirements.txt
when: always
# Manual fallback; allow_failure lets the pipeline pass without running it.
- when: manual
allow_failure: true
image:
name: alpine
entrypoint: [""]
script:
- echo 'Git Pulling, building and restarting'
# Deploy on any change under app/ on the default branch.
deploy:
stage: deploy
rules:
- if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
changes:
- app/**/*
when: always
- when: manual
allow_failure: true
image:
name: alpine
entrypoint: [""]
script:
- echo 'Git Pulling and restarting'
My problem is that I don't need to run deploy if the only changed files are app/Dockerfile and/or app/requirements.txt (because the build job already ran and does the same as the deploy stage, and more), but I do need it to run if changes happen to any other file inside the app folder.
I already tried this in the deploy stage:
- if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH'
changes:
- "app/!(Dockerfile)"
- "app/!(requirements.txt)"
- app/**/*
when: always
- when: manual
allow_failure: true
But this doesn't work as expected.
I have a variable stored into env file :
stages:
- build
- execute
# Writes PROD=TRUE into a dotenv artifact (PowerShell Add-Content, hence the
# Windows runner tag).
build:
stage: build
script:
- Add-Content properties.env -Value PROD="TRUE"
artifacts:
reports:
dotenv: properties.env
tags:
- windows
Now i wanted to read this value in a stage and then decide whether to execute that stage or not.
I did the following, but it isn't working:
execute:
stage: execute
rules:
# NOTE(review): this is why it "isn't working" — dotenv variables from a
# previous job's artifacts are NOT available in `rules:`. Rules are
# evaluated when the pipeline is created, before any job runs, so $PROD
# is always unset at this point.
- if: $PROD == "TRUE"
when: always
script:
- echo "happy"
tags:
- windows
dependencies:
- build
Any help ?
Thanks.
You can try and use the approach described in "Pass an environment variable to another job":
# Export BUILD_VERSION through a dotenv artifact; deploy_one receives it as
# an environment variable via `dependencies`.
build:
stage: build
script:
- echo "BUILD_VERSION=hello" >> build.env
artifacts:
reports:
dotenv: build.env
deploy_one:
stage: deploy
script:
- echo "$BUILD_VERSION" # Output is: 'hello'
dependencies:
- build
environment:
name: customer1
deployment_tier: production
Currently I have this script in my .gitlab-ci.yml file:
# Node 16 pipeline: install deps, run unit tests, deploy MR builds to Vercel.
image: node:16
cache:
paths:
- .npm
- cache/Cypress
- node_modules
stages:
- build
- deploy
- test
# Installs node_modules and passes them on to later jobs as an artifact.
install:dependencies:
stage: build
script:
- yarn install
artifacts:
paths:
- node_modules/
only:
- merge_requests
test:unit:
stage: test
script: yarn test --ci --coverage
# Starts as soon as install:dependencies finishes, regardless of stage order.
needs: ["install:dependencies"]
artifacts:
when: always
paths:
- coverage
expire_in: 30 days
only:
- merge_requests
# Deploys a preview to Vercel and records the deployment URL as an artifact.
deploy-to-vercel:
stage: deploy
image: node:16
script:
- npm i -g vercel
- DEPLOYMENT_URL=$(vercel -t $VERCEL_TOKEN --confirm)
- echo $DEPLOYMENT_URL > vercel_deployment_url.txt
- cat vercel_deployment_url.txt
artifacts:
when: on_success
paths:
- vercel_deployment_url.txt
only:
- merge_requests
I need to trigger a pipeline to an environment called playground, but only when a pipeline from the test environment is finished; when a pipeline to master happens, I don't want to mirror it to the playground environment.
Everything is deployed to vercel, and the project is powered by Next JS.
I need to set up a demo server, which is a copy of the production server, but pointed at a different API. I want to run 2 separate build/deploy whenever the main branch is updated to accomplish this, as I need to run the demo build (Vue) to use different env variables pointing at the demo API (which will also need a dual deploy). Is this possible, and how would I go about it? Here's the existing:
stages:
- build
- deploy
- test
# Bundled GitLab security scanner templates (provide the sast jobs below).
include:
- template: Security/SAST.gitlab-ci.yml
- template: Security/Secret-Detection.gitlab-ci.yml
# Production build of the Quasar SPA on the main branch.
build-main:
  image: node:12
  stage: build
  only:
    - main
  script:
    # BUG FIX: the package was written as "#quasar/cli"; in YAML a " #"
    # inside a plain scalar starts a comment, so the command actually ran as
    # a bare `yarn global add`. The intended scoped package is @quasar/cli.
    - yarn global add @quasar/cli
    - rm package-lock.json
    - yarn
    - npm run build:prod
  artifacts:
    expire_in: 1 hour
    paths:
      - dist
# Rsync the built SPA to the production web root (runner tagged `deploy`).
deploy-main:
stage: deploy
only:
- main
script:
- echo $CI_PROJECT_DIR
- whoami
- sudo rsync -rav --exclude '.git' $CI_PROJECT_DIR/dist/spa/. /var/www/console
tags:
- deploy
# Beta build of the Quasar SPA on the beta branch.
build-beta:
  image: node:12
  stage: build
  only:
    - beta
  script:
    # BUG FIX: "#quasar/cli" started a YAML comment (" #"), leaving a bare
    # `yarn global add`. The intended scoped package is @quasar/cli.
    - yarn global add @quasar/cli
    - rm package-lock.json
    - yarn
    - npm run build:beta
  artifacts:
    expire_in: 1 hour
    paths:
      - dist
# Rsync the built SPA to the beta web root (runner tagged `deploy`).
deploy-beta:
stage: deploy
only:
- beta
script:
- echo $CI_PROJECT_DIR
- whoami
# - sudo /usr/local/bin/rsync -rav --exclude '.git' $CI_PROJECT_DIR/dist/spa/. /var/www/console.beta
- sudo rsync -rav --exclude '.git' $CI_PROJECT_DIR/dist/spa/. /var/www/console.beta
tags:
- deploy
# Dev build of the Quasar SPA on the dev branch.
build-dev:
  image: node:12
  stage: build
  only:
    - dev
  script:
    # BUG FIX: "#quasar/cli" started a YAML comment (" #"), leaving a bare
    # `yarn global add`. The intended scoped package is @quasar/cli.
    - yarn global add @quasar/cli
    - rm package-lock.json
    - yarn
    - npm run build:dev
  artifacts:
    expire_in: 1 hour
    paths:
      - dist
# Rsync the built SPA to the dev web root (runner tagged `deploy`).
deploy-dev:
stage: deploy
only:
- dev
script:
- echo $CI_PROJECT_DIR
- whoami
- sudo rsync -rav --exclude '.git' $CI_PROJECT_DIR/dist/spa/. /var/www/console.dev
tags:
- deploy
# Collects the SAST report produced by the included security templates.
sast:
stage: test
artifacts:
reports:
sast: gl-sast-report.json
paths:
- 'gl-sast-report.json'
You can do something like the following ,that will create 2 build jobs and 2 deploy jobs, that are linked together using needs:
# Two parallel build->deploy chains on main, linked with `needs` so each
# deploy starts as soon as its own build finishes.
stages:
- build
- deploy
build-main:
stage: build
script: echo
only:
- main
artifacts:
expire_in: 1 hour
paths:
- dist
deploy-main:
stage: deploy
script: echo
only:
- main
needs:
# Waits only for build-main (and downloads its artifacts).
- job: build-main
artifacts: true
build-demo:
stage: build
script: echo
only:
- main
artifacts:
expire_in: 1 hour
paths:
- dist
deploy-demo:
stage: deploy
script: echo
only:
- main
needs:
- job: build-demo
artifacts: true
you might also want to extract common jobs to hidden jobs to simplify your pipeline, for instance:
stages:
  - build
  - deploy

# Hidden base job holding the settings shared by every build job.
.build:
  stage: build
  artifacts:
    expire_in: 1 hour
    paths:
      - dist

build-main:
  extends: .build
  # BUG FIX: `only` takes an array of refs; the bare scalar `only: main`
  # is rejected by the CI lint.
  only:
    - main
  # other specific codes
Also you might want to improve readability and management of workflow rules like the following, which will centralize rules logics:
# Centralized workflow rules: set per-branch flags once, key job rules off them.
workflow:
  rules:
    - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH
      variables:
        DEPLOY_PROD: "true"
        # BUG FIX: was misspelled "DEPOLOY_DEMO" — any job keyed on
        # $DEPLOY_DEMO would never run.
        DEPLOY_DEMO: "true"
      # Some other conditional variables
    - if: $CI_COMMIT_REF_NAME == "dev"
      variables:
        DEPLOY_DEV: "true"
    - when: always

build-main:
  rules:
    - if: $DEPLOY_PROD
  # other jobs
I have four build stages, which are operated manually. What I'd like is to execute the deploy stage, after one build stage is finished. Right now my implementation only works when all four build stages are finished.
Use case: Dev klicks on the environment he wants to build. After build is done, it is deployed to the systems. Deploy should start automatically after build is finished
Q: Is there a way to execute the deploy stage after only one build is done?
My implementation of the build pipelines (simplified):
# Four manual jobs in the `env` stage; allow_failure: false makes each a
# blocking manual action, so later stages wait until it is triggered.
production:
stage: env
script:
- echo build one
when: manual
allow_failure: false
production2:
stage: env
script:
- echo build two
when: manual
allow_failure: false
staging:
stage: env
script:
- echo build three
when: manual
allow_failure: false
staging2:
stage: env
script:
- echo build four
when: manual
allow_failure: false
This is my deploy stage
# Waits for ALL four manual jobs to succeed — this is exactly the behavior
# the question wants to avoid (deploy after just one build).
build:
stage: build
needs: [production, production2, staging, staging2]
when: on_success
script:
- echo do deploy stuff
Many thanks and I wish you a nice day
Maybe something like this help?
build:
stage: build
needs:
# `optional: true` means the job does not require each need to be present
# in the pipeline. NOTE(review): whether this unblocks the job while the
# other manual jobs remain un-triggered should be verified against the
# GitLab version in use.
- job: production
optional: true
- job: production2
optional: true
- job: staging
optional: true
- job: staging2
optional: true
when: on_success
script:
- echo do deploy stuff
So far I've only found a solution that works.
# Working solution: a `connections` fan-out stage with one wrapper job per
# manual env job; each wrapper needs only its own env job, so the shared
# deploy logic (.build) runs as soon as that single job finishes.
stages:
- env
- connections
- build
production:
stage: env
script:
- echo build one
when: manual
allow_failure: false
production2:
stage: env
script:
- echo build two
when: manual
allow_failure: false
staging:
stage: env
script:
- echo build three
when: manual
allow_failure: false
staging2:
stage: env
script:
- echo build four
when: manual
allow_failure: false
# Connections
run:build:production:
extends: .build
stage: connections
needs:
- job: production
run:build:production2:
extends: .build
stage: connections
needs:
- job: production2
run:build:staging:
extends: .build
stage: connections
needs:
- job: staging
run:build:staging2:
extends: .build
stage: connections
needs:
- job: staging2
# Hidden template with the shared deploy script; its `stage: build` is
# overridden by each extending job's `stage: connections`.
.build:
stage: build
script:
- echo do deploy stuff