AWS cli working but boto3 not finding profile - python-3.x

I am running a python script to connect to AWS SSM.
My docker-compose has this volume set up:
- ~/.aws/:/home/airflow/.aws
Boto3 Code:
import os

LOCALHOST = 1
SERVICE = 'ssm'
PROFILE = 'profile3'
# File path
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))

def get_aws_client(localhost=None):
    """
    Creates boto3 aws client for any service.
    :param localhost: Parameter that enables use of roles in localhost.
    :return: aws client object
    """
    if localhost is not None:
        globals().update(LOCALHOST=localhost)
    # Boto3AwsClient is the project's own wrapper around boto3
    boto_object = Boto3AwsClient(localhost=LOCALHOST, profile=PROFILE)
    aws_client = boto_object.aws_client_connect(service=SERVICE)
    return aws_client
It returns:
botocore.exceptions.ProfileNotFound: The config profile (profile3) could not be found
If I run:
docker exec -it webserver bash
And print
cat /home/airflow/.aws/credentials
cat /home/airflow/.aws/config
I see for credentials:
[default]
aws_access_key_id = XXXXXXXXXXXXXX
aws_secret_access_key = XXXXXXXXXXXXXXXXxxxxxxxxxxxxxXXXXXXXXXXX
For config:
[default]
region=eu-west-1
output=json
[profile profile3]
region=eu-west-1
role_arn=arn:aws:iam::333333333333:role/AllowBlablahblah
source_profile=default
[profile profile2]
region=eu-west-1
role_arn=arn:aws:iam::22222222222:role/AllowBliblihblih
source_profile=default
[profile profile1]
region=eu-west-1
role_arn=arn:aws:iam::1111111111111:role/AllowBlubluhbluh
source_profile=default
And I can even run these without problems:
aws s3 ls
aws s3 ls --profile profile3
So I guess the config and credentials files are not really missing, and there is no formatting issue either, since the AWS CLI works.
I don't know what's going on here. Any idea?
Dockerfile:
FROM apache/airflow:2.1.2-python3.8
ARG AIRFLOW_USER_HOME=/opt/airflow
ENV PYTHONPATH "${PYTHONPATH}:/"
ADD ./environtment_config/docker_src ./environtment_config/docker_src
RUN pip install -r environtment_config/docker_src/requirements.pip
Full docker-compose:
version: '3'
services:
  webserver:
    image: own-airflow2
    command: webserver
    ports:
      - 8080:8080
    healthcheck:
      test: [ "CMD", "curl", "--fail", "http://localhost:8080/health" ]
      interval: 10s
      timeout: 10s
      retries: 5
    restart: always
    build:
      context: .
      dockerfile: Dockerfile3
    env_file:
      - ./airflow.env
    container_name: webserver
    volumes:
      - ./database_utils:/database_utils
      - ./maintenance:/maintenance
      - ./utils:/utils
      - ./dags:/opt/airflow/dags
      - ./logs:/opt/airflow/logs
      - ./airflow_sqlite:/opt/airflow
      - ~/.aws/:/home/airflow/.aws
  scheduler:
    image: own-airflow2
    command: scheduler
    healthcheck:
      test: [ "CMD-SHELL", 'airflow jobs check --job-type SchedulerJob --hostname "$${HOSTNAME}"' ]
      interval: 10s
      timeout: 10s
      retries: 5
    restart: always
    container_name: scheduler
    build:
      context: .
      dockerfile: Dockerfile3
    env_file:
      - ./airflow.env
    volumes:
      - ./database_utils:/database_utils
      - ./maintenance:/maintenance
      - ./utils:/utils
      - ./dags:/opt/airflow/dags
      - ./logs:/opt/airflow/logs
      - ./airflow_sqlite:/opt/airflow
      - ~/.aws/:/home/airflow/.aws
    depends_on:
      - webserver
EDIT:
I forgot to mention that I also added environment variables such as:
#Boto3
AWS_CONFIG_FILE=/home/airflow/.aws/config
AWS_SHARED_CREDENTIALS_FILE=/home/airflow/.aws/credentials
These point boto3 explicitly at the correct config and credentials file paths.
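One way to narrow this down is to ask boto3 directly, from inside the container and inside the same Python environment the task runs in, which profiles it can actually see. A minimal sketch (my addition; standard boto3 API only):

import os
import boto3

# Print the paths boto3 will actually use (these env vars override the defaults).
print(os.environ.get("AWS_CONFIG_FILE"))
print(os.environ.get("AWS_SHARED_CREDENTIALS_FILE"))

# List the profiles boto3 can resolve from those files.
session = boto3.session.Session()
print(session.available_profiles)

# If 'profile3' shows up above, creating a session with it should work:
ssm = boto3.session.Session(profile_name="profile3").client("ssm")

If profile3 is missing from available_profiles, the process is reading different files (or a different HOME) than the AWS CLI is.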

Related

Grafana Loki does not trigger or push alert on alertmanager

I have configured PLG (Promtail, Grafana & Loki) on an AWS EC2 instance for log management. Loki uses the BoltDB shipper with an AWS S3 store.
Grafana - 7.4.5,
Loki - 2.2,
Promtail - 2.2,
AlertManager - 0.21
The issue I am facing is that Loki does not trigger or push alerts to Alertmanager. I cannot see any alert on the Alertmanager dashboard, even though I can run a LogQL query in Grafana which shows that the condition for triggering an alert was met.
The following is a screenshot of my query on Grafana:
[LogQL Query Screenshot]
The following are my configs.
Docker Compose
$ cat docker-compose.yml
version: "3.4"
services:
alertmanager:
image: prom/alertmanager:v0.21.0
container_name: alertmanager
command:
- '--config.file=/etc/alertmanager/config.yml'
- '--storage.path=/alertmanager'
volumes:
- ./config/alertmanager/alertmanager.yml:/etc/alertmanager/config.yml
ports:
- 9093:9093
restart: unless-stopped
logging:
driver: "json-file"
options:
max-file: "5"
max-size: "10m"
tag: "{{.Name}}"
networks:
- loki-br
loki:
image: grafana/loki:2.2.0-amd64
container_name: loki
volumes:
- ./config/loki/loki.yml:/etc/config/loki.yml:ro
- ./config/loki/rules/rules.yml:/etc/loki/rules/rules.yml
entrypoint:
- /usr/bin/loki
- -config.file=/etc/config/loki.yml
ports:
- "3100:3100"
depends_on:
- alertmanager
restart: unless-stopped
logging:
driver: "json-file"
options:
max-file: "5"
max-size: "10m"
tag: "{{.Name}}"
networks:
- loki-br
grafana:
image: grafana/grafana:7.4.5
container_name: grafana
volumes:
- ./config/grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml
- ./config/grafana/defaults.ini:/usr/share/grafana/conf/defaults.ini
- grafana:/var/lib/grafana
ports:
- "3000:3000"
depends_on:
- loki
restart: unless-stopped
logging:
driver: "json-file"
options:
max-file: "5"
max-size: "10m"
tag: "{{.Name}}"
networks:
- loki-br
promtail:
image: grafana/promtail:2.2.0-amd64
container_name: promtail
volumes:
- /var/lib/docker/containers:/var/lib/docker/containers
- /var/log:/var/log
- ./config/promtail/promtail.yml:/etc/promtail/promtail.yml:ro
command: -config.file=/etc/promtail/promtail.yml
restart: unless-stopped
logging:
driver: "json-file"
options:
max-file: "5"
max-size: "10m"
tag: "{{.Name}}"
networks:
- loki-br
nginx:
image: nginx:latest
container_name: nginx
volumes:
- ./config/nginx/nginx.conf:/etc/nginx/nginx.conf
- ./config/nginx/default.conf:/etc/nginx/conf.d/default.conf
- ./config/nginx/loki.conf:/etc/nginx/conf.d/loki.conf
- ./config/nginx/ssl:/etc/ssl
ports:
- "80:80"
- "443:443"
logging:
driver: "json-file"
options:
max-file: "5"
max-size: "10m"
loki-url: http://localhost:3100/loki/api/v1/push
loki-external-labels: job=containerlogs
tag: "{{.Name}}"
depends_on:
- grafana
networks:
- loki-br
networks:
loki-br:
driver: bridge
ipam:
config:
- subnet: 192.168.0.0/24
volumes:
grafana: {}
Loki Config
$ cat config/loki/loki.yml
auth_enabled: false
server:
  http_listen_port: 3100
ingester:
  lifecycler:
    address: 127.0.0.1
    ring:
      kvstore:
        store: inmemory
      replication_factor: 1
    final_sleep: 0s
  chunk_idle_period: 1h       # Any chunk not receiving new logs in this time will be flushed
  max_chunk_age: 1h           # All chunks will be flushed when they hit this age, default is 1h
  chunk_target_size: 1048576  # Loki will attempt to build chunks up to 1.5MB, flushing first if chunk_idle_period or max_chunk_age is reached first
  chunk_retain_period: 30s    # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
  max_transfer_retries: 0     # Chunk transfers disabled
schema_config:
  configs:
    - from: 2020-11-20
      store: boltdb-shipper
      #object_store: filesystem
      object_store: s3 # Config for AWS S3 storage.
      schema: v11
      index:
        prefix: index_loki_
        period: 24h
storage_config:
  boltdb_shipper:
    active_index_directory: /tmp/loki/boltdb-shipper-active
    cache_location: /tmp/loki/boltdb-shipper-cache
    cache_ttl: 24h       # Can be increased for faster performance over longer query periods, uses more disk space
    shared_store: s3     # Config for AWS S3 storage.
  #filesystem:
  #  directory: /tmp/loki/chunks
  # Config for AWS S3 storage.
  aws:
    s3: s3://eu-west-1/loki # Uses AWS IAM roles on AWS EC2 instance.
    region: eu-west-1
compactor:
  working_directory: /tmp/loki/boltdb-shipper-compactor
  shared_store: aws
limits_config:
  reject_old_samples: true
  reject_old_samples_max_age: 168h
chunk_store_config:
  max_look_back_period: 0s
table_manager:
  retention_deletes_enabled: true
  retention_period: 720h
ruler:
  storage:
    type: local
    local:
      directory: /etc/loki/rules
  rule_path: /tmp/loki/rules-temp
  evaluation_interval: 1m
  alertmanager_url: http://alertmanager:9093
  ring:
    kvstore:
      store: inmemory
  enable_api: true
  enable_alertmanager_v2: true
Loki Rules
$ cat config/loki/rules/rules.yml
groups:
  - name: rate-alerting
    rules:
      - alert: HighLogRate
        expr: |
          sum by (job, compose_service)
            (rate({job="containerlogs"}[1m]))
            > 60
        for: 1m
        labels:
          severity: warning
          team: devops
          category: logs
        annotations:
          title: "High LogRate Alert"
          description: "something is logging a lot"
          impact: "impact"
          action: "action"
          dashboard: "https://grafana.com/service-dashboard"
          runbook: "https://wiki.com"
          logurl: "https://grafana.com/log-explorer"
AlertManager config
$ cat config/alertmanager/alertmanager.yml
global:
  resolve_timeout: 5m
route:
  group_by: ['alertname', 'severity', 'instance']
  group_wait: 45s
  group_interval: 10m
  repeat_interval: 12h
  receiver: 'email-notifications'
receivers:
  - name: email-notifications
    email_configs:
      - to: me@example.com
        from: 'alerts@example.com'
        smarthost: smtp.gmail.com:587
        auth_username: alerts@example.com
        auth_identity: alerts@example.com
        auth_password: PassW0rD
        send_resolved: true
Let me know if I am missing something. I followed Ruan Bekker's blog to set things up.
If Loki is running in single-tenant mode, the required tenant ID is fake (yes, this might seem alarming, but it's totally fine; no, it can't be changed).
mkdir /etc/loki/rules/fake
mkdir /tmp/loki/rules-temp/fake
Copy your rule files into /etc/loki/rules/fake.
So in single-tenant mode you have to add a fake sub-directory to the rules directory; after doing that, everything worked perfectly.
https://grafana.com/docs/loki/latest/alerting/#interacting-with-the-ruler
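Since the config above sets enable_api: true on the ruler, one quick way to confirm the rule group was actually loaded (a sketch of mine, not part of the original answer; it assumes Loki is reachable on localhost:3100 as in the compose file) is to query the ruler API:

import urllib.request

# The ruler lists its loaded rule groups (as YAML) when enable_api is true.
with urllib.request.urlopen("http://localhost:3100/loki/api/v1/rules") as resp:
    print(resp.read().decode())

If the HighLogRate group shows up there under the fake namespace, the ruler found the rules, and any remaining problem is between the ruler and Alertmanager.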

elasticsearch check indices exist returns false on first run

I have a docker-compose running 2 containers, each running its own service: Node and Elasticsearch.
app.js
...
const isElasticReady = await elastic.checkConnection();
if (isElasticReady) {
  const elasticIndex = await elastic.esclient.indices.exists({ index: elastic.index });
  if (!elasticIndex.body) {
    await elastic.createIndex(elastic.index);
    await elastic.setMapping();
    await data.populateDatabase();
  }
}
...
Whenever I run docker-compose up, esclient.indices.exists always returns false, even though the index already exists. As a result, createIndex is called again and a resource_already_exists_exception is thrown.
The strange thing is that I am using nodemon for development, and whenever I make changes during development, esclient.indices.exists returns true. So the problem only happens when I run docker-compose up. I suspect something is happening asynchronously, but I am not sure what.
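If the timing suspicion is right, the usual culprit is that depends_on only waits for the elasticsearch container to start, not for the service inside it to be ready, so the first exists check can run against a node that is still starting up. One way to rule that out is to wait for cluster health before touching the index. A minimal sketch of that wait (in Python purely to illustrate the pattern, since the app itself is Node.js; the host, timeout and function name are my assumptions):

import time
import urllib.request

# Poll the cluster health endpoint until Elasticsearch reports at least yellow,
# then it is safe to call indices.exists / create the index.
def wait_for_elasticsearch(url="http://localhost:9200", timeout=60):
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            health = url + "/_cluster/health?wait_for_status=yellow&timeout=5s"
            with urllib.request.urlopen(health) as resp:
                if resp.status == 200:
                    return True
        except Exception:
            pass
        time.sleep(1)
    return False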
docker-compose.yml (depends_on has been set):
version: '3.6'
services:
  api:
    image: nodeservice/node:10.15.3-alpine
    container_name: nodeservice
    build: .
    ports:
      - 3000:3000
    environment:
      - NODE_ENV=local
      - ES_HOST=elasticsearch
      - NODE_PORT=3000
      - ELASTIC_URL=http://elasticsearch:9200
    volumes:
      - .:/usr/src/app
      - /usr/src/app/node_modules
    command: npm run dev
    links:
      - elasticsearch
    depends_on:
      - elasticsearch
    networks:
      - esnet
  elasticsearch:
    container_name: my_elasticsearch
    image: docker.elastic.co/elasticsearch/elasticsearch:7.0.1
    volumes:
      - esdata:/usr/share/elasticsearch/data
    environment:
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - discovery.type=single-node
    logging:
      driver: none
    ports:
      - 9300:9300
      - 9200:9200
    networks:
      - esnet
volumes:
  esdata:
networks:
  esnet:
Any hints?

Fix DNS on a docker-compose selenium grid so the selenium node connects to a docker-compose hostname

I have a selenium grid running under docker-compose on a Jenkins machine. My docker-compose includes a simple web server that serves up a single page application, and a test-runner container that orchestrates tests.
version: "3"
services:
hub:
image: selenium/hub
networks:
- selenium
privileged: true
restart: unless-stopped
container_name: hub
ports:
- "4444:4444"
environment:
- SE_OPTS=-browserTimeout 10 -timeout 20
chrome:
image: selenium/node-chrome-debug
networks:
- selenium
privileged: true
restart: unless-stopped
volumes:
- /dev/shm:/dev/shm
depends_on:
- hub
environment:
- HUB_HOST=hub
- HUB_PORT=4444
- SE_OPTS=-browserTimeout 10 -timeout 20
ports:
- "5900:5900"
firefox:
image: selenium/node-firefox-debug
networks:
- selenium
privileged: true
restart: unless-stopped
volumes:
- /dev/shm:/dev/shm
depends_on:
- hub
environment:
- HUB_HOST=hub
- HUB_PORT=4444
- SE_OPTS=-browserTimeout 10 -timeout 20
ports:
- "5901:5900"
runner:
build:
context: ./
dockerfile: ./python.dockerfile
security_opt:
- seccomp=unconfined
cap_add:
- SYS_PTRACE
command: sleep infinity
networks:
- selenium
volumes:
- ./:/app
depends_on:
- hub
- app
- chrome
- firefox
environment:
HUB_CONNECTION_STRING: http://hub:4444/wd/hub
TEST_DOMAIN: "app"
app:
image: nginx:alpine
networks:
- selenium
volumes:
- ../dist:/usr/share/nginx/html
ports:
- "8081:80"
networks:
selenium:
When my tests run (in the runner container above) I can load the home page as long as I use an IP address:
def test_home_page_loads(self):
    host = socket.gethostbyname(self.test_domain)  # this is the TEST_DOMAIN env var above
    self.driver.get(f"http://{host}")
    header = WebDriverWait(self.driver, 40).until(
        EC.presence_of_element_located((By.ID, 'welcome-message')))
    assert(self.driver.title == "My Page Title")
    assert(header.text == "My Header")
But I can't use the hostname app. The following times out:
def test_home_page_with_hostname(self):
    self.driver.get("http://app/")
    email = WebDriverWait(self.driver, 10).until(
        EC.presence_of_element_located((By.ID, 'email')))
The problem I'm facing is that I can't do all of this using IP addresses, because the web app is connecting to an external IP and I need to configure the API for CORS requests.
I'd assumed the problem was that the chrome container couldn't reach the app container; the actual issue was that the web server in the app container wasn't serving pages for the hostname I was using. Updating the nginx conf to include the correct server_name fixed the issue.
I can now add the hostname to the access-control-allow-origin settings on the APIs that the web page uses.
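A quick sanity check I'd run from inside the runner container before the selenium tests, to confirm that the service name resolves and that nginx answers for it (my addition; it assumes the working config below, where TEST_DOMAIN is webserver):

import socket
import urllib.request

test_domain = "webserver"  # value of TEST_DOMAIN in the compose file below

# DNS on the compose network should resolve the service name...
print(socket.gethostbyname(test_domain))

# ...and nginx should answer 200 for that hostname once server_name matches it.
with urllib.request.urlopen(f"http://{test_domain}/") as resp:
    print(resp.status)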
I'm attaching a basic working config here for anyone else looking to do something similar.
docker-compose.yml
version: "3"
services:
hub:
image: selenium/hub
networks:
- selenium
privileged: true
restart: unless-stopped
container_name: hub
ports:
- "4444:4444"
environment:
- SE_OPTS=-browserTimeout 10 -timeout 20
chrome:
image: selenium/node-chrome-debug
networks:
- selenium
privileged: true
restart: unless-stopped
volumes:
- /dev/shm:/dev/shm
depends_on:
- hub
environment:
- HUB_HOST=hub
- HUB_PORT=4444
- SE_OPTS=-browserTimeout 10 -timeout 20
ports:
- "5900:5900"
firefox:
image: selenium/node-firefox-debug
networks:
- selenium
privileged: true
restart: unless-stopped
volumes:
- /dev/shm:/dev/shm
depends_on:
- hub
environment:
- HUB_HOST=hub
- HUB_PORT=4444
- SE_OPTS=-browserTimeout 10 -timeout 20
ports:
- "5901:5900"
runner:
build:
context: ./
dockerfile: ./python.dockerfile
security_opt:
- seccomp=unconfined
cap_add:
- SYS_PTRACE
command: sleep infinity
networks:
- selenium
volumes:
- ./:/app
depends_on:
- hub
- webserver
- chrome
- firefox
environment:
HUB_CONNECTION_STRING: http://hub:4444/wd/hub
TEST_DOMAIN: "webserver"
webserver:
image: nginx:alpine
networks:
- selenium
volumes:
- ../dist:/usr/share/nginx/html
- ./nginx_conf:/etc/nginx/conf.d
ports:
- "8081:80"
networks:
selenium:
default.conf
server {
    listen 80;
    server_name webserver;

    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
    }

    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
The 'runner' container is based on the python:3 docker image and includes pytest. A simple working test looks like this:
test.py
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import os
import pytest
import socket

# Fixture for Chrome
@pytest.fixture(scope="class")
def chrome_driver_init(request):
    hub_connection_string = os.getenv('HUB_CONNECTION_STRING')
    test_domain = os.getenv('TEST_DOMAIN')
    chrome_driver = webdriver.Remote(
        command_executor=hub_connection_string,
        desired_capabilities={
            'browserName': 'chrome',
            'version': '',
            "chrome.switches": ["disable-web-security"],
            'platform': 'ANY'})
    request.cls.driver = chrome_driver
    request.cls.test_domain = test_domain
    yield
    chrome_driver.close()

@pytest.mark.usefixtures("chrome_driver_init")
class Basic_Chrome_Test:
    driver = None
    test_domain = None
    pass

class Test_Atlas(Basic_Chrome_Test):
    def test_home_page_loads(self):
        self.driver.get(f"http://{self.test_domain}")
        header = WebDriverWait(self.driver, 40).until(
            EC.presence_of_element_located((By.ID, 'welcome-message')))
        assert(self.driver.title == "My Page Title")
        assert(header.text == "My Header")
This can be run with something like docker exec -it $(docker-compose ps -q runner) pytest test.py (exec into the runner container and run the tests using pytest).
This framework can then be added to a Jenkins step -
Jenkinsfile
stage('Run Functional Tests') {
    steps {
        echo 'Running Selenium Grid'
        dir("${env.WORKSPACE}/functional_testing") {
            sh "/usr/local/bin/docker-compose -f ${env.WORKSPACE}/functional_testing/docker-compose.yml -p ${currentBuild.displayName} run runner ./wait-for-webserver.sh pytest tests/atlas_test.py"
        }
    }
}
wait-for-webserver.sh
#!/bin/bash
# wait-for-webserver.sh

set -e

cmd="$@"

while ! curl -sSL "http://hub:4444/wd/hub/status" 2>&1 \
        | jq -r '.value.ready' 2>&1 | grep "true" >/dev/null; do
    echo 'Waiting for the Grid'
    sleep 1
done

while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' http://webserver)" != "200" ]]; do
    echo 'Waiting for Webserver'
    sleep 1;
done

>&2 echo "Grid & Webserver are ready - executing tests"
exec $cmd
Hope this is useful for someone.

how to create keyspace for cassandra using docker compose v3

I am trying to create a keyspace using docker-compose v3, but it is not working. My docker-compose.yaml looks like the following:
version: '3'
services:
  cassandra:
    image: cassandra:latest
    networks:
      - default
    ports:
      - "9042:9042"
    volumes:
      - ../compi${COMPI}/data/cassandra:/var/lib/cassandra
      - ../../sql:/compi/sql
      - ../compi${COMPI}/docker-entrypoint-initdb.d:/compi/docker-entrypoint-initdb.d:ro
    healthcheck:
      test: ["CMD-SHELL", "[ $$(nodetool statusgossip) = running ]"]
      interval: 30s
      timeout: 10s
      retries: 5
  compi:
    environment:
      - DOCKER=true
    depends_on:
      - cassandra
    links:
      - cassandra
    build:
      context: ../..
      dockerfile: ./docker.local/compi/Dockerfile
    volumes:
      - ../config:/compi/config
      - ../compi${COMPI}/log:/compi/log
      - ../compi${COMPI}/data:/compi/data
    ports:
      - "717${compi}:717${compi}"
volumes:
  data:
  config:
My docker-entrypoint-initdb.d/init.cql looks like the following:
CREATE KEYSPACE IF NOT EXISTS sample
WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }
AND DURABLE_WRITES = true;
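As far as I know, the official cassandra image does not execute CQL files from a docker-entrypoint-initdb.d directory the way the postgres and mysql images do, and here the directory is mounted under /compi/ anyway, so the file is never run. One workaround is to create the keyspace from the dependent service once Cassandra accepts connections. A rough sketch (mine, not from the original post), assuming the cassandra-driver package and the cassandra service name from the compose file:

import time

from cassandra.cluster import Cluster  # pip install cassandra-driver

# Retry until Cassandra accepts connections (startup can take a while),
# then create the keyspace idempotently.
def create_keyspace(host="cassandra", port=9042, retries=30):
    for attempt in range(retries):
        try:
            cluster = Cluster([host], port=port)
            session = cluster.connect()
            session.execute(
                "CREATE KEYSPACE IF NOT EXISTS sample "
                "WITH REPLICATION = {'class': 'SimpleStrategy', 'replication_factor': 1} "
                "AND DURABLE_WRITES = true"
            )
            cluster.shutdown()
            return
        except Exception:
            time.sleep(2)
    raise RuntimeError("Cassandra did not become available")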

Gitlab 'Gateway Timeout' behind traefik proxy

So I'm trying to set up a gitlab-ce instance on Docker Swarm, using Traefik as a reverse proxy.
This is my proxy stack:
version: '3'
services:
  traefik:
    image: traefik:alpine
    command: --entryPoints="Name:http Address::80 Redirect.EntryPoint:https" --entryPoints="Name:https Address::443 TLS" --defaultentrypoints="http,https" --acme --acme.acmelogging="true" --acme.email="freelyformd@gmail.com" --acme.entrypoint="https" --acme.storage="acme.json" --acme.onhostrule="true" --docker --docker.swarmmode --docker.domain="mydomain.com" --docker.watch --web
    ports:
      - 80:80
      - 443:443
      - 8080:8080
    networks:
      - traefik-net
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    deploy:
      placement:
        constraints:
          - node.role == manager
networks:
  traefik-net:
    external: true
And my gitlab stack
version: '3'
services:
  omnibus:
    image: 'gitlab/gitlab-ce:latest'
    hostname: 'lab.mydomain.com'
    environment:
      GITLAB_OMNIBUS_CONFIG: |
        external_url 'https://lab.mydomain.com'
        nginx['listen_port'] = 80
        nginx['listen_https'] = false
        registry_external_url 'https://registry.mydomain.com'
        registry_nginx['listen_port'] = 80
        registry_nginx['listen_https'] = false
        gitlab_rails['gitlab_shell_ssh_port'] = 2222
        gitlab_rails['gitlab_email_from'] = 'lab@mydomain.com'
        gitlab_rails['gitlab_email_reply_to'] = 'lab@mydomain.com'
    ports:
      - 2222:22
    volumes:
      - gitlab_config:/etc/gitlab
      - gitlab_logs:/var/log/gitlab
      - gitlab_data:/var/opt/gitlab
    networks:
      - traefik-net
    deploy:
      labels:
        traefik.enable: "port"
        traefik.frontend.rule: 'Host: lab.mydomain.com, Host: registry.mydomain.com'
        traefik.port: 80
      placement:
        constraints:
          - node.role == manager
  runner:
    image: 'gitlab/gitlab-runner:v1.11.4'
    volumes:
      - gitlab_runner_config:/etc/gitlab-runner
      - /var/run/docker.sock:/var/run/docker.sock
volumes:
  gitlab_config:
  gitlab_logs:
  gitlab_data:
  gitlab_runner_config:
networks:
  traefik-net:
    external: true
traefik-net is an overlay network.
So when I deploy using docker stack deploy and visit lab.mydomain.com, I get a Gateway Timeout error. When I execute curl localhost within the gitlab container, it works fine. Not sure what the problem is; any pointers would be appreciated.
Turns out all I had to do was set the Traefik label traefik.docker.network to traefik-net on the gitlab service; see https://github.com/containous/traefik/issues/1254
