Could not translate host name to address (data-lineage / Tokern) - python-3.x

version: '3.6'
services:
  tokern-demo-catalog:
    image: tokern/demo-catalog:latest
    container_name: tokern-demo-catalog
    restart: unless-stopped
    networks:
      - tokern-internal
    volumes:
      - tokern_demo_catalog_data:/var/lib/postgresql/data
    environment:
      POSTGRES_PASSWORD: xxx
      POSTGRES_USER: xxx
      POSTGRES_DB: table1
  tokern-api:
    image: tokern/data-lineage:latest
    container_name: tokern-data-lineage
    restart: unless-stopped
    networks:
      - tokern-internal
    environment:
      CATALOG_PASSWORD: xxx
      CATALOG_USER: xxx
      CATALOG_DB: table1
      CATALOG_HOST: "xxxxxxxx.amazon.com"
      GUNICORN_CMD_ARGS: "--bind 0.0.0.0:4142"
  toker-viz:
    image: tokern/data-lineage-viz:latest
    container_name: tokern-data-lineage-visualizer
    restart: unless-stopped
    networks:
      - tokern-internal
      - tokern-net
    ports:
      - "39284:80"
networks:
  tokern-net: # Exposed by your host.
    # external: true
    name: "tokern-net"
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 10.10.0.0/24
  tokern-internal:
    name: "tokern-internal"
    driver: bridge
    internal: true
    ipam:
      driver: default
      config:
        - subnet: 10.11.0.0/24
volumes:
  tokern_demo_catalog_data:
I am trying to implement data lineage for my database. I followed the documentation at https://pypi.org/project/data-lineage/ and https://tokern.io/docs/data-lineage/installation/, but I am not able to solve this error:
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) could not translate host name "xxx.amazonaws.com" to address: Temporary failure in name resolution
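One likely cause: the tokern-api container is attached only to tokern-internal, which is declared internal: true, so it has no route (and no working DNS path) to anything outside the compose networks and cannot resolve an external amazonaws.com hostname. If the bundled demo catalog is what should be used, a minimal sketch of the fix is to point CATALOG_HOST at the tokern-demo-catalog service name, which Docker's embedded DNS resolves on the shared network; reaching an external RDS host instead would require attaching tokern-api to a non-internal network:
  tokern-api:
    image: tokern/data-lineage:latest
    restart: unless-stopped
    networks:
      - tokern-internal
    environment:
      CATALOG_PASSWORD: xxx
      CATALOG_USER: xxx
      CATALOG_DB: table1
      # Service names resolve via Docker's embedded DNS on a shared network;
      # external hostnames are unreachable from an "internal: true" network.
      CATALOG_HOST: tokern-demo-catalog
      GUNICORN_CMD_ARGS: "--bind 0.0.0.0:4142"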

Related

Taigaio: when I create a project the page doesn't refresh automatically

Hello, I am trying to configure a Linux server to run a Taiga project. When I create, delete, or save a modification, the page doesn't auto-refresh (it just hangs empty), but when I reload it myself (Ctrl + Shift + R or F5) I can see the data. Did I forget to configure something? I have installed RabbitMQ and configured it like the docs say.
Example: (screenshot)
This is my docker-compose.yml:
version: "3.5"
x-environment:
&default-back-environment
# Database settings
POSTGRES_DB: taiga
POSTGRES_USER: taiga
POSTGRES_PASSWORD: taiga
POSTGRES_HOST: taiga-db
# Taiga settings
TAIGA_SECRET_KEY: "taiga-back-secret-key"
TAIGA_SITES_DOMAIN: "192.168.60.25:9000"
TAIGA_SITES_SCHEME: "http"
# Email settings. Uncomment following lines and configure your SMTP server
# EMAIL_BACKEND: "django.core.mail.backends.smtp.EmailBackend"
# DEFAULT_FROM_EMAIL: "no-reply#example.com"
# EMAIL_USE_TLS: "False"
# EMAIL_USE_SSL: "False"
# EMAIL_HOST: "smtp.host.example.com"
# EMAIL_PORT: 587
# EMAIL_HOST_USER: "user"
# EMAIL_HOST_PASSWORD: "password"
# Rabbitmq settings
# Should be the same as in taiga-async-rabbitmq and taiga-events-rabbitmq
RABBITMQ_USER: taiga
RABBITMQ_PASS: taiga
# Telemetry settings
ENABLE_TELEMETRY: "True"
x-volumes:
&default-back-volumes
- taiga-static-data:/taiga-back/static
- taiga-media-data:/taiga-back/media
# - ./config.py:/taiga-back/settings/config.py
services:
taiga-db:
image: postgres:12.3
environment:
POSTGRES_DB: taiga
POSTGRES_USER: taiga
POSTGRES_PASSWORD: taiga
volumes:
- taiga-db-data:/var/lib/postgresql/data
networks:
- taiga
taiga-back:
image: taigaio/taiga-back:latest
environment: *default-back-environment
volumes: *default-back-volumes
networks:
- taiga
depends_on:
- taiga-db
- taiga-events-rabbitmq
- taiga-async-rabbitmq
taiga-async:
image: taigaio/taiga-back:latest
entrypoint: ["/taiga-back/docker/async_entrypoint.sh"]
environment: *default-back-environment
volumes: *default-back-volumes
networks:
- taiga
depends_on:
- taiga-db
- taiga-back
- taiga-async-rabbitmq
taiga-async-rabbitmq:
image: rabbitmq:3-management-alpine
environment:
RABBITMQ_ERLANG_COOKIE: secret-erlang-cookie
RABBITMQ_DEFAULT_USER: taiga
RABBITMQ_DEFAULT_PASS: taiga
RABBITMQ_DEFAULT_VHOST: taiga
volumes:
- taiga-async-rabbitmq-data:/var/lib/rabbitmq
networks:
- taiga
taiga-front:
image: taigaio/taiga-front:latest
environment:
TAIGA_URL: "http://192.168.60.25:9000"
TAIGA_WEBSOCKETS_URL: "ws://192.168.60.25:9000"
networks:
- taiga
# volumes:
# - ./conf.json:/usr/share/nginx/html/conf.json
taiga-events:
image: taigaio/taiga-events:latest
environment:
RABBITMQ_USER: taiga
RABBITMQ_PASS: taiga
TAIGA_SECRET_KEY: "taiga-back-secret-key"
networks:
- taiga
depends_on:
- taiga-events-rabbitmq
taiga-events-rabbitmq:
image: rabbitmq:3-management-alpine
environment:
RABBITMQ_ERLANG_COOKIE: secret-erlang-cookie
RABBITMQ_DEFAULT_USER: taiga
RABBITMQ_DEFAULT_PASS: taiga
RABBITMQ_DEFAULT_VHOST: taiga
volumes:
- taiga-events-rabbitmq-data:/var/lib/rabbitmq
networks:
- taiga
taiga-protected:
image: taigaio/taiga-protected:latest
environment:
MAX_AGE: 360
SECRET_KEY: "taiga-back-secret-key"
networks:
- taiga
taiga-gateway:
image: nginx:1.19-alpine
ports:
- "9000:80"
volumes:
- ./taiga-gateway/taiga.conf:/etc/nginx/conf.d/default.conf
- taiga-static-data:/taiga/static
- taiga-media-data:/taiga/media
networks:
- taiga
depends_on:
- taiga-front
- taiga-back
- taiga-events
volumes:
taiga-static-data:
taiga-media-data:
taiga-db-data:
taiga-async-rabbitmq-data:
taiga-events-rabbitmq-data:
networks:
taiga:
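Taiga's live refresh is driven by the taiga-events websocket: the browser connects to ws://192.168.60.25:9000/events through taiga-gateway, and if that path is broken everything works except automatic updates, which matches the symptom. A way to isolate it, as a debugging sketch only (the 8888 port for taiga-events is an assumption here, as is bypassing the gateway):
  # Temporary override: expose taiga-events directly and point the frontend at
  # it. If live refresh then works, the gateway's /events proxy (websocket
  # upgrade headers in taiga.conf) is the problem rather than RabbitMQ.
  taiga-events:
    image: taigaio/taiga-events:latest
    ports:
      - "8888:8888"  # assumption: taiga-events listens on 8888 by default
  taiga-front:
    environment:
      TAIGA_WEBSOCKETS_URL: "ws://192.168.60.25:8888"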

ECONNREFUSED at TCPConnectWrap.afterConnect NodeJS

I created a Node.js app that uses a URI to connect to RabbitMQ. Both are containerized with Docker and created from a docker-compose file. After running "docker-compose up", the Node.js app returns an error:
Error: connect ECONNREFUSED X.X.X.X:5672
    at TCPConnectWrap.afterConnect [as oncomplete] (node:net:1133:16) {
  errno: -111,
  code: 'ECONNREFUSED',
  syscall: 'connect',
  address: '172.26.0.4',
  port: 5672
}
When I start the api-server locally (not as a container, but as a Node application), the connection to the containerized RabbitMQ server is established without any problems.
My rabbitmq.conf file looks like this:
default_vhost = /
default_user = guest
default_pass = guest
default_user_tags.administrator = true
default_permissions.configure = .*
default_permissions.read = .*
default_permissions.write = .*
loopback_users = none
listeners.tcp.default = 5672
management.listener.port = 15672
management.listener.ssl = false
management.load_definitions = /etc/rabbitmq/definitions.json
URI for connecting:
{
  "mongoURI": "mongodb://mongo:27017",
  "amqpURI": "amqp://guest:guest@rabbitmq:5672"
}
As you can see, the hostname matches the service name in the docker-compose file.
Finally, the docker-compose file:
version: "3.8"
services:
react-app:
image: react-app
stdin_open: true
ports:
- "3000:3000"
networks:
- mern-app
api-server:
image: api-server
ports:
- "5000:5000"
networks:
- mern-app
depends_on:
- mongo
- rabbitmq
process-schedular:
image: process-schedular
ports:
- "5005:5005"
networks:
- mern-app
depends_on:
- mongo
- rabbitmq
mongo:
image: mongo:3.6.19-xenial
ports:
- "27017:27017"
networks:
- mern-app
volumes:
- mongo-data:/data/db
rabbitmq:
image: rabbitmq:3-management
hostname: rabbitmq
volumes:
- ./server/amqp/docker/enabled_plugins:/etc/rabbitmq/enabled_plugins
- ./server/amqp/docker/definitions.json:/etc/rabbitmq/definitions.json
- ./server/amqp/docker/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf
ports:
- "5672:5672"
- "15672:15672"
networks:
- mern-app
networks:
mern-app:
driver: bridge
volumes:
mongo-data:
driver: local
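ECONNREFUSED on the container network while a local run works is typically a startup race: depends_on only orders container start; it does not wait for RabbitMQ to finish booting, so api-server tries to connect before the broker is listening (when run locally, the broker has long been up). A minimal sketch of one fix, assuming a Compose version that supports service_healthy conditions (the Compose Spec restores what file format 3.x had dropped):
services:
  rabbitmq:
    image: rabbitmq:3-management
    healthcheck:
      # rabbitmq-diagnostics ping succeeds once the broker accepts connections
      test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
  api-server:
    image: api-server
    depends_on:
      rabbitmq:
        condition: service_healthy  # wait for healthy, not merely started
An application-side retry loop around the amqp connect call achieves the same robustness without relying on Compose support.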

Grafana Loki does not trigger or push alerts to Alertmanager

I have configured PLG (Promtail, Loki & Grafana) on an AWS EC2 instance for log management. Loki uses the BoltDB shipper with an AWS S3 store.
Grafana - 7.4.5,
Loki - 2.2,
Promtail - 2.2,
AlertManager - 0.21
The issue I am facing is that Loki does not trigger or push alerts to Alertmanager. I cannot see any alert on the Alertmanager dashboard, though I can run a LogQL query in Grafana which shows that the condition for triggering an alert was met.
The following is a screenshot of my query on Grafana.
LogQL Query Screenshot
The following are my configs.
Docker Compose
$ cat docker-compose.yml
version: "3.4"
services:
alertmanager:
image: prom/alertmanager:v0.21.0
container_name: alertmanager
command:
- '--config.file=/etc/alertmanager/config.yml'
- '--storage.path=/alertmanager'
volumes:
- ./config/alertmanager/alertmanager.yml:/etc/alertmanager/config.yml
ports:
- 9093:9093
restart: unless-stopped
logging:
driver: "json-file"
options:
max-file: "5"
max-size: "10m"
tag: "{{.Name}}"
networks:
- loki-br
loki:
image: grafana/loki:2.2.0-amd64
container_name: loki
volumes:
- ./config/loki/loki.yml:/etc/config/loki.yml:ro
- ./config/loki/rules/rules.yml:/etc/loki/rules/rules.yml
entrypoint:
- /usr/bin/loki
- -config.file=/etc/config/loki.yml
ports:
- "3100:3100"
depends_on:
- alertmanager
restart: unless-stopped
logging:
driver: "json-file"
options:
max-file: "5"
max-size: "10m"
tag: "{{.Name}}"
networks:
- loki-br
grafana:
image: grafana/grafana:7.4.5
container_name: grafana
volumes:
- ./config/grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml
- ./config/grafana/defaults.ini:/usr/share/grafana/conf/defaults.ini
- grafana:/var/lib/grafana
ports:
- "3000:3000"
depends_on:
- loki
restart: unless-stopped
logging:
driver: "json-file"
options:
max-file: "5"
max-size: "10m"
tag: "{{.Name}}"
networks:
- loki-br
promtail:
image: grafana/promtail:2.2.0-amd64
container_name: promtail
volumes:
- /var/lib/docker/containers:/var/lib/docker/containers
- /var/log:/var/log
- ./config/promtail/promtail.yml:/etc/promtail/promtail.yml:ro
command: -config.file=/etc/promtail/promtail.yml
restart: unless-stopped
logging:
driver: "json-file"
options:
max-file: "5"
max-size: "10m"
tag: "{{.Name}}"
networks:
- loki-br
nginx:
image: nginx:latest
container_name: nginx
volumes:
- ./config/nginx/nginx.conf:/etc/nginx/nginx.conf
- ./config/nginx/default.conf:/etc/nginx/conf.d/default.conf
- ./config/nginx/loki.conf:/etc/nginx/conf.d/loki.conf
- ./config/nginx/ssl:/etc/ssl
ports:
- "80:80"
- "443:443"
logging:
driver: "json-file"
options:
max-file: "5"
max-size: "10m"
loki-url: http://localhost:3100/loki/api/v1/push
loki-external-labels: job=containerlogs
tag: "{{.Name}}"
depends_on:
- grafana
networks:
- loki-br
networks:
loki-br:
driver: bridge
ipam:
config:
- subnet: 192.168.0.0/24
volumes:
grafana: {}
Loki Config
$ cat config/loki/loki.yml
auth_enabled: false
server:
  http_listen_port: 3100
ingester:
  lifecycler:
    address: 127.0.0.1
    ring:
      kvstore:
        store: inmemory
      replication_factor: 1
    final_sleep: 0s
  chunk_idle_period: 1h       # Any chunk not receiving new logs in this time will be flushed
  max_chunk_age: 1h           # All chunks will be flushed when they hit this age, default is 1h
  chunk_target_size: 1048576  # Loki will attempt to build chunks of up to 1.5MB, flushing first if chunk_idle_period or max_chunk_age is reached first
  chunk_retain_period: 30s    # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
  max_transfer_retries: 0     # Chunk transfers disabled
schema_config:
  configs:
    - from: 2020-11-20
      store: boltdb-shipper
      #object_store: filesystem
      object_store: s3 # Config for AWS S3 storage.
      schema: v11
      index:
        prefix: index_loki_
        period: 24h
storage_config:
  boltdb_shipper:
    active_index_directory: /tmp/loki/boltdb-shipper-active
    cache_location: /tmp/loki/boltdb-shipper-cache
    cache_ttl: 24h # Can be increased for faster performance over longer query periods, uses more disk space
    shared_store: s3 # Config for AWS S3 storage.
  #filesystem:
  #  directory: /tmp/loki/chunks
  # Config for AWS S3 storage.
  aws:
    s3: s3://eu-west-1/loki # Uses AWS IAM roles on AWS EC2 instance.
    region: eu-west-1
compactor:
  working_directory: /tmp/loki/boltdb-shipper-compactor
  shared_store: aws
limits_config:
  reject_old_samples: true
  reject_old_samples_max_age: 168h
chunk_store_config:
  max_look_back_period: 0s
table_manager:
  retention_deletes_enabled: true
  retention_period: 720h
ruler:
  storage:
    type: local
    local:
      directory: /etc/loki/rules
  rule_path: /tmp/loki/rules-temp
  evaluation_interval: 1m
  alertmanager_url: http://alertmanager:9093
  ring:
    kvstore:
      store: inmemory
  enable_api: true
  enable_alertmanager_v2: true
Loki Rules
$ cat config/loki/rules/rules.yml
groups:
  - name: rate-alerting
    rules:
      - alert: HighLogRate
        expr: |
          sum by (job, compose_service)
            (rate({job="containerlogs"}[1m]))
          > 60
        for: 1m
        labels:
          severity: warning
          team: devops
          category: logs
        annotations:
          title: "High LogRate Alert"
          description: "something is logging a lot"
          impact: "impact"
          action: "action"
          dashboard: "https://grafana.com/service-dashboard"
          runbook: "https://wiki.com"
          logurl: "https://grafana.com/log-explorer"
AlertManager config
$ cat config/alertmanager/alertmanager.yml
global:
  resolve_timeout: 5m
route:
  group_by: ['alertname', 'severity', 'instance']
  group_wait: 45s
  group_interval: 10m
  repeat_interval: 12h
  receiver: 'email-notifications'
receivers:
  - name: email-notifications
    email_configs:
      - to: me@example.com
        from: 'alerts@example.com'
        smarthost: smtp.gmail.com:587
        auth_username: alerts@example.com
        auth_identity: alerts@example.com
        auth_password: PassW0rD
        send_resolved: true
Let me know if I am missing something. I followed Ruan Bekker's blog to set things up.
If Loki is running in single tenant mode, the required tenant ID is fake (yes, we know this might seem alarming, but it's totally fine; no, it can't be changed):
mkdir /etc/loki/rules/fake
mkdir /tmp/loki/rules-temp/fake
Then copy your rule files into /etc/loki/rules/fake.
So you have to add a fake sub-directory to the rule directory in single-tenant mode, and everything works perfectly.
https://grafana.com/docs/loki/latest/alerting/#interacting-with-the-ruler
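Applied to the compose file above, the simplest form of the fix is to mount the rules file into that tenant directory directly:
  loki:
    volumes:
      - ./config/loki/loki.yml:/etc/config/loki.yml:ro
      # note the extra fake/ path segment expected in single-tenant mode
      - ./config/loki/rules/rules.yml:/etc/loki/rules/fake/rules.yml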

How to create a keyspace for Cassandra using docker-compose v3

I am trying to create a keyspace using docker-compose v3, but it is not working. My docker-compose.yaml looks like the following:
version: '3'
services:
  cassandra:
    image: cassandra:latest
    networks:
      - default
    ports:
      - "9042:9042"
    volumes:
      - ../compi${COMPI}/data/cassandra:/var/lib/cassandra
      - ../../sql:/compi/sql
      - ../compi${COMPI}/docker-entrypoint-initdb.d:/compi/docker-entrypoint-initdb.d:ro
    healthcheck:
      test: ["CMD-SHELL", "[ $$(nodetool statusgossip) = running ]"]
      interval: 30s
      timeout: 10s
      retries: 5
  compi:
    environment:
      - DOCKER=true
    depends_on:
      - cassandra
    links:
      - cassandra
    build:
      context: ../..
      dockerfile: ./docker.local/compi/Dockerfile
    volumes:
      - ../config:/compi/config
      - ../compi${COMPI}/log:/compi/log
      - ../compi${COMPI}/data:/compi/data
    ports:
      - "717${compi}:717${compi}"
volumes:
  data:
  config:
My docker-entrypoint-initdb.d/init.cql looks like the following (note the colon after 'class', which the original was missing):
CREATE KEYSPACE IF NOT EXISTS sample WITH REPLICATION = {
  'class' : 'SimpleStrategy', 'replication_factor' : 1 }
  AND DURABLE_WRITES = true;
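Note that the official cassandra image, unlike postgres or mysql, does not execute scripts from a docker-entrypoint-initdb.d directory, so mounting init.cql there has no effect on its own. One common workaround, sketched below with a hypothetical cassandra-init service, is a short-lived sidecar that retries cqlsh until Cassandra accepts connections:
  cassandra-init:
    image: cassandra:latest  # reused only for its cqlsh client
    depends_on:
      - cassandra
    volumes:
      - ../compi${COMPI}/docker-entrypoint-initdb.d/init.cql:/init.cql:ro
    # keep retrying until the cassandra service answers CQL on 9042, then exit
    entrypoint: ["/bin/sh", "-c", "until cqlsh cassandra -f /init.cql; do echo 'waiting for cassandra'; sleep 5; done"]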

Gitlab 'Gateway Timeout' behind traefik proxy

So I'm trying to set up a gitlab-ce instance on Docker Swarm using traefik as a reverse proxy.
This is my proxy stack:
version: '3'
services:
  traefik:
    image: traefik:alpine
    command: --entryPoints="Name:http Address::80 Redirect.EntryPoint:https" --entryPoints="Name:https Address::443 TLS" --defaultentrypoints="http,https" --acme --acme.acmelogging="true" --acme.email="freelyformd@gmail.com" --acme.entrypoint="https" --acme.storage="acme.json" --acme.onhostrule="true" --docker --docker.swarmmode --docker.domain="mydomain.com" --docker.watch --web
    ports:
      - 80:80
      - 443:443
      - 8080:8080
    networks:
      - traefik-net
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
    deploy:
      placement:
        constraints:
          - node.role == manager
networks:
  traefik-net:
    external: true
And my gitlab stack:
version: '3'
services:
  omnibus:
    image: 'gitlab/gitlab-ce:latest'
    hostname: 'lab.mydomain.com'
    environment:
      GITLAB_OMNIBUS_CONFIG: |
        external_url 'https://lab.mydomain.com'
        nginx['listen_port'] = 80
        nginx['listen_https'] = false
        registry_external_url 'https://registry.mydomain.com'
        registry_nginx['listen_port'] = 80
        registry_nginx['listen_https'] = false
        gitlab_rails['gitlab_shell_ssh_port'] = 2222
        gitlab_rails['gitlab_email_from'] = 'lab@mydomain.com'
        gitlab_rails['gitlab_email_reply_to'] = 'lab@mydomain.com'
    ports:
      - 2222:22
    volumes:
      - gitlab_config:/etc/gitlab
      - gitlab_logs:/var/log/gitlab
      - gitlab_data:/var/opt/gitlab
    networks:
      - traefik-net
    deploy:
      labels:
        traefik.enable: "port"
        traefik.frontend.rule: 'Host: lab.mydomain.com, Host: registry.mydomain.com'
        traefik.port: 80
      placement:
        constraints:
          - node.role == manager
  runner:
    image: 'gitlab/gitlab-runner:v1.11.4'
    volumes:
      - gitlab_runner_config:/etc/gitlab-runner
      - /var/run/docker.sock:/var/run/docker.sock
volumes:
  gitlab_config:
  gitlab_logs:
  gitlab_data:
  gitlab_runner_config:
networks:
  traefik-net:
    external: true
traefik-net is an overlay network.
So when I deploy using docker stack deploy and visit lab.mydomain.com, I get the Gateway Timeout error. When I execute curl localhost within the gitlab container, it seems to work fine. Not sure what the problem is; any pointers would be appreciated.
Turns out all I had to do was set the traefik label traefik.docker.network to traefik-net, see https://github.com/containous/traefik/issues/1254
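Applied to the gitlab stack above, that is one extra deploy label on the omnibus service, telling Traefik which of the service's networks to route requests over:
    deploy:
      labels:
        traefik.docker.network: "traefik-net"  # route via the shared overlay network
        traefik.frontend.rule: 'Host: lab.mydomain.com, Host: registry.mydomain.com'
        traefik.port: 80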
