How to set up GCP Cloud SQL with Terraform - terraform

I would like to set up GCP Cloud SQL with Terraform. I'm reading this doc (https://www.terraform.io/docs/providers/google/r/sql_database_instance.html)
and I executed "terraform plan" with the following code:
resource "google_sql_database_instance" "master" {
name = "terraform-master"
region = "asia-northeasteast1"
database_version = "MYSQL_5_6"
project = "test-141901"
settings {
tier = "db-f1-micro"
replication_type = "SYNCHRONOUS"
backup_configuration {
enabled = true
start_time = "17:00"
}
ip_configuration {
ipv4_enabled = true
}
database_flags {
name = "slow_query_log"
value = "on"
name = "character_set_server"
value = "utf8mb4"
}
}
}
but I cannot set up MULTIPLE database_flags; the plan only picks up one of them:
settings.0.database_flags.#: "1"
settings.0.database_flags.0.name: "character_set_server"
settings.0.database_flags.0.value: "utf8mb4"
How can I set up Cloud SQL with multiple database_flags? I don't understand the "sublist support" mentioned in that document.

You should be able to do this by using multiple database_flags blocks:
resource "google_sql_database_instance" "master" {
name = "terraform-master"
region = "us-central1"
database_version = "MYSQL_5_6"
project = "test-project"
settings {
tier = "db-f1-micro"
replication_type = "SYNCHRONOUS"
backup_configuration {
enabled = true
start_time = "17:00"
}
ip_configuration {
ipv4_enabled = true
}
database_flags {
name = "slow_query_log"
value = "on"
}
database_flags {
name = "character_set_server"
value = "utf8mb4"
}
}
}
Here is the output of terraform plan with the above tf:
+ google_sql_database_instance.master
    database_version:                             "MYSQL_5_6"
    ip_address.#:                                 "<computed>"
    name:                                         "terraform-master"
    project:                                      "test-project"
    region:                                       "us-central1"
    self_link:                                    "<computed>"
    settings.#:                                   "1"
    settings.0.backup_configuration.#:            "1"
    settings.0.backup_configuration.0.enabled:    "true"
    settings.0.backup_configuration.0.start_time: "17:00"
    settings.0.database_flags.#:                  "2"
    settings.0.database_flags.0.name:             "slow_query_log"
    settings.0.database_flags.0.value:            "on"
    settings.0.database_flags.1.name:             "character_set_server"
    settings.0.database_flags.1.value:            "utf8mb4"
    settings.0.ip_configuration.#:                "1"
    settings.0.ip_configuration.0.ipv4_enabled:   "true"
    settings.0.replication_type:                  "SYNCHRONOUS"
    settings.0.tier:                              "db-f1-micro"
    settings.0.version:                           "<computed>"

I want to elaborate on this answer, as I needed to generate the database_flags block based on input. Suppose you have a variable:
variable "database-flags" {
type = "map"
default = {
character_set_server = "utf8mb4"
slow_query_log = "on"
}
}
Using Terraform v0.12.x, this can be written as:
resource "google_sql_database_instance" "master" {
name = "terraform-master"
region = "us-central1"
database_version = "MYSQL_5_6"
project = "test-project"
settings {
tier = "db-f1-micro"
replication_type = "SYNCHRONOUS"
backup_configuration {
enabled = true
start_time = "17:00"
}
ip_configuration {
ipv4_enabled = true
}
dynamic "database_flags" {
iterator = flag
for_each = var.database-flags
content {
name = flag.key
value = flag.value
}
}
}
}
Using the above pattern, you can turn the database-creating part of the code into a module and let the consumer decide which flags should be set, as in the sketch below.
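For example, a consumer of such a module could then pass the flags as an input (a minimal sketch; the module source path and module name are assumptions, the input name matches the variable above):

module "cloudsql" {
  source = "./modules/cloudsql" # hypothetical module path

  database-flags = {
    character_set_server = "utf8mb4"
    slow_query_log       = "on"
  }
}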

Related

Create VPS in GCP via Terraform module using count

I'm stuck on a problem and need help from a Terraform expert. I want to create VPS instances in GCP with count, using a module. How do I correctly create and attach a google_compute_address and google_compute_disk to each VPS with different names?
Any help is appreciated.
Module code:
resource "google_compute_instance" "vps" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = var.volumes
content {
source = attached_disk.value["volume_name"]
}
}
dynamic "network_interface" {
for_each = var.server_network
content {
subnetwork = network_interface.value["subnetwork_name"]
network_ip = network_interface.value["subnetwork_ip"]
dynamic "access_config" {
for_each = network_interface.value.nat_ip ? [1] : []
content {
nat_ip = google_compute_address.static_ip.address
}
}
}
}
}
resource "google_compute_disk" "volume" {
for_each = var.volumes
name = each.value["volume_name"]
type = each.value["volume_type"]
size = each.value["volume_size"]
zone = var.server_datacenter
labels = each.value["volume_labels"]
}
resource "google_compute_address" "static_ip" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
region = var.server_region
}
Usage example:
module "vps-test" {
source = "../module"
credentials_file = "../../../../main/vault/prod/.tf/terraform-bb-prod-ground.json"
server_count = 2
server_name = "example-vps"
server_description = "simple vps for module testing"
server_type = "e2-small"
server_region = "europe-west4"
server_datacenter = "europe-west4-c"
server_labels = {
project = "terraform"
environment = "test"
}
server_metadata = {
groups = "parent_group.child_group"
}
boot_volume_image = "debian-cloud/debian-11"
boot_volume_size = 30
boot_volume_labels = {
environment = "production"
project = "v3"
type = "system"
}
server_tags = ["postgres", "production", "disable-gce-firewall"]
server_delete_protection = true
server_network = {
common_network = {
subnetwork_name = "${data.terraform_remote_state.network.outputs.subnetwork_vpc_production_common_name}"
subnetwork_ip = ""
nat_ip = true
} # },
# custom_network = {
# subnetwork_name = (data.terraform_remote_state.network.outputs.subnetwork_vpc_production_k8s_name)
# subnetwork_ip = ""
# nat_ip = false
# }
}
volumes = {
volume_data1 = {
volume_name = "v3-postgres-saga-import-test-storage"
volume_size = "40"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v3"
type = "storage"
}
},
volume_data2 = {
volume_name = "volume-vpstest2"
volume_size = "20"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v2"
type = "storage"
}
}
}
}
The error now is: "Because google_compute_address.static_ip has "count" set, its attributes must be accessed on specific instances." And I know an error about duplicate disk names will come next.
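One way past that error (a sketch, not from the original thread; it assumes one google_compute_address per VPS, which matches the count = var.server_count on both resources) is to index the address list with count.index inside the instance resource:

dynamic "access_config" {
  for_each = network_interface.value.nat_ip ? [1] : []
  content {
    # pick the address created for this specific instance
    nat_ip = google_compute_address.static_ip[count.index].address
  }
}

If each VPS needs its own copy of the disks, the anticipated name collision could similarly be avoided by folding count.index into the disk names.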

Terraform: How to set the Amazon Athena "Query result location" with Terraform

Please assist me if anybody knows how to set the Athena query result location via Terraform.
My code looks like this:
resource "aws_athena_workgroup" "athena_query_result" {
name = var.name
configuration {
enforce_workgroup_configuration = true
publish_cloudwatch_metrics_enabled = true
result_configuration {
output_location = "s3://athena-query-location/output/"
}
}
}
You need to configure it with the aws_athena_workgroup resource:
resource "aws_athena_workgroup" "example" {
name = "example"
configuration {
enforce_workgroup_configuration = true
publish_cloudwatch_metrics_enabled = true
result_configuration {
output_location = "s3://${aws_s3_bucket.example.bucket}/output/"
encryption_configuration {
encryption_option = "SSE_KMS"
kms_key_arn = aws_kms_key.example.arn
}
}
}
}
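The example references an S3 bucket and a KMS key defined elsewhere in the configuration; a minimal sketch of those two resources (the names are illustrative, not from the original answer):

resource "aws_s3_bucket" "example" {
  bucket = "athena-query-location" # illustrative name; S3 bucket names must be globally unique
}

resource "aws_kms_key" "example" {
  description = "Key used to encrypt Athena query results"
}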

gcp cloud sql instance times out after 15 mins - "waiting for state to become 'DONE' (last state: 'RUNNING')"

Should I increase the timeout? The database is up and running and everything works; it's just that terraform times out and then does not adopt the created database into its state.
On subsequent runs it errors out saying an instance with that name already exists, although terraform created it to begin with.
I'm creating it via this module: https://registry.terraform.io/modules/GoogleCloudPlatform/sql-db/google/latest/submodules/postgresql using the postgresql submodule.
The creation works and I can connect to it and run operations, but obviously no further actions run on it, such as creating additional database users, databases, etc.
Configuration:
module "postgresql-db" {
source = "GoogleCloudPlatform/sql-db/google//modules/postgresql"
version = "8.0.0"
name = var.environment
random_instance_name = true
database_version = "POSTGRES_14"
project_id = var.gcp_project
zone = var.gcp_zone
region = var.gcp_region
tier = "db-f1-micro"
db_name = "yeo"
deletion_protection = false
ip_configuration = {
ipv4_enabled = true
require_ssl = false
authorized_networks = [{
name = "public"
value = "0.0.0.0/0"
}]
}
}
I don't think it has anything to do with the module itself; I looked at the source code, and it merely attempts to create a google_sql_database_instance like so:
resource "google_sql_database_instance" "default" {
provider = google-beta
project = var.project_id
name = local.master_instance_name
database_version = var.database_version
region = var.region
encryption_key_name = var.encryption_key_name
deletion_protection = var.deletion_protection
settings {
tier = var.tier
activation_policy = var.activation_policy
availability_type = var.availability_type
dynamic "backup_configuration" {
for_each = [var.backup_configuration]
content {
binary_log_enabled = false
enabled = lookup(backup_configuration.value, "enabled", null)
start_time = lookup(backup_configuration.value, "start_time", null)
location = lookup(backup_configuration.value, "location", null)
point_in_time_recovery_enabled = lookup(backup_configuration.value, "point_in_time_recovery_enabled", false)
transaction_log_retention_days = lookup(backup_configuration.value, "transaction_log_retention_days", null)
dynamic "backup_retention_settings" {
for_each = local.retained_backups != null || local.retention_unit != null ? [var.backup_configuration] : []
content {
retained_backups = local.retained_backups
retention_unit = local.retention_unit
}
}
}
}
dynamic "ip_configuration" {
for_each = [local.ip_configurations[local.ip_configuration_enabled ? "enabled" : "disabled"]]
content {
ipv4_enabled = lookup(ip_configuration.value, "ipv4_enabled", null)
private_network = lookup(ip_configuration.value, "private_network", null)
require_ssl = lookup(ip_configuration.value, "require_ssl", null)
dynamic "authorized_networks" {
for_each = lookup(ip_configuration.value, "authorized_networks", [])
content {
expiration_time = lookup(authorized_networks.value, "expiration_time", null)
name = lookup(authorized_networks.value, "name", null)
value = lookup(authorized_networks.value, "value", null)
}
}
}
}
dynamic "insights_config" {
for_each = var.insights_config != null ? [var.insights_config] : []
content {
query_insights_enabled = true
query_string_length = lookup(insights_config.value, "query_string_length", 1024)
record_application_tags = lookup(insights_config.value, "record_application_tags", false)
record_client_address = lookup(insights_config.value, "record_client_address", false)
}
}
disk_autoresize = var.disk_autoresize
disk_size = var.disk_size
disk_type = var.disk_type
pricing_plan = var.pricing_plan
dynamic "database_flags" {
for_each = var.database_flags
content {
name = lookup(database_flags.value, "name", null)
value = lookup(database_flags.value, "value", null)
}
}
user_labels = var.user_labels
location_preference {
zone = var.zone
}
maintenance_window {
day = var.maintenance_window_day
hour = var.maintenance_window_hour
update_track = var.maintenance_window_update_track
}
}
lifecycle {
ignore_changes = [
settings[0].disk_size
]
}
timeouts {
create = var.create_timeout
update = var.update_timeout
delete = var.delete_timeout
}
depends_on = [null_resource.module_depends_on]
}
Any ideas on how to bypass this?
As @SebastianG mentioned in his comment, the issue was fixed by increasing the timeout to 30 minutes, since the resource completes after ~18 minutes.
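With this module, that timeout can be raised through its timeout inputs; a minimal sketch based on the create_timeout variable visible in the module source above (the value is illustrative):

module "postgresql-db" {
  source  = "GoogleCloudPlatform/sql-db/google//modules/postgresql"
  version = "8.0.0"

  # ... settings as in the question ...

  # raise the create timeout past the ~18 minutes the instance takes to provision
  create_timeout = "30m"
}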

unknown block type dns_config

I am getting the error "Blocks of type "dns_config" are not expected here" in my Terraform
main.tf file. Here is my code.
I am trying this in GCP.
The code breaks at the dns_config section.
provider "google" {
  project     = var.project
  region      = var.region
  version     = "4.22.0"
  credentials = var.credentials
}

resource "google_container_cluster" "primary" {
  name               = "${var.service-name}-${lower(var.site-id)}"
  location           = var.region
  node_locations     = [var.zone]
  network            = var.vpc-id
  subnetwork         = var.subnet-id
  enable_autopilot   = true
  initial_node_count = var.initial-node-count

  dns_config {
    cluster_dns        = "CLOUD_DNS"
    cluster_dns_domain = "cluster.qg${var.site-id}stream"
    cluster_dns_scope  = "CLUSTER_SCOPE"
  }

  ip_allocation_policy {
    cluster_secondary_range_name  = var.subnet-pod-ip-range-name
    services_secondary_range_name = var.subnet-service-ip-range-name
  }

  dynamic "release_channel" {
    for_each = var.release-channel != null ? [{ channel : var.release-channel }] : []
    content {
      channel = var.release-channel
    }
  }

  logging_service    = var.logging_service
  monitoring_service = var.monitoring_service
}

terraform {
  backend "pg" {}
}
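One thing worth checking (an assumption, not stated in the original question): the provider is pinned to 4.22.0, and the "not expected here" error means that release's schema has no dns_config block, so relaxing the version constraint to a newer 4.x release may resolve it. A sketch using the modern required_providers syntax:

terraform {
  backend "pg" {}

  required_providers {
    google = {
      source = "hashicorp/google"
      # hypothetical constraint; check the provider changelog for the
      # release that added dns_config to google_container_cluster
      version = ">= 4.25.0"
    }
  }
}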

Launch instance on service deployment

I am using Terraform to deploy an ECS EC2 cluster on AWS.
My pipeline creates a new task definition from docker-compose, then updates the service to use this task definition.
Desired count is 1, deployment_minimum_healthy_percent is 100 and deployment_maximum_percent is 200.
Expected behavior is that the autoscaling group will launch a new EC2 instance to deploy the new task, then kill the old one.
What happens is that I get an error message: "service X was unable to place a task because no container instance met all of its requirements. The closest matching container-instance has insufficient memory available."
No instance is created and the deployment is rolled back. How can I make sure that an extra instance is created when my service is deployed?
Here is my Terraform code:
resource "aws_ecs_cluster" "main_cluster" {
name = var.application_name
tags = {
Environment = var.environment_name
}
}
data "aws_ecs_task_definition" "main_td" {
task_definition = var.application_name
}
resource "aws_ecs_service" "main_service" {
name = var.application_name
cluster = aws_ecs_cluster.main_cluster.id
launch_type = "EC2"
scheduling_strategy = "REPLICA"
task_definition = "${data.aws_ecs_task_definition.main_td.family}:${data.aws_ecs_task_definition.main_td.revision}"
desired_count = var.target_capacity
deployment_minimum_healthy_percent = 100
deployment_maximum_percent = 200
health_check_grace_period_seconds = 10
wait_for_steady_state = false
force_new_deployment = true
load_balancer {
target_group_arn = aws_lb_target_group.main_tg.arn
container_name = var.container_name
container_port = var.container_port
}
ordered_placement_strategy {
type = "binpack"
field = "memory"
}
deployment_circuit_breaker {
enable = true
rollback = true
}
lifecycle {
ignore_changes = [desired_count]
}
tags = {
Environment = var.environment_name
}
}
Auto-scaling group:
data "template_file" "user_data" {
template = "${file("${path.module}/user_data.sh")}"
vars = {
ecs_cluster = "${aws_ecs_cluster.main_cluster.name}"
}
}
resource "aws_launch_configuration" "main_lc" {
name = var.application_name
image_id = var.ami_id
instance_type = var.instance_type
associate_public_ip_address = true
iam_instance_profile = "arn:aws:iam::812844034365:instance-profile/ecsInstanceRole"
security_groups = ["${aws_security_group.main_sg.id}"]
root_block_device {
volume_size = "30"
volume_type = "gp3"
}
user_data = "${data.template_file.user_data.rendered}"
}
resource "aws_autoscaling_policy" "main_asg_policy" {
name = "${var.application_name}-cpu-scale-policy"
policy_type = "TargetTrackingScaling"
autoscaling_group_name = aws_autoscaling_group.main_asg.name
estimated_instance_warmup = 10
target_tracking_configuration {
predefined_metric_specification {
predefined_metric_type = "ASGAverageCPUUtilization"
}
target_value = 40.0
}
}
resource "aws_autoscaling_group" "main_asg" {
name = var.application_name
launch_configuration = aws_launch_configuration.main_lc.name
min_size = var.target_capacity
max_size = var.target_capacity * 2
health_check_type = "EC2"
health_check_grace_period = 10
default_cooldown = 30
desired_capacity = var.target_capacity
vpc_zone_identifier = data.aws_subnet_ids.subnets.ids
wait_for_capacity_timeout = "3m"
instance_refresh {
strategy = "Rolling"
preferences {
min_healthy_percentage = 100
}
}
}
The module is published here: https://registry.terraform.io/modules/hboisgibault/ecs-cluster/aws/latest
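One direction worth exploring (a sketch, not from the original thread) is to let ECS drive the Auto Scaling group through a capacity provider with managed scaling, so a task that cannot be placed triggers a scale-out instead of a rollback:

resource "aws_ecs_capacity_provider" "main_cp" {
  name = var.application_name

  auto_scaling_group_provider {
    auto_scaling_group_arn = aws_autoscaling_group.main_asg.arn

    managed_scaling {
      status          = "ENABLED"
      target_capacity = 100 # keep the ASG sized to exactly the running tasks
    }
  }
}

resource "aws_ecs_cluster_capacity_providers" "main" {
  cluster_name       = aws_ecs_cluster.main_cluster.name
  capacity_providers = [aws_ecs_capacity_provider.main_cp.name]
}

The service would then declare a capacity_provider_strategy block referencing the capacity provider instead of launch_type = "EC2".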
