Create SSL crt and map to ELB | Terraform
I am trying to create a self-signed certificate and upload it to an S3 bucket, but how should I map this SSL certificate to my ELB? Do I need to download these .pem files from the S3 bucket and use them in the ELB, and if so, how?
Below is my code.
cert.tf:
resource "random_string" "app_keystore_password" {
length = 16
special = false
}
resource "tls_private_key" "key" {
algorithm = "RSA"
}
resource "tls_self_signed_cert" "cert" {
key_algorithm = "RSA"
private_key_pem = "${tls_private_key.key.private_key_pem}"
validity_period_hours = 87600
allowed_uses = [
"key_encipherment",
"digital_signature",
"server_auth",
]
dns_names = ["*.${var.region}.elb.amazonaws.com"]
subject {
common_name = "*.${var.region}.elb.amazonaws.com"
organization = "ORAG"
province = "STATE"
country = "COUNT"
}
}
data "archive_file" "certs" {
type = "zip"
output_path = "/tmp/certs.zip"
source {
content = "${tls_private_key.key.private_key_pem}"
filename = "privateKey.pem"
}
source {
content = "${tls_self_signed_cert.cert.cert_pem}"
filename = "certificateChain.pem"
}
source {
content = "${tls_self_signed_cert.cert.cert_pem}"
filename = "trustedCertificates.pem"
}
}
resource "tls_self_signed_cert" "public_cert" {
key_algorithm = "RSA"
private_key_pem = "${tls_private_key.key.private_key_pem}"
validity_period_hours = 87600
allowed_uses = [
"key_encipherment",
"digital_signature",
"server_auth",
]
dns_names = ["*.${var.region}.elb.amazonaws.com"]
subject {
common_name = "*.${var.region}.elb.amazonaws.com"
organization = "ORAG"
province = "STATE"
country = "COUNT"
}
}
data "template_file" "configure_system" {
template = "${file("files/configure-system.sh.tpl")}"
vars = {
bucket = "services-${var.aws_account_id}-storage"
app_keystore_password = "${var.app_keystore_password}"
}
}
resource "aws_s3_bucket_object" "configure_system" {
key = "configure-system.sh"
bucket = "services-${var.aws_account_id}-storage"
content = "${data.template_file.configure_system.rendered}"
etag = "${md5(data.template_file.configure_system.rendered)}"
}
resource "aws_s3_bucket_object" "certs" {
source = "/tmp/certs.zip"
bucket = "services-${var.aws_account_id}-storage"
key = "${var.app_certs_archive_path}/certs.zip"
server_side_encryption = "AES256"
}
Below are my aws_lb and aws_lb_listener resource blocks:
resource "aws_lb" "master" {
name = "lb"
security_groups = ["${aws_security_group.sg.id}"]
subnets = [ "${data.aws_subnet.app_subnet_0.id}", "${data.aws_subnet.app_subnet_1.id}" ]
internal = true
tags = {
Name = "ca"
Environment = "${var.environment}"
}
}
resource "aws_lb_listener" "master_lb_listener" {
load_balancer_arn = "${aws_lb.master.arn}"
port = "443"
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-2016-08"
certificate_arn = "WHAT SHOULD BE MY VALUE...?"
default_action {
target_group_arn = "${aws_lb_target_group.master_lb_tg.arn}"
type = "forward"
}
}
You can use aws_acm_certificate to import the self-signed certificate into ACM, and you don't need to download the certificate from S3.
https://www.terraform.io/docs/providers/aws/r/acm_certificate.html
resource "aws_acm_certificate" "cert" {
private_key = "${tls_private_key.key.private_key_pem}"
certificate_body = "${tls_self_signed_cert.public_cert.cert_pem}"
}
and then reference its ARN as certificate_arn in the LB listener:
...
certificate_arn = "${aws_acm_certificate.cert.arn}"
...
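Putting the two together, the listener from the question would look like this (a sketch that simply reuses the question's own resource names):
resource "aws_lb_listener" "master_lb_listener" {
  load_balancer_arn = "${aws_lb.master.arn}"
  port              = "443"
  protocol          = "HTTPS"
  ssl_policy        = "ELBSecurityPolicy-2016-08"
  certificate_arn   = "${aws_acm_certificate.cert.arn}"

  default_action {
    target_group_arn = "${aws_lb_target_group.master_lb_tg.arn}"
    type             = "forward"
  }
}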
I'm stuck on a problem and need Terraform help. I want to create VPSes in GCP using count in a module. How do I correctly create and attach a google_compute_address and google_compute_disk to each VPS with different names?
Any help, please.
Module code:
resource "google_compute_instance" "vps" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = var.volumes
content {
source = attached_disk.value["volume_name"]
}
}
dynamic "network_interface" {
for_each = var.server_network
content {
subnetwork = network_interface.value["subnetwork_name"]
network_ip = network_interface.value["subnetwork_ip"]
dynamic "access_config" {
for_each = network_interface.value.nat_ip ? [1] : []
content {
nat_ip = google_compute_address.static_ip.address
}
}
}
}
}
resource "google_compute_disk" "volume" {
for_each = var.volumes
name = each.value["volume_name"]
type = each.value["volume_type"]
size = each.value["volume_size"]
zone = var.server_datacenter
labels = each.value["volume_labels"]
}
resource "google_compute_address" "static_ip" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
region = var.server_region
}
Usage example:
module "vps-test" {
source = "../module"
credentials_file = "../../../../main/vault/prod/.tf/terraform-bb-prod-ground.json"
server_count = 2
server_name = "example-vps"
server_description = "simple vps for module testing"
server_type = "e2-small"
server_region = "europe-west4"
server_datacenter = "europe-west4-c"
server_labels = {
project = "terraform"
environment = "test"
}
server_metadata = {
groups = "parent_group.child_group"
}
boot_volume_image = "debian-cloud/debian-11"
boot_volume_size = 30
boot_volume_labels = {
environment = "production"
project = "v3"
type = "system"
}
server_tags = ["postgres", "production", "disable-gce-firewall"]
server_delete_protection = true
server_network = {
common_network = {
subnetwork_name = "${data.terraform_remote_state.network.outputs.subnetwork_vpc_production_common_name}"
subnetwork_ip = ""
nat_ip = true
} # },
# custom_network = {
# subnetwork_name = (data.terraform_remote_state.network.outputs.subnetwork_vpc_production_k8s_name)
# subnetwork_ip = ""
# nat_ip = false
# }
}
volumes = {
volume_data1 = {
volume_name = "v3-postgres-saga-import-test-storage"
volume_size = "40"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v3"
type = "storage"
}
},
volume_data2 = {
volume_name = "volume-vpstest2"
volume_size = "20"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v2"
type = "storage"
}
}
}
}
The current error is: Because google_compute_address.static_ip has "count" set, its attributes must be accessed on specific instances. And I know an error about duplicate disk names will come next.
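A minimal sketch of one way to address the count error (an assumption about the intent, not a confirmed fix): since google_compute_address.static_ip uses count, it has to be indexed, for example with the instance's own count.index so each VPS gets the address created for it:
# Hypothetical adjustment inside the module's google_compute_instance.vps resource
dynamic "access_config" {
  for_each = network_interface.value.nat_ip ? [1] : []
  content {
    # index the counted address resource with this instance's index
    nat_ip = google_compute_address.static_ip[count.index].address
  }
}
The duplicate disk-name error would similarly need per-instance names, for example by folding count.index (or the instance name) into each google_compute_disk name.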
I have Terraform code that creates an EC2-type Batch job, and my AWS Batch job downloads about 50 GB of data in total. How do I add that storage space to my instance in Terraform? Is there another way to add that storage?
This is my Terraform code:
resource "aws_batch_compute_environment" "pipeline" {
compute_environment_name = "${var.product}-${var.application}-pipeline-batch-compute-environment-${var.env}"
compute_resources {
instance_role = aws_iam_instance_profile.pipeline_batch.arn
instance_type = var.pipeline_instance_type
max_vcpus = var.pipeline_max_vcpus
min_vcpus = 0
security_group_ids = [
aws_security_group.pipeline_batch.id
]
subnets = var.subnets
type = "EC2"
}
service_role = aws_iam_role.pipeline_batch_service_role.arn
type = "MANAGED"
tags = {
environment = var.env
}
}
resource "aws_batch_job_queue" "pipeline" {
depends_on = [aws_batch_compute_environment.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-queue-${var.env}"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.pipeline.arn
]
tags = {
environment = var.env
}
}
resource "aws_batch_job_definition" "pipeline" {
depends_on = [aws_ecr_repository.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-definition-${var.env}"
type = "container"
container_properties = <<CONTAINER_PROPERTIES
{
"image": "${aws_ecr_repository.pipeline.repository_url}:latest",
"command": [ "--s3_bucket", "${var.input_bucket}", "--s3_upload_bucket", "${var.upload_bucket}"],
"executionRoleArn": "${aws_iam_role.pipeline_batch_instance_role.arn}",
"memory": ${var.pipeline_memory},
"vcpus": ${var.pipeline_vcpus}
}
CONTAINER_PROPERTIES
tags = {
environment = var.env
}
}
If you want, you may be able to mount a shared EFS drive; you could try something like the following. Keep in mind I have not tested this, and you will need to replace certain parameters with your own subnet IDs, VPC ID, etc.:
resource "aws_batch_compute_environment" "pipeline" {
compute_environment_name = "${var.product}-${var.application}-pipeline-batch-compute-environment-${var.env}"
compute_resources {
instance_role = aws_iam_instance_profile.pipeline_batch.arn
instance_type = var.pipeline_instance_type
max_vcpus = var.pipeline_max_vcpus
min_vcpus = 0
security_group_ids = [
aws_security_group.pipeline_batch.id
]
subnets = var.subnets
type = "EC2"
}
service_role = aws_iam_role.pipeline_batch_service_role.arn
type = "MANAGED"
tags = {
environment = var.env
}
}
resource "aws_batch_job_queue" "pipeline" {
depends_on = [aws_batch_compute_environment.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-queue-${var.env}"
state = "ENABLED"
priority = 1
compute_environments = [
aws_batch_compute_environment.pipeline.arn
]
tags = {
environment = var.env
}
}
resource "aws_batch_job_definition" "pipeline" {
depends_on = [aws_ecr_repository.pipeline]
name = "${var.product}-${var.application}-pipeline-batch-job-definition-${var.env}"
type = "container"
container_properties = <<CONTAINER_PROPERTIES
{
"image": "${aws_ecr_repository.pipeline.repository_url}:latest",
"command": [ "--s3_bucket", "${var.input_bucket}", "--s3_upload_bucket", "${var.upload_bucket}"],
"executionRoleArn": "${aws_iam_role.pipeline_batch_instance_role.arn}",
"memory": ${var.pipeline_memory},
"vcpus": ${var.pipeline_vcpus},
"mountPoints": [
{
readOnly = null,
containerPath = "/var/batch"
sourceVolume = "YOUR-FILE-SYSTEM-NAME"
}
]
}
CONTAINER_PROPERTIES
tags = {
environment = var.env
}
}
resource "aws_efs_file_system" "general" {
creation_token = "YOUR-FILE-SYSTEM-NAME"
#kms_key_id = module.kms.arn
#encrypted = true
encrypted = false
performance_mode = "generalPurpose"
throughput_mode = "provisioned"
provisioned_throughput_in_mibps = 8
tags = {Name = "YOUR-FILE-SYSTEM-NAME"}
}
resource "aws_efs_access_point" "general" {
tags = {Name = "YOUR-FILE-SYSTEM-NAME"}
file_system_id = aws_efs_file_system.general.id
root_directory {
path = "/YOUR-FILE-SYSTEM-NAME"
creation_info {
owner_gid = "1000"
owner_uid = "1000"
permissions = "755"
}
}
posix_user {
uid = "1000"
gid = "1000"
}
}
## FOR REDUNDANCY
## It is a good idea to add a mount target per AZ you use
resource "aws_efs_mount_target" "a" {
  file_system_id  = aws_efs_file_system.general.id
  subnet_id       = "PUBLIC-SUBNET-A"
  security_groups = [aws_security_group.general.id]
}

resource "aws_efs_mount_target" "b" {
  file_system_id  = aws_efs_file_system.general.id
  subnet_id       = "PUBLIC-SUBNET-B"
  security_groups = [aws_security_group.general.id]
}

resource "aws_security_group" "general" {
  name   = "YOUR-SECURITY-GROUP-NAME"
  vpc_id = "YOUR-VPC-ID"
  tags   = { Name = "YOUR-SECURITY-GROUP-NAME" }
}

resource "aws_security_group_rule" "ingress" {
  type              = "ingress"
  cidr_blocks       = ["0.0.0.0/0"]
  ipv6_cidr_blocks  = ["::/0"]
  from_port         = "2049"
  to_port           = "2049"
  protocol          = "tcp"
  security_group_id = aws_security_group.general.id
}

resource "aws_security_group_rule" "egress" {
  type              = "egress"
  description       = "egress"
  cidr_blocks       = ["0.0.0.0/0"]
  ipv6_cidr_blocks  = ["::/0"]
  from_port         = "0"
  to_port           = "0"
  protocol          = "all"
  security_group_id = aws_security_group.general.id
}
You'll be able to mount your EFS drive on any default Amazon Linux EC2 instance like this:
mkdir -p /data/efs && mount -t nfs4 -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport REPLACE_WITH_EFS_DNS:/ /data/efs
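Note: for the "mountPoints" entry above to resolve, the job definition's container properties would also need a matching "volumes" entry pointing at the EFS file system. A hedged sketch of that JSON fragment, reusing the resources above (the efsVolumeConfiguration keys follow the AWS Batch job definition schema; adjust rootDirectory as needed):
"volumes": [
  {
    "name": "YOUR-FILE-SYSTEM-NAME",
    "efsVolumeConfiguration": {
      "fileSystemId": "${aws_efs_file_system.general.id}",
      "rootDirectory": "/"
    }
  }
]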
I am getting the error "Blocks of type "dns_config" are not expected here" in my Terraform main.tf file. Here is my code.
I am trying this in GCP.
The code breaks at the dns_config section.
provider "google" {
  project     = var.project
  region      = var.region
  version     = "4.22.0"
  credentials = var.credentials
}

resource "google_container_cluster" "primary" {
  name               = "${var.service-name}-${lower(var.site-id)}"
  location           = var.region
  node_locations     = [var.zone]
  network            = var.vpc-id
  subnetwork         = var.subnet-id
  enable_autopilot   = true
  initial_node_count = var.initial-node-count

  dns_config {
    cluster_dns        = "CLOUD_DNS"
    cluster_dns_domain = "cluster.qg${var.site-id}stream"
    cluster_dns_scope  = "CLUSTER_SCOPE"
  }

  ip_allocation_policy {
    cluster_secondary_range_name  = var.subnet-pod-ip-range-name
    services_secondary_range_name = var.subnet-service-ip-range-name
  }

  dynamic "release_channel" {
    for_each = var.release-channel != null ? [{ channel : var.release-channel }] : []
    content {
      channel = var.release-channel
    }
  }

  logging_service    = var.logging_service
  monitoring_service = var.monitoring_service
}

terraform {
  backend "pg" {}
}
I have the following TF module to create Secrets Manager resources:
resource "aws_secretsmanager_secret" "secrets_manager" {
count = length(var.secrets)
name = element(split(":", var.secrets[count.index]),0)
description = "Managed by Terraform"
}
resource "aws_secretsmanager_secret_version" "secret" {
count = length(var.secrets)
secret_id = aws_secretsmanager_secret.secrets_manager[count.index].id
secret_string = <<EOF
{
"${element(split(":", var.secrets[count.index]),1)}": "${element(split(":", var.secrets[count.index]),2)}"
}
EOF
}
With:
module "secrets_manager" {
source = "./modules/secret_manager"
secrets = [
"secret_name:secret_value:secret_key",
...
]
}
This has been working perfectly for me. Now, without changing this mechanism, I want the option to create a secret without specifying/creating a value/key:
module "secrets_manager" {
source = "./modules/secret_manager"
secrets = [
"secret_name:secret_value:secret_key",
"secret_name",
"secret_name:secret_value:secret_key",
...
]
}
I have been trying to use a map as a variable, etc., with no luck:
secrets_map = {
  secret_one = {
    secret_name  = "...."
    secret_value = "...."
    secret_key   = "...."
  }
  secret_two = {
    secret_name = "...."
  }
}
Working solution:
resource "aws_secretsmanager_secret" "secrets_manager" {
for_each = var.secrets_map
name = each.value["secret_name"]
description = "Managed by Terraform"
}
resource "aws_secretsmanager_secret_version" "secret" {
for_each = {
for key, value in var.secrets_map :
key => value
if lookup(value, "secret_value", "") != ""
}
secret_id = each.value["secret_name"]
secret_string = <<EOF
{
"${each.value["secret_value"]}": "${each.value["secret_key"]}"
}
EOF
depends_on = [
aws_secretsmanager_secret.secrets_manager
]
}
With:
module "secrets_manager" {
source = "./modules/secret_manager"
secrets_map = {
secret_one = {
secret_name = "...."
secret_value = "...."
secret_key = "...."
}
secret_two = {
secret_name = "...."
secret_value = ""
secret_key = ""
}
}
}
And:
variable "secrets_map" {
type = map(object({
secret_name = string
secret_value = string
secret_key = string
}))
}
I couldn't set defaults on the object attributes, so empty values still have to be passed.
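If you are on Terraform 1.3 or later (an assumption, not stated above), optional object attributes with defaults would let secret_two omit the empty values entirely while the for_each filter keeps working. A minimal sketch:
variable "secrets_map" {
  type = map(object({
    secret_name  = string
    secret_value = optional(string, "") # defaults to "" when omitted
    secret_key   = optional(string, "")
  }))
}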
Extra:
terraform import .......module.secrets_manager.aws_secretsmanager_secret.secrets_manager[\"secret_two\"] arn....
Thanks to: https://www.youtube.com/watch?v=UFEhJFIj9gY
I'm getting an error on terraform plan saying my object has no attributes for the name value. We are deploying about 7 private DNS zones, and many of them live in the same resource group; some may live in others, but most live in the same one.
Error: Unsupported attribute
on Modules/privatednszone/main.tf line 4, in data "azurerm_resource_group" "this":
name = each.value.name
This value does not have any attributes.
MAIN
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "2.83.0"
    }
  }
}

provider "azurerm" {
  features {}
}

variable "private_dns_zones" {
  type = map(object({
    dns_zone_name       = string
    resource_group_name = string
    tags                = map(string)
    vnet_links = list(object({
      zone_to_vnet_link_name    = string
      vnet_name                 = string
      networking_resource_group = string
      zone_to_vnet_link_exists  = bool
      vnet_link_rg_name         = string
    }))
    zone_exists          = bool
    registration_enabled = bool
  }))
  description = "Map containing Private DNS Zone Objects"
  default     = {}
}

data "azurerm_resource_group" "this" {
  # read from local variable, index is resource_group_name
  for_each = local.rgs_map
  name     = each.value.name
}

locals {
  rgs_map = {
    for n in var.private_dns_zones :
    n.resource_group_name => {
      name = n.resource_group_name
    }
  }
}

output "rgs_map" {
  value = local.rgs_map
}

output "rg_data" {
  value = data.azurerm_resource_group.this
}
TFVARS
The code below is a sample of two DNS zones, but there are additional ones.
private_dns_zones = {
  zone1 = {
    dns_zone_name       = "privatelink.vaultcore.azure.net"
    resource_group_name = "Terraform1"
    tags = {
      iac    = "Terraform"
      syntax = "zone1"
    }
    zone_exists = false
    vnet_links = [
      {
        zone_to_vnet_link_name    = "vaultcore-vnet-eastus2-01"
        vnet_name                 = "vnet-eastus2-01"
        networking_resource_group = "Terraform1"
        zone_to_vnet_link_exists  = false
        vnet_link_rg_name         = "Terraform1"
      }
    ]
    registration_enabled = false
  },
  zone2 = {
    dns_zone_name       = "privatelink.monitor.azure.com"
    resource_group_name = "Terraform1"
    tags = {
      iac    = "Terraform"
      syntax = "zone2"
    }
    zone_exists = false
    vnet_links = [
      {
        zone_to_vnet_link_name    = "monitor-vnet-eastus2-01"
        vnet_name                 = "vnet-eastus2-01"
        networking_resource_group = "Terraform1"
        zone_to_vnet_link_exists  = false
        vnet_link_rg_name         = "Terraform1"
      }
    ]
    registration_enabled = false
  }
}
Your code seems to work fine only if you use different resource group names. Since you are using duplicate resource group names, which is your requirement, building the "rgs_map" map with your code is not possible: the for expression produces the same key ("Terraform1") for multiple zones and errors out with a duplicate-key error.
So, in order to resolve that error, I used something like the below, keying the map by the zone key instead:
locals {
  rgs_map = {
    for i, n in var.private_dns_zones : i => {
      name = n.resource_group_name
    }
  }
}
Complete code:
terraform {
  required_providers {
    azurerm = {
      source  = "hashicorp/azurerm"
      version = "2.83.0"
    }
  }
}

provider "azurerm" {
  features {}
}

variable "private_dns_zones" {
  type = map(object({
    dns_zone_name       = string
    resource_group_name = string
    tags                = map(string)
    vnet_links = list(object({
      zone_to_vnet_link_name    = string
      vnet_name                 = string
      networking_resource_group = string
      zone_to_vnet_link_exists  = bool
      vnet_link_rg_name         = string
    }))
    zone_exists          = bool
    registration_enabled = bool
  }))
  description = "Map containing Private DNS Zone Objects"
  default     = {}
}

data "azurerm_resource_group" "this" {
  # read from local variable, index is now the zone key
  for_each = local.rgs_map
  name     = each.value.name
}

locals {
  rgs_map = {
    for i, n in var.private_dns_zones : i => {
      name = n.resource_group_name
    }
  }
}

output "rgs_map" {
  value = local.rgs_map
}

output "rg_data" {
  value = data.azurerm_resource_group.this
}
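An alternative sketch (a variation on the above, not part of the original answer): deduplicate the resource group names with toset() so each distinct group is looked up only once, keyed by its own name:
locals {
  rgs_map = {
    # toset() collapses duplicate group names into a single key
    for rg in toset([for z in var.private_dns_zones : z.resource_group_name]) :
    rg => { name = rg }
  }
}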