Error: Module does not support depends_on - azure

# Get existing subnet properties
module "subnet" {
  #source        = "git::git#bitbucket.org:exium-c2/azure-registery.git/az-sub"
  source         = "C:\\Users\\harip\\azure-registery\\az-sub"
  subnet_prefix4 = var.subnet_prefix4
  subnet_prefix6 = var.subnet_prefix6
  rg             = var.rg-name
  location       = var.rg-location
  vnet-name      = data.azurerm_virtual_network.vnet.name
  routeipv4      = data.azurerm_route_table.routeipv4.id
  routeipv6      = data.azurerm_route_table.routeipv6.id
  mgmt-sg-id     = data.azurerm_network_security_group.sg.id
  nwu-sg-id      = data.azurerm_network_security_group.sg1.id
}
module "nics" {
#source = "git::git#bitbucket.org:exium-c2/azure-registery.git/az-nic"
source = "C:\\Users\\harip\\azure-registery\\az-nic"
#nic-name = var.nic-name
rg-name = var.rg-name
rg-location = var.rg-location
#count= "${length(var.subnetwork-subnetid)}"
subnetwork-subnetid = module.subnet.subnetwork-subnetid
subnetwork6-subnetid = module.subnet.subnetwork6-subnetid
depends_on = [ module.subnet.subnetwork,module.subnet.subnetwork6 ]
}
module "fpm" {
#source = "git::git#bitbucket.org:exium-c2/azure-registery.git/az-compute-fpm"
source = "C:\\Users\\harip\\azure-registery\\az-compute-fpm"
#count = "${length(var.nic1-id)}"
vm-name = var.vm-name
size = var.size
user-name = var.user-name
rg-name = var.rg-name
rg-location = var.rg-location
nic1-id = module.nics.nic1-id
nic2-id = module.nics.nic2-id
}
I plan to create multiple instances based on these subnets and NICs, so I want to chain the modules together with depends_on. The error is:
Error: Module does not support depends_on
Please help with this. Thanks in advance.
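Two things are worth checking here. Module-level depends_on only exists in Terraform 0.13 and later, so this error typically indicates an older Terraform version. And even on 0.13+, the depends_on list must reference whole modules or resources, not module outputs. A minimal sketch of the likely fix, assuming Terraform >= 0.13:

module "nics" {
  source = "C:\\Users\\harip\\azure-registery\\az-nic"

  rg-name              = var.rg-name
  rg-location          = var.rg-location
  subnetwork-subnetid  = module.subnet.subnetwork-subnetid
  subnetwork6-subnetid = module.subnet.subnetwork6-subnetid

  # reference the whole module, not module.subnet.<output>
  depends_on = [module.subnet]
}

Note that referencing module.subnet.subnetwork-subnetid in an argument already creates an implicit dependency on the subnet module, so once the inputs are wired this way the explicit depends_on may be unnecessary.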

Related

Create VPS in GCP via Terraform module using count

I'm getting stuck on a problem and need Terraform expert help. I want to create VPSes in GCP using a module with count. How do I correctly create and attach a google_compute_address and a google_compute_disk to each VPS with different names?
Any help, please.
Module code:
resource "google_compute_instance" "vps" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
description = var.server_description
machine_type = var.server_type
zone = var.server_datacenter
deletion_protection = var.server_delete_protection
labels = var.server_labels
metadata = var.server_metadata
tags = var.server_tags
boot_disk {
auto_delete = false
initialize_params {
size = var.boot_volume_size
type = var.boot_volume_type
image = var.boot_volume_image
labels = var.boot_volume_labels
}
}
dynamic "attached_disk" {
for_each = var.volumes
content {
source = attached_disk.value["volume_name"]
}
}
dynamic "network_interface" {
for_each = var.server_network
content {
subnetwork = network_interface.value["subnetwork_name"]
network_ip = network_interface.value["subnetwork_ip"]
dynamic "access_config" {
for_each = network_interface.value.nat_ip ? [1] : []
content {
nat_ip = google_compute_address.static_ip.address
}
}
}
}
}
resource "google_compute_disk" "volume" {
for_each = var.volumes
name = each.value["volume_name"]
type = each.value["volume_type"]
size = each.value["volume_size"]
zone = var.server_datacenter
labels = each.value["volume_labels"]
}
resource "google_compute_address" "static_ip" {
count = var.server_count
name = var.server_count > 1 ? "${var.server_name}-${count.index}" : var.server_name
region = var.server_region
}
Usage example:
module "vps-test" {
source = "../module"
credentials_file = "../../../../main/vault/prod/.tf/terraform-bb-prod-ground.json"
server_count = 2
server_name = "example-vps"
server_description = "simple vps for module testing"
server_type = "e2-small"
server_region = "europe-west4"
server_datacenter = "europe-west4-c"
server_labels = {
project = "terraform"
environment = "test"
}
server_metadata = {
groups = "parent_group.child_group"
}
boot_volume_image = "debian-cloud/debian-11"
boot_volume_size = 30
boot_volume_labels = {
environment = "production"
project = "v3"
type = "system"
}
server_tags = ["postgres", "production", "disable-gce-firewall"]
server_delete_protection = true
server_network = {
common_network = {
subnetwork_name = "${data.terraform_remote_state.network.outputs.subnetwork_vpc_production_common_name}"
subnetwork_ip = ""
nat_ip = true
} # },
# custom_network = {
# subnetwork_name = (data.terraform_remote_state.network.outputs.subnetwork_vpc_production_k8s_name)
# subnetwork_ip = ""
# nat_ip = false
# }
}
volumes = {
volume_data1 = {
volume_name = "v3-postgres-saga-import-test-storage"
volume_size = "40"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v3"
type = "storage"
}
},
volume_data2 = {
volume_name = "volume-vpstest2"
volume_size = "20"
volume_type = "pd-ssd"
volume_labels = {
environment = "production"
project = "v2"
type = "storage"
}
}
}
}
Now the error is: Because google_compute_address.static_ip has "count" set, its attributes must be accessed on specific instances. And I know an error about the duplicate disk names will come next.
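Since the address resource uses count, each VPS has to select its own instance of it by index. Inside the access_config block of the vps resource:

nat_ip = google_compute_address.static_ip[count.index].address

For the disks, the module currently creates one google_compute_disk per volumes entry and attaches that same disk to every VPS, so with server_count = 2 the second VM collides with the first. A hedged sketch of one way out, creating one disk per (instance, volume) pair with setproduct (the instance_volumes local is a hypothetical helper, not part of the original module):

locals {
  # one entry per VM/volume pair, keyed "index-volumekey"
  instance_volumes = {
    for pair in setproduct(range(var.server_count), keys(var.volumes)) :
    "${pair[0]}-${pair[1]}" => merge(var.volumes[pair[1]], { vm_index = pair[0] })
  }
}

resource "google_compute_disk" "volume" {
  for_each = local.instance_volumes
  name     = "${each.value.volume_name}-${each.value.vm_index}" # unique per VM
  type     = each.value.volume_type
  size     = each.value.volume_size
  zone     = var.server_datacenter
  labels   = each.value.volume_labels
}

The attached_disk dynamic block would then need to filter local.instance_volumes on vm_index == count.index so each VM only attaches its own disks.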

Terraform nested dynamic blocks

I'm trying to deploy an Azure application gateway in Terraform; in particular, I need to create nested dynamic blocks.
I have tried to implement this (this part of the code is in a file called application_gateway.tf):
dynamic "url_path_map" {
for_each = var.path_maps
content {
name = outer_block.value["name"]
default_backend_address_pool_name = outer_block.value["backend"]
default_backend_http_settings_name = outer_block.value["backend_set"]
dynamic "url_path_rule" {
for_each = url_path_map.value["upm"]
content{
name = url_path_rule.value["name_rule"]
paths = url_path_rule.value["path"]
backend_address_pool_name = url_path_rule.value["backend"]
backend_http_settings_name = url_path_rule.value["backend_set"]
}
}
}
}
The corresponding variables.tf file is:
variable "path_maps" {
default = []
type = list(object({
name = string
backend = string
backend_set = string
upm = list(object({
name_rule = string
path = string
backend = string
backend_set = string
}))
}))
}
With the following module call (this part of the script is in another file called main.tf):
module "application_gateway" {
source = "../modules/resources-hub/application_gateway"
resource_group_name = module.resource_group.name
resource_group_location = module.resource_group.location
subnet_id = module.agw_subnet.id
public_ip_address_id = module.app_gw_pip.id
firewall_policy_id = module.agw_web_application_firewall.id
log_analytics_workspace_id = module.log_analytics_workspace.id
path_maps = [{name = "dev_url_path_name", backend = "devBackend", backend_set = "devHttpSetting", name_rule = "dev_path_rule_name_app", path = "/app/*"},
{name = "tst_url_path_name", backend = "tstBackend", backend_set = "tstHttpSetting", name_rule = "dev_path_rule_name_edp", path = "/edp/*"},
{name = "uat_url_path_name", backend = "uatBackend", backend_set = "uatHttpSetting", name_rule = "dev_path_rule_name_internal", path = "/internal/*"}]
}
At the end, what I would like to obtain is this but using the nested dynamic blocks:
url_path_map {
  name                               = "dev_url_path_name"
  default_backend_address_pool_name  = "devBackend"
  default_backend_http_settings_name = "devHttpSetting"
  path_rule {
    name                       = "dev_path_rule_name_app_edp"
    paths                      = ["/app/*"]
    backend_address_pool_name  = "devBackend"
    backend_http_settings_name = "devHttpSetting"
  }
  path_rule {
    name                       = "dev_path_rule_name_internal"
    paths                      = ["/edp/*"]
    backend_address_pool_name  = "devBackend"
    backend_http_settings_name = "devHttpSetting"
  }
  path_rule {
    name                       = "dev_path_rule_name_internal"
    paths                      = ["/internal/*"]
    backend_address_pool_name  = "sinkPool"
    backend_http_settings_name = "devHttpSetting"
  }
}
This is the error I get when I run "terraform validate" (posted as a screenshot; it reports that attribute "upm" is required).
Thank you in advance!
I have tried the code above but I got the error shown in the screenshot.
The first problem is the definition of the variable "path_maps": its declared type differs from the format of the path_maps value you are passing to the module.
You can either reshape path_maps into the correct format before passing it to the module, or change the variable's type to fit the format you pass.
That's why you are getting the error that "upm" is required.
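For example, a module call that matches the declared type nests the rule objects inside upm. A sketch using the first entry from the question (the other two entries would follow the same shape):

path_maps = [{
  name        = "dev_url_path_name"
  backend     = "devBackend"
  backend_set = "devHttpSetting"
  upm = [{
    name_rule   = "dev_path_rule_name_app"
    path        = "/app/*"
    backend     = "devBackend"
    backend_set = "devHttpSetting"
  }]
}]

Two further fixes would likely be needed in application_gateway.tf: a dynamic block's iterator is named after the block unless overridden, so the outer_block.value references should be url_path_map.value; and since paths expects a list while path is declared a string, something like paths = [url_path_rule.value["path"]] would be required.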

for each module reference between modules with a map object

I'm trying to create an instance group that references a GCE instance, but I'm unable to pass the id from the compute engine module into the instance group module.
variable "compute_engine_instances" {
type = map(object({
instance_name = string
machine_type = string
zone = string
tags = list(string)
image_name = string
image_project = string
labels =object({
app_id = number
cost_center = string
owner = string
email = string
})
}))
}
module "qat_hosted_pacs_compute_engines" {
source = "../modules/compute_engine"
for_each = var.compute_engine_instances
project_id = var.project_id
instance_name = each.value.instance_name
machine_type = each.value.machine_type
tags = each.value.tags
labels = each.value.labels
subnetwork = var.subnetwork
zone = each.value.zone
image_name = each.value.image_name
image_project = each.value.image_project
}
variable "instance_group" {
type = map(object({
group_manager_name = string
zone = string
}))
}
module "qat_hosted_pacs_app_grp" {
source = "../modules/instance_groups"
for_each = var.instance_group
group_manager_name = each.value.group_manager_name
zone = each.value.zone
project_id = var.project_id
instances = module.qat_hosted_pacs_compute_engines.vm_name #unable to figure out how to reference the GCE
}
# output.tf looks like this for the compute engine module
output "compute_engine_instances" {
  value = {
    for k, v in module.qat_hosted_pacs_compute_engines : k => v.vm_name
  }
}
The compute engine module itself looks like this:
data "google_compute_image" "compute_image" {
name = var.image_name
project = var.image_project
}
resource "google_compute_instance" "generic_instance" {
project = var.project_id
name = var.instance_name
machine_type = var.machine_type
zone = var.zone
tags = var.tags
labels = var.labels
boot_disk {
initialize_params {
image = data.google_compute_image.compute_image.self_link
}
auto_delete = true
}
network_interface {
subnetwork = var.subnetwork
}
}
#outputs.tf here looks like this for gce resource
output "vm_name" {
value = google_compute_instance.generic_instance.id
description = "The name of the VM"
}
And the instance group manager looks like this
resource "google_compute_instance_group" "igm" {
name = var.group_manager_name
zone = var.zone
project = var.project_id
instances = var.instances
named_port {
name = "http"
port = "8080"
}
named_port {
name = "https"
port = "8443"
}
lifecycle {
create_before_destroy = true
}
}
I get the following error:
Error: Unsupported attribute
on main.tf line 45, in module "qat_hosted_pacs_app_grp":
45: instances = module.qat_hosted_pacs_compute_engines.vm_name
|----------------
| module.qat_hosted_pacs_compute_engines is object with 2 attributes
This object does not have an attribute named "vm_name".
The terraform.tfvars file:
compute_engine_instances = {
  "test-adi" = {
    instance_name = "test-vm"
    machine_type  = "n1-standard-1"
    zone          = "us-east4-b"
    tags          = ["foo", "bar"]
    image_name    = "gold-centos-7"
    image_project = "dev-cietools"
    labels = {
      app_id      = "56"
      cost_center = "156"
      owner       = "ops"
      email       = "ops"
    }
  }
  "test-adi-2" = {
    instance_name = "test-vm-2"
    machine_type  = "n1-standard-1"
    zone          = "us-east4-b"
    tags          = ["foo", "bar"]
    image_name    = "centos-7"
    image_project = "dev-cietools"
    labels = {
      app_id      = "56"
      cost_center = "856"
      owner       = "ops"
      email       = "ops"
    }
  }
}
subnetwork = "sandbox-us-east4"
project_id = "cloudops-dev01-sb"
instance_group = {
  "igm" = {
    group_manager_name = "test"
    zone               = "us-east4-b"
  }
}
Since you used for_each on the qat_hosted_pacs_compute_engines module, you have to use a key to refer to an individual instance of the module, e.g.
instances = module.qat_hosted_pacs_compute_engines["test-adi"].vm_name
or if you want to pass all vm_name created for all values of for_each, you can do:
instances = values(module.qat_hosted_pacs_compute_engines)[*].vm_name
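Since the module's vm_name output actually returns the instance id, with the tfvars above the splat expression would evaluate to something like the following (illustrative, based on the usual google_compute_instance id format of projects/PROJECT/zones/ZONE/instances/NAME):

[
  "projects/cloudops-dev01-sb/zones/us-east4-b/instances/test-vm",
  "projects/cloudops-dev01-sb/zones/us-east4-b/instances/test-vm-2",
]

google_compute_instance_group.instances expects a list of instance URLs; if the id form is not accepted, the module output could return google_compute_instance.generic_instance.self_link instead.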

Is there a better way to create multiple cloudwatch alarms?

I'm trying to create CloudWatch alarms for some specific load balancers.
What if I have to create 100 CloudWatch alarms: do I need to populate the tfvars the way I'm doing currently, or is there a more optimized way?
Following is my code.
main.tf
resource "aws_cloudwatch_metric_alarm" "UnHealthyHosts" {
for_each = var.cloudwatch_alarms_map
alarm_name = each.key
comparison_operator = var.cloudwatch_alarm_operator
evaluation_periods = var.cloudwatch_alarm_evaluation_periods
metric_name = var.cloudwatch_alarm_metric
namespace = each.value["alarm_namespace"]
period = var.cloudwatch_alarm_period
statistic = var.cloudwatch_alarm_statistic
threshold = var.cloudwatch_alarm_threshold
alarm_description = var.cloudwatch_alarm_description
actions_enabled = var.cloudwatch_alarm_actions_enabled
alarm_actions = [aws_sns_topic.sns.arn]
dimensions = {
TargetGroup = each.value["target_group_arn"]
LoadBalancer = each.value["load_balancer_arn"]
}
}
variables.tf
variable "cloudwatch_alarms_map" {
type = map(object({
alarm_namespace = string # eg: AWS/ApplicationELB
target_group_arn = string
load_balancer_arn = string
}))
default = {}
}
terraform.tfvars
cloudwatch_alarms_map = {
  app1-unhealthy-alarm = {
    target_group_arn  = "targetgroup/sample-app1-tg/12de123e123123aa"
    load_balancer_arn = "app/sample-alb-app1-lb/12c5732bd012e47a"
    alarm_namespace   = "AWS/ApplicationELB"
  }
  app2-unhealthy-alarm = {
    target_group_arn  = "targetgroup/sample-app2-tg/313e7f1ad4a2e373"
    load_balancer_arn = "app/sample-alb-app2-lb/f2c5132bd012e47a"
    alarm_namespace   = "AWS/ApplicationELB"
  }
}
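One way to avoid hand-writing a full entry per alarm is to keep the per-app data minimal and derive the map with a for expression. A sketch under the question's naming (the apps variable and the locals block are hypothetical, not part of the original code):

variable "apps" {
  type = map(object({
    target_group_arn  = string
    load_balancer_arn = string
  }))
  default = {}
}

locals {
  # build one alarm entry per app, so tfvars only lists the two ARN suffixes
  cloudwatch_alarms_map = {
    for app, cfg in var.apps : "${app}-unhealthy-alarm" => {
      alarm_namespace   = "AWS/ApplicationELB"
      target_group_arn  = cfg.target_group_arn
      load_balancer_arn = cfg.load_balancer_arn
    }
  }
}

The resource would then use for_each = local.cloudwatch_alarms_map. For 100 alarms sourced from elsewhere, the same for expression can be fed from data sources or a CSV file via csvdecode(file(...)) instead of tfvars.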

terraform add multiple VM and change the default name of the resoruces

I need a bit of help: I have a Terraform script and I want to add multiple VMs and change the name of each network card to node_name-NIC (and do the same for the other resources), but it is failing and I can't find the proper way to do it.
Below is the Terraform script:
terraform {
  required_providers {
    azurerm = {
      // source = "hashicorp/azurerm"
      version = "=1.44"
    }
  }
}
locals {
  rsname      = "testing-new-terraform-modules"
  node_name   = ["server1", "server2"]
  clustersize = 2
  node_size   = "Standard_B4ms"
  av_set_name = "Windows-AV-Set"
  vnet_name   = "VNET_1"
  vnet_rg     = "RG_VNET_D"
  gw_subnet   = "SUB_GW_INT"
  vm_subnet   = "SUB_WIN"
  image_rg    = "RG__TEMPLATE"
  common_tags = {
    lbuildingblock = "GENERAL"
    customer       = "IND"
  }
}
module "resource_group" {
source = "../modules/resources/azure/data-resource-group"
rsname = local.rsname
}
data "azurerm_virtual_network" "virtual_network" {
name = local.vnet_name
resource_group_name = local.vnet_rg
}
# GatewayZone subnet, for the Load Balancer frontend IP address
module "gw_subnet" {
source = "../modules/resources/azure/data-subnet"
subnet-name = local.gw_subnet
vnet-name = data.azurerm_virtual_network.virtual_network.name
rs-name = data.azurerm_virtual_network.virtual_network.resource_group_name
}
module "windows_subnet" {
source = "../modules/resources/azure/data-subnet"
// We will use the SUB_LHIND_P_APP subnet, no need to create a new subnet just for two servers
subnet-name = local.vm_subnet
rs-name = local.vnet_rg
vnet-name = local.vnet_name
}
//data "azurerm_network_security_group" "app_nsg" {
//
// name = "SUB_LHIND_D_APP_NSG"
// resource_group_name = data.azurerm_virtual_network.virtual_network.resource_group_name
//}
module "nic" {
source = "../modules/resources/azure/network-interface"
location = module.resource_group.rs_group_location
name = "${local.node_name[0]}-NIC"
nic_count = local.clustersize
resource_group = module.resource_group.rs_group_name
subnet_id = module.windows_subnet.subnet_id
tags = local.common_tags
}
module "av_set" {
source = "../modules/resources/azure/availability-set"
av_name = local.av_set_name
resource_group = module.resource_group.rs_group_name
location = module.resource_group.rs_group_location
}
module "template_image" {
source = "../modules/resources/azure/data-templates"
template_name = "WindowsServer2019"
resource_group = local.image_rg
}
module "windows" {
source = "../modules/resources/azure/windows-server"
location = module.resource_group.rs_group_location
network_interface_ids = module.nic.nic_id
node_count = local.clustersize
node_name = local.node_name
node_size = local.node_size
av_set_id = module.av_set.availability_set_id
resource_group = module.resource_group.rs_group_name
template_id = module.template_image.template_id
username = var.username
password = var.password
domain_user = var.domain_user
domain_pass = var.domain_pass
}
It is failing with the below error:
Error: Invalid index
on ../modules/resources/azure/network-interface/main.tf line 10, in resource "azurerm_network_interface" "nic":
10: name = var.name[count.index]
|----------------
| count.index is 0
| var.name is "SW-AZLHIND-580-NIC"
This value does not have any indices.
The network-interface resource looks like this:
resource "azurerm_network_interface" "nic" {
count = var.nic_count
location = var.location
name = var.name[count.index]
resource_group_name = var.resource_group
tags = var.tags
// network_security_group_id = var.network_security_group_id
ip_configuration {
name = var.name[count.index]
private_ip_address_allocation = "dynamic"
subnet_id = var.subnet_id
}
}
You can use the following:
name = "${var.name}-${count.index}"
