I want to create a serverless SQL pool database (associated with Synapse) with Terraform, but I cannot find a provider resource to do this. Is it possible?
I have tried using the azurerm_mssql_server data source and the azurerm_mssql_database resource, but to no avail, e.g.:
data "azurerm_mssql_server" "synapseserverless" {
name = "${var.environment}${local.application_namespace}${local.location_id}synws-ondemand.sql.azuresynapse.net"
resource_group_name = azurerm_resource_group.rg_data.name
}
resource "azurerm_mssql_database" "reporting" {
name = "${var.environment}-${local.application_namespace}-${local.location_id}-sqldb-reporting"
server_id = data.azurerm_mssql_server.synapseserverless.id
read_scale = true
sku_name = "GP_S_Gen5_6"
zone_redundant = true
}
Thank you
You can use the Terraform code below to create a Synapse workspace with a SQL pool; the workspace also comes with a built-in serverless (on-demand) SQL pool.
provider "azurerm" {
features{}
}
data "azurerm_resource_group" "example" {
name = "XXXXXXXXX"
}
resource "azurerm_storage_account" "example" {
name = "examplestorageacc4353"
resource_group_name = data.azurerm_resource_group.example.name
location = data.azurerm_resource_group.example.location
account_tier = "Standard"
account_replication_type = "LRS"
account_kind = "BlobStorage"
is_hns_enabled = true
}
resource "azurerm_storage_data_lake_gen2_filesystem" "example" {
name = "example"
storage_account_id = azurerm_storage_account.example.id
}
resource "azurerm_synapse_workspace" "example" {
name = "example77354"
resource_group_name = data.azurerm_resource_group.example.name
location = data.azurerm_resource_group.example.location
storage_data_lake_gen2_filesystem_id = azurerm_storage_data_lake_gen2_filesystem.example.id
sql_administrator_login = "sqladminuser"
sql_administrator_login_password = "H#Sh1CoR3!"
}
resource "azurerm_synapse_sql_pool" "example" {
name = "examplesqlpool456"
synapse_workspace_id = azurerm_synapse_workspace.example.id
sku_name = "DW100c"
create_mode = "Default"
}
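If what you need is the built-in serverless (on-demand) SQL endpoint, the workspace already exposes it through its connectivity_endpoints attribute; a minimal sketch of surfacing it as an output (the "sqlOnDemand" key is an assumption about that map):

# Hypothetical output: the workspace's built-in serverless SQL (on-demand) endpoint.
output "serverless_sql_endpoint" {
  value = azurerm_synapse_workspace.example.connectivity_endpoints["sqlOnDemand"]
}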
I need to test my Azure private endpoint using the following scenario.
We have a virtual network with two subnets (vm_subnet and storage_account_subnet).
The virtual machine (VM) should be able to connect to the storage account using a private link.
The image below explains the scenario:
I then need to test my endpoint using the following manual test case:
Connect to the Azure virtual machine over SSH (PuTTY) with username adminuser and password P#$$w0rd1234!
In the terminal, ping formuleinsstorage.blob.core.windows.net (expect to see the IP of the storage account in the range of storage_account_subnet, 10.0.2.0/24).
I deploy all the infrastructure using the Terraform code below:
provider "azurerm" {
features {
resource_group {
prevent_deletion_if_contains_resources = false
}
}
}
resource "azurerm_resource_group" "main_resource_group" {
name = "RG-Terraform-on-Azure"
location = "West Europe"
}
# Create Virtual-Network
resource "azurerm_virtual_network" "virtual_network" {
name = "Vnet"
address_space = ["10.0.0.0/16"]
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
}
# Create subnet for virtual-machine
resource "azurerm_subnet" "virtual_network_subnet" {
name = "vm_subnet"
resource_group_name = azurerm_resource_group.main_resource_group.name
virtual_network_name = azurerm_virtual_network.virtual_network.name
address_prefixes = ["10.0.1.0/24"]
}
# Create subnet for storage account
resource "azurerm_subnet" "storage_account_subnet" {
name = "storage_account_subnet"
resource_group_name = azurerm_resource_group.main_resource_group.name
virtual_network_name = azurerm_virtual_network.virtual_network.name
address_prefixes = ["10.0.2.0/24"]
}
# Create Linux Virtual machine
resource "azurerm_linux_virtual_machine" "example" {
name = "example-machine"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
size = "Standard_F2"
admin_username = "adminuser"
admin_password = "14394Las?"
disable_password_authentication = false
network_interface_ids = [
azurerm_network_interface.virtual_machine_network_interface.id,
]
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "16.04-LTS"
version = "latest"
}
}
resource "azurerm_network_interface" "virtual_machine_network_interface" {
name = "vm-nic"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
ip_configuration {
name = "internal"
subnet_id = azurerm_subnet.virtual_network_subnet.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.vm_public_ip.id
}
}
# Create network interface and public IP for the virtual machine
resource "azurerm_public_ip" "vm_public_ip" {
name = "vm-public-ip-for-rdp"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
allocation_method = "Static"
sku = "Standard"
}
resource "azurerm_network_interface" "virtual_network_nic" {
name = "vm_nic"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
ip_configuration {
name = "testconfiguration1"
subnet_id = azurerm_subnet.virtual_network_subnet.id
private_ip_address_allocation = "Dynamic"
}
}
# Set up an inbound rule because we need to connect to the virtual machine over SSH (port 22)
resource "azurerm_network_security_group" "traffic_rules" {
name = "vm_traffic_rules"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
security_rule {
name = "virtual_network_permission"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
resource "azurerm_subnet_network_security_group_association" "private_nsg_asso" {
subnet_id = azurerm_subnet.virtual_network_subnet.id
network_security_group_id = azurerm_network_security_group.traffic_rules.id
}
# Setup storage_account and its container
resource "azurerm_storage_account" "storage_account" {
name = "storagaccountfortest"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
account_tier = "Standard"
account_replication_type = "LRS"
account_kind = "StorageV2"
is_hns_enabled = "true"
}
resource "azurerm_storage_data_lake_gen2_filesystem" "data_lake_storage" {
name = "rawdata"
storage_account_id = azurerm_storage_account.storage_account.id
lifecycle {
prevent_destroy = false
}
}
# Setup DNS zone
resource "azurerm_private_dns_zone" "dns_zone" {
name = "privatelink.blob.core.windows.net"
resource_group_name = azurerm_resource_group.main_resource_group.name
}
resource "azurerm_private_dns_zone_virtual_network_link" "network_link" {
name = "vnet_link"
resource_group_name = azurerm_resource_group.main_resource_group.name
private_dns_zone_name = azurerm_private_dns_zone.dns_zone.name
virtual_network_id = azurerm_virtual_network.virtual_network.id
}
# Setup private-link
resource "azurerm_private_endpoint" "endpoint" {
name = "storage-private-endpoint"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
subnet_id = azurerm_subnet.storage_account_subnet.id
private_service_connection {
name = "private-service-connection"
private_connection_resource_id = azurerm_storage_account.storage_account.id
is_manual_connection = false
subresource_names = ["blob"]
}
}
resource "azurerm_private_dns_a_record" "dns_a" {
name = "dns-record"
zone_name = azurerm_private_dns_zone.dns_zone.name
resource_group_name = azurerm_resource_group.main_resource_group.name
ttl = 10
records = [azurerm_private_endpoint.endpoint.private_service_connection.0.private_ip_address]
}
The problem is that my endpoint does not work. But if I add the endpoint manually, everything works like a charm. So I think my DNS zone is correct, and apparently the link to the storage account is working as well. Therefore I think there must be something wrong with my private link. Any ideas?
Update:
Here are the versions:
Terraform v1.2.5
on windows_386
+ provider registry.terraform.io/hashicorp/azurerm v3.30.0
I believe the issue lies in the name of the dns_a_record. This should be the name of the storage account you want to reach via the private link.
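In other words, the A record should carry the storage account's name; this is the relevant excerpt from the full configuration below:

resource "azurerm_private_dns_a_record" "dns_a" {
  # The record name must match the storage account name so that
  # <account>.privatelink.blob.core.windows.net resolves to the endpoint's private IP.
  name                = azurerm_storage_account.storage_account.name
  zone_name           = azurerm_private_dns_zone.dns_zone.name
  resource_group_name = azurerm_resource_group.main_resource_group.name
  ttl                 = 10
  records             = [azurerm_private_endpoint.endpoint.private_service_connection[0].private_ip_address]
}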
The following Terraform code is working for me:
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "=3.30.0"
}
}
}
provider "azurerm" {
features {
resource_group {
prevent_deletion_if_contains_resources = false
}
}
}
resource "azurerm_resource_group" "main_resource_group" {
name = "RG-Terraform-on-Azure"
location = "West Europe"
}
# Create Virtual-Network
resource "azurerm_virtual_network" "virtual_network" {
name = "Vnet"
address_space = ["10.0.0.0/16"]
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
}
# Create subnet for virtual-machine
resource "azurerm_subnet" "virtual_network_subnet" {
name = "vm_subnet"
resource_group_name = azurerm_resource_group.main_resource_group.name
virtual_network_name = azurerm_virtual_network.virtual_network.name
address_prefixes = ["10.0.1.0/24"]
}
# Create subnet for storage account
resource "azurerm_subnet" "storage_account_subnet" {
name = "storage_account_subnet"
resource_group_name = azurerm_resource_group.main_resource_group.name
virtual_network_name = azurerm_virtual_network.virtual_network.name
address_prefixes = ["10.0.2.0/24"]
}
# Create Linux Virtual machine
resource "azurerm_linux_virtual_machine" "example" {
name = "example-machine"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
size = "Standard_F2"
admin_username = "adminuser"
admin_password = "14394Las?"
disable_password_authentication = false
network_interface_ids = [
azurerm_network_interface.virtual_machine_network_interface.id,
]
os_disk {
caching = "ReadWrite"
storage_account_type = "Standard_LRS"
}
source_image_reference {
publisher = "Canonical"
offer = "UbuntuServer"
sku = "16.04-LTS"
version = "latest"
}
}
resource "azurerm_network_interface" "virtual_machine_network_interface" {
name = "vm-nic"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
ip_configuration {
name = "internal"
subnet_id = azurerm_subnet.virtual_network_subnet.id
private_ip_address_allocation = "Dynamic"
public_ip_address_id = azurerm_public_ip.vm_public_ip.id
}
}
# Create network interface and public IP for the virtual machine
resource "azurerm_public_ip" "vm_public_ip" {
name = "vm-public-ip-for-rdp"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
allocation_method = "Static"
sku = "Standard"
}
resource "azurerm_network_interface" "virtual_network_nic" {
name = "storage-private-endpoint-nic"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
ip_configuration {
name = "storage-private-endpoint-ip-config"
subnet_id = azurerm_subnet.virtual_network_subnet.id
private_ip_address_allocation = "Dynamic"
}
}
# Set up an inbound rule because we need to connect to the virtual machine over SSH (port 22)
resource "azurerm_network_security_group" "traffic_rules" {
name = "vm_traffic_rules"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
security_rule {
name = "virtual_network_permission"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "*"
source_port_range = "*"
destination_port_range = "22"
source_address_prefix = "*"
destination_address_prefix = "*"
}
}
resource "azurerm_subnet_network_security_group_association" "private_nsg_asso" {
subnet_id = azurerm_subnet.virtual_network_subnet.id
network_security_group_id = azurerm_network_security_group.traffic_rules.id
}
# Setup storage_account and its container
resource "azurerm_storage_account" "storage_account" {
name = <STORAGE_ACCOUNT_NAME>
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
account_tier = "Standard"
account_replication_type = "LRS"
account_kind = "StorageV2"
is_hns_enabled = "true"
}
resource "azurerm_storage_data_lake_gen2_filesystem" "data_lake_storage" {
name = "rawdata"
storage_account_id = azurerm_storage_account.storage_account.id
lifecycle {
prevent_destroy = false
}
}
# Setup DNS zone
resource "azurerm_private_dns_zone" "dns_zone" {
name = "privatelink.blob.core.windows.net"
resource_group_name = azurerm_resource_group.main_resource_group.name
}
resource "azurerm_private_dns_zone_virtual_network_link" "network_link" {
name = "vnet-link"
resource_group_name = azurerm_resource_group.main_resource_group.name
private_dns_zone_name = azurerm_private_dns_zone.dns_zone.name
virtual_network_id = azurerm_virtual_network.virtual_network.id
}
# Setup private-link
resource "azurerm_private_endpoint" "endpoint" {
name = "storage-private-endpoint"
location = azurerm_resource_group.main_resource_group.location
resource_group_name = azurerm_resource_group.main_resource_group.name
subnet_id = azurerm_subnet.storage_account_subnet.id
private_service_connection {
name = "storage-private-service-connection"
private_connection_resource_id = azurerm_storage_account.storage_account.id
is_manual_connection = false
subresource_names = ["blob"]
}
}
resource "azurerm_private_dns_a_record" "dns_a" {
name = azurerm_storage_account.storage_account.name
zone_name = azurerm_private_dns_zone.dns_zone.name
resource_group_name = azurerm_resource_group.main_resource_group.name
ttl = 10
records = [azurerm_private_endpoint.endpoint.private_service_connection.0.private_ip_address]
}
Additionally, I'm not sure whether it is possible to ping storage accounts. To test I ran nslookup <STORAGE_ACCOUNT_NAME>.blob.core.windows.net both from my local machine and from the Azure VM. In the former case, I got a public IP while in the latter I got a private IP in the range defined in the Terraform config, which seems to be the behaviour you are looking for.
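If it helps with verification, the endpoint's private IP can also be surfaced as a Terraform output and compared against the nslookup result; a minimal sketch using the attribute already referenced above:

# Expose the private endpoint's IP so it can be checked against nslookup from the VM.
output "storage_private_endpoint_ip" {
  value = azurerm_private_endpoint.endpoint.private_service_connection[0].private_ip_address
}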
I need help with Terraform. I need to deploy Azure Cache for Redis using a private endpoint. My code:
resource "azurerm_redis_cache" "redis_cache_example" {
name = "redis-cache-ex"
location = var.location
resource_group_name = var.resource_group_name
capacity = var.redis_plan_capacity
family = var.redis_plan_family
sku_name = var.redis_plan_sku_name
enable_non_ssl_port = false
minimum_tls_version = "1.2"
public_network_access_enabled = false
}
resource "azurerm_private_dns_zone" "private_dns_zone_example" {
name = "example.redis-ex.azure.com"
resource_group_name = var.resource_group_name
}
resource "azurerm_private_dns_zone_virtual_network_link" "virtual_network_link_example" {
name = "exampleVnet.com"
private_dns_zone_name = azurerm_private_dns_zone.private_dns_zone_example.name
virtual_network_id = var.vnet_id
resource_group_name = var.resource_group_name
}
resource "azurerm_private_endpoint" "redis_pe_example" {
name = "redis-private-endpoint-ex"
location = var.location
resource_group_name = var.resource_group_name
subnet_id = var.subnet_id
private_dns_zone_group {
name = "privatednsrediszonegroup"
private_dns_zone_ids = [azurerm_private_dns_zone.private_dns_zone_example.id]
}
private_service_connection {
name = "peconnection-example"
private_connection_resource_id = azurerm_redis_cache.redis_cache_example.id
is_manual_connection = false
subresource_names = ["redisCache"]
}
}
After deploying, my Redis cache doesn't respond to ping from within the VNet. What's wrong with my Terraform?
You can also add an azurerm_private_endpoint resource and link it to azurerm_redis_cache (or, I guess, other resources as well).
resource "azurerm_redis_cache" "default" {
...
}
resource "azurerm_private_endpoint" "default" {
count = 1
name = format("%s-redis%d", var.env, count.index + 1)
resource_group_name = data.azurerm_resource_group.default.name
location = data.azurerm_resource_group.default.location
subnet_id = data.azurerm_subnet.default.id
private_service_connection {
name = format("%s-redis%d-pe", var.env, count.index + 1)
private_connection_resource_id = azurerm_redis_cache.default[count.index].id
is_manual_connection = false
subresource_names = ["redisCache"]
}
}
You can find the list of other private-link resources in the Azure docs.
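For name resolution inside the VNet, the endpoint is usually paired with the privatelink.redis.cache.windows.net private DNS zone linked to that VNet, whose ID can then go into the endpoint's private_dns_zone_group. A minimal sketch, reusing the variables from the question (the resource names are assumptions):

# Hypothetical sketch: private DNS zone and VNet link so the cache's FQDN
# resolves to the private endpoint's IP from inside the VNet.
resource "azurerm_private_dns_zone" "redis" {
  name                = "privatelink.redis.cache.windows.net"
  resource_group_name = var.resource_group_name
}

resource "azurerm_private_dns_zone_virtual_network_link" "redis" {
  name                  = "redis-dns-link"
  resource_group_name   = var.resource_group_name
  private_dns_zone_name = azurerm_private_dns_zone.redis.name
  virtual_network_id    = var.vnet_id
}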
I want to create AKS and ACR resources in my Azure environment. The script is able to create the two resources, and I am able to connect to each of them. But the AKS node cannot pull images from the ACR. After some research, I found I need to create a Private Endpoint between the AKS and ACR.
The strange thing is that if I create the PE using Terraform the AKS and ACR still cannot communicate. If I create the PE manually, they can communicate. I compared the parameters of the two PEs on the UI and they look the same.
Could someone help me define the PE using the following script? Or let me know what I did wrong?
Thanks!
Full TF script without the Private Endpoint
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "=2.97.0"
}
}
required_version = ">= 1.1.7"
}
provider "azurerm" {
features {}
subscription_id = "xxx"
}
resource "azurerm_resource_group" "rg" {
name = "aks-rg"
location = "East US"
}
resource "azurerm_kubernetes_cluster" "aks" {
name = "my-aks"
location = azurerm_resource_group.rg.location
resource_group_name = azurerm_resource_group.rg.name
dns_prefix = "myaks"
default_node_pool {
name = "default"
node_count = 2
vm_size = "Standard_B2s"
}
identity {
type = "SystemAssigned"
}
}
resource "azurerm_container_registry" "acr" {
name = "my-aks-acr-123"
location = azurerm_resource_group.rg.location
resource_group_name = azurerm_resource_group.rg.name
sku = "Premium"
admin_enabled = true
network_rule_set {
default_action = "Deny"
}
}
resource "azurerm_role_assignment" "acrpull" {
principal_id = azurerm_kubernetes_cluster.aks.kubelet_identity[0].object_id
role_definition_name = "AcrPull"
scope = azurerm_container_registry.acr.id
skip_service_principal_aad_check = true
}
Then you need to create a VNet and a subnet (not part of this code; a minimal sketch follows below), plus a private DNS zone:
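For completeness, a minimal sketch of such a VNet and subnet (the names and address ranges are assumptions):

# Hypothetical VNet and subnet to host the ACR private endpoint.
resource "azurerm_virtual_network" "example" {
  name                = "aks-acr-vnet"
  address_space       = ["10.10.0.0/16"]
  location            = azurerm_resource_group.example.location
  resource_group_name = azurerm_resource_group.example.name
}

resource "azurerm_subnet" "endpoints" {
  name                 = "acr-endpoint-subnet"
  resource_group_name  = azurerm_resource_group.example.name
  virtual_network_name = azurerm_virtual_network.example.name
  address_prefixes     = ["10.10.1.0/24"]
}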
Private DNS zone:
resource "azurerm_private_dns_zone" "example" {
name = "mydomain.com"
resource_group_name = azurerm_resource_group.example.name
}
AKS Part:
resource "azurerm_kubernetes_cluster" "aks" {
name = "my-aks"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
dns_prefix = "myaks"
private_cluster_enabled = true
default_node_pool {
name = "default"
node_count = 2
vm_size = "Standard_B2s"
}
identity {
type = "SystemAssigned"
}
}
You need to create the ACR and a private endpoint for the ACR:
resource "azurerm_container_registry" "acr" {
name = "my-aks-acr-123"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
public_network_access_enabled = false
sku = "Premium"
admin_enabled = true
}
resource "azurerm_private_endpoint" "acr" {
name = "pvep-acr"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
subnet_id = YOUR_SUBNET
private_service_connection {
name = "example-acr"
private_connection_resource_id = azurerm_container_registry.acr.id
is_manual_connection = false
subresource_names = ["registry"]
}
private_dns_zone_group {
name = azurerm_private_dns_zone.example.name
private_dns_zone_ids = [azurerm_private_dns_zone.example.id]
}
}
resource "azurerm_role_assignment" "acrpull" {
principal_id = azurerm_kubernetes_cluster.aks.kubelet_identity[0].object_id
role_definition_name = "AcrPull"
scope = azurerm_container_registry.acr.id
skip_service_principal_aad_check = true
}
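One more note: for the registry's default FQDN (<name>.azurecr.io) to resolve to the private endpoint from inside the VNet, the private DNS zone is typically named privatelink.azurecr.io. A minimal sketch of that variant (the resource names and the YOUR_VNET_ID placeholder are assumptions):

# Hypothetical: the standard private DNS zone for ACR private endpoints,
# linked to the VNet that hosts the AKS nodes.
resource "azurerm_private_dns_zone" "acr" {
  name                = "privatelink.azurecr.io"
  resource_group_name = azurerm_resource_group.example.name
}

resource "azurerm_private_dns_zone_virtual_network_link" "acr" {
  name                  = "acr-dns-link"
  resource_group_name   = azurerm_resource_group.example.name
  private_dns_zone_name = azurerm_private_dns_zone.acr.name
  virtual_network_id    = YOUR_VNET_ID
}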
Doing an Event Grid subscription with an Event Hub endpoint:
resource "azurerm_eventgrid_system_topic_event_subscription" "example" {
name = "example-event-subscription"
system_topic = azurerm_system_topic.example.name
resource_group_name = azurerm_resource_group.example.name
eventhub_endpoint {
eventhub_endpoint_id = azurerm_eventhub.example.id
}
}
I got an error like:
Blocks of type "eventhub_endpoint" are not expected here.
Not sure what I'm missing here. Is eventhub_endpoint not a valid block? How can I configure the Event Hub for my Event Grid subscription?
Regarding the issue, please update your script as follows:
resource "azurerm_eventgrid_system_topic_event_subscription" "example" {
name = "example-event-subscription"
system_topic = azurerm_eventgrid_system_topic.example.name
resource_group_name = azurerm_resource_group.example.name
eventhub_endpoint_id = azurerm_eventhub.example.id
}
For more details, please refer to the azurerm_eventgrid_system_topic_event_subscription documentation.
For example (I use Terraform 0.15.4 on Windows):
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "=2.46.0"
}
}
}
provider "azurerm" {
subscription_id = "e5b0fcfa-e859-43f3-8d84-5e5fe29f4c68"
client_id = "42e0d080-b1f3-40cf-8db6-c4c522d988c4"
client_secret = "Gbx2eK64iqq_g_3NCA.ClJDfQpIjoae:"
tenant_id = "e4c9ab4e-bd27-40d5-8459-230ba2a757fb"
features {}
}
resource "azurerm_resource_group" "example" {
name = "example-rg"
location = "West Europe"
}
resource "azurerm_eventhub_namespace" "example" {
name = "testhubname0123"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
sku = "Standard"
capacity = 1
tags = {
environment = "Production"
}
}
resource "azurerm_eventhub" "example" {
name = "testhub0123"
namespace_name = azurerm_eventhub_namespace.example.name
resource_group_name = azurerm_resource_group.example.name
partition_count = 2
message_retention = 1
}
resource "azurerm_eventgrid_system_topic" "example" {
name = "example-system-topic"
location = "Global"
resource_group_name = azurerm_resource_group.example.name
source_arm_resource_id = azurerm_resource_group.example.id
topic_type = "Microsoft.Resources.ResourceGroups"
}
resource "azurerm_eventgrid_system_topic_event_subscription" "example" {
name = "example-event-subscription"
system_topic = azurerm_eventgrid_system_topic.example.name
resource_group_name = azurerm_resource_group.example.name
eventhub_endpoint_id = azurerm_eventhub.example.id
}
I would like to create a virtual network gateway for an existing VNet using Terraform. Can someone help me with the code?
Try updating your azurerm Terraform provider:
resource "azurerm_subnet" "test" {
name = "GatewaySubnet"
resource_group_name = "${var.resource_group_name}"
virtual_network_name = "${var.virtual_network_name}"
address_prefix = "192.168.2.0/24"
}
resource "azurerm_public_ip" "test" {
name = "test"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
public_ip_address_allocation = "Dynamic"
}
resource "azurerm_virtual_network_gateway" "test" {
name = "test"
location = "${var.location}"
resource_group_name = "${var.resource_group_name}"
type = "Vpn"
vpn_type = "RouteBased"
active_active = false
enable_bgp = false
sku = "Basic"
ip_configuration {
name = "vnetGatewayConfig"
public_ip_address_id = "${azurerm_public_ip.test.id}"
private_ip_address_allocation = "Dynamic"
subnet_id = "${azurerm_subnet.test.id}"
}
vpn_client_configuration {
address_space = [ "10.2.0.0/24" ]
root_certificate {
name = "DigiCert-Federated-ID-Root-CA"
public_cert_data = <<EOF
MIIDuzCCAqOgAwIBAgIQCHTZWCM+IlfFIRXIvyKSrjANBgkqhkiG9w0BAQsFADBn
MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
d3cuZGlnaWNlcnQuY29tMSYwJAYDVQQDEx1EaWdpQ2VydCBGZWRlcmF0ZWQgSUQg
Um9vdCBDQTAeFw0xMzAxMTUxMjAwMDBaFw0zMzAxMTUxMjAwMDBaMGcxCzAJBgNV
BAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdp
Y2VydC5jb20xJjAkBgNVBAMTHURpZ2lDZXJ0IEZlZGVyYXRlZCBJRCBSb290IENB
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvAEB4pcCqnNNOWE6Ur5j
QPUH+1y1F9KdHTRSza6k5iDlXq1kGS1qAkuKtw9JsiNRrjltmFnzMZRBbX8Tlfl8
zAhBmb6dDduDGED01kBsTkgywYPxXVTKec0WxYEEF0oMn4wSYNl0lt2eJAKHXjNf
GTwiibdP8CUR2ghSM2sUTI8Nt1Omfc4SMHhGhYD64uJMbX98THQ/4LMGuYegou+d
GTiahfHtjn7AboSEknwAMJHCh5RlYZZ6B1O4QbKJ+34Q0eKgnI3X6Vc9u0zf6DH8
Dk+4zQDYRRTqTnVO3VT8jzqDlCRuNtq6YvryOWN74/dq8LQhUnXHvFyrsdMaE1X2
DwIDAQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNV
HQ4EFgQUGRdkFnbGt1EWjKwbUne+5OaZvRYwHwYDVR0jBBgwFoAUGRdkFnbGt1EW
jKwbUne+5OaZvRYwDQYJKoZIhvcNAQELBQADggEBAHcqsHkrjpESqfuVTRiptJfP
9JbdtWqRTmOf6uJi2c8YVqI6XlKXsD8C1dUUaaHKLUJzvKiazibVuBwMIT84AyqR
QELn3e0BtgEymEygMU569b01ZPxoFSnNXc7qDZBDef8WfqAV/sxkTi8L9BkmFYfL
uGLOhRJOFprPdoDIUBB+tmCl3oDcBy3vnUeOEioz8zAkprcb3GHwHAK+vHmmfgcn
WsfMLH4JCLa/tRYL+Rw/N3ybCkDp00s0WUZ+AoDywSl0Q/ZEnNY0MsFiw6LyIdbq
M/s/1JRtO3bDSzD9TazRVzn2oBqzSa8VgIo5C1nOnoAKJTlsClJKvIhnRlaLQqk=
EOF
}
revoked_certificate {
name = "Verizon-Global-Root-CA"
thumbprint = "912198EEF23DCAC40939312FEE97DD560BAE49B1"
}
}
}
Terraform v0.11.7
provider.azurerm v1.13.0
It works well on my side, and you can adapt the resources to your own. For more details about the virtual network gateway with Terraform, see azurerm_virtual_network_gateway.
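Since the VNet already exists, you can also look it up with a data source instead of passing everything through variables; a minimal sketch in the same 0.11-style syntax (the data source label existing is an assumption):

# Hypothetical: reference the existing VNet via a data source.
data "azurerm_virtual_network" "existing" {
  name                = "${var.virtual_network_name}"
  resource_group_name = "${var.resource_group_name}"
}

The subnet's virtual_network_name can then reference "${data.azurerm_virtual_network.existing.name}".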