Terraform: Use of Locals with interpolated values generates unexpected errors

Created on 12 Sep 2017 · 10 comments · Source: hashicorp/terraform

Terraform Version

Terraform v0.10.4

Terraform Configuration Files

provider "aws" {
  region = "us-east-1"
}


resource "aws_nat_gateway" "us-east-1a-CLUSTER_NAME" {
  allocation_id = "${aws_eip.us-east-1a-CLUSTER_NAME.id}"
  subnet_id     = "${aws_subnet.utility-us-east-1a-CLUSTER_NAME.id}"
}

resource "aws_nat_gateway" "us-east-1b-CLUSTER_NAME" {
  allocation_id = "${aws_eip.us-east-1b-CLUSTER_NAME.id}"
  subnet_id     = "${aws_subnet.utility-us-east-1b-CLUSTER_NAME.id}"
}


resource "aws_route" "0-0-0-0--0" {
  route_table_id         = "${aws_route_table.CLUSTER_NAME.id}"
  destination_cidr_block = "0.0.0.0/0"
  gateway_id             = "${aws_internet_gateway.CLUSTER_NAME.id}"
}

resource "aws_route" "private-us-east-1a-0-0-0-0--0" {
  route_table_id         = "${aws_route_table.private-us-east-1a-CLUSTER_NAME.id}"
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = "${aws_nat_gateway.us-east-1a-CLUSTER_NAME.id}"
}

resource "aws_route" "private-us-east-1b-0-0-0-0--0" {
  route_table_id         = "${aws_route_table.private-us-east-1b-CLUSTER_NAME.id}"
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = "${aws_nat_gateway.us-east-1b-CLUSTER_NAME.id}"
}

resource "aws_route_table" "CLUSTER_NAME" {
  vpc_id = "${aws_vpc.CLUSTER_NAME.id}"

  tags = {
    KubernetesCluster = "CLUSTER_DNSNAME"
    Name              = "CLUSTER_DNSNAME"
  }
}

resource "aws_route_table" "private-us-east-1a-CLUSTER_NAME" {
  vpc_id = "${aws_vpc.CLUSTER_NAME.id}"

  tags = {
    KubernetesCluster = "CLUSTER_DNSNAME"
    Name              = "private-us-east-1a.CLUSTER_DNSNAME"
  }
}

resource "aws_route_table" "private-us-east-1b-CLUSTER_NAME" {
  vpc_id = "${aws_vpc.CLUSTER_NAME.id}"

  tags = {
    KubernetesCluster = "CLUSTER_DNSNAME"
    Name              = "private-us-east-1b.CLUSTER_DNSNAME"
  }
}

resource "aws_route_table_association" "private-us-east-1a-CLUSTER_NAME" {
  subnet_id      = "${aws_subnet.us-east-1a-CLUSTER_NAME.id}"
  route_table_id = "${aws_route_table.private-us-east-1a-CLUSTER_NAME.id}"
}

resource "aws_route_table_association" "private-us-east-1b-CLUSTER_NAME" {
  subnet_id      = "${aws_subnet.us-east-1b-CLUSTER_NAME.id}"
  route_table_id = "${aws_route_table.private-us-east-1b-CLUSTER_NAME.id}"
}

resource "aws_route_table_association" "utility-us-east-1a-CLUSTER_NAME" {
  subnet_id      = "${aws_subnet.utility-us-east-1a-CLUSTER_NAME.id}"
  route_table_id = "${aws_route_table.CLUSTER_NAME.id}"
}

resource "aws_route_table_association" "utility-us-east-1b-CLUSTER_NAME" {
  subnet_id      = "${aws_subnet.utility-us-east-1b-CLUSTER_NAME.id}"
  route_table_id = "${aws_route_table.CLUSTER_NAME.id}"
}

resource "aws_subnet" "us-east-1a-CLUSTER_NAME" {
  vpc_id            = "${aws_vpc.CLUSTER_NAME.id}"
  cidr_block        = "10.65.66.0/23"
  availability_zone = "us-east-1a"

  tags = {
    KubernetesCluster                        = "CLUSTER_DNSNAME"
    Name                                     = "us-east-1a.CLUSTER_DNSNAME"
    "kubernetes.io/cluster/CLUSTER_DNSNAME"  = "owned"
  }
}

resource "aws_subnet" "us-east-1b-CLUSTER_NAME" {
  vpc_id            = "${aws_vpc.CLUSTER_NAME.id}"
  cidr_block        = "10.65.68.0/23"
  availability_zone = "us-east-1b"

  tags = {
    KubernetesCluster                        = "CLUSTER_DNSNAME"
    Name                                     = "us-east-1b.CLUSTER_DNSNAME"
    "kubernetes.io/cluster/CLUSTER_DNSNAME"  = "owned"
  }
}

resource "aws_subnet" "utility-us-east-1a-CLUSTER_NAME" {
  vpc_id            = "${aws_vpc.CLUSTER_NAME.id}"
  cidr_block        = "10.65.64.0/26"
  availability_zone = "us-east-1a"

  tags = {
    KubernetesCluster                        = "CLUSTER_DNSNAME"
    Name                                     = "utility-us-east-1a.CLUSTER_DNSNAME"
    "kubernetes.io/cluster/CLUSTER_DNSNAME"  = "owned"
  }
}

resource "aws_subnet" "utility-us-east-1b-CLUSTER_NAME" {
  vpc_id            = "${aws_vpc.CLUSTER_NAME.id}"
  cidr_block        = "10.65.64.64/26"
  availability_zone = "us-east-1b"

  tags = {
    KubernetesCluster                        = "CLUSTER_DNSNAME"
    Name                                     = "utility-us-east-1b.CLUSTER_DNSNAME"
    "kubernetes.io/cluster/CLUSTER_DNSNAME"  = "owned"
  }
}

resource "aws_vpc" "CLUSTER_NAME" {
  cidr_block           = "10.65.64.0/20"
  enable_dns_hostnames = true
  enable_dns_support   = true

  tags = {
    KubernetesCluster                        = "CLUSTER_DNSNAME"
    Name                                     = "CLUSTER_DNSNAME"
    "kubernetes.io/cluster/CLUSTER_DNSNAME"  = "owned"
  }
}

resource "aws_vpc_dhcp_options" "CLUSTER_NAME" {
  domain_name         = "ec2.internal"
  domain_name_servers = ["AmazonProvidedDNS"]

  tags = {
    KubernetesCluster = "CLUSTER_DNSNAME"
    Name              = "CLUSTER_DNSNAME"
  }
}

resource "aws_vpc_dhcp_options_association" "CLUSTER_NAME" {
  vpc_id          = "${aws_vpc.CLUSTER_NAME.id}"
  dhcp_options_id = "${aws_vpc_dhcp_options.CLUSTER_NAME.id}"
}

resource "aws_eip" "us-east-1a-CLUSTER_NAME" {
  vpc = true
}

resource "aws_eip" "us-east-1b-CLUSTER_NAME" {
  vpc = true
}

resource "aws_internet_gateway" "CLUSTER_NAME" {
  vpc_id = "${aws_vpc.CLUSTER_NAME.id}"

  tags = {
    KubernetesCluster = "CLUSTER_DNSNAME"
    Name              = "CLUSTER_DNSNAME"
  }
}


locals {
  aws_region = "us-east-1"
  kube_cluster_name-TF = "CLUSTER_NAME"
  kube_cluster_name = "CLUSTER_DNSNAME"
  vpc_id = "${aws_vpc.CLUSTER_NAME.id}"
  client_tag = "COMPANY - Testing"

  route_tables = {
    "0" = "${aws_route_table.private-us-east-1a-CLUSTER_NAME.id}",
    "1" = "${aws_route_table.private-us-east-1b-CLUSTER_NAME.id}"
  }

  number_of_route_tables = 2

}



# Set up Site-to-Site VPN to COMPANY
resource "aws_vpn_gateway" "vpn_gateway" {
  # This DOES generate an error on destroy
  vpc_id = "${local.vpc_id}"
  # This doesn't generate an error on destroy
  # vpc_id = "${aws_vpc.CLUSTER_NAME.id}"

  tags = {
    KubernetesCluster = "${local.kube_cluster_name}"
    Name              = "EB_VPN-${local.kube_cluster_name}"
    Client            = "${local.client_tag}"
  }

}

# VPN
resource "aws_customer_gateway" "customer_gateway" {
  bgp_asn    = 65001
  ip_address = "200.200.200.200"
  type       = "ipsec.1"

  tags = {
    KubernetesCluster = "${local.kube_cluster_name}"
    Name              = "EB_VPN-${local.kube_cluster_name}"
    Client            = "${local.client_tag}"
  }
}

# VPN
resource "aws_vpn_connection" "main" {
  vpn_gateway_id      = "${aws_vpn_gateway.vpn_gateway.id}"
  customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}"
  type                = "ipsec.1"
  static_routes_only  = true

  tags = {
    KubernetesCluster = "${local.kube_cluster_name}"
    Name              = "EB_VPN-${local.kube_cluster_name}"
    Client            = "${local.client_tag}"
  }
}

# VPN
resource "aws_vpn_connection_route" "COMPANY_office" {
  destination_cidr_block = "10.1.0.0/24"
  vpn_connection_id      = "${aws_vpn_connection.main.id}"
}

# VPN 
resource "aws_vpn_gateway_route_propagation" "example" {
  # why doesn't this work..
  # count = "${length( keys(local.route_tables) )}"
  #
  count = "${local.number_of_route_tables}"
  vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}"
  route_table_id = "${lookup(local.route_tables, count.index)}"
}

Debug Output

Available on request

Expected Behavior

terraform destroy should remove all resources and not error out

Actual Behavior

Terraform generates an error when running destroy and exits.
Error:

Error applying plan:
2017/09/12 21:20:56 [DEBUG] plugin: waiting for all plugin processes to complete...

2 error(s) occurred:

* local.route_tables: local.route_tables: Resource 'aws_route_table.private-us-east-1b-CLUSTER_NAME' does not have attribute 'id' for variable 'aws_route_table.private-us-east-1b-CLUSTER_NAME.id'
* local.vpc_id: local.vpc_id: Resource 'aws_vpc.CLUSTER_NAME' does not have attribute 'id' for variable 'aws_vpc.CLUSTER_NAME.id'

Terraform does not automatically rollback in the face of errors.
Instead, your Terraform state file has been partially updated with
any resources that successfully completed. Please address the error
above and apply again to incrementally change your infrastructure.

Steps to Reproduce

  1. terraform init
  2. terraform apply
  3. terraform destroy

Important Factoids

In the locals definition, I'm assigning local.vpc_id the value "${aws_vpc.kube1-us-east-1-evba-ca.id}" and referencing the local further down.
If I do NOT use locals and reference "${aws_vpc.kube1-us-east-1-evba-ca.id}" directly, no errors are generated.
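
A minimal sketch of the difference, using a hypothetical aws_vpc.main:

locals {
  vpc_id = "${aws_vpc.main.id}"
}

resource "aws_vpn_gateway" "vgw" {
  # vpc_id = "${aws_vpc.main.id}"   # direct reference: destroys cleanly
  vpc_id = "${local.vpc_id}"        # via the local: errors on destroy
}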

Labels: bug, core

All 10 comments

+1 Facing the same issue. Getting the following error:
module.api_gateway.local.api_ids: local.api_ids: Resource 'aws_api_gateway_resource.test_api_gateway_resource' does not have attribute 'rest_api_id' for variable 'aws_api_gateway_resource.test_api_gateway_resource.rest_api_id'
After trying terraform destroy a couple of times, it produces a different issue:
module.api_gateway.local.methods: local.methods: variable "test_id_api_gateway_resource" is nil, but no error was reported

I have a locals block like so:

locals {
  methods = [
    {
      ...
    },
    {
      ...
    },
  ]
}

Same issue here; it looks like the resources referenced by locals get destroyed before the locals section is evaluated.

data "terraform_remote_state" "network" {
    backend = "local"
    config {
        path = "${path.module}/../remote/network.tfstate"
    }
}

locals {
  # constant lookup table for subnet IDs
  subnet_mapping = {
    saltmaster    = "${data.terraform_remote_state.network.access_subnet_id}"
    kubemaster    = "${data.terraform_remote_state.network.kmaster_subnet_id}"
    kubenode      = "${data.terraform_remote_state.network.knode_subnet_id}"
    skydns        = "${data.terraform_remote_state.network.knode_subnet_id}"
    registry      = "${data.terraform_remote_state.network.vpn_subnet_id}"
    loadbalancer  = "${data.terraform_remote_state.network.front_subnet_id}"
  }
}

During the destroy we get this:

* local.subnet_mapping: local.subnet_mapping: Resource 'data.terraform_remote_state.network' does not have attribute 'access_subnet_id' for variable 'data.terraform_remote_state.network.access_subnet_id'

@apparentlymart if you can think of a quick hack to get around the limitation, that would help a lot. Thanks!

@djsly I'm seeing exactly the same thing with 0.10.6 and terraform_remote_state vars being used in a locals block.

Hi all! Sorry for the delay in replying here, and sorry for this limitation of the new locals feature.

The fix for this bug is #16213. Unfortunately we don't have an easy workaround ready, but temporarily replacing the local. interpolation with some constant value (doesn't matter which, since destroy doesn't use attributes) may help "unstick" Terraform here and allow the destroy to proceed.
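
For example, a temporary edit along these lines (the placeholder IDs are arbitrary, against the configuration in the original report) should let the destroy finish:

locals {
  # Pinned to throwaway constants just for the destroy run; the actual
  # values don't matter because destroy doesn't read these attributes.
  vpc_id = "vpc-00000000"

  route_tables = {
    "0" = "rtb-00000000",
    "1" = "rtb-11111111"
  }
}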

We should have a new release out soon containing this fix.

I think I'm facing the same issue. While terraform destroy itself works fine, setting a resource's count to 0 when a local depends on that resource has the same effect.
I.e. consider the following:

resource "aws_instance" "yowo_swarm_manager" {
 count = "0"
 ...
}

locals {
  swarm_private_ip = "${aws_instance.yowo_swarm_manager.0.private_ip}"
}

Hi @apparentlymart, I am seeing this with Terraform 0.11.7 and 0.11.8 and local provider version 1.1.0; can we re-open, please? It is intermittent, so hard to reproduce.

I am now in a situation where I have destroyed everything. If I comment out the locals and the local files they are used for, destroy exits cleanly; if I add them back in, though, the destroy fails looking for the resources they point to, e.g.:

* module.testnet.local.nixops: local.nixops: Resource 'aws_instance.nixops' does not have attribute 'private_ip' for variable 'aws_instance.nixops.private_ip'

You can of course use this hack: "${element(concat(resource.name.*.attr, list("")), 0)}"
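
Applied to the count = 0 example above, that would look something like:

locals {
  # concat() with an empty-string fallback keeps element() from failing
  # when aws_instance.yowo_swarm_manager has count = 0
  swarm_private_ip = "${element(concat(aws_instance.yowo_swarm_manager.*.private_ip, list("")), 0)}"
}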

my eyes are bleeding.

I'm going to lock this issue because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.

If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
