Whenever I have to recreate a launch configuration attached to an ASG, Terraform errors with a message like the following:
* aws_launch_configuration.stagingsandbox-webservers (deposed #0): ResourceInUse: Cannot delete launch configuration stagingsandbox-webservers-2018-03-29-1 because it is attached to AutoScalingGroup
status code: 400, request id: 30042b40-3380-11e8-a2c6-231eedcff7c8
Terraform version: 0.11.5
Launch configuration:
resource "aws_launch_configuration" "stagingsandbox-services" {
name = "stagingsandbox-services-2018-03-29-1"
iam_instance_profile = "${module.stagingsandbox_ec2_profile.name}"
security_groups = ["${module.security_groups_stagingsandbox.services}"]
associate_public_ip_address = false
image_id = "${lookup(var.trusty_ami, data.aws_region.current.name)}"
instance_type = "m4.large"
user_data = "${data.template_file.stagingsandbox-services.rendered}"
ebs_optimized = "true"
lifecycle {
create_before_destroy = true
}
}
Expected behavior: no error.
Actual behavior: I need to deal with the error manually and re-apply.
Steps to reproduce: any adjustment to the launch configuration.
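For example (illustrative; any change that forces a new LC does it), bumping the date suffix in the name and re-applying reproduces it:

resource "aws_launch_configuration" "stagingsandbox-services" {
  # was stagingsandbox-services-2018-03-29-1; all other attributes unchanged
  name = "stagingsandbox-services-2018-03-29-2"
}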
Hi @kzw 👋 could you show us a sanitized version of your aws_autoscaling_group configuration as well? Curious whether it's related to this upstream Terraform issue about action ordering with modules and create_before_destroy: https://github.com/hashicorp/terraform/issues/17735
You commented in another issue you closed that I should not specify availability zones in a VPC-based ASG; I have not gotten around to removing that yet, but here is the current code:
module "stagingsandbox_innocentbox_services_asg" {
source = "asg"
environment = "stagingsandbox"
lc_name = "${aws_launch_configuration.stagingsandbox-services.name}"
webserver_lc_name = "${aws_launch_configuration.stagingsandbox-webservers.name}"
chef_role = "${lookup(data.template_file.stagingsandbox-services.vars, "chef_role")}"
webserver_chef_role = "${lookup(data.template_file.stagingsandbox-webservers.vars, "chef_role")}"
elbs = "${list("${module.stagingsandbox_services_elb.clark}")}"
desired = 0
max = 4
web_desired = 1
webserver_elbs = "${list("${module.stagingsandbox_services_elb.webservers}")}"
zone_identifier = "${list("${module.innocent_stagingsandbox_vpc.private-b-id}",
"${module.innocent_stagingsandbox_vpc.private-c-id}",
"${module.innocent_stagingsandbox_vpc.private-d-id}")}"
iam_role = "${aws_iam_role.asg-events.arn}"
sns_topic = "${aws_sns_topic.asg-events.arn}"
}
resource "aws_sns_topic" "asg-events" {
name = "asg-events"
}
The asg module is:
variable "environment" {}
variable "lc_name" {}
variable "webserver_lc_name" {}
variable "elbs" {
type = "list"
}
variable "webserver_elbs" {
type = "list"
}
variable "zone_identifier" {
type = "list"
}
variable "chef_role" {}
variable "webserver_chef_role" {}
variable "desired" {
default = 1
}
variable "max" {
default = 2
}
variable "min" {
default = 0
}
variable "web_desired" {
default = 1
}
variable "web_max" {
default = 2
}
variable "web_min" {
default = 0
}
variable "iam_role" {}
variable "sns_topic" {}
data "aws_availability_zones" "az" {}
module "webservers_termination_hook" {
source = "hooks"
asg = "${aws_autoscaling_group.innocentbox-webservers.name}"
role_arn = "${var.iam_role}"
sns_arn = "${var.sns_topic}"
}
module "services_termination_hook" {
source = "hooks"
asg = "${aws_autoscaling_group.innocentbox-services.name}"
role_arn = "${var.iam_role}"
sns_arn = "${var.sns_topic}"
}
resource "aws_autoscaling_group" "innocentbox-webservers" {
availability_zones = ["${data.aws_availability_zones.az.names[0]}",
"${data.aws_availability_zones.az.names[1]}",
"${data.aws_availability_zones.az.names[2]}",
]
name = "${var.environment}-webservers"
desired_capacity = "${var.web_desired}"
max_size = "${var.web_max}"
min_size = "${var.web_min}"
health_check_grace_period = 1800
health_check_type = "ELB"
force_delete = true
launch_configuration = "${var.webserver_lc_name}"
load_balancers = ["${var.webserver_elbs}"]
vpc_zone_identifier = ["${var.zone_identifier}"]
tag {
key = "ChefRole"
value = "${var.webserver_chef_role}"
propagate_at_launch = true
}
tag {
key = "Environment"
value = "${var.environment}"
propagate_at_launch = true
}
}
output "webserver_name" {
value = "${aws_autoscaling_group.innocentbox-webservers.name}"
}
resource "aws_autoscaling_group" "innocentbox-services" {
availability_zones = ["${data.aws_availability_zones.az.names[0]}",
"${data.aws_availability_zones.az.names[1]}",
"${data.aws_availability_zones.az.names[2]}",
]
name = "${var.environment}-services"
desired_capacity = "${var.desired}"
max_size = "${var.max}"
min_size = "${var.min}"
health_check_grace_period = 1800
health_check_type = "ELB"
force_delete = true
launch_configuration = "${var.lc_name}"
load_balancers = ["${var.elbs}"]
vpc_zone_identifier = ["${var.zone_identifier}"]
tag {
key = "ChefRole"
value = "${var.chef_role}"
propagate_at_launch = true
}
tag {
key = "Environment"
value = "${var.environment}"
propagate_at_launch = true
}
}
output "name" {
value = "${var.environment}-services"
}
The asg/hooks module is:
variable "asg" {}
variable "role_arn" {}
variable "sns_arn" {}
resource "aws_sns_topic_subscription" "this" {
topic_arn = "${var.sns_arn}"
protocol = "sqs"
endpoint = "${aws_sqs_queue.this.arn}"
}
resource "aws_sqs_queue" "this" {
name = "asg-events-for-${var.asg}"
max_message_size = 2048
receive_wait_time_seconds = 10
}
resource "aws_sqs_queue_policy" "this" {
queue_url = "${aws_sqs_queue.this.id}"
policy = <<POLICY
{
"Version": "2012-10-17",
"Id": "sqspolicy",
"Statement": [
{
"Sid": "First",
"Effect": "Allow",
"Principal": "*",
"Action": "sqs:SendMessage",
"Resource": "${aws_sqs_queue.this.arn}",
"Condition": {
"ArnEquals": {
"aws:SourceArn": "${var.sns_arn}"
}
}
}
]
}
POLICY
}
resource "aws_autoscaling_lifecycle_hook" "terminate" {
name = "scale-in"
default_result = "CONTINUE"
heartbeat_timeout = 600
autoscaling_group_name = "${var.asg}"
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
notification_target_arn = "${var.sns_arn}"
role_arn = "${var.role_arn}"
}
Hmm, not sure if I understand correctly, but you may need to use the name_prefix attribute instead of name:
name_prefix = "stagingsandbox-services-"
Then it'll automatically append a timestamp or something to that to form the actual name. That way, if there are any changes that require a new launch configuration, it can create the new one, with a new name, link it to the ASG, and then delete the old one. That's what I've done for my launch configurations.
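For example, a trimmed-down sketch of the pattern, based on the configuration above (other attributes omitted for brevity):

resource "aws_launch_configuration" "stagingsandbox-services" {
  name_prefix   = "stagingsandbox-services-"
  image_id      = "${lookup(var.trusty_ami, data.aws_region.current.name)}"
  instance_type = "m4.large"

  # new LC is created (with a freshly generated name) before the old one is destroyed
  lifecycle {
    create_before_destroy = true
  }
}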
I prefer name over name_prefix, and I also understand that LCs are immutable, so I always change the name. create_before_destroy = true is designed for exactly this use case: Terraform creates the new LC first and deletes the old one only after the new one exists, per the recommendation in the Terraform launch configuration docs.
Still happening here too, I have create_before_destroy set to true.
It tries to delete the old launch config before it has attached the new launch config to the ASG, or there is a delay before the API realises that the old launch config is no longer in use.
A way around this might be to use launch templates and just point the ASG at the latest version of the template.
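Roughly, a sketch of that approach (assuming a provider version new enough to have aws_launch_template; resource names here are made up):

resource "aws_launch_template" "services" {
  name_prefix   = "stagingsandbox-services-"
  image_id      = "${lookup(var.trusty_ami, data.aws_region.current.name)}"
  instance_type = "m4.large"
}

resource "aws_autoscaling_group" "services" {
  name                = "stagingsandbox-services"
  min_size            = 0
  max_size            = 4
  vpc_zone_identifier = ["${var.zone_identifier}"]

  # changes produce a new template version; the ASG always tracks the latest
  launch_template {
    id      = "${aws_launch_template.services.id}"
    version = "$Latest"
  }
}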
All of a sudden this happened here too. Tried to state rm and import as suggested here https://github.com/hashicorp/terraform/issues/18643 but so far found no way to solve this issue.
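For anyone else landing here, the state rm/import sequence I tried looks roughly like this (using the resource address and LC name from the error at the top as an example):

# drop the deposed object from state, then re-import the live LC by name
terraform state rm aws_launch_configuration.stagingsandbox-webservers
terraform import aws_launch_configuration.stagingsandbox-webservers stagingsandbox-webservers-2018-03-29-1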
Update your AWS provider; we had the same issue until 1.56.0.
With 1.57.0 it at least cleaned up on the second run (the first run failing with an error), but with 2.11.0 we don't get that error anymore.
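If it helps, pinning the provider looks roughly like this (the exact constraint is your choice):

provider "aws" {
  version = "~> 2.11"
  region  = "us-east-1" # illustrative; use your region
}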