Hello,
Every time I run terraform apply, my Elastic Beanstalk environment is marked for update, which in turn re-creates my environment. This is clearly undesirable behavior.
Terraform v0.10.2
resource "aws_elastic_beanstalk_environment" "backend" {
name = "backend-${var.environment}"
application = "backend"
solution_stack_name = "64bit Amazon Linux 2017.03 v2.4.3 running PHP 5.6"
tier = "WebServer"
setting {
...
}
}
~ module.ums.aws_elastic_beanstalk_environment.backend
setting.#: "19" => "20"
setting.1224657411.name: "SSHSourceRestriction" => ""
setting.1224657411.namespace: "aws:autoscaling:launchconfiguration" => ""
setting.1224657411.resource: "" => ""
setting.1224657411.value: "tcp,22,22,null" => ""
setting.1311926541.name: "RollingUpdateEnabled" => "RollingUpdateEnabled"
setting.1311926541.namespace: "aws:autoscaling:updatepolicy:rollingupdate" => "aws:autoscaling:updatepolicy:rollingupdate"
setting.1311926541.resource: "" => ""
setting.1311926541.value: "true" => "true"
setting.1531156480.name: "Application Healthcheck URL" => ""
setting.1531156480.namespace: "aws:elasticbeanstalk:application" => ""
setting.1531156480.resource: "" => ""
setting.1531156480.value: "/elb-status" => ""
setting.1636165274.name: "EC2KeyName" => "EC2KeyName"
setting.1636165274.namespace: "aws:autoscaling:launchconfiguration" => "aws:autoscaling:launchconfiguration"
setting.1636165274.resource: "" => ""
setting.1636165274.value: "key" => "key"
setting.2276893638.name: "RollingUpdateType" => "RollingUpdateType"
setting.2276893638.namespace: "aws:autoscaling:updatepolicy:rollingupdate" => "aws:autoscaling:updatepolicy:rollingupdate"
setting.2276893638.resource: "" => ""
setting.2276893638.value: "Health" => "Health"
setting.2396587397.name: "MinSize" => "MinSize"
setting.2396587397.namespace: "aws:autoscaling:asg" => "aws:autoscaling:asg"
setting.2396587397.resource: "" => ""
setting.2396587397.value: "1" => "1"
setting.2420299722.name: "SystemType" => "SystemType"
setting.2420299722.namespace: "aws:elasticbeanstalk:healthreporting:system" => "aws:elasticbeanstalk:healthreporting:system"
setting.2420299722.resource: "" => ""
setting.2420299722.value: "enhanced" => "enhanced"
setting.2558992023.name: "ManagedActionsEnabled" => "ManagedActionsEnabled"
setting.2558992023.namespace: "aws:elasticbeanstalk:managedactions" => "aws:elasticbeanstalk:managedactions"
setting.2558992023.resource: "" => ""
setting.2558992023.value: "true" => "true"
setting.2671587030.name: "Subnets" => "Subnets"
setting.2671587030.namespace: "aws:ec2:vpc" => "aws:ec2:vpc"
setting.2671587030.resource: "" => ""
setting.2671587030.value: "subnet-3630230b,subnet-6a798c23,subnet-6d116547,subnet-8ea1a4d6" => "subnet-3630230b,subnet-6a798c23,subnet-6d116547,subnet-8ea1a4d6"
setting.2808638165.name: "PreferredStartTime" => "PreferredStartTime"
setting.2808638165.namespace: "aws:elasticbeanstalk:managedactions" => "aws:elasticbeanstalk:managedactions"
setting.2808638165.resource: "" => ""
setting.2808638165.value: "Sun:02:00" => "Sun:02:00"
setting.2983186660.name: "UpdateLevel" => "UpdateLevel"
setting.2983186660.namespace: "aws:elasticbeanstalk:managedactions:platformupdate" => "aws:elasticbeanstalk:managedactions:platformupdate"
setting.2983186660.resource: "" => ""
setting.2983186660.value: "minor" => "minor"
setting.3007260544.name: "AssociatePublicIpAddress" => "AssociatePublicIpAddress"
setting.3007260544.namespace: "aws:ec2:vpc" => "aws:ec2:vpc"
setting.3007260544.resource: "" => ""
setting.3007260544.value: "true" => "true"
setting.3172159480.name: "ELBSubnets" => "ELBSubnets"
setting.3172159480.namespace: "aws:ec2:vpc" => "aws:ec2:vpc"
setting.3172159480.resource: "" => ""
setting.3172159480.value: "subnet-3630230b,subnet-6a798c23,subnet-6d116547,subnet-8ea1a4d6" => "subnet-3630230b,subnet-6a798c23,subnet-6d116547,subnet-8ea1a4d6"
setting.3225151102.name: "MaxSize" => "MaxSize"
setting.3225151102.namespace: "aws:autoscaling:asg" => "aws:autoscaling:asg"
setting.3225151102.resource: "" => ""
setting.3225151102.value: "2" => "2"
setting.3276487710.name: "" => "Application Healthcheck URL"
setting.3276487710.namespace: "" => "aws:elasticbeanstalk:application"
setting.3276487710.resource: "" => ""
setting.3276487710.value: "" => "HTTP:80/elb-status"
setting.335963092.name: "" => "DBSubnets"
setting.335963092.namespace: "" => "aws:ec2:vpc"
setting.335963092.resource: "" => ""
setting.335963092.value: "" => "subnet-3630230b,subnet-6a798c23,subnet-6d116547,subnet-8ea1a4d6"
setting.337125008.name: "" => "SSHSourceRestriction"
setting.337125008.namespace: "" => "aws:autoscaling:launchconfiguration"
setting.337125008.resource: "" => ""
setting.337125008.value: "" => "tcp, 22, 22, sg-9f9946e4"
setting.37040285.name: "ServiceRole" => "ServiceRole"
setting.37040285.namespace: "aws:elasticbeanstalk:environment" => "aws:elasticbeanstalk:environment"
setting.37040285.resource: "" => ""
setting.37040285.value: "aws-elasticbeanstalk-service-role" => "aws-elasticbeanstalk-service-role"
setting.3909253589.name: "VPCId" => "VPCId"
setting.3909253589.namespace: "aws:ec2:vpc" => "aws:ec2:vpc"
setting.3909253589.resource: "" => ""
setting.3909253589.value: "vpc-581ad93f" => "vpc-581ad93f"
setting.417274623.name: "InstanceType" => "InstanceType"
setting.417274623.namespace: "aws:autoscaling:launchconfiguration" => "aws:autoscaling:launchconfiguration"
setting.417274623.resource: "" => ""
setting.417274623.value: "m3.medium" => "m3.medium"
setting.43215759.name: "SecurityGroups" => "SecurityGroups"
setting.43215759.namespace: "aws:autoscaling:launchconfiguration" => "aws:autoscaling:launchconfiguration"
setting.43215759.resource: "" => ""
setting.43215759.value: "sg-b445c3c4" => "sg-b445c3c4"
setting.733236782.name: "IamInstanceProfile" => "IamInstanceProfile"
setting.733236782.namespace: "aws:autoscaling:launchconfiguration" => "aws:autoscaling:launchconfiguration"
setting.733236782.resource: "" => ""
setting.733236782.value: "profile" => "profile"
Expected behavior: no change should be detected.
Actual behavior: Terraform thinks I've made a change.
Steps to reproduce: run terraform apply, then run terraform apply again with no changes.
I narrowed it down to an issue with these two setting blocks. My current workaround is to just comment them out:
setting {
  namespace = "aws:elasticbeanstalk:application"
  name      = "Application Healthcheck URL"
  value     = "HTTP:80/elb-status"
}

setting {
  namespace = "aws:autoscaling:launchconfiguration"
  name      = "SSHSourceRestriction"
  value     = "tcp, 22, 22, ${var.bastion_security_group}"
}
One side effect of this workaround is a security-critical issue: with SSHSourceRestriction commented out to avoid the constant refreshes, the default configuration keeps port 22 open to the world.
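The plan output above shows the API echoing the restriction back without spaces ("tcp,22,22,null"), so a hedged alternative to commenting the setting out is to keep it but write the value in that compact, space-free form (a sketch; a later comment in this thread confirms the no-spaces rule):

setting {
  namespace = "aws:autoscaling:launchconfiguration"
  name      = "SSHSourceRestriction"
  # No spaces after commas: the EB API returns the value in this compact form.
  value     = "tcp,22,22,${var.bastion_security_group}"
}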
I'm having this issue as well. I can't drop the Application Healthcheck URL without compromising my deployment, so I'm stuck letting it update every environment on each apply.
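For what it's worth, the plan output above shows the API returning "/elb-status" while the config says "HTTP:80/elb-status", so one hedged guess is to write the value in the bare-path form the API echoes back (a sketch, not a confirmed fix):

setting {
  namespace = "aws:elasticbeanstalk:application"
  name      = "Application Healthcheck URL"
  # Assumption: matching the form the API returns avoids the perpetual diff.
  value     = "/elb-status"
}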
This also happens with the aws:autoscaling:launchconfiguration IamInstanceProfile setting. Looks like the setting ID changes every time.
Terraform v0.11.7
Running into issues with:
setting {
  name      = "SSLReferencePolicy"
  namespace = "aws:elb:policies:TLSHighPolicy"
  value     = "ELBSecurityPolicy-TLS-1-2-2017-01"
}
I'm also facing the same issue. Any update or workaround? It's annoying to wait 10 minutes for a Beanstalk update even when there are no changes.
+1 - Also facing this issue which causes unnecessary deployment time.
+1 I'm having this issue as well
$ terraform version
Terraform v0.11.8
Any update on this? I'm having the same issue when using the aws:autoscaling:launchconfiguration IamInstanceProfile setting. I have to provide an instance profile. Is there any workaround to suppress the updates?
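One blunt workaround, sketched here as an assumption rather than a recommended fix, is Terraform's lifecycle ignore_changes argument, which suppresses the perpetual diff at the cost of also hiding genuine setting changes (0.11-style syntax):

resource "aws_elastic_beanstalk_environment" "backend" {
  # ... name, application, solution_stack_name, settings ...

  lifecycle {
    # Caution: this ignores real drift in ALL settings, not just the buggy ones.
    ignore_changes = ["setting"]
  }
}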
Same problems... Really a PITA
@mitchellh Any chance of getting some official word on this? It's a pretty big usability issue.
I have to agree that this is pretty serious; it's completely crippling my IaC. Not only does this apply every time for no reason, it also prevents other resources from being updated, because the environment update constantly fails with:
aws_elastic_beanstalk_environment.default: Error waiting for Elastic Beanstalk Environment (e-nwn8rhh3xs) to become ready: 2 errors occurred:
* 2019-02-27 19:33:27.54 +0000 UTC (e-nwn8rhh3xs) : Service:AmazonCloudFormation, Message:No updates are to be performed.
* 2019-02-27 19:33:28.574 +0000 UTC (e-nwn8rhh3xs) : Environment tag update failed.
This is pretty much unusable for my purposes.
Any updates? It seems like the IamInstanceProfile and SecurityGroups settings force this change...
Edit: So, after searching a bit, I found out that
Another important point for the SSHSourceRestriction setting: do NOT put spaces after commas.
This will trigger an update, so do not use this:
setting {
  namespace = "aws:autoscaling:launchconfiguration"
  name      = "SSHSourceRestriction"
  value     = "tcp, 22, 22, ${var.bastion_security_group}"
}
while this WILL NOT:
setting {
  namespace = "aws:autoscaling:launchconfiguration"
  name      = "SSHSourceRestriction"
  value     = "tcp,22,22,${var.bastion_security_group}"
}
So I found a bug in the Beanstalk API almost a year ago that essentially meant it returned null values in certain situations with SSHSourceRestriction. They emailed me this morning to say it should be fixed now; perhaps that will solve this problem.
I have this issue with many other settings; see below, where (-) and (+) mark removals and additions.
Terraform v0.12.3
  + setting {
      + name      = "Subnets"
      + namespace = "aws:ec2:vpc"
      + value     = "subnet-0533bc84xxxxx, subnet-0849547f75xxxx"
    }
  - setting {
      - name      = "Subnets" -> null
      - namespace = "aws:ec2:vpc" -> null
      - value     = "subnet-0533bc84xxxxx,subnet-0849547f75xxxx" -> null
    }
  - setting {
      - name      = "Unit" -> null
      - namespace = "aws:autoscaling:trigger" -> null
      - value     = "Percent" -> null
    }
  + setting {
      + name      = "Unit"
      + namespace = "aws:autoscaling:trigger"
      + value     = "Percent"
    }
  - setting {
      - name      = "UpperThreshold" -> null
      - namespace = "aws:autoscaling:trigger" -> null
      - value     = "70" -> null
    }
  + setting {
      + name      = "UpperThreshold"
      + namespace = "aws:autoscaling:trigger"
      + value     = "70"
    }
  - setting {
      - name      = "VPCId" -> null
      - namespace = "aws:ec2:vpc" -> null
      - value     = "vpc-095dee784a4382c63" -> null
    }
  + setting {
      + name      = "VPCId"
      + namespace = "aws:ec2:vpc"
      + value     = "vpc-095dee784a4382c63"
    }
@krunalsabnis this is because you have a space between the first and the second subnet, after the comma.
Try removing the space in the value, and it should stop this behavior
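If the subnet list comes from a variable, one way to guarantee a space-free, stable value is to build it with join and sort, as a later comment also does (a sketch; var.subnet_ids is a hypothetical list(string)):

setting {
  namespace = "aws:ec2:vpc"
  name      = "Subnets"
  # join() inserts no spaces; sort() keeps the order deterministic.
  value     = join(",", sort(var.subnet_ids))
}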
I observed some ultra-strange behavior; the post by @GarlicDipping prompted me to try this, so thanks!
My EB deployment had a bunch of custom settings, and each Terraform apply would re-apply these settings as a no-op. These were settings like subnets, security groups, load balancer settings, service role, instance profile, etc.
I changed only the IamInstanceProfile from a full ARN to just the name, and instantly the entire problem went away for all settings.
Previous: arn:aws:iam::xxx:instance-profile/eb-ec2-role
After: eb-ec2-role
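In config terms, that means referencing the instance profile's name attribute rather than its ARN (a sketch; aws_iam_instance_profile.eb is a hypothetical resource name):

setting {
  namespace = "aws:autoscaling:launchconfiguration"
  name      = "IamInstanceProfile"
  # Use the bare profile name; the EB API appears to echo back the name, not the ARN.
  value     = aws_iam_instance_profile.eb.name
}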
Update: In a different EB app, this trick didn't resolve the problem.
Adding resource = "" to the setting block fixes my issue.
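A minimal sketch of that workaround (namespace and values borrowed from the fuller example below):

setting {
  namespace = "aws:elasticbeanstalk:environment"
  name      = "EnvironmentType"
  value     = "LoadBalanced"
  # Assumption: an explicitly empty 'resource' keeps the computed hash of the
  # setting consistent with what the API returns, so no diff is detected.
  resource  = ""
}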
Similar to @krunalsabnis, I get this for every setting when applying Terraform to a Beanstalk environment, using Terraform v0.12.6.
The bug is present in provider "aws" { version = "~> 2.30" }, released 2019-09-26.
Two changes work around it for me; one is adding resource = "" to each setting block. (Thanks @Rmannn! 👍) Working version:
resource "aws_elastic_beanstalk_environment" "api_service" {
name = "${var.instance_name}-api-service"
application = aws_elastic_beanstalk_application.api_service.name
cname_prefix = "com-company-${var.instance_name}-api"
version_label = aws_elastic_beanstalk_application_version.api_service.name
# https://docs.aws.amazon.com/elasticbeanstalk/latest/platforms/platforms-supported.html
solution_stack_name = "64bit Amazon Linux 2018.03 v2.9.2 running Java 8"
# https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/command-options-general.html#command-options-general-elasticbeanstalkenvironment
# Prod & Staging get expensive Load Balancers. Other deployments get cheap Elastic IPs.
setting {
namespace = "aws:elasticbeanstalk:environment"
name = "EnvironmentType"
value = var.is_prod_or_staging ? "LoadBalanced" : "SingleInstance"
// These empty 'resource' values prevent updating the environment on every apply.
// See https://github.com/terraform-providers/terraform-provider-aws/issues/1471#issuecomment-522977469
resource = ""
}
// Include the 'LoadBalancerType' setting only in Prod/Staging deployments.
// The Beanstalk API ignores the LoadBalancerType setting when EnvironmentType=SingleInstance.
// When Terraform queries the application's settings, the Beanstalk API does not return the
// LoadBalancerType entry, so Terraform tries to add it. This makes Terraform update the
// application on *every* apply.
// See https://github.com/terraform-providers/terraform-provider-aws/issues/1471#issuecomment-522977469
// This 'dynamic' ugly hack is required because Terraform has no conditionals.
// See https://github.com/hashicorp/terraform/issues/19853
// TODO(dev1) Switch to Pulumi once it's mature enough and get away from this nonsense.
dynamic "setting" {
for_each = var.is_prod_or_staging ? [
1] : []
content {
namespace = "aws:elasticbeanstalk:environment"
name = "LoadBalancerType"
# classic, application, or network
value = "application"
resource = ""
}
}
...
Faced the exact same issue. What solved it for me (these are not my own solutions; I found them by reading this and other posts): adding resource = "" to every environment setting. Hope this helps :)
Same issue here when trying to use the aws:elb:policies:backendkey PublicKey setting; Terraform forces the update even with exactly the same certificate PEM content.
setting {
  namespace = "aws:elb:policies:backendencryption"
  name      = "PublicKeyPolicyNames"
  value     = "backendkey"
  resource  = ""
}

setting {
  namespace = "aws:elb:policies:backendencryption"
  name      = "InstancePorts"
  value     = "443"
  resource  = ""
}

setting {
  namespace = "aws:elb:policies:backendkey"
  name      = "PublicKey"
  value     = tls_self_signed_cert.backend.cert_pem
  resource  = ""
}
EDIT: To fix this issue, the public key must be passed instead of the certificate, and it must be formatted to contain only the raw key:
setting {
  namespace = "aws:elb:policies:backendencryption"
  name      = "PublicKeyPolicyNames"
  value     = "backendkey"
  resource  = ""
}

setting {
  namespace = "aws:elb:policies:backendencryption"
  name      = "InstancePorts"
  value     = "443"
  resource  = ""
}

setting {
  namespace = "aws:elb:policies:backendkey"
  name      = "PublicKey"
  value     = replace(replace(tls_private_key.backend.public_key_pem, "/-----[A-Z ]+-----/", ""), "/\\s/", "")
  resource  = ""
}
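For reference, the nested replace() calls work in two passes: the first regex strips the -----BEGIN/END----- armor lines, and the second removes all remaining whitespace, leaving only the raw base64 key body that the EB API stores.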
As others have mentioned, you need to watch out for spaces in your subnets. In addition, the subnets appear to come back sorted alphabetically.
setting {
  namespace = "aws:ec2:vpc"
  name      = "Subnets"
  value     = join(",", sort(var.private_subnets))
  resource  = ""
}
Using this, where var.private_subnets is a list(string) of subnet IDs, fixed my issue.
One other thing I just noticed: when you have lists of things (subnets, security groups), they sometimes come back in a different order than your TF config specifies, so Terraform detects a diff every time. Reorder the entries in the value to match what comes back, and you should be set. That, plus the resource = "" hack, is what fixed it all for me.
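The same idea as a sketch for security groups, assuming var.security_group_ids is a hypothetical list(string) and that the API returns the groups in sorted order (verify against your own plan output):

setting {
  namespace = "aws:autoscaling:launchconfiguration"
  name      = "SecurityGroups"
  # Sorted, space-free value to match the order the API reports back.
  value     = join(",", sort(var.security_group_ids))
  resource  = ""
}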