Terraform version: 0.7.1
# Elastic Beanstalk application container for the "poco" app.
# The environment resource below attaches to it by name ("tf-${var.app_name}").
# `count` is driven by a per-environment lookup so the whole stack can be
# toggled off (count = 0) for environments that don't need it.
resource "aws_elastic_beanstalk_application" "poco" {
  count       = "${lookup(var.instance_count, var.env)}"
  name        = "tf-${var.app_name}"
  description = "tf-${var.app_name}"
}
# Elastic Beanstalk environment "poco": ALB-fronted, CPU-based autoscaling
# (3-4 instances), enhanced health reporting, rolling deploys with an
# additional batch, and rolling ASG configuration updates.
# NOTE(review): `count` is set on both this resource and the application it
# references, but `application` reads `aws_elastic_beanstalk_application.poco.name`
# without an index/splat — confirm this resolves on the Terraform 0.7.x
# versions in use (counted resources normally require `.*.name` + element()).
resource "aws_elastic_beanstalk_environment" "poco" {
count = "${lookup(var.instance_count, var.env)}"
name = "poco"
application = "${aws_elastic_beanstalk_application.poco.name}"
solution_stack_name = "${var.solution_stack_name}"
wait_for_ready_timeout = "${var.wait_for_ready_timeout}"
# ALB
# Use an Application Load Balancer; the default process health-checks
# GET /health and treats only HTTP 200 as healthy.
setting {
namespace = "aws:elasticbeanstalk:environment"
name = "LoadBalancerType"
value = "application"
}
setting {
namespace = "aws:elasticbeanstalk:environment:process:default"
name = "HealthCheckPath"
value = "/health"
}
setting {
namespace = "aws:elasticbeanstalk:environment:process:default"
name = "MatcherHTTPCode"
value = "200"
}
# Autoscaling
# Trigger: scale in by 1 when avg CPU < 10%, out by 1 when avg CPU > 75%,
# evaluated over one 5-minute period with a 5-minute breach duration.
setting {
namespace = "aws:autoscaling:trigger"
name = "LowerBreachScaleIncrement"
value = "-1"
}
setting {
namespace = "aws:autoscaling:trigger"
name = "UpperBreachScaleIncrement"
value = "1"
}
setting {
namespace = "aws:autoscaling:trigger"
name = "UpperThreshold"
value = "75"
}
setting {
namespace = "aws:autoscaling:trigger"
name = "LowerThreshold"
value = "10"
}
setting {
namespace = "aws:autoscaling:trigger"
name = "BreachDuration"
value = "5"
}
setting {
namespace = "aws:autoscaling:trigger"
name = "MeasureName"
value = "CPUUtilization"
}
setting {
namespace = "aws:autoscaling:trigger"
name = "EvaluationPeriods"
value = "1"
}
setting {
namespace = "aws:autoscaling:trigger"
name = "Period"
value = "5"
}
setting {
namespace = "aws:autoscaling:trigger"
name = "Statistic"
value = "Average"
}
setting {
namespace = "aws:autoscaling:trigger"
name = "Unit"
value = "Percent"
}
# ASG bounds: always 3-4 instances.
setting {
namespace = "aws:autoscaling:asg"
name = "MaxSize"
value = "4"
}
setting {
namespace = "aws:autoscaling:asg"
name = "MinSize"
value = "3"
}
# Enable Enhanced Health
# Enhanced health reporting; instances at "Warning" or better still count as
# passing the health-check success threshold.
setting {
namespace = "aws:elasticbeanstalk:healthreporting:system"
name = "SystemType"
value = "enhanced"
}
setting {
namespace = "aws:elasticbeanstalk:healthreporting:system"
name = "HealthCheckSuccessThreshold"
value = "Warning"
}
setting {
namespace = "aws:elasticbeanstalk:environment"
name = "ServiceRole"
value = "${var.service_role}"
}
# NOTE(review): "Application Healthcheck URL" (with spaces) is the literal
# option name in this namespace — do not normalize it.
setting {
namespace = "aws:elasticbeanstalk:application"
name = "Application Healthcheck URL"
value = "/health"
}
# Handling Rolling Application Updates
# Deploy to a full-size (100%) extra batch, then swap; 900s command timeout.
setting {
namespace = "aws:elasticbeanstalk:command"
name = "DeploymentPolicy"
value = "RollingWithAdditionalBatch"
}
setting {
namespace = "aws:elasticbeanstalk:command"
name = "BatchSizeType"
value = "Percentage"
}
setting {
namespace = "aws:elasticbeanstalk:command"
name = "BatchSize"
value = "100"
}
setting {
namespace = "aws:elasticbeanstalk:command"
name = "Timeout"
value = "900"
}
# Handling Rolling Configuration Updates
# Health-gated rolling replacement of up to 3 instances at a time;
# Timeout is ISO-8601 duration (PT15M = 15 minutes), unlike the seconds
# used by the command namespace above.
setting {
namespace = "aws:autoscaling:updatepolicy:rollingupdate"
name = "RollingUpdateEnabled"
value = "true"
}
setting {
namespace = "aws:autoscaling:updatepolicy:rollingupdate"
name = "MaxBatchSize"
value = "3"
}
setting {
namespace = "aws:autoscaling:updatepolicy:rollingupdate"
name = "RollingUpdateType"
value = "Health"
}
setting {
namespace = "aws:autoscaling:updatepolicy:rollingupdate"
name = "Timeout"
value = "PT15M"
}
# Launch Configuration
# NOTE(review): InstanceType is hard-coded to t2.medium while the other
# launch settings are variables — consider a var if environments differ.
setting {
namespace = "aws:autoscaling:launchconfiguration"
name = "InstanceType"
value = "t2.medium"
}
setting {
namespace = "aws:autoscaling:launchconfiguration"
name = "SecurityGroups"
value = "${var.vpc_default_security_group},${aws_security_group.poco.id}"
}
setting {
namespace = "aws:autoscaling:launchconfiguration"
name = "EC2KeyName"
value = "${var.ec2_key_name}"
}
setting {
namespace = "aws:autoscaling:launchconfiguration"
name = "IamInstanceProfile"
value = "${var.iam_instance_profile}"
}
setting {
namespace = "aws:autoscaling:launchconfiguration"
name = "ImageId"
value = "${var.image_id}"
}
# ENV vars
# Environment variables exposed to the application processes.
setting {
namespace = "aws:elasticbeanstalk:application:environment"
name = "AWS_REGION"
value = "${var.aws_region}"
}
# Reads the first cache node's address — assumes a single-node (or
# first-node-is-primary) ElastiCache cluster; TODO confirm.
setting {
namespace = "aws:elasticbeanstalk:application:environment"
name = "REDIS_URI"
value = "redis://${aws_elasticache_cluster.redis.cache_nodes.0.address}:6379"
}
setting {
namespace = "aws:elasticbeanstalk:application:environment"
name = "RICKROLL"
value = "never gonna give you up"
}
# VPC
# Instances go in the private subnets; the load balancer in the public ones.
setting {
namespace = "aws:ec2:vpc"
name = "VPCId"
value = "${var.vpc_id}"
}
setting {
namespace = "aws:ec2:vpc"
name = "Subnets"
value = "${var.vpc_private_subnets}"
}
setting {
namespace = "aws:ec2:vpc"
name = "ELBSubnets"
value = "${var.vpc_public_subnets}"
}
tags {
App = "${var.app_name}"
Env = "${var.env}"
Name = "${var.app_name}"
Inspector = "enabled"
}
}
The apply result listed in Atlas: https://gist.github.com/plukevdh/6c1a1933b9ec7331e865ec93ab4b590b
I should have seen all three env vars listed in the settings in the EB console:
AWS_REGION, REDIS_URI, RICKROLL
Only AWS_REGION and RICKROLL were present.
I modified the tf config above to modify the REDIS_URI value from "redis://${aws_elasticache_cluster.redis.cache_nodes.0.address}:6379/" to "redis://${aws_elasticache_cluster.redis.cache_nodes.0.address}:6379" (removing the trailing slash).
I added the RICKROLL setting block as seen above.
I then ran terraform apply via our hosted Atlas. The result is the REDIS_URI was unset and removed while the newer RICKROLL value was added successfully.
I'm also observing this behaviour — all updated env vars get removed during the update. 0.7.2 is affected here too.
0.7.3 has the same problem.
Hey Friends, I'm looking into this now. An update was mentioned, can you clarify for me, you're talking about simply updating the env vars and getting inconsistent state, not necessarily updating from Terraform versions? Meaning that the upgrade from one version to the next is not when this happens, but instead once you're on the new version(s), updating env vars is not behaving correctly. Is that right?
Thanks! Sorry for the trouble here
That is correct.
Hey everyone – I believe #8848 should fix this; I was able to find an error in the logic and it should be cleaned up now. If you can test it out that would be great, otherwise we'll rely on my testing and the new acceptance test behavior.
Sorry for the trouble!
Awesome, this has resolved our problem! Thanks @catsby! Do you know when this is likely to hit an official release? We've got multiple terraformers on our team and I don't really want to have to get everyone to make local versions from source, if at all possible.
Hey @leemhenson – we just merged the fix, this will go out in the next release. I don't know 100% sure when that will be, and I make no promises, but I suspect early next week
Good to know, thanks.
On Thu, 15 Sep 2016 at 20:11 Clint [email protected] wrote:
Hey @leemhenson https://github.com/leemhenson – we just merged the fix,
this will go out in the next release. I don't know 100% sure when that will
be, and I make no promises, but I suspect early next week.
—
You are receiving this because you were mentioned. Reply to this email directly, view it on GitHub
https://github.com/hashicorp/terraform/issues/8742#issuecomment-247423206,
or mute the thread
https://github.com/notifications/unsubscribe-auth/AAACAxfhAgCcJqORopy6esbbXdX1Epkmks5qqZh1gaJpZM4J4cLv
.
I'm going to lock this issue because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.
If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
Most helpful comment
That is correct.