Terraform v0.6.15
provider "aws" {
region = "us-east-1"
}
resource "aws_iam_user" "es" {
name = "srv_user1"
}
resource "aws_iam_access_key" "es" {
user = "${aws_iam_user.es.name}"
}
resource "aws_elasticsearch_domain" "es" {
domain_name = "es1"
advanced_options {
"rest.action.multi.allow_explicit_index" = true
}
snapshot_options {
"automated_snapshot_start_hour" = 23
}
access_policies = <<CONFIG
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "es:*",
"Principal": {
"AWS": "${aws_iam_user.es.arn}"
}
}
]
}
CONFIG
}
https://gist.github.com/jritsema/8d4060e703c9a287753e1e0db5c41afd
none
An Elasticsearch domain should be created with a policy that grants access to the newly created user.
Throws the following error
Error applying plan:
1 error(s) occurred:
* aws_elasticsearch_domain.es: InvalidTypeException: Error setting policy: [ {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "es:*",
"Principal": {
"AWS": "arn:aws:iam::xxxxxxxx:user/srv_user1"
}
}
]
}
]
status code: 409, request id: 5ce1b757-1060-11e6-800a-c363f7f5dcbd
Please list the steps required to reproduce the issue
terraform apply
GH-4485
if I run terraform apply twice, it works the second time
I ran into the same bug but with a different POLICY
resource "aws_elasticsearch_domain" "logs" {
domain_name = "es-test"
elasticsearch_version = "2.3"
access_policies = <<CONFIG
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": [
"arn:aws:iam::${module.account.number}:role/role1",
"arn:aws:iam::${module.account.number}:role/role2"
]
},
"Action": "es:*",
"Resource": "arn:aws:es:us-east-1:${module.account.number}:domain/es-test/*"
},
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "es:*",
"Resource": "arn:aws:es:us-east-1:${module.account.number}:domain/es-test/*",
"Condition": {
"IpAddress": {
"aws:SourceIp": "${module.networks.nat_gateway}"
}
}
}
]
}
CONFIG
}
The error does not like "aws:SourceIp": "${module.networks.nat_gateway}"
where the networks module looks like
variable "region" {}
variable "vpc" {}
variable "nat_gateways" {
default = {
us-east-1.vpc1 = ["w1.x1.y1.z1","w2.x2.y2.z2"]
}
}
output "nat_gateway" {
value = "${lookup(var.nat_gateways, format("%s.%s", var.region, var.vpc))}"
}
Any update on this issue? I am getting the same error on version 0.8.7
I have this same error on version 0.8.3. Has anyone found a solution?
The bug is still there. I found a workaround for now: I changed the Principal -> AWS value from a hash to an array, like this
[ {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "es:*",
"Principal": {
"AWS": ["arn:aws:iam::xxxxxxxx:user/srv_user1"]
}
}
]
}
]
I found a solution: using the aws_elasticsearch_domain_policy resource helped me. It works only with Terraform 0.8.7 and later.
provider "aws" {
region = "us-east-1"
}
resource "aws_iam_user" "es" {
name = "srv_user1"
}
resource "aws_iam_access_key" "es" {
user = "${aws_iam_user.es.name}"
}
resource "aws_elasticsearch_domain" "es" {
domain_name = "es1"
advanced_options {
"rest.action.multi.allow_explicit_index" = true
}
snapshot_options {
"automated_snapshot_start_hour" = 23
}
}
resource "aws_elasticsearch_domain_policy" "main" {
domain_name = "${aws_elasticsearch_domain.es.domain_name}"
access_policies = <<CONFIG
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": "es:*",
"Principal": {
"AWS": "${aws_iam_user.es.arn}"
}
}
]
}
CONFIG
}
Based on the comments above and also in my own findings, the problem may only come to light whenever you have a second Statement in your ES policy. Moving it to the aws_elasticsearch_domain_policy doesn't explicitly solve it and in the example above with SourceIP there are two statements, just like my case. My situation is in the opposite order as that example and my issue was with the second statement. I wrapped the arn in [] as one of the previous comments says and now it seems to work.
resource "aws_elasticsearch_domain_policy" "es_policy" {
depends_on = ["aws_elasticsearch_domain.es"]
domain_name = "${aws_elasticsearch_domain.es.domain_name}"
access_policies = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "es:*",
"Principal": {
"AWS": "*"
},
"Effect": "Allow",
"Condition": {
"IpAddress": {"aws:SourceIp": ["0.0.0.0/0"] }
},
"Resource": "${aws_elasticsearch_domain.es.arn}/*"
},
{
"Action": "es:*",
"Effect": "Allow",
"Principal": {
"AWS": ["${aws_iam_role.lambda_role.arn}"]
},
"Resource": "${aws_elasticsearch_domain.es.arn}/*"
}
]
}
EOF
}
resource "aws_elasticsearch_domain_policy" "es_policy" {
depends_on = ["aws_elasticsearch_domain.es"]
domain_name = "${aws_elasticsearch_domain.es.domain_name}"
access_policies = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "es:*",
"Principal": {
"AWS": "*"
},
"Effect": "Allow",
"Condition": {
"IpAddress": {"aws:SourceIp": ["0.0.0.0/0"] }
},
"Resource": "${aws_elasticsearch_domain.es.arn}/*"
},
{
"Action": "es:*",
"Effect": "Allow",
"Principal": {
"AWS": "${aws_iam_role.lambda_role.arn}"
},
"Resource": "${aws_elasticsearch_domain.es.arn}/*"
}
]
}
EOF
}
Note the [] on the second statement in the AWS section.
Thank you @slajax I was ready to push a policy manually. Your workaround worked for me. I put the SourceIp on a single line and that worked.
Closing this since it appears to now work with the v0.9.6 bits.
@jritsema we are running into this issue with terraform version 0.9.6
resource "aws_elasticsearch_domain" "elastic" {
domain_name = "${var.stack_name}-elastic"
elasticsearch_version = "${var.elastic_version}"
ebs_options{
ebs_enabled = true
volume_size = 10
}
cluster_config {
instance_type = "${module.environment.elastic_instance_type}"
instance_count = "${module.environment.elastic_instance_count}"
}
access_policies = <<CONFIG
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"AWS": ["${aws_iam_role.zipkin_ecs_role.arn}"]
},
"Action": "es:*",
"Resource": "arn:aws:es:${var.region}:13884053156:domain/${var.stack_name}-elastic/*"
}
]
}
CONFIG
snapshot_options {
automated_snapshot_start_hour = 23
}
tags {
Name = "${var.stack_name}-elastic"
Environment = "${var.environment}"
Stack = "${var.stack_name}"
}
}
Error message:
Error applying plan:
1 error(s) occurred:
* aws_elasticsearch_domain.elastic: 1 error(s) occurred:
* aws_elasticsearch_domain.elastic: InvalidTypeException: Error setting policy: [{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"AWS": ["arn:aws:iam::13884053156:role/hurdygurdy-zipkin-role"]
},
"Action": "es:*",
"Resource": "arn:aws:es:eu-west-1:13884053156:domain/hurdygurdy-elastic/*"
}
]
}
]
status code: 409, request id: 4070cc55-49e2-11e7-a4d9-15d110862029
Terraform does not automatically rollback in the face of errors.
Instead, your Terraform state file has been partially updated with
any resources that successfully completed. Please address the error
above and apply again to incrementally change your infrastructure.
make: *** [apply] Error 1
Note
Running terraform apply a second time resolves the problem. But this workaround is not satisfactory when creating environments from scratch.
I'm having the same issue on v0.10.7
resource "aws_elasticsearch_domain_policy" "esserver" {
domain_name = "${aws_elasticsearch_domain.esserver.domain_name}"
access_policies = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "es:*",
"Principal": {
"AWS": "*"
},
"Effect": "Allow",
"Condition": {
"IpAddress": {"aws:SourceIp": ["0.0.0.0/0"] }
},
"Resource": "${aws_elasticsearch_domain.esserver.arn}/*"
},
{
"Action": "es:*",
"Effect": "Allow",
"Principal": {
"AWS": ["${var.lambda_fcn_arn}"]
},
"Resource": "${aws_elasticsearch_domain.esserver.arn}/*"
}
]
}
EOF
}
When I try to apply that policy, I get the error InvalidTypeException: Error setting policy.
The strange thing is, when I try to create that Policy directly in the AWS IAM console, it complains about the Principals. When I remove those, it validates just fine.
Edit:
Nevermind, classic example of PEBKAC. Just make sure that lambda_fcn_arn is the ARN of the role of your Lambda function, not the Lambda function itself.
Found a work around for this... I ran into this issue with Terraform v0.11.3
resource "aws_iam_user" "es_user" {
name = "${var.elasticsearch_user_name}"
force_destroy = true
provisioner "local-exec" "sleep" {
command = "sleep 20"
}
}
data "aws_iam_policy_document" "elasticsearch_main_policy_document" {
statement {
sid = "2"
effect = "Allow"
actions = [
"es:*"
]
resources = [
"${aws_elasticsearch_domain.es.arn}/*"
]
principals {
identifiers = ["${aws_iam_user.es_user.arn}"]
type = "AWS"
}
}
}
resource "aws_elasticsearch_domain_policy" "elasticsearch_main_policy_document" {
domain_name = "${aws_elasticsearch_domain.es.domain_name}"
access_policies = "${data.aws_iam_policy_document.elasticsearch_main_policy_document.json}"
}
Note the local-exec sleep for 20 seconds. This now works every time.
Still see it on
Terraform v0.11.7
data "aws_iam_policy_document" "es_access" {
statement {
sid = "ES"
effect = "Allow"
actions = [
"es:Describe",
"es:List",
"es:ESHttpGet",
"es:ESHttpPost",
"es:ESHttpPut"
],
resources = [ "arn:aws:es:*" ]
}
}
For posterity, this can also happen when a role/group has been deleted. The ARN changes from the familiar colon format to a 21-character permanent ID. If you try to update a policy with the ARN or the ID, it will fail. You need to remove the dead role.
I'm going to lock this issue because it has been closed for _30 days_ โณ. This helps our maintainers find and focus on the active issues.
If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.