Terraform wants to delete a bucket and complains it is not empty. I don't want Terraform to delete the bucket. How do I tell it not to?

Tags:

terraform

I have a Terraform configuration that produces the following log on apply:

Terraform will perform the following actions:

  # aws_lambda_permission.allow_bucket must be replaced
-/+ resource "aws_lambda_permission" "allow_bucket" {
        action        = "lambda:InvokeFunction"
        function_name = "arn:aws:lambda:us-east-1:<secret>:function:ProggyS3ObjectCreatedHandler"
      ~ id            = "AllowExecutionFromS3Bucket" -> (known after apply)
        principal     = "s3.amazonaws.com"
      ~ source_arn    = "arn:aws:s3:::bucket-example-us-east-1" -> (known after apply) # forces replacement
        statement_id  = "AllowExecutionFromS3Bucket"
    }

  # aws_s3_bucket.bucket must be replaced
-/+ resource "aws_s3_bucket" "bucket" {
      + acceleration_status         = (known after apply)
        acl                         = "private"
      ~ arn                         = "arn:aws:s3:::bucket-example-us-east-1" -> (known after apply)
      ~ bucket                      = "bucket-example-us-east-1" -> "sftp-assembly-us-east-1" # forces replacement
      ~ bucket_domain_name          = "bucket-example-us-east-1.s3.amazonaws.com" -> (known after apply)
      ~ bucket_regional_domain_name = "bucket-example-us-east-1.s3.amazonaws.com" -> (known after apply)
        force_destroy               = false
      ~ hosted_zone_id              = "FOOBAR" -> (known after apply)
      ~ id                          = "bucket-example-us-east-1" -> (known after apply)
      ~ region                      = "us-east-1" -> (known after apply)
      ~ request_payer               = "BucketOwner" -> (known after apply)
      ~ tags                        = {
          ~ "Name" = "bucket-example-us-east-1" -> "sftp-assembly-us-east-1"
        }
      + website_domain              = (known after apply)
      + website_endpoint            = (known after apply)

        versioning {
            enabled    = false
            mfa_delete = false
        }
    }

  # aws_s3_bucket_notification.bucket_notification must be replaced
-/+ resource "aws_s3_bucket_notification" "bucket_notification" {
      ~ bucket = "bucket-example-us-east-1" -> (known after apply) # forces replacement
      ~ id     = "bucket-example-us-east-1" -> (known after apply)

      ~ lambda_function {
            events              = [
                "s3:ObjectCreated:*",
            ]
          ~ id                  = "tf-s3-lambda-FOOBAR" -> (known after apply)
            lambda_function_arn = "arn:aws:lambda:us-east-1:<secret>:function:ProggyS3ObjectCreatedHandler"
        }
    }

  # module.large-file-breaker-lambda-primary.aws_lambda_function.lambda will be updated in-place
  ~ resource "aws_lambda_function" "lambda" {
        arn                            = "arn:aws:lambda:us-east-1:<secret>:function:ProggyLargeFileBreaker"
        function_name                  = "ProggyLargeFileBreaker"
        handler                        = "break-large-files"
        id                             = "ProggyLargeFileBreaker"
        invoke_arn                     = "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1:<secret>:function:ProggyLargeFileBreaker/invocations"
        last_modified                  = "2020-03-13T20:17:33.376+0000"
        layers                         = []
        memory_size                    = 3008
        publish                        = false
        qualified_arn                  = "arn:aws:lambda:us-east-1:<secret>:function:ProggyLargeFileBreaker:$LATEST"
        reserved_concurrent_executions = -1
        role                           = "arn:aws:iam::<secret>:role/ProggyLargeFileBreaker-role20200310215329691700000001"
        runtime                        = "go1.x"
        s3_bucket                      = "repo-us-east-1"
        s3_key                         = "Proggy-large-file-breaker/1.0.10/break-large-files-1.0.10.zip"
        source_code_hash               = "TbwleLcqD+xL2zOYk6ZdiBWAAznCIiTS/6nzrWqYZhE="
        source_code_size               = 7294687
      ~ tags                           = {
            "Name"          = "ProggyLargeFileBreaker"
          ~ "TerraformRepo" = "https://git.com/wwexdevelopment/aws-terraform-projects/commits/tag/v2.0.11" -> "https://git.com/wwexdevelopment/aws-terraform-projects/commits/tag/v2.0.16"
            "Version"       = "1.0.10"
            "account"       = "main"
            "environment"   = "assembly"
            "source"        = "terraform"
            "type"          = "ops"
        }
        timeout                        = 360
        version                        = "$LATEST"

        environment {
            variables = {
                "ARCHIVE_BUCKET"       = "Proggy-archive-assembly-us-east-1"
                "S3_OBJECT_SIZE_LIMIT" = "15000000"
            }
        }

        tracing_config {
            mode = "PassThrough"
        }
    }

  # module.notifier-lambda-primary.aws_lambda_function.lambda will be updated in-place
  ~ resource "aws_lambda_function" "lambda" {
        arn                            = "arn:aws:lambda:us-east-1:<secret>:function:ProggyS3ObjectCreatedHandler"
        function_name                  = "ProggyS3ObjectCreatedHandler"
        handler                        = "s3-Proggy-notifier"
        id                             = "ProggyS3ObjectCreatedHandler"
        invoke_arn                     = "arn:aws:apigateway:us-east-1:lambda:path/2015-03-31/functions/arn:aws:lambda:us-east-1:<secret>:function:ProggyS3ObjectCreatedHandler/invocations"
        last_modified                  = "2020-03-11T20:52:33.256+0000"
        layers                         = []
        memory_size                    = 128
        publish                        = false
        qualified_arn                  = "arn:aws:lambda:us-east-1:<secret>:function:ProggyS3ObjectCreatedHandler:$LATEST"
        reserved_concurrent_executions = -1
        role                           = "arn:aws:iam::<secret>:role/ProggyS3ObjectCreatedHandler-role20200310215329780600000001"
        runtime                        = "go1.x"
        s3_bucket                      = "repo-us-east-1"
        s3_key                         = "s3-Proggy-notifier/1.0.55/s3-Proggy-notifier-1.0.55.zip"
        source_code_hash               = "4N+B1GpaUY/wB4S7tR1eWRnNuHnBExcEzmO+mqhQ5B4="
        source_code_size               = 6787828
      ~ tags                           = {
            "Name"          = "ProggyS3ObjectCreatedHandler"
          ~ "TerraformRepo" = "https://git.com/wwexdevelopment/aws-terraform-projects/commits/tag/v2.0.11" -> "https://git.com/wwexdevelopment/aws-terraform-projects/commits/tag/v2.0.16"
            "Version"       = "1.0.55"
            "account"       = "main"
            "environment"   = "assembly"
            "source"        = "terraform"
            "type"          = "ops"
        }
        timeout                        = 360
        version                        = "$LATEST"

        environment {
            variables = {
                "FILE_BREAKER_LAMBDA_FUNCTION_NAME" = "ProggyLargeFileBreaker"
                "Proggy_SQS_QUEUE_NAME"              = "Proggy_Proggy_edi-notification.fifo"
                "Proggy_SQS_QUEUE_URL"               = "https://sqs.us-east-1.amazonaws.com/<secret>/Proggy_Proggy_edi-notification.fifo"
                "S3_OBJECT_SIZE_LIMIT"              = "15000000"
            }
        }

        tracing_config {
            mode = "PassThrough"
        }
    }

Plan: 3 to add, 2 to change, 3 to destroy.

Do you want to perform these actions?
  Terraform will perform the actions described above.
  Only 'yes' will be accepted to approve.

  Enter a value: yes

module.large-file-breaker-lambda-primary.aws_lambda_function.lambda: Modifying... [id=ProggyLargeFileBreaker]
module.notifier-lambda-primary.aws_lambda_function.lambda: Modifying... [id=ProggyS3ObjectCreatedHandler]
aws_lambda_permission.allow_bucket: Destroying... [id=AllowExecutionFromS3Bucket]
aws_s3_bucket_notification.bucket_notification: Destroying... [id=bucket-example-us-east-1]
module.large-file-breaker-lambda-primary.aws_lambda_function.lambda: Modifications complete after 0s [id=ProggyLargeFileBreaker]
aws_s3_bucket_notification.bucket_notification: Destruction complete after 0s
aws_lambda_permission.allow_bucket: Destruction complete after 0s
aws_s3_bucket.bucket: Destroying... [id=bucket-example-us-east-1]
module.notifier-lambda-primary.aws_lambda_function.lambda: Modifications complete after 0s [id=ProggyS3ObjectCreatedHandler]

Error: error deleting S3 Bucket (bucket-example-us-east-1): BucketNotEmpty: The bucket you tried to delete is not empty. You must delete all versions in the bucket.
    status code: 409, request id: 0916517C5F1DF875, host id: +l5yzHjw7EMmdT4xdCmgg0Zx5W7zxpEil/dUWeJmnL8IvfPw2uKgvJ2Ee7utlRI0rkohdY+pjYI=

It wants to delete that bucket because I formerly told it to create it (I think). But I don't want Terraform to delete that bucket; it is in use by other teams.

How can I tell Terraform to do the apply, but not delete that bucket?

Asked Nov 07 '22 by Jason Michael


1 Answer

Put a prevent_destroy lifecycle block in the S3 bucket resource to make sure Terraform can never delete it:

  lifecycle {
    prevent_destroy = true
  }
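
For context, here is a minimal sketch of where that block goes, using the bucket resource from your plan output (the arguments shown are illustrative; keep whatever your real resource already has):

  resource "aws_s3_bucket" "bucket" {
    bucket = "bucket-example-us-east-1"
    acl    = "private"

    lifecycle {
      # Any plan that would destroy this bucket now fails with an error
      # instead of deleting the bucket.
      prevent_destroy = true
    }
  }

Note that prevent_destroy makes the plan fail rather than silently skip the destroy, so you also need to stop whatever is forcing the replacement; in your plan it is the change to the bucket argument ("bucket-example-us-east-1" -> "sftp-assembly-us-east-1") that forces the new resource.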

Next, remove all resource definitions other than the S3 bucket from your .tf files, then run terraform plan to confirm that Terraform wants to destroy only those other resources and not the S3 bucket.

If the plan looks as expected, run terraform apply.

Then recreate the resources you need besides the S3 bucket, but place them in a separate configuration, so that the S3 bucket is left out of it.
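
A rough sketch of that workflow with the standard Terraform CLI (the comments describe the intent; the directory layout is up to you):

  # In this configuration, keep only the aws_s3_bucket.bucket block
  # (plus its lifecycle setting) and delete the other resource blocks.
  terraform plan    # expect destroys only for the resources you removed,
                    # and no changes to the S3 bucket itself
  terraform apply

  # Then define the removed resources again in a separate configuration
  # with its own state, so future applies there never touch this bucket.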

Alternatively, use terraform state rm to remove the resources other than the S3 bucket from the state file, then run terraform import to import them into new .tf files in another location.
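
For the state approach, something along these lines (resource addresses are taken from your plan output; import ID formats are provider-specific, so check the AWS provider docs before running these):

  # Drop the bucket-dependent resources from this configuration's state
  # without destroying anything in AWS.
  terraform state rm aws_lambda_permission.allow_bucket
  terraform state rm aws_s3_bucket_notification.bucket_notification

  # In the new configuration that now defines those resources, import them
  # into its state. The IDs below follow the usual AWS provider conventions
  # (bucket name, and function_name/statement_id).
  terraform import aws_s3_bucket_notification.bucket_notification bucket-example-us-east-1
  terraform import aws_lambda_permission.allow_bucket ProggyS3ObjectCreatedHandler/AllowExecutionFromS3Bucket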

Answered Nov 15 '22 by mon