I'm receiving this curious error message:

PlatformTaskDefinitionIncompatibilityException: The specified platform does not satisfy the task definition’s required capabilities

I suspect it has something to do with this line, although I'm not quite sure:

file_system_id = aws_efs_file_system.main.id

This is my script:
provider "aws" {
region = "us-east-1"
profile = var.profile
}
### Network
# Fetch AZs in the current region
data "aws_availability_zones" "available" {}
resource "aws_vpc" "main" {
cidr_block = "172.17.0.0/16"
}
# Create var.az_count private subnets, each in a different AZ
resource "aws_subnet" "private" {
count = "${var.az_count}"
cidr_block = "${cidrsubnet(aws_vpc.main.cidr_block, 8, count.index)}"
availability_zone = "${data.aws_availability_zones.available.names[count.index]}"
vpc_id = "${aws_vpc.main.id}"
}
# Create var.az_count public subnets, each in a different AZ
resource "aws_subnet" "public" {
count = "${var.az_count}"
cidr_block = "${cidrsubnet(aws_vpc.main.cidr_block, 8, var.az_count + count.index)}"
availability_zone = "${data.aws_availability_zones.available.names[count.index]}"
vpc_id = "${aws_vpc.main.id}"
map_public_ip_on_launch = true
}
# IGW for the public subnet
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.main.id}"
}
# Route the public subnet traffic through the IGW
resource "aws_route" "internet_access" {
route_table_id = "${aws_vpc.main.main_route_table_id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.gw.id}"
}
# Create a NAT gateway with an EIP for each private subnet to get internet connectivity
resource "aws_eip" "gw" {
count = "${var.az_count}"
vpc = true
depends_on = ["aws_internet_gateway.gw"]
}
resource "aws_nat_gateway" "gw" {
count = "${var.az_count}"
subnet_id = "${element(aws_subnet.public.*.id, count.index)}"
allocation_id = "${element(aws_eip.gw.*.id, count.index)}"
}
# Create a new route table for the private subnets
# And make it route non-local traffic through the NAT gateway to the internet
resource "aws_route_table" "private" {
count = "${var.az_count}"
vpc_id = "${aws_vpc.main.id}"
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = "${element(aws_nat_gateway.gw.*.id, count.index)}"
}
}
# Explicitely associate the newly created route tables to the private subnets (so they don't default to the main route table)
resource "aws_route_table_association" "private" {
count = "${var.az_count}"
subnet_id = "${element(aws_subnet.private.*.id, count.index)}"
route_table_id = "${element(aws_route_table.private.*.id, count.index)}"
}
### Security

# ALB security group
# This is the group you need to edit if you want to restrict access to your application
resource "aws_security_group" "lb" {
  name        = "tf-ecs-alb"
  description = "controls access to the ALB"
  vpc_id      = "${aws_vpc.main.id}"

  ingress {
    protocol    = "tcp"
    from_port   = 80
    to_port     = 80
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

# Traffic to the ECS cluster should only come from the ALB
resource "aws_security_group" "ecs_tasks" {
  name        = "tf-ecs-tasks"
  description = "allow inbound access from the ALB only"
  vpc_id      = "${aws_vpc.main.id}"

  ingress {
    protocol        = "tcp"
    from_port       = "${var.app_port}"
    to_port         = "${var.app_port}"
    security_groups = ["${aws_security_group.lb.id}"]
  }

  egress {
    protocol    = "-1"
    from_port   = 0
    to_port     = 0
    cidr_blocks = ["0.0.0.0/0"]
  }
}

### ALB

resource "aws_alb" "main" {
  name            = "tf-ecs-chat"
  subnets         = aws_subnet.public.*.id
  security_groups = ["${aws_security_group.lb.id}"]
}

resource "aws_alb_target_group" "app" {
  name        = "tf-ecs-chat"
  port        = 80
  protocol    = "HTTP"
  vpc_id      = "${aws_vpc.main.id}"
  target_type = "ip"
}

# Redirect all traffic from the ALB to the target group
resource "aws_alb_listener" "front_end" {
  load_balancer_arn = "${aws_alb.main.id}"
  port              = "80"
  protocol          = "HTTP"

  default_action {
    target_group_arn = "${aws_alb_target_group.app.id}"
    type             = "forward"
  }
}
### ECS

resource "aws_ecs_cluster" "main" {
  name = "tf-ecs-cluster"
}

resource "aws_ecs_task_definition" "app" {
  family                   = "app"
  network_mode             = "awsvpc"
  requires_compatibilities = ["FARGATE"]
  cpu                      = "${var.fargate_cpu}"
  memory                   = "${var.fargate_memory}"
  task_role_arn            = "${aws_iam_role.ecs_task_role_role.arn}"
  execution_role_arn       = "${aws_iam_role.ecs_task_role_role.arn}"

  container_definitions = <<DEFINITION
[
  {
    "cpu": ${var.fargate_cpu},
    "image": "${var.app_image}",
    "memory": ${var.fargate_memory},
    "name": "app",
    "networkMode": "awsvpc",
    "portMappings": [
      {
        "containerPort": ${var.app_port},
        "hostPort": ${var.app_port}
      }
    ]
  }
]
DEFINITION

  volume {
    name = "efs-html"

    efs_volume_configuration {
      file_system_id = aws_efs_file_system.main.id
      root_directory = "/opt/data"
    }
  }
}

resource "aws_ecs_service" "main" {
  name            = "tf-ecs-service"
  cluster         = "${aws_ecs_cluster.main.id}"
  task_definition = "${aws_ecs_task_definition.app.arn}"
  desired_count   = "${var.app_count}"
  launch_type     = "FARGATE"

  network_configuration {
    security_groups = ["${aws_security_group.ecs_tasks.id}"]
    subnets         = aws_subnet.private.*.id
  }

  load_balancer {
    target_group_arn = "${aws_alb_target_group.app.id}"
    container_name   = "app"
    container_port   = "${var.app_port}"
  }

  depends_on = [
    "aws_alb_listener.front_end",
  ]
}
# ECS roles & policies

# Create the IAM task role for the ECS task definition
resource "aws_iam_role" "ecs_task_role_role" {
  name               = "test-ecs-task-role"
  assume_role_policy = "${file("ecs-task-role.json")}"

  tags = {
    Terraform = "true"
  }
}

# Create the AmazonECSTaskExecutionRolePolicy managed policy
resource "aws_iam_policy" "ecs_task_role_policy" {
  name        = "test-ecs-AmazonECSTaskExecutionRolePolicy"
  description = "Provides access to other AWS service resources that are required to run Amazon ECS tasks"
  policy      = "${file("ecs-task-policy.json")}"
}

# Attach the AmazonECSTaskExecutionRolePolicy managed policy to the ECS task role
resource "aws_iam_role_policy_attachment" "ecs_task_policy_attachment" {
  role       = "${aws_iam_role.ecs_task_role_role.name}"
  policy_arn = "${aws_iam_policy.ecs_task_role_policy.arn}"
}

resource "aws_efs_file_system" "main" {
  tags = {
    Name = "ECS-EFS-FS"
  }
}

resource "aws_efs_mount_target" "main" {
  count          = "${var.subnets-count}"
  file_system_id = "${aws_efs_file_system.main.id}"
  subnet_id      = "${element(var.subnets, count.index)}"
}
variables.tf:
variable "az_count" {
description = "Number of AZs to cover in a given AWS region"
default = "2"
}
variable "app_image" {
description = "Docker image to run in the ECS cluster"
default = "xxxxxxxxxx.dkr.ecr.us-east-1.amazonaws.com/test1:nginx"
}
variable "app_port" {
description = "Port exposed by the docker image to redirect traffic to"
# default = 3000
default = 80
}
variable "app_count" {
description = "Number of docker containers to run"
default = 2
}
variable "fargate_cpu" {
description = "Fargate instance CPU units to provision (1 vCPU = 1024 CPU units)"
default = "256"
}
variable "fargate_memory" {
description = "Fargate instance memory to provision (in MiB)"
default = "512"
}
################
variable "subnets" {
type = "list"
description = "list of subnets to mount the fs to"
default = ["subnet-xxxxxxx","subnet-xxxxxxx"]
}
variable "subnets-count" {
type = "string"
description = "number of subnets to mount to"
default = 2
}
You simply need to upgrade your ECS service to the latest platform version:
resource "aws_ecs_service" "service" {
platform_version = "1.4.0"
launch_type = "FARGATE"
...
}
The EFS feature is only available on the latest platform version. When you don't specify platform_version, it defaults to LATEST, which is set to 1.3.0 and doesn't allow EFS volumes.
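Applied to the configuration in the question, the fix is a one-line addition to the existing aws_ecs_service resource; every other argument stays exactly as you already have it:

resource "aws_ecs_service" "main" {
  name             = "tf-ecs-service"
  cluster          = "${aws_ecs_cluster.main.id}"
  task_definition  = "${aws_ecs_task_definition.app.arn}"
  desired_count    = "${var.app_count}"
  launch_type      = "FARGATE"
  platform_version = "1.4.0" # EFS volumes require Fargate platform version 1.4.0

  network_configuration {
    security_groups = ["${aws_security_group.ecs_tasks.id}"]
    subnets         = aws_subnet.private.*.id
  }

  load_balancer {
    target_group_arn = "${aws_alb_target_group.app.id}"
    container_name   = "app"
    container_port   = "${var.app_port}"
  }

  depends_on = [
    "aws_alb_listener.front_end",
  ]
}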
UPDATE: As of 1/21/22, the LATEST ECS platform version is 1.4.0, so explicitly specifying the platform version is no longer necessary for EFS mounts to work. Per:
https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform-linux-fargate.html
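If you want to confirm which platform version a service is actually running on, you can query it with the AWS CLI (the cluster and service names below are the ones from the question's configuration):

aws ecs describe-services \
  --cluster tf-ecs-cluster \
  --services tf-ecs-service \
  --query "services[0].platformVersion"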