Terraforming CraftCMS for AWS (Docker + S3 backend for Assets)

shehan marino · Published in sysops · Jun 11, 2020 · 6 min read

So you want to deploy a site built to run on Craft CMS 3 and can't find proper documentation on running it on AWS? Well, I was in the same boat, and here is how I implemented it, to make your life easier.

Disclaimer: this post assumes the reader already has experience using Terraform.

The goal here is to fully automate the infrastructure needed to run Craft CMS in a Docker container on a container platform, which in this case is AWS ECS.

The Deployment Workflow

Keeping the workflow simple

The workflow can be automated in any CI/CD tool of your choice.

The Architecture

Resource list

  • Docker container on ECS with ALB
  • Redis cache cluster
  • RDS (Aurora, since the database is MySQL)
  • S3 Bucket for Assets
  • CloudFront distribution to serve assets to website
  • IAM (user) for S3 integration with CMS

Docker Image

The following Docker image is made up of PHP-FPM with nginx in front of it as the web server.

FROM wyveo/nginx-php-fpm:php74

LABEL maintainer="colin@wyveo.com"
# ${php_conf} (the FPM php.ini path) is expected to be set by the base image; only the CLI php.ini is added here
ENV php_cli_conf /etc/php/7.4/cli/php.ini

# Remove the existing webroot, raise PHP limits, point the PHP session handler at Redis
# (php.ini reads ${REDIS_PORT_6379_TCP} from the environment at runtime) and install the MySQL client
RUN rm -rf /usr/share/nginx/* && \
sed -i -e "s/memory_limit\s*=\s*.*/memory_limit = 1024M/g" ${php_conf} && \
sed -i -e "s/max_execution_time\s*=\s*.*/max_execution_time = 300/g" ${php_conf} && \
sed -i -e "s/session.save_handler\s*=\s*.*/session.save_handler = redis/g" ${php_conf} && \
sed -i -e "s/upload_max_filesize\s*=\s*.*/upload_max_filesize = 64M/g" ${php_conf} && \
sed -i -e "s/post_max_size\s*=\s*.*/post_max_size = 64M/g" ${php_conf} && \
sed -i -e "s/;max_input_vars\s*=\s*.*/max_input_vars = 10000/g" ${php_conf} && \
sed -i -e "s/;session.save_path\s*=\s*.*/session.save_path = \"\${REDIS_PORT_6379_TCP}\"/g" ${php_conf} && \
sed -i -e "s/memory_limit\s*=\s*.*/memory_limit = 1024M/g" ${php_cli_conf} && \
sed -i -e "s/max_execution_time\s*=\s*.*/max_execution_time = 300/g" ${php_cli_conf} && \
sed -i -e "s/session.save_handler\s*=\s*.*/session.save_handler = redis/g" ${php_cli_conf} && \
sed -i -e "s/upload_max_filesize\s*=\s*.*/upload_max_filesize = 64M/g" ${php_cli_conf} && \
sed -i -e "s/post_max_size\s*=\s*.*/post_max_size = 64M/g" ${php_cli_conf} && \
sed -i -e "s/;max_input_vars\s*=\s*.*/max_input_vars = 10000/g" ${php_cli_conf} && \
sed -i -e "s/;session.save_path\s*=\s*.*/session.save_path = \"\${REDIS_PORT_6379_TCP}\"/g" ${php_cli_conf} && \
apt-get update && \
apt-get install -y default-mysql-client

# Create Craft project
RUN composer create-project craftcms/craft /usr/share/nginx/

WORKDIR /usr/share/nginx/

# Add default config
COPY ./config /usr/share/nginx/config
COPY ./modules /usr/share/nginx/modules
COPY ./plugins-custom /usr/share/nginx/plugins-custom
COPY ./templates /usr/share/nginx/templates
COPY ./web/ /usr/share/nginx/web
COPY ./composer.json /usr/share/nginx/composer.json
COPY ./composer.lock /usr/share/nginx/composer.lock
COPY ./php.ini /usr/share/nginx/php.ini

# Install the project's Composer dependencies (composer.json / composer.lock copied above)
RUN composer install

# Install the yii2-redis and yii2-streamlog libraries
RUN composer require --prefer-dist yiisoft/yii2-redis -d /usr/share/nginx/
RUN composer require --prefer-dist codemix/yii2-streamlog -d /usr/share/nginx/

# Install the Amazon S3 plugin
RUN composer require craftcms/aws-s3

# Add default craft cms nginx config
ADD ./default.conf /etc/nginx/conf.d/default.conf

# Add a default environment file (actual values come from the ECS task environment variables)
COPY .env.example /usr/share/nginx/.env
COPY ./web/assets /usr/share/nginx/web/assets

RUN chown -Rf nginx:nginx /usr/share/nginx/

EXPOSE 80

Terraforming the Infrastructure

We normally make use of Terraform modules, but to make things easier to follow I will use plain Terraform resource blocks here.
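
The resource files below also lean on a handful of data sources (the existing ECS cluster, private and public subnets, a task/execution role, an S3 access-log bucket and the Docker Hub credentials secret) that never appear in the article. The names are assumptions inferred from how they are referenced later; a minimal data.tf sketch could look like this:

data.tf

# Lookups assumed by the resource files below (names are illustrative)
data "aws_caller_identity" "self" {}

data "aws_availability_zones" "current" {}

# Existing networking and ECS cluster created elsewhere
data "aws_vpc" "env" {
  tags = {
    Name = local.vpc_name
  }
}

data "aws_subnet_ids" "env_private" {
  vpc_id = data.aws_vpc.env.id

  tags = {
    Tier = "private"
  }
}

data "aws_subnet_ids" "env_public" {
  vpc_id = data.aws_vpc.env.id

  tags = {
    Tier = "public"
  }
}

data "aws_ecs_cluster" "env" {
  cluster_name = "${local.vpc_name}-${var.environment}"
}

# Central access-log bucket, task/execution role and Docker Hub credentials
data "aws_s3_bucket" "access_log" {
  bucket = "${local.vpc_name}-access-logs"
}

data "aws_iam_role" "instance" {
  name = "${local.prefix}-${var.environment}-ecs"
}

data "aws_secretsmanager_secret" "docker" {
  name = "dockerhub-credentials"
}

Adjust these to match your own VPC and ECS naming conventions.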

cloudfront.tf

resource "aws_cloudfront_origin_access_identity" "assets_origin" {
comment = "assets-s3"
}

resource "aws_cloudfront_distribution" "s3_distribution" {
origin {
domain_name = "${aws_s3_bucket.assets.bucket_regional_domain_name}"
origin_id = "${aws_s3_bucket.assets.id}"

s3_origin_config {
origin_access_identity = aws_cloudfront_origin_access_identity.assets_origin.cloudfront_access_identity_path
}
}

enabled = true
comment = "Some comment"
default_root_object = ""



default_cache_behavior {
allowed_methods = [
"DELETE",
"GET",
"HEAD",
"OPTIONS",
"PATCH",
"POST",
"PUT"]
cached_methods = [
"GET",
"HEAD"]
target_origin_id = "${aws_s3_bucket.assets.id}"

min_ttl = 0
default_ttl = 86400
max_ttl = 31536000
compress = true
viewer_protocol_policy = "redirect-to-https"

forwarded_values {
query_string = false

cookies {
forward = "none"
}
}

}


price_class = "PriceClass_200"

restrictions {
geo_restriction {
restriction_type = "none"
}
}

tags = {
environment = "${var.environment}"
application = "${var.app_name}"
}

viewer_certificate {
cloudfront_default_certificate = true
}

}
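
The distribution above uses the default *.cloudfront.net domain and certificate. If you would rather serve assets from a friendlier name such as assets.<root domain> (and point CLOUDFRONT_URL in the task definition below at it), you also need a DNS record and an alias on the distribution. Neither is in the original setup, so this is only a hedged sketch assuming a Route 53 hosted zone for local.root_domain and an ACM certificate for the assets hostname issued in us-east-1:

dns.tf (optional)

data "aws_route53_zone" "root" {
  name = local.root_domain
}

resource "aws_route53_record" "assets" {
  zone_id = data.aws_route53_zone.root.zone_id
  name    = "assets.${local.root_domain}"
  type    = "A"

  alias {
    name                   = aws_cloudfront_distribution.s3_distribution.domain_name
    zone_id                = aws_cloudfront_distribution.s3_distribution.hosted_zone_id
    evaluate_target_health = false
  }
}

# On the distribution you would then add:
#   aliases = ["assets.${local.root_domain}"]
# and swap cloudfront_default_certificate for an acm_certificate_arn (us-east-1)
# in the viewer_certificate block.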

s3.tf

resource "aws_s3_bucket" "assets" {
bucket = local.bucket_name

tags = local.tags

logging {
target_bucket = data.aws_s3_bucket.access_log.id
target_prefix = "${local.bucket_name}/"
}

versioning {
enabled = true
}


data "aws_iam_policy_document" "s3_policy" {
statement {
actions = ["s3:*"]
resources = ["${aws_s3_bucket.assets.arn}/*"]

principals {
type = "AWS"
identifiers = ["${aws_cloudfront_origin_access_identity.assets_origin.iam_arn}"]
}
}

statement {
actions = ["s3:*"]
resources = ["${aws_s3_bucket.assets.arn}"]

principals {
type = "AWS"
identifiers = ["${aws_cloudfront_origin_access_identity.assets_origin.iam_arn}"]
}
}

statement {
sid = "3"
actions = ["s3:GetObject"]
resources = ["${aws_s3_bucket.assets.arn}/*"]

principals {
identifiers = ["${aws_cloudfront_origin_access_identity.assets_origin.iam_arn}"]
type = "AWS"
}
}
}

resource "aws_s3_bucket_policy" "assets" {
bucket = "${aws_s3_bucket.assets.id}"
policy = "${data.aws_iam_policy_document.s3_policy.json}"
}

resource "aws_s3_bucket_public_access_block" "host_bucket" {
depends_on = [aws_s3_bucket_policy.assets]
bucket = aws_s3_bucket.assets.id

block_public_acls = true
block_public_policy = true
ignore_public_acls = true
restrict_public_buckets = true
}
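
The resource list mentions an IAM user for the S3 integration, and the ECS task definition below injects local.s3_key and local.s3_secret, but the user itself is never shown. A minimal sketch of how those locals could be produced (resource and local names are assumptions) looks like this:

iam.tf

# IAM user whose access keys the CMS uses for the S3 asset volume
resource "aws_iam_user" "assets" {
  name = "${local.prefix}-${var.environment}-assets"
  tags = local.tags
}

resource "aws_iam_access_key" "assets" {
  user = aws_iam_user.assets.name
}

resource "aws_iam_user_policy" "assets" {
  name = "${local.prefix}-${var.environment}-assets"
  user = aws_iam_user.assets.name

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Effect   = "Allow"
        Action   = ["s3:GetObject", "s3:PutObject", "s3:DeleteObject", "s3:ListBucket", "s3:GetBucketLocation"]
        Resource = [aws_s3_bucket.assets.arn, "${aws_s3_bucket.assets.arn}/*"]
      },
      {
        # The Craft S3 plugin can invalidate CloudFront when assets change
        Effect   = "Allow"
        Action   = ["cloudfront:CreateInvalidation"]
        Resource = "*"
      }
    ]
  })
}

locals {
  s3_key    = aws_iam_access_key.assets.id
  s3_secret = aws_iam_access_key.assets.secret
}

Keep in mind that Terraform stores the generated secret key in its state file, so treat the state accordingly.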

cache.tf

resource "aws_elasticache_cluster" "redis" {
cluster_id = "${local.prefix}-${var.environment}"
engine = "redis"
node_type = "cache.t2.small"
num_cache_nodes = 1
parameter_group_name = "default.redis5.0"
engine_version = "5.0.5"
port = 6379
maintenance_window = "sun:14:00-sun:15:00"
apply_immediately = true
snapshot_window = "15:00-16:00"
snapshot_retention_limit = 2
preferred_availability_zones = [data.aws_availability_zones.current.names[0]]
subnet_group_name = "${local.vpc_name}-cache"
security_group_ids = [aws_security_group.cache.id]
tags = local.tags
}
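
The task definition below reads the database endpoint from aws_rds_cluster.db and the credentials from a Secrets Manager secret, neither of which appears in the article. A minimal sketch under the same naming assumptions (Aurora MySQL, credentials generated and stored by Terraform; the subnet group and security group names are assumptions too) could be:

rds.tf

resource "random_password" "db" {
  length  = 32
  special = false
}

resource "aws_secretsmanager_secret" "db" {
  name = "${local.prefix}-${var.environment}-db"
  tags = local.tags
}

resource "aws_secretsmanager_secret_version" "db" {
  secret_id = aws_secretsmanager_secret.db.id
  secret_string = jsonencode({
    username = "craft"
    password = random_password.db.result
  })
}

resource "aws_rds_cluster" "db" {
  cluster_identifier      = "${local.prefix}-${var.environment}"
  engine                  = "aurora-mysql"
  database_name           = "craft"
  master_username         = jsondecode(aws_secretsmanager_secret_version.db.secret_string)["username"]
  master_password         = jsondecode(aws_secretsmanager_secret_version.db.secret_string)["password"]
  db_subnet_group_name    = "${local.vpc_name}-db"
  vpc_security_group_ids  = [aws_security_group.db.id]
  backup_retention_period = 7
  skip_final_snapshot     = false
  tags                    = local.tags
}

resource "aws_rds_cluster_instance" "db" {
  count              = 1
  identifier         = "${local.prefix}-${var.environment}-${count.index}"
  cluster_identifier = aws_rds_cluster.db.id
  engine             = aws_rds_cluster.db.engine
  instance_class     = "db.t3.medium"
}

Note that the task definition passes the credentials as plain environment variables; a tighter setup would reference the Secrets Manager secret through the container definition's "secrets" block instead.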

ecs.tf

resource "aws_ecs_task_definition" "website" {
depends_on = [aws_cloudwatch_log_group.website]
family = "${local.prefix}-${var.environment}"

container_definitions = <<EOF
[
{
"name": "${local.prefix}",
"image": "notedltd/website:${var.environment}",
"essential": true,
"environment": [
{"name" : "AWS_DEFAULT_REGION", "value": "${var.region}"},
{"name" : "ECS_AWSVPC_BLOCK_IMDS", "value": "true"},
{"name" : "S3_BUCKET", "value": "${local.bucket_name}"},
{"name" : "S3_KEY_ID", "value": "${local.s3_key}"},
{"name" : "S3_SECRET", "value": "${local.s3_secret}"},
{"name" : "S3_REGION", "value": "${var.region}"},
{"name" : "CLOUDFRONT_URL", "value": "https://assets.${aws_cloudfront_distribution.s3_distribution.domain}"},
{"name" : "CLOUDFRONT_DISTRIBUTION_ID", "value": "${aws_cloudfront_distribution.s3_distribution.id}"},
{"name" : "CLOUDFRONT_PATH_PREFIX", "value": ""},
{"name" : "DB_USER", "value": "${jsondecode(aws_secretsmanager_secret_version.db.secret_string)["username"]}"},
{"name" : "DB_PASSWORD", "value": "${jsondecode(aws_secretsmanager_secret_version.db.secret_string)["password"]}"},
{"name" : "DB_SERVER", "value": "${aws_rds_cluster.db.endpoint}"},
{"name" : "DB_DATABASE", "value": "${aws_rds_cluster.db.database_name}"},
{"name" : "DB_DRIVER", "value": "mysql"},
{"name" : "DB_PORT", "value": "${aws_rds_cluster.db.port}"},
{"name" : "REDIS_HOST", "value": "${aws_elasticache_cluster.redis.cache_nodes.0.address}"},
{"name" : "REDIS_PORT_6379_TCP", "value": "tcp://${aws_elasticache_cluster.redis.cache_nodes.0.address}:${aws_elasticache_cluster.redis.cache_nodes.0.port}"},
{"name" : "ENVIRONMENT", "value": "${var.environment}"},
{"name" : "SECURITY_KEY", "value": "${uuidv5("url", "https://www.${local.root_domain}")}"},
{"name" : "DEFAULT_SITE_URL", "value": "https://www.${local.root_domain}"}
],
"logConfiguration": {
"logDriver": "awslogs",
"options": {
"awslogs-group": "${aws_cloudwatch_log_group.website.name}",
"awslogs-region": "${var.region}",
"awslogs-stream-prefix": "instance"
}
},
"portMappings": [
{
"containerPort": ${local.port},
"hostPort": ${local.port},
"protocol": "tcp"
}
],
"repositoryCredentials": {
"credentialsParameter": "${data.aws_secretsmanager_secret.docker.arn}"
}
}
]
EOF


requires_compatibilities = ["FARGATE"]
network_mode = "awsvpc"
cpu = var.website["cpu"]
memory = var.website["memory"]
execution_role_arn = data.aws_iam_role.instance.arn


task_role_arn = data.aws_iam_role.instance.arn

lifecycle {
ignore_changes = [id]
}
}



# CPU value Memory value (MiB)
# 256 (.25 vCPU) 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
# 512 (.5 vCPU) 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
# 1024 (1 vCPU) 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
# 2048 (2 vCPU) Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
# 4096 (4 vCPU) Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)

resource "aws_ecs_service" "website" {
name = local.prefix
task_definition = "${aws_ecs_task_definition.website.family}:${aws_ecs_task_definition.website.revision}"
desired_count = var.website["count"]
launch_type = "FARGATE"
cluster = data.aws_ecs_cluster.env.id
health_check_grace_period_seconds = var.website["grace_period"]

network_configuration {
security_groups = [aws_security_group.website-instance.id]

subnets = data.aws_subnet_ids.env_private.ids
}

load_balancer {
target_group_arn = aws_alb_target_group.website.arn
container_name = local.prefix
container_port = local.port
}

depends_on = [
aws_alb.website,
aws_alb_target_group.website,
]
}

# Autoscaling
resource "aws_appautoscaling_target" "website" {
  service_namespace  = "ecs"
  resource_id        = "service/${data.aws_ecs_cluster.env.cluster_name}/${aws_ecs_service.website.name}"
  scalable_dimension = "ecs:service:DesiredCount"

  role_arn     = "arn:aws:iam::${data.aws_caller_identity.self.account_id}:role/aws-service-role/ecs.application-autoscaling.amazonaws.com/AWSServiceRoleForApplicationAutoScaling_ECSService"
  min_capacity = var.website["count"]
  max_capacity = var.website["max_count"]
}

resource "aws_appautoscaling_policy" "up" {
  name               = "${local.prefix}-${var.environment}_scale_up"
  service_namespace  = "ecs"
  resource_id        = "service/${data.aws_ecs_cluster.env.cluster_name}/${aws_ecs_service.website.name}"
  scalable_dimension = "ecs:service:DesiredCount"

  step_scaling_policy_configuration {
    adjustment_type         = "ChangeInCapacity"
    cooldown                = var.website["grace_period"] * 2
    metric_aggregation_type = "Maximum"

    step_adjustment {
      metric_interval_lower_bound = 0
      scaling_adjustment          = 1
    }
  }

  depends_on = [aws_appautoscaling_target.website]
}

resource "aws_appautoscaling_policy" "down" {
name = "${local.prefix}-${var.environment}_scale_down"
service_namespace = "ecs"
resource_id = "service/${data.aws_ecs_cluster.env.cluster_name}/${aws_ecs_service.website.name}"
scalable_dimension = "ecs:service:DesiredCount"

step_scaling_policy_configuration {
adjustment_type = "ChangeInCapacity"
cooldown = var.website["grace_period"] * 2
metric_aggregation_type = "Maximum"

step_adjustment {
metric_interval_lower_bound = 0
scaling_adjustment = -1
}
}

depends_on = [aws_appautoscaling_target.website]
}

resource "aws_cloudwatch_metric_alarm" "website_cpu_high" {
alarm_name = "${local.prefix}-${var.environment}_website_cpu_utilization_high"
comparison_operator = "GreaterThanOrEqualToThreshold"
evaluation_periods = "10"
metric_name = "CPUUtilization"
namespace = "AWS/ECS"
period = var.website["grace_period"]
statistic = "Maximum"
threshold = "85"

dimensions = {
ClusterName = data.aws_ecs_cluster.env.cluster_name
ServiceName = aws_ecs_service.website.name
}

alarm_actions = [aws_appautoscaling_policy.up.arn]
ok_actions = [aws_appautoscaling_policy.down.arn]
}
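
The service above also references an ALB, a target group, a security group and a CloudWatch log group that are not included in the article. A trimmed-down sketch of those supporting resources (the public subnet lookup, the certificate lookup and the health-check details are assumptions) might look like:

alb.tf

resource "aws_cloudwatch_log_group" "website" {
  name              = "${local.prefix}-${var.environment}"
  retention_in_days = 30
  tags              = local.tags
}

resource "aws_security_group" "website-instance" {
  name   = "${local.prefix}-${var.environment}-instance"
  vpc_id = data.aws_vpc.env.id

  # Only the ALB may reach the container port
  ingress {
    from_port       = local.port
    to_port         = local.port
    protocol        = "tcp"
    security_groups = [aws_security_group.website-alb.id]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

resource "aws_security_group" "website-alb" {
  name   = "${local.prefix}-${var.environment}-alb"
  vpc_id = data.aws_vpc.env.id

  ingress {
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

# Hypothetical lookup for an existing certificate covering www.<root_domain>
data "aws_acm_certificate" "site" {
  domain   = "www.${local.root_domain}"
  statuses = ["ISSUED"]
}

resource "aws_alb" "website" {
  name            = "${local.prefix}-${var.environment}"
  security_groups = [aws_security_group.website-alb.id]
  subnets         = data.aws_subnet_ids.env_public.ids
  tags            = local.tags
}

resource "aws_alb_target_group" "website" {
  name        = "${local.prefix}-${var.environment}"
  port        = local.port
  protocol    = "HTTP"
  vpc_id      = data.aws_vpc.env.id
  target_type = "ip" # required for Fargate tasks in awsvpc mode

  health_check {
    path    = "/"
    matcher = "200-399"
  }
}

resource "aws_alb_listener" "website" {
  load_balancer_arn = aws_alb.website.arn
  port              = 443
  protocol          = "HTTPS"
  certificate_arn   = data.aws_acm_certificate.site.arn

  default_action {
    type             = "forward"
    target_group_arn = aws_alb_target_group.website.arn
  }
}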

NOTE: you will have to add or edit your own locals and the obvious variables to match your Terraform code structure.
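
For reference, the snippets above assume roughly the following locals and variables. The names and values here are illustrative only, not taken from the original code:

locals.tf

# The variable declarations would normally live in variables.tf
variable "environment" {}
variable "app_name" {}
variable "region" {}

variable "website" {
  default = {
    cpu          = 512
    memory       = 1024
    count        = 2
    max_count    = 4
    grace_period = 120
  }
}

locals {
  prefix      = "website"
  vpc_name    = "main"
  root_domain = "example.com"
  bucket_name = "${local.prefix}-${var.environment}-assets"
  port        = 80

  tags = {
    environment = var.environment
    application = var.app_name
  }
}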

Configuring S3 to work with CraftCMS

Please follow the https://nystudio107.com/blog/using-aws-s3-buckets-cloudfront-distribution-with-craft-cms blog post to configure it. The ECS task definition already injects the required variables into the service, so you might only need to upload your assets to the bucket.

#Terraform #CraftCMS #AWS #ECS #FARGATE
