EFS implementation for docker plugin
This commit is contained in:
parent
0a5838f409
commit
c67df523d0
12 changed files with 298 additions and 14 deletions
cloud/aws
docker
8
cloud/aws/efs_file_system/debug.tf
Normal file
8
cloud/aws/efs_file_system/debug.tf
Normal file
|
@ -0,0 +1,8 @@
|
|||
# Dumps this module's resolved inputs to a local JSON file for debugging.
resource "local_file" "debug" {
  filename        = "${path.root}/.debug/aws/s3_efs/efs.${var.volume_name}.json"
  file_permission = "0600"

  content = jsonencode({
    volume_name = var.volume_name,
    tags        = local.tags,
  })
}
|
48
cloud/aws/efs_file_system/efs.tf
Normal file
48
cloud/aws/efs_file_system/efs.tf
Normal file
|
@ -0,0 +1,48 @@
|
|||
# When files transition to the Infrequent Access (IA) storage class.
variable "ia_lifecycle_policy" {
  type        = string
  default     = "AFTER_30_DAYS"
  description = "The lifecycle policy for transitioning to IA storage"

  validation {
    # Anchored with ^...$ so substrings and near-misses are rejected; the
    # original unanchored pattern accepted invalid values such as
    # "AFTER_1_DAYS" or "AFTER_30_DAY". AFTER_1_DAY is singular, all other
    # AWS-valid values are plural.
    condition     = can(regex("^AFTER_(1_DAY|(7|14|30|60|90|180|270|365)_DAYS)$", var.ia_lifecycle_policy))
    error_message = "Must be one of AFTER_1_DAY, AFTER_7_DAYS, AFTER_14_DAYS, AFTER_30_DAYS, AFTER_60_DAYS, AFTER_90_DAYS, AFTER_180_DAYS, AFTER_270_DAYS, AFTER_365_DAYS."
  }
}
|
||||
# When files transition to the Archive storage class.
variable "archive_lifecycle_policy" {
  type    = string
  default = "AFTER_60_DAYS"
  # Fixed: the description previously said "IA storage", copy-pasted from
  # the ia_lifecycle_policy variable.
  description = "The lifecycle policy for transitioning to Archive storage"

  validation {
    # Anchored with ^...$ so substrings and near-misses are rejected;
    # AFTER_1_DAY is singular, all other AWS-valid values are plural.
    condition     = can(regex("^AFTER_(1_DAY|(7|14|30|60|90|180|270|365)_DAYS)$", var.archive_lifecycle_policy))
    error_message = "Must be one of AFTER_1_DAY, AFTER_7_DAYS, AFTER_14_DAYS, AFTER_30_DAYS, AFTER_60_DAYS, AFTER_90_DAYS, AFTER_180_DAYS, AFTER_270_DAYS, AFTER_365_DAYS."
  }
}
|
||||
# Toggle for callers that manage the EFS file system elsewhere and only need
# the IAM plumbing from this module.
variable "create_fs" {
  type        = bool
  default     = true
  description = "Create the EFS file system, or let something else do it?"
}
|
||||
# The encrypted EFS file system backing this volume; creation is optional via
# var.create_fs.
resource "aws_efs_file_system" "volume" {
  count = var.create_fs ? 1 : 0

  creation_token  = var.volume_name
  encrypted       = true
  throughput_mode = "elastic"

  # Tier cold data down to IA and then Archive, and promote it back to
  # primary storage on first access.
  lifecycle_policy {
    transition_to_ia                    = var.ia_lifecycle_policy
    transition_to_archive               = var.archive_lifecycle_policy
    transition_to_primary_storage_class = "AFTER_1_ACCESS"
  }

  tags = merge(local.tags, {
    Name = var.volume_name
  })
}
|
||||
|
||||
# Root ("/") access point for the file system; only exists when this module
# created the file system itself.
resource "aws_efs_access_point" "access_point" {
  count = var.create_fs ? 1 : 0

  file_system_id = aws_efs_file_system.volume[0].id

  root_directory {
    path = "/"
  }

  tags = merge(local.tags, {
    Name = "${var.volume_name}-access-point"
  })
}
|
41
cloud/aws/efs_file_system/iam.tf
Normal file
41
cloud/aws/efs_file_system/iam.tf
Normal file
|
@ -0,0 +1,41 @@
|
|||
# One IAM user per entry in var.users; each gets its own policy and access key
# in the companion resources below.
resource "aws_iam_user" "db_storage" {
  for_each = toset(var.users)

  name = each.value
  tags = var.tags
}
|
||||
# Per-user policy document granting EFS management rights, scoped to this
# account/region's file systems.
data "aws_iam_policy_document" "db_storage" {
  for_each = toset(var.users)

  statement {
    actions = [
      # NOTE(review): this wildcard grants *every* elasticfilesystem action,
      # which makes all the specific elasticfilesystem:* entries below
      # redundant and defeats least privilege. Confirm what the consumer
      # (rexray/efs plugin) actually needs before narrowing.
      "elasticfilesystem:*",
      "elasticfilesystem:CreateFileSystem",
      "elasticfilesystem:CreateMountTarget",
      "ec2:DescribeSubnets",
      "ec2:DescribeNetworkInterfaces",
      "ec2:CreateNetworkInterface",
      "elasticfilesystem:CreateTags",
      "elasticfilesystem:DeleteFileSystem",
      "elasticfilesystem:DeleteMountTarget",
      "ec2:DeleteNetworkInterface",
      "elasticfilesystem:DescribeFileSystems",
      "elasticfilesystem:DescribeMountTargets"
    ]
    # NOTE(review): the ec2:* actions above are unlikely to match an
    # elasticfilesystem file-system ARN; they probably need their own
    # statement with ec2 resources (or "*") — verify against CloudTrail.
    resources = [
      "arn:aws:elasticfilesystem:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:file-system/*",
    ]
    effect = "Allow"
  }
}
|
||||
|
||||
# Current region and account id, used to build the account/region-scoped
# file-system ARN in the policy document.
data "aws_region" "current" {}

data "aws_caller_identity" "current" {}
|
||||
# Attach the rendered policy document to each generated IAM user as an
# inline policy.
resource "aws_iam_user_policy" "db_storage" {
  for_each = toset(var.users)

  name   = "efs_policy_${each.value}_to_${var.volume_name}"
  user   = aws_iam_user.db_storage[each.key].name
  policy = data.aws_iam_policy_document.db_storage[each.key].json
}
|
||||
# Programmatic credentials for each user; surfaced via the "users" output.
resource "aws_iam_access_key" "db_storage" {
  for_each = toset(var.users)

  user = aws_iam_user.db_storage[each.key].name
}
|
29
cloud/aws/efs_file_system/inputs.tf
Normal file
29
cloud/aws/efs_file_system/inputs.tf
Normal file
|
@ -0,0 +1,29 @@
|
|||
# Also used as the EFS creation token and as the Name tag.
variable "volume_name" {
  type        = string
  description = "The prefix for the efs file system name"
}
|
||||
|
||||
# Extra tags merged onto every resource this module creates.
variable "tags" {
  type    = map(string)
  default = {}
  # Fixed: description said "bucket", copy-pasted from the s3_bucket module —
  # this module manages EFS resources.
  description = "AWS resource tags to apply to the EFS file system and related resources"
}
|
||||
locals {
  # Module-default tags (currently none) merged with caller-supplied tags;
  # caller values win on key collisions.
  tags = merge({}, var.tags)
}
|
||||
variable "users" {
  type        = list(string)
  description = "List of users to generate EFS API keys for. Will be used as the IAM name."

  # No default: the previous `default = []` could never satisfy the
  # validation below, so omitting the variable always produced a validation
  # error anyway. Making it required surfaces that at plan time instead.
  validation {
    condition     = length(var.users) > 0
    error_message = "At least one user must be specified!"
  }
}
|
||||
# NOTE(review): not referenced by any resource visible in this module —
# confirm whether a mount-target resource elsewhere consumes it.
variable "security_group_ids" {
  type        = list(string)
  default     = []
  description = "The security group ids to apply to the task"
}
|
17
cloud/aws/efs_file_system/outputs.tf
Normal file
17
cloud/aws/efs_file_system/outputs.tf
Normal file
|
@ -0,0 +1,17 @@
|
|||
# Per-user IAM credentials, keyed by user name.
# Marked sensitive because aws_iam_access_key.secret is provider-sensitive;
# without `sensitive = true` Terraform refuses to expose it through an
# output. Callers that need the raw values can unwrap with nonsensitive().
output "users" {
  sensitive = true
  value = {
    for user in var.users : user => {
      name       = user
      access_key = aws_iam_access_key.db_storage[user].id
      secret_key = aws_iam_access_key.db_storage[user].secret
    }
  }
}
|
||||
# The whole file-system resource, or null when create_fs = false.
output "volume" {
  value = try(aws_efs_file_system.volume[0], null)
}
|
||||
# File-system ARN, or null when create_fs = false.
output "arn" {
  value = try(aws_efs_file_system.volume[0].arn, null)
}
|
||||
# One Zone AZ name if configured, else null (also null when create_fs = false).
output "availability_zone" {
  value = try(aws_efs_file_system.volume[0].availability_zone_name, null)
}
|
||||
# NOTE(review): unlike the sibling outputs this is not indexed or
# try()-wrapped, so it yields the resource *list* (empty when
# create_fs = false) rather than a single object or null — confirm
# callers expect a list before changing the shape.
output "access_point" {
  value = aws_efs_access_point.access_point
}
|
18
cloud/aws/efs_file_system/terraform.tf
Normal file
18
cloud/aws/efs_file_system/terraform.tf
Normal file
|
@ -0,0 +1,18 @@
|
|||
# Provider requirements for the efs_file_system module.
terraform {
  required_version = "~> 1.6"

  required_providers {
    aws = {
      source = "hashicorp/aws"
      version = "~> 5.0"
    }
    # NOTE(review): pinned to an exact version while the other providers use
    # pessimistic (~>) constraints, and no random_* resource is visible in
    # this module — confirm it is still needed.
    random = {
      source = "hashicorp/random"
      version = "3.6.2"
    }
    # Needed by the local_file debug resource.
    local = {
      source = "hashicorp/local"
      version = "~>2.1"
    }
  }
}
|
|
@ -3,12 +3,3 @@ resource "aws_s3_bucket" "bucket" {
|
|||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "local_file" "debug" {
|
||||
filename = "${path.root}/.debug/aws/s3_bucket/bucket.${aws_s3_bucket.bucket.bucket}.json"
|
||||
content = jsonencode({
|
||||
bucket_prefix = var.bucket_name_prefix,
|
||||
tags = local.tags,
|
||||
endpoint = aws_s3_bucket.bucket.bucket_domain_name
|
||||
})
|
||||
file_permission = "0600"
|
||||
}
|
9
cloud/aws/s3_bucket/debug.tf
Normal file
9
cloud/aws/s3_bucket/debug.tf
Normal file
|
@ -0,0 +1,9 @@
|
|||
# Dumps the resolved bucket configuration to a local JSON file for debugging.
resource "local_file" "debug" {
  filename        = "${path.root}/.debug/aws/s3_bucket/bucket.${aws_s3_bucket.bucket.bucket}.json"
  file_permission = "0600"

  content = jsonencode({
    bucket_prefix = var.bucket_name_prefix,
    tags          = local.tags,
    endpoint      = aws_s3_bucket.bucket.bucket_domain_name
  })
}
|
64
docker/efs-volume/efs-volume.tf
Normal file
64
docker/efs-volume/efs-volume.tf
Normal file
|
@ -0,0 +1,64 @@
|
|||
locals {
  # Sanitise the volume name: spaces/underscores become hyphens, any other
  # non-alphanumeric is stripped, and runs of hyphens collapse to one.
  #
  # Fixed: Terraform's replace() only treats the pattern as a regex when it
  # is wrapped in forward slashes; the previous bare "[^a-z0-9]" / "[ _]"
  # patterns matched those characters literally and never fired. The order
  # was also inverted — stripping non-alphanumerics first would have removed
  # the spaces/underscores before they could be hyphenated.
  volume_name = replace(replace(replace(lower(var.volume_name), "/[ _]/", "-"), "/[^a-z0-9-]/", ""), "/-+/", "-")

  alias    = "efs-${local.volume_name}"
  iam_user = "${var.stack_name}-efs-${local.volume_name}"

  # NOTE(review): named "ebs_" in an EFS module and fed by the S3-flavoured
  # var.bucket_name — looks like a leftover; confirm it is consumed anywhere.
  ebs_volume_name = var.bucket_name == null ? local.volume_name : var.bucket_name

  # Unwrapped with nonsensitive() because the values are interpolated into
  # docker_plugin env strings below.
  access_key = nonsensitive(module.efs_file_system.users[local.iam_user].access_key)
  secret_key = nonsensitive(module.efs_file_system.users[local.iam_user].secret_key)
}
|
||||
# Installs the rexray/efs docker plugin under a per-volume alias, configured
# with the IAM credentials provisioned by module.efs_file_system.
resource "docker_plugin" "efs" {
  depends_on = [module.efs_file_system]
  name = var.image_efs_plugin
  alias = local.alias
  enabled = true
  # The plugin needs host networking, /dev mounts, all devices and
  # CAP_SYS_ADMIN to perform NFS mounts on the host.
  grant_permissions {
    name = "network"
    value = ["host"]
  }
  grant_permissions {
    name = "mount"
    value = ["/dev"]
  }
  grant_permissions {
    name = "allow-all-devices"
    value = ["true"]
  }
  grant_permissions {
    name = "capabilities"
    value = ["CAP_SYS_ADMIN"]
  }
  env = [
    "REXRAY_LOGLEVEL=warn",
    "EFS_ACCESSKEY=${local.access_key}",
    "EFS_SECRETKEY=${local.secret_key}",
    "EFS_REGION=${data.aws_region.current.name}",
    # NOTE(review): the escaped quotes become part of the env value itself
    # ("sg-..." including the quote characters) — confirm the plugin expects
    # that, or drop the \" wrapping.
    "EFS_SECURITYGROUPS=\"${join(" ", var.security_group_ids)}\"",
  ]
  lifecycle {
    create_before_destroy = false
  }
}
|
||||
|
||||
data "aws_region" "current" {}
|
||||
|
||||
# IAM user/keys (and optionally the file system) for this volume.
module "efs_file_system" {
  source = "../../cloud/aws/efs_file_system"

  volume_name = var.volume_name
  users       = [local.iam_user]

  # Fixed: try() instead of coalesce(). var.application defaults to null, so
  # coalesce(var.application.application_tag, {}) crashed on the attribute
  # access before coalesce ever evaluated.
  tags = merge(var.tags, { Name = var.volume_name }, try(var.application.application_tag, {}))

  ia_lifecycle_policy = var.ia_lifecycle_policy
  security_group_ids  = var.security_group_ids

  # With create_fs = false this module only provisions the IAM side;
  # presumably the rexray plugin creates the file system on demand — confirm.
  create_fs = false
}
|
||||
# The docker volume itself, created only after the plugin is installed and
# bound to it via the plugin alias as driver.
module "volume" {
  source     = "../../docker/volume"
  depends_on = [docker_plugin.efs, ]

  stack_name           = var.stack_name
  volume_name          = local.volume_name
  volume_name_explicit = true
  driver               = local.alias
}
|
||||
# Pass the created docker volume through to callers.
output "volume" {
  value = module.volume.volume
}
|
49
docker/efs-volume/inputs.tf
Normal file
49
docker/efs-volume/inputs.tf
Normal file
|
@ -0,0 +1,49 @@
|
|||
# Used to namespace the generated IAM user name.
variable "stack_name" {
  type        = string
  description = "The name of the collective stack"
}
|
||||
# Sanitised in locals before use as the docker volume / plugin alias name.
variable "volume_name" {
  type        = string
  description = "The name of the volume"
}
|
||||
|
||||
# NOTE(review): S3 wording in an EFS volume module — this only feeds
# local.ebs_volume_name; confirm it is still used, or rename/retire it.
variable "bucket_name" {
  description = "Override the generated name of the S3 bucket to create"
  type = string
  default = null
}
|
||||
# Extra tags merged onto the EFS resources created via module.efs_file_system.
variable "tags" {
  type    = map(string)
  default = {}
  # Fixed: description said "bucket", copy-pasted from the s3 module —
  # this module manages an EFS-backed docker volume.
  description = "AWS resource tags to apply to the EFS resources"
}
|
||||
# Docker plugin image providing the EFS volume driver.
variable "image_efs_plugin" {
  type        = string
  default     = "rexray/efs:0.11.4"
  description = "The docker image to use for the service."
}
|
||||
|
||||
# Optional AWS myApplication association; only application_tag is consumed
# here (merged into the EFS module's tags).
variable "application" {
  description = "The AWS myApplication to be associated with this cluster"
  default     = null

  type = object({
    arn             = string
    name            = string
    description     = string
    application_tag = map(string)
  })
}
|
||||
# Passed through to the efs_file_system module.
variable "ia_lifecycle_policy" {
  type        = string
  default     = "AFTER_30_DAYS"
  description = "The lifecycle policy for transitioning to IA storage"

  validation {
    # Anchored with ^...$ so substrings and near-misses are rejected; the
    # original unanchored pattern accepted invalid values such as
    # "AFTER_1_DAYS" or "AFTER_30_DAY".
    condition     = can(regex("^AFTER_(1_DAY|(7|14|30|60|90|180|270|365)_DAYS)$", var.ia_lifecycle_policy))
    error_message = "Must be one of AFTER_1_DAY, AFTER_7_DAYS, AFTER_14_DAYS, AFTER_30_DAYS, AFTER_60_DAYS, AFTER_90_DAYS, AFTER_180_DAYS, AFTER_270_DAYS, AFTER_365_DAYS."
  }
}
|
||||
|
||||
# Interpolated into the plugin's EFS_SECURITYGROUPS env var.
variable "security_group_ids" {
  type        = list(string)
  description = "The security group ids to apply to the task"
}
|
15
docker/efs-volume/terraform.tf
Normal file
15
docker/efs-volume/terraform.tf
Normal file
|
@ -0,0 +1,15 @@
|
|||
# Provider requirements for the efs-volume module.
terraform {
  required_version = "~> 1.6"

  required_providers {
    # Region lookup for the plugin configuration.
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
    # Plugin installation on the docker host.
    docker = {
      source  = "kreuzwerker/docker"
      version = "~> 3.0"
    }
  }
}
|
||||
|
||||
|
|
@ -12,11 +12,6 @@ variable "bucket_name" {
|
|||
type = string
|
||||
default = null
|
||||
}
|
||||
# Empty string means the bucket root is mounted.
variable "subdir" {
  type        = string
  default     = ""
  description = "The subdirectory to mount in the S3 bucket"
}
|
||||
variable "tags" {
|
||||
type = map(string)
|
||||
default = {}
|
||||
|
|
Loading…
Reference in a new issue