Commit 64c25f15 authored by michaelguarino

remove template_file provider usage

parent a26c9fd0
Showing with 84 additions and 390 deletions
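This commit drops the external hashicorp/template provider in favor of Terraform's built-in templatefile() function (available since 0.12). A minimal sketch of the pattern applied throughout; the names here are illustrative, not taken from the diff:

# Before: data source from the external template provider
data "template_file" "userdata" {
  template = file("${path.module}/templates/userdata.sh.tpl")
  vars = {
    cluster_name = var.cluster_name
  }
}

# After: built-in function call, no extra provider required
locals {
  userdata = templatefile("${path.module}/templates/userdata.sh.tpl", {
    cluster_name = var.cluster_name
  })
}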
@@ -55,98 +55,6 @@ data "aws_iam_policy_document" "cluster_assume_role_policy" {
}
}
data "template_file" "userdata" {
count = var.create_eks ? local.worker_group_count : 0
template = lookup(
var.worker_groups[count.index],
"userdata_template_file",
file(
lookup(var.worker_groups[count.index], "platform", local.workers_group_defaults["platform"]) == "windows"
? "${path.module}/templates/userdata_windows.tpl"
: "${path.module}/templates/userdata.sh.tpl"
)
)
vars = merge({
platform = lookup(var.worker_groups[count.index], "platform", local.workers_group_defaults["platform"])
cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
pre_userdata = lookup(
var.worker_groups[count.index],
"pre_userdata",
local.workers_group_defaults["pre_userdata"],
)
additional_userdata = lookup(
var.worker_groups[count.index],
"additional_userdata",
local.workers_group_defaults["additional_userdata"],
)
bootstrap_extra_args = lookup(
var.worker_groups[count.index],
"bootstrap_extra_args",
local.workers_group_defaults["bootstrap_extra_args"],
)
kubelet_extra_args = lookup(
var.worker_groups[count.index],
"kubelet_extra_args",
local.workers_group_defaults["kubelet_extra_args"],
)
},
lookup(
var.worker_groups[count.index],
"userdata_template_extra_args",
local.workers_group_defaults["userdata_template_extra_args"]
)
)
}
data "template_file" "launch_template_userdata" {
count = var.create_eks ? local.worker_group_launch_template_count : 0
template = lookup(
var.worker_groups_launch_template[count.index],
"userdata_template_file",
file(
lookup(var.worker_groups_launch_template[count.index], "platform", local.workers_group_defaults["platform"]) == "windows"
? "${path.module}/templates/userdata_windows.tpl"
: "${path.module}/templates/userdata.sh.tpl"
)
)
vars = merge({
platform = lookup(var.worker_groups_launch_template[count.index], "platform", local.workers_group_defaults["platform"])
cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
pre_userdata = lookup(
var.worker_groups_launch_template[count.index],
"pre_userdata",
local.workers_group_defaults["pre_userdata"],
)
additional_userdata = lookup(
var.worker_groups_launch_template[count.index],
"additional_userdata",
local.workers_group_defaults["additional_userdata"],
)
bootstrap_extra_args = lookup(
var.worker_groups_launch_template[count.index],
"bootstrap_extra_args",
local.workers_group_defaults["bootstrap_extra_args"],
)
kubelet_extra_args = lookup(
var.worker_groups_launch_template[count.index],
"kubelet_extra_args",
local.workers_group_defaults["kubelet_extra_args"],
)
},
lookup(
var.worker_groups_launch_template[count.index],
"userdata_template_extra_args",
local.workers_group_defaults["userdata_template_extra_args"]
)
)
}
data "aws_iam_role" "custom_cluster_iam_role" {
count = var.manage_cluster_iam_resources ? 0 : 1
name = var.cluster_iam_role_name
...
# If you have used ASGs before, that role was already auto-created and you need to import it into the TF state
resource "aws_iam_service_linked_role" "autoscaling" {
aws_service_name = "autoscaling.amazonaws.com"
description = "Default Service-Linked Role enables access to AWS Services and Resources used or managed by Auto Scaling"
}
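# If the role already exists, importing it into state might look like this (a hedged sketch; the account ID is a placeholder):
# terraform import aws_iam_service_linked_role.autoscaling \
#   arn:aws:iam::<ACCOUNT_ID>:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling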
data "aws_caller_identity" "current" {}
# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to encrypt, decrypt, and attach encrypted EBS volumes
data "aws_iam_policy_document" "ebs_decryption" {
# Copy of default KMS policy that lets you manage it
statement {
sid = "Enable IAM User Permissions"
effect = "Allow"
principals {
type = "AWS"
identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
}
actions = [
"kms:*"
]
resources = ["*"]
}
# Required for EKS
statement {
sid = "Allow service-linked role use of the CMK"
effect = "Allow"
principals {
type = "AWS"
identifiers = [
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
]
}
actions = [
"kms:Encrypt",
"kms:Decrypt",
"kms:ReEncrypt*",
"kms:GenerateDataKey*",
"kms:DescribeKey"
]
resources = ["*"]
}
statement {
sid = "Allow attachment of persistent resources"
effect = "Allow"
principals {
type = "AWS"
identifiers = [
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
]
}
actions = [
"kms:CreateGrant"
]
resources = ["*"]
condition {
test = "Bool"
variable = "kms:GrantIsForAWSResource"
values = ["true"]
}
}
}
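# A hedged sketch (not part of the original files) of attaching the document above to a CMK,
# as the launch-template comments below suggest; the resource name and description are illustrative:
resource "aws_kms_key" "ebs" {
  description = "CMK for EKS node root volumes (hypothetical)"
  policy      = data.aws_iam_policy_document.ebs_decryption.json
}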
data "template_file" "launch_template_userdata" {
template = file("${path.module}/templates/userdata.sh.tpl")
vars = {
cluster_name = local.cluster_name
endpoint = module.eks.cluster_endpoint
cluster_auth_base64 = module.eks.cluster_certificate_authority_data
bootstrap_extra_args = ""
kubelet_extra_args = ""
}
}
# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
# There are several more options you could set, but you probably don't need to modify them.
# You can take the default and add your custom AMI and/or custom tags.
#
# Trivia: AWS transparently creates a copy of your LaunchTemplate and then actually uses that copy for the node group. If you DON'T use a custom AMI,
# the default user-data for bootstrapping the cluster is merged into that copy.
resource "aws_launch_template" "default" {
name_prefix = "eks-example-"
description = "Default Launch-Template"
update_default_version = true
block_device_mappings {
device_name = "/dev/xvda"
ebs {
volume_size = 100
volume_type = "gp2"
delete_on_termination = true
# encrypted = true
# Enable this if you want to encrypt your node root volumes with a KMS/CMK. Encryption of PVCs is handled via the k8s StorageClass, though.
# You then also need to attach data.aws_iam_policy_document.ebs_decryption.json from disk_encryption_policy.tf to the KMS/CMK key!
# kms_key_id = var.kms_key_arn
}
}
instance_type = var.instance_type
monitoring {
enabled = true
}
network_interfaces {
associate_public_ip_address = false
delete_on_termination = true
security_groups = [module.eks.worker_security_group_id]
}
# if you want to use a custom AMI
# image_id = var.ami_id
# If you use a custom AMI, you need to supply the bootstrap script via user-data, as EKS DOESN'T merge its managed user-data in that case.
# You can add more than the minimum code you see in the template, e.g. install the SSM agent; see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
#
# (Optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151; a hedged sketch follows this resource.)
# user_data = base64encode(
# data.template_file.launch_template_userdata.rendered,
# )
# Supplying custom tags to EKS instances is another use-case for LaunchTemplates
tag_specifications {
resource_type = "instance"
tags = {
CustomTag = "EKS example"
}
}
# Supplying custom tags to EKS instances' root volumes is another use-case for LaunchTemplates. (This doesn't add tags to volumes dynamically provisioned via PVCs, though.)
tag_specifications {
resource_type = "volume"
tags = {
CustomTag = "EKS example"
}
}
# Tag the LT itself
tags = {
CustomTag = "EKS example"
}
lifecycle {
create_before_destroy = true
}
}
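# As the comment in the resource above mentions, the userdata could instead be rendered with the
# hashicorp/cloudinit provider. A hedged sketch, assuming that provider is configured; the data
# source name and the values passed are illustrative:
data "cloudinit_config" "userdata" {
  gzip          = false
  base64_encode = true

  part {
    content_type = "text/x-shellscript"
    content = templatefile("${path.module}/templates/userdata.sh.tpl", {
      cluster_name         = local.cluster_name
      endpoint             = module.eks.cluster_endpoint
      cluster_auth_base64  = module.eks.cluster_certificate_authority_data
      bootstrap_extra_args = ""
      kubelet_extra_args   = ""
    })
  }
}
# Since base64_encode is true, rendered is already base64-encoded, so the launch template would use:
# user_data = data.cloudinit_config.userdata.rendered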
terraform {
required_version = ">= 0.12.9"
}
provider "aws" {
version = ">= 3.3.0"
region = var.region
}
provider "random" {
version = "~> 2.1"
}
provider "local" {
version = "~>1.4"
}
provider "null" {
version = "~> 2.1"
}
provider "template" {
version = "~> 2.1"
}
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}
provider "kubernetes" {
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
token = data.aws_eks_cluster_auth.cluster.token
load_config_file = false
version = "~> 1.11"
}
data "aws_availability_zones" "available" {
}
locals {
cluster_name = "test-eks-lt-${random_string.suffix.result}"
}
resource "random_string" "suffix" {
length = 8
special = false
}
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "2.47.0"
name = "test-vpc"
cidr = "172.16.0.0/16"
azs = data.aws_availability_zones.available.names
private_subnets = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"]
public_subnets = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
private_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared" # EKS adds this and TF would want to remove then later
}
}
module "eks" {
source = "../.."
cluster_name = local.cluster_name
cluster_version = "1.17"
subnets = module.vpc.private_subnets
vpc_id = module.vpc.vpc_id
node_groups = {
example = {
desired_capacity = 1
max_capacity = 15
min_capacity = 1
launch_template_id = aws_launch_template.default.id
launch_template_version = aws_launch_template.default.default_version
additional_tags = {
CustomTag = "EKS example"
}
}
}
}
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="//"
--//
Content-Type: text/x-shellscript; charset="us-ascii"
#!/bin/bash
set -e
# Bootstrap and join the cluster
/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args "${kubelet_extra_args}" '${cluster_name}'
--//--
variable "region" {
default = "eu-central-1"
}
variable "instance_type" {
# Smallest recommended instance type, where ~1.1 GB of the 2 GB of memory is available for Kubernetes pods after "warming up" Docker, Kubelet, and the OS
default = "t3.small"
type = string
}
variable "kms_key_arn" {
default = ""
description = "KMS key ARN to use if you want to encrypt EKS node root volumes"
type = string
}
@@ -97,13 +97,6 @@ output "workers_asg_names" {
)
}
output "workers_user_data" {
description = "User data of worker groups"
value = concat(
data.template_file.userdata.*.rendered,
data.template_file.launch_template_userdata.*.rendered,
)
}
output "workers_default_ami_id" {
description = "ID of the default worker group AMI"
...
@@ -5,7 +5,6 @@ terraform {
aws = ">= 3.22.0"
local = ">= 1.4"
null = ">= 2.1"
- template = ">= 2.1"
random = ">= 2.1"
kubernetes = ">= 1.11.1"
}
...
@@ -195,7 +195,49 @@ resource "aws_launch_configuration" "workers" {
"key_name",
local.workers_group_defaults["key_name"],
)
- user_data_base64 = base64encode(data.template_file.userdata.*.rendered[count.index])
+ user_data_base64 = base64encode(
+ templatefile(
+ lookup(
+ var.worker_groups[count.index],
+ "userdata_template_file",
+ lookup(var.worker_groups[count.index], "platform", local.workers_group_defaults["platform"]) == "windows"
+ ? "${path.module}/templates/userdata_windows.tpl"
+ : "${path.module}/templates/userdata.sh.tpl"
+ ),
+ merge({
+ platform = lookup(var.worker_groups[count.index], "platform", local.workers_group_defaults["platform"])
+ cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
+ endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
+ cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
+ pre_userdata = lookup(
+ var.worker_groups[count.index],
+ "pre_userdata",
+ local.workers_group_defaults["pre_userdata"],
+ )
+ additional_userdata = lookup(
+ var.worker_groups[count.index],
+ "additional_userdata",
+ local.workers_group_defaults["additional_userdata"],
+ )
+ bootstrap_extra_args = lookup(
+ var.worker_groups[count.index],
+ "bootstrap_extra_args",
+ local.workers_group_defaults["bootstrap_extra_args"],
+ )
+ kubelet_extra_args = lookup(
+ var.worker_groups[count.index],
+ "kubelet_extra_args",
+ local.workers_group_defaults["kubelet_extra_args"],
+ )
+ },
+ lookup(
+ var.worker_groups[count.index],
+ "userdata_template_extra_args",
+ local.workers_group_defaults["userdata_template_extra_args"]
+ )
+ )
+ )
+ )
ebs_optimized = lookup(
var.worker_groups[count.index],
"ebs_optimized",
...
@@ -296,7 +296,47 @@ resource "aws_launch_template" "workers_launch_template" {
local.workers_group_defaults["key_name"],
)
user_data = base64encode(
- data.template_file.launch_template_userdata.*.rendered[count.index],
+ templatefile(
+ lookup(
+ var.worker_groups_launch_template[count.index],
+ "userdata_template_file",
+ lookup(var.worker_groups_launch_template[count.index], "platform", local.workers_group_defaults["platform"]) == "windows"
+ ? "${path.module}/templates/userdata_windows.tpl"
+ : "${path.module}/templates/userdata.sh.tpl"
+ ),
+ merge({
+ platform = lookup(var.worker_groups_launch_template[count.index], "platform", local.workers_group_defaults["platform"])
+ cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
+ endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
+ cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
+ pre_userdata = lookup(
+ var.worker_groups_launch_template[count.index],
+ "pre_userdata",
+ local.workers_group_defaults["pre_userdata"],
+ )
+ additional_userdata = lookup(
+ var.worker_groups_launch_template[count.index],
+ "additional_userdata",
+ local.workers_group_defaults["additional_userdata"],
+ )
+ bootstrap_extra_args = lookup(
+ var.worker_groups_launch_template[count.index],
+ "bootstrap_extra_args",
+ local.workers_group_defaults["bootstrap_extra_args"],
+ )
+ kubelet_extra_args = lookup(
+ var.worker_groups_launch_template[count.index],
+ "kubelet_extra_args",
+ local.workers_group_defaults["kubelet_extra_args"],
+ )
+ },
+ lookup(
+ var.worker_groups_launch_template[count.index],
+ "userdata_template_extra_args",
+ local.workers_group_defaults["userdata_template_extra_args"]
+ )
+ )
+ )
)
ebs_optimized = lookup(
...