Refactor Terraform to have all ports in a list (#2409)
* terraform: aws refactoring

* terraform: gcp refactoring

* terraform: azure refactoring
3u13r authored Oct 5, 2023
1 parent f69ae26 commit 1452e64
Showing 5 changed files with 137 additions and 294 deletions.
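
The same pattern is applied to each provider: the per-port locals (ports_kubernetes, ports_bootstrapper, and so on) and the hand-copied per-port resources collapse into a single list of port objects that drives both the firewall rules (via a dynamic block) and the load-balancer targets (via for_each). A minimal sketch of the pattern, using illustrative names and a hypothetical ./modules/port_target module rather than the repository's real modules:

variable "debug" {
  type    = bool
  default = false
}

variable "vpc_id" {
  type = string
}

locals {
  # every externally reachable port in one place; debugd only exists in debug mode
  ports = flatten([
    { name = "kubernetes", port = "6443" },
    { name = "bootstrapper", port = "9000" },
    var.debug ? [{ name = "debugd", port = "4000" }] : [],
  ])
}

# one dynamic ingress block replaces one hand-written rule per port
resource "aws_security_group" "example" {
  name   = "example"
  vpc_id = var.vpc_id

  dynamic "ingress" {
    for_each = local.ports
    content {
      description = ingress.value.name
      from_port   = ingress.value.port
      to_port     = ingress.value.port
      protocol    = "tcp"
      cidr_blocks = ["0.0.0.0/0"]
    }
  }
}

# one module instance per list entry, keyed by port name
module "targets" {
  for_each = { for p in local.ports : p.name => p }
  source   = "./modules/port_target" # hypothetical module
  name     = each.value.name
  port     = each.value.port
}

Adding or removing a port then means editing the list once instead of touching a security-group rule, a target module, and the locals separately.
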
193 changes: 67 additions & 126 deletions cli/internal/terraform/terraform/aws/main.tf
@@ -17,27 +17,23 @@ provider "aws" {
}

locals {
uid = random_id.uid.hex
name = "${var.name}-${local.uid}"
initSecretHash = random_password.initSecret.bcrypt_hash
ports_node_range = "30000-32767"
ports_kubernetes = "6443"
ports_bootstrapper = "9000"
ports_konnectivity = "8132"
ports_verify = "30081"
ports_recovery = "9999"
ports_debugd = "4000"
ports_join = "30090"
uid = random_id.uid.hex
name = "${var.name}-${local.uid}"
initSecretHash = random_password.initSecret.bcrypt_hash
ports_node_range = "30000-32767"
load_balancer_ports = flatten([
{ name = "kubernetes", port = "6443", health_check = "HTTPS" },
{ name = "bootstrapper", port = "9000", health_check = "TCP" },
{ name = "verify", port = "30081", health_check = "TCP" },
{ name = "konnectivity", port = "8132", health_check = "TCP" },
{ name = "recovery", port = "9999", health_check = "TCP" },
{ name = "join", port = "30090", health_check = "TCP" },
var.debug ? [{ name = "debugd", port = "4000", health_check = "TCP" }] : [],
])
target_group_arns = {
control-plane : flatten([
module.load_balancer_target_bootstrapper.target_group_arn,
module.load_balancer_target_kubernetes.target_group_arn,
module.load_balancer_target_verify.target_group_arn,
module.load_balancer_target_recovery.target_group_arn,
module.load_balancer_target_konnectivity.target_group_arn,
module.load_balancer_target_join.target_group_arn,
var.debug ? [module.load_balancer_target_debugd[0].target_group_arn] : [],
])
control-plane : [
for port in local.load_balancer_ports : module.load_balancer_targets[port.name].target_group_arn
]
worker : []
}
iam_instance_profile = {
@@ -142,36 +138,15 @@ resource "aws_security_group" "security_group" {
description = "K8s node ports"
}

ingress {
from_port = local.ports_bootstrapper
to_port = local.ports_bootstrapper
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "bootstrapper"
}

ingress {
from_port = local.ports_kubernetes
to_port = local.ports_kubernetes
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "kubernetes"
}

ingress {
from_port = local.ports_konnectivity
to_port = local.ports_konnectivity
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "konnectivity"
}

ingress {
from_port = local.ports_recovery
to_port = local.ports_recovery
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "recovery"
dynamic "ingress" {
for_each = local.load_balancer_ports
content {
description = ingress.value.name
from_port = ingress.value.port
to_port = ingress.value.port
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
}

ingress {
@@ -182,16 +157,6 @@ resource "aws_security_group" "security_group" {
description = "allow all internal"
}

dynamic "ingress" {
for_each = var.debug ? [1] : []
content {
from_port = local.ports_debugd
to_port = local.ports_debugd
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
description = "debugd"
}
}
}

resource "aws_cloudwatch_log_group" "log_group" {
@@ -200,76 +165,16 @@ resource "aws_cloudwatch_log_group" "log_group" {
tags = local.tags
}

module "load_balancer_target_bootstrapper" {
source = "./modules/load_balancer_target"
name = "${local.name}-bootstrapper"
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_bootstrapper
tags = local.tags
healthcheck_protocol = "TCP"
}

module "load_balancer_target_kubernetes" {
module "load_balancer_targets" {
for_each = { for port in local.load_balancer_ports : port.name => port }
source = "./modules/load_balancer_target"
name = "${local.name}-kubernetes"
name = "${local.name}-${each.value.name}"
port = each.value.port
healthcheck_protocol = each.value.health_check
healthcheck_path = each.value.name == "kubernetes" ? "/readyz" : ""
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_kubernetes
tags = local.tags
healthcheck_protocol = "HTTPS"
healthcheck_path = "/readyz"
}

module "load_balancer_target_verify" {
source = "./modules/load_balancer_target"
name = "${local.name}-verify"
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_verify
tags = local.tags
healthcheck_protocol = "TCP"
}

module "load_balancer_target_recovery" {
source = "./modules/load_balancer_target"
name = "${local.name}-recovery"
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_recovery
tags = local.tags
healthcheck_protocol = "TCP"
}

module "load_balancer_target_debugd" {
count = var.debug ? 1 : 0 // only deploy debugd in debug mode
source = "./modules/load_balancer_target"
name = "${local.name}-debugd"
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_debugd
tags = local.tags
healthcheck_protocol = "TCP"
}

module "load_balancer_target_konnectivity" {
source = "./modules/load_balancer_target"
name = "${local.name}-konnectivity"
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_konnectivity
tags = local.tags
healthcheck_protocol = "TCP"
}

module "load_balancer_target_join" {
source = "./modules/load_balancer_target"
name = "${local.name}-join"
vpc_id = aws_vpc.vpc.id
lb_arn = aws_lb.front_end.arn
port = local.ports_join
tags = local.tags
healthcheck_protocol = "TCP"
}

module "instance_group" {
@@ -300,3 +205,39 @@ module "instance_group" {
{ "kubernetes.io/cluster/${local.name}" = "owned" }
)
}

# TODO(31u3r): Remove once 2.12 is released
moved {
from = module.load_balancer_target_konnectivity
to = module.load_balancer_targets["konnectivity"]
}

moved {
from = module.load_balancer_target_verify
to = module.load_balancer_targets["verify"]
}

moved {
from = module.load_balancer_target_recovery
to = module.load_balancer_targets["recovery"]
}

moved {
from = module.load_balancer_target_join
to = module.load_balancer_targets["join"]
}

moved {
from = module.load_balancer_target_debugd[0]
to = module.load_balancer_targets["debugd"]
}

moved {
from = module.load_balancer_target_kubernetes
to = module.load_balancer_targets["kubernetes"]
}

moved {
from = module.load_balancer_target_bootstrapper
to = module.load_balancer_targets["bootstrapper"]
}
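
These moved blocks map the old per-port module addresses onto the new for_each keys so that an upgrade refactors the state in place instead of destroying and recreating the target groups; they are roughly the declarative equivalent of running terraform state mv 'module.load_balancer_target_kubernetes' 'module.load_balancer_targets["kubernetes"]' (and likewise for each other target) once during the upgrade. The debugd entry moves from index [0] because the old module was toggled with count.
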
97 changes: 22 additions & 75 deletions cli/internal/terraform/terraform/azure/main.tf
@@ -27,15 +27,17 @@ locals {
constellation-uid = local.uid,
}
ports_node_range = "30000-32767"
ports_kubernetes = "6443"
ports_bootstrapper = "9000"
ports_konnectivity = "8132"
ports_verify = "30081"
ports_recovery = "9999"
ports_join = "30090"
ports_debugd = "4000"
cidr_vpc_subnet_nodes = "192.168.178.0/24"
cidr_vpc_subnet_pods = "10.10.0.0/16"
ports = flatten([
{ name = "kubernetes", port = "6443", health_check_protocol = "Https", path = "/readyz", priority = 100 },
{ name = "bootstrapper", port = "9000", health_check_protocol = "Tcp", path = null, priority = 101 },
{ name = "verify", port = "30081", health_check_protocol = "Tcp", path = null, priority = 102 },
{ name = "konnectivity", port = "8132", health_check_protocol = "Tcp", path = null, priority = 103 },
{ name = "recovery", port = "9999", health_check_protocol = "Tcp", path = null, priority = 104 },
{ name = "join", port = "30090", health_check_protocol = "Tcp", path = null, priority = 105 },
var.debug ? [{ name = "debugd", port = "4000", health_check_protocol = "Tcp", path = null, priority = 106 }] : [],
])
// wildcard_lb_dns_name is the DNS name of the load balancer with a wildcard for the name.
// example: given "name-1234567890.location.cloudapp.azure.com" it will return "*.location.cloudapp.azure.com"
wildcard_lb_dns_name = replace(data.azurerm_public_ip.loadbalancer_ip.fqdn, "/^[^.]*\\./", "*.")
@@ -150,60 +152,19 @@ resource "azurerm_lb" "loadbalancer" {
module "loadbalancer_backend_control_plane" {
source = "./modules/load_balancer_backend"

name = "${local.name}-control-plane"
loadbalancer_id = azurerm_lb.loadbalancer.id
ports = flatten([
{
name = "bootstrapper",
port = local.ports_bootstrapper,
protocol = "Tcp",
path = null
},
{
name = "kubernetes",
port = local.ports_kubernetes,
protocol = "Https",
path = "/readyz"
},
{
name = "konnectivity",
port = local.ports_konnectivity,
protocol = "Tcp",
path = null
},
{
name = "verify",
port = local.ports_verify,
protocol = "Tcp",
path = null
},
{
name = "recovery",
port = local.ports_recovery,
protocol = "Tcp",
path = null
},
{
name = "join",
port = local.ports_join,
protocol = "Tcp",
path = null
},
var.debug ? [{
name = "debugd",
port = local.ports_debugd,
protocol = "Tcp",
path = null
}] : [],
])
name = "${local.name}-control-plane"
loadbalancer_id = azurerm_lb.loadbalancer.id
frontend_ip_configuration_name = azurerm_lb.loadbalancer.frontend_ip_configuration[0].name
ports = local.ports
}

module "loadbalancer_backend_worker" {
source = "./modules/load_balancer_backend"

name = "${local.name}-worker"
loadbalancer_id = azurerm_lb.loadbalancer.id
ports = []
name = "${local.name}-worker"
loadbalancer_id = azurerm_lb.loadbalancer.id
frontend_ip_configuration_name = azurerm_lb.loadbalancer.frontend_ip_configuration[0].name
ports = []
}

resource "azurerm_lb_backend_address_pool" "all" {
Expand Down Expand Up @@ -233,23 +194,18 @@ resource "azurerm_network_security_group" "security_group" {
tags = local.tags

dynamic "security_rule" {
for_each = flatten([
{ name = "noderange", priority = 100, dest_port_range = local.ports_node_range },
{ name = "kubernetes", priority = 101, dest_port_range = local.ports_kubernetes },
{ name = "bootstrapper", priority = 102, dest_port_range = local.ports_bootstrapper },
{ name = "konnectivity", priority = 103, dest_port_range = local.ports_konnectivity },
{ name = "join", priority = 104, dest_port_range = local.ports_recovery },
{ name = "recovery", priority = 105, dest_port_range = local.ports_join },
var.debug ? [{ name = "debugd", priority = 106, dest_port_range = local.ports_debugd }] : [],
])
for_each = concat(
local.ports,
[{ name = "nodeports", port = local.ports_node_range, priority = 200 }]
)
content {
name = security_rule.value.name
priority = security_rule.value.priority
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = security_rule.value.dest_port_range
destination_port_range = security_rule.value.port
source_address_prefix = "*"
destination_address_prefix = "*"
}
@@ -298,12 +254,3 @@ data "azurerm_user_assigned_identity" "uaid" {
resource_group_name = local.uai_resource_group
}

moved {
from = module.scale_set_control_plane
to = module.scale_set_group["control_plane_default"]
}

moved {
from = module.scale_set_worker
to = module.scale_set_group["worker_default"]
}
@@ -18,7 +18,7 @@ resource "azurerm_lb_probe" "health_probes" {
loadbalancer_id = var.loadbalancer_id
name = each.value.name
port = each.value.port
protocol = each.value.protocol
protocol = each.value.health_check_protocol
request_path = each.value.path
interval_in_seconds = 5
}
@@ -31,7 +31,7 @@ resource "azurerm_lb_rule" "rules" {
protocol = "Tcp"
frontend_port = each.value.port
backend_port = each.value.port
frontend_ip_configuration_name = "PublicIPAddress"
frontend_ip_configuration_name = var.frontend_ip_configuration_name
backend_address_pool_ids = [azurerm_lb_backend_address_pool.backend_pool.id]
probe_id = each.value.id
disable_outbound_snat = true
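
Both Azure backend module calls above now pass frontend_ip_configuration_name explicitly instead of relying on the hard-coded "PublicIPAddress" name, and the probes read health_check_protocol from the shared ports list. The load_balancer_backend module therefore presumably gains a matching input variable; an assumed declaration, not shown in this diff, would look roughly like:

variable "frontend_ip_configuration_name" {
  type        = string
  description = "Name of the load balancer frontend IP configuration that the LB rules attach to"
}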