From 9aef1b35f92c238a5c746db5ed8cded3b1d16e67 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Wed, 11 Sep 2024 19:58:04 +0200 Subject: [PATCH 01/51] feat: add eks cluster gha --- .github/actions/eks-manage-cluster/action.yml | 209 ++++++++++++++++++ .../workflows/test-gha-eks-manage-cluster.yml | 123 +++++++++++ 2 files changed, 332 insertions(+) create mode 100644 .github/actions/eks-manage-cluster/action.yml create mode 100644 .github/workflows/test-gha-eks-manage-cluster.yml diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml new file mode 100644 index 00000000..d0f9ffef --- /dev/null +++ b/.github/actions/eks-manage-cluster/action.yml @@ -0,0 +1,209 @@ +name: Deploy or Destroy EKS Cluster + +description: | + This GitHub Action automates the deployment or destruction of an EKS (Amazon Elastic Kubernetes Service) cluster using Terraform. + This action will also install Terraform, awscli, and kubectl. The kube context will be set on the created cluster. 
+ +inputs: + action: + description: 'Specify the action to perform: "create" or "destroy" the EKS cluster' + required: true + default: 'create' + aws-region: + description: 'AWS region where the EKS cluster will be deployed' + required: true + cluster-name: + description: 'Name of the EKS cluster to deploy' + required: true + kubernetes-version: + description: 'Version of Kubernetes to use for the EKS cluster' + required: true + # renovate: datasource=endoflife-date depName=amazon-eks versioning=semver + default: "1.30" + node-instance-types: + description: 'Instance types for the EKS node group' + required: true + # TODO: add spot + default: '["t2.micro"]' + node-desired-count: + description: 'Desired number of nodes in the EKS node group' + required: true + default: "4" + node-min-count: + description: 'Minimum number of nodes in the EKS node group' + required: true + default: "1" + node-max-count: + description: 'Maximum number of nodes in the EKS node group' + required: true + default: "10" + s3-backend-bucket: + description: 'Name of the S3 bucket to store Terraform state' + required: true + s3-bucket-region: + description: 'Region of the bucket containing the resources states, if not set, will fallback on aws-region' + tf-modules-revision: + description: 'Git revision of the tf modules to use' + default: 'main' + required: true + tf-modules-path: + description: 'Path where the tf EKS modules will be cloned' + default: './.action-tf-modules/eks/' + required: true + login: + description: 'Authenticate the current kube context on the created cluster' + default: "true" + required: true + + # inherited from https://github.com/hashicorp/setup-terraform/blob/main/action.yml + tf-cli-config-credentials-hostname: + description: 'The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. Defaults to `app.terraform.io`.' 
+ default: 'app.terraform.io' + required: false + tf-cli-config-credentials-token: + description: 'The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file.' + required: false + tf-terraform-version: + description: 'The version of Terraform CLI to install. Instead of full version string you can also specify constraint string starting with "<" (for example `<1.13.0`) to install the latest version satisfying the constraint. A value of `latest` will install the latest version of Terraform CLI. Defaults to `latest`.' + default: 'latest' + required: false + tf-terraform-wrapper: + description: 'Whether or not to install a wrapper to wrap subsequent calls of the `terraform` binary and expose its STDOUT, STDERR, and exit code as outputs named `stdout`, `stderr`, and `exitcode` respectively. Defaults to `true`.' + default: 'true' + required: false + awscli-version: + description: 'Version of the aws cli to use' + required: true + # renovate: datasource=github-releases depName=aws/aws-cli + default: "2.15.52" + +outputs: + eks-cluster-endpoint: + description: 'The API endpoint of the deployed EKS cluster' + value: ${{ steps.cluster_info.outputs.cluster_endpoint }} + + eks-cluster-id: + description: 'The ID of the deployed EKS cluster' + value: ${{ steps.apply.outputs.cluster_id }} + + terraform-state-url: + description: 'URL of the Terraform state file in the S3 bucket' + value: ${{ steps.set-terraform-variables.outputs.terraform-state-url }} + + # Add all terraform outputs dynamically + all-terraform-outputs: + description: 'All outputs from Terraform' + value: ${{ steps.fetch_outputs.outputs.all_terraform_outputs }} + +runs: + using: 'composite' + steps: + - name: Install Terraform + uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3 + with: + cli_config_credentials_hostname: ${{ inputs.tf-cli-config-credentials-hostname }} + cli_config_credentials_token: ${{ 
inputs.tf-cli-config-credentials-token }} + terraform_version: ${{ inputs.tf-terraform-version }} + terraform_wrapper: ${{ inputs.tf-terraform-wrapper }} + + - name: Install AWS CLI + shell: bash + run: | + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-${{ inputs.awscli-version }}.zip" -o "awscliv2.zip" + unzip -qq awscliv2.zip + sudo ./aws/install + rm -f awscliv2.zip + + - name: Install kubectl + shell: bash + run: | + curl -LO "https://dl.k8s.io/release/v${{ inputs.kubernetes-version }}/bin/linux/amd64/kubectl" + chmod +x ./kubectl + sudo mv ./kubectl /usr/local/bin/kubectl + kubectl version --client + + - name: Set Terraform variables + shell: bash + id: set-terraform-variables + run: | + export TFSTATE_BUCKET="${{ inputs.s3-backend-bucket }}" + export TFSTATE_KEY="terraform/${{ inputs.cluster-name }}/gha/eks-cluster/terraform.tfstate" + + if [ -z "${{ inputs.s3-bucket-region }}" ]; then + export TFSTATE_REGION="${{ inputs.aws-region }}" + else + export TFSTATE_REGION="${{ inputs.s3-bucket-region }}" + fi + + echo "TFSTATE_BUCKET=${TFSTATE_BUCKET}" >> "$GITHUB_OUTPUT" + echo "TFSTATE_REGION=${TFSTATE_REGION}" >> "$GITHUB_OUTPUT" + echo "TFSTATE_KEY=${TFSTATE_KEY}" >> "$GITHUB_OUTPUT" + + terraform_state_url="s3://${TFSTATE_BUCKET}/${TFSTATE_KEY}" + echo "terraform-state-url=${terraform_state_url}" >> "$GITHUB_OUTPUT" + + - name: Check if S3 bucket exists + id: create-s3-bucket + shell: bash + run: | + if aws s3api head-bucket --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} 2>/dev/null; then + echo "Bucket already exists" + else + echo "Bucket does not exist, creating..." 
+ aws s3api create-bucket --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} --create-bucket-configuration LocationConstraint=${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} + fi + + aws s3api put-public-access-block --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} --public-access-block-configuration "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true" + + - name: Checkout Repository EKS modules + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + with: + repository: "camunda/camunda-tf-eks-module" + ref: ${{ inputs.tf-modules-revision }} + path: ${{ inputs.tf-modules-path }} + fetch-depth: 0 + + - name: Terraform Init + shell: bash + id: init + working-directory: "${{ inputs.tf-modules-path }}/modules/eks-cluster/" + run: | + terraform version + terraform init -backend-config="bucket=${{ steps.set-terraform-variables.outputs.TFSTATE_BUCKET }}" -backend-config="key=${{ steps.set-terraform-variables.outputs.TFSTATE_KEY }}" -backend-config="region=${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }}" + terraform validate -no-color + + - name: Terraform Plan + shell: bash + id: plan + working-directory: "${{ inputs.tf-modules-path }}/modules/eks-cluster/" + run: | + terraform plan -no-color -out eks.plan -var "name=${{ inputs.cluster-name }}" -var "region=${{ inputs.aws-region }}" -var "kubernetes_version=${{ inputs.kubernetes-version }}" -var "node_instance_types=${{ inputs.node-instance-types }}" -var "node_desired_count=${{ inputs.node-desired-count }}" -var "node_min_count=${{ inputs.node-min-count }}" -var "node_max_count=${{ inputs.node-max-count }}" + + - name: Terraform Apply or Destroy + shell: bash + id: apply-or-destroy + working-directory: "${{ inputs.tf-modules-path }}/modules/eks-cluster/" + run: | + if [ "${{ inputs.action }}" == "create" ]; then + 
terraform apply -no-color eks.plan + export cluster_endpoint="$(terraform output -raw cluster_endpoint)" + echo "cluster_endpoint=$cluster_endpoint" >> "$GITHUB_OUTPUT" + elif [ "${{ inputs.action }}" == "destroy" ]; then + terraform destroy -no-color -auto-approve + else + echo "Invalid action. Please specify 'create' or 'destroy'." + exit 1 + fi + + - name: Configure kubectl + shell: bash + id: kube_config + if: inputs.login == 'true' && inputs.action == 'create' + run: | + aws eks --region ${{ inputs.aws-region }} update-kubeconfig --name ${{ inputs.cluster-name }} + + - name: Output Kube Config + shell: bash + if: inputs.login == 'true' && inputs.action == 'create' + run: | + kubectl config view diff --git a/.github/workflows/test-gha-eks-manage-cluster.yml b/.github/workflows/test-gha-eks-manage-cluster.yml new file mode 100644 index 00000000..42f23e92 --- /dev/null +++ b/.github/workflows/test-gha-eks-manage-cluster.yml @@ -0,0 +1,123 @@ +--- +name: EKS Cluster creation and destruction test + +on: + schedule: + - cron: '0 1 * * 1' # At 01:00 on Monday. + + workflow_dispatch: + inputs: + cluster_name: + description: "Cluster name." + required: false + type: string + delete_cluster: + description: "Whether to delete the cluster." 
+ required: false + type: boolean + default: true + + pull_request: + # the paths should be synced with ../labeler.yml + paths: + - modules/fixtures/**/*.tf + - modules/fixtures/**/*.tfvars + - modules/**.tf + - .tool-versions + - .github/workflows/test-gha-eks-manage-cluster.yml + - .github/actions/eks-manage-cluster/*.yml + - justfile + +# limit to a single execution per actor of this workflow +concurrency: + group: "${{ github.workflow }}-${{ github.ref }}" + cancel-in-progress: true + +env: + + AWS_PROFILE: "infex" + AWS_REGION: "eu-west-2" # /!\ always use one of the available test region https://github.com/camunda/infraex-common-config + + TF_STATE_BUCKET: "tests-eks-tf-state-eu-central-1" + TF_STATE_BUCKET_REGION: "eu-central-1" + +jobs: + action-test: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + fetch-depth: 0 + + - name: Install tooling using asdf + uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 + + - name: Get Cluster Info + id: commit_info + run: | + if [[ -n "${{ github.event.inputs.cluster_name }}" ]]; then + cluster_name="${{ github.event.inputs.cluster_name }}" + else + cluster_name="cl-$(git rev-parse --short HEAD)" + fi + echo "cluster_name=$cluster_name" >> "$GITHUB_OUTPUT" + + # Get the current commit hash for the modules revision + tf_modules_revision=$(git rev-parse HEAD) + + echo "cluster_name=$cluster_name" >> "$GITHUB_OUTPUT" + echo "tf_modules_revision=$tf_modules_revision" >> "$GITHUB_OUTPUT" + + - name: Import Secrets + id: secrets + uses: hashicorp/vault-action@v3 + with: + url: ${{ secrets.VAULT_ADDR }} + method: approle + roleId: ${{ secrets.VAULT_ROLE_ID }} + secretId: ${{ secrets.VAULT_SECRET_ID }} + exportEnv: false + secrets: | + secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; + secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; + + - name: Add profile credentials to 
~/.aws/credentials + run: | + aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} + + - name: Create Cluster + timeout-minutes: 125 + uses: ./.github/actions/eks-manage-cluster + id: create_cluster + with: + action: "create" + cluster-name: ${{ steps.commit_info.outputs.cluster_name }} + aws-region: ${{ env.AWS_REGION }} + s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} + s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} + tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} + + - name: Delete Cluster + timeout-minutes: 125 + if: always() && !(github.event_name == 'workflow_dispatch' && github.event.inputs.delete_cluster == 'false') + uses: ./.github/actions/eks-manage-cluster + with: + action: "delete" + cluster-name: ${{ steps.commit_info.outputs.cluster_name }} + aws-region: ${{ env.AWS_REGION }} + s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} + s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} + tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} + + - name: Notify in Slack in case of failure + id: slack-notification + if: failure() && github.event_name == 'schedule' + uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@main + with: + vault_addr: ${{ secrets.VAULT_ADDR }} + vault_role_id: ${{ secrets.VAULT_ROLE_ID }} + vault_secret_id: ${{ secrets.VAULT_SECRET_ID }} From afd850863b4e7bbe015d898b3766190fa31df513 Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Wed, 11 Sep 2024 20:04:17 +0200 Subject: [PATCH 02/51] don't install if already installed --- .github/actions/eks-manage-cluster/action.yml | 26 +++++++++++++------ 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index d0f9ffef..45ce4b3b 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -109,18 +109,28 @@ runs: - name: Install AWS CLI shell: bash run: | - curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-${{ inputs.awscli-version }}.zip" -o "awscliv2.zip" - unzip -qq awscliv2.zip - sudo ./aws/install - rm -f awscliv2.zip + if ! command -v aws &> /dev/null; then + echo "AWS CLI not found, installing..." + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-${{ inputs.awscli-version }}.zip" -o "awscliv2.zip" + unzip -qq awscliv2.zip + sudo ./aws/install + rm -f awscliv2.zip + else + echo "Warning: AWS CLI is already installed." + fi - name: Install kubectl shell: bash run: | - curl -LO "https://dl.k8s.io/release/v${{ inputs.kubernetes-version }}/bin/linux/amd64/kubectl" - chmod +x ./kubectl - sudo mv ./kubectl /usr/local/bin/kubectl - kubectl version --client + if ! command -v kubectl &> /dev/null; then + echo "kubectl not found, installing..." + curl -LO "https://dl.k8s.io/release/v${{ inputs.kubernetes-version }}/bin/linux/amd64/kubectl" + chmod +x ./kubectl + sudo mv ./kubectl /usr/local/bin/kubectl + kubectl version --client + else + echo "Warning: kubectl is already installed." + fi - name: Set Terraform variables shell: bash From 5cee55c175e599f0b88ac3d795f21d41d8ee6c18 Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Wed, 11 Sep 2024 20:11:00 +0200 Subject: [PATCH 03/51] add some params --- .github/actions/eks-manage-cluster/action.yml | 31 +++++++++++++++++-- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index 45ce4b3b..51e29c31 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -23,8 +23,23 @@ inputs: node-instance-types: description: 'Instance types for the EKS node group' required: true - # TODO: add spot - default: '["t2.micro"]' + default: '["t2.medium"]' + cluster-service-ipv4-cidr: + description: 'CIDR block for cluster service IPs' + required: true + default: '10.190.0.0/16' + cluster-node-ipv4-cidr: + description: 'CIDR block for cluster node IPs' + required: true + default: '10.192.0.0/16' + np-instance-types: + description: 'List of instance types for non-production environments' + required: true + default: '["t2.medium"]' + np-capacity-type: + description: 'Capacity type for non-production instances (e.g., SPOT)' + required: true + default: 'SPOT' node-desired-count: description: 'Desired number of nodes in the EKS node group' required: true @@ -187,7 +202,17 @@ runs: id: plan working-directory: "${{ inputs.tf-modules-path }}/modules/eks-cluster/" run: | - terraform plan -no-color -out eks.plan -var "name=${{ inputs.cluster-name }}" -var "region=${{ inputs.aws-region }}" -var "kubernetes_version=${{ inputs.kubernetes-version }}" -var "node_instance_types=${{ inputs.node-instance-types }}" -var "node_desired_count=${{ inputs.node-desired-count }}" -var "node_min_count=${{ inputs.node-min-count }}" -var "node_max_count=${{ inputs.node-max-count }}" + terraform plan -no-color -out eks.plan -var "name=${{ inputs.cluster-name }}" \ + -var "region=${{ inputs.aws-region }}" \ + -var "kubernetes_version=${{ inputs.kubernetes-version }}" \ + -var 
"node_instance_types=${{ inputs.node-instance-types }}" \ + -var "node_desired_count=${{ inputs.node-desired-count }}" \ + -var "node_min_count=${{ inputs.node-min-count }}" \ + -var "node_max_count=${{ inputs.node-max-count }}" \ + -var "cluster_service_ipv4_cidr=${{ inputs.cluster-service-ipv4-cidr }}" \ + -var "cluster_node_ipv4_cidr=${{ inputs.cluster-node-ipv4-cidr }}" \ + -var "np_instance_types=${{ inputs.np-instance-types }}" \ + -var "np_capacity_type=${{ inputs.np-capacity-type }}" - name: Terraform Apply or Destroy shell: bash From 794cfe8e98ebf838adce82167213a38454a451b8 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Wed, 11 Sep 2024 20:16:18 +0200 Subject: [PATCH 04/51] copy missing backend --- .github/actions/eks-manage-cluster/action.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index 51e29c31..4fae9706 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -193,6 +193,7 @@ runs: id: init working-directory: "${{ inputs.tf-modules-path }}/modules/eks-cluster/" run: | + cp ../fixtures/backend.tf ./ terraform version terraform init -backend-config="bucket=${{ steps.set-terraform-variables.outputs.TFSTATE_BUCKET }}" -backend-config="key=${{ steps.set-terraform-variables.outputs.TFSTATE_KEY }}" -backend-config="region=${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }}" terraform validate -no-color From df35114c977243cb8f8c580e7fc9dfaae7cb5983 Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Wed, 11 Sep 2024 20:22:48 +0200 Subject: [PATCH 05/51] add missing parameters --- .github/actions/eks-manage-cluster/action.yml | 30 +++++++++++-------- modules/eks-cluster/variables.tf | 1 + 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index 4fae9706..ee52d811 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -20,10 +20,6 @@ inputs: required: true # renovate: datasource=endoflife-date depName=amazon-eks versioning=semver default: "1.30" - node-instance-types: - description: 'Instance types for the EKS node group' - required: true - default: '["t2.medium"]' cluster-service-ipv4-cidr: description: 'CIDR block for cluster service IPs' required: true @@ -33,22 +29,30 @@ inputs: required: true default: '10.192.0.0/16' np-instance-types: - description: 'List of instance types for non-production environments' + description: 'List of instance types' required: true default: '["t2.medium"]' np-capacity-type: description: 'Capacity type for non-production instances (e.g., SPOT)' required: true default: 'SPOT' - node-desired-count: + np-node-desired-count: description: 'Desired number of nodes in the EKS node group' required: true default: "4" - node-min-count: + np-node-min-count: description: 'Minimum number of nodes in the EKS node group' required: true default: "1" - node-max-count: + np-disk-size: + description: 'Disk size of the nodes on the default node pool' + required: true + default: "20" + np-ami-type: + description: 'Amazon Machine Image' + required: true + default: "AL2_x86_64" + np-node-max-count: description: 'Maximum number of nodes in the EKS node group' required: true default: "10" @@ -206,10 +210,12 @@ runs: terraform plan -no-color -out eks.plan -var "name=${{ inputs.cluster-name }}" \ -var "region=${{ inputs.aws-region }}" \ 
-var "kubernetes_version=${{ inputs.kubernetes-version }}" \ - -var "node_instance_types=${{ inputs.node-instance-types }}" \ - -var "node_desired_count=${{ inputs.node-desired-count }}" \ - -var "node_min_count=${{ inputs.node-min-count }}" \ - -var "node_max_count=${{ inputs.node-max-count }}" \ + -var "name=${{ inputs.cluster-name }}" \ + -var "np_desired_node_count=${{ inputs.np-node-desired-count }}" \ + -var "np_min_node_count=${{ inputs.np-node-min-count }}" \ + -var "np_max_node_count=${{ inputs.np-node-max-count }}" \ + -var "np_disk_size=${{ inputs.np-node-disk-size }}" \ + -var "np_ami_type=${{ inputs.np-node-ami-type }}" \ -var "cluster_service_ipv4_cidr=${{ inputs.cluster-service-ipv4-cidr }}" \ -var "cluster_node_ipv4_cidr=${{ inputs.cluster-node-ipv4-cidr }}" \ -var "np_instance_types=${{ inputs.np-instance-types }}" \ diff --git a/modules/eks-cluster/variables.tf b/modules/eks-cluster/variables.tf index 90193e29..d6ca5d04 100644 --- a/modules/eks-cluster/variables.tf +++ b/modules/eks-cluster/variables.tf @@ -1,4 +1,5 @@ # ! Developer: if you are adding a variable without a default value, please ensure to reference it in the cleanup script (.github/actions/eks-cleanup-resources/scripts/destroy.sh) +# and also in the manage gha variable "region" { type = string From 1bcf4e36f5d29fa0def59fe9fc61e523438e1119 Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Wed, 11 Sep 2024 20:26:01 +0200 Subject: [PATCH 06/51] fix param --- .github/actions/eks-manage-cluster/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index ee52d811..3548d73a 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -31,7 +31,7 @@ inputs: np-instance-types: description: 'List of instance types' required: true - default: '["t2.medium"]' + default: "['t2.medium']" np-capacity-type: description: 'Capacity type for non-production instances (e.g., SPOT)' required: true From 65ae75a2f2af61126cd94afa395bc4aec0d810fb Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Wed, 11 Sep 2024 20:29:54 +0200 Subject: [PATCH 07/51] fix param --- .github/actions/eks-manage-cluster/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index 3548d73a..ded421a3 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -31,7 +31,7 @@ inputs: np-instance-types: description: 'List of instance types' required: true - default: "['t2.medium']" + default: '["t2.medium"]' np-capacity-type: description: 'Capacity type for non-production instances (e.g., SPOT)' required: true @@ -218,7 +218,7 @@ runs: -var "np_ami_type=${{ inputs.np-node-ami-type }}" \ -var "cluster_service_ipv4_cidr=${{ inputs.cluster-service-ipv4-cidr }}" \ -var "cluster_node_ipv4_cidr=${{ inputs.cluster-node-ipv4-cidr }}" \ - -var "np_instance_types=${{ inputs.np-instance-types }}" \ + -var 'np_instance_types=${{ inputs.np-instance-types }}' \ -var "np_capacity_type=${{ inputs.np-capacity-type }}" - name: Terraform Apply or Destroy From 
a99a8fbabb75016b76a11c6ceb68d720263431a2 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 09:44:55 +0200 Subject: [PATCH 08/51] add tmate debug --- .github/workflows/test-gha-eks-manage-cluster.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/test-gha-eks-manage-cluster.yml b/.github/workflows/test-gha-eks-manage-cluster.yml index 42f23e92..284f18f4 100644 --- a/.github/workflows/test-gha-eks-manage-cluster.yml +++ b/.github/workflows/test-gha-eks-manage-cluster.yml @@ -50,6 +50,8 @@ jobs: with: ref: ${{ github.head_ref }} fetch-depth: 0 + - name: Setup tmate session + uses: mxschmitt/action-tmate@v3 - name: Install tooling using asdf uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 From 8e35b9ef753202c87a414835e2817117a623a6c5 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 09:50:45 +0200 Subject: [PATCH 09/51] tmp disable tests --- .github/workflows/tests.yml | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 60d6910e..13249a23 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -5,16 +5,16 @@ on: schedule: - cron: '0 1 * * 2' workflow_dispatch: - pull_request: - # the paths should be synced with ../labeler.yml - paths: - - test/**.go - - test/**/go.mod - - modules/fixtures/** - - modules/**.tf - - .tool-versions - - .github/workflows/tests.yml - - justfile + # pull_request: + # # the paths should be synced with ../labeler.yml + # paths: + # - test/**.go + # - test/**/go.mod + # - modules/fixtures/** + # - modules/**.tf + # - .tool-versions + # - .github/workflows/tests.yml + # - justfile # limit to a single execution per ref of this workflow concurrency: From 9e35ebabfa719d9de7457f1db5f53949724f0479 Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 10:49:05 +0200 Subject: [PATCH 10/51] detach --- .github/workflows/test-gha-eks-manage-cluster.yml | 3 +++ .github/workflows/tests.yml | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/test-gha-eks-manage-cluster.yml b/.github/workflows/test-gha-eks-manage-cluster.yml index 284f18f4..954ad3c1 100644 --- a/.github/workflows/test-gha-eks-manage-cluster.yml +++ b/.github/workflows/test-gha-eks-manage-cluster.yml @@ -50,8 +50,11 @@ jobs: with: ref: ${{ github.head_ref }} fetch-depth: 0 + - name: Setup tmate session uses: mxschmitt/action-tmate@v3 + with: + detached: true - name: Install tooling using asdf uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 13249a23..c649820b 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -5,7 +5,7 @@ on: schedule: - cron: '0 1 * * 2' workflow_dispatch: - # pull_request: + # pull_request: TODO: revert # # the paths should be synced with ../labeler.yml # paths: # - test/**.go From 56838a71518c19d4adf215b8fe974c244b44c907 Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 10:53:01 +0200 Subject: [PATCH 11/51] fix --- .github/actions/eks-manage-cluster/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index ded421a3..7a60caac 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -214,8 +214,8 @@ runs: -var "np_desired_node_count=${{ inputs.np-node-desired-count }}" \ -var "np_min_node_count=${{ inputs.np-node-min-count }}" \ -var "np_max_node_count=${{ inputs.np-node-max-count }}" \ - -var "np_disk_size=${{ inputs.np-node-disk-size }}" \ - -var "np_ami_type=${{ inputs.np-node-ami-type }}" \ + -var "np_disk_size=${{ inputs.np-disk-size }}" \ + -var "np_ami_type=${{ inputs.np-ami-type }}" \ -var "cluster_service_ipv4_cidr=${{ inputs.cluster-service-ipv4-cidr }}" \ -var "cluster_node_ipv4_cidr=${{ inputs.cluster-node-ipv4-cidr }}" \ -var 'np_instance_types=${{ inputs.np-instance-types }}' \ From b31cb0801aab77b4db25195f8db718623d6cd05a Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 11:32:49 +0200 Subject: [PATCH 12/51] fix delete cluster --- .../actions/eks-cleanup-resources/README.md | 39 ++++++++++ .github/actions/eks-manage-cluster/README.md | 77 +++++++++++++++++++ .github/actions/eks-manage-cluster/action.yml | 28 +++---- .../workflows/test-gha-eks-manage-cluster.yml | 13 ++-- 4 files changed, 130 insertions(+), 27 deletions(-) create mode 100644 .github/actions/eks-cleanup-resources/README.md create mode 100644 .github/actions/eks-manage-cluster/README.md diff --git a/.github/actions/eks-cleanup-resources/README.md b/.github/actions/eks-cleanup-resources/README.md new file mode 100644 index 00000000..3c6535e2 --- /dev/null +++ b/.github/actions/eks-cleanup-resources/README.md @@ -0,0 +1,39 @@ +# Delete EKS Resources + +This GitHub Action automates the deletion of AWS resources using a shell script. It helps you manage and clean up modules of this repository as resources by specifying a target or deleting resources based on age criteria. + +## Usage + +To use this action, include it in your workflow file (e.g., `.github/workflows/delete-eks-resources.yml`): + +```yaml +name: Delete EKS Resources + +on: + workflow_dispatch: + +jobs: + cleanup: + runs-on: ubuntu-latest + steps: + - name: Delete EKS resources + uses: camunda/camunda-tf-eks-module/eks-cleanup-resources@main + with: + tf-bucket: 'your-s3-bucket-name' + tf-bucket-region: 'your-region' + max-age-hours: 24 + target: 'all' + temp-dir: './tmp/eks-cleanup/' +``` + +## Inputs + +The action supports the following input parameters: + +| Input Name | Description | Required | Default | +|--------------------|-------------------------------------------------------------------------------------------|----------|----------------------------| +| `tf-bucket` | The S3 bucket containing the resources' state files. | Yes | N/A | +| `tf-bucket-region` | The region of the S3 bucket containing the resources state files. 
Falls back to `AWS_REGION` if not set. | No | AWS_REGION | +| `max-age-hours` | The maximum age (in hours) for resources to be deleted. | No | "20" | +| `target` | Specifies an ID to destroy specific resources or "all" to destroy all resources. | Yes | "all" | +| `temp-dir` | Temporary directory prefix used for storing resource data during processing. | No | "./tmp/eks-cleanup/" | diff --git a/.github/actions/eks-manage-cluster/README.md b/.github/actions/eks-manage-cluster/README.md new file mode 100644 index 00000000..a017cbd3 --- /dev/null +++ b/.github/actions/eks-manage-cluster/README.md @@ -0,0 +1,77 @@ +# Deploy or Destroy EKS Cluster + +This GitHub Action automates the deployment or destruction of an Amazon Elastic Kubernetes Service (EKS) cluster using Terraform. It also installs necessary tools like Terraform, AWS CLI, and `kubectl`, and sets up the Kubernetes context for the created cluster. + +## Usage + +To use this action, add it to your workflow file (e.g., `.github/workflows/eks-deploy.yml`): + +```yaml +name: EKS Cluster Management + +on: + workflow_dispatch: + +jobs: + eks_management: + runs-on: ubuntu-latest + steps: + - name: Deploy or Destroy EKS Cluster + uses: camunda/camunda-tf-eks-module/eks-manage-cluster@main + with: + action: 'create' # or 'destroy' + aws-region: 'us-west-2' + cluster-name: 'my-eks-cluster' + kubernetes-version: '1.30' + cluster-service-ipv4-cidr: '10.190.0.0/16' + cluster-node-ipv4-cidr: '10.192.0.0/16' + np-instance-types: '["t2.medium"]' + np-capacity-type: 'SPOT' + np-node-desired-count: '4' + np-node-min-count: '1' + np-disk-size: '20' + np-ami-type: 'AL2_x86_64' + np-node-max-count: '10' + s3-backend-bucket: 'your-terraform-state-bucket' + s3-bucket-region: 'us-west-2' + tf-modules-revision: 'main' + tf-modules-path: './.action-tf-modules/eks/' + login: 'true' + awscli-version: '2.15.52' +``` + +## Inputs + +| Input Name | Description | Required | Default | 
+|-------------------------------------|--------------------------------------------------------------------------------------------------------------|----------|----------------------------------| +| `aws-region` | AWS region where the EKS cluster will be deployed. | Yes | N/A | +| `cluster-name` | Name of the EKS cluster to deploy. | Yes | N/A | +| `kubernetes-version` | Version of Kubernetes to use for the EKS cluster. | Yes | `1.30` | +| `cluster-service-ipv4-cidr` | CIDR block for cluster service IPs. | Yes | `10.190.0.0/16` | +| `cluster-node-ipv4-cidr` | CIDR block for cluster node IPs. | Yes | `10.192.0.0/16` | +| `np-instance-types` | List of instance types for the node pool. | Yes | `["t2.medium"]` | +| `np-capacity-type` | Capacity type for non-production instances (e.g., SPOT). | Yes | `SPOT` | +| `np-node-desired-count` | Desired number of nodes in the EKS node group. | Yes | `4` | +| `np-node-min-count` | Minimum number of nodes in the EKS node group. | Yes | `1` | +| `np-disk-size` | Disk size of the nodes on the default node pool (in GB). | Yes | `20` | +| `np-ami-type` | Amazon Machine Image type. | Yes | `AL2_x86_64` | +| `np-node-max-count` | Maximum number of nodes in the EKS node group. | Yes | `10` | +| `s3-backend-bucket` | Name of the S3 bucket to store Terraform state. | Yes | N/A | +| `s3-bucket-region` | Region of the bucket containing the resources states; falls back on `aws-region` if not set. | No | N/A | +| `tf-modules-revision` | Git revision of the Terraform modules to use. | Yes | `main` | +| `tf-modules-path` | Path where the Terraform EKS modules will be cloned. | Yes | `./.action-tf-modules/eks/` | +| `login` | Authenticate the current kube context on the created cluster. | Yes | `true` | +| `tf-cli-config-credentials-hostname`| The hostname of a HCP Terraform/Terraform Enterprise instance to use for credentials configuration. 
| No | `app.terraform.io` | +| `tf-cli-config-credentials-token` | The API token for a HCP Terraform/Terraform Enterprise instance. | No | N/A | +| `tf-terraform-version` | The version of Terraform CLI to install. Accepts full version or constraints like `<1.13.0` or `latest`. | No | `latest` | +| `tf-terraform-wrapper` | Whether or not to install a wrapper for Terraform CLI calls. | No | `true` | +| `awscli-version` | Version of the AWS CLI to install. | Yes | see `action.yml` | + +## Outputs + +| Output Name | Description | +|----------------------------|------------------------------------------------------------------| +| `eks-cluster-endpoint` | The API endpoint of the deployed EKS cluster. | +| `eks-cluster-id` | The ID of the deployed EKS cluster. | +| `terraform-state-url` | URL of the Terraform state file in the S3 bucket. | +| `all-terraform-outputs` | All outputs from Terraform. | diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index 7a60caac..06006ef1 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -1,14 +1,10 @@ -name: Deploy or Destroy EKS Cluster +name: Deploy an EKS Cluster description: | - This GitHub Action automates the deployment or destruction of an EKS (Amazon Elastic Kubernetes Service) cluster using Terraform. + This GitHub Action automates the deployment of an EKS (Amazon Elastic Kubernetes Service) cluster using Terraform. This action will also install Terraform, awscli, and kubectl. The kube context will be set on the created cluster. 
inputs: - action: - description: 'Specify the action to perform: "create" or "destroy" the EKS cluster' - required: true - default: 'create' aws-region: description: 'AWS region where the EKS cluster will be deployed' required: true @@ -221,31 +217,25 @@ runs: -var 'np_instance_types=${{ inputs.np-instance-types }}' \ -var "np_capacity_type=${{ inputs.np-capacity-type }}" - - name: Terraform Apply or Destroy + - name: Terraform Apply shell: bash id: apply-or-destroy working-directory: "${{ inputs.tf-modules-path }}/modules/eks-cluster/" run: | - if [ "${{ inputs.action }}" == "create" ]; then - terraform apply -no-color eks.plan - export cluster_endpoint="$(terraform output -raw cluster_endpoint)" - echo "cluster_endpoint=$cluster_endpoint" >> "$GITHUB_OUTPUT" - elif [ "${{ inputs.action }}" == "destroy" ]; then - terraform destroy -no-color -auto-approve - else - echo "Invalid action. Please specify 'create' or 'destroy'." - exit 1 - fi + terraform apply -no-color eks.plan + export cluster_endpoint="$(terraform output -raw cluster_endpoint)" + echo "cluster_endpoint=$cluster_endpoint" >> "$GITHUB_OUTPUT" - name: Configure kubectl shell: bash id: kube_config - if: inputs.login == 'true' && inputs.action == 'create' + if: inputs.login == 'true' run: | aws eks --region ${{ inputs.aws-region }} update-kubeconfig --name ${{ inputs.cluster-name }} - name: Output Kube Config shell: bash - if: inputs.login == 'true' && inputs.action == 'create' + if: inputs.login == 'true' run: | kubectl config view + kubectl get ns diff --git a/.github/workflows/test-gha-eks-manage-cluster.yml b/.github/workflows/test-gha-eks-manage-cluster.yml index 954ad3c1..06444176 100644 --- a/.github/workflows/test-gha-eks-manage-cluster.yml +++ b/.github/workflows/test-gha-eks-manage-cluster.yml @@ -99,7 +99,6 @@ jobs: uses: ./.github/actions/eks-manage-cluster id: create_cluster with: - action: "create" cluster-name: ${{ steps.commit_info.outputs.cluster_name }} aws-region: ${{ env.AWS_REGION }} 
s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} @@ -109,14 +108,12 @@ jobs: - name: Delete Cluster timeout-minutes: 125 if: always() && !(github.event_name == 'workflow_dispatch' && github.event.inputs.delete_cluster == 'false') - uses: ./.github/actions/eks-manage-cluster + uses: ./.github/actions/eks-cleanup-resources with: - action: "delete" - cluster-name: ${{ steps.commit_info.outputs.cluster_name }} - aws-region: ${{ env.AWS_REGION }} - s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} - s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} - tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} + tf-bucket: ${{ env.TF_STATE_BUCKET }} + tf-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} + max-age-hours: 0 + target: ${{ steps.commit_info.outputs.cluster_name }} - name: Notify in Slack in case of failure id: slack-notification From 11b04850fe11fae35228aa6c38b7a6ff9280c511 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 12:19:54 +0200 Subject: [PATCH 13/51] add utility action --- .../actions/aurora-manage-cluster/action.yml | 158 ++++++++++++++++++ .github/actions/eks-manage-cluster/action.yml | 85 +++------- .github/actions/utility-action/action.yml | 112 +++++++++++++ .../workflows/test-gha-eks-manage-cluster.yml | 5 - 4 files changed, 289 insertions(+), 71 deletions(-) create mode 100644 .github/actions/aurora-manage-cluster/action.yml create mode 100644 .github/actions/utility-action/action.yml diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml new file mode 100644 index 00000000..aea2709e --- /dev/null +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -0,0 +1,158 @@ +name: Deploy RDS Aurora Cluster + +description: | + This GitHub Action automates the deployment of an RDS Aurora cluster using Terraform. + This action will also install Terraform and awscli. It will output the Aurora cluster endpoint.
+ +inputs: + aws-region: + description: 'AWS region where the RDS Aurora cluster will be deployed' + required: true + cluster-name: + description: 'Name of the RDS Aurora cluster to deploy' + required: true + engine-version: + description: 'Version of the Aurora engine to use' + required: true + # TODO: renovate + default: "15.4" + instance-class: + description: 'Instance class for the Aurora cluster' + required: true + default: "db.t3.medium" + num-instances: + description: 'Number of instances in the Aurora cluster' + required: true + default: "1" + username: + description: 'Username for the PostgreSQL admin user' + required: true + password: + description: 'Password for the PostgreSQL admin user' + required: true + vpc-id: + description: 'VPC ID to create the cluster in' + required: true + subnet-ids: + description: 'List of subnet IDs to create the cluster in' + required: true + cidr-blocks: + description: 'CIDR blocks to allow access from and to' + required: true + tags: + description: 'Tags to add to the resources' + default: '{}' + required: false + s3-backend-bucket: + description: 'Name of the S3 bucket to store Terraform state' + required: true + s3-bucket-region: + description: 'Region of the bucket containing the resources states, if not set, will fallback on aws-region' + tf-modules-revision: + description: 'Git revision of the tf modules to use' + default: 'main' + required: true + tf-modules-path: + description: 'Path where the tf Aurora modules will be cloned' + default: './.action-tf-modules/aurora/' + required: true + + # inherited from https://github.com/hashicorp/setup-terraform/blob/main/action.yml + tf-cli-config-credentials-hostname: + description: 'The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. Defaults to `app.terraform.io`.' 
+ default: 'app.terraform.io' + required: false + tf-cli-config-credentials-token: + description: 'The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file.' + required: false + tf-terraform-version: + description: 'The version of Terraform CLI to install. Defaults to `latest`.' + default: 'latest' + required: false + tf-terraform-wrapper: + description: 'Whether or not to install a wrapper to wrap subsequent calls of the `terraform` binary and expose its STDOUT, STDERR, and exit code as outputs named `stdout`, `stderr`, and `exitcode` respectively. Defaults to `true`.' + default: 'true' + required: false + awscli-version: + description: 'Version of the aws cli to use' + required: true + # renovate: datasource=github-releases depName=aws/aws-cli + default: "2.15.52" + +outputs: + aurora-endpoint: + description: 'The endpoint of the deployed Aurora cluster' + value: ${{ steps.apply.outputs.aurora_endpoint }} + + terraform-state-url: + description: 'URL of the Terraform state file in the S3 bucket' + value: ${{ steps.utility.outputs.terraform-state-url }} + + # Add all terraform outputs dynamically + all-terraform-outputs: + description: 'All outputs from Terraform' + value: ${{ steps.fetch_outputs.outputs.all_terraform_outputs }} + +runs: + using: 'composite' + steps: + - name: Use Utility Actions + id: utility + uses: ./../utility-action + with: + awscli-version: '${{ inputs.awscli-version }}' + terraform-version: '${{ inputs.terraform-version }}' + + aws-region: '${{ inputs.aws-region }}' + s3-backend-bucket: '${{ inputs.s3-backend-bucket }}' + s3-bucket-region: '${{ inputs.s3-bucket-region }}' + + name: '${{ inputs.cluster-name }}' + module-name: 'aurora' + + tf-cli-config-credentials-hostname: '${{ inputs.tf-cli-config-credentials-hostname }}' + tf-cli-config-credentials-token: '${{ inputs.tf-cli-config-credentials-token }}' + tf-terraform-wrapper: '${{ 
inputs.tf-terraform-wrapper }}' + + - name: Checkout Repository Aurora modules + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + with: + repository: "camunda/camunda-tf-eks-module" + ref: ${{ inputs.tf-modules-revision }} + path: ${{ inputs.tf-modules-path }} + fetch-depth: 0 + + - name: Terraform Init + shell: bash + id: init + working-directory: "${{ inputs.tf-modules-path }}/modules/aurora/" + run: | + cp ../fixtures/backend.tf ./ + terraform version + terraform init -backend-config="bucket=${{ steps.utility.outputs.TFSTATE_BUCKET }}" -backend-config="key=${{ steps.utility.outputs.TFSTATE_KEY }}" -backend-config="region=${{ steps.utility.outputs.TFSTATE_REGION }}" + terraform validate -no-color + + - name: Terraform Plan + shell: bash + id: plan + working-directory: "${{ inputs.tf-modules-path }}/modules/aurora/" + run: | + terraform plan -no-color -out aurora.plan -var "cluster_name=${{ inputs.cluster-name }}" \ + -var "engine_version=${{ inputs.engine-version }}" \ + -var "instance_class=${{ inputs.instance-class }}" \ + -var "num_instances=${{ inputs.num-instances }}" \ + -var "username=${{ inputs.username }}" \ + -var "password=${{ inputs.password }}" \ + -var "vpc_id=${{ inputs.vpc-id }}" \ + -var "subnet_ids=${{ inputs.subnet-ids }}" \ + -var "cidr_blocks=${{ inputs.cidr-blocks }}" \ + -var "tags=${{ inputs.tags }}" + + - name: Terraform Apply + shell: bash + id: apply + working-directory: "${{ inputs.tf-modules-path }}/modules/aurora/" + run: | + terraform apply -no-color aurora.plan + export aurora_endpoint="$(terraform output -raw aurora_endpoint)" + echo "aurora_endpoint=$aurora_endpoint" >> "$GITHUB_OUTPUT" diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index 06006ef1..a6e74dbd 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -95,7 +95,7 @@ inputs: outputs: eks-cluster-endpoint: description: 'The API 
endpoint of the deployed EKS cluster' - value: ${{ steps.cluster_info.outputs.cluster_endpoint }} + value: ${{ steps.apply.outputs.cluster_endpoint }} eks-cluster-id: description: 'The ID of the deployed EKS cluster' @@ -103,7 +103,7 @@ outputs: terraform-state-url: description: 'URL of the Terraform state file in the S3 bucket' - value: ${{ steps.set-terraform-variables.outputs.terraform-state-url }} + value: ${{ steps.utility.outputs.terraform-state-url }} # Add all terraform outputs dynamically all-terraform-outputs: @@ -113,72 +113,23 @@ outputs: runs: using: 'composite' steps: - - name: Install Terraform - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3 + - name: Use Utility Actions + id: utility + uses: ./../utility-action with: - cli_config_credentials_hostname: ${{ inputs.tf-cli-config-credentials-hostname }} - cli_config_credentials_token: ${{ inputs.tf-cli-config-credentials-token }} - terraform_version: ${{ inputs.tf-terraform-version }} - terraform_wrapper: ${{ inputs.tf-terraform-wrapper }} + awscli-version: '${{ inputs.awscli-version }}' + terraform-version: '${{ inputs.terraform-version }}' - - name: Install AWS CLI - shell: bash - run: | - if ! command -v aws &> /dev/null; then - echo "AWS CLI not found, installing..." - curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-${{ inputs.awscli-version }}.zip" -o "awscliv2.zip" - unzip -qq awscliv2.zip - sudo ./aws/install - rm -f awscliv2.zip - else - echo "Warning: AWS CLI is already installed." - fi - - - name: Install kubectl - shell: bash - run: | - if ! command -v kubectl &> /dev/null; then - echo "kubectl not found, installing..." - curl -LO "https://dl.k8s.io/release/v${{ inputs.kubernetes-version }}/bin/linux/amd64/kubectl" - chmod +x ./kubectl - sudo mv ./kubectl /usr/local/bin/kubectl - kubectl version --client - else - echo "Warning: kubectl is already installed." 
- fi - - - name: Set Terraform variables - shell: bash - id: set-terraform-variables - run: | - export TFSTATE_BUCKET="${{ inputs.s3-backend-bucket }}" - export TFSTATE_KEY="terraform/${{ inputs.cluster-name }}/gha/eks-cluster/terraform.tfstate" + aws-region: '${{ inputs.aws-region }}' + s3-backend-bucket: '${{ inputs.s3-backend-bucket }}' + s3-bucket-region: '${{ inputs.s3-bucket-region }}' - if [ -z "${{ inputs.s3-bucket-region }}" ]; then - export TFSTATE_REGION="${{ inputs.aws-region }}" - else - export TFSTATE_REGION="${{ inputs.s3-bucket-region }}" - fi - - echo "TFSTATE_BUCKET=${TFSTATE_BUCKET}" >> "$GITHUB_OUTPUT" - echo "TFSTATE_REGION=${TFSTATE_REGION}" >> "$GITHUB_OUTPUT" - echo "TFSTATE_KEY=${TFSTATE_KEY}" >> "$GITHUB_OUTPUT" - - terraform_state_url="s3://${TFSTATE_BUCKET}/${TFSTATE_KEY}" - echo "terraform-state-url=${terraform_state_url}" >> "$GITHUB_OUTPUT" - - - name: Check if S3 bucket exists - id: create-s3-bucket - shell: bash - run: | - if aws s3api head-bucket --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} 2>/dev/null; then - echo "Bucket already exists" - else - echo "Bucket does not exist, creating..." 
- aws s3api create-bucket --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} --create-bucket-configuration LocationConstraint=${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} - fi + name: '${{ inputs.cluster-name }}' + module-name: 'eks-cluster' - aws s3api put-public-access-block --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} --public-access-block-configuration "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true" + tf-cli-config-credentials-hostname: '${{ inputs.tf-cli-config-credentials-hostname }}' + tf-cli-config-credentials-token: '${{ inputs.tf-cli-config-credentials-token }}' + tf-terraform-wrapper: '${{ inputs.tf-terraform-wrapper }}' - name: Checkout Repository EKS modules uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 @@ -195,7 +146,7 @@ runs: run: | cp ../fixtures/backend.tf ./ terraform version - terraform init -backend-config="bucket=${{ steps.set-terraform-variables.outputs.TFSTATE_BUCKET }}" -backend-config="key=${{ steps.set-terraform-variables.outputs.TFSTATE_KEY }}" -backend-config="region=${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }}" + terraform init -backend-config="bucket=${{ steps.utility.outputs.TFSTATE_BUCKET }}" -backend-config="key=${{ steps.utility.outputs.TFSTATE_KEY }}" -backend-config="region=${{ steps.utility.outputs.TFSTATE_REGION }}" terraform validate -no-color - name: Terraform Plan @@ -219,12 +170,14 @@ runs: - name: Terraform Apply shell: bash - id: apply-or-destroy + id: apply working-directory: "${{ inputs.tf-modules-path }}/modules/eks-cluster/" run: | terraform apply -no-color eks.plan export cluster_endpoint="$(terraform output -raw cluster_endpoint)" echo "cluster_endpoint=$cluster_endpoint" >> "$GITHUB_OUTPUT" + export cluster_id="$(terraform output -raw cluster_id)" + echo "cluster_id=$cluster_id" >> 
"$GITHUB_OUTPUT" - name: Configure kubectl shell: bash diff --git a/.github/actions/utility-action/action.yml b/.github/actions/utility-action/action.yml new file mode 100644 index 00000000..470e1c4c --- /dev/null +++ b/.github/actions/utility-action/action.yml @@ -0,0 +1,112 @@ +name: Utility Actions + +description: | + A set of utility steps to be used across different workflows, including: + - Installing Terraform + - Installing AWS CLI + - Setting Terraform variables + - Checking/Creating an S3 bucket + +inputs: + awscli-version: + description: 'Version of the AWS CLI to install' + required: true + # renovate: datasource=github-releases depName=aws/aws-cli + default: '2.15.52' + terraform-version: + description: 'Version of Terraform to install' + required: true + default: 'latest' + s3-backend-bucket: + description: 'Name of the S3 bucket to store Terraform state' + required: true + s3-bucket-region: + description: 'Region of the bucket containing the resources states, if not set, will fallback on aws-region' + aws-region: + description: 'AWS region to use for S3 bucket operations' + required: true + name: + description: 'Name of resource instance (e.g., uid)' + required: true + module-name: + description: 'Name of the Terraform module (e.g., eks-cluster, aurora)' + required: true + tf-cli-config-credentials-hostname: + description: 'The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file' + default: 'app.terraform.io' + tf-cli-config-credentials-token: + description: 'The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file' + tf-terraform-wrapper: + description: 'Whether or not to install a wrapper for Terraform CLI' + default: 'true' + +outputs: + terraform-state-url: + description: 'URL of the Terraform state file in the S3 bucket' + value: "${{ 
steps.set-terraform-variables.outputs.terraform-state-url }}" + TFSTATE_BUCKET: + description: 'S3 bucket name for Terraform state' + value: "${{ steps.set-terraform-variables.outputs.TFSTATE_BUCKET }}" + TFSTATE_REGION: + description: 'Region of the S3 bucket for Terraform state' + value: "${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }}" + TFSTATE_KEY: + description: 'Key of the Terraform state file in the S3 bucket' + value: "${{ steps.set-terraform-variables.outputs.TFSTATE_KEY }}" + +runs: + using: 'composite' + steps: + - name: Install Terraform + uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3 + with: + cli_config_credentials_hostname: ${{ inputs.tf-cli-config-credentials-hostname }} + cli_config_credentials_token: ${{ inputs.tf-cli-config-credentials-token }} + terraform_version: ${{ inputs.terraform-version }} + terraform_wrapper: ${{ inputs.tf-terraform-wrapper }} + + - name: Install AWS CLI + shell: bash + run: | + if ! command -v aws &> /dev/null; then + echo "AWS CLI not found, installing..." + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-${{ inputs.awscli-version }}.zip" -o "awscliv2.zip" + unzip -qq awscliv2.zip + sudo ./aws/install + rm -f awscliv2.zip + else + echo "Warning: AWS CLI is already installed." 
+ fi + + - name: Set Terraform variables + shell: bash + id: set-terraform-variables + run: | + export TFSTATE_BUCKET="${{ inputs.s3-backend-bucket }}" + export TFSTATE_KEY="terraform/${{ inputs.name }}/gha/${{ inputs.module-name }}/terraform.tfstate" + + if [ -z "${{ inputs.s3-bucket-region }}" ]; then + export TFSTATE_REGION="${{ inputs.aws-region }}" + else + export TFSTATE_REGION="${{ inputs.s3-bucket-region }}" + fi + + echo "TFSTATE_BUCKET=${TFSTATE_BUCKET}" >> "$GITHUB_OUTPUT" + echo "TFSTATE_REGION=${TFSTATE_REGION}" >> "$GITHUB_OUTPUT" + echo "TFSTATE_KEY=${TFSTATE_KEY}" >> "$GITHUB_OUTPUT" + + terraform_state_url="s3://${TFSTATE_BUCKET}/${TFSTATE_KEY}" + echo "terraform-state-url=${terraform_state_url}" >> "$GITHUB_OUTPUT" + + - name: Check if S3 bucket exists + id: create-s3-bucket + shell: bash + run: | + if aws s3api head-bucket --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} 2>/dev/null; then + echo "Bucket already exists" + else + echo "Bucket does not exist, creating..." 
+ aws s3api create-bucket --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} --create-bucket-configuration LocationConstraint=${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} + fi + + aws s3api put-public-access-block --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} --public-access-block-configuration "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true" diff --git a/.github/workflows/test-gha-eks-manage-cluster.yml b/.github/workflows/test-gha-eks-manage-cluster.yml index 06444176..638dbbc7 100644 --- a/.github/workflows/test-gha-eks-manage-cluster.yml +++ b/.github/workflows/test-gha-eks-manage-cluster.yml @@ -51,11 +51,6 @@ jobs: ref: ${{ github.head_ref }} fetch-depth: 0 - - name: Setup tmate session - uses: mxschmitt/action-tmate@v3 - with: - detached: true - - name: Install tooling using asdf uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 From 5de0684abc0a649ba8a63269fd94077a3eabd06b Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 13:20:47 +0200 Subject: [PATCH 14/51] fix missing action --- .github/actions/aurora-manage-cluster/action.yml | 2 +- .github/actions/eks-manage-cluster/action.yml | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index aea2709e..f670f954 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -98,7 +98,7 @@ runs: steps: - name: Use Utility Actions id: utility - uses: ./../utility-action + uses: ${{ github.action_repository }}/utility-action@${{ github.action_ref }} with: awscli-version: '${{ inputs.awscli-version }}' terraform-version: '${{ inputs.terraform-version }}' diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index a6e74dbd..bc24a846 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -115,7 +115,8 @@ runs: steps: - name: Use Utility Actions id: utility - uses: ./../utility-action + # see https://github.com/orgs/community/discussions/41927 it's not possible to optimize this yet + uses: ${{ github.action_repository }}/utility-action@${{ github.action_ref }} with: awscli-version: '${{ inputs.awscli-version }}' terraform-version: '${{ inputs.terraform-version }}' From 921c4e7f55b6485ed8e9551769bdde5a77b8c798 Mon Sep 17 00:00:00 2001 From: "Leo J."
<153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 13:23:41 +0200 Subject: [PATCH 15/51] fix action --- .github/actions/aurora-manage-cluster/action.yml | 7 ++++++- .github/actions/eks-manage-cluster/action.yml | 6 +++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index f670f954..4592a545 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -98,7 +98,12 @@ runs: steps: - name: Use Utility Actions id: utility - uses: ${{ github.action_repository }}/utility-action@${{ github.action_ref }} + # see https://github.com/orgs/community/discussions/41927 it's not possible to optimize this yet + # steps.uses cannot access the github context. + # uses: ${{ github.action_repository }}/utility-action@${{ github.action_ref }} + # TODO: hard pin this one once it's merged + #uses: "camunda/camunda-tf-eks-module/utility-action@main" + uses: "./.github/actions/utility-action" with: awscli-version: '${{ inputs.awscli-version }}' terraform-version: '${{ inputs.terraform-version }}' diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index bc24a846..86136969 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -116,7 +116,11 @@ runs: - name: Use Utility Actions id: utility # see https://github.com/orgs/community/discussions/41927 it's not possible to optimize this yet - uses: ${{ github.action_repository }}/utility-action@${{ github.action_ref }} + # steps.uses cannot access the github context.
+ # uses: ${{ github.action_repository }}/utility-action@${{ github.action_ref }} + # TODO: hard pin this one once it's merged + #uses: "camunda/camunda-tf-eks-module/utility-action@main" + uses: "./.github/actions/utility-action" with: awscli-version: '${{ inputs.awscli-version }}' terraform-version: '${{ inputs.terraform-version }}' From 8746379bec358b91292839d05c039b7cc788e50c Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 15:29:42 +0200 Subject: [PATCH 16/51] add aurora in the tests --- .../actions/aurora-manage-cluster/README.md | 76 ++++++++++ .../actions/aurora-manage-cluster/action.yml | 3 - .github/actions/eks-manage-cluster/README.md | 1 - .github/actions/eks-manage-cluster/action.yml | 6 - .../test-gha-aurora-manage-cluster.yml | 139 ++++++++++++++++++ .../workflows/test-gha-eks-manage-cluster.yml | 7 +- .github/workflows/tests.yml | 1 + 7 files changed, 220 insertions(+), 13 deletions(-) create mode 100644 .github/actions/aurora-manage-cluster/README.md create mode 100644 .github/workflows/test-gha-aurora-manage-cluster.yml diff --git a/.github/actions/aurora-manage-cluster/README.md b/.github/actions/aurora-manage-cluster/README.md new file mode 100644 index 00000000..81a505ca --- /dev/null +++ b/.github/actions/aurora-manage-cluster/README.md @@ -0,0 +1,76 @@ +# Deploy RDS Aurora Cluster GitHub Action + +This GitHub Action automates the deployment of an Amazon RDS Aurora cluster using Terraform. It installs Terraform and AWS CLI, and outputs the Aurora cluster endpoint along with other relevant details. + +## Description + +The **Deploy RDS Aurora Cluster** action enables you to: + +- Automate the deployment of an RDS Aurora cluster on AWS. +- Use Terraform for infrastructure as code. +- Install specific versions of Terraform and AWS CLI. +- Output the Aurora cluster endpoint, Terraform state URL, and all other Terraform outputs dynamically. 
+ +## Inputs + +The following inputs are required for the action: + +| Input | Description | Required | Default | +|-------|-------------|----------|---------| +| `aws-region` | AWS region where the RDS Aurora cluster will be deployed. | Yes | - | +| `cluster-name` | Name of the RDS Aurora cluster to deploy. | Yes | - | +| `engine-version` | Version of the Aurora engine to use. | Yes | see `action.yml` | +| `instance-class` | Instance class for the Aurora cluster. | Yes | `db.t3.medium` | +| `num-instances` | Number of instances in the Aurora cluster. | Yes | `1` | +| `username` | Username for the PostgreSQL admin user. | Yes | - | +| `password` | Password for the PostgreSQL admin user. | Yes | - | +| `vpc-id` | VPC ID to create the cluster in. | No | - | +| `subnet-ids` | List of subnet IDs to create the cluster in. | No | - | +| `cidr-blocks` | CIDR blocks to allow access from and to. | No | - | +| `tags` | Tags to add to the resources. | No | `{}` | +| `s3-backend-bucket` | Name of the S3 bucket to store Terraform state. | Yes | - | +| `s3-bucket-region` | Region of the bucket containing the resources states. Fallbacks to `aws-region` if not set. | No | - | +| `tf-modules-revision` | Git revision of the Terraform modules to use. | Yes | `main` | +| `tf-modules-path` | Path where the Terraform Aurora modules will be cloned. | Yes | `./.action-tf-modules/aurora/` | +| `tf-cli-config-credentials-hostname` | The hostname of a HCP Terraform/Terraform Enterprise instance for the CLI configuration file. | No | `app.terraform.io` | +| `tf-cli-config-credentials-token` | The API token for a HCP Terraform/Terraform Enterprise instance. | No | - | +| `tf-terraform-version` | The version of Terraform CLI to install. | No | `latest` | +| `tf-terraform-wrapper` | Whether to install a wrapper for the Terraform binary. | No | `true` | +| `awscli-version` | Version of the AWS CLI to use. 
| Yes | see `action.yml` | + +## Outputs + +The action provides the following outputs: + +| Output | Description | +|--------|-------------| +| `aurora-endpoint` | The endpoint of the deployed Aurora cluster. | +| `terraform-state-url` | URL of the Terraform state file in the S3 bucket. | +| `all-terraform-outputs` | All outputs from Terraform. | + +## Usage + +To use this GitHub Action, include it in your workflow file: + +```yaml +jobs: + deploy_aurora: + runs-on: ubuntu-latest + steps: + - name: Deploy Aurora Cluster + uses: camunda/camunda-tf-eks-module/aurora-manage-cluster@main + with: + aws-region: 'us-west-2' + cluster-name: 'my-aurora-cluster' + engine-version: '15.4' + instance-class: 'db.t3.medium' + num-instances: '2' + username: 'admin' + password: ${{ secrets.DB_PASSWORD }} + vpc-id: 'vpc-12345678' + subnet-ids: 'subnet-12345,subnet-67890' + cidr-blocks: '10.0.0.0/16' + tags: '{"env": "prod", "team": "devops"}' + s3-backend-bucket: 'my-terraform-state-bucket' + s3-bucket-region: 'us-west-2' +``` diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 4592a545..369c43c3 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -32,13 +32,10 @@ inputs: required: true vpc-id: description: 'VPC ID to create the cluster in' - required: true subnet-ids: description: 'List of subnet IDs to create the cluster in' - required: true cidr-blocks: description: 'CIDR blocks to allow access from and to' - required: true tags: description: 'Tags to add to the resources' default: '{}' diff --git a/.github/actions/eks-manage-cluster/README.md b/.github/actions/eks-manage-cluster/README.md index a017cbd3..b18ca102 100644 --- a/.github/actions/eks-manage-cluster/README.md +++ b/.github/actions/eks-manage-cluster/README.md @@ -72,6 +72,5 @@ jobs: | Output Name | Description | 
|----------------------------|------------------------------------------------------------------| | `eks-cluster-endpoint` | The API endpoint of the deployed EKS cluster. | -| `eks-cluster-id` | The ID of the deployed EKS cluster. | | `terraform-state-url` | URL of the Terraform state file in the S3 bucket. | | `all-terraform-outputs` | All outputs from Terraform. | diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index 86136969..33b192b1 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -97,10 +97,6 @@ outputs: description: 'The API endpoint of the deployed EKS cluster' value: ${{ steps.apply.outputs.cluster_endpoint }} - eks-cluster-id: - description: 'The ID of the deployed EKS cluster' - value: ${{ steps.apply.outputs.cluster_id }} - terraform-state-url: description: 'URL of the Terraform state file in the S3 bucket' value: ${{ steps.utility.outputs.terraform-state-url }} @@ -181,8 +177,6 @@ runs: terraform apply -no-color eks.plan export cluster_endpoint="$(terraform output -raw cluster_endpoint)" echo "cluster_endpoint=$cluster_endpoint" >> "$GITHUB_OUTPUT" - export cluster_id="$(terraform output -raw cluster_id)" - echo "cluster_id=$cluster_id" >> "$GITHUB_OUTPUT" - name: Configure kubectl shell: bash diff --git a/.github/workflows/test-gha-aurora-manage-cluster.yml b/.github/workflows/test-gha-aurora-manage-cluster.yml new file mode 100644 index 00000000..0cf565e6 --- /dev/null +++ b/.github/workflows/test-gha-aurora-manage-cluster.yml @@ -0,0 +1,139 @@ +name: Aurora Cluster creation and destruction test + +on: + schedule: + - cron: '0 2 * * 1' # At 02:00 on Monday. + + workflow_dispatch: + inputs: + cluster_name: + description: "Aurora Cluster name." + required: false + type: string + delete_cluster: + description: "Whether to delete the Aurora cluster." 
+ required: false + type: boolean + default: true + db_username: + description: "Database username." + required: false + type: string + db_password: + description: "Database password." + required: false + type: string + + pull_request: + paths: + - modules/fixtures/backend.tf + - modules/fixtures/fixtures.default.aurora.tfvars + - modules/aurora/**.tf + - .tool-versions + - .github/workflows/test-gha-aurora-manage-cluster.yml + - .github/actions/aurora-manage-cluster/*.yml + - justfile + +concurrency: + group: "${{ github.workflow }}-${{ github.ref }}" + cancel-in-progress: true + +env: + AWS_PROFILE: "infex" + AWS_REGION: "eu-west-2" + + # please keep those synced with tests.yml + TF_STATE_BUCKET: "tests-eks-tf-state-eu-central-1" + TF_STATE_BUCKET_REGION: "eu-central-1" + +jobs: + action-test: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + ref: ${{ github.head_ref }} + fetch-depth: 0 + + - name: Install tooling using asdf + uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 + + - name: Get Cluster Info + id: commit_info + run: | + if [[ -n "${{ github.event.inputs.cluster_name }}" ]]; then + cluster_name="${{ github.event.inputs.cluster_name }}" + else + cluster_name="aurora-$(git rev-parse --short HEAD)" + fi + + if [[ -n "${{ github.event.inputs.db_username }}" ]]; then + db_username="${{ github.event.inputs.db_username }}" + else + db_username="user$(openssl rand -hex 4)" + fi + + if [[ -n "${{ github.event.inputs.db_password }}" ]]; then + db_password="${{ github.event.inputs.db_password }}" + else + db_password="$(openssl rand -base64 12)" + fi + + echo "cluster_name=$cluster_name" | tee -a "$GITHUB_OUTPUT" + echo "db_username=$db_username" | tee -a "$GITHUB_OUTPUT" + echo "db_password=$db_password" | tee -a "$GITHUB_OUTPUT" + + tf_modules_revision=$(git rev-parse HEAD) + echo "tf_modules_revision=$tf_modules_revision" | tee -a "$GITHUB_OUTPUT" + + - name: Import Secrets + id: 
secrets + uses: hashicorp/vault-action@v3 + with: + url: ${{ secrets.VAULT_ADDR }} + method: approle + roleId: ${{ secrets.VAULT_ROLE_ID }} + secretId: ${{ secrets.VAULT_SECRET_ID }} + exportEnv: false + secrets: | + secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; + secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; + + - name: Add profile credentials to ~/.aws/credentials + run: | + aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} + + - name: Create Aurora Cluster + timeout-minutes: 125 + uses: ./.github/actions/aurora-manage-cluster + id: create_cluster + with: + cluster-name: ${{ steps.commit_info.outputs.cluster_name }} + username: ${{ steps.commit_info.outputs.db_username }} + password: ${{ steps.commit_info.outputs.db_password }} + aws-region: ${{ env.AWS_REGION }} + s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} + s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} + tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} + + - name: Delete Aurora Cluster + timeout-minutes: 125 + if: always() && !(github.event_name == 'workflow_dispatch' && github.event.inputs.delete_cluster == 'false') + uses: ./.github/actions/eks-cleanup-resources + with: + tf-bucket: ${{ env.TF_STATE_BUCKET }} + tf-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} + max-age-hours: 0 + target: ${{ steps.commit_info.outputs.cluster_name }} + + - name: Notify in Slack in case of failure + id: slack-notification + if: failure() && github.event_name == 'schedule' + uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@main + with: + vault_addr: ${{ secrets.VAULT_ADDR }} + vault_role_id: ${{ secrets.VAULT_ROLE_ID }} + vault_secret_id: ${{ 
secrets.VAULT_SECRET_ID }} diff --git a/.github/workflows/test-gha-eks-manage-cluster.yml b/.github/workflows/test-gha-eks-manage-cluster.yml index 638dbbc7..be2ead6a 100644 --- a/.github/workflows/test-gha-eks-manage-cluster.yml +++ b/.github/workflows/test-gha-eks-manage-cluster.yml @@ -20,9 +20,9 @@ on: pull_request: # the paths should be synced with ../labeler.yml paths: - - modules/fixtures/**/*.tf - - modules/fixtures/**/*.tfvars - - modules/**.tf + - modules/fixtures/backend.tf + - modules/fixtures/fixtures.default.eks.tfvars + - modules/eks-cluster/**.tf - .tool-versions - .github/workflows/test-gha-eks-manage-cluster.yml - .github/actions/eks-manage-cluster/*.yml @@ -38,6 +38,7 @@ env: AWS_PROFILE: "infex" AWS_REGION: "eu-west-2" # /!\ always use one of the available test region https://github.com/camunda/infraex-common-config + # please keep those synced with tests.yml TF_STATE_BUCKET: "tests-eks-tf-state-eu-central-1" TF_STATE_BUCKET_REGION: "eu-central-1" diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index c649820b..53790fb6 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -27,6 +27,7 @@ env: AWS_REGION: "eu-west-2" # /!\ always use one of the available test region https://github.com/camunda/infraex-common-config TESTS_TF_BINARY_NAME: "terraform" + # please keep test-gha*.yml synced TF_STATE_BUCKET: "tests-eks-tf-state-eu-central-1" TF_STATE_BUCKET_REGION: "eu-central-1" From e0954f53c064e91444aa982e703cac1f8f5ca50b Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 17:56:14 +0200 Subject: [PATCH 17/51] cidrs and vpc is now optional for aurora --- modules/aurora/main.tf | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/modules/aurora/main.tf b/modules/aurora/main.tf index 185a162b..6f68146b 100644 --- a/modules/aurora/main.tf +++ b/modules/aurora/main.tf @@ -73,12 +73,16 @@ resource "aws_security_group" "this" { vpc_id = var.vpc_id + count = var.vpc_id != "" ? 1 : 0 + tags = var.tags } resource "aws_security_group_rule" "allow_egress" { description = "Allow outgoing traffic for the aurora db" + count = length(var.cidr_blocks) > 0 ? 1 : 0 + type = "egress" from_port = 0 to_port = 0 @@ -92,6 +96,8 @@ resource "aws_security_group_rule" "allow_egress" { resource "aws_security_group_rule" "allow_ingress" { description = "Allow incoming traffic for the aurora db for port 5432" + count = length(var.cidr_blocks) > 0 ? 1 : 0 + type = "ingress" from_port = 5432 to_port = 5432 @@ -102,6 +108,8 @@ resource "aws_security_group_rule" "allow_ingress" { } resource "aws_db_subnet_group" "this" { + count = length(var.subnet_ids) > 0 ? 1 : 0 + name = var.cluster_name description = "For Aurora cluster ${var.cluster_name}" From eb874adedd110796df30639ace984e1b37a4d69d Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 18:02:34 +0200 Subject: [PATCH 18/51] fix index --- modules/aurora/main.tf | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/aurora/main.tf b/modules/aurora/main.tf index 6f68146b..ae912210 100644 --- a/modules/aurora/main.tf +++ b/modules/aurora/main.tf @@ -15,8 +15,8 @@ resource "aws_rds_cluster" "aurora_cluster" { iam_database_authentication_enabled = var.iam_auth_enabled iam_roles = var.iam_roles # only needed if wanted to grant access from Aurora to e.g.
S3 - vpc_security_group_ids = [aws_security_group.this.id] - db_subnet_group_name = aws_db_subnet_group.this.name + vpc_security_group_ids = [aws_security_group.this[0].id] + db_subnet_group_name = aws_db_subnet_group.this[0].name skip_final_snapshot = true apply_immediately = true storage_encrypted = true @@ -50,7 +50,7 @@ resource "aws_rds_cluster_instance" "aurora_instance" { auto_minor_version_upgrade = var.auto_minor_version_upgrade instance_class = var.instance_class - db_subnet_group_name = aws_db_subnet_group.this.name + db_subnet_group_name = aws_db_subnet_group.this[0].name apply_immediately = true @@ -89,7 +89,7 @@ resource "aws_security_group_rule" "allow_egress" { protocol = "-1" cidr_blocks = var.cidr_blocks - security_group_id = aws_security_group.this.id + security_group_id = aws_security_group.this[0].id } @@ -104,7 +104,7 @@ resource "aws_security_group_rule" "allow_ingress" { protocol = "tcp" cidr_blocks = var.cidr_blocks - security_group_id = aws_security_group.this.id + security_group_id = aws_security_group.this[0].id } resource "aws_db_subnet_group" "this" { From 67eebe7ed5f5a18ea89e96c0455940a0d9ffa032 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 18:10:49 +0200 Subject: [PATCH 19/51] add default cidrs --- .github/actions/aurora-manage-cluster/README.md | 4 ++-- .github/actions/aurora-manage-cluster/action.yml | 3 +++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/README.md b/.github/actions/aurora-manage-cluster/README.md index 81a505ca..c46c371d 100644 --- a/.github/actions/aurora-manage-cluster/README.md +++ b/.github/actions/aurora-manage-cluster/README.md @@ -25,8 +25,8 @@ The following inputs are required for the action: | `username` | Username for the PostgreSQL admin user. | Yes | - | | `password` | Password for the PostgreSQL admin user. | Yes | - | | `vpc-id` | VPC ID to create the cluster in. 
| No | - | -| `subnet-ids` | List of subnet IDs to create the cluster in. | No | - | -| `cidr-blocks` | CIDR blocks to allow access from and to. | No | - | +| `subnet-ids` | List of subnet IDs to create the cluster in. | No | `[]` | +| `cidr-blocks` | CIDR blocks to allow access from and to. | No | `[]` | | `tags` | Tags to add to the resources. | No | `{}` | | `s3-backend-bucket` | Name of the S3 bucket to store Terraform state. | Yes | - | | `s3-bucket-region` | Region of the bucket containing the resources states. Fallbacks to `aws-region` if not set. | No | - | diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 369c43c3..64ef9c2c 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -32,10 +32,13 @@ inputs: required: true vpc-id: description: 'VPC ID to create the cluster in' + default: '' subnet-ids: description: 'List of subnet IDs to create the cluster in' + default: '[]' cidr-blocks: description: 'CIDR blocks to allow access from and to' + default: '[]' tags: description: 'Tags to add to the resources' default: '{}' From 7b6c58ad8268bf791eb7896f60a8af6876b63569 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 18:13:19 +0200 Subject: [PATCH 20/51] fix tags --- .github/actions/aurora-manage-cluster/README.md | 2 +- .github/actions/aurora-manage-cluster/action.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/README.md b/.github/actions/aurora-manage-cluster/README.md index c46c371d..959ed46d 100644 --- a/.github/actions/aurora-manage-cluster/README.md +++ b/.github/actions/aurora-manage-cluster/README.md @@ -27,7 +27,7 @@ The following inputs are required for the action: | `vpc-id` | VPC ID to create the cluster in. | No | - | | `subnet-ids` | List of subnet IDs to create the cluster in. 
| No | `[]` | | `cidr-blocks` | CIDR blocks to allow access from and to. | No | `[]` | -| `tags` | Tags to add to the resources. | No | `{}` | +| `tags` | Tags to add to the resources. | No | `` | | `s3-backend-bucket` | Name of the S3 bucket to store Terraform state. | Yes | - | | `s3-bucket-region` | Region of the bucket containing the resources states. Fallbacks to `aws-region` if not set. | No | - | | `tf-modules-revision` | Git revision of the Terraform modules to use. | Yes | `main` | diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 64ef9c2c..ba22f43c 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -41,7 +41,7 @@ inputs: default: '[]' tags: description: 'Tags to add to the resources' - default: '{}' + default: '' required: false s3-backend-bucket: description: 'Name of the S3 bucket to store Terraform state' From 607c01d5d61490de4cb79210a05eb151f09d0661 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 18:15:30 +0200 Subject: [PATCH 21/51] remove tags --- .github/actions/aurora-manage-cluster/README.md | 1 - .github/actions/aurora-manage-cluster/action.yml | 7 +------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/README.md b/.github/actions/aurora-manage-cluster/README.md index 959ed46d..cf0da812 100644 --- a/.github/actions/aurora-manage-cluster/README.md +++ b/.github/actions/aurora-manage-cluster/README.md @@ -27,7 +27,6 @@ The following inputs are required for the action: | `vpc-id` | VPC ID to create the cluster in. | No | - | | `subnet-ids` | List of subnet IDs to create the cluster in. | No | `[]` | | `cidr-blocks` | CIDR blocks to allow access from and to. | No | `[]` | -| `tags` | Tags to add to the resources. | No | `` | | `s3-backend-bucket` | Name of the S3 bucket to store Terraform state. 
| Yes | - | | `s3-bucket-region` | Region of the bucket containing the resources states. Fallbacks to `aws-region` if not set. | No | - | | `tf-modules-revision` | Git revision of the Terraform modules to use. | Yes | `main` | diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index ba22f43c..f63c4888 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -39,10 +39,6 @@ inputs: cidr-blocks: description: 'CIDR blocks to allow access from and to' default: '[]' - tags: - description: 'Tags to add to the resources' - default: '' - required: false s3-backend-bucket: description: 'Name of the S3 bucket to store Terraform state' required: true @@ -150,8 +146,7 @@ runs: -var "password=${{ inputs.password }}" \ -var "vpc_id=${{ inputs.vpc-id }}" \ -var "subnet_ids=${{ inputs.subnet-ids }}" \ - -var "cidr_blocks=${{ inputs.cidr-blocks }}" \ - -var "tags=${{ inputs.tags }}" + -var "cidr_blocks=${{ inputs.cidr-blocks }}" - name: Terraform Apply shell: bash From fc8ba7dbc212f6289bdd2ec8c0a0aa7692dff02a Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 18:39:09 +0200 Subject: [PATCH 22/51] default vpc --- .../actions/aurora-manage-cluster/action.yml | 6 +- .../test-gha-aurora-manage-cluster.yml | 90 +++++++++++++------ modules/aurora/main.tf | 18 ++-- 3 files changed, 73 insertions(+), 41 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index f63c4888..2b71c739 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -32,13 +32,13 @@ inputs: required: true vpc-id: description: 'VPC ID to create the cluster in' - default: '' + required: true subnet-ids: description: 'List of subnet IDs to create the cluster in' - default: '[]' + required: true cidr-blocks: description: 'CIDR blocks to allow access from and to' - default: '[]' + required: true s3-backend-bucket: description: 'Name of the S3 bucket to store Terraform state' required: true diff --git a/.github/workflows/test-gha-aurora-manage-cluster.yml b/.github/workflows/test-gha-aurora-manage-cluster.yml index 0cf565e6..5f5bfdbf 100644 --- a/.github/workflows/test-gha-aurora-manage-cluster.yml +++ b/.github/workflows/test-gha-aurora-manage-cluster.yml @@ -23,6 +23,21 @@ on: description: "Database password." required: false type: string + vpc-id: + description: "VPC ID to create the cluster in." + required: false + type: string + default: '' + subnet-ids: + description: "List of subnet IDs to create the cluster in." + required: false + type: string + default: '' + cidr-blocks: + description: "CIDR blocks to allow access from and to." 
+ required: false + type: string + default: '' pull_request: paths: @@ -59,53 +74,74 @@ jobs: - name: Install tooling using asdf uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 + - name: Import Secrets + id: secrets + uses: hashicorp/vault-action@v3 + with: + url: ${{ secrets.VAULT_ADDR }} + method: approle + roleId: ${{ secrets.VAULT_ROLE_ID }} + secretId: ${{ secrets.VAULT_SECRET_ID }} + exportEnv: false + secrets: | + secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; + secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; + + - name: Add profile credentials to ~/.aws/credentials + run: | + aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} + - name: Get Cluster Info id: commit_info run: | - if [[ -n "${{ github.event.inputs.cluster_name }}" ]]; then - cluster_name="${{ github.event.inputs.cluster_name }}" + if [[ -n "${{ inputs.cluster_name }}" ]]; then + cluster_name="${{ inputs.cluster_name }}" else cluster_name="aurora-$(git rev-parse --short HEAD)" fi - if [[ -n "${{ github.event.inputs.db_username }}" ]]; then - db_username="${{ github.event.inputs.db_username }}" + if [[ -n "${{ inputs.db_username }}" ]]; then + db_username="${{ inputs.db_username }}" else db_username="user$(openssl rand -hex 4)" fi - if [[ -n "${{ github.event.inputs.db_password }}" ]]; then - db_password="${{ github.event.inputs.db_password }}" + if [[ -n "${{ inputs.db_password }}" ]]; then + db_password="${{ inputs.db_password }}" else db_password="$(openssl rand -base64 12)" fi + if [[ -n "${{ inputs.vpc-id }}" ]]; then + vpc_id="${{ inputs.vpc-id }}" + else + vpc_id="$(aws ec2 describe-vpcs --query 'Vpcs[?IsDefault].VpcId' --output text --profile ${{ 
env.AWS_PROFILE }})" + fi + + if [[ -n "${{ inputs.subnet-ids }}" ]]; then + subnet_ids="${{ inputs.subnet-ids }}" + else + subnet_ids="$(aws ec2 describe-subnets --filters Name=vpc-id,Values= --query 'Subnets[*].SubnetId' --output json --profile \"${{ env.AWS_PROFILE }}\")" + fi + + if [[ -n "${{ inputs.cidr-blocks }}" ]]; then + cidr_blocks="${{ inputs.cidr-blocks }}" + else + cidr_blocks='["0.0.0.0/0"]' # Default CIDR to allow access from anywhere + fi + echo "cluster_name=$cluster_name" | tee -a "$GITHUB_OUTPUT" echo "db_username=$db_username" | tee -a "$GITHUB_OUTPUT" echo "db_password=$db_password" | tee -a "$GITHUB_OUTPUT" + echo "vpc_id=$vpc_id" | tee -a "$GITHUB_OUTPUT" + echo "subnet_ids=$subnet_ids" | tee -a "$GITHUB_OUTPUT" + echo "cidr_blocks=$cidr_blocks" | tee -a "$GITHUB_OUTPUT" tf_modules_revision=$(git rev-parse HEAD) echo "tf_modules_revision=$tf_modules_revision" | tee -a "$GITHUB_OUTPUT" - - name: Import Secrets - id: secrets - uses: hashicorp/vault-action@v3 - with: - url: ${{ secrets.VAULT_ADDR }} - method: approle - roleId: ${{ secrets.VAULT_ROLE_ID }} - secretId: ${{ secrets.VAULT_SECRET_ID }} - exportEnv: false - secrets: | - secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; - secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; - - - name: Add profile credentials to ~/.aws/credentials - run: | - aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} - aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} - aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} - - name: Create Aurora Cluster timeout-minutes: 125 uses: ./.github/actions/aurora-manage-cluster @@ -119,6 +155,10 @@ jobs: s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} + vpc-id: ${{ steps.commit_info.outputs.vpc_id 
}} + subnet-ids: ${{ steps.commit_info.outputs.subnet_ids }} + cidr-blocks: ${{ steps.commit_info.outputs.cidr_blocks }} + - name: Delete Aurora Cluster timeout-minutes: 125 if: always() && !(github.event_name == 'workflow_dispatch' && github.event.inputs.delete_cluster == 'false') diff --git a/modules/aurora/main.tf b/modules/aurora/main.tf index ae912210..185a162b 100644 --- a/modules/aurora/main.tf +++ b/modules/aurora/main.tf @@ -15,8 +15,8 @@ resource "aws_rds_cluster" "aurora_cluster" { iam_database_authentication_enabled = var.iam_auth_enabled iam_roles = var.iam_roles # only needed if wanted to grant access from Aurora to e.g. S3 - vpc_security_group_ids = [aws_security_group.this[0].id] - db_subnet_group_name = aws_db_subnet_group.this[0].name + vpc_security_group_ids = [aws_security_group.this.id] + db_subnet_group_name = aws_db_subnet_group.this.name skip_final_snapshot = true apply_immediately = true storage_encrypted = true @@ -50,7 +50,7 @@ resource "aws_rds_cluster_instance" "aurora_instance" { auto_minor_version_upgrade = var.auto_minor_version_upgrade instance_class = var.instance_class - db_subnet_group_name = aws_db_subnet_group.this[0].name + db_subnet_group_name = aws_db_subnet_group.this.name apply_immediately = true @@ -73,43 +73,35 @@ resource "aws_security_group" "this" { vpc_id = var.vpc_id - count = var.vpc_id != "" ? 1 : 0 - tags = var.tags } resource "aws_security_group_rule" "allow_egress" { description = "Allow outgoing traffic for the aurora db" - count = length(var.cidr_blocks) > 0 ? 1 : 0 - type = "egress" from_port = 0 to_port = 0 protocol = "-1" cidr_blocks = var.cidr_blocks - security_group_id = aws_security_group.this[0].id + security_group_id = aws_security_group.this.id } resource "aws_security_group_rule" "allow_ingress" { description = "Allow incoming traffic for the aurora db for port 5432" - count = length(var.cidr_blocks) > 0 ? 
1 : 0 - type = "ingress" from_port = 5432 to_port = 5432 protocol = "tcp" cidr_blocks = var.cidr_blocks - security_group_id = aws_security_group.this[0].id + security_group_id = aws_security_group.this.id } resource "aws_db_subnet_group" "this" { - count = length(var.subnet_ids) > 0 ? 1 : 0 - name = var.cluster_name description = "For Aurora cluster ${var.cluster_name}" From f8228bf53da42f48fc1cd407b78295031431fd88 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 18:44:33 +0200 Subject: [PATCH 23/51] default vpc --- .github/workflows/test-gha-aurora-manage-cluster.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-gha-aurora-manage-cluster.yml b/.github/workflows/test-gha-aurora-manage-cluster.yml index 5f5bfdbf..c54f46c3 100644 --- a/.github/workflows/test-gha-aurora-manage-cluster.yml +++ b/.github/workflows/test-gha-aurora-manage-cluster.yml @@ -123,7 +123,7 @@ jobs: if [[ -n "${{ inputs.subnet-ids }}" ]]; then subnet_ids="${{ inputs.subnet-ids }}" else - subnet_ids="$(aws ec2 describe-subnets --filters Name=vpc-id,Values= --query 'Subnets[*].SubnetId' --output json --profile \"${{ env.AWS_PROFILE }}\")" + subnet_ids=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=${vpc_id}" --query "Subnets[*].SubnetId" --output json --profile "${{ env.AWS_PROFILE }}") fi if [[ -n "${{ inputs.cidr-blocks }}" ]]; then From 0f7dba789a9a33edb92e7529ace9f3bfc14508cb Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 18:47:19 +0200 Subject: [PATCH 24/51] fix --- .github/actions/aurora-manage-cluster/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 2b71c739..eec8616f 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -145,8 +145,8 @@ runs: -var "username=${{ inputs.username }}" \ -var "password=${{ inputs.password }}" \ -var "vpc_id=${{ inputs.vpc-id }}" \ - -var "subnet_ids=${{ inputs.subnet-ids }}" \ - -var "cidr_blocks=${{ inputs.cidr-blocks }}" + -var 'subnet_ids=${{ inputs.subnet-ids }}' \ + -var 'cidr_blocks=${{ inputs.cidr-blocks }}' - name: Terraform Apply shell: bash From 67fd6896693b09ca157364810c3f836f5832f4e0 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Thu, 12 Sep 2024 18:56:11 +0200 Subject: [PATCH 25/51] subnet is now optional --- modules/aurora/main.tf | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/modules/aurora/main.tf b/modules/aurora/main.tf index 185a162b..ccea6b72 100644 --- a/modules/aurora/main.tf +++ b/modules/aurora/main.tf @@ -16,7 +16,7 @@ resource "aws_rds_cluster" "aurora_cluster" { iam_roles = var.iam_roles # only needed if wanted to grant access from Aurora to e.g.
S3 vpc_security_group_ids = [aws_security_group.this.id] - db_subnet_group_name = aws_db_subnet_group.this.name + db_subnet_group_name = aws_db_subnet_group.this[0].name skip_final_snapshot = true apply_immediately = true storage_encrypted = true @@ -50,7 +50,7 @@ resource "aws_rds_cluster_instance" "aurora_instance" { auto_minor_version_upgrade = var.auto_minor_version_upgrade instance_class = var.instance_class - db_subnet_group_name = aws_db_subnet_group.this.name + db_subnet_group_name = aws_db_subnet_group.this[0].name apply_immediately = true @@ -104,6 +104,8 @@ resource "aws_security_group_rule" "allow_ingress" { resource "aws_db_subnet_group" "this" { name = var.cluster_name + count = length(var.subnet_ids) > 0 ? 1 : 0 + description = "For Aurora cluster ${var.cluster_name}" subnet_ids = var.subnet_ids From 0244eb2119b9a0ce369e86ae7673499e9c881935 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Fri, 13 Sep 2024 11:19:48 +0200 Subject: [PATCH 26/51] integrate aurora gha in eks --- .../actions/aurora-manage-cluster/action.yml | 8 + .github/actions/eks-manage-cluster/action.yml | 8 + .../workflows/test-gha-eks-manage-cluster.yml | 121 --------------- ...ra-manage-cluster.yml => test-gha-eks.yml} | 145 +++++++++--------- modules/aurora/main.tf | 4 +- 5 files changed, 88 insertions(+), 198 deletions(-) delete mode 100644 .github/workflows/test-gha-eks-manage-cluster.yml rename .github/workflows/{test-gha-aurora-manage-cluster.yml => test-gha-eks.yml} (62%) diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index eec8616f..a8b6e16c 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -156,3 +156,11 @@ runs: terraform apply -no-color aurora.plan export aurora_endpoint="$(terraform output -raw aurora_endpoint)" echo "aurora_endpoint=$aurora_endpoint" >> "$GITHUB_OUTPUT" + + - name: Fetch 
Terraform Outputs + shell: bash + id: fetch_outputs + working-directory: "${{ inputs.tf-modules-path }}/modules/aurora/" + run: | + all_outputs=$(terraform output -json) + echo "all_terraform_outputs=$all_outputs" | tee -a "$GITHUB_OUTPUT" diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index 33b192b1..2b92db60 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -178,6 +178,14 @@ runs: export cluster_endpoint="$(terraform output -raw cluster_endpoint)" echo "cluster_endpoint=$cluster_endpoint" >> "$GITHUB_OUTPUT" + - name: Fetch Terraform Outputs + shell: bash + id: fetch_outputs + working-directory: "${{ inputs.tf-modules-path }}/modules/eks-cluster/" + run: | + all_outputs=$(terraform output -json) + echo "all_terraform_outputs=$all_outputs" | tee -a "$GITHUB_OUTPUT" + - name: Configure kubectl shell: bash id: kube_config diff --git a/.github/workflows/test-gha-eks-manage-cluster.yml b/.github/workflows/test-gha-eks-manage-cluster.yml deleted file mode 100644 index be2ead6a..00000000 --- a/.github/workflows/test-gha-eks-manage-cluster.yml +++ /dev/null @@ -1,121 +0,0 @@ ---- -name: EKS Cluster creation and destruction test - -on: - schedule: - - cron: '0 1 * * 1' # At 01:00 on Monday. - - workflow_dispatch: - inputs: - cluster_name: - description: "Cluster name." - required: false - type: string - delete_cluster: - description: "Whether to delete the cluster." 
- required: false - type: boolean - default: true - - pull_request: - # the paths should be synced with ../labeler.yml - paths: - - modules/fixtures/backend.tf - - modules/fixtures/fixtures.default.eks.tfvars - - modules/eks-cluster/**.tf - - .tool-versions - - .github/workflows/test-gha-eks-manage-cluster.yml - - .github/actions/eks-manage-cluster/*.yml - - justfile - -# limit to a single execution per actor of this workflow -concurrency: - group: "${{ github.workflow }}-${{ github.ref }}" - cancel-in-progress: true - -env: - - AWS_PROFILE: "infex" - AWS_REGION: "eu-west-2" # /!\ always use one of the available test region https://github.com/camunda/infraex-common-config - - # please keep those synced with tests.yml - TF_STATE_BUCKET: "tests-eks-tf-state-eu-central-1" - TF_STATE_BUCKET_REGION: "eu-central-1" - -jobs: - action-test: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - ref: ${{ github.head_ref }} - fetch-depth: 0 - - - name: Install tooling using asdf - uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 - - - name: Get Cluster Info - id: commit_info - run: | - if [[ -n "${{ github.event.inputs.cluster_name }}" ]]; then - cluster_name="${{ github.event.inputs.cluster_name }}" - else - cluster_name="cl-$(git rev-parse --short HEAD)" - fi - echo "cluster_name=$cluster_name" >> "$GITHUB_OUTPUT" - - # Get the current commit hash for the modules revision - tf_modules_revision=$(git rev-parse HEAD) - - echo "cluster_name=$cluster_name" >> "$GITHUB_OUTPUT" - echo "tf_modules_revision=$tf_modules_revision" >> "$GITHUB_OUTPUT" - - - name: Import Secrets - id: secrets - uses: hashicorp/vault-action@v3 - with: - url: ${{ secrets.VAULT_ADDR }} - method: approle - roleId: ${{ secrets.VAULT_ROLE_ID }} - secretId: ${{ secrets.VAULT_SECRET_ID }} - exportEnv: false - secrets: | - secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; - 
secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; - - - name: Add profile credentials to ~/.aws/credentials - run: | - aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} - aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} - aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} - - - name: Create Cluster - timeout-minutes: 125 - uses: ./.github/actions/eks-manage-cluster - id: create_cluster - with: - cluster-name: ${{ steps.commit_info.outputs.cluster_name }} - aws-region: ${{ env.AWS_REGION }} - s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} - s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} - tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} - - - name: Delete Cluster - timeout-minutes: 125 - if: always() && !(github.event_name == 'workflow_dispatch' && github.event.inputs.delete_cluster == 'false') - uses: ./.github/actions/eks-cleanup-resources - with: - tf-bucket: ${{ env.TF_STATE_BUCKET }} - tf-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} - max-age-hours: 0 - target: ${{ steps.commit_info.outputs.cluster_name }} - - - name: Notify in Slack in case of failure - id: slack-notification - if: failure() && github.event_name == 'schedule' - uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@main - with: - vault_addr: ${{ secrets.VAULT_ADDR }} - vault_role_id: ${{ secrets.VAULT_ROLE_ID }} - vault_secret_id: ${{ secrets.VAULT_SECRET_ID }} diff --git a/.github/workflows/test-gha-aurora-manage-cluster.yml b/.github/workflows/test-gha-eks.yml similarity index 62% rename from .github/workflows/test-gha-aurora-manage-cluster.yml rename to .github/workflows/test-gha-eks.yml index c54f46c3..6305d06e 100644 --- a/.github/workflows/test-gha-aurora-manage-cluster.yml +++ b/.github/workflows/test-gha-eks.yml @@ -1,18 +1,22 @@ -name: Aurora Cluster 
creation and destruction test +--- +name: EKS Cluster with an AuroraDB creation and destruction test on: schedule: - - cron: '0 2 * * 1' # At 02:00 on Monday. + - cron: '0 1 * * 1' # At 01:00 on Monday. workflow_dispatch: inputs: cluster_name: - description: "Aurora Cluster name." + description: "Cluster name." required: false type: string + create_db: + description: "Should the aurora db be created" + type: boolean + default: true delete_cluster: - description: "Whether to delete the Aurora cluster." - required: false + description: "Whether to delete the cluster." type: boolean default: true db_username: @@ -23,41 +27,31 @@ on: description: "Database password." required: false type: string - vpc-id: - description: "VPC ID to create the cluster in." - required: false - type: string - default: '' - subnet-ids: - description: "List of subnet IDs to create the cluster in." - required: false - type: string - default: '' - cidr-blocks: - description: "CIDR blocks to allow access from and to." 
- required: false - type: string - default: '' pull_request: + # the paths should be synced with ../labeler.yml paths: - modules/fixtures/backend.tf + - modules/fixtures/fixtures.default.eks.tfvars - modules/fixtures/fixtures.default.aurora.tfvars + - modules/eks-cluster/**.tf - modules/aurora/**.tf - .tool-versions - - .github/workflows/test-gha-aurora-manage-cluster.yml - - .github/actions/aurora-manage-cluster/*.yml + - .github/workflows/test-gha-eks-manage-cluster.yml + - .github/actions/eks-manage-cluster/*.yml - justfile +# limit to a single execution per actor of this workflow concurrency: group: "${{ github.workflow }}-${{ github.ref }}" cancel-in-progress: true env: + AWS_PROFILE: "infex" - AWS_REGION: "eu-west-2" + AWS_REGION: "eu-west-2" # /!\ always use one of the available test region https://github.com/camunda/infraex-common-config - # please keep those synced with tests.yml + # please keep those synced with tests.yml TF_STATE_BUCKET: "tests-eks-tf-state-eu-central-1" TF_STATE_BUCKET_REGION: "eu-central-1" @@ -74,78 +68,81 @@ jobs: - name: Install tooling using asdf uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 - - name: Import Secrets - id: secrets - uses: hashicorp/vault-action@v3 - with: - url: ${{ secrets.VAULT_ADDR }} - method: approle - roleId: ${{ secrets.VAULT_ROLE_ID }} - secretId: ${{ secrets.VAULT_SECRET_ID }} - exportEnv: false - secrets: | - secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; - secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; - - - name: Add profile credentials to ~/.aws/credentials - run: | - aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} - aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} - aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} - - name: Get Cluster Info id: commit_info run: 
| - if [[ -n "${{ inputs.cluster_name }}" ]]; then - cluster_name="${{ inputs.cluster_name }}" + if [[ -n "${{ github.event.inputs.cluster_name }}" ]]; then + cluster_name="${{ github.event.inputs.cluster_name }}" else - cluster_name="aurora-$(git rev-parse --short HEAD)" + cluster_name="cl-$(git rev-parse --short HEAD)" fi + echo "cluster_name=$cluster_name" | tee -a "$GITHUB_OUTPUT" if [[ -n "${{ inputs.db_username }}" ]]; then db_username="${{ inputs.db_username }}" else db_username="user$(openssl rand -hex 4)" fi + echo "db_username=$db_username" | tee -a "$GITHUB_OUTPUT" if [[ -n "${{ inputs.db_password }}" ]]; then db_password="${{ inputs.db_password }}" else db_password="$(openssl rand -base64 12)" fi + echo "db_password=$db_password" | tee -a "$GITHUB_OUTPUT" - if [[ -n "${{ inputs.vpc-id }}" ]]; then - vpc_id="${{ inputs.vpc-id }}" - else - vpc_id="$(aws ec2 describe-vpcs --query 'Vpcs[?IsDefault].VpcId' --output text --profile ${{ env.AWS_PROFILE }})" - fi + # Get the current commit hash for the modules revision + tf_modules_revision=$(git rev-parse HEAD) + echo "tf_modules_revision=$tf_modules_revision" | tee -a "$GITHUB_OUTPUT" - if [[ -n "${{ inputs.subnet-ids }}" ]]; then - subnet_ids="${{ inputs.subnet-ids }}" - else - subnet_ids=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=${vpc_id}" --query "Subnets[*].SubnetId" --output json --profile "${{ env.AWS_PROFILE }}") - fi + - name: Import Secrets + id: secrets + uses: hashicorp/vault-action@v3 + with: + url: ${{ secrets.VAULT_ADDR }} + method: approle + roleId: ${{ secrets.VAULT_ROLE_ID }} + secretId: ${{ secrets.VAULT_SECRET_ID }} + exportEnv: false + secrets: | + secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; + secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; - if [[ -n "${{ inputs.cidr-blocks }}" ]]; then - cidr_blocks="${{ inputs.cidr-blocks }}" - else - cidr_blocks='["0.0.0.0/0"]' # Default CIDR to allow access from anywhere - fi + - 
name: Add profile credentials to ~/.aws/credentials + run: | + aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} - echo "cluster_name=$cluster_name" | tee -a "$GITHUB_OUTPUT" - echo "db_username=$db_username" | tee -a "$GITHUB_OUTPUT" - echo "db_password=$db_password" | tee -a "$GITHUB_OUTPUT" - echo "vpc_id=$vpc_id" | tee -a "$GITHUB_OUTPUT" - echo "subnet_ids=$subnet_ids" | tee -a "$GITHUB_OUTPUT" - echo "cidr_blocks=$cidr_blocks" | tee -a "$GITHUB_OUTPUT" + - name: Create EKS Cluster + timeout-minutes: 125 + uses: ./.github/actions/eks-manage-cluster + id: create_eks_cluster + with: + cluster-name: ${{ steps.commit_info.outputs.cluster_name }} + aws-region: ${{ env.AWS_REGION }} + s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} + s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} + tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} - tf_modules_revision=$(git rev-parse HEAD) - echo "tf_modules_revision=$tf_modules_revision" | tee -a "$GITHUB_OUTPUT" + - name: Prepare Aurora Cluster + id: prepare_aurora_cluster + run: | + vpc_id=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -r '.vpc_id') + echo "vpc_id=$vpc_id" | tee -a "$GITHUB_OUTPOUT" + + private_subnet_ids=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -r '.private_subnet_ids') + echo "private_subnet_ids=$private_subnet_ids" | tee -a "$GITHUB_OUTPOUT" + + private_vpc_cidr_blocks=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -r '.private_vpc_cidr_blocks') + echo "private_vpc_cidr_blocks=$private_vpc_cidr_blocks" | tee -a "$GITHUB_OUTPOUT" - name: Create Aurora Cluster timeout-minutes: 125 uses: ./.github/actions/aurora-manage-cluster - id: 
create_cluster + id: create_aurora_cluster + if: inputs.create_db == 'true' with: cluster-name: ${{ steps.commit_info.outputs.cluster_name }} username: ${{ steps.commit_info.outputs.db_username }} @@ -155,11 +152,11 @@ jobs: s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} - vpc-id: ${{ steps.commit_info.outputs.vpc_id }} - subnet-ids: ${{ steps.commit_info.outputs.subnet_ids }} - cidr-blocks: ${{ steps.commit_info.outputs.cidr_blocks }} + vpc-id: ${{ steps.prepare_aurora_cluster.outputs.vpc_id }} + subnet-ids: ${{ steps.prepare_aurora_cluster.outputs.private_subnet_ids }} + cidr-blocks: ${{ steps.prepare_aurora_cluster.outputs.private_vpc_cidr_blocks }} - - name: Delete Aurora Cluster + - name: Delete Cluster timeout-minutes: 125 if: always() && !(github.event_name == 'workflow_dispatch' && github.event.inputs.delete_cluster == 'false') uses: ./.github/actions/eks-cleanup-resources diff --git a/modules/aurora/main.tf b/modules/aurora/main.tf index ccea6b72..5b7060ad 100644 --- a/modules/aurora/main.tf +++ b/modules/aurora/main.tf @@ -50,7 +50,7 @@ resource "aws_rds_cluster_instance" "aurora_instance" { auto_minor_version_upgrade = var.auto_minor_version_upgrade instance_class = var.instance_class - db_subnet_group_name = aws_db_subnet_group.this[0].name + db_subnet_group_name = aws_db_subnet_group.this.name apply_immediately = true @@ -104,8 +104,6 @@ resource "aws_security_group_rule" "allow_ingress" { resource "aws_db_subnet_group" "this" { name = var.cluster_name - count = length(var.subnet_ids) > 0 ? 1 : 0 - description = "For Aurora cluster ${var.cluster_name}" subnet_ids = var.subnet_ids From 9d329b97e04d5b8a7691522f4433b47224753a37 Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Fri, 13 Sep 2024 11:23:38 +0200 Subject: [PATCH 27/51] fix index --- modules/aurora/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/aurora/main.tf b/modules/aurora/main.tf index 5b7060ad..185a162b 100644 --- a/modules/aurora/main.tf +++ b/modules/aurora/main.tf @@ -16,7 +16,7 @@ resource "aws_rds_cluster" "aurora_cluster" { iam_roles = var.iam_roles # only needed if wanted to grant access from Aurora to e.g. S3 vpc_security_group_ids = [aws_security_group.this.id] - db_subnet_group_name = aws_db_subnet_group.this[0].name + db_subnet_group_name = aws_db_subnet_group.this.name skip_final_snapshot = true apply_immediately = true storage_encrypted = true From e9e6805936f952c34c9dc6e6bdd93b2a5c25b7c0 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Fri, 13 Sep 2024 12:13:56 +0200 Subject: [PATCH 28/51] ensure output is single line --- .github/actions/aurora-manage-cluster/action.yml | 2 +- .github/actions/eks-manage-cluster/action.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index a8b6e16c..65278216 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -162,5 +162,5 @@ runs: id: fetch_outputs working-directory: "${{ inputs.tf-modules-path }}/modules/aurora/" run: | - all_outputs=$(terraform output -json) + all_outputs=$(terraform output -json | jq -c .) 
echo "all_terraform_outputs=$all_outputs" | tee -a "$GITHUB_OUTPUT" diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index 2b92db60..752ddf50 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -183,7 +183,7 @@ runs: id: fetch_outputs working-directory: "${{ inputs.tf-modules-path }}/modules/eks-cluster/" run: | - all_outputs=$(terraform output -json) + all_outputs=$(terraform output -json | jq -c .) echo "all_terraform_outputs=$all_outputs" | tee -a "$GITHUB_OUTPUT" - name: Configure kubectl From 00133d86e865cc82e9d16ec68e647265e0e31888 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Fri, 13 Sep 2024 12:47:14 +0200 Subject: [PATCH 29/51] fix tee --- .github/workflows/test-gha-eks.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test-gha-eks.yml b/.github/workflows/test-gha-eks.yml index 6305d06e..7ab8c490 100644 --- a/.github/workflows/test-gha-eks.yml +++ b/.github/workflows/test-gha-eks.yml @@ -130,13 +130,13 @@ jobs: id: prepare_aurora_cluster run: | vpc_id=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -r '.vpc_id') - echo "vpc_id=$vpc_id" | tee -a "$GITHUB_OUTPOUT" + echo "vpc_id=$vpc_id" | tee -a "$GITHUB_OUTPUT" private_subnet_ids=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -r '.private_subnet_ids') - echo "private_subnet_ids=$private_subnet_ids" | tee -a "$GITHUB_OUTPOUT" + echo "private_subnet_ids=$private_subnet_ids" | tee -a "$GITHUB_OUTPUT" private_vpc_cidr_blocks=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -r '.private_vpc_cidr_blocks') - echo "private_vpc_cidr_blocks=$private_vpc_cidr_blocks" | tee -a "$GITHUB_OUTPOUT" + echo "private_vpc_cidr_blocks=$private_vpc_cidr_blocks" | tee -a "$GITHUB_OUTPUT" - name: Create Aurora Cluster timeout-minutes: 125 
From 9e39163749ede5d0a62d8b28b9dafa3aca0a072e Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Fri, 13 Sep 2024 14:02:34 +0200 Subject: [PATCH 30/51] fix values --- .github/workflows/test-gha-eks.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test-gha-eks.yml b/.github/workflows/test-gha-eks.yml index 7ab8c490..90ec1d52 100644 --- a/.github/workflows/test-gha-eks.yml +++ b/.github/workflows/test-gha-eks.yml @@ -129,13 +129,13 @@ jobs: - name: Prepare Aurora Cluster id: prepare_aurora_cluster run: | - vpc_id=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -r '.vpc_id') + vpc_id=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -c -r '.vpc_id.value') echo "vpc_id=$vpc_id" | tee -a "$GITHUB_OUTPUT" - private_subnet_ids=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -r '.private_subnet_ids') + private_subnet_ids=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -c -r '.private_subnet_ids.value') echo "private_subnet_ids=$private_subnet_ids" | tee -a "$GITHUB_OUTPUT" - private_vpc_cidr_blocks=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -r '.private_vpc_cidr_blocks') + private_vpc_cidr_blocks=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -c -r '.private_vpc_cidr_blocks.value') echo "private_vpc_cidr_blocks=$private_vpc_cidr_blocks" | tee -a "$GITHUB_OUTPUT" - name: Create Aurora Cluster From 3318c31efd475355f548142bdbc172c6c55f39db Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Fri, 13 Sep 2024 14:35:28 +0200 Subject: [PATCH 31/51] fix types --- .github/workflows/test-gha-eks.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/test-gha-eks.yml b/.github/workflows/test-gha-eks.yml index 90ec1d52..553750fe 100644 --- a/.github/workflows/test-gha-eks.yml +++ b/.github/workflows/test-gha-eks.yml @@ -13,12 +13,10 @@ on: type: string create_db: description: "Should the aurora db be created" - type: boolean - default: true + default: "true" delete_cluster: description: "Whether to delete the cluster." - type: boolean - default: true + default: "true" db_username: description: "Database username." required: false @@ -156,9 +154,9 @@ jobs: subnet-ids: ${{ steps.prepare_aurora_cluster.outputs.private_subnet_ids }} cidr-blocks: ${{ steps.prepare_aurora_cluster.outputs.private_vpc_cidr_blocks }} - - name: Delete Cluster + - name: Delete Clusters timeout-minutes: 125 - if: always() && !(github.event_name == 'workflow_dispatch' && github.event.inputs.delete_cluster == 'false') + if: always() && !(github.event_name == 'workflow_dispatch' && inputs.delete_cluster == 'false') uses: ./.github/actions/eks-cleanup-resources with: tf-bucket: ${{ env.TF_STATE_BUCKET }} From 32906875a481d940585fc8b5e223b381de0e9d75 Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Fri, 13 Sep 2024 15:09:13 +0200 Subject: [PATCH 32/51] fix inputs --- .github/workflows/test-gha-eks.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/test-gha-eks.yml b/.github/workflows/test-gha-eks.yml index 553750fe..cb4a854c 100644 --- a/.github/workflows/test-gha-eks.yml +++ b/.github/workflows/test-gha-eks.yml @@ -53,6 +53,8 @@ env: TF_STATE_BUCKET: "tests-eks-tf-state-eu-central-1" TF_STATE_BUCKET_REGION: "eu-central-1" + CREATE_DB: "${{ github.event.inputs.create_db || 'true' }}" + jobs: action-test: runs-on: ubuntu-latest @@ -69,8 +71,8 @@ jobs: - name: Get Cluster Info id: commit_info run: | - if [[ -n "${{ github.event.inputs.cluster_name }}" ]]; then - cluster_name="${{ github.event.inputs.cluster_name }}" + if [[ -n "${{ inputs.cluster_name }}" ]]; then + cluster_name="${{ inputs.cluster_name }}" else cluster_name="cl-$(git rev-parse --short HEAD)" fi @@ -140,7 +142,7 @@ jobs: timeout-minutes: 125 uses: ./.github/actions/aurora-manage-cluster id: create_aurora_cluster - if: inputs.create_db == 'true' + if: env.CREATE_DB == 'true' with: cluster-name: ${{ steps.commit_info.outputs.cluster_name }} username: ${{ steps.commit_info.outputs.db_username }} From 2182d64fe193a7b361147a8151cf2c451a476dbf Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Fri, 13 Sep 2024 16:29:09 +0200 Subject: [PATCH 33/51] fix availability zones --- .../actions/aurora-manage-cluster/action.yml | 42 +++++++++++++++++++ .github/workflows/test-gha-eks.yml | 5 +++ 2 files changed, 47 insertions(+) diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 65278216..b9806351 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -8,37 +8,71 @@ inputs: aws-region: description: 'AWS region where the RDS Aurora cluster will be deployed' required: true + cluster-name: description: 'Name of the RDS Aurora cluster to deploy' required: true + engine-version: description: 'Version of the Aurora engine to use' required: true # TODO: renovate default: "15.4" + instance-class: description: 'Instance class for the Aurora cluster' required: true default: "db.t3.medium" + num-instances: description: 'Number of instances in the Aurora cluster' required: true default: "1" + username: description: 'Username for the PostgreSQL admin user' required: true + password: description: 'Password for the PostgreSQL admin user' required: true + vpc-id: description: 'VPC ID to create the cluster in' required: true + subnet-ids: description: 'List of subnet IDs to create the cluster in' required: true + cidr-blocks: description: 'CIDR blocks to allow access from and to' required: true + + auto-minor-version-upgrade: + description: 'If true, minor engine upgrades will be applied automatically to the DB instance during the maintenance window' + default: "true" # Default value from variable.tf + + availability-zones: + description: 'Array of availability zones to use for the Aurora cluster' + default: '' + + iam-roles: + description: 'Allows propagating additional IAM roles to the Aurora cluster to allow e.g. 
access to S3' + default: "[]" # Default value from variable.tf + + iam-auth-enabled: + description: 'Determines whether IAM auth should be activated for IRSA usage' + default: "false" # Default value from variable.tf + + ca-cert-identifier: + description: 'Specifies the identifier of the CA certificate for the DB instance' + default: 'rds-ca-rsa2048-g1' # Default value from variable.tf + + default-database-name: + description: 'The name for the automatically created database on cluster creation.' + default: 'camunda' # Default value from variable.tf + s3-backend-bucket: description: 'Name of the S3 bucket to store Terraform state' required: true @@ -48,6 +82,7 @@ inputs: description: 'Git revision of the tf modules to use' default: 'main' required: true + tf-modules-path: description: 'Path where the tf Aurora modules will be cloned' default: './.action-tf-modules/aurora/' @@ -143,7 +178,14 @@ runs: -var "instance_class=${{ inputs.instance-class }}" \ -var "num_instances=${{ inputs.num-instances }}" \ -var "username=${{ inputs.username }}" \ + -var "region=${{ inputs.aws-region }}" \ -var "password=${{ inputs.password }}" \ + -var "auto_minor_version_upgrade=${{ inputs.auto-minor-version-upgrade }}" \ + -var 'availability_zones=${{ inputs.availability-zones }}' \ + -var 'iam_roles=${{ inputs.iam-roles }}' \ + -var "iam_auth_enabled=${{ inputs.iam-auth-enabled }}" \ + -var "ca_cert_identifier=${{ inputs.ca-cert-identifier }}" \ + -var "default_database_name=${{ inputs.default-database-name }}" \ -var "vpc_id=${{ inputs.vpc-id }}" \ -var 'subnet_ids=${{ inputs.subnet-ids }}' \ -var 'cidr_blocks=${{ inputs.cidr-blocks }}' diff --git a/.github/workflows/test-gha-eks.yml b/.github/workflows/test-gha-eks.yml index cb4a854c..141217e5 100644 --- a/.github/workflows/test-gha-eks.yml +++ b/.github/workflows/test-gha-eks.yml @@ -138,6 +138,9 @@ jobs: private_vpc_cidr_blocks=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -c -r 
'.private_vpc_cidr_blocks.value') echo "private_vpc_cidr_blocks=$private_vpc_cidr_blocks" | tee -a "$GITHUB_OUTPUT" + availability_zones=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=${vpc_id}" --query 'Subnets[].AvailabilityZone' --output json | jq '.' -c) + echo "availability_zones=$availability_zones" | tee -a "$GITHUB_OUTPUT" + - name: Create Aurora Cluster timeout-minutes: 125 uses: ./.github/actions/aurora-manage-cluster @@ -156,6 +159,8 @@ jobs: subnet-ids: ${{ steps.prepare_aurora_cluster.outputs.private_subnet_ids }} cidr-blocks: ${{ steps.prepare_aurora_cluster.outputs.private_vpc_cidr_blocks }} + availability-zones: ${{ steps.prepare_aurora_cluster.outputs.availability_zones }} + - name: Delete Clusters timeout-minutes: 125 if: always() && !(github.event_name == 'workflow_dispatch' && inputs.delete_cluster == 'false') From 6e9f87c1179420ddad46ff4ff7e39331c0757f99 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Fri, 13 Sep 2024 17:06:45 +0200 Subject: [PATCH 34/51] update action --- .../actions/aurora-manage-cluster/README.md | 26 ++++++++++++++----- .../actions/aurora-manage-cluster/action.yml | 8 +----- .github/workflows/test-gha-eks.yml | 4 +-- 3 files changed, 22 insertions(+), 16 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/README.md b/.github/actions/aurora-manage-cluster/README.md index cf0da812..f65b539b 100644 --- a/.github/actions/aurora-manage-cluster/README.md +++ b/.github/actions/aurora-manage-cluster/README.md @@ -13,20 +13,25 @@ The **Deploy RDS Aurora Cluster** action enables you to: ## Inputs -The following inputs are required for the action: +The following inputs are required or optional for the action: | Input | Description | Required | Default | |-------|-------------|----------|---------| -| `aws-region` | AWS region where the RDS Aurora cluster will be deployed. | Yes | - | | `cluster-name` | Name of the RDS Aurora cluster to deploy. 
| Yes | - | | `engine-version` | Version of the Aurora engine to use. | Yes | see `action.yml` | | `instance-class` | Instance class for the Aurora cluster. | Yes | `db.t3.medium` | | `num-instances` | Number of instances in the Aurora cluster. | Yes | `1` | | `username` | Username for the PostgreSQL admin user. | Yes | - | | `password` | Password for the PostgreSQL admin user. | Yes | - | -| `vpc-id` | VPC ID to create the cluster in. | No | - | -| `subnet-ids` | List of subnet IDs to create the cluster in. | No | `[]` | -| `cidr-blocks` | CIDR blocks to allow access from and to. | No | `[]` | +| `vpc-id` | VPC ID to create the cluster in. | Yes | - | +| `subnet-ids` | List of subnet IDs to create the cluster in. | Yes | - | +| `cidr-blocks` | CIDR blocks to allow access from and to. | Yes | - | +| `auto-minor-version-upgrade` | If true, minor engine upgrades will be applied automatically to the DB instance during the maintenance window. | No | `true` | +| `availability-zones` | Array of availability zones to use for the Aurora cluster. | No | `[]` | +| `iam-roles` | Allows propagating additional IAM roles to the Aurora cluster for features like access to S3. | No | `[]` | +| `iam-auth-enabled` | Determines whether IAM authentication should be activated for IRSA usage. | No | `false` | +| `ca-cert-identifier` | Specifies the identifier of the CA certificate for the DB instance. | No | `rds-ca-rsa2048-g1` | +| `default-database-name` | The name for the automatically created database on cluster creation. | No | `camunda` | | `s3-backend-bucket` | Name of the S3 bucket to store Terraform state. | Yes | - | | `s3-bucket-region` | Region of the bucket containing the resources states. Fallbacks to `aws-region` if not set. | No | - | | `tf-modules-revision` | Git revision of the Terraform modules to use. 
| Yes | `main` | @@ -59,7 +64,6 @@ jobs: - name: Deploy Aurora Cluster uses: camunda/camunda-tf-eks-module/aurora-manage-cluster@main with: - aws-region: 'us-west-2' cluster-name: 'my-aurora-cluster' engine-version: '15.4' instance-class: 'db.t3.medium' @@ -69,7 +73,15 @@ jobs: vpc-id: 'vpc-12345678' subnet-ids: 'subnet-12345,subnet-67890' cidr-blocks: '10.0.0.0/16' - tags: '{"env": "prod", "team": "devops"}' + auto-minor-version-upgrade: 'true' + availability-zones: '["us-west-2a", "us-west-2b"]' + iam-roles: '["arn:aws:iam::123456789012:role/my-role"]' + iam-auth-enabled: 'false' + ca-cert-identifier: 'rds-ca-rsa2048-g1' + default-database-name: 'mydatabase' s3-backend-bucket: 'my-terraform-state-bucket' s3-bucket-region: 'us-west-2' + tf-modules-revision: 'main' + tf-modules-path: './.action-tf-modules/aurora/' + awscli-version: '2.15.52' ``` diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index b9806351..2cce8516 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -5,10 +5,6 @@ description: | This action will also install Terraform and awscli. It will output the Aurora cluster endpoint. 
inputs: - aws-region: - description: 'AWS region where the RDS Aurora cluster will be deployed' - required: true - cluster-name: description: 'Name of the RDS Aurora cluster to deploy' required: true @@ -77,7 +73,7 @@ inputs: description: 'Name of the S3 bucket to store Terraform state' required: true s3-bucket-region: - description: 'Region of the bucket containing the resources states, if not set, will fallback on aws-region' + description: 'Region of the bucket containing the resources states' tf-modules-revision: description: 'Git revision of the tf modules to use' default: 'main' @@ -139,7 +135,6 @@ runs: awscli-version: '${{ inputs.awscli-version }}' terraform-version: '${{ inputs.terraform-version }}' - aws-region: '${{ inputs.aws-region }}' s3-backend-bucket: '${{ inputs.s3-backend-bucket }}' s3-bucket-region: '${{ inputs.s3-bucket-region }}' @@ -178,7 +173,6 @@ runs: -var "instance_class=${{ inputs.instance-class }}" \ -var "num_instances=${{ inputs.num-instances }}" \ -var "username=${{ inputs.username }}" \ - -var "region=${{ inputs.aws-region }}" \ -var "password=${{ inputs.password }}" \ -var "auto_minor_version_upgrade=${{ inputs.auto-minor-version-upgrade }}" \ -var 'availability_zones=${{ inputs.availability-zones }}' \ diff --git a/.github/workflows/test-gha-eks.yml b/.github/workflows/test-gha-eks.yml index 141217e5..b2721732 100644 --- a/.github/workflows/test-gha-eks.yml +++ b/.github/workflows/test-gha-eks.yml @@ -138,7 +138,7 @@ jobs: private_vpc_cidr_blocks=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -c -r '.private_vpc_cidr_blocks.value') echo "private_vpc_cidr_blocks=$private_vpc_cidr_blocks" | tee -a "$GITHUB_OUTPUT" - availability_zones=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=${vpc_id}" --query 'Subnets[].AvailabilityZone' --output json | jq '.' 
-c) + availability_zones=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=${vpc_id}" --query 'Subnets[].AvailabilityZone' --output json | jq 'unique' -c) echo "availability_zones=$availability_zones" | tee -a "$GITHUB_OUTPUT" - name: Create Aurora Cluster @@ -150,7 +150,7 @@ jobs: cluster-name: ${{ steps.commit_info.outputs.cluster_name }} username: ${{ steps.commit_info.outputs.db_username }} password: ${{ steps.commit_info.outputs.db_password }} - aws-region: ${{ env.AWS_REGION }} + s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} From f80203e1da5efdc879b1c3b23654d80b0fd475f9 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Fri, 13 Sep 2024 17:34:23 +0200 Subject: [PATCH 35/51] fix iam roles --- .github/actions/aurora-manage-cluster/README.md | 2 +- .github/actions/aurora-manage-cluster/action.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/README.md b/.github/actions/aurora-manage-cluster/README.md index f65b539b..b763c6b6 100644 --- a/.github/actions/aurora-manage-cluster/README.md +++ b/.github/actions/aurora-manage-cluster/README.md @@ -28,7 +28,7 @@ The following inputs are required or optional for the action: | `cidr-blocks` | CIDR blocks to allow access from and to. | Yes | - | | `auto-minor-version-upgrade` | If true, minor engine upgrades will be applied automatically to the DB instance during the maintenance window. | No | `true` | | `availability-zones` | Array of availability zones to use for the Aurora cluster. | No | `[]` | -| `iam-roles` | Allows propagating additional IAM roles to the Aurora cluster for features like access to S3. | No | `[]` | +| `iam-roles` | Allows propagating additional IAM roles to the Aurora cluster for features like access to S3. 
| No | `` | | `iam-auth-enabled` | Determines whether IAM authentication should be activated for IRSA usage. | No | `false` | | `ca-cert-identifier` | Specifies the identifier of the CA certificate for the DB instance. | No | `rds-ca-rsa2048-g1` | | `default-database-name` | The name for the automatically created database on cluster creation. | No | `camunda` | diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 2cce8516..ede57a09 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -55,7 +55,7 @@ inputs: iam-roles: description: 'Allows propagating additional IAM roles to the Aurora cluster to allow e.g. access to S3' - default: "[]" # Default value from variable.tf + default: "" # Default value from variable.tf iam-auth-enabled: description: 'Determines whether IAM auth should be activated for IRSA usage' From 177760ab2fe6b06a9544be50b46609db48aebed3 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Fri, 13 Sep 2024 18:08:40 +0200 Subject: [PATCH 36/51] enforce list of strings --- .github/actions/aurora-manage-cluster/README.md | 2 +- .github/actions/aurora-manage-cluster/action.yml | 2 +- modules/aurora/README.md | 2 +- modules/aurora/variables.tf | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/README.md b/.github/actions/aurora-manage-cluster/README.md index b763c6b6..f65b539b 100644 --- a/.github/actions/aurora-manage-cluster/README.md +++ b/.github/actions/aurora-manage-cluster/README.md @@ -28,7 +28,7 @@ The following inputs are required or optional for the action: | `cidr-blocks` | CIDR blocks to allow access from and to. | Yes | - | | `auto-minor-version-upgrade` | If true, minor engine upgrades will be applied automatically to the DB instance during the maintenance window. 
| No | `true` | | `availability-zones` | Array of availability zones to use for the Aurora cluster. | No | `[]` | -| `iam-roles` | Allows propagating additional IAM roles to the Aurora cluster for features like access to S3. | No | `` | +| `iam-roles` | Allows propagating additional IAM roles to the Aurora cluster for features like access to S3. | No | `[]` | | `iam-auth-enabled` | Determines whether IAM authentication should be activated for IRSA usage. | No | `false` | | `ca-cert-identifier` | Specifies the identifier of the CA certificate for the DB instance. | No | `rds-ca-rsa2048-g1` | | `default-database-name` | The name for the automatically created database on cluster creation. | No | `camunda` | diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index ede57a09..2cce8516 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -55,7 +55,7 @@ inputs: iam-roles: description: 'Allows propagating additional IAM roles to the Aurora cluster to allow e.g. access to S3' - default: "" # Default value from variable.tf + default: "[]" # Default value from variable.tf iam-auth-enabled: description: 'Determines whether IAM auth should be activated for IRSA usage' diff --git a/modules/aurora/README.md b/modules/aurora/README.md index 39e8c7f2..26c72c90 100644 --- a/modules/aurora/README.md +++ b/modules/aurora/README.md @@ -55,7 +55,7 @@ No modules. | [engine](#input\_engine) | The engine type e.g. aurora, aurora-mysql, aurora-postgresql, ... | `string` | `"aurora-postgresql"` | no | | [engine\_version](#input\_engine\_version) | The DB engine version for Postgres to use. 
| `string` | `"15.4"` | no | | [iam\_auth\_enabled](#input\_iam\_auth\_enabled) | Determines whether IAM auth should be activated for IRSA usage | `bool` | `false` | no | -| [iam\_roles](#input\_iam\_roles) | Allows propagating additional IAM roles to the Aurora cluster to allow e.g. access to S3 | `list` | `[]` | no | +| [iam\_roles](#input\_iam\_roles) | Allows propagating additional IAM roles to the Aurora cluster to allow e.g. access to S3 | `list(string)` | `[]` | no | | [instance\_class](#input\_instance\_class) | The instance type of the Aurora instances | `string` | `"db.t3.medium"` | no | | [num\_instances](#input\_num\_instances) | Number of instances | `string` | `"1"` | no | | [password](#input\_password) | The password for the postgres admin user. Important: secret value! | `string` | n/a | yes | diff --git a/modules/aurora/variables.tf b/modules/aurora/variables.tf index f208f256..ebdbb766 100644 --- a/modules/aurora/variables.tf +++ b/modules/aurora/variables.tf @@ -69,6 +69,7 @@ variable "vpc_id" { # Allows adding additional iam roles to grant access from Aurora to e.g. S3 variable "iam_roles" { + type = list(string) default = [] description = "Allows propagating additional IAM roles to the Aurora cluster to allow e.g. access to S3" } From 214d0f32104f99796e3fafa333ed4e0eeeaa3262 Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Fri, 13 Sep 2024 19:10:00 +0200 Subject: [PATCH 37/51] pin versions and cleanup --- .../actions/aurora-manage-cluster/action.yml | 2 +- .github/workflows/daily-cleanup.yml | 2 +- .github/workflows/labeler.yml | 2 +- .github/workflows/links.yml | 2 +- .github/workflows/test-gha-eks.yml | 9 ++++---- .github/workflows/tests.yml | 22 +++++++++---------- modules/aurora/variables.tf | 3 ++- 7 files changed, 22 insertions(+), 20 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 2cce8516..9eed5be9 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -12,7 +12,7 @@ inputs: engine-version: description: 'Version of the Aurora engine to use' required: true - # TODO: renovate + # renovate: datasource=endoflife-date depName=amazon-rds-postgresql versioning=semver default: "15.4" instance-class: diff --git a/.github/workflows/daily-cleanup.yml b/.github/workflows/daily-cleanup.yml index 726ef4b3..280d8773 100644 --- a/.github/workflows/daily-cleanup.yml +++ b/.github/workflows/daily-cleanup.yml @@ -76,7 +76,7 @@ jobs: - name: Notify in Slack in case of failure id: slack-notification if: failure() && github.event_name == 'schedule' - uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@5b7cb35357c6cbfd7ebe0d9f45b0bcd45632676b # main + uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main with: vault_addr: ${{ secrets.VAULT_ADDR }} vault_role_id: ${{ secrets.VAULT_ROLE_ID }} diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index b911aa4a..0af34642 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -19,7 +19,7 @@ jobs: - name: Notify in Slack in case of failure id: slack-notification if: failure() && github.event_name == 'schedule' - 
uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@5b7cb35357c6cbfd7ebe0d9f45b0bcd45632676b # main + uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main with: vault_addr: ${{ secrets.VAULT_ADDR }} vault_role_id: ${{ secrets.VAULT_ROLE_ID }} diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index e861d61b..f9617703 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -45,7 +45,7 @@ jobs: - name: Notify in Slack in case of failure id: slack-notification if: failure() && github.event_name == 'schedule' - uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@5b7cb35357c6cbfd7ebe0d9f45b0bcd45632676b # main + uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main with: vault_addr: ${{ secrets.VAULT_ADDR }} vault_role_id: ${{ secrets.VAULT_ROLE_ID }} diff --git a/.github/workflows/test-gha-eks.yml b/.github/workflows/test-gha-eks.yml index b2721732..1026fa41 100644 --- a/.github/workflows/test-gha-eks.yml +++ b/.github/workflows/test-gha-eks.yml @@ -47,7 +47,8 @@ concurrency: env: AWS_PROFILE: "infex" - AWS_REGION: "eu-west-2" # /!\ always use one of the available test region https://github.com/camunda/infraex-common-config + AWS_REGION: "eu-west-2" + # /!\ always use one of the available test region https://github.com/camunda/infraex-common-config # please keep those synced with tests.yml TF_STATE_BUCKET: "tests-eks-tf-state-eu-central-1" @@ -60,7 +61,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 with: ref: ${{ github.head_ref }} fetch-depth: 0 @@ -98,7 +99,7 @@ jobs: - name: Import Secrets id: secrets - uses: hashicorp/vault-action@v3 + uses: hashicorp/vault-action@d1720f055e0635fd932a1d2a48f87a666a57906c # v3 with: url: 
${{ secrets.VAULT_ADDR }} method: approle @@ -174,7 +175,7 @@ jobs: - name: Notify in Slack in case of failure id: slack-notification if: failure() && github.event_name == 'schedule' - uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@main + uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main with: vault_addr: ${{ secrets.VAULT_ADDR }} vault_role_id: ${{ secrets.VAULT_ROLE_ID }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 53790fb6..8bc6fda3 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -5,16 +5,16 @@ on: schedule: - cron: '0 1 * * 2' workflow_dispatch: - # pull_request: TODO: revert - # # the paths should be synced with ../labeler.yml - # paths: - # - test/**.go - # - test/**/go.mod - # - modules/fixtures/** - # - modules/**.tf - # - .tool-versions - # - .github/workflows/tests.yml - # - justfile + pull_request: + # the paths should be synced with ../labeler.yml + paths: + - test/**.go + - test/**/go.mod + - modules/fixtures/** + - modules/**.tf + - .tool-versions + - .github/workflows/tests.yml + - justfile # limit to a single execution per ref of this workflow concurrency: @@ -242,7 +242,7 @@ jobs: - name: Notify in Slack in case of failure id: slack-notification if: github.event_name == 'schedule' - uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@5b7cb35357c6cbfd7ebe0d9f45b0bcd45632676b # main + uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main with: vault_addr: ${{ secrets.VAULT_ADDR }} vault_role_id: ${{ secrets.VAULT_ROLE_ID }} diff --git a/modules/aurora/variables.tf b/modules/aurora/variables.tf index ebdbb766..565b7a5f 100644 --- a/modules/aurora/variables.tf +++ b/modules/aurora/variables.tf @@ -9,7 +9,8 @@ variable "engine" { } variable "engine_version" { - type = string + type = string + # 
renovate: datasource=endoflife-date depName=amazon-rds-postgresql versioning=semver default = "15.4" description = "The DB engine version for Postgres to use." } From bfff6a30de7305c11c2785264de6ca81d5e66df3 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Mon, 16 Sep 2024 09:57:18 +0200 Subject: [PATCH 38/51] fix params --- .github/workflows/test-gha-eks.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-gha-eks.yml b/.github/workflows/test-gha-eks.yml index 1026fa41..c74c58e5 100644 --- a/.github/workflows/test-gha-eks.yml +++ b/.github/workflows/test-gha-eks.yml @@ -82,14 +82,14 @@ jobs: if [[ -n "${{ inputs.db_username }}" ]]; then db_username="${{ inputs.db_username }}" else - db_username="user$(openssl rand -hex 4)" + db_username="user$(openssl rand -hex 4 | tr -d '/@" ')" fi echo "db_username=$db_username" | tee -a "$GITHUB_OUTPUT" if [[ -n "${{ inputs.db_password }}" ]]; then db_password="${{ inputs.db_password }}" else - db_password="$(openssl rand -base64 12)" + db_password="$(openssl rand -base64 12 | tr -d '/@" ')" fi echo "db_password=$db_password" | tee -a "$GITHUB_OUTPUT" From 5f40c909919a2a3a94597f4131b990a95a376f1f Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Mon, 16 Sep 2024 20:15:31 +0200 Subject: [PATCH 39/51] realign doc of gha and params --- .../actions/aurora-manage-cluster/README.md | 16 ++++---- .../actions/aurora-manage-cluster/action.yml | 18 ++++----- .../actions/eks-cleanup-resources/README.md | 4 +- .../actions/eks-cleanup-resources/action.yml | 8 ++-- .github/actions/eks-manage-cluster/README.md | 38 +++++++++---------- .github/actions/eks-manage-cluster/action.yml | 38 ++++++++++--------- .github/actions/utility-action/action.yml | 13 ++++++- 7 files changed, 74 insertions(+), 61 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/README.md b/.github/actions/aurora-manage-cluster/README.md index f65b539b..96d5e578 100644 --- a/.github/actions/aurora-manage-cluster/README.md +++ b/.github/actions/aurora-manage-cluster/README.md @@ -18,29 +18,29 @@ The following inputs are required or optional for the action: | Input | Description | Required | Default | |-------|-------------|----------|---------| | `cluster-name` | Name of the RDS Aurora cluster to deploy. | Yes | - | -| `engine-version` | Version of the Aurora engine to use. | Yes | see `action.yml` | -| `instance-class` | Instance class for the Aurora cluster. | Yes | `db.t3.medium` | -| `num-instances` | Number of instances in the Aurora cluster. | Yes | `1` | +| `engine-version` | Version of the Aurora engine to use. | No | see `action.yml` | +| `instance-class` | Instance class for the Aurora cluster. | No | `db.t3.medium` | +| `num-instances` | Number of instances in the Aurora cluster. | No | `1` | | `username` | Username for the PostgreSQL admin user. | Yes | - | | `password` | Password for the PostgreSQL admin user. | Yes | - | | `vpc-id` | VPC ID to create the cluster in. | Yes | - | | `subnet-ids` | List of subnet IDs to create the cluster in. | Yes | - | | `cidr-blocks` | CIDR blocks to allow access from and to. 
| Yes | - | | `auto-minor-version-upgrade` | If true, minor engine upgrades will be applied automatically to the DB instance during the maintenance window. | No | `true` | -| `availability-zones` | Array of availability zones to use for the Aurora cluster. | No | `[]` | +| `availability-zones` | Array of availability zones to use for the Aurora cluster. | Yes | - | | `iam-roles` | Allows propagating additional IAM roles to the Aurora cluster for features like access to S3. | No | `[]` | | `iam-auth-enabled` | Determines whether IAM authentication should be activated for IRSA usage. | No | `false` | | `ca-cert-identifier` | Specifies the identifier of the CA certificate for the DB instance. | No | `rds-ca-rsa2048-g1` | | `default-database-name` | The name for the automatically created database on cluster creation. | No | `camunda` | | `s3-backend-bucket` | Name of the S3 bucket to store Terraform state. | Yes | - | -| `s3-bucket-region` | Region of the bucket containing the resources states. Fallbacks to `aws-region` if not set. | No | - | -| `tf-modules-revision` | Git revision of the Terraform modules to use. | Yes | `main` | -| `tf-modules-path` | Path where the Terraform Aurora modules will be cloned. | Yes | `./.action-tf-modules/aurora/` | +| `s3-bucket-region` | Region of the bucket containing the resources states. | No | Fallbacks to `aws-region` if not set. | +| `tf-modules-revision` | Git revision of the Terraform modules to use. | No | `main` | +| `tf-modules-path` | Path where the Terraform Aurora modules will be cloned. | No | `./.action-tf-modules/aurora/` | | `tf-cli-config-credentials-hostname` | The hostname of a HCP Terraform/Terraform Enterprise instance for the CLI configuration file. | No | `app.terraform.io` | | `tf-cli-config-credentials-token` | The API token for a HCP Terraform/Terraform Enterprise instance. | No | - | | `tf-terraform-version` | The version of Terraform CLI to install. 
| No | `latest` | | `tf-terraform-wrapper` | Whether to install a wrapper for the Terraform binary. | No | `true` | -| `awscli-version` | Version of the AWS CLI to use. | Yes | see `action.yml` | +| `awscli-version` | Version of the AWS CLI to use. | No | see `action.yml` | ## Outputs diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 9eed5be9..2af2c3df 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -11,18 +11,15 @@ inputs: engine-version: description: 'Version of the Aurora engine to use' - required: true # renovate: datasource=endoflife-date depName=amazon-rds-postgresql versioning=semver default: "15.4" instance-class: description: 'Instance class for the Aurora cluster' - required: true default: "db.t3.medium" num-instances: description: 'Number of instances in the Aurora cluster' - required: true default: "1" username: @@ -51,7 +48,7 @@ inputs: availability-zones: description: 'Array of availability zones to use for the Aurora cluster' - default: '' + required: true iam-roles: description: 'Allows propagating additional IAM roles to the Aurora cluster to allow e.g. access to S3' @@ -72,37 +69,38 @@ inputs: s3-backend-bucket: description: 'Name of the S3 bucket to store Terraform state' required: true + s3-bucket-region: description: 'Region of the bucket containing the resources states' + required: false + tf-modules-revision: description: 'Git revision of the tf modules to use' default: 'main' - required: true tf-modules-path: description: 'Path where the tf Aurora modules will be cloned' default: './.action-tf-modules/aurora/' - required: true # inherited from https://github.com/hashicorp/setup-terraform/blob/main/action.yml tf-cli-config-credentials-hostname: description: 'The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. 
Defaults to `app.terraform.io`.' default: 'app.terraform.io' - required: false + tf-cli-config-credentials-token: description: 'The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file.' required: false + tf-terraform-version: description: 'The version of Terraform CLI to install. Defaults to `latest`.' default: 'latest' - required: false + tf-terraform-wrapper: description: 'Whether or not to install a wrapper to wrap subsequent calls of the `terraform` binary and expose its STDOUT, STDERR, and exit code as outputs named `stdout`, `stderr`, and `exitcode` respectively. Defaults to `true`.' default: 'true' - required: false + awscli-version: description: 'Version of the aws cli to use' - required: true # renovate: datasource=github-releases depName=aws/aws-cli default: "2.15.52" diff --git a/.github/actions/eks-cleanup-resources/README.md b/.github/actions/eks-cleanup-resources/README.md index 3c6535e2..da90c6e0 100644 --- a/.github/actions/eks-cleanup-resources/README.md +++ b/.github/actions/eks-cleanup-resources/README.md @@ -32,8 +32,8 @@ The action supports the following input parameters: | Input Name | Description | Required | Default | |--------------------|-------------------------------------------------------------------------------------------|----------|----------------------------| -| `tf-bucket` | The S3 bucket containing the resources' state files. | Yes | N/A | +| `tf-bucket` | The S3 bucket containing the resources' state files. | Yes | - | | `tf-bucket-region` | The region of the S3 bucket containing the resources state files. Falls back to `AWS_REGION` if not set. | No | AWS_REGION | | `max-age-hours` | The maximum age (in hours) for resources to be deleted. | No | "20" | -| `target` | Specifies an ID to destroy specific resources or "all" to destroy all resources. 
| Yes | "all" | +| `target` | Specifies an ID to destroy specific resources or "all" to destroy all resources. | No | "all" | | `temp-dir` | Temporary directory prefix used for storing resource data during processing. | No | "./tmp/eks-cleanup/" | diff --git a/.github/actions/eks-cleanup-resources/action.yml b/.github/actions/eks-cleanup-resources/action.yml index 28de6940..45f5b696 100644 --- a/.github/actions/eks-cleanup-resources/action.yml +++ b/.github/actions/eks-cleanup-resources/action.yml @@ -7,19 +7,21 @@ inputs: tf-bucket: description: 'Bucket containing the resources states' required: true + tf-bucket-region: description: 'Region of the bucket containing the resources states, if not set, will fallback on AWS_REGION' + required: false + max-age-hours: description: 'Maximum age of resources in hours' - required: false default: "20" + target: description: 'Specify an ID to destroy specific resources or "all" to destroy all resources' - required: true default: "all" + temp-dir: description: 'Temporary directory prefix used for storing resource data during processing' - required: false default: "./tmp/eks-cleanup/" runs: diff --git a/.github/actions/eks-manage-cluster/README.md b/.github/actions/eks-manage-cluster/README.md index b18ca102..73e1d699 100644 --- a/.github/actions/eks-manage-cluster/README.md +++ b/.github/actions/eks-manage-cluster/README.md @@ -44,28 +44,28 @@ jobs: | Input Name | Description | Required | Default | |-------------------------------------|--------------------------------------------------------------------------------------------------------------|----------|----------------------------------| -| `aws-region` | AWS region where the EKS cluster will be deployed. | Yes | N/A | -| `cluster-name` | Name of the EKS cluster to deploy. | Yes | N/A | -| `kubernetes-version` | Version of Kubernetes to use for the EKS cluster. | Yes | `1.30` | -| `cluster-service-ipv4-cidr` | CIDR block for cluster service IPs. 
| Yes | `10.190.0.0/16` | -| `cluster-node-ipv4-cidr` | CIDR block for cluster node IPs. | Yes | `10.192.0.0/16` | -| `np-instance-types` | List of instance types for the node pool. | Yes | `["t2.medium"]` | -| `np-capacity-type` | Capacity type for non-production instances (e.g., SPOT). | Yes | `SPOT` | -| `np-node-desired-count` | Desired number of nodes in the EKS node group. | Yes | `4` | -| `np-node-min-count` | Minimum number of nodes in the EKS node group. | Yes | `1` | -| `np-disk-size` | Disk size of the nodes on the default node pool (in GB). | Yes | `20` | -| `np-ami-type` | Amazon Machine Image type. | Yes | `AL2_x86_64` | -| `np-node-max-count` | Maximum number of nodes in the EKS node group. | Yes | `10` | -| `s3-backend-bucket` | Name of the S3 bucket to store Terraform state. | Yes | N/A | -| `s3-bucket-region` | Region of the bucket containing the resources states; falls back on `aws-region` if not set. | No | N/A | -| `tf-modules-revision` | Git revision of the Terraform modules to use. | Yes | `main` | -| `tf-modules-path` | Path where the Terraform EKS modules will be cloned. | Yes | `./.action-tf-modules/eks/` | -| `login` | Authenticate the current kube context on the created cluster. | Yes | `true` | +| `aws-region` | AWS region where the EKS cluster will be deployed. | Yes | - | +| `cluster-name` | Name of the EKS cluster to deploy. | Yes | - | +| `kubernetes-version` | Version of Kubernetes to use for the EKS cluster. | No | `1.30` | +| `cluster-service-ipv4-cidr` | CIDR block for cluster service IPs. | No | `10.190.0.0/16` | +| `cluster-node-ipv4-cidr` | CIDR block for cluster node IPs. | No | `10.192.0.0/16` | +| `np-instance-types` | List of instance types for the node pool. | No | `["t2.medium"]` | +| `np-capacity-type` | Capacity type for non-production instances (e.g., SPOT). | No | `SPOT` | +| `np-node-desired-count` | Desired number of nodes in the EKS node group. 
| No | `4` | +| `np-node-min-count` | Minimum number of nodes in the EKS node group. | No | `1` | +| `np-disk-size` | Disk size of the nodes on the default node pool (in GB). | No | `20` | +| `np-ami-type` | Amazon Machine Image type. | No | `AL2_x86_64` | +| `np-node-max-count` | Maximum number of nodes in the EKS node group. | No | `10` | +| `s3-backend-bucket` | Name of the S3 bucket to store Terraform state. | No | - | +| `s3-bucket-region` | Region of the bucket containing the resources states; falls back on `aws-region` if not set. | No | - | +| `tf-modules-revision` | Git revision of the Terraform modules to use. | No | `main` | +| `tf-modules-path` | Path where the Terraform EKS modules will be cloned. | No | `./.action-tf-modules/eks/` | +| `login` | Authenticate the current kube context on the created cluster. | No | `true` | | `tf-cli-config-credentials-hostname`| The hostname of a HCP Terraform/Terraform Enterprise instance to use for credentials configuration. | No | `app.terraform.io` | -| `tf-cli-config-credentials-token` | The API token for a HCP Terraform/Terraform Enterprise instance. | No | N/A | +| `tf-cli-config-credentials-token` | The API token for a HCP Terraform/Terraform Enterprise instance. | No | - | | `tf-terraform-version` | The version of Terraform CLI to install. Accepts full version or constraints like `<1.13.0` or `latest`. | No | `latest` | | `tf-terraform-wrapper` | Whether or not to install a wrapper for Terraform CLI calls. | No | `true` | -| `awscli-version` | Version of the AWS CLI to install. | Yes | see `action.yml` | +| `awscli-version` | Version of the AWS CLI to install. 
| No | see `action.yml` | ## Outputs diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index 752ddf50..e779545b 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -8,87 +8,91 @@ inputs: aws-region: description: 'AWS region where the EKS cluster will be deployed' required: true + cluster-name: description: 'Name of the EKS cluster to deploy' required: true + kubernetes-version: description: 'Version of Kubernetes to use for the EKS cluster' - required: true # renovate: datasource=endoflife-date depName=amazon-eks versioning=semver default: "1.30" + cluster-service-ipv4-cidr: description: 'CIDR block for cluster service IPs' - required: true default: '10.190.0.0/16' + cluster-node-ipv4-cidr: description: 'CIDR block for cluster node IPs' - required: true default: '10.192.0.0/16' + np-instance-types: description: 'List of instance types' - required: true default: '["t2.medium"]' + np-capacity-type: description: 'Capacity type for non-production instances (e.g., SPOT)' - required: true default: 'SPOT' + np-node-desired-count: description: 'Desired number of nodes in the EKS node group' - required: true default: "4" + np-node-min-count: description: 'Minimum number of nodes in the EKS node group' - required: true default: "1" + np-disk-size: description: 'Disk size of the nodes on the default node pool' - required: true default: "20" + np-ami-type: description: 'Amazon Machine Image' - required: true default: "AL2_x86_64" + np-node-max-count: description: 'Maximum number of nodes in the EKS node group' - required: true default: "10" + s3-backend-bucket: description: 'Name of the S3 bucket to store Terraform state' required: true + s3-bucket-region: description: 'Region of the bucket containing the resources states, if not set, will fallback on aws-region' + required: false + tf-modules-revision: description: 'Git revision of the tf modules to use' 
default: 'main' - required: true + tf-modules-path: description: 'Path where the tf EKS modules will be cloned' default: './.action-tf-modules/eks/' - required: true + login: description: 'Authenticate the current kube context on the created cluster' default: "true" - required: true # inherited from https://github.com/hashicorp/setup-terraform/blob/main/action.yml tf-cli-config-credentials-hostname: description: 'The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. Defaults to `app.terraform.io`.' default: 'app.terraform.io' - required: false + tf-cli-config-credentials-token: description: 'The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file.' required: false + tf-terraform-version: description: 'The version of Terraform CLI to install. Instead of full version string you can also specify constraint string starting with "<" (for example `<1.13.0`) to install the latest version satisfying the constraint. A value of `latest` will install the latest version of Terraform CLI. Defaults to `latest`.' default: 'latest' - required: false + tf-terraform-wrapper: description: 'Whether or not to install a wrapper to wrap subsequent calls of the `terraform` binary and expose its STDOUT, STDERR, and exit code as outputs named `stdout`, `stderr`, and `exitcode` respectively. Defaults to `true`.' 
default: 'true' - required: false + awscli-version: description: 'Version of the aws cli to use' - required: true # renovate: datasource=github-releases depName=aws/aws-cli default: "2.15.52" diff --git a/.github/actions/utility-action/action.yml b/.github/actions/utility-action/action.yml index 470e1c4c..4211fac1 100644 --- a/.github/actions/utility-action/action.yml +++ b/.github/actions/utility-action/action.yml @@ -10,32 +10,41 @@ description: | inputs: awscli-version: description: 'Version of the AWS CLI to install' - required: true # renovate: datasource=github-releases depName=aws/aws-cli default: '2.15.52' + terraform-version: description: 'Version of Terraform to install' - required: true default: 'latest' + s3-backend-bucket: description: 'Name of the S3 bucket to store Terraform state' required: true + s3-bucket-region: description: 'Region of the bucket containing the resources states, if not set, will fallback on aws-region' + required: false + aws-region: description: 'AWS region to use for S3 bucket operations' required: true + name: description: 'Name of resource instance (e.g., uid)' required: true + module-name: description: 'Name of the Terraform module (e.g., eks-cluster, aurora)' required: true + tf-cli-config-credentials-hostname: description: 'The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file' default: 'app.terraform.io' + tf-cli-config-credentials-token: description: 'The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file' + required: false + tf-terraform-wrapper: description: 'Whether or not to install a wrapper for Terraform CLI' default: 'true' From 633c098927b2cb9854e90ff1096632589c892f40 Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Mon, 16 Sep 2024 20:23:16 +0200 Subject: [PATCH 40/51] reformat yaml using yamlftm --- .../actions/aurora-manage-cluster/action.yml | 382 +++++++------- .../actions/eks-cleanup-resources/action.yml | 55 +- .github/actions/eks-manage-cluster/action.yml | 394 +++++++-------- .github/actions/utility-action/action.yml | 209 ++++---- .github/labeler.yml | 21 +- .github/workflows/daily-cleanup.yml | 132 ++--- .github/workflows/labeler.yml | 45 +- .github/workflows/links.yml | 85 ++-- .github/workflows/lint.yml | 20 +- .github/workflows/test-gha-eks.yml | 331 ++++++------ .github/workflows/tests.yml | 470 +++++++++--------- .lint/terraform_docs/.terraform-docs.yml | 54 +- .pre-commit-config.yaml | 98 ++-- modules/fixtures/postgres-client-irsa.yml | 139 +++--- modules/fixtures/postgres-client.yml | 131 ++--- modules/fixtures/whoami-deployment.yml | 61 +-- 16 files changed, 1325 insertions(+), 1302 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 2af2c3df..9343e94f 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -1,200 +1,204 @@ +--- name: Deploy RDS Aurora Cluster description: | - This GitHub Action automates the deployment of an RDS Aurora cluster using Terraform. - This action will also install Terraform and awscli. It will output the Aurora cluster endpoint. + This GitHub Action automates the deployment of an RDS Aurora cluster using Terraform. + This action will also install Terraform and awscli. It will output the Aurora cluster endpoint. 
inputs: - cluster-name: - description: 'Name of the RDS Aurora cluster to deploy' - required: true - - engine-version: - description: 'Version of the Aurora engine to use' - # renovate: datasource=endoflife-date depName=amazon-rds-postgresql versioning=semver - default: "15.4" - - instance-class: - description: 'Instance class for the Aurora cluster' - default: "db.t3.medium" - - num-instances: - description: 'Number of instances in the Aurora cluster' - default: "1" - - username: - description: 'Username for the PostgreSQL admin user' - required: true - - password: - description: 'Password for the PostgreSQL admin user' - required: true - - vpc-id: - description: 'VPC ID to create the cluster in' - required: true - - subnet-ids: - description: 'List of subnet IDs to create the cluster in' - required: true - - cidr-blocks: - description: 'CIDR blocks to allow access from and to' - required: true - - auto-minor-version-upgrade: - description: 'If true, minor engine upgrades will be applied automatically to the DB instance during the maintenance window' - default: "true" # Default value from variable.tf - - availability-zones: - description: 'Array of availability zones to use for the Aurora cluster' - required: true - - iam-roles: - description: 'Allows propagating additional IAM roles to the Aurora cluster to allow e.g. access to S3' - default: "[]" # Default value from variable.tf - - iam-auth-enabled: - description: 'Determines whether IAM auth should be activated for IRSA usage' - default: "false" # Default value from variable.tf - - ca-cert-identifier: - description: 'Specifies the identifier of the CA certificate for the DB instance' - default: 'rds-ca-rsa2048-g1' # Default value from variable.tf - - default-database-name: - description: 'The name for the automatically created database on cluster creation.' 
- default: 'camunda' # Default value from variable.tf - - s3-backend-bucket: - description: 'Name of the S3 bucket to store Terraform state' - required: true - - s3-bucket-region: - description: 'Region of the bucket containing the resources states' - required: false - - tf-modules-revision: - description: 'Git revision of the tf modules to use' - default: 'main' - - tf-modules-path: - description: 'Path where the tf Aurora modules will be cloned' - default: './.action-tf-modules/aurora/' - - # inherited from https://github.com/hashicorp/setup-terraform/blob/main/action.yml - tf-cli-config-credentials-hostname: - description: 'The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. Defaults to `app.terraform.io`.' - default: 'app.terraform.io' - - tf-cli-config-credentials-token: - description: 'The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file.' - required: false - - tf-terraform-version: - description: 'The version of Terraform CLI to install. Defaults to `latest`.' - default: 'latest' - - tf-terraform-wrapper: - description: 'Whether or not to install a wrapper to wrap subsequent calls of the `terraform` binary and expose its STDOUT, STDERR, and exit code as outputs named `stdout`, `stderr`, and `exitcode` respectively. Defaults to `true`.' 
- default: 'true' - - awscli-version: - description: 'Version of the aws cli to use' - # renovate: datasource=github-releases depName=aws/aws-cli - default: "2.15.52" + cluster-name: + description: Name of the RDS Aurora cluster to deploy + required: true + + engine-version: + description: Version of the Aurora engine to use + # renovate: datasource=endoflife-date depName=amazon-rds-postgresql versioning=semver + default: '15.4' + + instance-class: + description: Instance class for the Aurora cluster + default: db.t3.medium + + num-instances: + description: Number of instances in the Aurora cluster + default: '1' + + username: + description: Username for the PostgreSQL admin user + required: true + + password: + description: Password for the PostgreSQL admin user + required: true + + vpc-id: + description: VPC ID to create the cluster in + required: true + + subnet-ids: + description: List of subnet IDs to create the cluster in + required: true + + cidr-blocks: + description: CIDR blocks to allow access from and to + required: true + + auto-minor-version-upgrade: + description: If true, minor engine upgrades will be applied automatically to the DB instance during the maintenance window + default: 'true' # Default value from variable.tf + + availability-zones: + description: Array of availability zones to use for the Aurora cluster + required: true + + iam-roles: + description: Allows propagating additional IAM roles to the Aurora cluster to allow e.g. access to S3 + default: '[]' # Default value from variable.tf + + iam-auth-enabled: + description: Determines whether IAM auth should be activated for IRSA usage + default: 'false' # Default value from variable.tf + + ca-cert-identifier: + description: Specifies the identifier of the CA certificate for the DB instance + default: rds-ca-rsa2048-g1 # Default value from variable.tf + + default-database-name: + description: The name for the automatically created database on cluster creation. 
+ default: camunda # Default value from variable.tf + + s3-backend-bucket: + description: Name of the S3 bucket to store Terraform state + required: true + + s3-bucket-region: + description: Region of the bucket containing the resources states + required: false + + tf-modules-revision: + description: Git revision of the tf modules to use + default: main + + tf-modules-path: + description: Path where the tf Aurora modules will be cloned + default: ./.action-tf-modules/aurora/ + + # inherited from https://github.com/hashicorp/setup-terraform/blob/main/action.yml + tf-cli-config-credentials-hostname: + description: The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration + file. Defaults to `app.terraform.io`. + default: app.terraform.io + + tf-cli-config-credentials-token: + description: The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration + file. + required: false + + tf-terraform-version: + description: The version of Terraform CLI to install. Defaults to `latest`. + default: latest + + tf-terraform-wrapper: + description: Whether or not to install a wrapper to wrap subsequent calls of the `terraform` binary and expose its STDOUT, STDERR, and exit code + as outputs named `stdout`, `stderr`, and `exitcode` respectively. Defaults to `true`. 
+ default: 'true' + + awscli-version: + description: Version of the aws cli to use + # renovate: datasource=github-releases depName=aws/aws-cli + default: 2.15.52 outputs: - aurora-endpoint: - description: 'The endpoint of the deployed Aurora cluster' - value: ${{ steps.apply.outputs.aurora_endpoint }} + aurora-endpoint: + description: The endpoint of the deployed Aurora cluster + value: ${{ steps.apply.outputs.aurora_endpoint }} - terraform-state-url: - description: 'URL of the Terraform state file in the S3 bucket' - value: ${{ steps.utility.outputs.terraform-state-url }} + terraform-state-url: + description: URL of the Terraform state file in the S3 bucket + value: ${{ steps.utility.outputs.terraform-state-url }} - # Add all terraform outputs dynamically - all-terraform-outputs: - description: 'All outputs from Terraform' - value: ${{ steps.fetch_outputs.outputs.all_terraform_outputs }} + # Add all terraform outputs dynamically + all-terraform-outputs: + description: All outputs from Terraform + value: ${{ steps.fetch_outputs.outputs.all_terraform_outputs }} runs: - using: 'composite' - steps: - - name: Use Utility Actions - id: utility - # seehttps://github.com/orgs/community/discussions/41927 it's not possible to optimize this yet - # steps.uses cannot access the github context. 
- # uses: ${{ github.action_repository }}/utility-action@${{ github.action_ref }} - # TODO: hard pin this one once it's merged - #uses: "camunda/camunda-tf-eks-module/utility-action@main" - uses: "./.github/actions/utility-action" - with: - awscli-version: '${{ inputs.awscli-version }}' - terraform-version: '${{ inputs.terraform-version }}' - - s3-backend-bucket: '${{ inputs.s3-backend-bucket }}' - s3-bucket-region: '${{ inputs.s3-bucket-region }}' - - name: '${{ inputs.cluster-name }}' - module-name: 'aurora' - - tf-cli-config-credentials-hostname: '${{ inputs.tf-cli-config-credentials-hostname }}' - tf-cli-config-credentials-token: '${{ inputs.tf-cli-config-credentials-token }}' - tf-terraform-wrapper: '${{ inputs.tf-terraform-wrapper }}' - - - name: Checkout Repository Aurora modules - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - with: - repository: "camunda/camunda-tf-eks-module" - ref: ${{ inputs.tf-modules-revision }} - path: ${{ inputs.tf-modules-path }} - fetch-depth: 0 - - - name: Terraform Init - shell: bash - id: init - working-directory: "${{ inputs.tf-modules-path }}/modules/aurora/" - run: | - cp ../fixtures/backend.tf ./ - terraform version - terraform init -backend-config="bucket=${{ steps.utility.outputs.TFSTATE_BUCKET }}" -backend-config="key=${{ steps.utility.outputs.TFSTATE_KEY }}" -backend-config="region=${{ steps.utility.outputs.TFSTATE_REGION }}" - terraform validate -no-color - - - name: Terraform Plan - shell: bash - id: plan - working-directory: "${{ inputs.tf-modules-path }}/modules/aurora/" - run: | - terraform plan -no-color -out aurora.plan -var "cluster_name=${{ inputs.cluster-name }}" \ - -var "engine_version=${{ inputs.engine-version }}" \ - -var "instance_class=${{ inputs.instance-class }}" \ - -var "num_instances=${{ inputs.num-instances }}" \ - -var "username=${{ inputs.username }}" \ - -var "password=${{ inputs.password }}" \ - -var "auto_minor_version_upgrade=${{ inputs.auto-minor-version-upgrade }}" \ 
- -var 'availability_zones=${{ inputs.availability-zones }}' \ - -var 'iam_roles=${{ inputs.iam-roles }}' \ - -var "iam_auth_enabled=${{ inputs.iam-auth-enabled }}" \ - -var "ca_cert_identifier=${{ inputs.ca-cert-identifier }}" \ - -var "default_database_name=${{ inputs.default-database-name }}" \ - -var "vpc_id=${{ inputs.vpc-id }}" \ - -var 'subnet_ids=${{ inputs.subnet-ids }}' \ - -var 'cidr_blocks=${{ inputs.cidr-blocks }}' - - - name: Terraform Apply - shell: bash - id: apply - working-directory: "${{ inputs.tf-modules-path }}/modules/aurora/" - run: | - terraform apply -no-color aurora.plan - export aurora_endpoint="$(terraform output -raw aurora_endpoint)" - echo "aurora_endpoint=$aurora_endpoint" >> "$GITHUB_OUTPUT" - - - name: Fetch Terraform Outputs - shell: bash - id: fetch_outputs - working-directory: "${{ inputs.tf-modules-path }}/modules/aurora/" - run: | - all_outputs=$(terraform output -json | jq -c .) - echo "all_terraform_outputs=$all_outputs" | tee -a "$GITHUB_OUTPUT" + using: composite + steps: + - name: Use Utility Actions + id: utility + # see https://github.com/orgs/community/discussions/41927 it's not possible to optimize this yet + # steps.uses cannot access the github context. 
+ # uses: ${{ github.action_repository }}/utility-action@${{ github.action_ref }} + # TODO: hard pin this one once it's merged + #uses: "camunda/camunda-tf-eks-module/utility-action@main" + uses: ./.github/actions/utility-action + with: + awscli-version: ${{ inputs.awscli-version }} + terraform-version: ${{ inputs.terraform-version }} + + s3-backend-bucket: ${{ inputs.s3-backend-bucket }} + s3-bucket-region: ${{ inputs.s3-bucket-region }} + + name: ${{ inputs.cluster-name }} + module-name: aurora + + tf-cli-config-credentials-hostname: ${{ inputs.tf-cli-config-credentials-hostname }} + tf-cli-config-credentials-token: ${{ inputs.tf-cli-config-credentials-token }} + tf-terraform-wrapper: ${{ inputs.tf-terraform-wrapper }} + + - name: Checkout Repository Aurora modules + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + with: + repository: camunda/camunda-tf-eks-module + ref: ${{ inputs.tf-modules-revision }} + path: ${{ inputs.tf-modules-path }} + fetch-depth: 0 + + - name: Terraform Init + shell: bash + id: init + working-directory: ${{ inputs.tf-modules-path }}/modules/aurora/ + run: | + cp ../fixtures/backend.tf ./ + terraform version + terraform init -backend-config="bucket=${{ steps.utility.outputs.TFSTATE_BUCKET }}" -backend-config="key=${{ steps.utility.outputs.TFSTATE_KEY }}" -backend-config="region=${{ steps.utility.outputs.TFSTATE_REGION }}" + terraform validate -no-color + + - name: Terraform Plan + shell: bash + id: plan + working-directory: ${{ inputs.tf-modules-path }}/modules/aurora/ + run: | + terraform plan -no-color -out aurora.plan -var "cluster_name=${{ inputs.cluster-name }}" \ + -var "engine_version=${{ inputs.engine-version }}" \ + -var "instance_class=${{ inputs.instance-class }}" \ + -var "num_instances=${{ inputs.num-instances }}" \ + -var "username=${{ inputs.username }}" \ + -var "password=${{ inputs.password }}" \ + -var "auto_minor_version_upgrade=${{ inputs.auto-minor-version-upgrade }}" \ + -var 
'availability_zones=${{ inputs.availability-zones }}' \ + -var 'iam_roles=${{ inputs.iam-roles }}' \ + -var "iam_auth_enabled=${{ inputs.iam-auth-enabled }}" \ + -var "ca_cert_identifier=${{ inputs.ca-cert-identifier }}" \ + -var "default_database_name=${{ inputs.default-database-name }}" \ + -var "vpc_id=${{ inputs.vpc-id }}" \ + -var 'subnet_ids=${{ inputs.subnet-ids }}' \ + -var 'cidr_blocks=${{ inputs.cidr-blocks }}' + + - name: Terraform Apply + shell: bash + id: apply + working-directory: ${{ inputs.tf-modules-path }}/modules/aurora/ + run: | + terraform apply -no-color aurora.plan + export aurora_endpoint="$(terraform output -raw aurora_endpoint)" + echo "aurora_endpoint=$aurora_endpoint" >> "$GITHUB_OUTPUT" + + - name: Fetch Terraform Outputs + shell: bash + id: fetch_outputs + working-directory: ${{ inputs.tf-modules-path }}/modules/aurora/ + run: | + all_outputs=$(terraform output -json | jq -c .) + echo "all_terraform_outputs=$all_outputs" | tee -a "$GITHUB_OUTPUT" diff --git a/.github/actions/eks-cleanup-resources/action.yml b/.github/actions/eks-cleanup-resources/action.yml index 45f5b696..422033ef 100644 --- a/.github/actions/eks-cleanup-resources/action.yml +++ b/.github/actions/eks-cleanup-resources/action.yml @@ -1,38 +1,39 @@ +--- name: Delete EKS resources description: | - This GitHub Action automates the deletion of EKS resources using a shell script. + This GitHub Action automates the deletion of EKS resources using a shell script. 
inputs: - tf-bucket: - description: 'Bucket containing the resources states' - required: true + tf-bucket: + description: Bucket containing the resources states + required: true - tf-bucket-region: - description: 'Region of the bucket containing the resources states, if not set, will fallback on AWS_REGION' - required: false + tf-bucket-region: + description: Region of the bucket containing the resources states, if not set, will fallback on AWS_REGION + required: false - max-age-hours: - description: 'Maximum age of resources in hours' - default: "20" + max-age-hours: + description: Maximum age of resources in hours + default: '20' - target: - description: 'Specify an ID to destroy specific resources or "all" to destroy all resources' - default: "all" + target: + description: Specify an ID to destroy specific resources or "all" to destroy all resources + default: all - temp-dir: - description: 'Temporary directory prefix used for storing resource data during processing' - default: "./tmp/eks-cleanup/" + temp-dir: + description: Temporary directory prefix used for storing resource data during processing + default: ./tmp/eks-cleanup/ runs: - using: 'composite' - steps: - - name: Delete resources - id: delete_resources - shell: bash - run: | - if [ -n "${{ inputs.tf-bucket-region }}" ]; then - export AWS_S3_REGION="${{ inputs.tf-bucket-region }}" - fi - - ${{ github.action_path }}/scripts/destroy.sh "${{ inputs.tf-bucket }}" ${{ github.action_path }}/../../../modules/ "${{ inputs.temp-dir }}" ${{ inputs.max-age-hours }} ${{ inputs.target }} + using: composite + steps: + - name: Delete resources + id: delete_resources + shell: bash + run: | + if [ -n "${{ inputs.tf-bucket-region }}" ]; then + export AWS_S3_REGION="${{ inputs.tf-bucket-region }}" + fi + + ${{ github.action_path }}/scripts/destroy.sh "${{ inputs.tf-bucket }}" ${{ github.action_path }}/../../../modules/ "${{ inputs.temp-dir }}" ${{ inputs.max-age-hours }} ${{ inputs.target }} diff --git 
a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index e779545b..ea37d2c1 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -1,205 +1,211 @@ +--- name: Deploy an EKS Cluster description: | - This GitHub Action automates the deployment of an EKS (Amazon Elastic Kubernetes Service) cluster using Terraform. - This action will also install Terraform, awscli, and kubectl. The kube context will be set on the created cluster. + This GitHub Action automates the deployment of an EKS (Amazon Elastic Kubernetes Service) cluster using Terraform. + This action will also install Terraform, awscli, and kubectl. The kube context will be set on the created cluster. inputs: - aws-region: - description: 'AWS region where the EKS cluster will be deployed' - required: true - - cluster-name: - description: 'Name of the EKS cluster to deploy' - required: true - - kubernetes-version: - description: 'Version of Kubernetes to use for the EKS cluster' - # renovate: datasource=endoflife-date depName=amazon-eks versioning=semver - default: "1.30" - - cluster-service-ipv4-cidr: - description: 'CIDR block for cluster service IPs' - default: '10.190.0.0/16' - - cluster-node-ipv4-cidr: - description: 'CIDR block for cluster node IPs' - default: '10.192.0.0/16' - - np-instance-types: - description: 'List of instance types' - default: '["t2.medium"]' - - np-capacity-type: - description: 'Capacity type for non-production instances (e.g., SPOT)' - default: 'SPOT' - - np-node-desired-count: - description: 'Desired number of nodes in the EKS node group' - default: "4" - - np-node-min-count: - description: 'Minimum number of nodes in the EKS node group' - default: "1" - - np-disk-size: - description: 'Disk size of the nodes on the default node pool' - default: "20" - - np-ami-type: - description: 'Amazon Machine Image' - default: "AL2_x86_64" - - np-node-max-count: - description: 'Maximum number 
of nodes in the EKS node group' - default: "10" - - s3-backend-bucket: - description: 'Name of the S3 bucket to store Terraform state' - required: true - - s3-bucket-region: - description: 'Region of the bucket containing the resources states, if not set, will fallback on aws-region' - required: false - - tf-modules-revision: - description: 'Git revision of the tf modules to use' - default: 'main' - - tf-modules-path: - description: 'Path where the tf EKS modules will be cloned' - default: './.action-tf-modules/eks/' - - login: - description: 'Authenticate the current kube context on the created cluster' - default: "true" - - # inherited from https://github.com/hashicorp/setup-terraform/blob/main/action.yml - tf-cli-config-credentials-hostname: - description: 'The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. Defaults to `app.terraform.io`.' - default: 'app.terraform.io' - - tf-cli-config-credentials-token: - description: 'The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file.' - required: false - - tf-terraform-version: - description: 'The version of Terraform CLI to install. Instead of full version string you can also specify constraint string starting with "<" (for example `<1.13.0`) to install the latest version satisfying the constraint. A value of `latest` will install the latest version of Terraform CLI. Defaults to `latest`.' - default: 'latest' - - tf-terraform-wrapper: - description: 'Whether or not to install a wrapper to wrap subsequent calls of the `terraform` binary and expose its STDOUT, STDERR, and exit code as outputs named `stdout`, `stderr`, and `exitcode` respectively. Defaults to `true`.' 
- default: 'true' - - awscli-version: - description: 'Version of the aws cli to use' - # renovate: datasource=github-releases depName=aws/aws-cli - default: "2.15.52" + aws-region: + description: AWS region where the EKS cluster will be deployed + required: true + + cluster-name: + description: Name of the EKS cluster to deploy + required: true + + kubernetes-version: + description: Version of Kubernetes to use for the EKS cluster + # renovate: datasource=endoflife-date depName=amazon-eks versioning=semver + default: '1.30' + + cluster-service-ipv4-cidr: + description: CIDR block for cluster service IPs + default: 10.190.0.0/16 + + cluster-node-ipv4-cidr: + description: CIDR block for cluster node IPs + default: 10.192.0.0/16 + + np-instance-types: + description: List of instance types + default: '["t2.medium"]' + + np-capacity-type: + description: Capacity type for non-production instances (e.g., SPOT) + default: SPOT + + np-node-desired-count: + description: Desired number of nodes in the EKS node group + default: '4' + + np-node-min-count: + description: Minimum number of nodes in the EKS node group + default: '1' + + np-disk-size: + description: Disk size of the nodes on the default node pool + default: '20' + + np-ami-type: + description: Amazon Machine Image + default: AL2_x86_64 + + np-node-max-count: + description: Maximum number of nodes in the EKS node group + default: '10' + + s3-backend-bucket: + description: Name of the S3 bucket to store Terraform state + required: true + + s3-bucket-region: + description: Region of the bucket containing the resources states, if not set, will fallback on aws-region + required: false + + tf-modules-revision: + description: Git revision of the tf modules to use + default: main + + tf-modules-path: + description: Path where the tf EKS modules will be cloned + default: ./.action-tf-modules/eks/ + + login: + description: Authenticate the current kube context on the created cluster + default: 'true' + + # inherited from 
https://github.com/hashicorp/setup-terraform/blob/main/action.yml + tf-cli-config-credentials-hostname: + description: The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration + file. Defaults to `app.terraform.io`. + default: app.terraform.io + + tf-cli-config-credentials-token: + description: The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration + file. + required: false + + tf-terraform-version: + description: The version of Terraform CLI to install. Instead of full version string you can also specify constraint string starting with "<" (for + example `<1.13.0`) to install the latest version satisfying the constraint. A value of `latest` will install the latest version of Terraform + CLI. Defaults to `latest`. + default: latest + + tf-terraform-wrapper: + description: Whether or not to install a wrapper to wrap subsequent calls of the `terraform` binary and expose its STDOUT, STDERR, and exit code + as outputs named `stdout`, `stderr`, and `exitcode` respectively. Defaults to `true`. 
+ default: 'true' + + awscli-version: + description: Version of the aws cli to use + # renovate: datasource=github-releases depName=aws/aws-cli + default: 2.15.52 outputs: - eks-cluster-endpoint: - description: 'The API endpoint of the deployed EKS cluster' - value: ${{ steps.apply.outputs.cluster_endpoint }} + eks-cluster-endpoint: + description: The API endpoint of the deployed EKS cluster + value: ${{ steps.apply.outputs.cluster_endpoint }} - terraform-state-url: - description: 'URL of the Terraform state file in the S3 bucket' - value: ${{ steps.utility.outputs.terraform-state-url }} + terraform-state-url: + description: URL of the Terraform state file in the S3 bucket + value: ${{ steps.utility.outputs.terraform-state-url }} - # Add all terraform outputs dynamically - all-terraform-outputs: - description: 'All outputs from Terraform' - value: ${{ steps.fetch_outputs.outputs.all_terraform_outputs }} + # Add all terraform outputs dynamically + all-terraform-outputs: + description: All outputs from Terraform + value: ${{ steps.fetch_outputs.outputs.all_terraform_outputs }} runs: - using: 'composite' - steps: - - name: Use Utility Actions - id: utility - # seehttps://github.com/orgs/community/discussions/41927 it's not possible to optimize this yet - # steps.uses cannot access the github context. 
- # uses: ${{ github.action_repository }}/utility-action@${{ github.action_ref }} - # TODO: hard pin this one once it's merged - #uses: "camunda/camunda-tf-eks-module/utility-action@main" - uses: "./.github/actions/utility-action" - with: - awscli-version: '${{ inputs.awscli-version }}' - terraform-version: '${{ inputs.terraform-version }}' - - aws-region: '${{ inputs.aws-region }}' - s3-backend-bucket: '${{ inputs.s3-backend-bucket }}' - s3-bucket-region: '${{ inputs.s3-bucket-region }}' - - name: '${{ inputs.cluster-name }}' - module-name: 'eks-cluster' - - tf-cli-config-credentials-hostname: '${{ inputs.tf-cli-config-credentials-hostname }}' - tf-cli-config-credentials-token: '${{ inputs.tf-cli-config-credentials-token }}' - tf-terraform-wrapper: '${{ inputs.tf-terraform-wrapper }}' - - - name: Checkout Repository EKS modules - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - with: - repository: "camunda/camunda-tf-eks-module" - ref: ${{ inputs.tf-modules-revision }} - path: ${{ inputs.tf-modules-path }} - fetch-depth: 0 - - - name: Terraform Init - shell: bash - id: init - working-directory: "${{ inputs.tf-modules-path }}/modules/eks-cluster/" - run: | - cp ../fixtures/backend.tf ./ - terraform version - terraform init -backend-config="bucket=${{ steps.utility.outputs.TFSTATE_BUCKET }}" -backend-config="key=${{ steps.utility.outputs.TFSTATE_KEY }}" -backend-config="region=${{ steps.utility.outputs.TFSTATE_REGION }}" - terraform validate -no-color - - - name: Terraform Plan - shell: bash - id: plan - working-directory: "${{ inputs.tf-modules-path }}/modules/eks-cluster/" - run: | - terraform plan -no-color -out eks.plan -var "name=${{ inputs.cluster-name }}" \ - -var "region=${{ inputs.aws-region }}" \ - -var "kubernetes_version=${{ inputs.kubernetes-version }}" \ - -var "name=${{ inputs.cluster-name }}" \ - -var "np_desired_node_count=${{ inputs.np-node-desired-count }}" \ - -var "np_min_node_count=${{ inputs.np-node-min-count }}" \ - -var 
"np_max_node_count=${{ inputs.np-node-max-count }}" \ - -var "np_disk_size=${{ inputs.np-disk-size }}" \ - -var "np_ami_type=${{ inputs.np-ami-type }}" \ - -var "cluster_service_ipv4_cidr=${{ inputs.cluster-service-ipv4-cidr }}" \ - -var "cluster_node_ipv4_cidr=${{ inputs.cluster-node-ipv4-cidr }}" \ - -var 'np_instance_types=${{ inputs.np-instance-types }}' \ - -var "np_capacity_type=${{ inputs.np-capacity-type }}" - - - name: Terraform Apply - shell: bash - id: apply - working-directory: "${{ inputs.tf-modules-path }}/modules/eks-cluster/" - run: | - terraform apply -no-color eks.plan - export cluster_endpoint="$(terraform output -raw cluster_endpoint)" - echo "cluster_endpoint=$cluster_endpoint" >> "$GITHUB_OUTPUT" - - - name: Fetch Terraform Outputs - shell: bash - id: fetch_outputs - working-directory: "${{ inputs.tf-modules-path }}/modules/eks-cluster/" - run: | - all_outputs=$(terraform output -json | jq -c .) - echo "all_terraform_outputs=$all_outputs" | tee -a "$GITHUB_OUTPUT" - - - name: Configure kubectl - shell: bash - id: kube_config - if: inputs.login == 'true' - run: | - aws eks --region ${{ inputs.aws-region }} update-kubeconfig --name ${{ inputs.cluster-name }} - - - name: Output Kube Config - shell: bash - if: inputs.login == 'true' - run: | - kubectl config view - kubectl get ns + using: composite + steps: + - name: Use Utility Actions + id: utility + # seehttps://github.com/orgs/community/discussions/41927 it's not possible to optimize this yet + # steps.uses cannot access the github context. 
+ # uses: ${{ github.action_repository }}/utility-action@${{ github.action_ref }} + # TODO: hard pin this one once it's merged + #uses: "camunda/camunda-tf-eks-module/utility-action@main" + uses: ./.github/actions/utility-action + with: + awscli-version: ${{ inputs.awscli-version }} + terraform-version: ${{ inputs.terraform-version }} + + aws-region: ${{ inputs.aws-region }} + s3-backend-bucket: ${{ inputs.s3-backend-bucket }} + s3-bucket-region: ${{ inputs.s3-bucket-region }} + + name: ${{ inputs.cluster-name }} + module-name: eks-cluster + + tf-cli-config-credentials-hostname: ${{ inputs.tf-cli-config-credentials-hostname }} + tf-cli-config-credentials-token: ${{ inputs.tf-cli-config-credentials-token }} + tf-terraform-wrapper: ${{ inputs.tf-terraform-wrapper }} + + - name: Checkout Repository EKS modules + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + with: + repository: camunda/camunda-tf-eks-module + ref: ${{ inputs.tf-modules-revision }} + path: ${{ inputs.tf-modules-path }} + fetch-depth: 0 + + - name: Terraform Init + shell: bash + id: init + working-directory: ${{ inputs.tf-modules-path }}/modules/eks-cluster/ + run: | + cp ../fixtures/backend.tf ./ + terraform version + terraform init -backend-config="bucket=${{ steps.utility.outputs.TFSTATE_BUCKET }}" -backend-config="key=${{ steps.utility.outputs.TFSTATE_KEY }}" -backend-config="region=${{ steps.utility.outputs.TFSTATE_REGION }}" + terraform validate -no-color + + - name: Terraform Plan + shell: bash + id: plan + working-directory: ${{ inputs.tf-modules-path }}/modules/eks-cluster/ + run: | + terraform plan -no-color -out eks.plan -var "name=${{ inputs.cluster-name }}" \ + -var "region=${{ inputs.aws-region }}" \ + -var "kubernetes_version=${{ inputs.kubernetes-version }}" \ + -var "name=${{ inputs.cluster-name }}" \ + -var "np_desired_node_count=${{ inputs.np-node-desired-count }}" \ + -var "np_min_node_count=${{ inputs.np-node-min-count }}" \ + -var "np_max_node_count=${{ 
inputs.np-node-max-count }}" \ + -var "np_disk_size=${{ inputs.np-disk-size }}" \ + -var "np_ami_type=${{ inputs.np-ami-type }}" \ + -var "cluster_service_ipv4_cidr=${{ inputs.cluster-service-ipv4-cidr }}" \ + -var "cluster_node_ipv4_cidr=${{ inputs.cluster-node-ipv4-cidr }}" \ + -var 'np_instance_types=${{ inputs.np-instance-types }}' \ + -var "np_capacity_type=${{ inputs.np-capacity-type }}" + + - name: Terraform Apply + shell: bash + id: apply + working-directory: ${{ inputs.tf-modules-path }}/modules/eks-cluster/ + run: | + terraform apply -no-color eks.plan + export cluster_endpoint="$(terraform output -raw cluster_endpoint)" + echo "cluster_endpoint=$cluster_endpoint" >> "$GITHUB_OUTPUT" + + - name: Fetch Terraform Outputs + shell: bash + id: fetch_outputs + working-directory: ${{ inputs.tf-modules-path }}/modules/eks-cluster/ + run: | + all_outputs=$(terraform output -json | jq -c .) + echo "all_terraform_outputs=$all_outputs" | tee -a "$GITHUB_OUTPUT" + + - name: Configure kubectl + shell: bash + id: kube_config + if: inputs.login == 'true' + run: | + aws eks --region ${{ inputs.aws-region }} update-kubeconfig --name ${{ inputs.cluster-name }} + + - name: Output Kube Config + shell: bash + if: inputs.login == 'true' + run: | + kubectl config view + kubectl get ns diff --git a/.github/actions/utility-action/action.yml b/.github/actions/utility-action/action.yml index 4211fac1..04963050 100644 --- a/.github/actions/utility-action/action.yml +++ b/.github/actions/utility-action/action.yml @@ -1,121 +1,124 @@ +--- name: Utility Actions description: | - A set of utility steps to be used across different workflows, including: - - Installing Terraform - - Installing AWS CLI - - Setting Terraform variables - - Checking/Creating an S3 bucket + A set of utility steps to be used across different workflows, including: + - Installing Terraform + - Installing AWS CLI + - Setting Terraform variables + - Checking/Creating an S3 bucket inputs: - awscli-version: - 
description: 'Version of the AWS CLI to install' - # renovate: datasource=github-releases depName=aws/aws-cli - default: '2.15.52' + awscli-version: + description: Version of the AWS CLI to install + # renovate: datasource=github-releases depName=aws/aws-cli + default: 2.15.52 - terraform-version: - description: 'Version of Terraform to install' - default: 'latest' + terraform-version: + description: Version of Terraform to install + default: latest - s3-backend-bucket: - description: 'Name of the S3 bucket to store Terraform state' - required: true + s3-backend-bucket: + description: Name of the S3 bucket to store Terraform state + required: true - s3-bucket-region: - description: 'Region of the bucket containing the resources states, if not set, will fallback on aws-region' - required: false + s3-bucket-region: + description: Region of the bucket containing the resources states, if not set, will fallback on aws-region + required: false - aws-region: - description: 'AWS region to use for S3 bucket operations' - required: true + aws-region: + description: AWS region to use for S3 bucket operations + required: true - name: - description: 'Name of resource instance (e.g., uid)' - required: true + name: + description: Name of resource instance (e.g., uid) + required: true - module-name: - description: 'Name of the Terraform module (e.g., eks-cluster, aurora)' - required: true + module-name: + description: Name of the Terraform module (e.g., eks-cluster, aurora) + required: true - tf-cli-config-credentials-hostname: - description: 'The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file' - default: 'app.terraform.io' + tf-cli-config-credentials-hostname: + description: The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration + file + default: app.terraform.io - tf-cli-config-credentials-token: - description: 
'The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file' - required: false + tf-cli-config-credentials-token: + description: The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration + file + required: false - tf-terraform-wrapper: - description: 'Whether or not to install a wrapper for Terraform CLI' - default: 'true' + tf-terraform-wrapper: + description: Whether or not to install a wrapper for Terraform CLI + default: 'true' outputs: - terraform-state-url: - description: 'URL of the Terraform state file in the S3 bucket' - value: "${{ steps.set-terraform-variables.outputs.terraform-state-url }}" - TFSTATE_BUCKET: - description: 'S3 bucket name for Terraform state' - value: "${{ steps.set-terraform-variables.outputs.TFSTATE_BUCKET }}" - TFSTATE_REGION: - description: 'Region of the S3 bucket for Terraform state' - value: "${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }}" - TFSTATE_KEY: - description: 'Key of the Terraform state file in the S3 bucket' - value: "${{ steps.set-terraform-variables.outputs.TFSTATE_KEY }}" + terraform-state-url: + description: URL of the Terraform state file in the S3 bucket + value: ${{ steps.set-terraform-variables.outputs.terraform-state-url }} + TFSTATE_BUCKET: + description: S3 bucket name for Terraform state + value: ${{ steps.set-terraform-variables.outputs.TFSTATE_BUCKET }} + TFSTATE_REGION: + description: Region of the S3 bucket for Terraform state + value: ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} + TFSTATE_KEY: + description: Key of the Terraform state file in the S3 bucket + value: ${{ steps.set-terraform-variables.outputs.TFSTATE_KEY }} runs: - using: 'composite' - steps: - - name: Install Terraform - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3 - with: - cli_config_credentials_hostname: ${{ 
inputs.tf-cli-config-credentials-hostname }} - cli_config_credentials_token: ${{ inputs.tf-cli-config-credentials-token }} - terraform_version: ${{ inputs.terraform-version }} - terraform_wrapper: ${{ inputs.tf-terraform-wrapper }} - - - name: Install AWS CLI - shell: bash - run: | - if ! command -v aws &> /dev/null; then - echo "AWS CLI not found, installing..." - curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-${{ inputs.awscli-version }}.zip" -o "awscliv2.zip" - unzip -qq awscliv2.zip - sudo ./aws/install - rm -f awscliv2.zip - else - echo "Warning: AWS CLI is already installed." - fi - - - name: Set Terraform variables - shell: bash - id: set-terraform-variables - run: | - export TFSTATE_BUCKET="${{ inputs.s3-backend-bucket }}" - export TFSTATE_KEY="terraform/${{ inputs.name }}/gha/${{ inputs.module-name }}/terraform.tfstate" - - if [ -z "${{ inputs.s3-bucket-region }}" ]; then - export TFSTATE_REGION="${{ inputs.aws-region }}" - else - export TFSTATE_REGION="${{ inputs.s3-bucket-region }}" - fi - - echo "TFSTATE_BUCKET=${TFSTATE_BUCKET}" >> "$GITHUB_OUTPUT" - echo "TFSTATE_REGION=${TFSTATE_REGION}" >> "$GITHUB_OUTPUT" - echo "TFSTATE_KEY=${TFSTATE_KEY}" >> "$GITHUB_OUTPUT" - - terraform_state_url="s3://${TFSTATE_BUCKET}/${TFSTATE_KEY}" - echo "terraform-state-url=${terraform_state_url}" >> "$GITHUB_OUTPUT" - - - name: Check if S3 bucket exists - id: create-s3-bucket - shell: bash - run: | - if aws s3api head-bucket --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} 2>/dev/null; then - echo "Bucket already exists" - else - echo "Bucket does not exist, creating..." 
- aws s3api create-bucket --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} --create-bucket-configuration LocationConstraint=${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} - fi - - aws s3api put-public-access-block --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} --public-access-block-configuration "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true" + using: composite + steps: + - name: Install Terraform + uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd # v3 + with: + cli_config_credentials_hostname: ${{ inputs.tf-cli-config-credentials-hostname }} + cli_config_credentials_token: ${{ inputs.tf-cli-config-credentials-token }} + terraform_version: ${{ inputs.terraform-version }} + terraform_wrapper: ${{ inputs.tf-terraform-wrapper }} + + - name: Install AWS CLI + shell: bash + run: | + if ! command -v aws &> /dev/null; then + echo "AWS CLI not found, installing..." + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64-${{ inputs.awscli-version }}.zip" -o "awscliv2.zip" + unzip -qq awscliv2.zip + sudo ./aws/install + rm -f awscliv2.zip + else + echo "Warning: AWS CLI is already installed." 
+ fi + + - name: Set Terraform variables + shell: bash + id: set-terraform-variables + run: | + export TFSTATE_BUCKET="${{ inputs.s3-backend-bucket }}" + export TFSTATE_KEY="terraform/${{ inputs.name }}/gha/${{ inputs.module-name }}/terraform.tfstate" + + if [ -z "${{ inputs.s3-bucket-region }}" ]; then + export TFSTATE_REGION="${{ inputs.aws-region }}" + else + export TFSTATE_REGION="${{ inputs.s3-bucket-region }}" + fi + + echo "TFSTATE_BUCKET=${TFSTATE_BUCKET}" >> "$GITHUB_OUTPUT" + echo "TFSTATE_REGION=${TFSTATE_REGION}" >> "$GITHUB_OUTPUT" + echo "TFSTATE_KEY=${TFSTATE_KEY}" >> "$GITHUB_OUTPUT" + + terraform_state_url="s3://${TFSTATE_BUCKET}/${TFSTATE_KEY}" + echo "terraform-state-url=${terraform_state_url}" >> "$GITHUB_OUTPUT" + + - name: Check if S3 bucket exists + id: create-s3-bucket + shell: bash + run: | + if aws s3api head-bucket --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} 2>/dev/null; then + echo "Bucket already exists" + else + echo "Bucket does not exist, creating..." 
+ aws s3api create-bucket --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} --create-bucket-configuration LocationConstraint=${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} + fi + + aws s3api put-public-access-block --bucket ${{ inputs.s3-backend-bucket }} --region ${{ steps.set-terraform-variables.outputs.TFSTATE_REGION }} --public-access-block-configuration "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true" diff --git a/.github/labeler.yml b/.github/labeler.yml index c0978e34..92c89942 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -1,17 +1,18 @@ +--- # Add 'feature' label to any PR where the head branch name starts with `feature` or has a `feature` section in the name feature: - - head-branch: ['^feature', 'feature'] + - head-branch: [^feature, feature] # Add 'test' label to any changes within 'test' folder or any subfolders test: - - changed-files: - - any-glob-to-any-file: - - test/**.go - - test/**/go.mod - - modules/fixtures/** + - changed-files: + - any-glob-to-any-file: + - test/**.go + - test/**/go.mod + - modules/fixtures/** terraform: - - changed-files: - - any-glob-to-any-file: - - modules/**.tf - - .tool-versions + - changed-files: + - any-glob-to-any-file: + - modules/**.tf + - .tool-versions diff --git a/.github/workflows/daily-cleanup.yml b/.github/workflows/daily-cleanup.yml index 280d8773..09190cae 100644 --- a/.github/workflows/daily-cleanup.yml +++ b/.github/workflows/daily-cleanup.yml @@ -2,82 +2,82 @@ name: Daily cleanup resources on: - workflow_dispatch: - inputs: - max_age_hours: - description: "Maximum age of resources in hours" - required: true - default: "20" - pull_request: - # the paths should be synced with ../labeler.yml - paths: - - .github/workflows/daily-cleanup.yml - - .github/actions/eks-cleanup-resources/** + workflow_dispatch: + inputs: + max_age_hours: + description: Maximum age of resources in 
hours + required: true + default: '20' + pull_request: + # the paths should be synced with ../labeler.yml + paths: + - .github/workflows/daily-cleanup.yml + - .github/actions/eks-cleanup-resources/** - schedule: - - cron: '0 1 * * *' # At 01:00 everyday. + schedule: + - cron: 0 1 * * * # At 01:00 everyday. env: - MAX_AGE_HOURS: "${{ github.event.inputs.max_age_hours || '20' }}" - AWS_PROFILE: "infex" + MAX_AGE_HOURS: ${{ github.event.inputs.max_age_hours || '20' }} + AWS_PROFILE: infex - # please keep those variables synced with tests.yml - AWS_REGION: "eu-west-2" - TF_STATE_BUCKET: "tests-eks-tf-state-eu-central-1" - TF_STATE_BUCKET_REGION: "eu-central-1" + # please keep those variables synced with tests.yml + AWS_REGION: eu-west-2 + TF_STATE_BUCKET: tests-eks-tf-state-eu-central-1 + TF_STATE_BUCKET_REGION: eu-central-1 jobs: - cleanup-clusters: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - with: - ref: ${{ github.head_ref }} - fetch-depth: 0 + cleanup-clusters: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + with: + ref: ${{ github.head_ref }} + fetch-depth: 0 - - name: Install tooling using asdf - uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 + - name: Install tooling using asdf + uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 - - name: Use repo .tool-version as global version - run: cp .tool-versions ~/.tool-versions + - name: Use repo .tool-version as global version + run: cp .tool-versions ~/.tool-versions - - name: Import Secrets - id: secrets - uses: hashicorp/vault-action@d1720f055e0635fd932a1d2a48f87a666a57906c # v3 - with: - url: ${{ secrets.VAULT_ADDR }} - method: approle - roleId: ${{ secrets.VAULT_ROLE_ID }} - secretId: ${{ secrets.VAULT_SECRET_ID }} - exportEnv: false - secrets: | - 
secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; - secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; + - name: Import Secrets + id: secrets + uses: hashicorp/vault-action@d1720f055e0635fd932a1d2a48f87a666a57906c # v3 + with: + url: ${{ secrets.VAULT_ADDR }} + method: approle + roleId: ${{ secrets.VAULT_ROLE_ID }} + secretId: ${{ secrets.VAULT_SECRET_ID }} + exportEnv: false + secrets: | + secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; + secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; - # Official action does not support profiles - - name: Add profile credentials to ~/.aws/credentials - run: | - aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} - aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} - aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} + # Official action does not support profiles + - name: Add profile credentials to ~/.aws/credentials + run: | + aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} - - name: Delete orphans resources - timeout-minutes: 360 - if: always() - uses: ./.github/actions/eks-cleanup-resources - with: - tf-bucket: "${{ env.TF_STATE_BUCKET }}" - tf-bucket-region: "${{ env.TF_STATE_BUCKET_REGION }}" - max-age-hours: "${{ env.MAX_AGE_HOURS }}" - target: "all" + - name: Delete orphans resources + timeout-minutes: 360 + if: always() + uses: ./.github/actions/eks-cleanup-resources + with: + tf-bucket: ${{ env.TF_STATE_BUCKET }} + tf-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} + max-age-hours: ${{ env.MAX_AGE_HOURS }} + 
target: all - - name: Notify in Slack in case of failure - id: slack-notification - if: failure() && github.event_name == 'schedule' - uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main - with: - vault_addr: ${{ secrets.VAULT_ADDR }} - vault_role_id: ${{ secrets.VAULT_ROLE_ID }} - vault_secret_id: ${{ secrets.VAULT_SECRET_ID }} + - name: Notify in Slack in case of failure + id: slack-notification + if: failure() && github.event_name == 'schedule' + uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main + with: + vault_addr: ${{ secrets.VAULT_ADDR }} + vault_role_id: ${{ secrets.VAULT_ROLE_ID }} + vault_secret_id: ${{ secrets.VAULT_SECRET_ID }} diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 0af34642..86b93f3b 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -1,26 +1,27 @@ -name: "Pull Request Labeler" +--- +name: Pull Request Labeler on: - pull_request_target: - schedule: - - cron: "0 1 * * 1" - pull_request: - paths: - - .github/workflows/labeler.yml + pull_request_target: + schedule: + - cron: 0 1 * * 1 + pull_request: + paths: + - .github/workflows/labeler.yml jobs: - labeler: - permissions: - contents: read - pull-requests: write - runs-on: ubuntu-latest - steps: - - uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5 + labeler: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5 - - name: Notify in Slack in case of failure - id: slack-notification - if: failure() && github.event_name == 'schedule' - uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main - with: - vault_addr: ${{ secrets.VAULT_ADDR }} - vault_role_id: ${{ secrets.VAULT_ROLE_ID }} - vault_secret_id: 
${{ secrets.VAULT_SECRET_ID }} + - name: Notify in Slack in case of failure + id: slack-notification + if: failure() && github.event_name == 'schedule' + uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main + with: + vault_addr: ${{ secrets.VAULT_ADDR }} + vault_role_id: ${{ secrets.VAULT_ROLE_ID }} + vault_secret_id: ${{ secrets.VAULT_SECRET_ID }} diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index f9617703..67316bb0 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -1,52 +1,53 @@ +--- name: Check external links on: - push: - workflow_dispatch: - schedule: - - cron: "0 3 1 * *" - pull_request: - paths: - - .github/workflows/links.yml + push: + workflow_dispatch: + schedule: + - cron: 0 3 1 * * + pull_request: + paths: + - .github/workflows/links.yml jobs: - lint: - name: links-check - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + lint: + name: links-check + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - - name: Get Current Timestamp - id: timestamp - run: echo "TIMESTAMP=$(date +%s)" >> "$GITHUB_ENV" + - name: Get Current Timestamp + id: timestamp + run: echo "TIMESTAMP=$(date +%s)" >> "$GITHUB_ENV" - - name: Restore lychee cache - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 - with: - path: .lycheecache - key: "cache-lychee-${{ env.TIMESTAMP }}" - restore-keys: cache-lychee- + - name: Restore lychee cache + uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4 + with: + path: .lycheecache + key: cache-lychee-${{ env.TIMESTAMP }} + restore-keys: cache-lychee- - - name: Link Checker - uses: lycheeverse/lychee-action@2b973e86fc7b1f6b36a93795fe2c9c6ae1118621 # v1.10.0 - with: - fail: true - args: -c ./lychee-links.toml --base . --cache --max-cache-age 1d . 
--verbose --no-progress '*.md' './**/*.md' - token: "${{ secrets.GITHUB_TOKEN }}" + - name: Link Checker + uses: lycheeverse/lychee-action@2b973e86fc7b1f6b36a93795fe2c9c6ae1118621 # v1.10.0 + with: + fail: true + args: -c ./lychee-links.toml --base . --cache --max-cache-age 1d . --verbose --no-progress '*.md' './**/*.md' + token: ${{ secrets.GITHUB_TOKEN }} - - name: Create Issue From File - if: failure() && github.event_name == 'schedule' - uses: peter-evans/create-issue-from-file@24452a72d85239eacf1468b0f1982a9f3fec4c94 # v5 - with: - title: Link Checker Report - content-filepath: ./lychee/out.md - labels: report, automated issue + - name: Create Issue From File + if: failure() && github.event_name == 'schedule' + uses: peter-evans/create-issue-from-file@24452a72d85239eacf1468b0f1982a9f3fec4c94 # v5 + with: + title: Link Checker Report + content-filepath: ./lychee/out.md + labels: report, automated issue - - name: Notify in Slack in case of failure - id: slack-notification - if: failure() && github.event_name == 'schedule' - uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main - with: - vault_addr: ${{ secrets.VAULT_ADDR }} - vault_role_id: ${{ secrets.VAULT_ROLE_ID }} - vault_secret_id: ${{ secrets.VAULT_SECRET_ID }} + - name: Notify in Slack in case of failure + id: slack-notification + if: failure() && github.event_name == 'schedule' + uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main + with: + vault_addr: ${{ secrets.VAULT_ADDR }} + vault_role_id: ${{ secrets.VAULT_ROLE_ID }} + vault_secret_id: ${{ secrets.VAULT_SECRET_ID }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 93393ba5..6efeb5d3 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -3,17 +3,17 @@ name: lint on: - push: - workflow_dispatch: + push: + workflow_dispatch: jobs: - lint: - name: pre-commit - 
runs-on: ubuntu-latest - steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - - name: Install tooling using asdf - uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 + lint: + name: pre-commit + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + - name: Install tooling using asdf + uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 - - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1 + - uses: pre-commit/action@2c7b3805fd2a0fd8c1884dcaebf91fc102a13ecd # v3.0.1 diff --git a/.github/workflows/test-gha-eks.yml b/.github/workflows/test-gha-eks.yml index c74c58e5..31ce6ba9 100644 --- a/.github/workflows/test-gha-eks.yml +++ b/.github/workflows/test-gha-eks.yml @@ -2,181 +2,180 @@ name: EKS Cluster with an AuroraDB creation and destruction test on: - schedule: - - cron: '0 1 * * 1' # At 01:00 on Monday. - - workflow_dispatch: - inputs: - cluster_name: - description: "Cluster name." - required: false - type: string - create_db: - description: "Should the aurora db be created" - default: "true" - delete_cluster: - description: "Whether to delete the cluster." - default: "true" - db_username: - description: "Database username." - required: false - type: string - db_password: - description: "Database password." - required: false - type: string - - pull_request: - # the paths should be synced with ../labeler.yml - paths: - - modules/fixtures/backend.tf - - modules/fixtures/fixtures.default.eks.tfvars - - modules/fixtures/fixtures.default.aurora.tfvars - - modules/eks-cluster/**.tf - - modules/aurora/**.tf - - .tool-versions - - .github/workflows/test-gha-eks-manage-cluster.yml - - .github/actions/eks-manage-cluster/*.yml - - justfile + schedule: + - cron: 0 1 * * 2 # At 01:00 on Tuesday. + + workflow_dispatch: + inputs: + cluster_name: + description: Cluster name. 
+ required: false + type: string + create_db: + description: Should the aurora db be created + default: 'true' + delete_cluster: + description: Whether to delete the cluster. + default: 'true' + db_username: + description: Database username. + required: false + type: string + db_password: + description: Database password. + required: false + type: string + + pull_request: + # the paths should be synced with ../labeler.yml + paths: + - modules/fixtures/backend.tf + - modules/fixtures/fixtures.default.eks.tfvars + - modules/fixtures/fixtures.default.aurora.tfvars + - modules/eks-cluster/**.tf + - modules/aurora/**.tf + - .tool-versions + - .github/workflows/test-gha-eks-manage-cluster.yml + - .github/actions/eks-manage-cluster/*.yml # limit to a single execution per actor of this workflow concurrency: - group: "${{ github.workflow }}-${{ github.ref }}" - cancel-in-progress: true + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true env: - AWS_PROFILE: "infex" - AWS_REGION: "eu-west-2" + AWS_PROFILE: infex + AWS_REGION: eu-west-2 # /!\ always use one of the available test region https://github.com/camunda/infraex-common-config # please keep those synced with tests.yml - TF_STATE_BUCKET: "tests-eks-tf-state-eu-central-1" - TF_STATE_BUCKET_REGION: "eu-central-1" + TF_STATE_BUCKET: tests-eks-tf-state-eu-central-1 + TF_STATE_BUCKET_REGION: eu-central-1 - CREATE_DB: "${{ github.event.inputs.create_db || 'true' }}" + CREATE_DB: ${{ github.event.inputs.create_db || 'true' }} jobs: - action-test: - runs-on: ubuntu-latest - steps: - - name: Checkout repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - with: - ref: ${{ github.head_ref }} - fetch-depth: 0 - - - name: Install tooling using asdf - uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 - - - name: Get Cluster Info - id: commit_info - run: | - if [[ -n "${{ inputs.cluster_name }}" ]]; then - cluster_name="${{ inputs.cluster_name }}" - else - 
cluster_name="cl-$(git rev-parse --short HEAD)" - fi - echo "cluster_name=$cluster_name" | tee -a "$GITHUB_OUTPUT" - - if [[ -n "${{ inputs.db_username }}" ]]; then - db_username="${{ inputs.db_username }}" - else - db_username="user$(openssl rand -hex 4 | tr -d '/@" ')" - fi - echo "db_username=$db_username" | tee -a "$GITHUB_OUTPUT" - - if [[ -n "${{ inputs.db_password }}" ]]; then - db_password="${{ inputs.db_password }}" - else - db_password="$(openssl rand -base64 12 | tr -d '/@" ')" - fi - echo "db_password=$db_password" | tee -a "$GITHUB_OUTPUT" - - # Get the current commit hash for the modules revision - tf_modules_revision=$(git rev-parse HEAD) - echo "tf_modules_revision=$tf_modules_revision" | tee -a "$GITHUB_OUTPUT" - - - name: Import Secrets - id: secrets - uses: hashicorp/vault-action@d1720f055e0635fd932a1d2a48f87a666a57906c # v3 - with: - url: ${{ secrets.VAULT_ADDR }} - method: approle - roleId: ${{ secrets.VAULT_ROLE_ID }} - secretId: ${{ secrets.VAULT_SECRET_ID }} - exportEnv: false - secrets: | - secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; - secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; - - - name: Add profile credentials to ~/.aws/credentials - run: | - aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} - aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} - aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} - - - name: Create EKS Cluster - timeout-minutes: 125 - uses: ./.github/actions/eks-manage-cluster - id: create_eks_cluster - with: - cluster-name: ${{ steps.commit_info.outputs.cluster_name }} - aws-region: ${{ env.AWS_REGION }} - s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} - s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} - tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} - - - name: Prepare Aurora 
Cluster - id: prepare_aurora_cluster - run: | - vpc_id=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -c -r '.vpc_id.value') - echo "vpc_id=$vpc_id" | tee -a "$GITHUB_OUTPUT" - - private_subnet_ids=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -c -r '.private_subnet_ids.value') - echo "private_subnet_ids=$private_subnet_ids" | tee -a "$GITHUB_OUTPUT" - - private_vpc_cidr_blocks=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -c -r '.private_vpc_cidr_blocks.value') - echo "private_vpc_cidr_blocks=$private_vpc_cidr_blocks" | tee -a "$GITHUB_OUTPUT" - - availability_zones=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=${vpc_id}" --query 'Subnets[].AvailabilityZone' --output json | jq 'unique' -c) - echo "availability_zones=$availability_zones" | tee -a "$GITHUB_OUTPUT" - - - name: Create Aurora Cluster - timeout-minutes: 125 - uses: ./.github/actions/aurora-manage-cluster - id: create_aurora_cluster - if: env.CREATE_DB == 'true' - with: - cluster-name: ${{ steps.commit_info.outputs.cluster_name }} - username: ${{ steps.commit_info.outputs.db_username }} - password: ${{ steps.commit_info.outputs.db_password }} - - s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} - s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} - tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} - - vpc-id: ${{ steps.prepare_aurora_cluster.outputs.vpc_id }} - subnet-ids: ${{ steps.prepare_aurora_cluster.outputs.private_subnet_ids }} - cidr-blocks: ${{ steps.prepare_aurora_cluster.outputs.private_vpc_cidr_blocks }} - - availability-zones: ${{ steps.prepare_aurora_cluster.outputs.availability_zones }} - - - name: Delete Clusters - timeout-minutes: 125 - if: always() && !(github.event_name == 'workflow_dispatch' && inputs.delete_cluster == 'false') - uses: ./.github/actions/eks-cleanup-resources - with: - tf-bucket: ${{ env.TF_STATE_BUCKET }} - tf-bucket-region: ${{ 
env.TF_STATE_BUCKET_REGION }} - max-age-hours: 0 - target: ${{ steps.commit_info.outputs.cluster_name }} - - - name: Notify in Slack in case of failure - id: slack-notification - if: failure() && github.event_name == 'schedule' - uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main - with: - vault_addr: ${{ secrets.VAULT_ADDR }} - vault_role_id: ${{ secrets.VAULT_ROLE_ID }} - vault_secret_id: ${{ secrets.VAULT_SECRET_ID }} + action-test: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + with: + ref: ${{ github.head_ref }} + fetch-depth: 0 + + - name: Install tooling using asdf + uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 + + - name: Get Cluster Info + id: commit_info + run: | + if [[ -n "${{ inputs.cluster_name }}" ]]; then + cluster_name="${{ inputs.cluster_name }}" + else + cluster_name="cl-$(git rev-parse --short HEAD)" + fi + echo "cluster_name=$cluster_name" | tee -a "$GITHUB_OUTPUT" + + if [[ -n "${{ inputs.db_username }}" ]]; then + db_username="${{ inputs.db_username }}" + else + db_username="user$(openssl rand -hex 4 | tr -d '/@" ')" + fi + echo "db_username=$db_username" | tee -a "$GITHUB_OUTPUT" + + if [[ -n "${{ inputs.db_password }}" ]]; then + db_password="${{ inputs.db_password }}" + else + db_password="$(openssl rand -base64 12 | tr -d '/@" ')" + fi + echo "db_password=$db_password" | tee -a "$GITHUB_OUTPUT" + + # Get the current commit hash for the modules revision + tf_modules_revision=$(git rev-parse HEAD) + echo "tf_modules_revision=$tf_modules_revision" | tee -a "$GITHUB_OUTPUT" + + - name: Import Secrets + id: secrets + uses: hashicorp/vault-action@d1720f055e0635fd932a1d2a48f87a666a57906c # v3 + with: + url: ${{ secrets.VAULT_ADDR }} + method: approle + roleId: ${{ secrets.VAULT_ROLE_ID }} + secretId: ${{ secrets.VAULT_SECRET_ID }} + exportEnv: false + 
secrets: | + secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; + secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; + + - name: Add profile credentials to ~/.aws/credentials + run: | + aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} + + - name: Create EKS Cluster + timeout-minutes: 125 + uses: ./.github/actions/eks-manage-cluster + id: create_eks_cluster + with: + cluster-name: ${{ steps.commit_info.outputs.cluster_name }} + aws-region: ${{ env.AWS_REGION }} + s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} + s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} + tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} + + - name: Prepare Aurora Cluster + id: prepare_aurora_cluster + run: | + vpc_id=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -c -r '.vpc_id.value') + echo "vpc_id=$vpc_id" | tee -a "$GITHUB_OUTPUT" + + private_subnet_ids=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -c -r '.private_subnet_ids.value') + echo "private_subnet_ids=$private_subnet_ids" | tee -a "$GITHUB_OUTPUT" + + private_vpc_cidr_blocks=$(echo '${{ steps.create_eks_cluster.outputs.all-terraform-outputs }}' | jq -c -r '.private_vpc_cidr_blocks.value') + echo "private_vpc_cidr_blocks=$private_vpc_cidr_blocks" | tee -a "$GITHUB_OUTPUT" + + availability_zones=$(aws ec2 describe-subnets --filters "Name=vpc-id,Values=${vpc_id}" --query 'Subnets[].AvailabilityZone' --output json | jq 'unique' -c) + echo "availability_zones=$availability_zones" | tee -a "$GITHUB_OUTPUT" + + - name: Create Aurora Cluster + timeout-minutes: 125 + uses: ./.github/actions/aurora-manage-cluster + id: create_aurora_cluster + if: 
env.CREATE_DB == 'true' + with: + cluster-name: ${{ steps.commit_info.outputs.cluster_name }} + username: ${{ steps.commit_info.outputs.db_username }} + password: ${{ steps.commit_info.outputs.db_password }} + + s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} + s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} + tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} + + vpc-id: ${{ steps.prepare_aurora_cluster.outputs.vpc_id }} + subnet-ids: ${{ steps.prepare_aurora_cluster.outputs.private_subnet_ids }} + cidr-blocks: ${{ steps.prepare_aurora_cluster.outputs.private_vpc_cidr_blocks }} + + availability-zones: ${{ steps.prepare_aurora_cluster.outputs.availability_zones }} + + - name: Delete Clusters + timeout-minutes: 125 + if: always() && !(github.event_name == 'workflow_dispatch' && inputs.delete_cluster == 'false') + uses: ./.github/actions/eks-cleanup-resources + with: + tf-bucket: ${{ env.TF_STATE_BUCKET }} + tf-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} + max-age-hours: 0 + target: ${{ steps.commit_info.outputs.cluster_name }} + + - name: Notify in Slack in case of failure + id: slack-notification + if: failure() && github.event_name == 'schedule' + uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main + with: + vault_addr: ${{ secrets.VAULT_ADDR }} + vault_role_id: ${{ secrets.VAULT_ROLE_ID }} + vault_secret_id: ${{ secrets.VAULT_SECRET_ID }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 8bc6fda3..9bffc803 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -2,248 +2,248 @@ name: Terraform modules tests on: - schedule: - - cron: '0 1 * * 2' - workflow_dispatch: - pull_request: - # the paths should be synced with ../labeler.yml - paths: - - test/**.go - - test/**/go.mod - - modules/fixtures/** - - modules/**.tf - - .tool-versions - - .github/workflows/tests.yml - - justfile + schedule: + - cron: 0 1 * * 2 + 
workflow_dispatch: + pull_request: + # the paths should be synced with ../labeler.yml + paths: + - test/**.go + - test/**/go.mod + - modules/fixtures/** + - modules/**.tf + - .tool-versions + - .github/workflows/tests.yml + - justfile # limit to a single execution per ref of this workflow concurrency: - group: "${{ github.workflow }}-${{ github.ref }}" - cancel-in-progress: true + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true env: - # please keep those variables synced with daily-cleanup.yml - AWS_PROFILE: "infex" - AWS_REGION: "eu-west-2" # /!\ always use one of the available test region https://github.com/camunda/infraex-common-config - TESTS_TF_BINARY_NAME: "terraform" + # please keep those variables synced with daily-cleanup.yml + AWS_PROFILE: infex + AWS_REGION: eu-west-2 # /!\ always use one of the available test region https://github.com/camunda/infraex-common-config + TESTS_TF_BINARY_NAME: terraform - # please keep test-gha*.yml synced - TF_STATE_BUCKET: "tests-eks-tf-state-eu-central-1" - TF_STATE_BUCKET_REGION: "eu-central-1" + # please keep test-gha*.yml synced + TF_STATE_BUCKET: tests-eks-tf-state-eu-central-1 + TF_STATE_BUCKET_REGION: eu-central-1 jobs: - # We can skip some tests using the commit description (skip-tests:NameOfTest1,NameOfTest2) or all tests (skip-tests:all) (see `DEVELOPER.md`) - # If all tests are skipped, the result of this workflow will be `failed` on purpose - # If you want to skip tests and have no error, you need to use `testing-ci-not-necessary` as a label on the PR - configure-tests: - runs-on: ubuntu-latest - if: >- - github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' || ( - github.event_name == 'pull_request' && - !contains(github.event.pull_request.labels.*.name, 'testing-ci-not-necessary') - ) - outputs: - test_functions: ${{ steps.extract_test_functions.outputs.test_functions }} - cluster_id: ${{ steps.short_git_sha.outputs.short_git_sha }} - steps: - - name: Checkout 
repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - with: - ref: ${{ github.head_ref }} - fetch-depth: 0 - - - name: Get Short GitHub SHA - id: short_git_sha - run: echo "short_git_sha=$(git rev-parse --short HEAD)" >> "$GITHUB_OUTPUT" - - - name: Extract Test Functions - id: extract_test_functions - run: | - test_functions=$(grep -rho 'func \(Test[^ ]*\)' ./test/src/ | sed 's/func \(Test[^ ]*\)(t/\1/' | tr '\n' ',' | sed 's/,$//') - echo "test_functions=$test_functions" - - : # Extract test names marked to be skipped from the commit message description - commit_message=$(git log -1 --pretty=format:"%B") - echo "commit_message=$commit_message" - - skipped_tests=$(echo "$commit_message" | grep 'skip-tests' | sed 's/skip-tests://') - echo "skipped_tests=$skipped_tests" - - : # If all tests are marked to be skipped, then clear the test_functions list completely - if [ "$skipped_tests" == "all" ]; then - test_functions="" - echo "Skipping all tests (skip-tests:all found), this workflow will fail. 
If you want to skip-tests for a PR, please use the label 'testing-ci-not-necessary'" - else - : # Otherwise, remove the tests marked to be skipped from the test_functions list - if [ -n "$skipped_tests" ]; then - for test in $(echo "$skipped_tests" | tr ',' '\n'); do - echo "Skipping test: $test" - test_functions=$(echo "$test_functions" | sed "s/$test//g" | sed 's/,,/,/g' | sed 's/^,//' | sed 's/,$//') - echo "test_functions=$test_functions" - done - fi - fi - - : # to json array - IFS=',' read -ra array <<< "$test_functions" - json_array="[" - for element in "${array[@]}" - do - json_array+="\"$element\"," - done - test_functions="${json_array%,}]" - - echo "test_functions=${test_functions}" >> "$GITHUB_OUTPUT" - echo "test_functions=${test_functions}" - - integration-tests: - runs-on: ubuntu-latest - needs: - - configure-tests - strategy: - fail-fast: false # don't propagate failing jobs - matrix: - test_function: ${{ fromJson(needs.configure-tests.outputs.test_functions) }} - steps: - - name: Checkout repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - - - name: Install tooling using asdf - uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 - - - name: Import Secrets - id: secrets - uses: hashicorp/vault-action@d1720f055e0635fd932a1d2a48f87a666a57906c # v3 - with: - url: ${{ secrets.VAULT_ADDR }} - method: approle - roleId: ${{ secrets.VAULT_ROLE_ID }} - secretId: ${{ secrets.VAULT_SECRET_ID }} - exportEnv: false - secrets: | - secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; - secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; - - # Official action does not support profiles - - name: Add profile credentials to ~/.aws/credentials - run: | - aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} - aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ 
env.AWS_PROFILE }} - aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} - - - name: Get go.mod details - uses: Eun/go-mod-details@b719cd324463e2037cf3a0dd1dd6091bdc2730f4 # v1 - id: go-mod-details - with: - modfile: ${{ github.workspace }}/test/src/go.mod - - - name: Launch test - timeout-minutes: 125 - run: | - export TESTS_CLUSTER_ID="${{ needs.configure-tests.outputs.cluster_id }}" - export TESTS_CLUSTER_REGION="${{ env.AWS_REGION }}" - export TESTS_TF_BINARY_NAME="${{ env.TESTS_TF_BINARY_NAME }}" - just test ${{ matrix.test_function }} "--junitfile ${{ matrix.test_function }}_unit-tests.xml" - - # this is a workaround for test report not working as expected due to https://github.com/test-summary/action/issues/5 - - name: Filter logger.go from the test report (too large) + # We can skip some tests using the commit description (skip-tests:NameOfTest1,NameOfTest2) or all tests (skip-tests:all) (see `DEVELOPER.md`) + # If all tests are skipped, the result of this workflow will be `failed` on purpose + # If you want to skip tests and have no error, you need to use `testing-ci-not-necessary` as a label on the PR + configure-tests: + runs-on: ubuntu-latest + if: >- + github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' || ( + github.event_name == 'pull_request' && + !contains(github.event.pull_request.labels.*.name, 'testing-ci-not-necessary') + ) + outputs: + test_functions: ${{ steps.extract_test_functions.outputs.test_functions }} + cluster_id: ${{ steps.short_git_sha.outputs.short_git_sha }} + steps: + - name: Checkout repository + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + with: + ref: ${{ github.head_ref }} + fetch-depth: 0 + + - name: Get Short GitHub SHA + id: short_git_sha + run: echo "short_git_sha=$(git rev-parse --short HEAD)" >> "$GITHUB_OUTPUT" + + - name: Extract Test Functions + id: extract_test_functions + run: | + test_functions=$(grep -rho 'func \(Test[^ ]*\)' 
./test/src/ | sed 's/func \(Test[^ ]*\)(t/\1/' | tr '\n' ',' | sed 's/,$//') + echo "test_functions=$test_functions" + + : # Extract test names marked to be skipped from the commit message description + commit_message=$(git log -1 --pretty=format:"%B") + echo "commit_message=$commit_message" + + skipped_tests=$(echo "$commit_message" | grep 'skip-tests' | sed 's/skip-tests://') + echo "skipped_tests=$skipped_tests" + + : # If all tests are marked to be skipped, then clear the test_functions list completely + if [ "$skipped_tests" == "all" ]; then + test_functions="" + echo "Skipping all tests (skip-tests:all found), this workflow will fail. If you want to skip-tests for a PR, please use the label 'testing-ci-not-necessary'" + else + : # Otherwise, remove the tests marked to be skipped from the test_functions list + if [ -n "$skipped_tests" ]; then + for test in $(echo "$skipped_tests" | tr ',' '\n'); do + echo "Skipping test: $test" + test_functions=$(echo "$test_functions" | sed "s/$test//g" | sed 's/,,/,/g' | sed 's/^,//' | sed 's/,$//') + echo "test_functions=$test_functions" + done + fi + fi + + : # to json array + IFS=',' read -ra array <<< "$test_functions" + json_array="[" + for element in "${array[@]}" + do + json_array+="\"$element\"," + done + test_functions="${json_array%,}]" + + echo "test_functions=${test_functions}" >> "$GITHUB_OUTPUT" + echo "test_functions=${test_functions}" + + integration-tests: + runs-on: ubuntu-latest + needs: + - configure-tests + strategy: + fail-fast: false # don't propagate failing jobs + matrix: + test_function: ${{ fromJson(needs.configure-tests.outputs.test_functions) }} + steps: + - name: Checkout repository + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + + - name: Install tooling using asdf + uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 + + - name: Import Secrets + id: secrets + uses: hashicorp/vault-action@d1720f055e0635fd932a1d2a48f87a666a57906c # v3 + with: + 
url: ${{ secrets.VAULT_ADDR }} + method: approle + roleId: ${{ secrets.VAULT_ROLE_ID }} + secretId: ${{ secrets.VAULT_SECRET_ID }} + exportEnv: false + secrets: | + secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; + secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; + + # Official action does not support profiles + - name: Add profile credentials to ~/.aws/credentials + run: | + aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} + + - name: Get go.mod details + uses: Eun/go-mod-details@b719cd324463e2037cf3a0dd1dd6091bdc2730f4 # v1 + id: go-mod-details + with: + modfile: ${{ github.workspace }}/test/src/go.mod + + - name: Launch test + timeout-minutes: 125 + run: | + export TESTS_CLUSTER_ID="${{ needs.configure-tests.outputs.cluster_id }}" + export TESTS_CLUSTER_REGION="${{ env.AWS_REGION }}" + export TESTS_TF_BINARY_NAME="${{ env.TESTS_TF_BINARY_NAME }}" + just test ${{ matrix.test_function }} "--junitfile ${{ matrix.test_function }}_unit-tests.xml" + + # this is a workaround for test report not working as expected due to https://github.com/test-summary/action/issues/5 + - name: Filter logger.go from the test report (too large) + if: always() + run: | + sed 's/ /\n/g' < "./test/src/${{ matrix.test_function }}_unit-tests.xml" | grep -E -v '^.*logger\.go.*$' | sed 's/\n/ /g' > "./test/src/${{ matrix.test_function }}_unit-tests_filtered.xml" + + - name: Upload test reports + if: always() + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4 + with: + name: test-reports-${{ matrix.test_function }} + path: ./test/src/${{ matrix.test_function }}_unit-tests_filtered.xml + retention-days: 1 + + - name: Remove profile credentials from 
~/.aws/credentials + if: always() + run: | + rm -rf ~/.aws/credentials + + test-report: + runs-on: ubuntu-latest + if: ${{ always() && needs.configure-tests.result == 'success' }} + needs: + - configure-tests + - integration-tests + steps: + - name: Download artifacts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4 + with: + pattern: test-reports-* + path: /tmp/testreports + merge-multiple: true + + - name: Run test-summary + uses: test-summary/action@31493c76ec9e7aa675f1585d3ed6f1da69269a86 # v2 + with: + paths: /tmp/testreports/**/*.xml + + cleanup-resources: + runs-on: ubuntu-latest if: always() - run: | - sed 's/ /\n/g' < "./test/src/${{ matrix.test_function }}_unit-tests.xml" | grep -E -v '^.*logger\.go.*$' | sed 's/\n/ /g' > "./test/src/${{ matrix.test_function }}_unit-tests_filtered.xml" - - - name: Upload test reports - if: always() - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4 - with: - name: test-reports-${{ matrix.test_function }} - path: "./test/src/${{ matrix.test_function }}_unit-tests_filtered.xml" - retention-days: 1 - - - name: Remove profile credentials from ~/.aws/credentials - if: always() - run: | - rm -rf ~/.aws/credentials - - test-report: - runs-on: ubuntu-latest - if: ${{ always() && needs.configure-tests.result == 'success' }} - needs: - - configure-tests - - integration-tests - steps: - - name: Download artifacts - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4 - with: - pattern: test-reports-* - path: /tmp/testreports - merge-multiple: true - - - name: Run test-summary - uses: test-summary/action@31493c76ec9e7aa675f1585d3ed6f1da69269a86 # v2 - with: - paths: /tmp/testreports/**/*.xml - - cleanup-resources: - runs-on: ubuntu-latest - if: always() - needs: - - configure-tests - - integration-tests - steps: - - name: Checkout repository - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 - with: - ref: ${{ github.head_ref }} - 
fetch-depth: 0 - - - name: Install tooling using asdf - uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 - - - name: Import Secrets - id: secrets - uses: hashicorp/vault-action@d1720f055e0635fd932a1d2a48f87a666a57906c # v3 - with: - url: ${{ secrets.VAULT_ADDR }} - method: approle - roleId: ${{ secrets.VAULT_ROLE_ID }} - secretId: ${{ secrets.VAULT_SECRET_ID }} - exportEnv: false - secrets: | - secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; - secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; - - # Official action does not support profiles - - name: Add profile credentials to ~/.aws/credentials - run: | - aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} - aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} - aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} - - - name: Delete resources of this run - timeout-minutes: 125 - if: always() - uses: ./.github/actions/eks-cleanup-resources - with: - tf-bucket: "${{ env.TF_STATE_BUCKET }}" - tf-bucket-region: "${{ env.TF_STATE_BUCKET_REGION }}" - max-age-hours: "0" - target: "${{ needs.configure-tests.outputs.cluster_id }}" - - notify-on-failure: - runs-on: ubuntu-latest - if: failure() - needs: - - configure-tests - - integration-tests - - test-report - - cleanup-resources - steps: - - name: Notify in Slack in case of failure - id: slack-notification - if: github.event_name == 'schedule' - uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main - with: - vault_addr: ${{ secrets.VAULT_ADDR }} - vault_role_id: ${{ secrets.VAULT_ROLE_ID }} - vault_secret_id: ${{ secrets.VAULT_SECRET_ID }} + needs: + - configure-tests + - integration-tests + steps: + - name: Checkout repository + uses: 
actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4 + with: + ref: ${{ github.head_ref }} + fetch-depth: 0 + + - name: Install tooling using asdf + uses: asdf-vm/actions/install@05e0d2ed97b598bfce82fd30daf324ae0c4570e6 # v3 + + - name: Import Secrets + id: secrets + uses: hashicorp/vault-action@d1720f055e0635fd932a1d2a48f87a666a57906c # v3 + with: + url: ${{ secrets.VAULT_ADDR }} + method: approle + roleId: ${{ secrets.VAULT_ROLE_ID }} + secretId: ${{ secrets.VAULT_SECRET_ID }} + exportEnv: false + secrets: | + secret/data/products/infrastructure-experience/ci/common AWS_ACCESS_KEY; + secret/data/products/infrastructure-experience/ci/common AWS_SECRET_KEY; + + # Official action does not support profiles + - name: Add profile credentials to ~/.aws/credentials + run: | + aws configure set aws_access_key_id ${{ steps.secrets.outputs.AWS_ACCESS_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set aws_secret_access_key ${{ steps.secrets.outputs.AWS_SECRET_KEY }} --profile ${{ env.AWS_PROFILE }} + aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} + + - name: Delete resources of this run + timeout-minutes: 125 + if: always() + uses: ./.github/actions/eks-cleanup-resources + with: + tf-bucket: ${{ env.TF_STATE_BUCKET }} + tf-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} + max-age-hours: '0' + target: ${{ needs.configure-tests.outputs.cluster_id }} + + notify-on-failure: + runs-on: ubuntu-latest + if: failure() + needs: + - configure-tests + - integration-tests + - test-report + - cleanup-resources + steps: + - name: Notify in Slack in case of failure + id: slack-notification + if: github.event_name == 'schedule' + uses: camunda/infraex-common-config/.github/actions/report-failure-on-slack@fe25bf36edcc03717275a4e05969cdb5483388df # main + with: + vault_addr: ${{ secrets.VAULT_ADDR }} + vault_role_id: ${{ secrets.VAULT_ROLE_ID }} + vault_secret_id: ${{ secrets.VAULT_SECRET_ID }} diff --git 
a/.lint/terraform_docs/.terraform-docs.yml b/.lint/terraform_docs/.terraform-docs.yml index edd51d8f..7cfe2d36 100644 --- a/.lint/terraform_docs/.terraform-docs.yml +++ b/.lint/terraform_docs/.terraform-docs.yml @@ -1,42 +1,42 @@ --- # Configuration documentation https://terraform-docs.io/user-guide/configuration/ -formatter: "markdown table" # Required -version: "" # version is managed by asdf +formatter: markdown table # Required +version: '' # version is managed by asdf header-from: main.tf -footer-from: "" +footer-from: '' recursive: - enabled: false + enabled: false content: |- - {{ .Modules }} - {{ .Resources }} - {{ .Inputs }} - {{ .Outputs }} + {{ .Modules }} + {{ .Resources }} + {{ .Inputs }} + {{ .Outputs }} sections: - hide: [] - show: [] + hide: [] + show: [] output-values: - enabled: false - from: "" + enabled: false + from: '' sort: - enabled: true - by: name + enabled: true + by: name settings: - anchor: true - color: true - default: true - description: true - escape: true - hide-empty: false - html: true - indent: 2 - lockfile: false - read-comments: true - required: true - sensitive: true - type: true + anchor: true + color: true + default: true + description: true + escape: true + hide-empty: false + html: true + indent: 2 + lockfile: false + read-comments: true + required: true + sensitive: true + type: true diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e4679496..0b886fc9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,55 +3,59 @@ # See https://pre-commit.com/hooks.html for more hooks repos: -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 - hooks: - - id: check-added-large-files - - id: end-of-file-fixer - - id: trailing-whitespace - - id: check-yaml - args: [--allow-multiple-documents] - - id: check-json - - id: check-symlinks - - id: check-shebang-scripts-are-executable - - id: detect-private-key + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.6.0 + hooks: 
+ - id: check-added-large-files + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-yaml + args: [--allow-multiple-documents] + - id: check-json + - id: check-symlinks + - id: check-shebang-scripts-are-executable + - id: detect-private-key -- repo: https://github.com/rhysd/actionlint - rev: v1.7.1 - hooks: - - id: actionlint-docker + - repo: https://github.com/rhysd/actionlint + rev: v1.7.1 + hooks: + - id: actionlint-docker -- repo: https://github.com/renovatebot/pre-commit-hooks - rev: 38.72.1 - hooks: - - id: renovate-config-validator - args: ["--strict"] + - repo: https://github.com/renovatebot/pre-commit-hooks + rev: 38.72.1 + hooks: + - id: renovate-config-validator + args: [--strict] -- repo: https://github.com/compilerla/conventional-pre-commit - rev: v3.4.0 # use tags until renovate supports sha: https://github.com/renovatebot/renovate/issues/22567 - hooks: - - id: conventional-pre-commit - stages: [commit-msg] - args: ["--strict" , "--force-scope"] + - repo: https://github.com/compilerla/conventional-pre-commit + rev: v3.4.0 # use tags until renovate supports sha: https://github.com/renovatebot/renovate/issues/22567 + hooks: + - id: conventional-pre-commit + stages: [commit-msg] + args: [--strict, --force-scope] -- repo: https://github.com/antonbabenko/pre-commit-terraform - rev: v1.94.1 - hooks: - - id: terraform_fmt - - id: terraform_tflint - args: - - --args=--config=__GIT_WORKING_DIR__/.lint/tflint/.tflint.hcl - - id: terraform_docs - args: - - --hook-config=--path-to-file=README.md - - --hook-config=--create-file-if-not-exist=true - - --hook-config=--add-to-existing-file=true - - --args=--config=.lint/terraform_docs/.terraform-docs.yml + - repo: https://github.com/antonbabenko/pre-commit-terraform + rev: v1.94.1 + hooks: + - id: terraform_fmt + - id: terraform_tflint + args: + - --args=--config=__GIT_WORKING_DIR__/.lint/tflint/.tflint.hcl + - id: terraform_docs + args: + - --hook-config=--path-to-file=README.md + - 
--hook-config=--create-file-if-not-exist=true + - --hook-config=--add-to-existing-file=true + - --args=--config=.lint/terraform_docs/.terraform-docs.yml -- repo: https://github.com/dnephin/pre-commit-golang - rev: v0.5.1 - hooks: - - id: go-fmt - - id: no-go-testing - - id: go-mod-tidy -... + - repo: https://github.com/dnephin/pre-commit-golang + rev: v0.5.1 + hooks: + - id: go-fmt + - id: no-go-testing + - id: go-mod-tidy + + - repo: https://github.com/jumanjihouse/pre-commit-hook-yamlfmt + rev: 0.2.3 + hooks: + - id: yamlfmt diff --git a/modules/fixtures/postgres-client-irsa.yml b/modules/fixtures/postgres-client-irsa.yml index 5fabb4bc..c5aed9c1 100644 --- a/modules/fixtures/postgres-client-irsa.yml +++ b/modules/fixtures/postgres-client-irsa.yml @@ -1,76 +1,77 @@ +--- # this manifest contains a version with the IRSA connection check, it is currently listed as a TODO # it may be implemented or dropped depending on if it's relevant or not to test IRSA connection for the db apiVersion: batch/v1 kind: Job metadata: - name: postgres-client - labels: - app: postgres-client + name: postgres-client + labels: + app: postgres-client spec: - backoffLimit: 0 - template: - spec: - restartPolicy: Never - containers: - - name: postgres-client - image: ubuntu:latest - command: - - sh - - "-c" - - | - /bin/bash <<'EOF' - set -o pipefail && \ - apt-get update && \ - apt-get install -y python3 python3-pip build-essential postgresql-client && \ - echo "Creating IRSA db user" && \ - mkdir -p /tmp/scripts && cp /scripts/create_aurora_pg_db.sh /tmp/scripts/create_aurora_pg_db.sh && chmod +x /tmp/scripts/create_aurora_pg_db.sh && /tmp/scripts/create_aurora_pg_db.sh && \ - echo "Testing connection using IRSA" && \ - python3 -m pip install awscli && \ - AWS_PG_PASSWORD=$(aws rds generate-db-auth-token --hostname $AURORA_ENDPOINT --port $AURORA_PORT --region $AWS_REGION --username $AURORA_USERNAME_IRSA) && \ - psql -h $AURORA_ENDPOINT -p $AURORA_PORT "dbname=$AURORA_DB_NAME 
user=$AURORA_USERNAME_IRSA password=$AWS_PG_PASSWORD" -c 'SELECT version();' + backoffLimit: 0 + template: + spec: + restartPolicy: Never + containers: + - name: postgres-client + image: ubuntu:latest + command: + - sh + - -c + - | + /bin/bash <<'EOF' + set -o pipefail && \ + apt-get update && \ + apt-get install -y python3 python3-pip build-essential postgresql-client && \ + echo "Creating IRSA db user" && \ + mkdir -p /tmp/scripts && cp /scripts/create_aurora_pg_db.sh /tmp/scripts/create_aurora_pg_db.sh && chmod +x /tmp/scripts/create_aurora_pg_db.sh && /tmp/scripts/create_aurora_pg_db.sh && \ + echo "Testing connection using IRSA" && \ + python3 -m pip install awscli && \ + AWS_PG_PASSWORD=$(aws rds generate-db-auth-token --hostname $AURORA_ENDPOINT --port $AURORA_PORT --region $AWS_REGION --username $AURORA_USERNAME_IRSA) && \ + psql -h $AURORA_ENDPOINT -p $AURORA_PORT "dbname=$AURORA_DB_NAME user=$AURORA_USERNAME_IRSA password=$AWS_PG_PASSWORD" -c 'SELECT version();' - EOF - volumeMounts: - - name: scripts - mountPath: /scripts - readOnly: true - env: - - name: AURORA_ENDPOINT - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_endpoint - - name: AURORA_USERNAME - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_username - - name: AURORA_USERNAME_IRSA - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_username_irsa - - name: AURORA_PASSWORD - valueFrom: - secretKeyRef: - name: aurora-secret - key: aurora_password - - name: AURORA_PORT - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_port - - name: AWS_REGION - valueFrom: - configMapKeyRef: - name: aurora-config - key: aws_region - - name: AURORA_DB_NAME - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_db_name - volumes: - - name: scripts - configMap: - name: postgres-scripts + EOF + volumeMounts: + - name: scripts + mountPath: /scripts + readOnly: true + env: + - name: AURORA_ENDPOINT + valueFrom: + configMapKeyRef: + name: 
aurora-config + key: aurora_endpoint + - name: AURORA_USERNAME + valueFrom: + configMapKeyRef: + name: aurora-config + key: aurora_username + - name: AURORA_USERNAME_IRSA + valueFrom: + configMapKeyRef: + name: aurora-config + key: aurora_username_irsa + - name: AURORA_PASSWORD + valueFrom: + secretKeyRef: + name: aurora-secret + key: aurora_password + - name: AURORA_PORT + valueFrom: + configMapKeyRef: + name: aurora-config + key: aurora_port + - name: AWS_REGION + valueFrom: + configMapKeyRef: + name: aurora-config + key: aws_region + - name: AURORA_DB_NAME + valueFrom: + configMapKeyRef: + name: aurora-config + key: aurora_db_name + volumes: + - name: scripts + configMap: + name: postgres-scripts diff --git a/modules/fixtures/postgres-client.yml b/modules/fixtures/postgres-client.yml index 95edbb70..eb809559 100644 --- a/modules/fixtures/postgres-client.yml +++ b/modules/fixtures/postgres-client.yml @@ -1,70 +1,71 @@ +--- apiVersion: batch/v1 kind: Job metadata: - name: postgres-client - labels: - app: postgres-client + name: postgres-client + labels: + app: postgres-client spec: - backoffLimit: 0 - template: - spec: - restartPolicy: Never - containers: - - name: postgres-client - image: ubuntu:latest - command: - - sh - - "-c" - - | - /bin/bash <<'EOF' - set -o pipefail && \ - apt-get update && \ - apt-get install -y python3 python3-pip build-essential postgresql-client && \ - echo "Creating IRSA db user" && \ - mkdir -p /tmp/scripts && cp /scripts/create_aurora_pg_db.sh /tmp/scripts/create_aurora_pg_db.sh && chmod +x /tmp/scripts/create_aurora_pg_db.sh && /tmp/scripts/create_aurora_pg_db.sh + backoffLimit: 0 + template: + spec: + restartPolicy: Never + containers: + - name: postgres-client + image: ubuntu:latest + command: + - sh + - -c + - | + /bin/bash <<'EOF' + set -o pipefail && \ + apt-get update && \ + apt-get install -y python3 python3-pip build-essential postgresql-client && \ + echo "Creating IRSA db user" && \ + mkdir -p /tmp/scripts && cp 
/scripts/create_aurora_pg_db.sh /tmp/scripts/create_aurora_pg_db.sh && chmod +x /tmp/scripts/create_aurora_pg_db.sh && /tmp/scripts/create_aurora_pg_db.sh - EOF - volumeMounts: - - name: scripts - mountPath: /scripts - readOnly: true - env: - - name: AURORA_ENDPOINT - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_endpoint - - name: AURORA_USERNAME - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_username - - name: AURORA_USERNAME_IRSA - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_username_irsa - - name: AURORA_PASSWORD - valueFrom: - secretKeyRef: - name: aurora-secret - key: aurora_password - - name: AURORA_PORT - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_port - - name: AWS_REGION - valueFrom: - configMapKeyRef: - name: aurora-config - key: aws_region - - name: AURORA_DB_NAME - valueFrom: - configMapKeyRef: - name: aurora-config - key: aurora_db_name - volumes: - - name: scripts - configMap: - name: postgres-scripts + EOF + volumeMounts: + - name: scripts + mountPath: /scripts + readOnly: true + env: + - name: AURORA_ENDPOINT + valueFrom: + configMapKeyRef: + name: aurora-config + key: aurora_endpoint + - name: AURORA_USERNAME + valueFrom: + configMapKeyRef: + name: aurora-config + key: aurora_username + - name: AURORA_USERNAME_IRSA + valueFrom: + configMapKeyRef: + name: aurora-config + key: aurora_username_irsa + - name: AURORA_PASSWORD + valueFrom: + secretKeyRef: + name: aurora-secret + key: aurora_password + - name: AURORA_PORT + valueFrom: + configMapKeyRef: + name: aurora-config + key: aurora_port + - name: AWS_REGION + valueFrom: + configMapKeyRef: + name: aurora-config + key: aws_region + - name: AURORA_DB_NAME + valueFrom: + configMapKeyRef: + name: aurora-config + key: aurora_db_name + volumes: + - name: scripts + configMap: + name: postgres-scripts diff --git a/modules/fixtures/whoami-deployment.yml b/modules/fixtures/whoami-deployment.yml index 2eb1abf7..89a065c8 
100644 --- a/modules/fixtures/whoami-deployment.yml +++ b/modules/fixtures/whoami-deployment.yml @@ -2,40 +2,41 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: whoami - namespace: example + name: whoami + namespace: example spec: - replicas: 1 - selector: - matchLabels: - app: whoami - template: - metadata: - labels: - app: whoami - spec: - containers: - - name: whoami - image: containous/whoami - ports: - - containerPort: 80 - readinessProbe: - httpGet: - path: /health - port: 80 - initialDelaySeconds: 10 - periodSeconds: 5 + replicas: 1 + selector: + matchLabels: + app: whoami + template: + metadata: + labels: + app: whoami + spec: + containers: + - name: whoami + image: containous/whoami + ports: + - containerPort: 80 + readinessProbe: + httpGet: + path: /health + port: 80 + initialDelaySeconds: 10 + periodSeconds: 5 --- apiVersion: v1 kind: Service metadata: - name: whoami-service - namespace: example + name: whoami-service + namespace: example spec: - selector: - app: whoami - ports: - - protocol: TCP - port: 80 - targetPort: 80 + selector: + app: whoami + ports: + - protocol: TCP + port: 80 + targetPort: 80 --- +... From ece9e089880813be61fcbb8da7f24b563e0417a4 Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Mon, 16 Sep 2024 20:32:53 +0200 Subject: [PATCH 41/51] use tf-state-key --- .github/actions/aurora-manage-cluster/action.yml | 3 +-- .github/actions/eks-manage-cluster/action.yml | 3 +-- .github/actions/utility-action/action.yml | 6 +++--- .github/workflows/test-gha-eks.yml | 6 +++--- 4 files changed, 8 insertions(+), 10 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 9343e94f..32474df2 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -140,8 +140,7 @@ runs: s3-backend-bucket: ${{ inputs.s3-backend-bucket }} s3-bucket-region: ${{ inputs.s3-bucket-region }} - name: ${{ inputs.cluster-name }} - module-name: aurora + tf-state-key: terraform/${{ inputs.cluster-name }}/gha/aurora/terraform.tfstate tf-cli-config-credentials-hostname: ${{ inputs.tf-cli-config-credentials-hostname }} tf-cli-config-credentials-token: ${{ inputs.tf-cli-config-credentials-token }} diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index ea37d2c1..ade280c4 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -135,8 +135,7 @@ runs: s3-backend-bucket: ${{ inputs.s3-backend-bucket }} s3-bucket-region: ${{ inputs.s3-bucket-region }} - name: ${{ inputs.cluster-name }} - module-name: eks-cluster + tf-state-key: terraform/${{ inputs.cluster-name }}/gha/eks-cluster/terraform.tfstate tf-cli-config-credentials-hostname: ${{ inputs.tf-cli-config-credentials-hostname }} tf-cli-config-credentials-token: ${{ inputs.tf-cli-config-credentials-token }} diff --git a/.github/actions/utility-action/action.yml b/.github/actions/utility-action/action.yml index 04963050..218e623c 100644 --- a/.github/actions/utility-action/action.yml +++ b/.github/actions/utility-action/action.yml @@ -30,8 +30,8 
@@ inputs: description: AWS region to use for S3 bucket operations required: true - name: - description: Name of resource instance (e.g., uid) + tf-state-key: + description: 'Key used to store the tfstate file (e.g.: /tfstates/terraform.tfstate)' required: true module-name: @@ -95,7 +95,7 @@ runs: id: set-terraform-variables run: | export TFSTATE_BUCKET="${{ inputs.s3-backend-bucket }}" - export TFSTATE_KEY="terraform/${{ inputs.name }}/gha/${{ inputs.module-name }}/terraform.tfstate" + export TFSTATE_KEY="${{ inputs.tf-state-key }}" if [ -z "${{ inputs.s3-bucket-region }}" ]; then export TFSTATE_REGION="${{ inputs.aws-region }}" diff --git a/.github/workflows/test-gha-eks.yml b/.github/workflows/test-gha-eks.yml index 31ce6ba9..058a3a5d 100644 --- a/.github/workflows/test-gha-eks.yml +++ b/.github/workflows/test-gha-eks.yml @@ -116,7 +116,7 @@ jobs: aws configure set region ${{ env.AWS_REGION }} --profile ${{ env.AWS_PROFILE }} - name: Create EKS Cluster - timeout-minutes: 125 + timeout-minutes: 45 uses: ./.github/actions/eks-manage-cluster id: create_eks_cluster with: @@ -142,7 +142,7 @@ jobs: echo "availability_zones=$availability_zones" | tee -a "$GITHUB_OUTPUT" - name: Create Aurora Cluster - timeout-minutes: 125 + timeout-minutes: 20 uses: ./.github/actions/aurora-manage-cluster id: create_aurora_cluster if: env.CREATE_DB == 'true' @@ -162,7 +162,7 @@ jobs: availability-zones: ${{ steps.prepare_aurora_cluster.outputs.availability_zones }} - name: Delete Clusters - timeout-minutes: 125 + timeout-minutes: 60 if: always() && !(github.event_name == 'workflow_dispatch' && inputs.delete_cluster == 'false') uses: ./.github/actions/eks-cleanup-resources with: From cf649ffba212ec5a47cd3f54ce0939f7ff2b7347 Mon Sep 17 00:00:00 2001 From: "Leo J."
<153937047+leiicamundi@users.noreply.github.com> Date: Mon, 16 Sep 2024 22:34:02 +0200 Subject: [PATCH 42/51] auto generate action readmes --- .../actions/aurora-manage-cluster/README.md | 206 ++++++++++----- .../actions/aurora-manage-cluster/action.yml | 47 +--- .../actions/eks-cleanup-resources/README.md | 82 +++--- .../actions/eks-cleanup-resources/action.yml | 1 + .github/actions/eks-manage-cluster/README.md | 244 +++++++++++++----- .github/actions/eks-manage-cluster/action.yml | 1 + .github/actions/utility-action/README.md | 106 ++++++++ .github/actions/utility-action/action.yml | 1 + .pre-commit-config.yaml | 10 + .pre-commit/update_action_readmes.sh | 19 ++ 10 files changed, 506 insertions(+), 211 deletions(-) create mode 100644 .github/actions/utility-action/README.md create mode 100755 .pre-commit/update_action_readmes.sh diff --git a/.github/actions/aurora-manage-cluster/README.md b/.github/actions/aurora-manage-cluster/README.md index 96d5e578..bb94abd6 100644 --- a/.github/actions/aurora-manage-cluster/README.md +++ b/.github/actions/aurora-manage-cluster/README.md @@ -1,87 +1,151 @@ -# Deploy RDS Aurora Cluster GitHub Action - -This GitHub Action automates the deployment of an Amazon RDS Aurora cluster using Terraform. It installs Terraform and AWS CLI, and outputs the Aurora cluster endpoint along with other relevant details. +# Deploy RDS Aurora Cluster ## Description -The **Deploy RDS Aurora Cluster** action enables you to: +This GitHub Action automates the deployment of an RDS Aurora cluster using Terraform. +This action will also install Terraform and awscli. It will output the Aurora cluster endpoint. -- Automate the deployment of an RDS Aurora cluster on AWS. -- Use Terraform for infrastructure as code. -- Install specific versions of Terraform and AWS CLI. -- Output the Aurora cluster endpoint, Terraform state URL, and all other Terraform outputs dynamically. 
## Inputs -The following inputs are required or optional for the action: - -| Input | Description | Required | Default | -|-------|-------------|----------|---------| -| `cluster-name` | Name of the RDS Aurora cluster to deploy. | Yes | - | -| `engine-version` | Version of the Aurora engine to use. | No | see `action.yml` | -| `instance-class` | Instance class for the Aurora cluster. | No | `db.t3.medium` | -| `num-instances` | Number of instances in the Aurora cluster. | No | `1` | -| `username` | Username for the PostgreSQL admin user. | Yes | - | -| `password` | Password for the PostgreSQL admin user. | Yes | - | -| `vpc-id` | VPC ID to create the cluster in. | Yes | - | -| `subnet-ids` | List of subnet IDs to create the cluster in. | Yes | - | -| `cidr-blocks` | CIDR blocks to allow access from and to. | Yes | - | -| `auto-minor-version-upgrade` | If true, minor engine upgrades will be applied automatically to the DB instance during the maintenance window. | No | `true` | -| `availability-zones` | Array of availability zones to use for the Aurora cluster. | Yes | - | -| `iam-roles` | Allows propagating additional IAM roles to the Aurora cluster for features like access to S3. | No | `[]` | -| `iam-auth-enabled` | Determines whether IAM authentication should be activated for IRSA usage. | No | `false` | -| `ca-cert-identifier` | Specifies the identifier of the CA certificate for the DB instance. | No | `rds-ca-rsa2048-g1` | -| `default-database-name` | The name for the automatically created database on cluster creation. | No | `camunda` | -| `s3-backend-bucket` | Name of the S3 bucket to store Terraform state. | Yes | - | -| `s3-bucket-region` | Region of the bucket containing the resources states. | No | Fallbacks to `aws-region` if not set. | -| `tf-modules-revision` | Git revision of the Terraform modules to use. | No | `main` | -| `tf-modules-path` | Path where the Terraform Aurora modules will be cloned. 
| No | `./.action-tf-modules/aurora/` | -| `tf-cli-config-credentials-hostname` | The hostname of a HCP Terraform/Terraform Enterprise instance for the CLI configuration file. | No | `app.terraform.io` | -| `tf-cli-config-credentials-token` | The API token for a HCP Terraform/Terraform Enterprise instance. | No | - | -| `tf-terraform-version` | The version of Terraform CLI to install. | No | `latest` | -| `tf-terraform-wrapper` | Whether to install a wrapper for the Terraform binary. | No | `true` | -| `awscli-version` | Version of the AWS CLI to use. | No | see `action.yml` | +| name | description | required | default | +| --- | --- | --- | --- | +| `cluster-name` |
Name of the RDS Aurora cluster to deploy
| `true` | `""` | +| `username` |Username for the PostgreSQL admin user
| `true` | `""` | +| `password` |Password for the PostgreSQL admin user
| `true` | `""` | +| `vpc-id` |VPC ID to create the cluster in
| `true` | `""` | +| `subnet-ids` |List of subnet IDs to create the cluster in
| `true` | `""` | +| `cidr-blocks` |CIDR blocks to allow access from and to
| `true` | `""` | +| `availability-zones` |Array of availability zones to use for the Aurora cluster
| `true` | `""` | +| `additional-terraform-vars` |JSON object containing additional Terraform variables
| `false` | `{}` | +| `s3-backend-bucket` |Name of the S3 bucket to store Terraform state
| `true` | `""` | +| `s3-bucket-region` |Region of the bucket containing the resources states
| `false` | `""` | +| `tf-modules-revision` |Git revision of the tf modules to use
| `false` | `main` | +| `tf-modules-path` |Path where the tf Aurora modules will be cloned
| `false` | `./.action-tf-modules/aurora/` | +| `tf-cli-config-credentials-hostname` |The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. Defaults to app.terraform.io
.
The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file.
| `false` | `""` | +| `tf-terraform-version` |The version of Terraform CLI to install. Defaults to latest
.
Whether or not to install a wrapper to wrap subsequent calls of the terraform
binary and expose its STDOUT, STDERR, and exit code as outputs named stdout
, stderr
, and exitcode
respectively. Defaults to true
.
Version of the aws cli to use
| `false` | `2.15.52` | + ## Outputs -The action provides the following outputs: +| name | description | +| --- | --- | +| `aurora-endpoint` |The endpoint of the deployed Aurora cluster
| +| `terraform-state-url` |URL of the Terraform state file in the S3 bucket
| +| `all-terraform-outputs` |All outputs from Terraform
| -| Output | Description | -|--------|-------------| -| `aurora-endpoint` | The endpoint of the deployed Aurora cluster. | -| `terraform-state-url` | URL of the Terraform state file in the S3 bucket. | -| `all-terraform-outputs` | All outputs from Terraform. | -## Usage +## Runs -To use this GitHub Action, include it in your workflow file: +This action is a `composite` action. + +## Usage ```yaml -jobs: - deploy_aurora: - runs-on: ubuntu-latest - steps: - - name: Deploy Aurora Cluster - uses: camunda/camunda-tf-eks-module/aurora-manage-cluster@main - with: - cluster-name: 'my-aurora-cluster' - engine-version: '15.4' - instance-class: 'db.t3.medium' - num-instances: '2' - username: 'admin' - password: ${{ secrets.DB_PASSWORD }} - vpc-id: 'vpc-12345678' - subnet-ids: 'subnet-12345,subnet-67890' - cidr-blocks: '10.0.0.0/16' - auto-minor-version-upgrade: 'true' - availability-zones: '["us-west-2a", "us-west-2b"]' - iam-roles: '["arn:aws:iam::123456789012:role/my-role"]' - iam-auth-enabled: 'false' - ca-cert-identifier: 'rds-ca-rsa2048-g1' - default-database-name: 'mydatabase' - s3-backend-bucket: 'my-terraform-state-bucket' - s3-bucket-region: 'us-west-2' - tf-modules-revision: 'main' - tf-modules-path: './.action-tf-modules/aurora/' - awscli-version: '2.15.52' +- uses: ***PROJECT***@***VERSION*** + with: + cluster-name: + # Name of the RDS Aurora cluster to deploy + # + # Required: true + # Default: "" + + username: + # Username for the PostgreSQL admin user + # + # Required: true + # Default: "" + + password: + # Password for the PostgreSQL admin user + # + # Required: true + # Default: "" + + vpc-id: + # VPC ID to create the cluster in + # + # Required: true + # Default: "" + + subnet-ids: + # List of subnet IDs to create the cluster in + # + # Required: true + # Default: "" + + cidr-blocks: + # CIDR blocks to allow access from and to + # + # Required: true + # Default: "" + + availability-zones: + # Array of availability zones to use for the Aurora cluster + # + # 
Required: true + # Default: "" + + additional-terraform-vars: + # JSON object containing additional Terraform variables + # + # Required: false + # Default: {} + + s3-backend-bucket: + # Name of the S3 bucket to store Terraform state + # + # Required: true + # Default: "" + + s3-bucket-region: + # Region of the bucket containing the resources states + # + # Required: false + # Default: "" + + tf-modules-revision: + # Git revision of the tf modules to use + # + # Required: false + # Default: main + + tf-modules-path: + # Path where the tf Aurora modules will be cloned + # + # Required: false + # Default: ./.action-tf-modules/aurora/ + + tf-cli-config-credentials-hostname: + # The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. Defaults to `app.terraform.io`. + # + # Required: false + # Default: app.terraform.io + + tf-cli-config-credentials-token: + # The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. + # + # Required: false + # Default: "" + + tf-terraform-version: + # The version of Terraform CLI to install. Defaults to `latest`. + # + # Required: false + # Default: latest + + tf-terraform-wrapper: + # Whether or not to install a wrapper to wrap subsequent calls of the `terraform` binary and expose its STDOUT, STDERR, and exit code as outputs named `stdout`, `stderr`, and `exitcode` respectively. Defaults to `true`. 
+ # + # Required: false + # Default: true + + awscli-version: + # Version of the aws cli to use + # + # Required: false + # Default: 2.15.52 ``` diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 32474df2..9df1cf03 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -10,19 +10,6 @@ inputs: description: Name of the RDS Aurora cluster to deploy required: true - engine-version: - description: Version of the Aurora engine to use - # renovate: datasource=endoflife-date depName=amazon-rds-postgresql versioning=semver - default: '15.4' - - instance-class: - description: Instance class for the Aurora cluster - default: db.t3.medium - - num-instances: - description: Number of instances in the Aurora cluster - default: '1' - username: description: Username for the PostgreSQL admin user required: true @@ -43,29 +30,14 @@ inputs: description: CIDR blocks to allow access from and to required: true - auto-minor-version-upgrade: - description: If true, minor engine upgrades will be applied automatically to the DB instance during the maintenance window - default: 'true' # Default value from variable.tf - availability-zones: description: Array of availability zones to use for the Aurora cluster required: true - iam-roles: - description: Allows propagating additional IAM roles to the Aurora cluster to allow e.g. access to S3 - default: '[]' # Default value from variable.tf - - iam-auth-enabled: - description: Determines whether IAM auth should be activated for IRSA usage - default: 'false' # Default value from variable.tf - - ca-cert-identifier: - description: Specifies the identifier of the CA certificate for the DB instance - default: rds-ca-rsa2048-g1 # Default value from variable.tf - - default-database-name: - description: The name for the automatically created database on cluster creation. 
- default: camunda # Default value from variable.tf + additional-terraform-vars: + description: JSON object containing additional Terraform variables + required: false + default: '{}' s3-backend-bucket: description: Name of the S3 bucket to store Terraform state @@ -170,20 +142,13 @@ runs: working-directory: ${{ inputs.tf-modules-path }}/modules/aurora/ run: | terraform plan -no-color -out aurora.plan -var "cluster_name=${{ inputs.cluster-name }}" \ - -var "engine_version=${{ inputs.engine-version }}" \ - -var "instance_class=${{ inputs.instance-class }}" \ - -var "num_instances=${{ inputs.num-instances }}" \ -var "username=${{ inputs.username }}" \ -var "password=${{ inputs.password }}" \ - -var "auto_minor_version_upgrade=${{ inputs.auto-minor-version-upgrade }}" \ -var 'availability_zones=${{ inputs.availability-zones }}' \ - -var 'iam_roles=${{ inputs.iam-roles }}' \ - -var "iam_auth_enabled=${{ inputs.iam-auth-enabled }}" \ - -var "ca_cert_identifier=${{ inputs.ca-cert-identifier }}" \ - -var "default_database_name=${{ inputs.default-database-name }}" \ -var "vpc_id=${{ inputs.vpc-id }}" \ -var 'subnet_ids=${{ inputs.subnet-ids }}' \ - -var 'cidr_blocks=${{ inputs.cidr-blocks }}' + -var 'cidr_blocks=${{ inputs.cidr-blocks }}' \ + -var-file=<(echo '${{ inputs.additional-terraform-vars }}') - name: Terraform Apply shell: bash diff --git a/.github/actions/eks-cleanup-resources/README.md b/.github/actions/eks-cleanup-resources/README.md index da90c6e0..ba061c5c 100644 --- a/.github/actions/eks-cleanup-resources/README.md +++ b/.github/actions/eks-cleanup-resources/README.md @@ -1,39 +1,57 @@ -# Delete EKS Resources +# Delete EKS resources -This GitHub Action automates the deletion of AWS resources using a shell script. It helps you manage and clean up modules of this repository as resources by specifying a target or deleting resources based on age criteria. 
+## Description -## Usage - -To use this action, include it in your workflow file (e.g., `.github/workflows/delete-eks-resources.yml`): +This GitHub Action automates the deletion of EKS resources using a shell script. -```yaml -name: Delete EKS Resources - -on: - workflow_dispatch: - -jobs: - cleanup: - runs-on: ubuntu-latest - steps: - - name: Delete EKS resources - uses: camunda/camunda-tf-eks-module/eks-cleanup-resources@main - with: - tf-bucket: 'your-s3-bucket-name' - tf-bucket-region: 'your-region' - max-age-hours: 24 - target: 'all' - temp-dir: './tmp/eks-cleanup/' -``` ## Inputs -The action supports the following input parameters: +| name | description | required | default | +| --- | --- | --- | --- | +| `tf-bucket` |Bucket containing the resources states
| `true` | `""` | +| `tf-bucket-region` |Region of the bucket containing the resources states, if not set, will fallback on AWS_REGION
| `false` | `""` | +| `max-age-hours` |Maximum age of resources in hours
| `false` | `20` | +| `target` |Specify an ID to destroy specific resources or "all" to destroy all resources
| `false` | `all` | +| `temp-dir` |Temporary directory prefix used for storing resource data during processing
| `false` | `./tmp/eks-cleanup/` | + + +## Runs -| Input Name | Description | Required | Default | -|--------------------|-------------------------------------------------------------------------------------------|----------|----------------------------| -| `tf-bucket` | The S3 bucket containing the resources' state files. | Yes | - | -| `tf-bucket-region` | The region of the S3 bucket containing the resources state files. Falls back to `AWS_REGION` if not set. | No | AWS_REGION | -| `max-age-hours` | The maximum age (in hours) for resources to be deleted. | No | "20" | -| `target` | Specifies an ID to destroy specific resources or "all" to destroy all resources. | No | "all" | -| `temp-dir` | Temporary directory prefix used for storing resource data during processing. | No | "./tmp/eks-cleanup/" | +This action is a `composite` action. + +## Usage + +```yaml +- uses: ***PROJECT***@***VERSION*** + with: + tf-bucket: + # Bucket containing the resources states + # + # Required: true + # Default: "" + + tf-bucket-region: + # Region of the bucket containing the resources states, if not set, will fallback on AWS_REGION + # + # Required: false + # Default: "" + + max-age-hours: + # Maximum age of resources in hours + # + # Required: false + # Default: 20 + + target: + # Specify an ID to destroy specific resources or "all" to destroy all resources + # + # Required: false + # Default: all + + temp-dir: + # Temporary directory prefix used for storing resource data during processing + # + # Required: false + # Default: ./tmp/eks-cleanup/ +``` diff --git a/.github/actions/eks-cleanup-resources/action.yml b/.github/actions/eks-cleanup-resources/action.yml index 422033ef..bbfc8700 100644 --- a/.github/actions/eks-cleanup-resources/action.yml +++ b/.github/actions/eks-cleanup-resources/action.yml @@ -4,6 +4,7 @@ name: Delete EKS resources description: | This GitHub Action automates the deletion of EKS resources using a shell script. 
+ inputs: tf-bucket: description: Bucket containing the resources states diff --git a/.github/actions/eks-manage-cluster/README.md b/.github/actions/eks-manage-cluster/README.md index 73e1d699..45318d0b 100644 --- a/.github/actions/eks-manage-cluster/README.md +++ b/.github/actions/eks-manage-cluster/README.md @@ -1,76 +1,186 @@ -# Deploy or Destroy EKS Cluster +# Deploy an EKS Cluster -This GitHub Action automates the deployment or destruction of an Amazon Elastic Kubernetes Service (EKS) cluster using Terraform. It also installs necessary tools like Terraform, AWS CLI, and `kubectl`, and sets up the Kubernetes context for the created cluster. +## Description -## Usage +This GitHub Action automates the deployment of an EKS (Amazon Elastic Kubernetes Service) cluster using Terraform. +This action will also install Terraform, awscli, and kubectl. The kube context will be set on the created cluster. -To use this action, add it to your workflow file (e.g., `.github/workflows/eks-deploy.yml`): - -```yaml -name: EKS Cluster Management - -on: - workflow_dispatch: - -jobs: - eks_management: - runs-on: ubuntu-latest - steps: - - name: Deploy or Destroy EKS Cluster - uses: camunda/camunda-tf-eks-module/eks-manage-cluster@main - with: - action: 'create' # or 'destroy' - aws-region: 'us-west-2' - cluster-name: 'my-eks-cluster' - kubernetes-version: '1.30' - cluster-service-ipv4-cidr: '10.190.0.0/16' - cluster-node-ipv4-cidr: '10.192.0.0/16' - np-instance-types: '["t2.medium"]' - np-capacity-type: 'SPOT' - np-node-desired-count: '4' - np-node-min-count: '1' - np-disk-size: '20' - np-ami-type: 'AL2_x86_64' - np-node-max-count: '10' - s3-backend-bucket: 'your-terraform-state-bucket' - s3-bucket-region: 'us-west-2' - tf-modules-revision: 'main' - tf-modules-path: './.action-tf-modules/eks/' - login: 'true' - awscli-version: '2.15.52' -``` ## Inputs -| Input Name | Description | Required | Default | 
-|-------------------------------------|--------------------------------------------------------------------------------------------------------------|----------|----------------------------------| -| `aws-region` | AWS region where the EKS cluster will be deployed. | Yes | - | -| `cluster-name` | Name of the EKS cluster to deploy. | Yes | - | -| `kubernetes-version` | Version of Kubernetes to use for the EKS cluster. | No | `1.30` | -| `cluster-service-ipv4-cidr` | CIDR block for cluster service IPs. | No | `10.190.0.0/16` | -| `cluster-node-ipv4-cidr` | CIDR block for cluster node IPs. | No | `10.192.0.0/16` | -| `np-instance-types` | List of instance types for the node pool. | No | `["t2.medium"]` | -| `np-capacity-type` | Capacity type for non-production instances (e.g., SPOT). | No | `SPOT` | -| `np-node-desired-count` | Desired number of nodes in the EKS node group. | No | `4` | -| `np-node-min-count` | Minimum number of nodes in the EKS node group. | No | `1` | -| `np-disk-size` | Disk size of the nodes on the default node pool (in GB). | No | `20` | -| `np-ami-type` | Amazon Machine Image type. | No | `AL2_x86_64` | -| `np-node-max-count` | Maximum number of nodes in the EKS node group. | No | `10` | -| `s3-backend-bucket` | Name of the S3 bucket to store Terraform state. | No | - | -| `s3-bucket-region` | Region of the bucket containing the resources states; falls back on `aws-region` if not set. | No | - | -| `tf-modules-revision` | Git revision of the Terraform modules to use. | No | `main` | -| `tf-modules-path` | Path where the Terraform EKS modules will be cloned. | No | `./.action-tf-modules/eks/` | -| `login` | Authenticate the current kube context on the created cluster. | No | `true` | -| `tf-cli-config-credentials-hostname`| The hostname of a HCP Terraform/Terraform Enterprise instance to use for credentials configuration. 
| No | `app.terraform.io` | -| `tf-cli-config-credentials-token` | The API token for a HCP Terraform/Terraform Enterprise instance. | No | - | -| `tf-terraform-version` | The version of Terraform CLI to install. Accepts full version or constraints like `<1.13.0` or `latest`. | No | `latest` | -| `tf-terraform-wrapper` | Whether or not to install a wrapper for Terraform CLI calls. | No | `true` | -| `awscli-version` | Version of the AWS CLI to install. | No | see `action.yml` | +| name | description | required | default | +| --- | --- | --- | --- | +| `aws-region` |AWS region where the EKS cluster will be deployed
| `true` | `""` | +| `cluster-name` |Name of the EKS cluster to deploy
| `true` | `""` | +| `kubernetes-version` |Version of Kubernetes to use for the EKS cluster
| `false` | `1.30` | +| `cluster-service-ipv4-cidr` |CIDR block for cluster service IPs
| `false` | `10.190.0.0/16` | +| `cluster-node-ipv4-cidr` |CIDR block for cluster node IPs
| `false` | `10.192.0.0/16` | +| `np-instance-types` |List of instance types
| `false` | `["t2.medium"]` | +| `np-capacity-type` |Capacity type for non-production instances (e.g., SPOT)
| `false` | `SPOT` | +| `np-node-desired-count` |Desired number of nodes in the EKS node group
| `false` | `4` | +| `np-node-min-count` |Minimum number of nodes in the EKS node group
| `false` | `1` | +| `np-disk-size` |Disk size of the nodes on the default node pool
| `false` | `20` | +| `np-ami-type` |Amazon Machine Image
| `false` | `AL2_x86_64` | +| `np-node-max-count` |Maximum number of nodes in the EKS node group
| `false` | `10` | +| `s3-backend-bucket` |Name of the S3 bucket to store Terraform state
| `true` | `""` | +| `s3-bucket-region` |Region of the bucket containing the resources states, if not set, will fallback on aws-region
| `false` | `""` | +| `tf-modules-revision` |Git revision of the tf modules to use
| `false` | `main` | +| `tf-modules-path` |Path where the tf EKS modules will be cloned
| `false` | `./.action-tf-modules/eks/` | +| `login` |Authenticate the current kube context on the created cluster
| `false` | `true` | +| `tf-cli-config-credentials-hostname` |The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. Defaults to app.terraform.io
.
The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file.
| `false` | `""` | +| `tf-terraform-version` |The version of Terraform CLI to install. Instead of full version string you can also specify constraint string starting with "<" (for example <1.13.0
) to install the latest version satisfying the constraint. A value of latest
will install the latest version of Terraform CLI. Defaults to latest
.
Whether or not to install a wrapper to wrap subsequent calls of the terraform
binary and expose its STDOUT, STDERR, and exit code as outputs named stdout
, stderr
, and exitcode
respectively. Defaults to true
.
Version of the aws cli to use
| `false` | `2.15.52` | + ## Outputs -| Output Name | Description | -|----------------------------|------------------------------------------------------------------| -| `eks-cluster-endpoint` | The API endpoint of the deployed EKS cluster. | -| `terraform-state-url` | URL of the Terraform state file in the S3 bucket. | -| `all-terraform-outputs` | All outputs from Terraform. | +| name | description | +| --- | --- | +| `eks-cluster-endpoint` |The API endpoint of the deployed EKS cluster
| +| `terraform-state-url` |URL of the Terraform state file in the S3 bucket
| +| `all-terraform-outputs` |All outputs from Terraform
| + + +## Runs + +This action is a `composite` action. + +## Usage + +```yaml +- uses: ***PROJECT***@***VERSION*** + with: + aws-region: + # AWS region where the EKS cluster will be deployed + # + # Required: true + # Default: "" + + cluster-name: + # Name of the EKS cluster to deploy + # + # Required: true + # Default: "" + + kubernetes-version: + # Version of Kubernetes to use for the EKS cluster + # + # Required: false + # Default: 1.30 + + cluster-service-ipv4-cidr: + # CIDR block for cluster service IPs + # + # Required: false + # Default: 10.190.0.0/16 + + cluster-node-ipv4-cidr: + # CIDR block for cluster node IPs + # + # Required: false + # Default: 10.192.0.0/16 + + np-instance-types: + # List of instance types + # + # Required: false + # Default: ["t2.medium"] + + np-capacity-type: + # Capacity type for non-production instances (e.g., SPOT) + # + # Required: false + # Default: SPOT + + np-node-desired-count: + # Desired number of nodes in the EKS node group + # + # Required: false + # Default: 4 + + np-node-min-count: + # Minimum number of nodes in the EKS node group + # + # Required: false + # Default: 1 + + np-disk-size: + # Disk size of the nodes on the default node pool + # + # Required: false + # Default: 20 + + np-ami-type: + # Amazon Machine Image + # + # Required: false + # Default: AL2_x86_64 + + np-node-max-count: + # Maximum number of nodes in the EKS node group + # + # Required: false + # Default: 10 + + s3-backend-bucket: + # Name of the S3 bucket to store Terraform state + # + # Required: true + # Default: "" + + s3-bucket-region: + # Region of the bucket containing the resources states, if not set, will fallback on aws-region + # + # Required: false + # Default: "" + + tf-modules-revision: + # Git revision of the tf modules to use + # + # Required: false + # Default: main + + tf-modules-path: + # Path where the tf EKS modules will be cloned + # + # Required: false + # Default: ./.action-tf-modules/eks/ + + login: + # Authenticate the 
current kube context on the created cluster + # + # Required: false + # Default: true + + tf-cli-config-credentials-hostname: + # The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. Defaults to `app.terraform.io`. + # + # Required: false + # Default: app.terraform.io + + tf-cli-config-credentials-token: + # The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file. + # + # Required: false + # Default: "" + + tf-terraform-version: + # The version of Terraform CLI to install. Instead of full version string you can also specify constraint string starting with "<" (for example `<1.13.0`) to install the latest version satisfying the constraint. A value of `latest` will install the latest version of Terraform CLI. Defaults to `latest`. + # + # Required: false + # Default: latest + + tf-terraform-wrapper: + # Whether or not to install a wrapper to wrap subsequent calls of the `terraform` binary and expose its STDOUT, STDERR, and exit code as outputs named `stdout`, `stderr`, and `exitcode` respectively. Defaults to `true`. + # + # Required: false + # Default: true + + awscli-version: + # Version of the aws cli to use + # + # Required: false + # Default: 2.15.52 +``` diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index ade280c4..f484d35b 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -5,6 +5,7 @@ description: | This GitHub Action automates the deployment of an EKS (Amazon Elastic Kubernetes Service) cluster using Terraform. This action will also install Terraform, awscli, and kubectl. The kube context will be set on the created cluster. 
+ inputs: aws-region: description: AWS region where the EKS cluster will be deployed diff --git a/.github/actions/utility-action/README.md b/.github/actions/utility-action/README.md new file mode 100644 index 00000000..aab27ddb --- /dev/null +++ b/.github/actions/utility-action/README.md @@ -0,0 +1,106 @@ +# Utility Actions + +## Description + +A set of utility steps to be used across different workflows, including: +- Installing Terraform +- Installing AWS CLI +- Setting Terraform variables +- Checking/Creating an S3 bucket + + +## Inputs + +| name | description | required | default | +| --- | --- | --- | --- | +| `awscli-version` |Version of the AWS CLI to install
| `false` | `2.15.52` | +| `terraform-version` |Version of Terraform to install
| `false` | `latest` | +| `s3-backend-bucket` |Name of the S3 bucket to store Terraform state
| `true` | `""` | +| `s3-bucket-region` |Region of the bucket containing the resources states, if not set, will fallback on aws-region
| `false` | `""` | +| `aws-region` |AWS region to use for S3 bucket operations
| `true` | `""` | +| `tf-state-key` |Key used to store the tfstate file (e.g.: /tfstates/terraform.tfstate)
| `true` | `""` | +| `module-name` |Name of the Terraform module (e.g., eks-cluster, aurora)
| `true` | `""` | +| `tf-cli-config-credentials-hostname` |The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file
| `false` | `app.terraform.io` | +| `tf-cli-config-credentials-token` |The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file
| `false` | `""` | +| `tf-terraform-wrapper` |Whether or not to install a wrapper for Terraform CLI
| `false` | `true` | + + +## Outputs + +| name | description | +| --- | --- | +| `terraform-state-url` |URL of the Terraform state file in the S3 bucket
| +| `TFSTATE_BUCKET` |S3 bucket name for Terraform state
| +| `TFSTATE_REGION` |Region of the S3 bucket for Terraform state
| +| `TFSTATE_KEY` |Key of the Terraform state file in the S3 bucket
| + + +## Runs + +This action is a `composite` action. + +## Usage + +```yaml +- uses: ***PROJECT***@***VERSION*** + with: + awscli-version: + # Version of the AWS CLI to install + # + # Required: false + # Default: 2.15.52 + + terraform-version: + # Version of Terraform to install + # + # Required: false + # Default: latest + + s3-backend-bucket: + # Name of the S3 bucket to store Terraform state + # + # Required: true + # Default: "" + + s3-bucket-region: + # Region of the bucket containing the resources states, if not set, will fallback on aws-region + # + # Required: false + # Default: "" + + aws-region: + # AWS region to use for S3 bucket operations + # + # Required: true + # Default: "" + + tf-state-key: + # Key use to store the tfstate file (e.g.: /tfstates/terraform.tfstate) + # + # Required: true + # Default: "" + + module-name: + # Name of the Terraform module (e.g., eks-cluster, aurora) + # + # Required: true + # Default: "" + + tf-cli-config-credentials-hostname: + # The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file + # + # Required: false + # Default: app.terraform.io + + tf-cli-config-credentials-token: + # The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file + # + # Required: false + # Default: "" + + tf-terraform-wrapper: + # Whether or not to install a wrapper for Terraform CLI + # + # Required: false + # Default: true +``` diff --git a/.github/actions/utility-action/action.yml b/.github/actions/utility-action/action.yml index 218e623c..363641dd 100644 --- a/.github/actions/utility-action/action.yml +++ b/.github/actions/utility-action/action.yml @@ -8,6 +8,7 @@ description: | - Setting Terraform variables - Checking/Creating an S3 bucket + inputs: awscli-version: description: Version of the AWS CLI to install diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml 
index 0b886fc9..eded4083 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,6 +2,16 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: + - repo: local + hooks: + # TODO: extract this pre-commit in common config + - id: update-action-readmes + name: Update GitHub Action READMEs + entry: bash .pre-commit/update_action_readmes.sh + language: system + # Only runs when action files are modified + files: ^\.github/actions/.+\.(yml|yaml)$ + pass_filenames: false # ensure single run - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.6.0 diff --git a/.pre-commit/update_action_readmes.sh b/.pre-commit/update_action_readmes.sh new file mode 100755 index 00000000..5a793ee6 --- /dev/null +++ b/.pre-commit/update_action_readmes.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +# Run a single Docker container to handle the README.md updates +docker run --rm \ + -v "$PWD":/workspace \ + -w /workspace \ + node:22 \ + bash -c ' + npm install -g action-docs + find .github/actions -name "*.yml" -o -name "*.yaml" | while read -r action_file; do + action_dir=$(dirname "$action_file") + echo "Updating README.md in $action_dir" + rm -f "$action_dir/README.md" + action-docs -t 1 --no-banner -n -s "$action_file" > "$action_dir/README.md.tmp" + # Ensure that only a single empty line is left at the end of the file + sed -e :a -e "/^\n*\$/{\$d;N;};/\n\$/ba" "$action_dir/README.md.tmp" > "$action_dir/README.md" + rm -f "$action_dir/README.md.tmp" + done + ' From db88d390dbf0ac8122563c708732f989f83aece7 Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Mon, 16 Sep 2024 22:43:35 +0200 Subject: [PATCH 43/51] use additional variables for non default on eks --- .github/actions/eks-manage-cluster/README.md | 71 ++----------------- .github/actions/eks-manage-cluster/action.yml | 55 ++------------ .github/workflows/test-gha-eks.yml | 3 + .pre-commit-config.yaml | 4 +- ...mes.sh => docker_update_action_readmes.sh} | 0 modules/eks-cluster/README.md | 4 +- modules/eks-cluster/variables.tf | 3 +- 7 files changed, 18 insertions(+), 122 deletions(-) rename .pre-commit/{update_action_readmes.sh => docker_update_action_readmes.sh} (100%) diff --git a/.github/actions/eks-manage-cluster/README.md b/.github/actions/eks-manage-cluster/README.md index 45318d0b..4871b2c0 100644 --- a/.github/actions/eks-manage-cluster/README.md +++ b/.github/actions/eks-manage-cluster/README.md @@ -12,16 +12,7 @@ This action will also install Terraform, awscli, and kubectl. The kube context w | --- | --- | --- | --- | | `aws-region` |AWS region where the EKS cluster will be deployed
| `true` | `""` | | `cluster-name` |Name of the EKS cluster to deploy
| `true` | `""` | -| `kubernetes-version` |Version of Kubernetes to use for the EKS cluster
| `false` | `1.30` | -| `cluster-service-ipv4-cidr` |CIDR block for cluster service IPs
| `false` | `10.190.0.0/16` | -| `cluster-node-ipv4-cidr` |CIDR block for cluster node IPs
| `false` | `10.192.0.0/16` | -| `np-instance-types` |List of instance types
| `false` | `["t2.medium"]` | -| `np-capacity-type` |Capacity type for non-production instances (e.g., SPOT)
| `false` | `SPOT` | -| `np-node-desired-count` |Desired number of nodes in the EKS node group
| `false` | `4` | -| `np-node-min-count` |Minimum number of nodes in the EKS node group
| `false` | `1` | -| `np-disk-size` |Disk size of the nodes on the default node pool
| `false` | `20` | -| `np-ami-type` |Amazon Machine Image
| `false` | `AL2_x86_64` | -| `np-node-max-count` |Maximum number of nodes in the EKS node group
| `false` | `10` | +| `additional-terraform-vars` |JSON object containing additional Terraform variables
| `false` | `{}` | | `s3-backend-bucket` |Name of the S3 bucket to store Terraform state
| `true` | `""` | | `s3-bucket-region` |Region of the bucket containing the resources states, if not set, will fallback on aws-region
| `false` | `""` | | `tf-modules-revision` |Git revision of the tf modules to use
| `false` | `main` | @@ -64,65 +55,11 @@ This action is a `composite` action. # Required: true # Default: "" - kubernetes-version: - # Version of Kubernetes to use for the EKS cluster + additional-terraform-vars: + # JSON object containing additional Terraform variables # # Required: false - # Default: 1.30 - - cluster-service-ipv4-cidr: - # CIDR block for cluster service IPs - # - # Required: false - # Default: 10.190.0.0/16 - - cluster-node-ipv4-cidr: - # CIDR block for cluster node IPs - # - # Required: false - # Default: 10.192.0.0/16 - - np-instance-types: - # List of instance types - # - # Required: false - # Default: ["t2.medium"] - - np-capacity-type: - # Capacity type for non-production instances (e.g., SPOT) - # - # Required: false - # Default: SPOT - - np-node-desired-count: - # Desired number of nodes in the EKS node group - # - # Required: false - # Default: 4 - - np-node-min-count: - # Minimum number of nodes in the EKS node group - # - # Required: false - # Default: 1 - - np-disk-size: - # Disk size of the nodes on the default node pool - # - # Required: false - # Default: 20 - - np-ami-type: - # Amazon Machine Image - # - # Required: false - # Default: AL2_x86_64 - - np-node-max-count: - # Maximum number of nodes in the EKS node group - # - # Required: false - # Default: 10 + # Default: {} s3-backend-bucket: # Name of the S3 bucket to store Terraform state diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index f484d35b..559a1ba3 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -15,46 +15,10 @@ inputs: description: Name of the EKS cluster to deploy required: true - kubernetes-version: - description: Version of Kubernetes to use for the EKS cluster - # renovate: datasource=endoflife-date depName=amazon-eks versioning=semver - default: '1.30' - - cluster-service-ipv4-cidr: - description: CIDR block for cluster service IPs - 
default: 10.190.0.0/16 - - cluster-node-ipv4-cidr: - description: CIDR block for cluster node IPs - default: 10.192.0.0/16 - - np-instance-types: - description: List of instance types - default: '["t2.medium"]' - - np-capacity-type: - description: Capacity type for non-production instances (e.g., SPOT) - default: SPOT - - np-node-desired-count: - description: Desired number of nodes in the EKS node group - default: '4' - - np-node-min-count: - description: Minimum number of nodes in the EKS node group - default: '1' - - np-disk-size: - description: Disk size of the nodes on the default node pool - default: '20' - - np-ami-type: - description: Amazon Machine Image - default: AL2_x86_64 - - np-node-max-count: - description: Maximum number of nodes in the EKS node group - default: '10' + additional-terraform-vars: + description: JSON object containing additional Terraform variables + required: false + default: '{}' s3-backend-bucket: description: Name of the S3 bucket to store Terraform state @@ -167,17 +131,8 @@ runs: run: | terraform plan -no-color -out eks.plan -var "name=${{ inputs.cluster-name }}" \ -var "region=${{ inputs.aws-region }}" \ - -var "kubernetes_version=${{ inputs.kubernetes-version }}" \ -var "name=${{ inputs.cluster-name }}" \ - -var "np_desired_node_count=${{ inputs.np-node-desired-count }}" \ - -var "np_min_node_count=${{ inputs.np-node-min-count }}" \ - -var "np_max_node_count=${{ inputs.np-node-max-count }}" \ - -var "np_disk_size=${{ inputs.np-disk-size }}" \ - -var "np_ami_type=${{ inputs.np-ami-type }}" \ - -var "cluster_service_ipv4_cidr=${{ inputs.cluster-service-ipv4-cidr }}" \ - -var "cluster_node_ipv4_cidr=${{ inputs.cluster-node-ipv4-cidr }}" \ - -var 'np_instance_types=${{ inputs.np-instance-types }}' \ - -var "np_capacity_type=${{ inputs.np-capacity-type }}" + -var-file=<(echo '${{ inputs.additional-terraform-vars }}') - name: Terraform Apply shell: bash diff --git a/.github/workflows/test-gha-eks.yml 
b/.github/workflows/test-gha-eks.yml index 058a3a5d..59303d48 100644 --- a/.github/workflows/test-gha-eks.yml +++ b/.github/workflows/test-gha-eks.yml @@ -122,6 +122,9 @@ jobs: with: cluster-name: ${{ steps.commit_info.outputs.cluster_name }} aws-region: ${{ env.AWS_REGION }} + + additional-terraform-vars: '{"np_capacity_type": "SPOT", "np_instance_types": ["t2.medium"]}' + s3-backend-bucket: ${{ env.TF_STATE_BUCKET }} s3-bucket-region: ${{ env.TF_STATE_BUCKET_REGION }} tf-modules-revision: ${{ steps.commit_info.outputs.tf_modules_revision }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index eded4083..50972bf7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,9 +5,9 @@ repos: - repo: local hooks: # TODO: extract this pre-commit in common config - - id: update-action-readmes + - id: update-action-readmes-docker name: Update GitHub Action READMEs - entry: bash .pre-commit/update_action_readmes.sh + entry: bash .pre-commit/docker_update_action_readmes.sh language: system # Only runs when action files are modified files: ^\.github/actions/.+\.(yml|yaml)$ diff --git a/.pre-commit/update_action_readmes.sh b/.pre-commit/docker_update_action_readmes.sh similarity index 100% rename from .pre-commit/update_action_readmes.sh rename to .pre-commit/docker_update_action_readmes.sh diff --git a/modules/eks-cluster/README.md b/modules/eks-cluster/README.md index 32d4fd17..90bda043 100644 --- a/modules/eks-cluster/README.md +++ b/modules/eks-cluster/README.md @@ -51,8 +51,8 @@ module "eks_cluster" { |------|-------------|------|---------|:--------:| | [access\_entries](#input\_access\_entries) | Map of access entries to add to the cluster. | `any` | `{}` | no | | [authentication\_mode](#input\_authentication\_mode) | The authentication mode for the cluster. | `string` | `"API"` | no | -| [cluster\_node\_ipv4\_cidr](#input\_cluster\_node\_ipv4\_cidr) | The CIDR block for public and private subnets of loadbalancers and nodes. 
Between /28 and /16. | `string` | n/a | yes | -| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. Between /24 and /12. | `string` | n/a | yes | +| [cluster\_node\_ipv4\_cidr](#input\_cluster\_node\_ipv4\_cidr) | The CIDR block for public and private subnets of loadbalancers and nodes. Between /28 and /16. | `string` | `"10.192.0.0/16"` | no | +| [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. Between /24 and /12. | `string` | `"10.190.0.0/16"` | no | | [enable\_cluster\_creator\_admin\_permissions](#input\_enable\_cluster\_creator\_admin\_permissions) | Indicates whether or not to add the cluster creator (the identity used by Terraform) as an administrator via access entry. | `bool` | `true` | no | | [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to be used by EKS | `string` | `"1.30"` | no | | [name](#input\_name) | Name being used for relevant resources - including EKS cluster name | `string` | n/a | yes | diff --git a/modules/eks-cluster/variables.tf b/modules/eks-cluster/variables.tf index d6ca5d04..5c23ab90 100644 --- a/modules/eks-cluster/variables.tf +++ b/modules/eks-cluster/variables.tf @@ -1,5 +1,4 @@ # ! Developer: if you are adding a variable without a default value, please ensure to reference it in the cleanup script (.github/actions/eks-cleanup-resources/scripts/destroy.sh) -# and also in the manage gha variable "region" { type = string @@ -63,11 +62,13 @@ variable "np_capacity_type" { variable "cluster_service_ipv4_cidr" { description = "The CIDR block to assign Kubernetes service IP addresses from. Between /24 and /12." type = string + default = "10.190.0.0/16" } variable "cluster_node_ipv4_cidr" { description = "The CIDR block for public and private subnets of loadbalancers and nodes. Between /28 and /16." 
type = string + default = "10.192.0.0/16" } variable "authentication_mode" { From 8db575cf95ce3ece3563b0c4980a12718c9b98f3 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Mon, 16 Sep 2024 22:55:31 +0200 Subject: [PATCH 44/51] remove outdate parameter --- .github/actions/utility-action/README.md | 7 ------- .github/actions/utility-action/action.yml | 6 ++---- 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/.github/actions/utility-action/README.md b/.github/actions/utility-action/README.md index aab27ddb..66a24402 100644 --- a/.github/actions/utility-action/README.md +++ b/.github/actions/utility-action/README.md @@ -19,7 +19,6 @@ A set of utility steps to be used across different workflows, including: | `s3-bucket-region` |Region of the bucket containing the resources states, if not set, will fallback on aws-region
| `false` | `""` | | `aws-region` |AWS region to use for S3 bucket operations
| `true` | `""` | | `tf-state-key` |Key use to store the tfstate file (e.g.: /tfstates/terraform.tfstate)
| `true` | `""` | -| `module-name` |Name of the Terraform module (e.g., eks-cluster, aurora)
| `true` | `""` | | `tf-cli-config-credentials-hostname` |The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file
| `false` | `app.terraform.io` | | `tf-cli-config-credentials-token` |The API token for a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file
| `false` | `""` | | `tf-terraform-wrapper` |Whether or not to install a wrapper for Terraform CLI
| `false` | `true` | @@ -80,12 +79,6 @@ This action is a `composite` action. # Required: true # Default: "" - module-name: - # Name of the Terraform module (e.g., eks-cluster, aurora) - # - # Required: true - # Default: "" - tf-cli-config-credentials-hostname: # The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file # diff --git a/.github/actions/utility-action/action.yml b/.github/actions/utility-action/action.yml index 363641dd..c85de862 100644 --- a/.github/actions/utility-action/action.yml +++ b/.github/actions/utility-action/action.yml @@ -1,6 +1,8 @@ --- name: Utility Actions +# TODO: this action should be extracted and made available as a global reusable action (see https://github.com/camunda/camunda-tf-eks-module/pull/112#discussion_r1761274718) + description: | A set of utility steps to be used across different workflows, including: - Installing Terraform @@ -35,10 +37,6 @@ inputs: description: 'Key use to store the tfstate file (e.g.: /tfstates/terraform.tfstate)' required: true - module-name: - description: Name of the Terraform module (e.g., eks-cluster, aurora) - required: true - tf-cli-config-credentials-hostname: description: The hostname of a HCP Terraform/Terraform Enterprise instance to place within the credentials block of the Terraform CLI configuration file From 568a642983bf380f070bc8c357328e0f11e124fd Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Mon, 16 Sep 2024 23:04:15 +0200 Subject: [PATCH 45/51] update pre-commits --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 50972bf7..b4b71206 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -32,7 +32,7 @@ repos: - id: actionlint-docker - repo: https://github.com/renovatebot/pre-commit-hooks - rev: 38.72.1 + rev: 38.80.0 hooks: - id: renovate-config-validator args: [--strict] @@ -45,7 +45,7 @@ repos: args: [--strict, --force-scope] - repo: https://github.com/antonbabenko/pre-commit-terraform - rev: v1.94.1 + rev: v1.95.0 hooks: - id: terraform_fmt - id: terraform_tflint From c73b2d936c9f17782f6fc477af93443ea7e056ee Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Mon, 16 Sep 2024 23:06:41 +0200 Subject: [PATCH 46/51] fix user mapping --- .pre-commit/docker_update_action_readmes.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.pre-commit/docker_update_action_readmes.sh b/.pre-commit/docker_update_action_readmes.sh index 5a793ee6..fac8f163 100755 --- a/.pre-commit/docker_update_action_readmes.sh +++ b/.pre-commit/docker_update_action_readmes.sh @@ -1,9 +1,13 @@ #!/bin/bash +USER_ID=$(id -u) +GROUP_ID=$(id -g) + # Run a single Docker container to handle the README.md updates docker run --rm \ -v "$PWD":/workspace \ -w /workspace \ + -u "$USER_ID:$GROUP_ID" \ node:22 \ bash -c ' npm install -g action-docs From 8415dae5dbbee9dcb5c33eba1ae3f25dd6f6dc4b Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Tue, 17 Sep 2024 10:36:29 +0200 Subject: [PATCH 47/51] fix var file order and action generation --- .github/actions/aurora-manage-cluster/action.yml | 7 ++++--- .github/actions/eks-manage-cluster/action.yml | 7 ++++--- .pre-commit/docker_update_action_readmes.sh | 16 +++++++++++++++- 3 files changed, 23 insertions(+), 7 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 9df1cf03..5d10914c 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -141,14 +141,15 @@ runs: id: plan working-directory: ${{ inputs.tf-modules-path }}/modules/aurora/ run: | - terraform plan -no-color -out aurora.plan -var "cluster_name=${{ inputs.cluster-name }}" \ + terraform plan -no-color -out aurora.plan \ + -var-file=<(echo '${{ inputs.additional-terraform-vars }}') \ + -var "cluster_name=${{ inputs.cluster-name }}" \ -var "username=${{ inputs.username }}" \ -var "password=${{ inputs.password }}" \ -var 'availability_zones=${{ inputs.availability-zones }}' \ -var "vpc_id=${{ inputs.vpc-id }}" \ -var 'subnet_ids=${{ inputs.subnet-ids }}' \ - -var 'cidr_blocks=${{ inputs.cidr-blocks }}' \ - -var-file=<(echo '${{ inputs.additional-terraform-vars }}') + -var 'cidr_blocks=${{ inputs.cidr-blocks }}' - name: Terraform Apply shell: bash diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index 559a1ba3..cd413835 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -129,10 +129,11 @@ runs: id: plan working-directory: ${{ inputs.tf-modules-path }}/modules/eks-cluster/ run: | - terraform plan -no-color -out eks.plan -var "name=${{ inputs.cluster-name }}" \ - -var "region=${{ inputs.aws-region }}" \ + terraform plan -no-color -out eks.plan \ + -var-file=<(echo '${{ 
inputs.additional-terraform-vars }}') \ -var "name=${{ inputs.cluster-name }}" \ - -var-file=<(echo '${{ inputs.additional-terraform-vars }}') + -var "region=${{ inputs.aws-region }}" \ + -var "name=${{ inputs.cluster-name }}" - name: Terraform Apply shell: bash diff --git a/.pre-commit/docker_update_action_readmes.sh b/.pre-commit/docker_update_action_readmes.sh index fac8f163..ddf30ada 100755 --- a/.pre-commit/docker_update_action_readmes.sh +++ b/.pre-commit/docker_update_action_readmes.sh @@ -1,15 +1,21 @@ #!/bin/bash +set -o pipefail + +# due to an open bug in the node image https://github.com/nodejs/docker-node/issues/740 +# we can't map the user and group at the docker level, therefore we chown the files USER_ID=$(id -u) GROUP_ID=$(id -g) # Run a single Docker container to handle the README.md updates docker run --rm \ + -e USER_ID="$USER_ID" \ + -e GROUP_ID="$GROUP_ID" \ -v "$PWD":/workspace \ -w /workspace \ - -u "$USER_ID:$GROUP_ID" \ node:22 \ bash -c ' + set -euxo pipefail npm install -g action-docs find .github/actions -name "*.yml" -o -name "*.yaml" | while read -r action_file; do action_dir=$(dirname "$action_file") @@ -18,6 +24,14 @@ docker run --rm \ action-docs -t 1 --no-banner -n -s "$action_file" > "$action_dir/README.md.tmp" # Ensure that only a single empty line is left at the end of the file sed -e :a -e "/^\n*\$/{\$d;N;};/\n\$/ba" "$action_dir/README.md.tmp" > "$action_dir/README.md" + chown "$USER_ID:$GROUP_ID" "$action_dir/README.md" rm -f "$action_dir/README.md.tmp" done ' + +DOCKER_EXIT_CODE=$? + +if [ $DOCKER_EXIT_CODE -ne 0 ]; then + echo "Docker action readme generation command failed with exit code $DOCKER_EXIT_CODE, please use verbose mode" + exit 1 +fi From 5d961bc09be2d38c5043f0d0ac9334f2a55f4318 Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Tue, 17 Sep 2024 11:00:11 +0200 Subject: [PATCH 48/51] fix inputs tfvars --- .github/actions/aurora-manage-cluster/action.yml | 3 ++- .github/actions/eks-manage-cluster/action.yml | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/actions/aurora-manage-cluster/action.yml b/.github/actions/aurora-manage-cluster/action.yml index 5d10914c..1e79fd98 100644 --- a/.github/actions/aurora-manage-cluster/action.yml +++ b/.github/actions/aurora-manage-cluster/action.yml @@ -141,8 +141,9 @@ runs: id: plan working-directory: ${{ inputs.tf-modules-path }}/modules/aurora/ run: | + echo '${{ inputs.additional-terraform-vars }}' > /tmp/var.tfvars.json terraform plan -no-color -out aurora.plan \ - -var-file=<(echo '${{ inputs.additional-terraform-vars }}') \ + -var-file=/tmp/var.tfvars.json \ -var "cluster_name=${{ inputs.cluster-name }}" \ -var "username=${{ inputs.username }}" \ -var "password=${{ inputs.password }}" \ diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index cd413835..0b64b614 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -129,8 +129,9 @@ runs: id: plan working-directory: ${{ inputs.tf-modules-path }}/modules/eks-cluster/ run: | - terraform plan -no-color -out eks.plan \ - -var-file=<(echo '${{ inputs.additional-terraform-vars }}') \ + echo '${{ inputs.additional-terraform-vars }}' > /tmp/var.tfvars.json + terraform plan -no-color -out aurora.plan \ + -var-file=/tmp/var.tfvars.json \ -var "name=${{ inputs.cluster-name }}" \ -var "region=${{ inputs.aws-region }}" \ -var "name=${{ inputs.cluster-name }}" From 07478b3852651b6ab90ef361063094fd2da0345e Mon Sep 17 00:00:00 2001 From: "Leo J." 
<153937047+leiicamundi@users.noreply.github.com> Date: Tue, 17 Sep 2024 11:42:34 +0200 Subject: [PATCH 49/51] fix plan file --- .github/actions/eks-manage-cluster/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/eks-manage-cluster/action.yml b/.github/actions/eks-manage-cluster/action.yml index 0b64b614..15e212d1 100644 --- a/.github/actions/eks-manage-cluster/action.yml +++ b/.github/actions/eks-manage-cluster/action.yml @@ -130,7 +130,7 @@ runs: working-directory: ${{ inputs.tf-modules-path }}/modules/eks-cluster/ run: | echo '${{ inputs.additional-terraform-vars }}' > /tmp/var.tfvars.json - terraform plan -no-color -out aurora.plan \ + terraform plan -no-color -out eks.plan \ -var-file=/tmp/var.tfvars.json \ -var "name=${{ inputs.cluster-name }}" \ -var "region=${{ inputs.aws-region }}" \ From 737704d2851a1a9d4fe983bc40e8ae895d06a830 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Tue, 17 Sep 2024 16:38:33 +0200 Subject: [PATCH 50/51] fix deployment --- .pre-commit-config.yaml | 4 ++++ modules/fixtures/whoami-deployment.yml | 2 -- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b4b71206..367d6a87 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,6 +2,10 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: + # - repo: https://github.com/camunda/infraex-common-config + # rev: 0d3d66656b774d1cd906e379df10b601f25ba946 # TODO: replace with main onced it's merged + # hooks: + # - id: update-action-readmes-docker - repo: local hooks: # TODO: extract this pre-commit in common config diff --git a/modules/fixtures/whoami-deployment.yml b/modules/fixtures/whoami-deployment.yml index 89a065c8..4bb9e2eb 100644 --- a/modules/fixtures/whoami-deployment.yml +++ b/modules/fixtures/whoami-deployment.yml @@ -38,5 +38,3 @@ spec: - protocol: 
TCP port: 80 targetPort: 80 ---- -... From 102008e112f2567031477a6324a5edead9816d44 Mon Sep 17 00:00:00 2001 From: "Leo J." <153937047+leiicamundi@users.noreply.github.com> Date: Tue, 17 Sep 2024 19:29:55 +0200 Subject: [PATCH 51/51] use new generation of docs --- .../actions/aurora-manage-cluster/README.md | 2 +- .../actions/eks-cleanup-resources/README.md | 2 +- .github/actions/eks-manage-cluster/README.md | 2 +- .github/actions/utility-action/README.md | 2 +- .pre-commit-config.yaml | 14 +------ .pre-commit/docker_update_action_readmes.sh | 37 ------------------- 6 files changed, 6 insertions(+), 53 deletions(-) delete mode 100755 .pre-commit/docker_update_action_readmes.sh diff --git a/.github/actions/aurora-manage-cluster/README.md b/.github/actions/aurora-manage-cluster/README.md index bb94abd6..6257ea5f 100644 --- a/.github/actions/aurora-manage-cluster/README.md +++ b/.github/actions/aurora-manage-cluster/README.md @@ -45,7 +45,7 @@ This action is a `composite` action. ## Usage ```yaml -- uses: ***PROJECT***@***VERSION*** +- uses: camunda/camunda-tf-eks-module/aurora-manage-cluster@main with: cluster-name: # Name of the RDS Aurora cluster to deploy diff --git a/.github/actions/eks-cleanup-resources/README.md b/.github/actions/eks-cleanup-resources/README.md index ba061c5c..99581844 100644 --- a/.github/actions/eks-cleanup-resources/README.md +++ b/.github/actions/eks-cleanup-resources/README.md @@ -23,7 +23,7 @@ This action is a `composite` action. ## Usage ```yaml -- uses: ***PROJECT***@***VERSION*** +- uses: camunda/camunda-tf-eks-module/eks-cleanup-resources@main with: tf-bucket: # Bucket containing the resources states diff --git a/.github/actions/eks-manage-cluster/README.md b/.github/actions/eks-manage-cluster/README.md index 4871b2c0..bf3eaefe 100644 --- a/.github/actions/eks-manage-cluster/README.md +++ b/.github/actions/eks-manage-cluster/README.md @@ -41,7 +41,7 @@ This action is a `composite` action. 
## Usage ```yaml -- uses: ***PROJECT***@***VERSION*** +- uses: camunda/camunda-tf-eks-module/eks-manage-cluster@main with: aws-region: # AWS region where the EKS cluster will be deployed diff --git a/.github/actions/utility-action/README.md b/.github/actions/utility-action/README.md index 66a24402..84efd8f8 100644 --- a/.github/actions/utility-action/README.md +++ b/.github/actions/utility-action/README.md @@ -41,7 +41,7 @@ This action is a `composite` action. ## Usage ```yaml -- uses: ***PROJECT***@***VERSION*** +- uses: camunda/camunda-tf-eks-module/utility-action@main with: awscli-version: # Version of the AWS CLI to install diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 367d6a87..f5b6eed5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,20 +2,10 @@ # See https://pre-commit.com for more information # See https://pre-commit.com/hooks.html for more hooks repos: - # - repo: https://github.com/camunda/infraex-common-config - # rev: 0d3d66656b774d1cd906e379df10b601f25ba946 # TODO: replace with main onced it's merged - # hooks: - # - id: update-action-readmes-docker - - repo: local + - repo: https://github.com/camunda/infraex-common-config + rev: 5a264b0 # TODO: replace with main onced it's merged hooks: - # TODO: extract this pre-commit in common config - id: update-action-readmes-docker - name: Update GitHub Action READMEs - entry: bash .pre-commit/docker_update_action_readmes.sh - language: system - # Only runs when action files are modified - files: ^\.github/actions/.+\.(yml|yaml)$ - pass_filenames: false # ensure single run - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.6.0 diff --git a/.pre-commit/docker_update_action_readmes.sh b/.pre-commit/docker_update_action_readmes.sh deleted file mode 100755 index ddf30ada..00000000 --- a/.pre-commit/docker_update_action_readmes.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash - -set -o pipefail - -# due to an open bug in the node image 
https://github.com/nodejs/docker-node/issues/740 -# we can't map the user and group at the docker level, therefore we chown the files -USER_ID=$(id -u) -GROUP_ID=$(id -g) - -# Run a single Docker container to handle the README.md updates -docker run --rm \ - -e USER_ID="$USER_ID" \ - -e GROUP_ID="$GROUP_ID" \ - -v "$PWD":/workspace \ - -w /workspace \ - node:22 \ - bash -c ' - set -euxo pipefail - npm install -g action-docs - find .github/actions -name "*.yml" -o -name "*.yaml" | while read -r action_file; do - action_dir=$(dirname "$action_file") - echo "Updating README.md in $action_dir" - rm -f "$action_dir/README.md" - action-docs -t 1 --no-banner -n -s "$action_file" > "$action_dir/README.md.tmp" - # Ensure that only a single empty line is left at the end of the file - sed -e :a -e "/^\n*\$/{\$d;N;};/\n\$/ba" "$action_dir/README.md.tmp" > "$action_dir/README.md" - chown "$USER_ID:$GROUP_ID" "$action_dir/README.md" - rm -f "$action_dir/README.md.tmp" - done - ' - -DOCKER_EXIT_CODE=$? - -if [ $DOCKER_EXIT_CODE -ne 0 ]; then - echo "Docker action readme generation command failed with exit code $DOCKER_EXIT_CODE, please use verbose mode" - exit 1 -fi