From 3b49ae986962328bded8908268d75844588abebc Mon Sep 17 00:00:00 2001 From: Loic Devulder Date: Wed, 31 Jan 2024 16:14:14 +0100 Subject: [PATCH] ci: use new master(s) workflows Signed-off-by: Loic Devulder --- .github/actions/logs-and-summary/action.yaml | 165 +++--- .../master_clean-and-delete-runner.yaml | 54 +- .github/workflows/master_cli.yaml | 534 +++--------------- .github/workflows/master_create-runner.yaml | 60 +- .github/workflows/master_e2e.yaml | 109 ++++ .github/workflows/master_main.yaml | 229 ++++++++ .github/workflows/master_poc.yaml | 79 --- .github/workflows/master_ui.yaml | 288 ++++++++++ .github/workflows/test-new-master.yaml | 14 +- 9 files changed, 832 insertions(+), 700 deletions(-) create mode 100644 .github/workflows/master_e2e.yaml create mode 100644 .github/workflows/master_main.yaml delete mode 100644 .github/workflows/master_poc.yaml create mode 100644 .github/workflows/master_ui.yaml diff --git a/.github/actions/logs-and-summary/action.yaml b/.github/actions/logs-and-summary/action.yaml index 4774e74b2..a3221eba3 100644 --- a/.github/actions/logs-and-summary/action.yaml +++ b/.github/actions/logs-and-summary/action.yaml @@ -2,87 +2,77 @@ name: logs-and-summary description: 'Add logs and summary for an Elemental E2E test' -# Action inputs +# Variables to set when calling this action inputs: - ca_type: - description: CA type to use (selfsigned or private) - default: selfsigned - type: string - elemental_support: - description: URL of the elemental support binary - default: https://github.com/rancher/elemental-operator/releases/download/v1.1.4/elemental-support_1.1.4_linux_amd64 - type: string - elemental_ui_version: - description: Version of the elemental ui which will be installed (dev/stable) - default: dev - type: string - k8s_version_to_provision: - description: Name and version of installed K8s distribution - required: true - type: string - node_number: - description: Number of nodes to deploy on the provisioned cluster - default: 5 - type: string - os_to_test: - description: OS repository to test (dev/staging/stable) - type: string - default: dev - proxy: - description: Deploy a proxy - type: string - rancher_log_collector: - description: URL of the Rancher log collector script - default: https://raw.githubusercontent.com/rancherlabs/support-tools/master/collection/rancher/v2.x/logs-collector/rancher2_logs_collector.sh - type: string - rancher_version: - description: Rancher Manager channel/version/head_version to use for installation - default: stable/latest/none - type: string - sequential: - description: Defines if bootstrapping is done sequentially (true) or in parallel (false) - default: false - type: boolean - test_description: - description: Short description of the test - type: string - test_type: - description: Type of test to run (cli or ui) - default: single_cli - type: string - ui_account: - description: Account used to test RBAC role in UI - type: string - upgrade_image: - description: Image to use for the Elemental OS upgrade - type: string - upgrade_os_channel: - description: Channel to use for the Elemental OS upgrade - type: string - # Secrets inputs! 
- credentials: - description: Credentials to use to connect - required: true - pat_token: - # A token is needed to be able to add runner on the repo, maybe this can be changed later - # This token is linked to a personal account - # So in case of token issue you have to check (no specific order and for example): - # - the expiration date - # - if the account associated still exists - # - if the person still has access to the repo - description: PAT token used to add runner - required: true - qase_api_token: - description: Qase API token to use for Cypress tests + backup_restore_version: + default: "Unknown" + type: string + ca_type: + default: "Unknown" + type: string + cert_manager_version: + default: "Unknown" + type: string + elemental_ui_version: + default: "Unknown" + type: string + image_tag: + default: "Unknown" + type: string + k8s_version_to_provision: + default: "Unknown" + type: string + node_number: + default: "Unknown" + type: string + operator_upgrade: + default: "Unknown" + type: string + operator_version: + default: "Unknown" + type: string + os_to_test: + default: "Unknown" + type: string + os_version: + default: "Unknown" + type: string + proxy: + default: "Unknown" + type: string + rancher_image_version: + default: "Unknown" + type: string + rancher_version: + default: "Unknown" + type: string + sequential: + default: "Unknown" + type: boolean + test_description: + default: "Unknown" + type: string + test_type: + default: "Unknown" + type: string + ui_account: + default: "Unknown" + type: string + upgrade_image: + default: "Unknown" + type: string + upgrade_os_channel: + default: "Unknown" + type: string runs: using: "composite" steps: - name: Get logs env: - ELEMENTAL_SUPPORT: ${{ inputs.elemental_support }} + ELEMENTAL_SUPPORT: https://github.com/rancher/elemental-operator/releases/download/v1.3.4/elemental-support_1.3.4_linux_amd64 PROXY: ${{ inputs.proxy }} - RANCHER_LOG_COLLECTOR: ${{ inputs.rancher_log_collector }} + RANCHER_LOG_COLLECTOR: https://raw.githubusercontent.com/rancherlabs/support-tools/master/collection/rancher/v2.x/logs-collector/rancher2_logs_collector.sh shell: bash run: | cd tests && ( @@ -100,6 +90,7 @@ runs: if-no-files-found: ignore - name: Add summary + if: ${{ always() }} shell: bash run: | # Define some variable(s) @@ -123,32 +114,28 @@ runs: echo "Type of cluster deployed: ${CLUSTER_TYPE:-normal}" >> ${GITHUB_STEP_SUMMARY} echo "Bootstrap method: ${BOOTSTRAP_METHOD}" >> ${GITHUB_STEP_SUMMARY} echo "### Rancher Manager" >> ${GITHUB_STEP_SUMMARY} - echo "Rancher Manager Image: ${{ steps.component.outputs.rm_version }}" >> ${GITHUB_STEP_SUMMARY} + echo "Rancher Manager Image: ${{ inputs.rancher_image_version }}" >> ${GITHUB_STEP_SUMMARY} echo "Rancher Manager Version: ${{ inputs.rancher_version }}" >> ${GITHUB_STEP_SUMMARY} - echo "CertManager Image: ${{ steps.component.outputs.cert_manager_version }}" >> ${GITHUB_STEP_SUMMARY} + echo "CertManager Image: ${{ inputs.cert_manager_version }}" >> ${GITHUB_STEP_SUMMARY} echo "### Elemental" >> ${GITHUB_STEP_SUMMARY} echo "Elemental ISO image: ${{ inputs.os_to_test }}" >> ${GITHUB_STEP_SUMMARY} - echo "Elemental OS version: ${{ steps.iso_version.outputs.image_tag }}" >> ${GITHUB_STEP_SUMMARY} - echo "Elemental Operator Image: ${{ steps.component.outputs.operator_version }}" >> ${GITHUB_STEP_SUMMARY} - echo "Elemental Backup/Restore Operator Image: ${{ steps.component.outputs.backup_restore_version }}" >> ${GITHUB_STEP_SUMMARY} - if ${{ inputs.elemental_ui_version != '' }}; then - echo "Elemental UI 
Extension Version: ${{ inputs.elemental_ui_version }}" >> ${GITHUB_STEP_SUMMARY} - fi - if ${{ inputs.ui_account != '' }}; then - echo "Elemental UI User: ${{ inputs.ui_account }}" >> ${GITHUB_STEP_SUMMARY} - fi + echo "Elemental OS version: ${{ inputs.os_version }}" >> ${GITHUB_STEP_SUMMARY} + echo "Elemental Operator Image: ${{ inputs.operator_version }}" >> ${GITHUB_STEP_SUMMARY} + echo "Elemental Backup/Restore Operator Image: ${{ inputs.backup_restore_version }}" >> ${GITHUB_STEP_SUMMARY} + echo "Elemental UI Extension Version: ${{ inputs.elemental_ui_version }}" >> ${GITHUB_STEP_SUMMARY} + echo "Elemental UI User: ${{ inputs.ui_account }}" >> ${GITHUB_STEP_SUMMARY} echo "### Kubernetes" >> ${GITHUB_STEP_SUMMARY} echo "K3s on Rancher Manager: ${{ env.INSTALL_K3S_VERSION }}" >> ${GITHUB_STEP_SUMMARY} echo "K8s version deployed on the cluster(s): ${{ inputs.k8s_version_to_provision }}" >> ${GITHUB_STEP_SUMMARY} echo "### Cluster nodes" >> ${GITHUB_STEP_SUMMARY} - echo "Number of CPU: ${VCPU:-unknown}" >> ${GITHUB_STEP_SUMMARY} - echo "Memory size: ${VMEM:-unknown}GB" >> ${GITHUB_STEP_SUMMARY} + echo "Number of CPU: ${VCPU:-Unknown}" >> ${GITHUB_STEP_SUMMARY} + echo "Memory size: ${VMEM:-Unknown}GB" >> ${GITHUB_STEP_SUMMARY} # Upgrade details if ${{ inputs.upgrade_image != '' || inputs.upgrade_os_channel != '' }}; then echo "## Upgrade details" >> ${GITHUB_STEP_SUMMARY} - echo "Elemental Operator Upgrade: ${{ steps.operator_upgrade.outputs.operator_upgrade }}" >> ${GITHUB_STEP_SUMMARY} - echo "Elemental Operator Image: ${{ steps.operator_upgrade.outputs.operator_version }}" >> ${GITHUB_STEP_SUMMARY} - echo "Rancher Manager Image: ${{ steps.rancher_upgrade.outputs.rm_version }}" >> ${GITHUB_STEP_SUMMARY} + echo "Elemental Operator Upgrade: ${{ inputs.operator_upgrade }}" >> ${GITHUB_STEP_SUMMARY} + echo "Elemental Operator Image: ${{ inputs.operator_version }}" >> ${GITHUB_STEP_SUMMARY} + echo "Rancher Manager Image: ${{ inputs.rancher_image_version }}" >> ${GITHUB_STEP_SUMMARY} echo "Rancher Manager Version: ${{ inputs.rancher_upgrade }}" >> ${GITHUB_STEP_SUMMARY} echo "Channel: ${{ inputs.upgrade_os_channel }}" >> ${GITHUB_STEP_SUMMARY} echo "Upgrade image: ${{ inputs.upgrade_image }}" >> ${GITHUB_STEP_SUMMARY} diff --git a/.github/workflows/master_clean-and-delete-runner.yaml b/.github/workflows/master_clean-and-delete-runner.yaml index 86fdf5ad8..f2d63aea5 100644 --- a/.github/workflows/master_clean-and-delete-runner.yaml +++ b/.github/workflows/master_clean-and-delete-runner.yaml @@ -3,94 +3,64 @@ name: (template) Clean and delete GitHub Actions runner on GCP on: workflow_call: - # Jobs inputs + # Variables to set when calling this reusable workflow inputs: create_runner_result: description: Status of the create-runner job required: true type: string destroy_runner: - description: Destroy the auto-generated self-hosted runner - default: true + required: true type: boolean - qase_run_id: - description: Case run ID where the results will be reported - type: string - runner: - description: Name of the GCP runner + runner_hostname: required: true type: string steps_status: description: Status of all the steps from the previous job required: true type: string - uuid: - description: UUID of the GitHub runner + runner_label: required: true type: string zone: - description: GCP zone to host the runner - default: us-central1-a + required: true type: string + # Secrets to set when calling this reusable workflow secrets: credentials: - description: Credentials to use to connect required: true 
pat_token: - # A token is needed to be able to add runner on the repo, maybe this can be changed later - # This token is linked to a personal account - # So in case of token issue you have to check (no specific order and for example): - # - the expiration date - # - if the account associated still exists - # - if the person still has access to the repo - description: PAT token used to add runner required: true - qase_api_token: - description: Qase API token to use for Cypress tests jobs: clean-delete: runs-on: ubuntu-latest - # Don't block if a step fails - continue-on-error: true - env: - # QASE variables - QASE_API_TOKEN: ${{ secrets.qase_api_token }} - QASE_PROJECT_CODE: ELEMENTAL - QASE_RUN_ID: ${{ inputs.qase_run_id }} steps: - name: Checkout uses: actions/checkout@v4 - - name: Setup Go - uses: actions/setup-go@v5 - with: - cache-dependency-path: tests/go.sum - go-version-file: tests/go.mod - - name: Finalize Qase Run and publish Results - if: ${{ env.QASE_RUN_ID != '' && !contains(inputs.steps_status, 'cancelled') }} - run: cd tests && make publish-qase-run - - name: Delete Qase Run if job has been cancelled - if: ${{ env.QASE_RUN_ID != '' && contains(inputs.steps_status, 'cancelled') }} - run: cd tests && make delete-qase-run + - name: Authenticate to GCP uses: google-github-actions/auth@v2 with: credentials_json: ${{ secrets.credentials }} + - name: Setup gcloud uses: google-github-actions/setup-gcloud@v2 + - name: Delete GCP secrets run: | for SECRET in PAT_TOKEN GH_REPO; do - gcloud --quiet secrets delete ${SECRET}_${{ inputs.uuid }} || true + gcloud --quiet secrets delete ${SECRET}_${{ inputs.runner_label }} || true done + - name: Delete runner if: ${{ inputs.create_runner_result == 'success' && inputs.destroy_runner == true }} run: | # Disable failure on first error, needed for the "delete" check set +e # If runner is already deleted we can bypass the error - if ! LOGS=$(gcloud --quiet compute instances delete ${{ inputs.runner }} \ + if ! 
LOGS=$(gcloud --quiet compute instances delete ${{ inputs.runner_hostname }} \ --delete-disks all \ --zone ${{ inputs.zone }} 2>&1); then echo "${LOGS}" | grep -q "resource .* was not found" || RC=1 diff --git a/.github/workflows/master_cli.yaml b/.github/workflows/master_cli.yaml index 67262aac9..44abfe75c 100644 --- a/.github/workflows/master_cli.yaml +++ b/.github/workflows/master_cli.yaml @@ -1,218 +1,103 @@ # This workflow is a reusable one called by other workflows -name: (template) Elemental E2E tests with Rancher Manager +name: (template) Elemental E2E CLI tests on: workflow_call: # Variables to set when calling this reusable workflow - secrets: - credentials: - description: Credentials to use to connect - required: true - pat_token: - # A token is needed to be able to add runner on the repo, maybe this can be changed later - # This token is linked to a personal account - # So in case of token issue you have to check (no specific order and for example): - # - the expiration date - # - if the account associated still exists - # - if the person still has access to the repo - description: PAT token used to add runner - required: true - qase_api_token: - description: Qase API token to use for Cypress tests - slack_webhook_url: - description: WebHook URL to use for Slack - required: true inputs: backup_restore_version: - description: Version of backup-restore-operator to use - type: string - qase_run_id: - description: Case run ID where the results will be reported +# required: true type: string ca_type: - description: CA type to use (selfsigned or private) - default: selfsigned + required: true type: string cert-manager_version: - description: Version of cert-manager to use + required: true type: string cluster_name: - description: Name of the provisioned cluster required: true type: string cluster_number: - description: Number of clusters to deploy in multi-cluster test +# required: true type: string cluster_type: - description: Cluster type (empty if normal or hardened) - type: string - cypress_tags: - description: Tags to filter tests we want to run - default: main - type: string - destroy_runner: - description: Destroy the auto-generated self-hosted runner - default: true - type: boolean - elemental_support: - description: URL of the elemental support binary - default: https://github.com/rancher/elemental-operator/releases/download/v1.1.4/elemental-support_1.1.4_linux_amd64 - type: string - elemental_ui_version: - description: Version of the elemental ui which will be installed (dev/stable) - default: dev +# required: true type: string iso_boot: - description: Choose booting from ISO - default: false + required: true type: boolean k8s_version_to_provision: - description: Name and version of installed K8s distribution required: true type: string node_number: - description: Number of nodes to deploy on the provisioned cluster - default: 5 +# required: true type: string operator_repo: - description: Elemental operator repository to use + required: true type: string - default: oci://registry.opensuse.org/isv/rancher/elemental/dev/charts/rancher operator_upgrade: - description: Elemental operator version to upgrade to +# required: true type: string os_to_test: - description: OS repository to test (dev/staging/stable) +# required: true type: string - default: dev - proxy: - description: Deploy a proxy + public_dns: + required: true type: string - rancher_log_collector: - description: URL of the Rancher log collector script - default: 
https://raw.githubusercontent.com/rancherlabs/support-tools/master/collection/rancher/v2.x/logs-collector/rancher2_logs_collector.sh + qase_run_id: + required: true type: string rancher_version: - description: Rancher Manager channel/version/head_version to use for installation - default: stable/latest/none + required: true type: string rancher_upgrade: - description: Rancher Manager channel/version to upgrade to +# required: true type: string reset: - description: Allow reset test (mainly used on CLI tests) - default: false +# required: true type: boolean - runner_template: - description: Runner template to use - default: elemental-e2e-ci-runner-spot-x86-64-template-n2-standard-16-v5 + runner_label: + required: true type: string sequential: - description: Defines if bootstrapping is done sequentially (true) or in parallel (false) - default: false +# required: true type: boolean test_description: - description: Short description of the test + required: true type: string test_type: - description: Type of test to run (cli or ui) - default: single_cli - type: string - ui_account: - description: Account used to test RBAC role in UI +# required: true type: string upgrade_image: - description: Image to use for the Elemental OS upgrade + required: true type: string upgrade_os_channel: - description: Channel to use for the Elemental OS upgrade + required: true type: string upgrade_type: - description: Type of upgrade to use for the Elemental OS upgrade +# required: true type: string upstream_cluster_version: - description: Cluster upstream version where to install Rancher (K3s or RKE2) - default: v1.26.10+k3s2 - type: string - zone: - description: GCP zone to host the runner - default: us-central1-a + required: true type: string + # Variables to set when calling this reusable workflow + secrets: + qase_api_token: + jobs: - create-runner: - runs-on: ubuntu-latest + cli: + runs-on: ${{ inputs.runner_label }} outputs: - uuid: ${{ steps.generator.outputs.uuid }} - runner: ${{ steps.generator.outputs.runner }} - public_dns: ${{ steps.dns.outputs.public_dns }} - steps: - # actions/checkout MUST come before auth - - name: Checkout - uses: actions/checkout@v3 - - name: Generate UUID and Runner hostname - id: generator - run: | - # NOTE: keep the runner name to less than 63 characters! 
- UUID=$(uuidgen) - GH_REPO_FULL=${{ github.repository }} - GH_REPO=${GH_REPO_FULL#*/} - echo "uuid=${UUID//-}" >> ${GITHUB_OUTPUT} - echo "runner=${GH_REPO//\//-}-ci-${UUID//-}" >> ${GITHUB_OUTPUT} - - name: Authenticate to GCP - uses: google-github-actions/auth@v1 - with: - credentials_json: ${{ secrets.credentials }} - - name: Setup gcloud - uses: google-github-actions/setup-gcloud@v1 - - name: Create runner - run: | - gcloud compute instances create ${{ steps.generator.outputs.runner }} \ - --source-instance-template ${{ inputs.runner_template }} \ - --zone ${{ inputs.zone }} - - name: Create GCP secrets - run: | - echo -n ${{ secrets.pat_token }} \ - | gcloud secrets create PAT_TOKEN_${{ steps.generator.outputs.uuid }} --data-file=- - echo -n ${{ github.repository }} \ - | gcloud secrets create GH_REPO_${{ steps.generator.outputs.uuid }} --data-file=- - - name: Get public dns name in GCP - id: dns - run: | - # Do a timed out loop here, as gcloud can sometimes fail - typeset -i i=0 - while true; do - # Get public IP - PUBLIC_IP=$(gcloud compute instances list 2> /dev/null \ - | awk '/${{ steps.generator.outputs.runner }}/ {print $6}') - # Exit if we reach the timeout or if IP is set - if (( ++i > 10 )) || [[ -n "${PUBLIC_IP}" ]]; then - break - fi - # Wait a little before retrying - sleep 2 - done - # Get the public DNS - PUBLIC_DNS=$(host -l ${PUBLIC_IP} 2> /dev/null \ - | awk '{sub(/\.$/, ""); print $5}') - echo "public_dns=${PUBLIC_DNS}" >> ${GITHUB_OUTPUT} - # Raise an error if either IP and/or DNS are empty - if [[ -z "${PUBLIC_IP}" || -z "${PUBLIC_DNS}" ]]; then - echo "PUBLIC_IP and/or PUBLIC_DNS are empty!" >&2 - false - fi - e2e: - needs: create-runner - runs-on: ${{ needs.create-runner.outputs.uuid }} - outputs: - qase_run_id: ${{ steps.qase.outputs.qase_run_id }} + # For this to work 'id:' in steps are mandatory! steps_status: ${{ join(steps.*.conclusion, ' ') }} env: - ARCH: amd64 CERT_MANAGER_VERSION: ${{ inputs.cert-manager_version }} CLUSTER_NAME: ${{ inputs.cluster_name }} CLUSTER_NS: fleet-default CLUSTER_TYPE: ${{ inputs.cluster_type }} + # For Qase reporting + QASE_RUN_ID: ${{ inputs.qase_run_id }} # K3S / RKE2 flags to use for installation INSTALL_K3S_SKIP_ENABLE: true INSTALL_K3S_VERSION: ${{ inputs.upstream_cluster_version }} @@ -222,14 +107,6 @@ jobs: K8S_UPSTREAM_VERSION: ${{ inputs.upstream_cluster_version }} # For K8s cluster to provision with Rancher Manager K8S_VERSION_TO_PROVISION: ${{ inputs.k8s_version_to_provision }} - # QASE variables - QASE_API_TOKEN: ${{ secrets.qase_api_token }} - QASE_PROJECT_CODE: ELEMENTAL - QASE_REPORT: 1 - QASE_RUN_COMPLETE: 1 - QASE_RUN_DESCRIPTION: TO_BE_CHANGED - QASE_RUN_ID: ${{ inputs.qase_run_id }} - QASE_RUN_NAME: TO_BE_CHANGED # For Rancher Manager RANCHER_VERSION: ${{ inputs.rancher_version }} TEST_TYPE: ${{ inputs.test_type }} @@ -237,53 +114,21 @@ jobs: steps: - name: Checkout id: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 + - name: Install Go id: install_go - uses: actions/setup-go@v3 + uses: actions/setup-go@v5 with: + cache-dependency-path: tests/go.sum go-version-file: tests/go.mod + - name: Define needed system variables id: define_sys_vars run: | # Add missing PATH, removed in recent distributions for security reasons... 
echo "/usr/local/bin" >> ${GITHUB_PATH} - - name: Deploy Proxy - id: proxy - if: ${{ inputs.proxy == 'elemental' || inputs.proxy == 'rancher' }} - run: docker run -d --name squid_proxy -v $(pwd)/tests/assets/squid.conf:/etc/squid/squid.conf -p 3128:3128 wernight/squid - - name: Create Qase Run (if needed) - id: qase - if: ${{ inputs.qase_run_id == 'auto' }} - run: | - # Export the Qase run name, as it cannot be done in - # 'env:' because GITHUB_WORKFLOW is a shell variable - echo "QASE_RUN_NAME=${GITHUB_WORKFLOW}" >> ${GITHUB_ENV} - # Also export URL of GH test run in Qase run description - GH_RUN_URL="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - QASE_DESC="${{ inputs.test_description }} (${GH_RUN_URL})" - echo "QASE_RUN_DESCRIPTION=${QASE_DESC}" >> ${GITHUB_ENV} - - # Export them also to be used locally - export QASE_RUN_NAME="${GITHUB_WORKFLOW}" - export QASE_RUN_DESCRIPTION="${QASE_DESC}" - - # We need to always unset the faking ID variable - unset QASE_RUN_ID - - ID=$(cd tests && make create-qase-run) - if [[ -n "${ID}" ]]; then - # Export QASE_RUN_ID to be sure that we always have the same value - echo "QASE_RUN_ID=${ID}" >> ${GITHUB_ENV} - export QASE_RUN_ID=${ID} - fi - - # Output for the clean step - echo "qase_run_id=${QASE_RUN_ID}" >> ${GITHUB_OUTPUT} - - # Just an info for debugging purposes - echo "Export QASE_RUN_ID=${QASE_RUN_ID}, QASE_RUN_DESCRIPTION=${QASE_RUN_DESCRIPTION} and QASE_RUN_NAME=${QASE_RUN_NAME}" - name: Install Rancher+Elemental components id: install_rancher_elemental env: @@ -293,6 +138,7 @@ jobs: PUBLIC_DNS: ${{ needs.create-runner.outputs.public_dns }} PUBLIC_DOMAIN: bc.googleusercontent.com run: cd tests && make e2e-install-rancher + - name: Workaround for DynamicSchemas (if needed) run: | # Check if DynamicSchemas for MachineInventorySelectorTemplate exists @@ -301,10 +147,12 @@ jobs: echo "WORKAROUND: DynamicSchemas for MachineInventorySelectorTemplate is missing!" 
kubectl apply -f tests/assets/add_missing_dynamicschemas.yaml fi + - name: Install backup-restore components (K3s only for now) id: install_backup_restore if: ${{ inputs.test_type == 'single_cli' && contains(inputs.upstream_cluster_version, 'k3s') }} run: cd tests && make e2e-install-backup-restore + - name: Extract component versions/informations id: component run: | @@ -333,121 +181,12 @@ jobs: echo "cert_manager_version=${CERT_MANAGER_VERSION}" >> ${GITHUB_OUTPUT} echo "operator_version=${OPERATOR_VERSION}" >> ${GITHUB_OUTPUT} echo "rm_version=${RM_VERSION}" >> ${GITHUB_OUTPUT} - - name: Install Chartmuseum - id: install_chartmuseum - if: ${{ inputs.test_type == 'ui' }} - run: cd tests && make e2e-install-chartmuseum - - name: Cypress tests - Basics - id: cypress_basics - # Basics means tests without an extra elemental node needed - if: ${{ inputs.test_type == 'ui' }} - env: - BROWSER: chrome - CHARTMUSEUM_REPO: http://${{ needs.create-runner.outputs.public_dns }} - CYPRESS_DOCKER: 'cypress/included:10.9.0' - CYPRESS_TAGS: ${{ inputs.cypress_tags }} - ELEMENTAL_UI_VERSION: ${{ inputs.elemental_ui_version }} - ISO_BOOT: ${{ inputs.iso_boot }} - K8S_UPSTREAM_VERSION: ${{ inputs.upstream_cluster_version }} - OPERATOR_REPO: ${{ inputs.operator_repo }} - RANCHER_VERSION: ${{ steps.component.outputs.rm_version }} - RANCHER_PASSWORD: rancherpassword - RANCHER_URL: https://${{ needs.create-runner.outputs.public_dns }}/dashboard - RANCHER_USER: admin - SPEC: | - /workdir/e2e/unit_tests/first_connection.spec.ts - /workdir/e2e/unit_tests/elemental_operator.spec.ts - /workdir/e2e/unit_tests/elemental_plugin.spec.ts - /workdir/e2e/unit_tests/user.spec.ts - /workdir/e2e/unit_tests/menu.spec.ts - /workdir/e2e/unit_tests/machine_registration.spec.ts - /workdir/e2e/unit_tests/advanced_filtering.spec.ts - UI_ACCOUNT: ${{ inputs.ui_account }} - UPGRADE_OS_CHANNEL: ${{ inputs.upgrade_os_channel }} - run: cd tests && make start-cypress-tests - - name: Upload Cypress screenshots (Basics) - id: upload_screenshots_cypress_basics - if: ${{ failure() && inputs.test_type == 'ui' }} - uses: actions/upload-artifact@v3 - with: - name: cypress-screenshots-basics-${{ inputs.cluster_name }} - path: tests/cypress/latest/screenshots - retention-days: 7 - if-no-files-found: ignore - - name: Upload Cypress videos (Basics) - id: upload_videos_cypress_basics - # Test run video is always captured, so this action uses "always()" condition - if: ${{ always() && inputs.test_type == 'ui' }} - uses: actions/upload-artifact@v3 - with: - name: cypress-videos-basics-${{ inputs.cluster_name }} - path: tests/cypress/latest/videos - retention-days: 7 - - name: Deploy a node to join Rancher manager - id: deploy_node_ui - if: ${{ inputs.test_type == 'ui' }} - env: - ISO_BOOT: ${{ inputs.iso_boot }} - VM_INDEX: 1 - VM_MEM: 8192 - HOST_MEMORY_RESERVED: 49152 - run: | - cd tests && ( - # Removing 'downloads' is needed to avoid this error during 'make': - # 'pattern all: open .../elemental/tests/cypress/downloads: permission denied' - sudo rm -rf cypress/latest/downloads - make e2e-ui-rancher - ) - - name: Cypress tests - Advanced - id: cypress_advanced - # Advanced means tests which needs an extra elemental node (provisioned with libvirt) - if: ${{ inputs.test_type == 'ui' }} - env: - BROWSER: firefox - CHARTMUSEUM_REPO: http://${{ needs.create-runner.outputs.public_dns }} - CYPRESS_DOCKER: 'cypress/included:10.9.0' - CYPRESS_TAGS: ${{ inputs.cypress_tags }} - ELEMENTAL_UI_VERSION: ${{ inputs.elemental_ui_version }} - OPERATOR_REPO: ${{ 
inputs.operator_repo }} - PROXY: ${{ inputs.proxy }} - RANCHER_VERSION: ${{ steps.component.outputs.rm_version }} - RANCHER_PASSWORD: rancherpassword - RANCHER_URL: https://${{ needs.create-runner.outputs.public_dns }}/dashboard - RANCHER_USER: admin - SPEC: | - /workdir/e2e/unit_tests/machine_selector.spec.ts - /workdir/e2e/unit_tests/machine_inventory.spec.ts - /workdir/e2e/unit_tests/reset.spec.ts - /workdir/e2e/unit_tests/deploy_app.spec.ts - /workdir/e2e/unit_tests/upgrade-operator.spec.ts - /workdir/e2e/unit_tests/upgrade-ui-extension.spec.ts - /workdir/e2e/unit_tests/upgrade.spec.ts - UI_ACCOUNT: ${{ inputs.ui_account }} - UPGRADE_IMAGE: ${{ inputs.upgrade_image }} - UPGRADE_OS_CHANNEL: ${{ inputs.upgrade_os_channel }} - run: cd tests && make start-cypress-tests - - name: Upload Cypress screenshots (Advanced) - id: upload_screenshots_cypress_advanced - if: ${{ failure() && inputs.test_type == 'ui' }} - uses: actions/upload-artifact@v3 - with: - name: cypress-screenshots-advanced-${{ inputs.cluster_name }} - path: tests/cypress/latest/screenshots - retention-days: 7 - if-no-files-found: ignore - - name: Upload Cypress videos (Advanced) - id: upload_videos_cypress_advanced - # Test run video is always captured, so this action uses "always()" condition - if: ${{ always() && inputs.test_type == 'ui' }} - uses: actions/upload-artifact@v3 - with: - name: cypress-videos-advanced-${{ inputs.cluster_name }} - path: tests/cypress/latest/videos - retention-days: 7 + - name: Configure Rancher & Libvirt id: configure_rancher if: ${{ inputs.test_type == 'single_cli' }} run: cd tests && make e2e-configure-rancher + - name: Create ISO image for master pool id: create_iso_master if: ${{ inputs.test_type == 'single_cli' }} @@ -462,10 +201,12 @@ jobs: export ISO_BOOT=true fi cd tests && make e2e-iso-image + - name: Extract iPXE artifacts from ISO id: extract_ipxe_artifacts if: ${{ inputs.test_type == 'single_cli' && inputs.iso_boot == false }} run: cd tests && make extract_kernel_init_squash && make ipxe + - name: Bootstrap node 1, 2 and 3 in pool "master" (use Emulated TPM if possible) id: bootstrap_master_nodes if: ${{ inputs.test_type == 'single_cli' }} @@ -493,6 +234,7 @@ jobs: else cd tests && VM_INDEX=${VM_START} VM_NUMBERS=${VM_END} make e2e-bootstrap-node fi + - name: Deploy multiple clusters (with 3 nodes by cluster) id: deploy_multi_clusters if: inputs.test_type == 'multi_cli' @@ -506,18 +248,22 @@ jobs: export VM_CPU=6 fi cd tests && make e2e-multi-cluster + - name: Install a simple application id: install_simple_app if: ${{ inputs.test_type == 'single_cli' && contains(inputs.upstream_cluster_version, 'k3s') }} run: cd tests && make e2e-install-app && make e2e-check-app + - name: Reset a node in the cluster id: reset_node if: ${{ inputs.test_type == 'single_cli' && inputs.reset == true }} run: cd tests && make e2e-reset + - name: Check app after reset id: check_app if: ${{ inputs.test_type == 'single_cli' && inputs.reset == true && contains(inputs.upstream_cluster_version, 'k3s') }} run: cd tests && make e2e-check-app + - name: Upgrade Elemental Operator id: operator_upgrade if: ${{ inputs.test_type == 'single_cli' && inputs.operator_upgrade != '' }} @@ -536,6 +282,7 @@ jobs: # Export values echo "operator_upgrade=${OPERATOR_UPGRADE}" >> ${GITHUB_OUTPUT} echo "operator_version=${OPERATOR_VERSION}" >> ${GITHUB_OUTPUT} + - name: Upgrade Rancher Manager id: rancher_upgrade if: ${{ inputs.test_type == 'single_cli' && inputs.rancher_upgrade != '' }} @@ -557,6 +304,7 @@ jobs: -o 
jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true) # Export values echo "rm_version=${RM_VERSION}" >> ${GITHUB_OUTPUT} + - name: Upgrade node 1 to specified OS version with osImage id: upgrade_node_1 if: ${{ inputs.test_type == 'single_cli' && inputs.upgrade_image != '' }} @@ -569,6 +317,7 @@ jobs: if ${{ contains(inputs.upstream_cluster_version, 'k3s') }}; then make e2e-check-app fi + - name: Upgrade other nodes to specified OS version with managedOSVersionName id: upgrade_other_nodes if: ${{ inputs.test_type == 'single_cli' && inputs.upgrade_os_channel != '' }} @@ -582,6 +331,7 @@ jobs: if ${{ contains(inputs.upstream_cluster_version, 'k3s') }}; then make e2e-check-app fi + - name: Test Backup/Restore Elemental resources with Rancher Manager id: test_backup_restore if: ${{ inputs.test_type == 'single_cli' && contains(inputs.upstream_cluster_version, 'k3s') }} @@ -592,6 +342,7 @@ jobs: if ${{ contains(inputs.upstream_cluster_version, 'k3s') }}; then make e2e-check-app fi + - name: Extract ISO version id: iso_version if: ${{ always() }} @@ -607,11 +358,13 @@ jobs: fi # Export value (even if empty!) echo "image_tag=${IMAGE_TAG}" >> ${GITHUB_OUTPUT} + - name: Remove old built ISO image id: clean_master_iso # Only one at a time is allowed, the new one will be created after if needed if: ${{ inputs.test_type == 'single_cli' }} run: rm -f *.iso + - name: Create ISO image for worker pool if: ${{ inputs.test_type == 'single_cli' }} env: @@ -619,6 +372,7 @@ jobs: OS_TO_TEST: ${{ inputs.os_to_test }} POOL: worker run: cd tests && make e2e-iso-image + - name: Bootstrap additional nodes in pool "worker" (total of ${{ inputs.node_number }}) id: bootstrap_worker_nodes if: ${{ inputs.test_type == 'single_cli' && inputs.node_number > 3 }} @@ -646,6 +400,7 @@ jobs: if ${{ contains(inputs.upstream_cluster_version, 'k3s') }}; then make e2e-check-app fi + - name: Uninstall Elemental Operator id: uninstall_elemental_operator env: @@ -653,158 +408,17 @@ jobs: # Don't test Operator uninstall if we want to keep the runner for debugging purposes if: ${{ inputs.destroy_runner == true && inputs.test_type == 'single_cli' }} run: cd tests && make e2e-uninstall-operator - - name: Get logs - id: get_logs - if: ${{ always() }} - env: - ELEMENTAL_SUPPORT: ${{ inputs.elemental_support }} - PROXY: ${{ inputs.proxy }} - RANCHER_LOG_COLLECTOR: ${{ inputs.rancher_log_collector }} - run: | - cd tests && ( - # Removing 'downloads' is needed to avoid this error during 'make': - # 'pattern all: open .../elemental/tests/cypress/downloads: permission denied' - sudo rm -rf cypress/latest/downloads - make e2e-get-logs - ) - - name: Upload logs - id: uploade_logs - if: ${{ always() }} - uses: actions/upload-artifact@v3 - with: - name: support-logs - path: tests/**/logs/* - if-no-files-found: ignore - - name: Add summary - id: add_summary + + # This step must be called in each worklow that wants a summary! 
+ - name: Get logs and add summary + id: logs_summary if: ${{ always() }} - run: | - # Define some variable(s) - BOOTSTRAP_METHOD="Parallel" - if ${{ inputs.sequential == true }}; then - BOOTSTRAP_METHOD="Sequential" - fi - # Get nodes configuration (use the first one, they are all identical) - NODE=$(sudo virsh list --name | head -1) - if [[ -n "${NODE}" ]]; then - VCPU=$(sudo virsh vcpucount --live ${NODE}) - VMEM=$(sudo virsh dommemstat --live ${NODE} | awk '/^actual/ { print $2 }') - (( VMEM /= 1048576 )) - fi - # Add summary - echo "## General informations" >> ${GITHUB_STEP_SUMMARY} - echo -e "***${{ inputs.test_description }}***\n" >> ${GITHUB_STEP_SUMMARY} - if ${{ inputs.test_type == 'single_cli' }}; then - echo "Number of nodes in the cluster: ${{ inputs.node_number }}" >> ${GITHUB_STEP_SUMMARY} - fi - echo "Type of certificate for Rancher Manager: ${{ inputs.ca_type }}" >> ${GITHUB_STEP_SUMMARY} - echo "Type of cluster deployed: ${CLUSTER_TYPE:-normal}" >> ${GITHUB_STEP_SUMMARY} - echo "Bootstrap method: ${BOOTSTRAP_METHOD}" >> ${GITHUB_STEP_SUMMARY} - echo "### Rancher Manager" >> ${GITHUB_STEP_SUMMARY} - echo "Rancher Manager Image: ${{ steps.component.outputs.rm_version }}" >> ${GITHUB_STEP_SUMMARY} - echo "Rancher Manager Version: ${{ inputs.rancher_version }}" >> ${GITHUB_STEP_SUMMARY} - echo "CertManager Image: ${{ steps.component.outputs.cert_manager_version }}" >> ${GITHUB_STEP_SUMMARY} - echo "### Elemental" >> ${GITHUB_STEP_SUMMARY} - echo "Elemental ISO image: ${{ inputs.os_to_test }}" >> ${GITHUB_STEP_SUMMARY} - echo "Elemental OS version: ${{ steps.iso_version.outputs.image_tag }}" >> ${GITHUB_STEP_SUMMARY} - echo "Elemental Operator Image: ${{ steps.component.outputs.operator_version }}" >> ${GITHUB_STEP_SUMMARY} - echo "Elemental Backup/Restore Operator Image: ${{ steps.component.outputs.backup_restore_version }}" >> ${GITHUB_STEP_SUMMARY} - if ${{ inputs.elemental_ui_version != '' }}; then - echo "Elemental UI Extension Version: ${{ inputs.elemental_ui_version }}" >> ${GITHUB_STEP_SUMMARY} - fi - if ${{ inputs.ui_account != '' }}; then - echo "Elemental UI User: ${{ inputs.ui_account }}" >> ${GITHUB_STEP_SUMMARY} - fi - echo "### Kubernetes" >> ${GITHUB_STEP_SUMMARY} - echo "K3s on Rancher Manager: ${{ env.INSTALL_K3S_VERSION }}" >> ${GITHUB_STEP_SUMMARY} - echo "K8s version deployed on the cluster(s): ${{ inputs.k8s_version_to_provision }}" >> ${GITHUB_STEP_SUMMARY} - echo "### Cluster nodes" >> ${GITHUB_STEP_SUMMARY} - echo "Number of CPU: ${VCPU:-unknown}" >> ${GITHUB_STEP_SUMMARY} - echo "Memory size: ${VMEM:-unknown}GB" >> ${GITHUB_STEP_SUMMARY} - # Upgrade details - if ${{ inputs.upgrade_image != '' }} || ${{ inputs.upgrade_os_channel != '' }}; then - echo "## Upgrade details" >> ${GITHUB_STEP_SUMMARY} - echo "Elemental Operator Upgrade: ${{ steps.operator_upgrade.outputs.operator_upgrade }}" >> ${GITHUB_STEP_SUMMARY} - echo "Elemental Operator Image: ${{ steps.operator_upgrade.outputs.operator_version }}" >> ${GITHUB_STEP_SUMMARY} - echo "Rancher Manager Image: ${{ steps.rancher_upgrade.outputs.rm_version }}" >> ${GITHUB_STEP_SUMMARY} - echo "Rancher Manager Version: ${{ inputs.rancher_upgrade }}" >> ${GITHUB_STEP_SUMMARY} - echo "Channel: ${{ inputs.upgrade_os_channel }}" >> ${GITHUB_STEP_SUMMARY} - echo "Upgrade image: ${{ inputs.upgrade_image }}" >> ${GITHUB_STEP_SUMMARY} - fi - - name: Finalize Qase Run and publish Results - id: finalize_qase - if: ${{ always() && job.status != 'cancelled' }} - run: | - if [[ -n "${QASE_RUN_ID}" ]]; then - cd tests && 
make publish-qase-run - fi - - name: Send failed status to slack - id: send_status - if: ${{ failure() && github.event_name == 'schedule' }} - uses: slackapi/slack-github-action@v1.23.0 - with: - payload: | - { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": "Workflow E2E ${{ github.job }}" - }, - "accessory": { - "type": "button", - "text": { - "type": "plain_text", - "text": ":github:", - "emoji": true - }, - "url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" - } - } - ] - } - env: - SLACK_WEBHOOK_URL: ${{ secrets.slack_webhook_url }} - SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK - clean-and-delete-runner: - if: ${{ always() }} - needs: [create-runner, e2e] - runs-on: ubuntu-latest - env: - # QASE variables - QASE_API_TOKEN: ${{ secrets.qase_api_token }} - QASE_PROJECT_CODE: ELEMENTAL - QASE_RUN_ID: ${{ needs.e2e.outputs.qase_run_id }} - steps: - # actions/checkout MUST come before auth - - name: Checkout - uses: actions/checkout@v3 - - name: Authenticate to GCP - uses: google-github-actions/auth@v1 + uses: ./.github/actions/logs-and-summary with: - credentials_json: ${{ secrets.credentials }} - - name: Setup gcloud - uses: google-github-actions/setup-gcloud@v1 - - name: Delete GCP secrets - run: | - gcloud --quiet secrets delete PAT_TOKEN_${{ needs.create-runner.outputs.uuid }} || true - gcloud --quiet secrets delete GH_REPO_${{ needs.create-runner.outputs.uuid }} || true - - name: Delete runner - if: ${{ needs.create-runner.result == 'success' && inputs.destroy_runner == true }} - run: | - # Disable failure on first error, needed for the "delete" check - set +e - # If runner is already deleted we can bypass the error - # NOTE: seems to still return an error if the VM is already deleted... To fix! - if ! 
LOGS=$(gcloud --quiet compute instances delete ${{ needs.create-runner.outputs.runner }} \ - --delete-disks all \ - --zone ${{ inputs.zone }} 2>&1); then - echo "${LOGS}" | grep -q "resource .* was not found" && true || false - fi - - name: Delete Qase Run if job has been cancelled - if: ${{ always() && contains(needs.e2e.outputs.steps_status, 'cancelled') }} - run: | - if [[ -n "${QASE_RUN_ID}" ]]; then - cd tests && make delete-qase-run - fi + cert_manager_version: ${{ steps.component.outputs.cert_manager_version }} + k8s_version_to_provision: ${{ inputs.k8s_version_to_provision }} + operator_version: ${{ steps.component.outputs.operator_version }} + os_version: ${{ steps.iso_version.outputs.os_version }} + proxy: ${{ inputs.proxy }} + rancher_image_version: ${{ steps.component.outputs.rancher_image_version }} + test_description: ${{ inputs.test_description }} diff --git a/.github/workflows/master_create-runner.yaml b/.github/workflows/master_create-runner.yaml index e2dc4e3b9..985d5e4d5 100644 --- a/.github/workflows/master_create-runner.yaml +++ b/.github/workflows/master_create-runner.yaml @@ -3,40 +3,32 @@ name: (template) Create GitHub Actions runner on GCP on: workflow_call: - # Jobs inputs + # Variables to set when calling this reusable workflow inputs: runner_template: - description: Runner template to use - default: elemental-e2e-ci-runner-spot-x86-64-template-n2-standard-16-v5 + required: true type: string zone: - description: GCP zone to host the runner - default: us-central1-a + required: true type: string + # Job outputs to export for caller workflow outputs: public_dns: description: FQDN hostname of GCP runner value: ${{ jobs.create.outputs.public_dns }} - runner: + runner_hostname: description: Internal name of GCP runner - value: ${{ jobs.create.outputs.runner }} - uuid: - description: Unique ID of GCP runner - value: ${{ jobs.create.outputs.uuid }} + value: ${{ jobs.create.outputs.runner_hostname }} + runner_label: + description: Unique ID of GCP runner (used as runner label) + value: ${{ jobs.create.outputs.runner_label }} + # Secrets to set when calling this reusable workflow secrets: credentials: - description: Credentials to use to connect required: true pat_token: - # A token is needed to be able to add runner on the repo, maybe this can be changed later - # This token is linked to a personal account - # So in case of token issue you have to check (no specific order and for example): - # - the expiration date - # - if the account associated still exists - # - if the person still has access to the repo - description: PAT token used to add runner required: true jobs: @@ -44,12 +36,12 @@ jobs: runs-on: ubuntu-latest outputs: public_dns: ${{ steps.dns.outputs.public_dns }} - runner: ${{ steps.generator.outputs.runner }} - uuid: ${{ steps.generator.outputs.uuid }} + runner_hostname: ${{ steps.generator.outputs.runner_hostname }} + runner_label: ${{ steps.generator.outputs.runner_label }} steps: - # actions/checkout MUST come before auth - name: Checkout uses: actions/checkout@v4 + - name: Generate UUID and Runner hostname id: generator run: | @@ -57,45 +49,57 @@ jobs: UUID=$(uuidgen) GH_REPO_FULL=${{ github.repository }} GH_REPO=${GH_REPO_FULL#*/} - echo "uuid=${UUID//-}" >> ${GITHUB_OUTPUT} - echo "runner=${GH_REPO//\//-}-ci-${UUID//-}" >> ${GITHUB_OUTPUT} + echo "runner_hostname=${GH_REPO//\//-}-ci-${UUID//-}" >> ${GITHUB_OUTPUT} + echo "runner_label=${UUID//-}" >> ${GITHUB_OUTPUT} + - name: Authenticate to GCP uses: google-github-actions/auth@v2 with: 
          credentials_json: ${{ secrets.credentials }}
+
       - name: Setup gcloud
         uses: google-github-actions/setup-gcloud@v2
+
       - name: Create runner
         run: |
-          gcloud compute instances create ${{ steps.generator.outputs.runner }} \
+          gcloud compute instances create ${{ steps.generator.outputs.runner_hostname }} \
             --source-instance-template ${{ inputs.runner_template }} \
             --zone ${{ inputs.zone }}
+
       - name: Create GCP secrets
         run: |
           echo -n ${{ secrets.pat_token }} \
-            | gcloud secrets create PAT_TOKEN_${{ steps.generator.outputs.uuid }} --data-file=-
+            | gcloud secrets create PAT_TOKEN_${{ steps.generator.outputs.runner_label }} --data-file=-
           echo -n ${{ github.repository }} \
-            | gcloud secrets create GH_REPO_${{ steps.generator.outputs.uuid }} --data-file=-
-      - name: Get public dns name in GCP
+            | gcloud secrets create GH_REPO_${{ steps.generator.outputs.runner_label }} --data-file=-
+
+      - name: Get public DNS name in GCP
         id: dns
         run: |
+          set +e
+          set -x
+
           # Do a timed out loop here, as gcloud can sometimes fail
           typeset -i i=0
           while true; do
             # Get public IP
             PUBLIC_IP=$(gcloud compute instances list 2> /dev/null \
-                      | awk '/${{ steps.generator.outputs.runner }}/ {print $6}')
+                      | awk '/${{ steps.generator.outputs.runner_hostname }}/ {print $6}')
+
             # Exit if we reach the timeout or if IP is set
             if (( ++i > 10 )) || [[ -n "${PUBLIC_IP}" ]]; then
               break
             fi
+
             # Wait a little before retrying
             sleep 2
           done
+
           # Get the public DNS
           PUBLIC_DNS=$(host -l ${PUBLIC_IP} 2> /dev/null \
                      | awk '{sub(/\.$/, ""); print $5}')
           echo "public_dns=${PUBLIC_DNS}" >> ${GITHUB_OUTPUT}
+
           # Raise an error if either IP and/or DNS are empty
           if [[ -z "${PUBLIC_IP}" || -z "${PUBLIC_DNS}" ]]; then
             echo "PUBLIC_IP and/or PUBLIC_DNS are empty!" >&2
diff --git a/.github/workflows/master_e2e.yaml b/.github/workflows/master_e2e.yaml
new file mode 100644
index 000000000..2070a4831
--- /dev/null
+++ b/.github/workflows/master_e2e.yaml
@@ -0,0 +1,109 @@
+# This workflow is a reusable one called by other workflows
+name: (template) CLI/UI template
+
+on:
+  workflow_call:
+    # Variables to set when calling this reusable workflow
+    inputs:
+      backup_restore_version:
+        type: string
+      ca_type:
+        type: string
+      cert-manager_version:
+        type: string
+      cluster_name:
+        required: true
+        type: string
+      cypress_tags:
+        type: string
+      destroy_runner:
+        type: boolean
+      elemental_ui_version:
+        type: string
+      iso_boot:
+        type: boolean
+      k8s_version_to_provision:
+        required: true
+        type: string
+      operator_repo:
+        type: string
+      proxy:
+        type: string
+      public_dns:
+        required: true
+        type: string
+      qase_run_id:
+        type: string
+      runner_label:
+        required: true
+        type: string
+      rancher_version:
+        type: string
+      test_description:
+        required: true
+        type: string
+      test_type:
+        required: true
+        type: string
+      ui_account:
+        type: string
+      upgrade_image:
+        type: string
+      upgrade_os_channel:
+        type: string
+      upstream_cluster_version:
+        type: string
+
+    # Secrets to set when calling this reusable workflow
+    secrets:
+      qase_api_token:
+
+jobs:
+  cli:
+    if: ${{ inputs.test_type == 'cli' }}
+    uses: ./.github/workflows/master_cli.yaml
+    secrets:
+      qase_api_token: ${{ secrets.qase_api_token }}
+    with:
+      ca_type: ${{ inputs.ca_type }}
+      cert-manager_version: ${{ inputs.cert-manager_version }}
+      cluster_name: ${{ inputs.cluster_name }}
+      iso_boot: ${{ inputs.iso_boot }}
+      k8s_version_to_provision: ${{ inputs.k8s_version_to_provision }}
+      operator_repo: ${{ inputs.operator_repo }}
+      public_dns: ${{ inputs.public_dns }}
+      qase_run_id: ${{ inputs.qase_run_id }}
+      rancher_version: ${{ inputs.rancher_version }}
+      runner_label: ${{ inputs.runner_label }}
+      test_description: ${{ inputs.test_description }}
+      upgrade_image: ${{ inputs.upgrade_image }}
+      upgrade_os_channel: ${{ inputs.upgrade_os_channel }}
+      upstream_cluster_version: ${{ inputs.upstream_cluster_version }}
+
+  ui:
+    if: ${{ inputs.test_type == 'ui' }}
+    uses: ./.github/workflows/master_ui.yaml
+    secrets:
+      qase_api_token: ${{ secrets.qase_api_token }}
+    with:
+      ca_type: ${{ inputs.ca_type }}
+      cert-manager_version: ${{ inputs.cert-manager_version }}
+      cluster_name: ${{ inputs.cluster_name }}
+      cypress_tags: ${{ inputs.cypress_tags }}
+      elemental_ui_version: ${{ inputs.elemental_ui_version }}
+      iso_boot: ${{ inputs.iso_boot }}
+      k8s_version_to_provision: ${{ inputs.k8s_version_to_provision }}
+      operator_repo: ${{ inputs.operator_repo }}
+      proxy: ${{ inputs.proxy }}
+      public_dns: ${{ inputs.public_dns }}
+      qase_run_id: ${{ inputs.qase_run_id }}
+      rancher_version: ${{ inputs.rancher_version }}
+      runner_label: ${{ inputs.runner_label }}
+      test_description: ${{ inputs.test_description }}
+      ui_account: ${{ inputs.ui_account }}
+      upgrade_image: ${{ inputs.upgrade_image }}
+      upgrade_os_channel: ${{ inputs.upgrade_os_channel }}
+      upstream_cluster_version: ${{ inputs.upstream_cluster_version }}
diff --git a/.github/workflows/master_main.yaml b/.github/workflows/master_main.yaml
new file mode 100644
index 000000000..771e74894
--- /dev/null
+++ b/.github/workflows/master_main.yaml
@@ -0,0 +1,229 @@
+# This workflow is a reusable one called by other workflows
+name: (template) Main template
+
+on:
+  workflow_call:
+    # Variables to set when calling this reusable workflow
+    inputs:
+      backup_restore_version:
+        description: Version of backup-restore-operator to use
+        type: string
+      ca_type:
+        description: CA type to use (selfsigned or private)
+        default: selfsigned
+        type: string
+      cert-manager_version:
+        description: Version of cert-manager to use
+        type: string
+      cluster_name:
+        description: Name of the provisioned cluster
+        required: true
+        type: string
+      cypress_tags:
+        description: Tags to filter tests we want to run
+        default: main
+        type: string
+      destroy_runner:
+        description: Destroy the auto-generated self-hosted runner
+        default: true
+        type: boolean
+      elemental_ui_version:
+        description: Version of the elemental ui which will be installed (dev/stable)
+        default: dev
+        type: string
+      iso_boot:
+        description: Choose booting from ISO
+        default: false
+        type: boolean
+      k8s_version_to_provision:
+        description: Name and version of installed K8s distribution
+        required: true
+        type: string
+      operator_repo:
+        description: Elemental operator repository to use
+        type: string
+        default: oci://registry.opensuse.org/isv/rancher/elemental/dev/charts/rancher
+      proxy:
+        description: Deploy a proxy
+        type: string
+      qase_run_id:
+        description: Qase run ID where the results will be reported
+        type: string
+      rancher_version:
+        description: Rancher Manager channel/version/head_version to use for installation
+        default: stable/latest/none
+        type: string
+      runner_template:
+        description: Runner template to use
+        default: elemental-e2e-ci-runner-spot-x86-64-template-n2-standard-16-v5
+        type: string
+      test_description:
+        description: Short description of the test
+        default: Unknown
+        type: string
+      test_type:
+        description: Type of test to run (cli or ui)
+        required: true
+        type: string
+      ui_account:
+        description: Account used to test RBAC role in UI
+        type: string
+      upgrade_image:
+        description: Image to use for the Elemental OS upgrade
+        type: string
+      upgrade_os_channel:
+        description: Channel to use for the Elemental OS upgrade
+        type: string
+      upstream_cluster_version:
+        description: Cluster upstream version where to install Rancher (K3s or RKE2)
+        default: v1.26.10+k3s2
+        type: string
+      zone:
+        description: GCP zone to host the runner
+        default: us-central1-a
+        type: string
+
+    # Secrets to set when calling this reusable workflow
+    secrets:
+      credentials:
+        description: Credentials to use to connect
+        required: true
+      pat_token:
+        # A token is needed to be able to add runner on the repo, maybe this can be changed later
+        # This token is linked to a personal account
+        # So in case of token issue you have to check (no specific order and for example):
+        # - the expiration date
+        # - if the account associated still exists
+        # - if the person still has access to the repo
+        description: PAT token used to add runner
+        required: true
+      qase_api_token:
+        description: Qase API token to use for Qase reporting
+
+jobs:
+  create-runner:
+    uses: ./.github/workflows/master_create-runner.yaml
+    secrets:
+      credentials: ${{ secrets.credentials }}
+      pat_token: ${{ secrets.pat_token }}
+    with:
+      runner_template: ${{ inputs.runner_template }}
+      zone: ${{ inputs.zone }}
+
+  pre-qase:
+    runs-on: ubuntu-latest
+    env:
+      QASE_API_TOKEN: ${{ secrets.qase_api_token }}
+      QASE_PROJECT_CODE: ELEMENTAL
+    outputs:
+      qase_run_description: ${{ steps.qase.outputs.qase_run_description }}
+      qase_run_id: ${{ steps.qase.outputs.qase_run_id }}
+      qase_run_name: ${{ steps.qase.outputs.qase_run_name }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          cache-dependency-path: tests/go.sum
+          go-version-file: tests/go.mod
+
+      - name: Create/Export Qase Run
+        id: qase
+        run: |
+          if ${{ inputs.qase_run_id == 'auto' }}; then
+            # Define and export URL of GH test run in Qase run description
+            GH_RUN_URL="${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
+            QASE_DESC="${{ inputs.test_description }} (${GH_RUN_URL})"
+            export QASE_RUN_DESCRIPTION="${QASE_DESC}"
+
+            # Define and export the Qase run name, as it cannot be done
+            # in 'env:' because GITHUB_WORKFLOW is a shell variable
+            # Export them also to be used locally
+            export QASE_RUN_NAME="${GITHUB_WORKFLOW}"
+
+            # Create a Qase run, get its ID
+            ID=$(cd tests && make create-qase-run)
+
+            # Export outputs for future use
+            echo "qase_run_description=${QASE_DESC}" >> ${GITHUB_OUTPUT}
+            echo "qase_run_id=${ID}" >> ${GITHUB_OUTPUT}
+            echo "qase_run_name=${GITHUB_WORKFLOW}" >> ${GITHUB_OUTPUT}
+
+            # Just an info for debugging purposes
+            echo -e "Exported values:\nQASE_RUN_ID=${ID}\nQASE_RUN_DESCRIPTION=${QASE_DESC}\nQASE_RUN_NAME=${GITHUB_WORKFLOW}"
+          elif ${{ inputs.qase_run_id != '' }}; then
+            # If the run ID has been specified
+            echo "qase_run_id=${{ inputs.qase_run_id }}" >> ${GITHUB_OUTPUT}
+          fi
+
+  e2e:
+    needs: [create-runner, pre-qase]
+    uses: ./.github/workflows/master_e2e.yaml
+    #uses: ./.github/workflows/master_ui.yaml
+    secrets:
+      qase_api_token: ${{ secrets.qase_api_token }}
+    with:
+      ca_type: ${{ inputs.ca_type }}
+      cert-manager_version: ${{ inputs.cert-manager_version }}
+      cluster_name: ${{ inputs.cluster_name }}
+      cypress_tags: ${{ inputs.cypress_tags }}
+      elemental_ui_version: ${{ inputs.elemental_ui_version }}
+      iso_boot: ${{ inputs.iso_boot }}
+      k8s_version_to_provision: ${{ inputs.k8s_version_to_provision }}
+      operator_repo: ${{ inputs.operator_repo }}
+      proxy: ${{ inputs.proxy }}
+      public_dns: ${{ needs.create-runner.outputs.public_dns }}
+      qase_run_id: ${{ needs.pre-qase.outputs.qase_run_id }}
+      rancher_version: ${{ inputs.rancher_version }}
+      runner_label: ${{ needs.create-runner.outputs.runner_label }}
+      test_description: ${{ inputs.test_description }}
+      test_type: ${{ inputs.test_type }}
+      ui_account: ${{ inputs.ui_account }}
+      upgrade_image: ${{ inputs.upgrade_image }}
+      upgrade_os_channel: ${{ inputs.upgrade_os_channel }}
+      upstream_cluster_version: ${{ inputs.upstream_cluster_version }}
+
+  clean-and-delete-runner:
+    if: ${{ always() }}
+    needs: [create-runner, e2e]
+    uses: ./.github/workflows/master_clean-and-delete-runner.yaml
+    secrets:
+      credentials: ${{ secrets.credentials }}
+      pat_token: ${{ secrets.pat_token }}
+    with:
+      create_runner_result: ${{ needs.create-runner.result }}
+      destroy_runner: ${{ inputs.destroy_runner }}
+      runner_hostname: ${{ needs.create-runner.outputs.runner_hostname }}
+      steps_status: ${{ needs.e2e.outputs.steps_status }}
+      runner_label: ${{ needs.create-runner.outputs.runner_label }}
+      zone: ${{ inputs.zone }}
+
+  post-qase:
+    if: ${{ always() && needs.pre-qase.outputs.qase_run_id != '' }}
+    needs: [e2e, pre-qase]
+    runs-on: ubuntu-latest
+    env:
+      QASE_API_TOKEN: ${{ secrets.qase_api_token }}
+      QASE_PROJECT_CODE: ELEMENTAL
+      QASE_REPORT: 1
+      QASE_RUN_COMPLETE: 1
+      QASE_RUN_ID: ${{ needs.pre-qase.outputs.qase_run_id }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Setup Go
+        uses: actions/setup-go@v5
+        with:
+          cache-dependency-path: tests/go.sum
+          go-version-file: tests/go.mod
+
+      - name: Finalize Qase Run and publish Results
+        if: ${{ !contains(needs.e2e.outputs.steps_status, 'cancelled') }}
+        run: cd tests && make publish-qase-run
+
+      - name: Delete Qase Run if job has been cancelled
+        if: ${{ contains(needs.e2e.outputs.steps_status, 'cancelled') }}
+        run: cd tests && make delete-qase-run
+
+  # Just to signify that something has been cancelled and it's not useful to check the test results
+  declare-cancelled:
+    if: ${{ always() && (contains(needs.e2e.outputs.steps_status, 'cancelled') || needs.e2e.result == 'cancelled' || needs.create-runner.result != 'success') }}
+    needs: [create-runner, e2e]
+    runs-on: ubuntu-latest
+    steps:
+      - name: Specify in summary if something has been cancelled
+        run: echo "# TEST CANCELLED!" 
>> ${GITHUB_STEP_SUMMARY} diff --git a/.github/workflows/master_poc.yaml b/.github/workflows/master_poc.yaml deleted file mode 100644 index 0de87c702..000000000 --- a/.github/workflows/master_poc.yaml +++ /dev/null @@ -1,79 +0,0 @@ -# This workflow is a reusable one called by other workflows -name: (template) PoC template - -on: - workflow_call: - # Jobs inputs - inputs: - k8s_version_to_provision: - description: Name and version of installed K8s distribution - required: true - type: string - # Secrets to set when calling this reusable workflow - secrets: - credentials: - description: Credentials to use to connect - required: true - pat_token: - # A token is needed to be able to add runner on the repo, maybe this can be changed later - # This token is linked to a personal account - # So in case of token issue you have to check (no specific order and for example): - # - the expiration date - # - if the account associated still exists - # - if the person still has access to the repo - description: PAT token used to add runner - required: true - -jobs: - create-runner: - uses: ./.github/workflows/master_create-runner.yaml - secrets: inherit - - test-job: - needs: create-runner - runs-on: ${{ needs.create-runner.outputs.uuid }} - outputs: - # For this to work 'id:' in steps are mandatory! - steps_status: ${{ join(steps.*.conclusion, ' ') }} - steps: - - name: Checkout - id: checkout - uses: actions/checkout@v4 - - name: Install Go - id: install_go - uses: actions/setup-go@v5 - with: - cache-dependency-path: tests/go.sum - go-version-file: tests/go.mod - - name: Define needed system variables - id: define_sys_vars - run: | - # Add missing PATH, removed in recent distributions for security reasons... - echo "/usr/local/bin" >> ${GITHUB_PATH} - - name: Get logs and add summary - id: logs_summary - uses: ./.github/actions/logs-and-summary - with: - credentials: ${{ secrets.GCP_CREDENTIALS }} - k8s_version_to_provision: ${{ inputs.k8s_version_to_provision }} - pat_token: ${{ secrets.SELF_HOSTED_RUNNER_PAT_TOKEN }} - - clean-and-delete-runner: - if: ${{ always() }} - needs: [create-runner, test-job] - uses: ./.github/workflows/master_clean-and-delete-runner.yaml - secrets: inherit - with: - create_runner_result: ${{ needs.create-runner.result }} - runner: ${{ needs.create-runner.outputs.runner }} - steps_status: ${{ needs.test-job.outputs.steps_status }} - uuid: ${{ needs.create-runner.outputs.uuid }} - - # Just to signify that something has been cancelled and it's not useful to check - declare-cancelled: - if: ${{ always() && (contains(needs.test-job.outputs.steps_status, 'cancelled') || needs.test-job.result == 'cancelled' || needs.create-runner.result != 'success') }} - needs: [create-runner, test-job] - runs-on: ubuntu-latest - steps: - - name: Specify in summary if something has been cancelled - run: echo "# TEST CANCELLED!" 
diff --git a/.github/workflows/master_ui.yaml b/.github/workflows/master_ui.yaml
new file mode 100644
index 000000000..0f7c3a1c4
--- /dev/null
+++ b/.github/workflows/master_ui.yaml
@@ -0,0 +1,288 @@
+# This workflow is a reusable one called by other workflows
+name: (template) Elemental E2E UI tests
+
+on:
+  workflow_call:
+    # Variables to set when calling this reusable workflow
+    inputs:
+      ca_type:
+        required: true
+        type: string
+      cert-manager_version:
+        required: true
+        type: string
+      cluster_name:
+        required: true
+        type: string
+      cypress_tags:
+        required: true
+        type: string
+      elemental_ui_version:
+        required: true
+        type: string
+      iso_boot:
+        required: true
+        type: boolean
+      k8s_version_to_provision:
+        required: true
+        type: string
+      operator_repo:
+        required: true
+        type: string
+      proxy:
+        required: true
+        type: string
+      public_dns:
+        required: true
+        type: string
+      qase_run_id:
+        required: true
+        type: string
+      rancher_version:
+        required: true
+        type: string
+      runner_label:
+        required: true
+        type: string
+      test_description:
+        required: true
+        type: string
+      ui_account:
+        required: true
+        type: string
+      upgrade_image:
+        required: true
+        type: string
+      upgrade_os_channel:
+        required: true
+        type: string
+      upstream_cluster_version:
+        required: true
+        type: string
+
+    # Secrets to set when calling this reusable workflow
+    secrets:
+      qase_api_token:
+
+jobs:
+  ui:
+    runs-on: ${{ inputs.runner_label }}
+    outputs:
+      # For this to work 'id:' in steps are mandatory!
+      steps_status: ${{ join(steps.*.conclusion, ' ') }}
+    env:
+      CLUSTER_NAME: ${{ inputs.cluster_name }}
+      # For Qase reporting
+      QASE_RUN_ID: ${{ inputs.qase_run_id }}
+      # K3S / RKE2 flags to use for installation
+      INSTALL_K3S_SKIP_ENABLE: true
+      INSTALL_K3S_VERSION: ${{ inputs.upstream_cluster_version }}
+      INSTALL_RKE2_VERSION: ${{ inputs.upstream_cluster_version }}
+      K3S_KUBECONFIG_MODE: 0644
+      # Distribution to use to host Rancher Manager (K3s or RKE2)
+      K8S_UPSTREAM_VERSION: ${{ inputs.upstream_cluster_version }}
+      # For K8s cluster to provision with Rancher Manager
+      K8S_VERSION_TO_PROVISION: ${{ inputs.k8s_version_to_provision }}
+    steps:
+      - name: Checkout
+        id: checkout
+        uses: actions/checkout@v4
+
+      - name: Install Go
+        id: install_go
+        uses: actions/setup-go@v5
+        with:
+          cache-dependency-path: tests/go.sum
+          go-version-file: tests/go.mod
+
+      - name: Define needed system variables
+        id: define_sys_vars
+        run: |
+          # Add missing PATH, removed in recent distributions for security reasons...
+          echo "/usr/local/bin" >> ${GITHUB_PATH}
+
+      - name: Deploy Proxy
+        id: proxy
+        if: ${{ inputs.proxy == 'elemental' || inputs.proxy == 'rancher' }}
+        run: docker run -d --name squid_proxy -v $(pwd)/tests/assets/squid.conf:/etc/squid/squid.conf -p 3128:3128 wernight/squid
+
+      - name: Install Rancher Manager and Elemental
+        id: install_rancher_elemental
+        env:
+          CA_TYPE: ${{ inputs.ca_type }}
+          CERT_MANAGER_VERSION: ${{ inputs.cert-manager_version }}
+          OPERATOR_REPO: ${{ inputs.operator_repo }}
+          PROXY: ${{ inputs.proxy }}
+          PUBLIC_DNS: ${{ inputs.public_dns }}
+          PUBLIC_DOMAIN: bc.googleusercontent.com
+          RANCHER_VERSION: ${{ inputs.rancher_version }}
+        run: cd tests && make e2e-install-rancher
+
+      - name: Extract component versions/information
+        id: component
+        run: |
+          # Extract CertManager version
+          CERT_MANAGER_VERSION=$(kubectl get pod \
+            --namespace cert-manager \
+            -l app=cert-manager \
+            -o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
+          # Extract elemental-operator version
+          OPERATOR_VERSION=$(kubectl get pod \
+            --namespace cattle-elemental-system \
+            -l app=elemental-operator \
+            -o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
+          # Extract Rancher Manager version
+          RANCHER_VERSION=$(kubectl get pod \
+            --namespace cattle-system \
+            -l app=rancher \
+            -o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
+          # Export values
+          echo "cert_manager_version=${CERT_MANAGER_VERSION}" >> ${GITHUB_OUTPUT}
+          echo "operator_version=${OPERATOR_VERSION}" >> ${GITHUB_OUTPUT}
+          echo "rancher_image_version=${RANCHER_VERSION}" >> ${GITHUB_OUTPUT}
+
+      - name: Install Chartmuseum
+        id: install_chartmuseum
+        run: cd tests && make e2e-install-chartmuseum
+
+      # Basics means tests that don't need an extra elemental node
+      - name: Cypress tests - Basics
+        id: cypress_basics
+        env:
+          BROWSER: chrome
+          CHARTMUSEUM_REPO: http://${{ inputs.public_dns }}
+          CYPRESS_DOCKER: 'cypress/included:10.9.0'
+          CYPRESS_TAGS: ${{ inputs.cypress_tags }}
+          ELEMENTAL_UI_VERSION: ${{ inputs.elemental_ui_version }}
+          ISO_BOOT: ${{ inputs.iso_boot }}
+          K8S_UPSTREAM_VERSION: ${{ inputs.upstream_cluster_version }}
+          OPERATOR_REPO: ${{ inputs.operator_repo }}
+          RANCHER_VERSION: ${{ steps.component.outputs.rancher_image_version }}
+          RANCHER_PASSWORD: rancherpassword
+          RANCHER_URL: https://${{ inputs.public_dns }}/dashboard
+          RANCHER_USER: admin
+          SPEC: |
+            /workdir/e2e/unit_tests/first_connection.spec.ts
+            /workdir/e2e/unit_tests/elemental_operator.spec.ts
+            /workdir/e2e/unit_tests/elemental_plugin.spec.ts
+            /workdir/e2e/unit_tests/user.spec.ts
+            /workdir/e2e/unit_tests/menu.spec.ts
+            /workdir/e2e/unit_tests/machine_registration.spec.ts
+            /workdir/e2e/unit_tests/advanced_filtering.spec.ts
+          UI_ACCOUNT: ${{ inputs.ui_account }}
+          UPGRADE_OS_CHANNEL: ${{ inputs.upgrade_os_channel }}
+        run: cd tests && make start-cypress-tests
+
+      - name: Upload Cypress screenshots (Basics)
+        id: upload_screenshots_cypress_basics
+        if: ${{ failure() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: cypress-screenshots-basics-${{ inputs.cluster_name }}
+          path: tests/cypress/latest/screenshots
+          retention-days: 7
+          if-no-files-found: ignore
+
+      - name: Upload Cypress videos (Basics)
+        id: upload_videos_cypress_basics
+        # Test run video is always captured, so this action uses "always()" condition
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: cypress-videos-basics-${{ inputs.cluster_name }}
+          path: tests/cypress/latest/videos
+          retention-days: 7
+
+      - name: Deploy a node to join Rancher Manager
+        id: deploy_node_ui
+        env:
+          ISO_BOOT: ${{ inputs.iso_boot }}
+          VM_INDEX: 1
+          VM_MEM: 8192
+          HOST_MEMORY_RESERVED: 49152
+        run: |
+          cd tests && (
+            # Removing 'downloads' is needed to avoid this error during 'make':
+            # 'pattern all: open .../elemental/tests/cypress/downloads: permission denied'
+            sudo rm -rf cypress/latest/downloads
+            make e2e-ui-rancher
+          )
+
+      # Advanced means tests which need an extra elemental node (provisioned with libvirt)
+      - name: Cypress tests - Advanced
+        id: cypress_advanced
+        env:
+          BROWSER: firefox
+          CHARTMUSEUM_REPO: http://${{ inputs.public_dns }}
+          CYPRESS_DOCKER: 'cypress/included:10.9.0'
+          CYPRESS_TAGS: ${{ inputs.cypress_tags }}
+          ELEMENTAL_UI_VERSION: ${{ inputs.elemental_ui_version }}
+          OPERATOR_REPO: ${{ inputs.operator_repo }}
+          PROXY: ${{ inputs.proxy }}
+          RANCHER_VERSION: ${{ steps.component.outputs.rancher_image_version }}
+          RANCHER_PASSWORD: rancherpassword
+          RANCHER_URL: https://${{ inputs.public_dns }}/dashboard
+          RANCHER_USER: admin
+          SPEC: |
+            /workdir/e2e/unit_tests/machine_selector.spec.ts
+            /workdir/e2e/unit_tests/machine_inventory.spec.ts
+            /workdir/e2e/unit_tests/reset.spec.ts
+            /workdir/e2e/unit_tests/deploy_app.spec.ts
+            /workdir/e2e/unit_tests/upgrade-operator.spec.ts
+            /workdir/e2e/unit_tests/upgrade-ui-extension.spec.ts
+            /workdir/e2e/unit_tests/upgrade.spec.ts
+          UI_ACCOUNT: ${{ inputs.ui_account }}
+          UPGRADE_IMAGE: ${{ inputs.upgrade_image }}
+          UPGRADE_OS_CHANNEL: ${{ inputs.upgrade_os_channel }}
+        run: cd tests && make start-cypress-tests
+
+      - name: Upload Cypress screenshots (Advanced)
+        id: upload_screenshots_cypress_advanced
+        if: ${{ failure() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: cypress-screenshots-advanced-${{ inputs.cluster_name }}
+          path: tests/cypress/latest/screenshots
+          retention-days: 7
+          if-no-files-found: ignore
+
+      - name: Upload Cypress videos (Advanced)
+        id: upload_videos_cypress_advanced
+        # Test run video is always captured, so this action uses "always()" condition
+        if: ${{ always() }}
+        uses: actions/upload-artifact@v4
+        with:
+          name: cypress-videos-advanced-${{ inputs.cluster_name }}
+          path: tests/cypress/latest/videos
+          retention-days: 7
+
+      - name: Extract ISO version
+        id: iso_version
+        if: ${{ always() }}
+        run: |
+          # Extract OS version from ISO
+          ISO=$(file -Ls *.iso 2>/dev/null | awk -F':' '/boot sector/ { print $1 }')
+          if [[ -n "${ISO}" ]]; then
+            INITRD_FILE=$(isoinfo -i ${ISO} -R -find -type f -name initrd -print 2>/dev/null)
+            isoinfo -i ${ISO} -R -x ${INITRD_FILE} 2>/dev/null \
+              | xz -dc \
+              | cpio -i --to-stdout usr/lib/initrd-release > os-release
+            eval $(grep IMAGE_TAG os-release 2>/dev/null)
+          fi
+          # Export value (even if empty!)
+          echo "os_version=${IMAGE_TAG}" >> ${GITHUB_OUTPUT}
+
+      # This step must be called in each workflow that wants a summary!
+      - name: Get logs and add summary
+        id: logs_summary
+        if: ${{ always() }}
+        uses: ./.github/actions/logs-and-summary
+        with:
+          ca_type: ${{ inputs.ca_type }}
+          cert_manager_version: ${{ steps.component.outputs.cert_manager_version }}
+          k8s_version_to_provision: ${{ inputs.k8s_version_to_provision }}
+          operator_version: ${{ steps.component.outputs.operator_version }}
+          os_version: ${{ steps.iso_version.outputs.os_version }}
+          proxy: ${{ inputs.proxy }}
+          rancher_image_version: ${{ steps.component.outputs.rancher_image_version }}
+          test_description: ${{ inputs.test_description }}
diff --git a/.github/workflows/test-new-master.yaml b/.github/workflows/test-new-master.yaml
index 8c8ba4d8c..4e02b7d72 100644
--- a/.github/workflows/test-new-master.yaml
+++ b/.github/workflows/test-new-master.yaml
@@ -4,10 +4,20 @@ on:
   push:
 
 jobs:
-  test-master-poc:
-    uses: ./.github/workflows/master_poc.yaml
+  test-master-main:
+    uses: ./.github/workflows/master_main.yaml
     secrets:
       credentials: ${{ secrets.GCP_CREDENTIALS }}
       pat_token: ${{ secrets.SELF_HOSTED_RUNNER_PAT_TOKEN }}
+      qase_api_token: ${{ secrets.QASE_API_TOKEN_CLI }}
     with:
+      cluster_name: cluster-k3s
+      #cluster_type: ${{ inputs.cluster_type }}
+      #destroy_runner: ${{ inputs.destroy_runner }}
       k8s_version_to_provision: v1.26.10+k3s2
+      #operator_repo: oci://registry.opensuse.org/isv/rancher/elemental/dev/charts/rancher
+      #os_to_test: dev
+      #qase_run_id: ${{ inputs.qase_run_id }}
+      #rancher_version: ${{ inputs.rancher_version }}
+      test_description: "Test new master-main.yaml"
+      test_type: ui
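Note: the sketch below is an illustration only, not part of the patch. All of the reusable workflows above lean on the same pattern: every step carries an explicit 'id:' so the job can aggregate step conclusions with 'join(steps.*.conclusion, ' ')' into a 'steps_status' output, which downstream jobs (post-qase, declare-cancelled, clean-and-delete-runner) then inspect with 'contains()'. A minimal, self-contained sketch of that pattern, with hypothetical workflow, job and step names:

name: steps-status-sketch

on:
  workflow_dispatch:

jobs:
  worker:
    runs-on: ubuntu-latest
    outputs:
      # Space-separated list of every step conclusion, e.g. "success skipped cancelled".
      # Steps without an 'id:' would not be picked up here.
      steps_status: ${{ join(steps.*.conclusion, ' ') }}
    steps:
      - name: First step
        id: first_step
        run: echo "doing some work"

      - name: Second step
        id: second_step
        run: echo "doing some more work"

  report:
    # Runs even if 'worker' failed or was cancelled, mirroring the declare-cancelled job above
    if: ${{ always() }}
    needs: worker
    runs-on: ubuntu-latest
    steps:
      - name: Flag cancellation in the summary
        if: ${{ contains(needs.worker.outputs.steps_status, 'cancelled') }}
        run: echo "# TEST CANCELLED!" >> ${GITHUB_STEP_SUMMARY}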