diff --git a/.github/workflows/cli-k3s-airgap_rm_latest_dev.yaml b/.github/workflows/cli-k3s-airgap_rm_latest_dev.yaml
new file mode 100644
index 000000000..fcc63d2de
--- /dev/null
+++ b/.github/workflows/cli-k3s-airgap_rm_latest_dev.yaml
@@ -0,0 +1,30 @@
+# This workflow calls the master E2E workflow with custom variables
+name: CLI-K3s-Airgap-RM_latest_devel
+
+on:
+ workflow_dispatch:
+ inputs:
+ destroy_runner:
+ description: Destroy the auto-generated self-hosted runner
+ default: true
+ type: boolean
+ schedule:
+ - cron: '0 8 * * *'
+
+jobs:
+ cli:
+ uses: ./.github/workflows/master-airgap.yaml
+ secrets:
+ credentials: ${{ secrets.GCP_CREDENTIALS }}
+ pat_token: ${{ secrets.SELF_HOSTED_RUNNER_PAT_TOKEN }}
+ slack_webhook_url: ${{ secrets.SLACK_WEBHOOK_URL }}
+ with:
+ test_description: "CI - CLI - Airgap K3S - RM Latest Devel"
+ cert-manager_version: v1.12.2
+ cluster_name: airgap-cluster
+ cluster_type: airgap
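+ # Always destroy the runner on scheduled runs; otherwise follow the workflow_dispatch input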
+ destroy_runner: ${{ github.event_name == 'schedule' && true || inputs.destroy_runner }}
+ k8s_version_to_provision: v1.26.8+k3s1
+ os_to_test: dev
+ rancher_version: latest/devel/2.7
+ upstream_cluster_version: 1.26.8
diff --git a/.github/workflows/cli-k3s-airgap_rm_stable.yaml b/.github/workflows/cli-k3s-airgap_rm_stable.yaml
new file mode 100644
index 000000000..4e49b611c
--- /dev/null
+++ b/.github/workflows/cli-k3s-airgap_rm_stable.yaml
@@ -0,0 +1,30 @@
+# This workflow calls the master E2E workflow with custom variables
+name: CLI-K3s-Airgap-RM_stable
+
+on:
+ workflow_dispatch:
+ inputs:
+ destroy_runner:
+ description: Destroy the auto-generated self-hosted runner
+ default: true
+ type: boolean
+ schedule:
+ - cron: '0 8 * * *'
+
+jobs:
+ cli:
+ uses: ./.github/workflows/master-airgap.yaml
+ secrets:
+ credentials: ${{ secrets.GCP_CREDENTIALS }}
+ pat_token: ${{ secrets.SELF_HOSTED_RUNNER_PAT_TOKEN }}
+ slack_webhook_url: ${{ secrets.SLACK_WEBHOOK_URL }}
+ with:
+ test_description: "CI - CLI - Airgap K3S - RM Stable"
+ cert-manager_version: v1.12.2
+ cluster_name: airgap-cluster
+ cluster_type: airgap
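+ # Always destroy the runner on scheduled runs; otherwise follow the workflow_dispatch input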
+ destroy_runner: ${{ github.event_name == 'schedule' && true || inputs.destroy_runner }}
+ k8s_version_to_provision: v1.26.8+k3s1
+ os_to_test: dev
+ rancher_version: stable/latest/2.7
+ upstream_cluster_version: 1.26.8
diff --git a/.github/workflows/master-airgap.yaml b/.github/workflows/master-airgap.yaml
new file mode 100644
index 000000000..5a2504043
--- /dev/null
+++ b/.github/workflows/master-airgap.yaml
@@ -0,0 +1,263 @@
+name: (template) Elemental Airgap E2E tests with Rancher Manager
+
+on:
+ workflow_call:
+ # Variables to set when calling this reusable workflow
+ secrets:
+ credentials:
+ description: Credentials used to connect to GCP
+ required: true
+ pat_token:
+ # A token is needed to add a runner to the repo; maybe this can be changed later.
+ # This token is linked to a personal account, so in case of a token issue check, for example:
+ # - the expiration date
+ # - whether the associated account still exists
+ # - whether that person still has access to the repo
+ description: PAT token used to add runner
+ required: true
+ qase_api_token:
+ description: Qase API token to use for Cypress tests
+ required: false
+ slack_webhook_url:
+ description: WebHook URL to use for Slack
+ required: true
+ inputs:
+ cert-manager_version:
+ description: Version of cert-manager to use
+ type: string
+ cluster_name:
+ description: Name of the provisioned cluster
+ required: true
+ type: string
+ cluster_type:
+ description: Cluster type (empty for a normal cluster, otherwise 'hardened' or 'airgap')
+ type: string
+ destroy_runner:
+ description: Destroy the auto-generated self-hosted runner
+ default: true
+ type: boolean
+ k8s_version_to_provision:
+ description: Name and version of installed K8s distribution
+ required: true
+ type: string
+ operator_repo:
+ description: Elemental operator repository to use
+ type: string
+ default: oci://registry.opensuse.org/isv/rancher/elemental/dev/charts/rancher
+ os_to_test:
+ description: OS repository to test (dev/staging/stable)
+ type: string
+ default: dev
+ qase_run_id:
+ description: Qase run ID where the results will be reported
+ required: false
+ type: string
+ rancher_version:
+ description: Rancher Manager channel/version to use for installation
+ default: stable/latest/2.7
+ type: string
+ test_description:
+ description: Short description of the test
+ type: string
+ test_type:
+ description: Type of test to run (cli or ui)
+ default: airgap
+ type: string
+ upstream_cluster_version:
+ description: Upstream cluster version used to host Rancher Manager (K3s)
+ default: 1.26.8
+ type: string
+ zone:
+ description: GCP zone to host the runner
+ default: us-central1-a
+ type: string
+
+jobs:
+ create-runner:
+ runs-on: ubuntu-latest
+ outputs:
+ uuid: ${{ steps.generator.outputs.uuid }}
+ runner: ${{ steps.generator.outputs.runner }}
+ public_dns: ${{ steps.dns.outputs.public_dns }}
+ steps:
+ # actions/checkout MUST come before auth
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Generate UUID and Runner hostname
+ id: generator
+ run: |
+ UUID=$(uuidgen)
+ echo "uuid=${UUID}" >> ${GITHUB_OUTPUT}
+ echo "runner=elemental-ci-${UUID}" >> ${GITHUB_OUTPUT}
+ - name: Authenticate to GCP
+ uses: google-github-actions/auth@v1
+ with:
+ credentials_json: ${{ secrets.credentials }}
+ - name: Setup gcloud
+ uses: google-github-actions/setup-gcloud@v1
+ - name: Create runner
+ run: |
+ gcloud compute instances create ${{ steps.generator.outputs.runner }} \
+ --source-instance-template elemental-e2e-ci-runner-spot-x86-64-template-n2-standard-16-v4 \
+ --zone ${{ inputs.zone }}
+ - name: Create PAT token secret
+ run: |
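+ # The PAT is stored in GCP Secret Manager so the runner VM can fetch it at boot and register itself with the repo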
+ echo -n ${{ secrets.pat_token }} \
+ | gcloud secrets create PAT_TOKEN_${{ steps.generator.outputs.uuid }} --data-file=-
+ - name: Get public dns name in GCP
+ id: dns
+ run: |
+ # Loop with a timeout, as gcloud can sometimes fail
+ typeset -i i=0
+ while true; do
+ # Get public IP
+ PUBLIC_IP=$(gcloud compute instances list 2> /dev/null \
+ | awk '/${{ steps.generator.outputs.runner }}/ {print $6}')
+ # Exit if we reach the timeout or if IP is set
+ if (( ++i > 10 )) || [[ -n "${PUBLIC_IP}" ]]; then
+ break
+ fi
+ # Wait a little before retrying
+ sleep 2
+ done
+ # Get the public DNS
+ PUBLIC_DNS=$(host -l ${PUBLIC_IP} 2> /dev/null \
+ | awk '{sub(/\.$/, ""); print $5}')
+ echo "public_dns=${PUBLIC_DNS}" >> ${GITHUB_OUTPUT}
+ # Raise an error if the IP and/or the DNS name is empty
+ if [[ -z "${PUBLIC_IP}" || -z "${PUBLIC_DNS}" ]]; then
+ echo "PUBLIC_IP and/or PUBLIC_DNS are empty!" >&2
+ false
+ fi
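+ # This job runs on the self-hosted runner created above, which registers itself with the generated UUID as label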
+ e2e:
+ needs: create-runner
+ runs-on: ${{ needs.create-runner.outputs.uuid }}
+ env:
+ ARCH: amd64
+ CERT_MANAGER_VERSION: ${{ inputs.cert-manager_version }}
+ CLUSTER_NAME: ${{ inputs.cluster_name }}
+ CLUSTER_NS: fleet-default
+ CLUSTER_TYPE: ${{ inputs.cluster_type }}
+ # Distribution used to host Rancher Manager (K3s)
+ K8S_UPSTREAM_VERSION: ${{ inputs.upstream_cluster_version }}
+ # K8s version for the cluster(s) provisioned by Rancher Manager
+ K8S_VERSION_TO_PROVISION: ${{ inputs.k8s_version_to_provision }}
+ # QASE variables
+ QASE_API_TOKEN: ${{ secrets.qase_api_token }}
+ QASE_RUN_ID: ${{ inputs.qase_run_id }}
+ # For Rancher Manager
+ RANCHER_VERSION: ${{ inputs.rancher_version }}
+ TEST_TYPE: ${{ inputs.test_type }}
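+ # Scale up the default test timeouts, as the airgap setup is slower than a standard run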
+ TIMEOUT_SCALE: 3
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Install Go
+ uses: actions/setup-go@v3
+ with:
+ go-version-file: tests/go.mod
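+ # Build the archive (K3s, Helm charts and container images) that will be copied to the air-gapped VM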
+ - name: Prepare the archive file to send to air-gapped nodes
+ run: cd tests && make e2e-prepare-archive
+ - name: Deploy airgap infrastructure
+ run: cd tests && make e2e-airgap-rancher
+ - name: Configure Rancher & Libvirt
+ run: cd tests && make e2e-configure-rancher
+ - name: Extract component versions/information
+ id: component
+ run: |
+ # Extract rancher-backup-operator version
+ BACKUP_RESTORE_VERSION=$(kubectl get pod \
+ --namespace cattle-resources-system \
+ -l app.kubernetes.io/name=rancher-backup \
+ -o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
+ # Extract CertManager version
+ CERT_MANAGER_VERSION=$(kubectl get pod \
+ --namespace cert-manager \
+ -l app=cert-manager \
+ -o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
+ # Extract elemental-operator version
+ OPERATOR_VERSION=$(kubectl get pod \
+ --namespace cattle-elemental-system \
+ -l app=elemental-operator \
+ -o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
+ # Extract Rancher Manager version
+ RM_VERSION=$(kubectl get pod \
+ --namespace cattle-system \
+ -l app=rancher \
+ -o jsonpath={.items[*].status.containerStatuses[*].image} 2> /dev/null || true)
+ # Export values
+ echo "backup_restore_version=${BACKUP_RESTORE_VERSION}" >> ${GITHUB_OUTPUT}
+ echo "cert_manager_version=${CERT_MANAGER_VERSION}" >> ${GITHUB_OUTPUT}
+ echo "operator_version=${OPERATOR_VERSION}" >> ${GITHUB_OUTPUT}
+ echo "rm_version=${RM_VERSION}" >> ${GITHUB_OUTPUT}
+ - name: Create ISO image for master pool
+ env:
+ EMULATE_TPM: true
+ OS_TO_TEST: ${{ inputs.os_to_test }}
+ POOL: master
+ run: cd tests && ISO_BOOT=true make e2e-iso-image
+ - name: Bootstrap nodes 1, 2 and 3 in pool "master" (use emulated TPM if possible)
+ env:
+ EMULATE_TPM: true
+ POOL: master
+ VM_START: 1
+ VM_END: 3
+ run: cd tests && VM_INDEX=${VM_START} VM_NUMBERS=${VM_END} ISO_BOOT=true make e2e-bootstrap-node
+ - name: Add summary
+ if: ${{ always() }}
+ run: |
+ # Add summary
+ echo "## General information" >> ${GITHUB_STEP_SUMMARY}
+ echo -e "***${{ inputs.test_description }}***\n" >> ${GITHUB_STEP_SUMMARY}
+ echo "Type of cluster deployed: ${CLUSTER_TYPE:-normal}" >> ${GITHUB_STEP_SUMMARY}
+ echo "### Rancher Manager" >> ${GITHUB_STEP_SUMMARY}
+ echo "Rancher Manager Image: ${{ steps.component.outputs.rm_version }}" >> ${GITHUB_STEP_SUMMARY}
+ echo "Rancher Manager Version: ${{ inputs.rancher_version }}" >> ${GITHUB_STEP_SUMMARY}
+ echo "CertManager Image: ${{ steps.component.outputs.cert_manager_version }}" >> ${GITHUB_STEP_SUMMARY}
+ echo "### Elemental" >> ${GITHUB_STEP_SUMMARY}
+ echo "Elemental ISO image: ${{ inputs.os_to_test }}" >> ${GITHUB_STEP_SUMMARY}
+ echo "Elemental OS version: ${{ steps.iso_version.outputs.image_tag }}" >> ${GITHUB_STEP_SUMMARY}
+ echo "Elemental Operator Image: ${{ steps.component.outputs.operator_version }}" >> ${GITHUB_STEP_SUMMARY}
+ echo "Elemental Backup/Restore Operator Image: ${{ steps.component.outputs.backup_restore_version }}" >> ${GITHUB_STEP_SUMMARY}
+ echo "### Kubernetes" >> ${GITHUB_STEP_SUMMARY}
+ echo "K3s on Rancher Manager: ${{ env.INSTALL_K3S_VERSION }}" >> ${GITHUB_STEP_SUMMARY}
+ echo "K8s version deployed on the cluster(s): ${{ inputs.k8s_version_to_provision }}" >> ${GITHUB_STEP_SUMMARY}
+ clean-runner:
+ if: ${{ always() }}
+ needs: [create-runner, e2e]
+ runs-on: ubuntu-latest
+ steps:
+ # actions/checkout MUST come before auth
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Authenticate to GCP
+ uses: google-github-actions/auth@v1
+ with:
+ credentials_json: ${{ secrets.credentials }}
+ - name: Setup gcloud
+ uses: google-github-actions/setup-gcloud@v1
+ - name: Delete GCP secrets
+ run: |
+ gcloud --quiet secrets delete PAT_TOKEN_${{ needs.create-runner.outputs.uuid }} || true
+ gcloud --quiet secrets delete GH_REPO_${{ needs.create-runner.outputs.uuid }} || true
+ delete-runner:
+ if: ${{ always() && needs.create-runner.result == 'success' && inputs.destroy_runner == true }}
+ needs: [create-runner, clean-runner]
+ runs-on: ubuntu-latest
+ steps:
+ # actions/checkout MUST come before auth
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Authenticate to GCP
+ uses: google-github-actions/auth@v1
+ with:
+ credentials_json: ${{ secrets.credentials }}
+ - name: Setup gcloud
+ uses: google-github-actions/setup-gcloud@v1
+ - name: Delete runner
+ run: |
+ gcloud --quiet compute instances delete ${{ needs.create-runner.outputs.runner }} \
+ --delete-disks all \
+ --zone ${{ inputs.zone }}
\ No newline at end of file
diff --git a/README.md b/README.md
index a0e4aa650..5f005d9ba 100644
--- a/README.md
+++ b/README.md
@@ -24,6 +24,9 @@
| [![UI-K3s-OS-Upgrade](https://github.com/rancher/elemental/actions/workflows/ui-k3s-os-upgrade-rm_stable.yaml/badge.svg?branch=main)](https://github.com/rancher/elemental/actions/workflows/ui-k3s-os-upgrade-rm_stable.yaml) | [![UI-K3s-OS-Upgrade](https://github.com/rancher/elemental/actions/workflows/ui-k3s-os-upgrade-rm_head_2.7.yaml/badge.svg?branch=main)](https://github.com/rancher/elemental/actions/workflows/ui-k3s-os-upgrade-rm_head_2.7.yaml) | [![UI-K3s-OS-Upgrade](https://github.com/rancher/elemental/actions/workflows/ui-k3s-os-upgrade-rm_head_2.8.yaml/badge.svg?branch=main)](https://github.com/rancher/elemental/actions/workflows/ui-k3s-os-upgrade-rm_head_2.8.yaml) | [![UI-K3s-OS-Upgrade](https://github.com/rancher/elemental/actions/workflows/ui-k3s-os-upgrade-rm_head_2.9.yaml/badge.svg?branch=main)](https://github.com/rancher/elemental/actions/workflows/ui-k3s-os-upgrade-rm_head_2.9.yaml)
| [![UI-RKE2-OS-Upgrade](https://github.com/rancher/elemental/actions/workflows/ui-rke2-os-upgrade-rm_stable.yaml/badge.svg?branch=main)](https://github.com/rancher/elemental/actions/workflows/ui-rke2-os-upgrade-rm_stable.yaml) | [![UI-RKE2-OS-Upgrade](https://github.com/rancher/elemental/actions/workflows/ui-rke2-os-upgrade-rm_head_2.7.yaml/badge.svg?branch=main)](https://github.com/rancher/elemental/actions/workflows/ui-rke2-os-upgrade-rm_head_2.7.yaml) | [![UI-RKE2-OS-Upgrade](https://github.com/rancher/elemental/actions/workflows/ui-rke2-os-upgrade-rm_head_2.8.yaml/badge.svg?branch=main)](https://github.com/rancher/elemental/actions/workflows/ui-rke2-os-upgrade-rm_head_2.8.yaml) | [![UI-RKE2-OS-Upgrade](https://github.com/rancher/elemental/actions/workflows/ui-rke2-os-upgrade-rm_head_2.9.yaml/badge.svg?branch=main)](https://github.com/rancher/elemental/actions/workflows/ui-rke2-os-upgrade-rm_head_2.9.yaml)
+## Airgap tests
+[![CLI-K3s-Airgap-RM_latest](https://github.com/rancher/elemental/actions/workflows/cli-k3s-airgap_rm_latest_dev.yaml/badge.svg?branch=main)](https://github.com/rancher/elemental/actions/workflows/cli-k3s-airgap_rm_latest_dev.yaml) [![CLI-K3s-Airgap-RM_stable](https://github.com/rancher/elemental/actions/workflows/cli-k3s-airgap_rm_stable.yaml/badge.svg?branch=main)](https://github.com/rancher/elemental/actions/workflows/cli-k3s-airgap_rm_stable.yaml)
+
Elemental is a software stack enabling a centralized, full cloud-native OS management solution with Kubernetes.
Cluster Node OSes are built and maintained via container images through the [Elemental Toolkit](https://rancher.github.io/elemental-toolkit/) and installed on new hosts using the [Elemental CLI](https://github.com/rancher/elemental-cli).
diff --git a/tests/Makefile b/tests/Makefile
index 8f1f902f7..a235f0b55 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -45,6 +45,8 @@ generate-readme:
@./scripts/generate-readme > README.md
# E2E tests
+e2e-airgap-rancher: deps
+ ginkgo --label-filter airgap-rancher -r -v ./e2e
e2e-bootstrap-node: deps
ginkgo --timeout $(GINKGO_TIMEOUT)s --label-filter bootstrap -r -v ./e2e
@@ -81,6 +83,9 @@ e2e-multi-cluster: deps
e2e-reset: deps
ginkgo --label-filter reset -r -v ./e2e
+e2e-prepare-archive: deps
+ ginkgo --label-filter prepare-archive -r -v ./e2e
+
e2e-ui-rancher: deps
ginkgo --label-filter ui -r -v ./e2e
diff --git a/tests/assets/cluster-airgap.yaml b/tests/assets/cluster-airgap.yaml
new file mode 100644
index 000000000..919258c1b
--- /dev/null
+++ b/tests/assets/cluster-airgap.yaml
@@ -0,0 +1,39 @@
+kind: Cluster
+apiVersion: provisioning.cattle.io/v1
+metadata:
+ name: %CLUSTER_NAME%
+ # namespace: fleet-default
+spec:
+ rkeConfig:
+ etcd:
+ disableSnapshots: true
+ machineGlobalConfig:
+ cni: canal
+ disable:
+ - rke2-metrics-server
+ - metrics-server
+ etcd-expose-metrics: false
+ profile: null
+ machinePools:
+ - controlPlaneRole: true
+ etcdRole: true
+ machineConfigRef:
+ apiVersion: elemental.cattle.io/v1beta1
+ kind: MachineInventorySelectorTemplate
+ name: selector-master-%CLUSTER_NAME%
+ name: pool-master-%CLUSTER_NAME%
+ quantity: 0
+ unhealthyNodeTimeout: 0s
+ workerRole: true
+ machineSelectorConfig:
+ - config:
+ protect-kernel-defaults: false
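+ # Point the provisioned cluster at the local registry served from the Rancher Manager VM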
+ registries:
+ configs:
+ rancher-manager.test:5000:
+ insecureSkipVerify: true
+ mirrors:
+ rancher-manager.test:5000:
+ endpoint:
+ - http://rancher-manager.test:5000
+ kubernetesVersion: %K8S_VERSION%
diff --git a/tests/assets/net-default-airgap.xml b/tests/assets/net-default-airgap.xml
new file mode 100644
index 000000000..8af7c7e06
--- /dev/null
+++ b/tests/assets/net-default-airgap.xml
@@ -0,0 +1,18 @@
+<!-- libvirt NAT network with a DNS entry and a static DHCP lease for the Rancher Manager VM -->
+<network>
+  <name>default</name>
+  <forward mode='nat'/>
+  <bridge name='virbr0' stp='on' delay='0'/>
+  <dns>
+    <host ip='192.168.122.102'>
+      <hostname>rancher-manager.test</hostname>
+    </host>
+  </dns>
+  <ip address='192.168.122.1' netmask='255.255.255.0'>
+    <dhcp>
+      <range start='192.168.122.2' end='192.168.122.254'/>
+      <host mac='52:54:00:00:00:10' ip='192.168.122.102'/>
+    </dhcp>
+  </ip>
+</network>
diff --git a/tests/e2e/airgap_test.go b/tests/e2e/airgap_test.go
new file mode 100644
index 000000000..01675514a
--- /dev/null
+++ b/tests/e2e/airgap_test.go
@@ -0,0 +1,228 @@
+/*
+Copyright © 2022 - 2023 SUSE LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e_test
+
+import (
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ "github.com/rancher-sandbox/ele-testhelpers/kubectl"
+ "github.com/rancher-sandbox/ele-testhelpers/rancher"
+ "github.com/rancher-sandbox/ele-testhelpers/tools"
+)
+
+var _ = Describe("E2E - Build the airgap archive", Label("prepare-archive"), func() {
+ It("Execute the script to build the archive", func() {
+ err := exec.Command("sudo", airgapBuildScript, k8sUpstreamVersion, certManagerVersion, rancherChannel, k8sVersion).Run()
+ Expect(err).To(Not(HaveOccurred()))
+ })
+})
+
+var _ = Describe("E2E - Deploy K3S/Rancher in airgap environment", Label("airgap-rancher"), func() {
+ It("Create the rancher-manager machine", func() {
+ By("Updating the default network configuration", func() {
+ // Don't check return code, as the default network could be already removed
+ for _, c := range []string{"net-destroy", "net-undefine"} {
+ _ = exec.Command("sudo", "virsh", c, "default").Run()
+ }
+
+ // Wait a bit between virsh commands
+ time.Sleep(1 * time.Minute)
+ err := exec.Command("sudo", "virsh", "net-create", netDefaultAirgapFileName).Run()
+ Expect(err).To(Not(HaveOccurred()))
+ })
+
+ By("Downloading the qcow2 image from GCP storage", func() {
+ err := exec.Command("/opt/google-cloud-sdk/bin/gcloud", "storage", "cp", "gs://elemental-airgap-image/rancher-image.qcow2", os.Getenv("HOME")+"/rancher-image.qcow2").Run()
+ Expect(err).To(Not(HaveOccurred()))
+ })
+
+ By("Creating the Rancher Manager VM", func() {
+ err := exec.Command("sudo", "virt-install",
+ "--name", "rancher-manager",
+ "--memory", "4096",
+ "--vcpus", "2",
+ "--disk", "path="+os.Getenv("HOME")+"/rancher-image.qcow2,bus=sata",
+ "--import",
+ "--os-variant", "opensuse-unknown",
+ "--network=default,mac=52:54:00:00:00:10",
+ "--noautoconsole").Run()
+ Expect(err).To(Not(HaveOccurred()))
+ })
+ })
+
+ It("Install K3S/Rancher in the rancher-manager machine", func() {
+ userName := "root"
+ password := "root"
+ client := &tools.Client{
+ Host: "192.168.122.102:22",
+ Username: userName,
+ Password: password,
+ }
+
+ // Get the version of the Elemental Operator
+ out, err := exec.Command("bash", "-c", "ls /opt/rancher/helm/elemental-operator-chart-*.tgz | cut -d '-' -f 4").Output()
+ Expect(err).To(Not(HaveOccurred()))
+ elementalVersion := strings.Trim(string(out), "\n")
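+ // Note: the extracted field keeps the ".tgz" suffix, which the chart paths below rely on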
+
+ // Create kubectl context
+ // Default timeout is too small, so New() cannot be used
+ k := &kubectl.Kubectl{
+ Namespace: "",
+ PollTimeout: tools.SetTimeout(300 * time.Second),
+ PollInterval: 500 * time.Millisecond,
+ }
+
+ By("Sending the archive file into the rancher server", func() {
+ // Make sure SSH is available
+ Eventually(func() string {
+ out, _ := client.RunSSH("echo SSH_OK")
+ out = strings.Trim(out, "\n")
+ return out
+ }, tools.SetTimeout(10*time.Minute), 5*time.Second).Should(Equal("SSH_OK"))
+
+ // Send the airgap archive
+ err := client.SendFile("/opt/airgap_rancher.zst", "/opt/airgap_rancher.zst", "0644")
+ Expect(err).To(Not(HaveOccurred()))
+
+ // Extract the airgap archive
+ _, err = client.RunSSH("mkdir /opt/rancher; tar -I zstd -vxf /opt/airgap_rancher.zst -C /opt/rancher")
+ Expect(err).To(Not(HaveOccurred()))
+ })
+
+ By("Deploying airgap infrastructure by executing the deploy script", func() {
+ _, err := client.RunSSH("/opt/rancher/k3s_" + k8sUpstreamVersion + "/deploy-airgap " + k8sUpstreamVersion + " " + certManagerVersion)
+ Expect(err).To(Not(HaveOccurred()))
+ })
+
+ By("Getting the kubeconfig file of the airgap cluster", func() {
+ // Define local Kubeconfig file
+ localKubeconfig := os.Getenv("HOME") + "/.kube/config"
+ err := os.Mkdir(os.Getenv("HOME")+"/.kube", 0755)
+ Expect(err).To(Not(HaveOccurred()))
+ err = client.GetFile(localKubeconfig, "/etc/rancher/k3s/k3s.yaml", 0644)
+ Expect(err).To(Not(HaveOccurred()))
+ err = os.Setenv("KUBECONFIG", localKubeconfig)
+ Expect(err).To(Not(HaveOccurred()))
+
+ // Replace localhost with the IP of the VM
+ err = tools.Sed("127.0.0.1", "192.168.122.102", localKubeconfig)
+ Expect(err).To(Not(HaveOccurred()))
+ })
+
+ By("Installing kubectl", func() {
+ // TODO: Variable for kubectl version
+ err := exec.Command("curl", "-LO", "https://dl.k8s.io/release/v1.28.2/bin/linux/amd64/kubectl").Run()
+ Expect(err).To(Not(HaveOccurred()))
+ err = exec.Command("chmod", "+x", "kubectl").Run()
+ Expect(err).To(Not(HaveOccurred()))
+ err = exec.Command("sudo", "mv", "kubectl", "/usr/local/bin/").Run()
+ Expect(err).To(Not(HaveOccurred()))
+ })
+
+ By("Installing CertManager", func() {
+ // Set flags for cert-manager installation
+ flags := []string{
+ "upgrade", "--install", "cert-manager", "/opt/rancher/helm/cert-manager-" + certManagerVersion + ".tgz",
+ "--namespace", "cert-manager",
+ "--create-namespace",
+ "--set", "image.repository=rancher-manager.test:5000/cert/cert-manager-controller",
+ "--set", "webhook.image.repository=rancher-manager.test:5000/cert/cert-manager-webhook",
+ "--set", "cainjector.image.repository=rancher-manager.test:5000/cert/cert-manager-cainjector",
+ "--set", "startupapicheck.image.repository=rancher-manager.test:5000/cert/cert-manager-ctl",
+ "--set", "installCRDs=true",
+ "--wait", "--wait-for-jobs",
+ }
+
+ RunHelmCmdWithRetry(flags...)
+
+ checkList := [][]string{
+ {"cert-manager", "app.kubernetes.io/component=controller"},
+ {"cert-manager", "app.kubernetes.io/component=webhook"},
+ {"cert-manager", "app.kubernetes.io/component=cainjector"},
+ }
+ err := rancher.CheckPod(k, checkList)
+ Expect(err).To(Not(HaveOccurred()))
+ })
+
+ By("Installing Rancher", func() {
+ // TODO: Use the DeployRancherManager function from install.go
+ rancherAirgapVersion, err := exec.Command("bash", "-c", "ls /opt/rancher/helm/rancher-*.tgz").Output()
+ Expect(err).To(Not(HaveOccurred()))
+ // Set flags for Rancher Manager installation
+ flags := []string{
+ "upgrade", "--install", "rancher", strings.TrimSpace(string(rancherAirgapVersion)),
+ //"upgrade", "--install", "rancher", "/opt/rancher/helm/rancher-" + rancherHeadVersion + ".tgz",
+ "--namespace", "cattle-system",
+ "--create-namespace",
+ "--set", "hostname=rancher-manager.test",
+ "--set", "extraEnv[0].name=CATTLE_SERVER_URL",
+ "--set", "extraEnv[0].value=https://rancher-manager.test",
+ "--set", "extraEnv[1].name=CATTLE_BOOTSTRAP_PASSWORD",
+ "--set", "extraEnv[1].value=rancherpassword",
+ "--set", "replicas=1",
+ "--set", "useBundledSystemChart=true",
+ "--set", "rancherImage=rancher-manager.test:5000/rancher/rancher",
+ "--set", "systemDefaultRegistry=rancher-manager.test:5000",
+ }
+
+ RunHelmCmdWithRetry(flags...)
+
+ // Wait for all pods to be started
+ checkList := [][]string{
+ {"cattle-system", "app=rancher"},
+ {"cattle-fleet-local-system", "app=fleet-agent"},
+ {"cattle-system", "app=rancher-webhook"},
+ }
+ err = rancher.CheckPod(k, checkList)
+ Expect(err).To(Not(HaveOccurred()))
+ })
+
+ By("Installing Elemental Operator", func() {
+ // Install Elemental Operator CRDs first
+ // Set flags for Elemental Operator CRDs installation
+ flags := []string{
+ "upgrade", "--install", "elemental-crds", "/opt/rancher/helm/elemental-operator-crds-chart-" + elementalVersion,
+ "--namespace", "cattle-elemental-system",
+ "--create-namespace",
+ }
+
+ RunHelmCmdWithRetry(flags...)
+ time.Sleep(20 * time.Second)
+
+ // Set flags for Elemental Operator installation
+ flags = []string{
+ "upgrade", "--install", "elemental", "/opt/rancher/helm/elemental-operator-chart-" + elementalVersion,
+ "--namespace", "cattle-elemental-system",
+ "--create-namespace",
+ "--set", "image.repository=rancher-manager.test:5000/elemental/elemental-operator",
+ "--set", "registryUrl=",
+ "--set", "seedImage.repository=rancher-manager.test:5000/elemental/seedimage-builder",
+ "--set", "channel.repository=rancher-manager.test:5000/elemental/elemental-channel",
+ "--wait", "--wait-for-jobs",
+ }
+
+ RunHelmCmdWithRetry(flags...)
+
+ // Wait for pod to be started
+ err := rancher.CheckPod(k, [][]string{{"cattle-elemental-system", "app=elemental-operator"}})
+ Expect(err).To(Not(HaveOccurred()))
+ })
+ })
+})
diff --git a/tests/e2e/configure_test.go b/tests/e2e/configure_test.go
index 7bbaaf88a..d20229c79 100644
--- a/tests/e2e/configure_test.go
+++ b/tests/e2e/configure_test.go
@@ -17,6 +17,7 @@ package e2e_test
import (
"os"
"os/exec"
+ "strings"
"time"
. "github.com/onsi/ginkgo/v2"
@@ -140,17 +141,18 @@ var _ = Describe("E2E - Configure test", Label("configure"), func() {
CheckCreatedRegistration(clusterNS, "machine-registration-"+pool+"-"+clusterName)
}
})
+ if !strings.Contains(clusterType, "airgap") {
+ By("Starting default network", func() {
+ // Don't check return code, as the default network could be already removed
+ for _, c := range []string{"net-destroy", "net-undefine"} {
+ _ = exec.Command("sudo", "virsh", c, "default").Run()
+ }
- By("Starting default network", func() {
- // Don't check return code, as the default network could be already removed
- for _, c := range []string{"net-destroy", "net-undefine"} {
- _ = exec.Command("sudo", "virsh", c, "default").Run()
- }
-
- // Wait a bit between virsh commands
- time.Sleep(1 * time.Minute)
- err := exec.Command("sudo", "virsh", "net-create", netDefaultFileName).Run()
- Expect(err).To(Not(HaveOccurred()))
- })
+ // Wait a bit between virsh commands
+ time.Sleep(1 * time.Minute)
+ err := exec.Command("sudo", "virsh", "net-create", netDefaultFileName).Run()
+ Expect(err).To(Not(HaveOccurred()))
+ })
+ }
})
})
diff --git a/tests/e2e/install_test.go b/tests/e2e/install_test.go
index 839814376..fc57d6941 100644
--- a/tests/e2e/install_test.go
+++ b/tests/e2e/install_test.go
@@ -203,7 +203,7 @@ var _ = Describe("E2E - Install Rancher Manager", Label("install"), func() {
}
if clusterType == "hardened" {
- flags = append(flags, "--version", CertManagerVersion)
+ flags = append(flags, "--version", certManagerVersion)
}
RunHelmCmdWithRetry(flags...)
diff --git a/tests/e2e/seedImage_test.go b/tests/e2e/seedImage_test.go
index 9a6de853d..5a9503f20 100644
--- a/tests/e2e/seedImage_test.go
+++ b/tests/e2e/seedImage_test.go
@@ -51,6 +51,10 @@ var _ = Describe("E2E - Creating ISO image", Label("iso-image"), func() {
Expect(err).To(Not(HaveOccurred()))
Expect(baseImageURL).To(Not(BeEmpty()))
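+ // In airgap mode the ISO comes from the local registry (version currently hardcoded)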
+ if clusterType == "airgap" {
+ baseImageURL = "localhost:5000/elemental/sle-micro-iso-5.5:2.1.0"
+ }
+
// Set poweroff to false for master pool to have time to check SeedImage cloud-config
if poolType == "master" && isoBoot {
_, err := kubectl.Run("patch", "MachineRegistration",
diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go
index 9ccae8d5f..0c06cf1d3 100644
--- a/tests/e2e/suite_test.go
+++ b/tests/e2e/suite_test.go
@@ -31,34 +31,36 @@ import (
)
const (
- appYaml = "../assets/hello-world_app.yaml"
- backupYaml = "../assets/backup.yaml"
- ciTokenYaml = "../assets/local-kubeconfig-token-skel.yaml"
- configPrivateCAScript = "../scripts/config-private-ca"
- configRKE2Yaml = "../assets/config_rke2.yaml"
- dumbRegistrationYaml = "../assets/dumb_machineRegistration.yaml"
- emulateTPMYaml = "../assets/emulateTPM.yaml"
- getOSScript = "../scripts/get-name-from-managedosversion"
- httpSrv = "http://192.168.122.1:8000"
- installConfigYaml = "../../install-config.yaml"
- installHardenedScript = "../scripts/config-hardened"
- installVMScript = "../scripts/install-vm"
- localKubeconfigYaml = "../assets/local-kubeconfig-skel.yaml"
- netDefaultFileName = "../assets/net-default.xml"
- numberOfNodesMax = 30
- resetMachineInv = "../assets/reset_machine_inventory.yaml"
- restoreYaml = "../assets/restore.yaml"
- upgradeSkelYaml = "../assets/upgrade_skel.yaml"
- userName = "root"
- userPassword = "r0s@pwd1"
- vmNameRoot = "node"
+ airgapBuildScript = "../scripts/build-airgap"
+ appYaml = "../assets/hello-world_app.yaml"
+ backupYaml = "../assets/backup.yaml"
+ ciTokenYaml = "../assets/local-kubeconfig-token-skel.yaml"
+ configPrivateCAScript = "../scripts/config-private-ca"
+ configRKE2Yaml = "../assets/config_rke2.yaml"
+ dumbRegistrationYaml = "../assets/dumb_machineRegistration.yaml"
+ emulateTPMYaml = "../assets/emulateTPM.yaml"
+ getOSScript = "../scripts/get-name-from-managedosversion"
+ httpSrv = "http://192.168.122.1:8000"
+ installConfigYaml = "../../install-config.yaml"
+ installHardenedScript = "../scripts/config-hardened"
+ installVMScript = "../scripts/install-vm"
+ localKubeconfigYaml = "../assets/local-kubeconfig-skel.yaml"
+ netDefaultFileName = "../assets/net-default.xml"
+ netDefaultAirgapFileName = "../assets/net-default-airgap.xml"
+ numberOfNodesMax = 30
+ resetMachineInv = "../assets/reset_machine_inventory.yaml"
+ restoreYaml = "../assets/restore.yaml"
+ upgradeSkelYaml = "../assets/upgrade_skel.yaml"
+ userName = "root"
+ userPassword = "r0s@pwd1"
+ vmNameRoot = "node"
)
var (
arch string
backupRestoreVersion string
caType string
- CertManagerVersion string
+ certManagerVersion string
clusterName string
clusterNS string
clusterType string
@@ -403,7 +405,7 @@ var _ = BeforeSuite(func() {
arch = os.Getenv("ARCH")
backupRestoreVersion = os.Getenv("BACKUP_RESTORE_VERSION")
caType = os.Getenv("CA_TYPE")
- CertManagerVersion = os.Getenv("CERT_MANAGER_VERSION")
+ certManagerVersion = os.Getenv("CERT_MANAGER_VERSION")
clusterName = os.Getenv("CLUSTER_NAME")
clusterNS = os.Getenv("CLUSTER_NS")
clusterType = os.Getenv("CLUSTER_TYPE")
@@ -516,6 +518,11 @@ var _ = BeforeSuite(func() {
selectorYaml = "../assets/selector.yaml"
}
+ // Enable airgap support if needed
+ if testType == "airgap" {
+ clusterYaml = "../assets/cluster-airgap.yaml"
+ }
+
// Start HTTP server
tools.HTTPShare("../..", ":8000")
})
diff --git a/tests/scripts/build-airgap b/tests/scripts/build-airgap
new file mode 100755
index 000000000..8694c521a
--- /dev/null
+++ b/tests/scripts/build-airgap
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+# Build Airgap
+
+set -e -x
+
+K3S_UPSTREAM_VERSION=$1
+CERT_MANAGER_VERSION=$2
+RANCHER_CHANNEL=$3
+K3S_DOWNSTREAM_VERSION=$4
+
+# Create directories
+mkdir -p /opt/rancher/{k3s_$K3S_UPSTREAM_VERSION,helm} /opt/rancher/images/{cert,rancher,registry,elemental}
+cd /opt/rancher/k3s_$K3S_UPSTREAM_VERSION/
+
+# Install packages
+zypper --no-refresh -n in zstd skopeo yq
+
+# Add rancher-manager.test to /etc/hosts
+echo "192.168.122.102 rancher-manager.test" | sudo tee -a /etc/hosts > /dev/null
+
+# Download k3s and rancher
+curl -#OL https://github.com/k3s-io/k3s/releases/download/v$K3S_UPSTREAM_VERSION%2Bk3s1/k3s-airgap-images-amd64.tar.zst
+curl -#OL https://github.com/k3s-io/k3s/releases/download/v$K3S_UPSTREAM_VERSION%2Bk3s1/k3s
+
+# Get the install script
+curl -sfL https://get.k3s.io/ -o install.sh
+
+# Get the airgap deploy script
+cp /home/gh-runner/actions-runner/_work/elemental/elemental/tests/scripts/deploy-airgap .
+
+# Get Helm Charts
+cd /opt/rancher/helm/
+
+# Add repos
+helm repo add jetstack https://charts.jetstack.io > /dev/null 2>&1
+helm repo add rancher-$RANCHER_CHANNEL https://releases.rancher.com/server-charts/$RANCHER_CHANNEL > /dev/null 2>&1
+helm repo update > /dev/null 2>&1
+
+# Get charts
+helm pull jetstack/cert-manager --version $CERT_MANAGER_VERSION > /dev/null 2>&1
+if [[ "$RANCHER_CHANNEL" == "latest" ]]; then
+ helm pull rancher-$RANCHER_CHANNEL/rancher --devel > /dev/null 2>&1
+else
+ helm pull rancher-$RANCHER_CHANNEL/rancher > /dev/null 2>&1
+fi
+
+# Get rancher manager version
+RANCHER_MANAGER_VERSION=$(ls rancher*| cut -d '-' -f '2-3')
+helm pull oci://registry.opensuse.org/isv/rancher/elemental/dev/charts/rancher/elemental-operator-chart > /dev/null 2>&1
+helm pull oci://registry.opensuse.org/isv/rancher/elemental/dev/charts/rancher/elemental-operator-crds-chart > /dev/null 2>&1
+
+# Temporary thing to get latest version of elemental-teal-iso (very ugly...)
+wget https://raw.githubusercontent.com/fgiudici/elemental-operator/airgap-allow-only-channel-creation/scripts/elemental-airgap.sh
+chmod +x elemental-airgap.sh
+./elemental-airgap.sh -r localhost:5000 -sa dev
+
+# Get Images - Rancher/Elemental
+cd /opt/rancher/images/
+
+# Rancher image list
+curl -#L https://github.com/rancher/rancher/releases/download/v${RANCHER_MANAGER_VERSION%.*}/rancher-images.txt -o rancher/orig_rancher-images.txt
+
+# Fix the Docker official images (busybox, registry)
+# by prefixing them with 'library/'
+sed -i -e '0,/busybox/s/busybox/library\/busybox/' -e 's/registry/library\/registry/g' rancher/orig_rancher-images.txt
+
+# We need to keep the following images
+IMAGES_LIST="mirrored-cluster-api-controller mirrored-pause mirrored-coredns-coredns mirrored-library-traefik"
+for i in $IMAGES_LIST; do
+ IMAGES+="$(grep $i rancher/orig_rancher-images.txt)\n"
+done
+
+# Remove images that are not needed or overlap with other lists
+sed -i -E '/neuvector|minio|gke|aks|eks|sriov|harvester|mirrored|longhorn|thanos|tekton|istio|multus|hyper|jenkins|windows/d' rancher/orig_rancher-images.txt
+echo -e $IMAGES >> rancher/orig_rancher-images.txt
+
+# Keep only the latest tag of each remaining image
+for i in $(cat rancher/orig_rancher-images.txt|awk -F: '{print $1}'); do
+ grep -w $i rancher/orig_rancher-images.txt | sort -Vr| head -1 >> rancher/version_unsorted.txt
+done
+# Except for rancher/kubectl
+grep 'rancher/kubectl' rancher/orig_rancher-images.txt >> rancher/version_unsorted.txt
+grep "rancher/system-agent-installer-k3s:${K3S_DOWNSTREAM_VERSION%+*}" rancher/orig_rancher-images.txt >> rancher/version_unsorted.txt
+
+# Final sort
+cat rancher/version_unsorted.txt | sort -u > rancher/rancher-images.txt
+
+# Cert-manager image list
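+# 'helm template --kube-version' renders the chart offline, so the referenced images can be listed without a cluster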
+helm template --kube-version=1.22 /opt/rancher/helm/cert-manager-$CERT_MANAGER_VERSION.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g > cert/cert-manager-images.txt
+
+# Elemental image list
+helm template --kube-version=1.22 /opt/rancher/helm/elemental-operator-chart-*.tgz | awk '$1 ~ /image:/ {print $2}' | sed s/\"//g > elemental/elemental-images.txt
+helm template --kube-version=1.22 /opt/rancher/helm/elemental-operator-chart-*.tgz|grep 'seedimage-builder' | awk '{print $2}' >> elemental/elemental-images.txt
+
+# Temporary thing to get latest version of elemental-teal-iso (very ugly...)
+grep 'registry.opensuse.*sle-micro-iso' /opt/rancher/helm/elemental-images.txt >> elemental/elemental-images.txt
+
+# Get images
+# Skopeo - cert-manager
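+# Archive names are derived from the image "name:tag" path component, with ':' replaced by '_'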
+for i in $(cat cert/cert-manager-images.txt); do
+ skopeo copy docker://$i docker-archive:cert/$(echo $i| awk -F/ '{print $3}'|sed 's/:/_/g').tar:$(echo $i| awk -F/ '{print $3}') > /dev/null 2>&1
+done
+
+# Skopeo - Elemental
+for i in $(cat elemental/elemental-images.txt); do
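+ # The ISO image sits one path level deeper on registry.opensuse.org, hence the extra path component in its archive name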
+ if grep -q 'sle-micro-iso' <<< "$i"; then
+ skopeo copy docker://$i docker-archive:elemental/$(echo $i| awk -F/ '{print $8"-"$9}'|sed 's/:/_/g').tar:$(echo $i| awk -F/ '{print $8"-"$9}') > /dev/null 2>&1
+ else
+ skopeo copy docker://$i docker-archive:elemental/$(echo $i| awk -F/ '{print $8}'|sed 's/:/_/g').tar:$(echo $i| awk -F/ '{print $8}') > /dev/null 2>&1
+ fi
+done
+
+# Skopeo - Rancher
+for i in $(cat rancher/rancher-images.txt); do
+ skopeo copy docker://$i docker-archive:rancher/$(echo $i| awk -F/ '{print $2}'|sed 's/:/_/g').tar:$(echo $i| awk -F/ '{print $2}') > /dev/null 2>&1
+done
+
+# TODO: improve how to fetch the registry image
+curl -#L https://github.com/clemenko/rke_airgap_install/raw/main/registry.tar -o registry/registry.tar > /dev/null 2>&1
+
+# Compress all the things
+cd /opt/rancher/
+tar -I zstd -vcf /opt/airgap_rancher.zst $(ls) > /dev/null 2>&1
diff --git a/tests/scripts/deploy-airgap b/tests/scripts/deploy-airgap
new file mode 100755
index 000000000..d1eff3d62
--- /dev/null
+++ b/tests/scripts/deploy-airgap
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+set -e -x
+
+K3S_UPSTREAM_VERSION=$1
+CERT_MANAGER_VERSION=$2
+
+# Install k3s
+cd /opt/rancher/k3s_$K3S_UPSTREAM_VERSION
+sudo mkdir -p /var/lib/rancher/k3s/agent/images/ /etc/rancher/k3s
+sudo cp k3s-airgap-images-amd64.tar.zst /var/lib/rancher/k3s/agent/images/
+sudo chmod +x k3s install.sh
+sudo cp k3s /usr/local/bin/
+
+# Add registry configuration
+cat <> ~/.bashrc
+source ~/.bashrc
+
+# Run local registry
+cat <