diff --git a/ansible/playbooks/infra-deploy.yaml b/ansible/playbooks/infra-deploy.yaml index e9e95935..bff9857b 100644 --- a/ansible/playbooks/infra-deploy.yaml +++ b/ansible/playbooks/infra-deploy.yaml @@ -507,7 +507,6 @@ os_keypair_name: "{{ os_network_name }}-keypair" # ansible_ssh_common_args: "-F {{ lookup('env', 'HOME') }}/.ssh/{{ os_keypair_name }}.config" ansible_ssh_private_key_file: "{{ lookup('env', 'HOME') }}/.ssh/{{ os_keypair_name }}.key" - genestack_product: openstack-flex tasks: - name: Create ssh directory on jump host ansible.builtin.file: @@ -605,8 +604,6 @@ msg: "This will install ansible, collections, etc." - name: Genestack bootstrap command: /opt/genestack/bootstrap.sh - environment: - GENESTACK_PRODUCT: "{{ genestack_product }}" - name: Source Genestack venv via .bashrc ansible.builtin.lineinfile: path: /root/.bashrc diff --git a/bootstrap.sh b/bootstrap.sh index 42ece723..7049e01d 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -21,10 +21,6 @@ cd "${BASEDIR}" || error "Could not change to ${BASEDIR}" source scripts/lib/functions.sh -# Set GENESTACK_PRODUCT to 'genestack' -GENESTACK_PRODUCT="genestack" -export GENESTACK_PRODUCT - set -e success "Environment variables:" @@ -50,12 +46,11 @@ test -L "$GENESTACK_CONFIG" 2>&1 || mkdir -p "${GENESTACK_CONFIG}" # Set config test -f "$GENESTACK_CONFIG/provider" || echo "${K8S_PROVIDER}" > "${GENESTACK_CONFIG}/provider" -test -f "$GENESTACK_CONFIG/product" || echo "${GENESTACK_PRODUCT}" > "${GENESTACK_CONFIG}/product" mkdir -p "$GENESTACK_CONFIG/inventory/group_vars" "${GENESTACK_CONFIG}/inventory/credentials" # Copy default k8s config PRODUCT_DIR="ansible/inventory/genestack" -if [ "$(find ${GENESTACK_CONFIG}/inventory -name \*.yaml -o -name \*.yml 2>/dev/null | wc -l)" -eq 0 ]; then +if [ "$(find "${GENESTACK_CONFIG}/inventory" -name \*.yaml -o -name \*.yml 2>/dev/null | wc -l)" -eq 0 ]; then cp -r "${PRODUCT_DIR}"/* "${GENESTACK_CONFIG}/inventory" fi @@ -63,9 +58,9 @@ fi test -d 
"$GENESTACK_CONFIG/gateway-api" || cp -a "${BASEDIR}/etc/gateway-api" "$GENESTACK_CONFIG"/ # Create venv and prepare Ansible -python3 -m venv ~/.venvs/genestack -~/.venvs/genestack/bin/pip install pip --upgrade -source ~/.venvs/genestack/bin/activate && success "Switched to venv ~/.venvs/genestack" +python3 -m venv "${HOME}/.venvs/genestack" +"${HOME}/.venvs/genestack/bin/pip" install pip --upgrade +source "${HOME}/.venvs/genestack/bin/activate" && success "Switched to venv ~/.venvs/genestack" pip install -r "${BASEDIR}/requirements.txt" && success "Installed ansible package" ansible-playbook "${BASEDIR}/scripts/get-ansible-collection-requirements.yml" \ -e collections_file="${ANSIBLE_COLLECTION_FILE}" \ diff --git a/docs/adding-new-node.md b/docs/adding-new-node.md index 78e8c4c9..8083953a 100644 --- a/docs/adding-new-node.md +++ b/docs/adding-new-node.md @@ -7,19 +7,20 @@ Lets assume we are adding one new worker node: `computegpu001.p40.example.com` a 1. Add the node to your ansible inventory file ```shell - vim /etc/genestack/inventory/openstack-flex-inventory.ini + vim /etc/genestack/inventory/inventory.yaml ``` 2. Ensure hostname is correctly set and hosts file has 127.0.0.1 entry 3. Run scale.yaml to add the node to your cluster ```shell - ansible-playbook -i /etc/genestack/inventory/openstack-flex-inventory.yaml scale.yml --limit computegpu001.p40.example.com --become + source /opt/genestack/scripts/genestack.rc + ansible-playbook scale.yml --limit compute-12481.rackerlabs.dev.local --become ``` Once step 3 competes succesfully, validate that the node is up and running in the cluster ```shell - kubectl get nodes | grep computegpu001.p40.example.com + kubectl get nodes | grep compute-12481.rackerlabs.dev.local ``` ### PreferNoSchedule Taint @@ -35,7 +36,7 @@ pods and the Nova VMs therein. !!! 
tip "Setting this is a matter of architerural preference:" ```shell - kubectl taint nodes computegpu001.p40.example.com key1=value1:PreferNoSchedule + kubectl taint nodes compute-12481.rackerlabs.dev.local key1=value1:PreferNoSchedule ``` ## Adding the node in openstack @@ -45,16 +46,16 @@ labels and annotations. 1. Export the nodes to add ```shell - export NODES='computegpu001.p40.example.com' + export NODES='compute-12481.rackerlabs.dev.local' ``` 2. For compute node add the following labels ```shell # Label the openstack compute nodes - kubectl label node computegpu001.p40.example.com openstack-compute-node=enabled + kubectl label node compute-12481.rackerlabs.dev.local openstack-compute-node=enabled # With OVN we need the compute nodes to be "network" nodes as well. While they will be configured for networking, they wont be gateways. - kubectl label node computegpu001.p40.example.com openstack-network-node=enabled + kubectl label node compute-12481.rackerlabs.dev.local openstack-network-node=enabled ``` 3. Add the right annotations to the node diff --git a/docs/build-test-envs.md b/docs/build-test-envs.md index d4698ad5..038c534b 100644 --- a/docs/build-test-envs.md +++ b/docs/build-test-envs.md @@ -123,10 +123,10 @@ The lab deployment playbook will build an environment suitable for running Genes ### SSH to lab -If you have not set your .ssh config do not forget to put in your path for your openstack-flex-keypair. Your Ip will be present after running the infra-deploy.yaml. +If you have not set your .ssh config do not forget to put in your path for your openstack-keypair. Your Ip will be present after running the infra-deploy.yaml. 
``` shell -ssh -i /path/to/.ssh/openstack-flex-keypair.key ubuntu@X.X.X.X +ssh -i /path/to/.ssh/openstack-keypair.key ubuntu@X.X.X.X ``` diff --git a/docs/genestack-getting-started.md b/docs/genestack-getting-started.md index 2018b976..e59a5af6 100644 --- a/docs/genestack-getting-started.md +++ b/docs/genestack-getting-started.md @@ -14,9 +14,6 @@ git clone --recurse-submodules -j4 https://github.com/rackerlabs/genestack /opt/ The basic setup requires ansible, ansible collection and helm installed to install Kubernetes and OpenStack Helm: -The environment variable `GENESTACK_PRODUCT` is used to bootstrap specific configurations and alters playbook handling. -It is persisted at /etc/genestack/product` for subsequent executions, it only has to be used once. - ``` shell /opt/genestack/bootstrap.sh ``` @@ -25,6 +22,6 @@ It is persisted at /etc/genestack/product` for subsequent executions, it only ha If running this command with `sudo`, be sure to run with `-E`. `sudo -E /opt/genestack/bootstrap.sh`. This will ensure your active environment is passed into the bootstrap command. -Once the bootstrap is completed the default Kubernetes provider will be configured inside `/etc/genestack/provider` +Once the bootstrap is completed the default Kubernetes provider will be configured inside `/etc/genestack/provider` and currently defaults to kubespray. The ansible inventory is expected at `/etc/genestack/inventory` diff --git a/docs/k8s-config.md b/docs/k8s-config.md index f65c6617..77a8276b 100644 --- a/docs/k8s-config.md +++ b/docs/k8s-config.md @@ -1,7 +1,12 @@ # Retrieving the Kube Config +!!! note + This step is optional once the `setup-kubernetes.yml` playbook has been used to deploy Kubernetes + Once the environment is online, proceed to login to the environment and begin the deployment normally. You'll find the launch node has everything needed, in the places they belong, to get the environment online. + + ## Install `kubectl` Install the `kubectl` tool. 
@@ -34,7 +39,7 @@ Retrieve the kube config from our first controller. ``` shell mkdir -p ~/.kube -rsync -e "ssh -F ${HOME}/.ssh/openstack-flex-keypair.config" \ +rsync -e "ssh -F ${HOME}/.ssh/openstack-keypair.config" \ --rsync-path="sudo rsync" \ -avz ubuntu@X.X.X.X:/root/.kube/config "${HOME}/.kube/config" ``` diff --git a/docs/k8s-kubespray.md b/docs/k8s-kubespray.md index 16d26392..b1882308 100644 --- a/docs/k8s-kubespray.md +++ b/docs/k8s-kubespray.md @@ -2,10 +2,6 @@ Currently only the k8s provider kubespray is supported and included as submodule into the code base. -!!! info - - Existing OpenStack Ansible inventory can be converted using the `/opt/genestack/scripts/convert_osa_inventory.py` script which provides a `hosts.yml` - ### Before you Deploy Kubespray will be using OVN for all of the network functions, as such, you will need to ensure your hosts are ready to receive the deployment at a low level. @@ -42,13 +38,13 @@ you will need to prepare your networking infrastructure and basic storage layout A default inventory file for kubespray is provided at `/etc/genestack/inventory` and must be modified. -Checkout the [openstack-flex/prod-inventory-example.yaml](https://github.com/rackerlabs/genestack/blob/main/ansible/inventory/openstack-flex/inventory.yaml.example) file for an example of a target environment. +Checkout the [inventory.yaml.example](https://github.com/rackerlabs/genestack/blob/main/ansible/inventory/genestack/inventory.yaml.example) file for an example of a target environment. !!! note Before you deploy the kubernetes cluster you should define the `kube_override_hostname` option in your inventory. This variable will set the node name which we will want to be an FQDN. When you define the option, it should have the same suffix defined in our `cluster_name` variable. -However, any Kubespray compatible inventory will work with this deployment tooling. 
The official [Kubespray documentation](https://kubespray.io) can be used to better understand the inventory options and requirements. Within the `ansible/playbooks/inventory` directory there is a directory named `openstack-flex` and `openstack-enterprise`. These directories provide everything we need to run a successful Kubernetes environment for genestack at scale. The difference between **enterprise** and **flex** are just target environment types. +However, any Kubespray compatible inventory will work with this deployment tooling. The official [Kubespray documentation](https://kubespray.io) can be used to better understand the inventory options and requirements. ### Ensure systems have a proper FQDN Hostname @@ -56,8 +52,8 @@ Before running the Kubernetes deployment, make sure that all hosts have a proper ``` shell source /opt/genestack/scripts/genestack.rc -ansible -i /etc/genestack/inventory/openstack-flex-inventory.ini -m shell -a 'hostnamectl set-hostname {{ inventory_hostname }}' --become all -ansible -i /etc/genestack/inventory/openstack-flex-inventory.ini -m shell -a "grep 127.0.0.1 /etc/hosts | grep -q {{ inventory_hostname }} || sed -i 's/^127.0.0.1.*/127.0.0.1 {{ inventory_hostname }} localhost.localdomain localhost/' /etc/hosts" --become all +ansible -m shell -a 'hostnamectl set-hostname {{ inventory_hostname }}' --become all +ansible -m shell -a "grep 127.0.0.1 /etc/hosts | grep -q {{ inventory_hostname }} || sed -i 's/^127.0.0.1.*/127.0.0.1 {{ inventory_hostname }} localhost.localdomain localhost/' /etc/hosts" --become all ``` !!! note @@ -65,7 +61,7 @@ ansible -i /etc/genestack/inventory/openstack-flex-inventory.ini -m shell -a "gr In the above command I'm assuming the use of `cluster.local` this is the default **cluster_name** as defined in the group_vars k8s_cluster file. If you change that option, make sure to reset your domain name on your hosts accordingly. 
-The ansible inventory is expected at `/etc/genestack/inventory` +The ansible inventory is expected at `/etc/genestack/inventory` and automatically loaded once `genestack.rc` is sourced. ### Prepare hosts for installation @@ -76,7 +72,7 @@ cd /opt/genestack/ansible/playbooks !!! note - The RC file sets a number of environment variables that help ansible to run in a more easily to understand way. + The rc file sets a number of environment variables that help ansible to run in an easier to understand way. While the `ansible-playbook` command should work as is with the sourced environment variables, sometimes it's necessary to set some overrides on the command line. The following example highlights a couple of overrides that are generally useful. @@ -89,50 +85,29 @@ ansible-playbook host-setup.yml #### Example host setup playbook with overrides -Confirm openstack-flex-inventory.yaml matches what is in /etc/genestack/inventory. If it does not match update the command to match the file names. +Confirm `inventory.yaml` matches what is in `/etc/genestack/inventory`. If it does not match, update the command to match the file names. ``` shell +source /opt/genestack/scripts/genestack.rc # Example overriding things on the CLI -ansible-playbook host-setup.yml --inventory /etc/genestack/inventory/openstack-flex-inventory.ini \ - --private-key ${HOME}/.ssh/openstack-flex-keypair.key -``` - -### Run the cluster deployment - -This is used to deploy kubespray against infra on an OpenStack cloud. If you're deploying on baremetal you will need to setup an inventory that meets your environmental needs. - -Change the directory to the kubespray submodule. 
- -``` shell -cd /opt/genestack/submodules/kubespray +ansible-playbook host-setup.yml ``` -Source your environment variables +The `private-key` option can be used to instruct ansible to use a custom SSH key for the SSH connection ``` shell -source /opt/genestack/scripts/genestack.rc + --private-key ${HOME}/.ssh/openstack-keypair.key ``` -!!! note - - The RC file sets a number of environment variables that help ansible to run in a more easy to understand way. - -Once the inventory is updated and configuration altered (networking etc), the Kubernetes cluster can be initialized with - -``` shell -ansible-playbook cluster.yml -``` +### Run the cluster deployment -The cluster deployment playbook can also have overrides defined to augment how the playbook is executed. -Confirm openstack-flex-inventory.yaml matches what is in /etc/genestack/inventory. If it does not match update the command to match the file names. +This is used to deploy kubespray against infra on an OpenStack cloud. If you're deploying on baremetal you will need to setup an inventory that meets your environmental needs. +The playbook `setup-kubernetes.yml` is used to invoke the selected provider installation and label and configure a kube config: ``` shell -ansible-playbook --inventory /etc/genestack/inventory/openstack-flex-inventory.ini \ - --private-key /home/ubuntu/.ssh/openstack-flex-keypair.key \ - --user ubuntu \ - --become \ - cluster.yml +source /opt/genestack/scripts/genestack.rc +ansible-playbook setup-kubernetes.yml ``` !!! tip diff --git a/docs/k8s-labels.md b/docs/k8s-labels.md index 5aa3d7ed..1e173374 100644 --- a/docs/k8s-labels.md +++ b/docs/k8s-labels.md @@ -1,7 +1,8 @@ # Label all of the nodes in the environment -To use the K8S environment for OpenStack all of the nodes MUST be labeled. The following Labels will be used within your environment. -Make sure you label things accordingly. 
+The labeling of nodes is automated as part of the `setup-kubernetes.yml` playbook based on ansible groups. +To understand how the k8s labels are used, they are defined as follows; automation and documented deployment +steps build on top of the labels referenced here: !!! note diff --git a/docs/multi-region-support.md b/docs/multi-region-support.md index d0032a6a..1798a1b3 100644 --- a/docs/multi-region-support.md +++ b/docs/multi-region-support.md @@ -30,18 +30,18 @@ The structure may look something like: !!! example ``` ├── my-genestack-configs - │ ├── sjc + │ ├── region1 │ │ ├── inventory - │ │ │ ├── my-sjc-inventory.ini + │ │ │ ├── inventory.yaml │ │ ├── helm-configs │ │ │ ├── nova - │ │ │ │ ├── my-custom-sjc-nova-helm-overrides.yaml - │ ├── dfw + │ │ │ │ ├── region1-custom-nova-helm-overrides.yaml + │ ├── region2 │ │ ├── inventory - │ │ │ ├── my-dfw-inventory.ini + │ │ │ ├── inventory.yaml │ │ ├── helm-configs │ │ │ ├── nova - │ │ │ │ ├── my-custom-dfw-nova-helm-overrides.yaml + │ │ │ │ ├── region2-custom-nova-helm-overrides.yaml └── .gitignore ``` @@ -68,7 +68,7 @@ For our example we just want to override the cpu_allocation as they are differen Create the override files within the respective structure as noted above with the contents of: -!!! example "my-custom-sjc-nova-helm-overrides.yaml" +!!! example "region1-custom-nova-helm-overrides.yaml" ``` conf: nova: @@ -76,7 +76,7 @@ Create the override files within the respective structure as noted above with th cpu_allocation_ratio: 8.0 ``` -!!! example "my-custom-dfw-nova-helm-overrides.yaml" +!!! example "region2-custom-nova-helm-overrides.yaml" ``` conf: nova: @@ -95,7 +95,7 @@ For the rest of the workflow example we'll be working with the `sjc` environment !!! 
example "symlink the repo" ``` shell - ln -s /opt/my-genestack-configs/sjc /etc/genestack + ln -s /opt/my-genestack-configs/region1 /etc/genestack ``` This will make our `/etc/genestack` directory look like: @@ -103,10 +103,10 @@ This will make our `/etc/genestack` directory look like: !!! example "/etc/genestack/" ``` ├── inventory - │ │ ├── my-sjc-inventory.ini + │ │ ├── inventory.yaml ├── helm-configs │ ├── nova - │ │ ├── my-custom-sjc-nova-helm-overrides.yaml + │ │ ├── region1-custom-nova-helm-overrides.yaml ``` #### Running helm @@ -127,7 +127,7 @@ helm upgrade --install nova ./nova \ --namespace=openstack \ --timeout 120m \ -f /etc/genestack/helm-configs/nova/nova-helm-overrides.yaml \ - -f /etc/genestack/helm-configs/nova/my-custom-sjc-nova-helm-overrides.yaml \ + -f /etc/genestack/helm-configs/nova/region1-custom-nova-helm-overrides.yaml \ --set conf.nova.neutron.metadata_proxy_shared_secret="$(kubectl --namespace openstack get secret metadata-shared-secret -o jsonpath='{.data.password}' | base64 -d)" \ --set endpoints.identity.auth.admin.password="$(kubectl --namespace openstack get secret keystone-admin -o jsonpath='{.data.password}' | base64 -d)" \ --set endpoints.identity.auth.nova.password="$(kubectl --namespace openstack get secret nova-admin -o jsonpath='{.data.password}' | base64 -d)" \ diff --git a/docs/openstack-cinder-lvmisci.md b/docs/openstack-cinder-lvmisci.md index 99e7c60f..41dbf773 100644 --- a/docs/openstack-cinder-lvmisci.md +++ b/docs/openstack-cinder-lvmisci.md @@ -89,13 +89,13 @@ ansible-playbook -i inventory-example.yaml deploy-cinder-volumes-reference.yaml Once the playbook has finished executing, check the cinder api to verify functionality. 
``` shell -root@openstack-flex-node-0:~# kubectl --namespace openstack exec -ti openstack-admin-client -- openstack volume service list -+------------------+-------------------------------------------------+------+---------+-------+----------------------------+ -| Binary | Host | Zone | Status | State | Updated At | -+------------------+-------------------------------------------------+------+---------+-------+----------------------------+ -| cinder-scheduler | cinder-volume-worker | nova | enabled | up | 2023-12-26T17:43:07.000000 | -| cinder-volume | openstack-flex-node-4.cluster.local@lvmdriver-1 | nova | enabled | up | 2023-12-26T17:43:04.000000 | -+------------------+-------------------------------------------------+------+---------+-------+----------------------------+ +root@openstack-node-0:~# kubectl --namespace openstack exec -ti openstack-admin-client -- openstack volume service list ++------------------+--------------------------------------------+------+---------+-------+----------------------------+ +| Binary | Host | Zone | Status | State | Updated At | ++------------------+--------------------------------------------+------+---------+-------+----------------------------+ +| cinder-scheduler | cinder-volume-worker | nova | enabled | up | 2023-12-26T17:43:07.000000 | +| cinder-volume | openstack-node-4.cluster.local@lvmdriver-1 | nova | enabled | up | 2023-12-26T17:43:04.000000 | ++------------------+--------------------------------------------+------+---------+-------+----------------------------+ ``` !!! note @@ -106,7 +106,7 @@ At this point it would be a good time to define your types within cinder. For ou type so that we can schedule volumes to our environment. 
``` shell -root@openstack-flex-node-0:~# kubectl --namespace openstack exec -ti openstack-admin-client -- openstack volume type create lvmdriver-1 +root@openstack-node-0:~# kubectl --namespace openstack exec -ti openstack-admin-client -- openstack volume type create lvmdriver-1 +-------------+--------------------------------------+ | Field | Value | +-------------+--------------------------------------+ @@ -122,7 +122,7 @@ root@openstack-flex-node-0:~# kubectl --namespace openstack exec -ti openstack-a If wanted, create a test volume to tinker with ``` shell -root@openstack-flex-node-0:~# kubectl --namespace openstack exec -ti openstack-admin-client -- openstack volume create --size 1 test +root@openstack-node-0:~# kubectl --namespace openstack exec -ti openstack-admin-client -- openstack volume create --size 1 test +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ @@ -148,7 +148,7 @@ root@openstack-flex-node-0:~# kubectl --namespace openstack exec -ti openstack-a | user_id | 2ddf90575e1846368253474789964074 | +---------------------+--------------------------------------+ -root@openstack-flex-node-0:~# kubectl --namespace openstack exec -ti openstack-admin-client -- openstack volume list +root@openstack-node-0:~# kubectl --namespace openstack exec -ti openstack-admin-client -- openstack volume list +--------------------------------------+------+-----------+------+-------------+ | ID | Name | Status | Size | Attached to | +--------------------------------------+------+-----------+------+-------------+ @@ -159,7 +159,7 @@ root@openstack-flex-node-0:~# kubectl --namespace openstack exec -ti openstack-a You can validate the environment is operational by logging into the storage nodes to validate the LVM targets are being created. 
``` shell -root@openstack-flex-node-4:~# lvs +root@openstack-node-4:~# lvs LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert c744af27-fb40-4ffa-8a84-b9f44cb19b2b cinder-volumes-1 -wi-a----- 1.00g ``` diff --git a/docs/prometheus-custom-node-metrics.md b/docs/prometheus-custom-node-metrics.md index ebaa971a..ad450567 100644 --- a/docs/prometheus-custom-node-metrics.md +++ b/docs/prometheus-custom-node-metrics.md @@ -15,12 +15,12 @@ ansible-playbook custom_exporters.yml #### Example custom exporter playbook with overrides -Confirm openstack-flex-inventory.yaml matches what is in /etc/genestack/inventory. If it does not match update the command to match the file names. +Confirm `inventory.yaml` matches what is in /etc/genestack/inventory. If it does not match update the command to match the file names. ``` shell # Example overriding things on the CLI -ansible-playbook custom_exporters.yml --inventory /etc/genestack/inventory/openstack-flex-inventory.yaml \ - --private-key ${HOME}/.ssh/openstack-flex-keypair.key +source /opt/genestack/scripts/genestack.rc +ansible-playbook custom_exporters.yml --private-key ${HOME}/.ssh/openstack-keypair.key ``` Once the scripts run the node exporter will collect your metrics and supply them to prometheus for you to view. 
diff --git a/etc/netplan/openstack-enterprise.yaml b/etc/netplan/openstack-enterprise.yaml deleted file mode 100644 index 0642c087..00000000 --- a/etc/netplan/openstack-enterprise.yaml +++ /dev/null @@ -1,87 +0,0 @@ ---- -network: - version: 2 - ethernets: - em49: - mtu: 9000 - p4p1: - mtu: 9000 - bonds: - bond0: - interfaces: [ em49, p4p1 ] - parameters: - mode: 802.3ad - lacp-rate: fast - transmit-hash-policy: layer2+3 - mii-monitor-interval: 100 - dhcp4: false - mtu: 9000 - bridges: - br-bond0: - dhcp4: false - mtu: 1500 - interfaces: - - bond0 - br-host: - dhcp4: false - mtu: 1500 - interfaces: - - vlan1000 - addresses: [ 10.240.0.51/22 ] - nameservers: - addresses: [ 1.1.1.1, 1.0.0.1 ] - routes: - - to: 0.0.0.0/0 - via: 10.240.0.1 - metric: 500 - br-storage: - dhcp4: false - mtu: 9000 - interfaces: - - vlan1030 - addresses: [ 172.29.244.51/22 ] - br-repl: - dhcp4: false - mtu: 9000 - interfaces: - - vlan1040 - addresses: [ 172.29.248.51/22 ] - br-ovs: - dhcp4: false - mtu: 9000 - interfaces: - - vlan1020 - addresses: [ 172.29.240.51/22 ] - br-pxe: - dhcp4: false - mtu: 1500 - interfaces: - - vlan1050 - addresses: [ 172.23.208.5/22 ] - openvswitch: {} - vlans: - vlan1000: - id: 1000 - link: bond0 - dhcp4: false - mtu: 1500 - vlan1020: - id: 1020 - link: bond0 - dhcp4: false - mtu: 9000 - vlan1030: - id: 1030 - link: bond0 - dhcp4: false - mtu: 9000 - vlan1040: - id: 1040 - link: bond0 - dhcp4: false - mtu: 9000 - vlan1050: - id: 1050 - link: bond0 - dhcp4: false - mtu: 1050 diff --git a/scripts/genestack.rc b/scripts/genestack.rc index 88dfd97c..66e99073 100644 --- a/scripts/genestack.rc +++ b/scripts/genestack.rc @@ -13,13 +13,6 @@ export USER_COLLECTION_FILE=${USER_COLLECTION_FILE:-"$(readlink -f ${BASEDIR}/us test -f "${GENESTACK_CONFIG}/provider" 2>/dev/null && export K8S_PROVIDER=$(head -n1 ${GENESTACK_CONFIG}/provider) export K8S_PROVIDER="${K8S_PROVIDER:-kubespray}" -if [[ -f "${GENESTACK_CONFIG}/product" ]]; then - export GENESTACK_PRODUCT=$(head -n1 
${GENESTACK_CONFIG}/product) -elif [[ -z "${GENESTACK_PRODUCT}" ]]; then - echo -e "No GENESTACK_PRODUCT defined" - echo -e "Define the environment variable GENESTACK_PRODUCT to continue." -fi - # Export OSH variables export CONTAINER_DISTRO_NAME=ubuntu export CONTAINER_DISTRO_VERSION=jammy