diff --git a/docs/infrastructure-mariadb-ops.md b/docs/infrastructure-mariadb-ops.md
index def1e049..309ddbd9 100644
--- a/docs/infrastructure-mariadb-ops.md
+++ b/docs/infrastructure-mariadb-ops.md
@@ -7,7 +7,7 @@ Tips and tricks for managing and operating the MariaDB cluster within a Genestac
 Sometimes an operator may need to connect to the database to troubleshoot things or otherwise make modifications to the databases in place. The following command can be used to connect to the database from a node within the cluster.
 
 ``` shell
-mysql -h $(kubectl -n openstack get service mariadb-galera-primary -o jsonpath='{.spec.clusterIP}') \
+mysql -h $(kubectl -n openstack get service mariadb-cluster-primary -o jsonpath='{.spec.clusterIP}') \
       -p$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d) \
       -u root
 ```
@@ -21,7 +21,7 @@ mysql -h $(kubectl -n openstack get service mariadb-galera-primary -o jsonpath='
 When running `mysqldump` or `mariadbdump` the following commands can be useful for generating a quick backup.
 
 ``` shell
-mysqldump --host=$(kubectl -n openstack get service mariadb-galera -o jsonpath='{.spec.clusterIP}')\
+mysqldump --host=$(kubectl -n openstack get service mariadb-cluster -o jsonpath='{.spec.clusterIP}')\
           --user=root \
           --password=$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d) \
           --single-transaction \
@@ -35,14 +35,14 @@ mysqldump --host=$(kubectl -n openstack get service mariadb-galera -o jsonpath='
 !!! example "Dump all databases as individual files in `/tmp`"
 
     ``` shell
-    mysql -h $(kubectl -n openstack get service mariadb-galera -o jsonpath='{.spec.clusterIP}') \
+    mysql -h $(kubectl -n openstack get service mariadb-cluster -o jsonpath='{.spec.clusterIP}') \
          -u root \
          -p$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d) \
          -e 'show databases;' \
          --column-names=false \
          --vertical | \
          awk '/[:alnum:]/' | \
-    xargs -i mysqldump --host=$(kubectl -n openstack get service mariadb-galera -o jsonpath='{.spec.clusterIP}') \
+    xargs -i mysqldump --host=$(kubectl -n openstack get service mariadb-cluster -o jsonpath='{.spec.clusterIP}') \
          --user=root \
          --password=$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d) \
          --single-transaction \
@@ -56,7 +56,7 @@ mysqldump --host=$(kubectl -n openstack get service mariadb-galera -o jsonpath='
 !!! example "Restoring a database"
 
     ``` shell
-    mysql -h $(kubectl -n openstack get service mariadb-galera -o jsonpath='{.spec.clusterIP}') \
+    mysql -h $(kubectl -n openstack get service mariadb-cluster -o jsonpath='{.spec.clusterIP}') \
          -u root \
          -p$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d) \
          ${DATABASE_NAME} < /tmp/${DATABASE_FILE}
@@ -81,7 +81,11 @@ for more information.
   name: maria-restore
 spec:
   mariaDbRef:
-    name: mariadb-galera
+    name: mariadb-cluster
   backupRef:
     name: mariadb-backup
 EOF
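+
+!!! tip
+
+    The restore is exposed as a `Restore` resource by the mariadb-operator; as a rough sketch (resource and namespace names may differ per deployment), `kubectl --namespace openstack get restore maria-restore -w` lets you watch the restore until it reports complete.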
diff --git a/docs/openstack-cinder.md b/docs/openstack-cinder.md
index aba8a234..b606edb0 100644
--- a/docs/openstack-cinder.md
+++ b/docs/openstack-cinder.md
@@ -34,7 +34,7 @@ helm upgrade --install cinder ./cinder \
     --set endpoints.identity.auth.cinder.password="$(kubectl --namespace openstack get secret cinder-admin -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.admin.password="$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.cinder.password="$(kubectl --namespace openstack get secret cinder-db-password -o jsonpath='{.data.password}' | base64 -d)" \
-    --set conf.cinder.database.slave_connection="mysql+pymysql://cinder:$(kubectl --namespace openstack get secret cinder-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-galera-secondary.openstack.svc.cluster.local:3306/cinder" \
+    --set conf.cinder.database.slave_connection="mysql+pymysql://cinder:$(kubectl --namespace openstack get secret cinder-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-cluster-secondary.openstack.svc.cluster.local:3306/cinder" \
     --set endpoints.oslo_messaging.auth.admin.password="$(kubectl --namespace openstack get secret rabbitmq-default-user -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_messaging.auth.cinder.password="$(kubectl --namespace openstack get secret cinder-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)" \
     --post-renderer /opt/genestack/kustomize/kustomize.sh \
diff --git a/docs/openstack-compute-kit.md b/docs/openstack-compute-kit.md
index e0f07b37..e70c5c3e 100644
--- a/docs/openstack-compute-kit.md
+++ b/docs/openstack-compute-kit.md
@@ -102,7 +102,7 @@ helm upgrade --install placement ./placement --namespace=openstack \
     --set endpoints.identity.auth.placement.password="$(kubectl --namespace openstack get secret placement-admin -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.admin.password="$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.placement.password="$(kubectl --namespace openstack get secret placement-db-password -o jsonpath='{.data.password}' | base64 -d)" \
-    --set conf.placement.placement_database.slave_connection="mysql+pymysql://placement:$(kubectl --namespace openstack get secret placement-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-galera-secondary.openstack.svc.cluster.local:3306/placement" \
+    --set conf.placement.placement_database.slave_connection="mysql+pymysql://placement:$(kubectl --namespace openstack get secret placement-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-cluster-secondary.openstack.svc.cluster.local:3306/placement" \
     --set endpoints.oslo_db.auth.nova_api.password="$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)" \
     --post-renderer /opt/genestack/kustomize/kustomize.sh \
     --post-renderer-args placement/base
@@ -130,9 +130,9 @@ helm upgrade --install nova ./nova \
     --set endpoints.oslo_db_api.auth.nova.password="$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_db_cell0.auth.admin.password="$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d)" \
     --set endpoints.oslo_db_cell0.auth.nova.password="$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)" \
-    --set conf.nova.database.slave_connection="mysql+pymysql://nova:$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-galera-secondary.openstack.svc.cluster.local:3306/nova" \
-    --set conf.nova.api_database.slave_connection="mysql+pymysql://nova:$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-galera-secondary.openstack.svc.cluster.local:3306/nova_api" \
-    --set conf.nova.cell0_database.slave_connection="mysql+pymysql://nova:$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-galera-secondary.openstack.svc.cluster.local:3306/nova_cell0" \
+    --set conf.nova.database.slave_connection="mysql+pymysql://nova:$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-cluster-secondary.openstack.svc.cluster.local:3306/nova" \
+    --set conf.nova.api_database.slave_connection="mysql+pymysql://nova:$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-cluster-secondary.openstack.svc.cluster.local:3306/nova_api" \
+    --set conf.nova.cell0_database.slave_connection="mysql+pymysql://nova:$(kubectl --namespace openstack get secret nova-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-cluster-secondary.openstack.svc.cluster.local:3306/nova_cell0" \
     --set endpoints.oslo_messaging.auth.admin.password="$(kubectl --namespace openstack get secret rabbitmq-default-user -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_messaging.auth.nova.password="$(kubectl --namespace openstack get secret nova-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)" \
     --set network.ssh.public_key="$(kubectl -n openstack get secret nova-ssh-keypair -o jsonpath='{.data.public_key}' | base64 -d)"$'\n' \
@@ -178,7 +178,7 @@ helm upgrade --install neutron ./neutron \
     --set endpoints.identity.auth.ironic.password="$(kubectl --namespace openstack get secret ironic-admin -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.admin.password="$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.neutron.password="$(kubectl --namespace openstack get secret neutron-db-password -o jsonpath='{.data.password}' | base64 -d)" \
-    --set conf.neutron.database.slave_connection="mysql+pymysql://neutron:$(kubectl --namespace openstack get secret neutron-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-galera-secondary.openstack.svc.cluster.local:3306/neutron" \
+    --set conf.neutron.database.slave_connection="mysql+pymysql://neutron:$(kubectl --namespace openstack get secret neutron-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-cluster-secondary.openstack.svc.cluster.local:3306/neutron" \
     --set endpoints.oslo_messaging.auth.admin.password="$(kubectl --namespace openstack get secret rabbitmq-default-user -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_messaging.auth.neutron.password="$(kubectl --namespace openstack get secret neutron-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)" \
     --set conf.neutron.ovn.ovn_nb_connection="tcp:$(kubectl --namespace kube-system get service ovn-nb -o jsonpath='{.spec.clusterIP}:{.spec.ports[0].port}')" \
diff --git a/docs/openstack-glance.md b/docs/openstack-glance.md
index 9630dff6..9a3b2ce8 100644
--- a/docs/openstack-glance.md
+++ b/docs/openstack-glance.md
@@ -38,7 +38,7 @@ helm upgrade --install glance ./glance \
     --set endpoints.identity.auth.glance.password="$(kubectl --namespace openstack get secret glance-admin -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.admin.password="$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.glance.password="$(kubectl --namespace openstack get secret glance-db-password -o jsonpath='{.data.password}' | base64 -d)" \
-    --set conf.glance.database.slave_connection="mysql+pymysql://glance:$(kubectl --namespace openstack get secret glance-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-galera-secondary.openstack.svc.cluster.local:3306/glance" \
+    --set conf.glance.database.slave_connection="mysql+pymysql://glance:$(kubectl --namespace openstack get secret glance-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-cluster-secondary.openstack.svc.cluster.local:3306/glance" \
     --set endpoints.oslo_messaging.auth.admin.password="$(kubectl --namespace openstack get secret rabbitmq-default-user -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_messaging.auth.glance.password="$(kubectl --namespace openstack get secret glance-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)" \
     --post-renderer /opt/genestack/kustomize/kustomize.sh \
diff --git a/docs/openstack-heat.md b/docs/openstack-heat.md
index c1ffcc85..1c00f2db 100644
--- a/docs/openstack-heat.md
+++ b/docs/openstack-heat.md
@@ -43,7 +43,7 @@ helm upgrade --install heat ./heat \
     --set endpoints.identity.auth.heat_stack_user.password="$(kubectl --namespace openstack get secret heat-stack-user -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.admin.password="$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.heat.password="$(kubectl --namespace openstack get secret heat-db-password -o jsonpath='{.data.password}' | base64 -d)" \
-    --set conf.heat.database.slave_connection="mysql+pymysql://heat:$(kubectl --namespace openstack get secret heat-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-galera-secondary.openstack.svc.cluster.local:3306/heat" \
+    --set conf.heat.database.slave_connection="mysql+pymysql://heat:$(kubectl --namespace openstack get secret heat-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-cluster-secondary.openstack.svc.cluster.local:3306/heat" \
     --set endpoints.oslo_messaging.auth.admin.password="$(kubectl --namespace openstack get secret rabbitmq-default-user -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_messaging.auth.heat.password="$(kubectl --namespace openstack get secret heat-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)" \
     --post-renderer /opt/genestack/kustomize/kustomize.sh \
diff --git a/docs/openstack-keystone.md b/docs/openstack-keystone.md
index 3aeed568..09d8aeff 100644
--- a/docs/openstack-keystone.md
+++ b/docs/openstack-keystone.md
@@ -37,7 +37,7 @@ helm upgrade --install keystone ./keystone \
     --set endpoints.identity.auth.admin.password="$(kubectl --namespace openstack get secret keystone-admin -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.admin.password="$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.keystone.password="$(kubectl --namespace openstack get secret keystone-db-password -o jsonpath='{.data.password}' | base64 -d)" \
-    --set conf.keystone.database.slave_connection="mysql+pymysql://keystone:$(kubectl --namespace openstack get secret keystone-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-galera-secondary.openstack.svc.cluster.local:3306/keystone" \
+    --set conf.keystone.database.slave_connection="mysql+pymysql://keystone:$(kubectl --namespace openstack get secret keystone-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-cluster-secondary.openstack.svc.cluster.local:3306/keystone" \
     --set endpoints.oslo_messaging.auth.admin.password="$(kubectl --namespace openstack get secret rabbitmq-default-user -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_messaging.auth.keystone.password="$(kubectl --namespace openstack get secret keystone-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)" \
     --post-renderer /opt/genestack/kustomize/kustomize.sh \
diff --git a/docs/openstack-octavia.md b/docs/openstack-octavia.md
index 673fd34f..c09e75b6 100644
--- a/docs/openstack-octavia.md
+++ b/docs/openstack-octavia.md
@@ -38,7 +38,7 @@ helm upgrade --install octavia ./octavia \
     --set endpoints.identity.auth.octavia.password="$(kubectl --namespace openstack get secret octavia-admin -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.admin.password="$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d)" \
     --set endpoints.oslo_db.auth.octavia.password="$(kubectl --namespace openstack get secret octavia-db-password -o jsonpath='{.data.password}' | base64 -d)" \
-    --set conf.octavia.database.slave_connection="mysql+pymysql://octavia:$(kubectl --namespace openstack get secret octavia-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-galera-secondary.openstack.svc.cluster.local:3306/octavia" \
+    --set conf.octavia.database.slave_connection="mysql+pymysql://octavia:$(kubectl --namespace openstack get secret octavia-db-password -o jsonpath='{.data.password}' | base64 -d)@mariadb-cluster-secondary.openstack.svc.cluster.local:3306/octavia" \
     --set endpoints.oslo_messaging.auth.admin.password="$(kubectl --namespace openstack get secret rabbitmq-default-user -o jsonpath='{.data.password}' | base64 -d)" \
     --set endpoints.oslo_messaging.auth.octavia.password="$(kubectl --namespace openstack get secret octavia-rabbitmq-password -o jsonpath='{.data.password}' | base64 -d)" \
     --set conf.octavia.certificates.ca_private_key_passphrase="$(kubectl --namespace openstack get secret octavia-certificates -o jsonpath='{.data.password}' | base64 -d)" \
diff --git a/docs/openstack-skyline.md b/docs/openstack-skyline.md
index 619696ac..9e748ee1 100644
--- a/docs/openstack-skyline.md
+++ b/docs/openstack-skyline.md
@@ -17,7 +17,7 @@ kubectl --namespace openstack \
     --from-literal=service-domain="service" \
     --from-literal=service-project="service" \
     --from-literal=service-project-domain="service" \
-    --from-literal=db-endpoint="mariadb-galera-primary.openstack.svc.cluster.local" \
+    --from-literal=db-endpoint="mariadb-cluster-primary.openstack.svc.cluster.local" \
    --from-literal=db-name="skyline" \
    --from-literal=db-username="skyline" \
    --from-literal=db-password="$(< /dev/urandom tr -dc _A-Za-z0-9 | head -c${1:-32};echo;)" \
diff --git a/docs/ovn-troubleshooting.md b/docs/ovn-troubleshooting.md
index bf905bb9..da47f1a8 100644
--- a/docs/ovn-troubleshooting.md
+++ b/docs/ovn-troubleshooting.md
@@ -379,7 +379,7 @@ Then you can connect to the database:
 ```shell
 mysql -u root \
 -p$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d) \
--h mariadb-galera-primary.openstack.svc.cluster.local
+-h mariadb-cluster-primary.openstack.svc.cluster.local
 ```
 
 Make sure to change `svc.cluster.local` if you have set the name of your cluster
diff --git a/helm-configs/cinder/cinder-helm-overrides.yaml b/helm-configs/cinder/cinder-helm-overrides.yaml
index 4766f80e..e3b90f20 100644
--- a/helm-configs/cinder/cinder-helm-overrides.yaml
+++ b/helm-configs/cinder/cinder-helm-overrides.yaml
@@ -25,19 +25,19 @@ release_group: null
 
 images:
   tags:
-    db_init: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    cinder_db_sync: "docker.io/openstackhelm/cinder:2023.1-ubuntu_jammy"
-    db_drop: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    ks_user: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    ks_service: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    ks_endpoints: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    cinder_api: "docker.io/openstackhelm/cinder:2023.1-ubuntu_jammy"
-    bootstrap: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    cinder_scheduler: "docker.io/openstackhelm/cinder:2023.1-ubuntu_jammy"
-    cinder_volume: "docker.io/openstackhelm/cinder:2023.1-ubuntu_jammy"
-    cinder_volume_usage_audit: "docker.io/openstackhelm/cinder:2023.1-ubuntu_jammy"
+    db_init: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    cinder_db_sync: "docker.io/openstackhelm/cinder:2024.1-ubuntu_jammy"
+    db_drop: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_user: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_service: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_endpoints: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    cinder_api: "docker.io/openstackhelm/cinder:2024.1-ubuntu_jammy"
+    bootstrap: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    cinder_scheduler: "docker.io/openstackhelm/cinder:2024.1-ubuntu_jammy"
+    cinder_volume: "docker.io/openstackhelm/cinder:2024.1-ubuntu_jammy"
+    cinder_volume_usage_audit: "docker.io/openstackhelm/cinder:2024.1-ubuntu_jammy"
     cinder_storage_init: "docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal"
-    cinder_backup: "docker.io/openstackhelm/cinder:2023.1-ubuntu_jammy"
+    cinder_backup: "docker.io/openstackhelm/cinder:2024.1-ubuntu_jammy"
     cinder_backup_storage_init: "docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal"
     test: docker.io/xrally/xrally-openstack:2.0.0
     rabbit_init: docker.io/rabbitmq:3.7-management
@@ -1327,7 +1327,7 @@ endpoints:
       username: cinder
       password: password
     hosts:
-      default: mariadb-galera-primary
+      default: mariadb-cluster-primary
     host_fqdn_override:
       default: null
     path: /cinder
diff --git a/helm-configs/designate/designate-helm-overrides.yaml b/helm-configs/designate/designate-helm-overrides.yaml
new file mode 100644
index 00000000..b6744b41
--- /dev/null
+++ b/helm-configs/designate/designate-helm-overrides.yaml
@@ -0,0 +1,759 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Default values for designate.
+# This is a YAML-formatted file.
+# Declare name/value pairs to be passed into your templates.
+# name: value
+
+---
+release_group: null
+
+labels:
+  api:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+  central:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+  producer:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+  worker:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+  job:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+  mdns:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+  sink:
+    node_selector_key: openstack-control-plane
+    node_selector_value: enabled
+
+images:
+  tags:
+    bootstrap: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
+    db_init: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
+    db_drop: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
+    rabbit_init: docker.io/rabbitmq:3.7-management
+    ks_user: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
+    ks_service: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
+    ks_endpoints: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
+    dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
+    designate_db_sync: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy
+    designate_api: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy
+    designate_central: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy
+    designate_mdns: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy
+    designate_worker: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy
+    designate_producer: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy
+    designate_sink: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy
+    image_repo_sync: docker.io/docker:17.07.0
+  pull_policy: "IfNotPresent"
+  local_registry:
+    active: false
+    exclude:
+      - dep_check
+      - image_repo_sync
+
+pod:
+  affinity:
+    anti:
+      type:
+        default: preferredDuringSchedulingIgnoredDuringExecution
+      topologyKey:
+        default: kubernetes.io/hostname
+  mounts:
+    designate_api:
+      init_container: null
+      designate_api:
+        volumeMounts:
+        volumes:
+    designate_central:
+      init_container: null
+      designate_central:
+        volumeMounts:
+        volumes:
+    designate_mdns:
+      init_container: null
+      designate_mdns:
+        volumeMounts:
+        volumes:
+    designate_worker:
+      init_container: null
+      designate_worker:
+        volumeMounts:
+        volumes:
+    designate_producer:
+      init_container: null
+      designate_producer:
+        volumeMounts:
+        volumes:
+    designate_sink:
+      init_container: null
+      designate_sink:
+        volumeMounts:
+        volumes:
+    designate_db_sync:
+      designate_db_sync:
+        volumeMounts:
+        volumes:
+  replicas:
+    api: 1
+    central: 1
+    mdns: 1
+    producer: 1
+    sink: 1
+    worker: 1
+  lifecycle:
+    upgrades:
+      deployments:
+        revision_history: 3
+        pod_replacement_strategy: RollingUpdate
+        rolling_update:
+          max_unavailable: 1
+          max_surge: 3
+    disruption_budget:
+      api:
+        min_available: 0
+      central:
+        min_available: 0
+      mdns:
+        min_available: 0
+      worker:
+        min_available: 0
+      producer:
+        min_available: 0
+      sink:
+        min_available: 0
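+    # NOTE (assumption): min_available 0 gives these pods no eviction
+    # protection; raise it once the replica counts are scaled beyond one.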
+    termination_grace_period:
+      api:
+        timeout: 30
+      mdns:
+        timeout: 30
+
+  resources:
+    enabled: false
+    api:
+      requests:
+        memory: "128Mi"
+        cpu: "100m"
+      limits:
+        memory: "1024Mi"
+        cpu: "2000m"
+    jobs:
+      bootstrap:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+      db_init:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+      db_sync:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+      ks_endpoints:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+      ks_service:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+      ks_user:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+      rabbit_init:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+      tests:
+        requests:
+          memory: "128Mi"
+          cpu: "100m"
+        limits:
+          memory: "1024Mi"
+          cpu: "2000m"
+
+network:
+  api:
+    ingress:
+      public: true
+      classes:
+        namespace: "nginx"
+        cluster: "nginx-cluster"
+      annotations:
+        nginx.ingress.kubernetes.io/rewrite-target: /
+    external_policy_local: false
+    node_port:
+      enabled: false
+      port: 9001
+  mdns:
+    name: "designate-mdns"
+    proto: "http"
+    external_policy_local: false
+    node_port:
+      enabled: true
+      port: 5354
+
+bootstrap:
+  enabled: false
+  script: |
+    openstack token issue
+
+dependencies:
+  dynamic:
+    common:
+      local_image_registry:
+        jobs:
+          - designate-image-repo-sync
+        services:
+          - endpoint: node
+            service: local_image_registry
+    job_rabbit_init:
+      api:
+        jobs:
+          - designate-rabbit-init
+      sink:
+        jobs:
+          - designate-rabbit-init
+      central:
+        jobs:
+          - designate-rabbit-init
+      worker:
+        jobs:
+          - designate-rabbit-init
+  static:
+    db_init:
+      services:
+        - service: oslo_db
+          endpoint: internal
+    db_sync:
+      jobs:
+        - designate-db-init
+      services:
+        - service: oslo_db
+          endpoint: internal
+    ks_user:
+      services:
+        - service: identity
+          endpoint: internal
+    ks_service:
+      services:
+        - service: identity
+          endpoint: internal
+    ks_endpoints:
+      jobs:
+        - designate-ks-service
+      services:
+        - service: identity
+          endpoint: internal
+    rabbit_init:
+      services:
+        - service: oslo_messaging
+          endpoint: internal
+    api:
+      jobs:
+        - designate-db-sync
+        - designate-ks-user
+        - designate-ks-endpoints
+      service:
+        - service: oslo_db
+          endpoint: internal
+        - service: identity
+          endpoint: internal
+        - service: oslo_messaging
+          endpoint: internal
+    central:
+      jobs:
+        - designate-db-sync
+        - designate-ks-user
+        - designate-ks-endpoints
+      service:
+        - service: oslo_db
+          endpoint: internal
+        - service: identity
+          endpoint: internal
+        - service: oslo_messaging
+          endpoint: internal
+    worker:
+      jobs:
+        - designate-db-sync
+        - designate-ks-user
+        - designate-ks-endpoints
+      services:
+        - service: oslo_db
+          endpoint: internal
+        - service: identity
+          endpoint: internal
+        - service: mdns
+          endpoint: internal
+    mdns:
+      jobs:
+        - designate-db-sync
+        - designate-ks-user
+        - designate-ks-endpoints
+      services:
+        - service: oslo_db
+          endpoint: internal
+        - service: identity
+          endpoint: internal
+    producer:
+      jobs:
+        - designate-db-sync
+        - designate-ks-user
+        - designate-ks-endpoints
+      services:
+        - service: oslo_db
+          endpoint: internal
+        - service: identity
+          endpoint: internal
+    sink:
+      jobs:
+        - designate-db-sync
+        - designate-ks-user
+        - designate-ks-endpoints
+      services:
+        - service: oslo_db
+          endpoint: internal
+        - service: identity
+          endpoint: internal
+
+conf:
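+  # NOTE (assumption): the pool below wires designate to an external
+  # PowerDNS backend; the 127.0.0.1 hosts are placeholders and must be
+  # replaced with the addresses of your PowerDNS and designate-mdns
+  # services before zones will propagate.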
+  pools: |
+    - name: default
+      # The name is immutable. There will be no option to change the name after
+      # creation; the only way to change it will be to delete it
+      # (and all zones associated with it) and recreate it.
+      description: Default Pool
+
+      attributes: {}
+
+      # List out the NS records for zones hosted within this pool
+      # This should be a record that is created outside of designate, that
+      # points to the public IP of the controller node.
+      ns_records:
+        - hostname: {{ printf "ns.%s.svc.%s." .Release.Namespace .Values.endpoints.cluster_domain_suffix }}
+          priority: 1
+
+      # List out the nameservers for this pool. These are the actual DNS servers.
+      # We use these to verify changes have propagated to all nameservers.
+      nameservers:
+        - host: 127.0.0.1 # enter your own values ${POWERDNS_SERVICE_HOST}
+          port: {{ tuple "powerdns" "internal" "powerdns" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+
+      # List out the targets for this pool. For BIND there will be one
+      # entry for each BIND server, as we have to run the rndc command on each server
+      targets:
+        - type: pdns4
+          description: PowerDNS Server
+
+          # List out the designate-mdns servers from which PowerDNS servers
+          # should request zone transfers (AXFRs).
+          # This should be the IP of the controller node.
+          # If you have multiple controllers you can add multiple masters
+          # by running designate-mdns on them, and adding them here.
+          masters:
+            - host: 127.0.0.1 # enter your own values ${MINIDNS_SERVICE_HOST}
+              port: {{ tuple "mdns" "internal" "ipc" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+
+          # PowerDNS Configuration options
+          options:
+            host: 127.0.0.1 # enter your own values ${POWERDNS_SERVICE_HOST}
+            port: {{ tuple "powerdns" "internal" "powerdns" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
+            api_endpoint: http://${POWERDNS_SERVICE_HOST}:{{ tuple "powerdns" "internal" "powerdns_api" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
| include "helm-toolkit.endpoints.endpoint_token_lookup" }} + paste: + composite:osapi_dns: + use: egg:Paste#urlmap + /: osapi_dns_versions + /v2: osapi_dns_v2 + /admin: osapi_dns_admin + composite:osapi_dns_versions: + use: call:designate.api.middleware:auth_pipeline_factory + noauth: http_proxy_to_wsgi cors maintenance faultwrapper osapi_dns_app_versions + keystone: http_proxy_to_wsgi cors maintenance faultwrapper osapi_dns_app_versions + app:osapi_dns_app_versions: + paste.app_factory: designate.api.versions:factory + composite:osapi_dns_v2: + use: call:designate.api.middleware:auth_pipeline_factory + noauth: http_proxy_to_wsgi cors request_id faultwrapper validation_API_v2 noauthcontext maintenance normalizeuri osapi_dns_app_v2 + keystone: http_proxy_to_wsgi cors request_id faultwrapper validation_API_v2 authtoken keystonecontext maintenance normalizeuri osapi_dns_app_v2 + app:osapi_dns_app_v2: + paste.app_factory: designate.api.v2:factory + composite:osapi_dns_admin: + use: call:designate.api.middleware:auth_pipeline_factory + noauth: http_proxy_to_wsgi cors request_id faultwrapper noauthcontext maintenance normalizeuri osapi_dns_app_admin + keystone: http_proxy_to_wsgi cors request_id faultwrapper authtoken keystonecontext maintenance normalizeuri osapi_dns_app_admin + app:osapi_dns_app_admin: + paste.app_factory: designate.api.admin:factory + filter:cors: + paste.filter_factory: oslo_middleware.cors:filter_factory + oslo_config_project: designate + filter:request_id: + paste.filter_factory: oslo_middleware:RequestId.factory + filter:http_proxy_to_wsgi: + paste.filter_factory: oslo_middleware:HTTPProxyToWSGI.factory + filter:noauthcontext: + paste.filter_factory: designate.api.middleware:NoAuthContextMiddleware.factory + filter:authtoken: + paste.filter_factory: keystonemiddleware.auth_token:filter_factory + filter:keystonecontext: + paste.filter_factory: designate.api.middleware:KeystoneContextMiddleware.factory + filter:maintenance: + paste.filter_factory: designate.api.middleware:MaintenanceMiddleware.factory + filter:normalizeuri: + paste.filter_factory: designate.api.middleware:NormalizeURIMiddleware.factory + filter:faultwrapper: + paste.filter_factory: designate.api.middleware:FaultWrapperMiddleware.factory + filter:validation_API_v2: + paste.filter_factory: designate.api.middleware:APIv2ValidationErrorMiddleware.factory + policy: {} + designate: + DEFAULT: + debug: false + log_config_append: /etc/designate/logging.conf + service:api: + auth_strategy: keystone + enable_api_v2: true + enable_api_admin: true + enabled_extensions_v2: quotas,reports + workers: 2 + service:worker: + enabled: true + notify: false + oslo_middleware: + enable_proxy_headers_parsing: true + oslo_policy: + policy_file: /etc/designate/policy.yaml + database: + max_retries: -1 + storage:sqlalchemy: + max_retries: -1 + keystone_authtoken: + service_token_roles: service + service_token_roles_required: true + auth_type: password + auth_version: v3 + memcache_security_strategy: ENCRYPT + service_type: dns + logging: + loggers: + keys: + - root + - designate + handlers: + keys: + - stdout + - stderr + - "null" + formatters: + keys: + - context + - default + logger_root: + level: WARNING + handlers: 'null' + logger_designate: + level: INFO + handlers: + - stdout + qualname: designate + logger_amqp: + level: WARNING + handlers: stderr + qualname: amqp + logger_amqplib: + level: WARNING + handlers: stderr + qualname: amqplib + logger_eventletwsgi: + level: WARNING + handlers: stderr + qualname: 
+      qualname: eventlet.wsgi.server
+    logger_sqlalchemy:
+      level: WARNING
+      handlers: stderr
+      qualname: sqlalchemy
+    logger_boto:
+      level: WARNING
+      handlers: stderr
+      qualname: boto
+    handler_null:
+      class: logging.NullHandler
+      formatter: default
+      args: ()
+    handler_stdout:
+      class: StreamHandler
+      args: (sys.stdout,)
+      formatter: context
+    handler_stderr:
+      class: StreamHandler
+      args: (sys.stderr,)
+      formatter: context
+    formatter_context:
+      class: oslo_log.formatters.ContextFormatter
+      datefmt: "%Y-%m-%d %H:%M:%S"
+    formatter_default:
+      format: "%(message)s"
+      datefmt: "%Y-%m-%d %H:%M:%S"
+
+# Names of secrets used by bootstrap and environmental checks
+secrets:
+  identity:
+    admin: designate-keystone-admin
+    designate: designate-keystone-user
+    test: designate-keystone-test
+  oslo_db:
+    admin: designate-db-admin
+    designate: designate-db-user
+  oslo_messaging:
+    admin: designate-rabbitmq-admin
+    designate: designate-rabbitmq-user
+  tls:
+    dns:
+      api:
+        public: designate-tls-public
+  oci_image_registry:
+    designate: designate-oci-image-registry
+
+endpoints:
+  cluster_domain_suffix: cluster.local
+  local_image_registry:
+    name: docker-registry
+    namespace: docker-registry
+    hosts:
+      default: localhost
+      internal: docker-registry
+      node: localhost
+    host_fqdn_override:
+      default: null
+    port:
+      registry:
+        node: 5000
+  oci_image_registry:
+    name: oci-image-registry
+    namespace: oci-image-registry
+    auth:
+      enabled: false
+      designate:
+        username: designate
+        password: password
+    hosts:
+      default: localhost
+    host_fqdn_override:
+      default: null
+    port:
+      registry:
+        default: null
+  identity:
+    name: keystone
+    auth:
+      admin:
+        region_name: RegionOne
+        username: admin
+        password: password
+        project_name: admin
+        user_domain_name: default
+        project_domain_name: default
+      designate:
+        role: admin
+        region_name: RegionOne
+        username: designate
+        password: password
+        project_name: service
+        user_domain_name: service
+        project_domain_name: service
+      test:
+        role: admin
+        region_name: RegionOne
+        username: designate-test
+        password: password
+        project_name: test
+        user_domain_name: service
+        project_domain_name: service
+    hosts:
+      default: keystone
+      internal: keystone-api
+    host_fqdn_override:
+      default: null
+    path:
+      default: /v3
+    scheme:
+      default: http
+    port:
+      api:
+        default: 80
+        internal: 5000
+  dns:
+    name: designate
+    hosts:
+      default: designate-api
+      public: designate
+    host_fqdn_override:
+      default: null
+    path:
+      default: /
+    scheme:
+      default: 'http'
+    port:
+      api:
+        default: 9001
+        public: 80
+  mdns:
+    name: minidns
+    hosts:
+      default: minidns
+      public: designate-mdns
+    host_fqdn_override:
+      default: null
+    path:
+      default: null
+    scheme:
+      default: 'tcp'
+    port:
+      ipc:
+        default: 5354
+  oslo_db:
+    auth:
+      admin:
+        username: root
+        password: password
+        secret:
+          tls:
+            internal: mariadb-tls-direct
+    hosts:
+      default: mariadb-cluster-primary
+    host_fqdn_override:
+      default: null
+    path: /designate
+    scheme: mysql+pymysql
+    port:
+      mysql:
+        default: 3306
+  oslo_cache:
+    hosts:
+      default: memcached
+    host_fqdn_override:
+      default: null
+    port:
+      memcache:
+        default: 11211
+    auth:
+      # NOTE: this is used to define the value for keystone
+      # authtoken cache encryption key, if not set it will be populated
+      # automatically with a random value, but to take advantage of
+      # this feature all services should be set to use the same key,
+      # and memcache service.
+      memcache_secret_key: null
+  oslo_messaging:
+    auth:
+      admin:
+        username: rabbitmq
+        password: password
+        secret:
+          tls:
+            internal: rabbitmq-tls-direct
+      designate:
+        username: designate
+        password: password
+    statefulset:
+      replicas: 3
+      name: rabbitmq-server
+    hosts:
+      default: rabbitmq-nodes
+    host_fqdn_override:
+      default: null
+    path: /designate
+    scheme: rabbit
+    port:
+      amqp:
+        default: 5672
+      http:
+        default: 15672
+  powerdns:
+    auth:
+      service:
+        token: chiave_segreta
+    hosts:
+      default: 8.8.8.8
+    host_fqdn_override:
+      default: null
+    port:
+      powerdns_api:
+        default: 8081
+      powerdns:
+        default: 53
+
+manifests:
+  configmap_bin: true
+  configmap_etc: true
+  deployment_api: true
+  deployment_central: true
+  deployment_worker: true
+  deployment_producer: true
+  deployment_mdns: true
+  deployment_sink: false
+  ingress_api: true
+  job_bootstrap: true
+  job_db_init: true
+  job_db_sync: true
+  job_ks_endpoints: true
+  job_ks_service: true
+  job_ks_user: true
+  job_rabbit_init: false
+  pdb_api: true
+  pdb_producer: true
+  pdb_central: true
+  pdb_worker: true
+  pdb_mdns: true
+  pdb_sink: false
+  secret_db: true
+  secret_ingress_tls: true
+  secret_keystone: true
+  secret_rabbitmq: true
+  secret_registry: true
+  service_api: true
+  service_mdns: true
+  service_ingress_api: true
+...
diff --git a/helm-configs/glance/glance-helm-overrides.yaml b/helm-configs/glance/glance-helm-overrides.yaml
index 20cf7163..fd643429 100644
--- a/helm-configs/glance/glance-helm-overrides.yaml
+++ b/helm-configs/glance/glance-helm-overrides.yaml
@@ -17,18 +17,18 @@ release_group: null
 images:
   tags:
     test: docker.io/xrally/xrally-openstack:2.0.0
-    glance_storage_init: docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_jammy
-    glance_metadefs_load: docker.io/openstackhelm/glance:2023.1-ubuntu_jammy
-    db_init: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
-    glance_db_sync: docker.io/openstackhelm/glance:2023.1-ubuntu_jammy
-    db_drop: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
-    ks_user: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
-    ks_service: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
-    ks_endpoints: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
+    glance_storage_init: docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_xenial
+    glance_metadefs_load: "docker.io/openstackhelm/glance:2024.1-ubuntu_jammy"
+    db_init: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    glance_db_sync: "docker.io/openstackhelm/glance:2024.1-ubuntu_jammy"
+    db_drop: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_user: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_service: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_endpoints: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
     rabbit_init: docker.io/rabbitmq:3.7-management
-    glance_api: docker.io/openstackhelm/glance:2023.1-ubuntu_jammy
+    glance_api: "docker.io/openstackhelm/glance:2024.1-ubuntu_jammy"
     # Bootstrap image requires curl
-    bootstrap: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
+    bootstrap: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
     dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
     image_repo_sync: docker.io/docker:17.07.0
   pull_policy: "IfNotPresent"
@@ -258,6 +258,12 @@ conf:
     paste_deploy:
       flavor: keystone
     database:
+      idle_timeout: 3600
+      connection_recycle_time: 3600
+      pool_timeout: 60
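+      # NOTE (assumption): idle_timeout is the legacy oslo.db name for
+      # connection_recycle_time; both are set so pooled connections are
+      # recycled after an hour regardless of the oslo.db version in the image.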
       max_retries: -1
     oslo_concurrency:
       lock_path: "/var/lib/glance/tmp"
@@ -595,7 +601,7 @@ endpoints:
       username: glance
       password: password
     hosts:
-      default: mariadb-galera-primary
+      default: mariadb-cluster-primary
     host_fqdn_override:
       default: null
     path: /glance
diff --git a/helm-configs/gnocchi/gnocchi-helm-overrides.yaml b/helm-configs/gnocchi/gnocchi-helm-overrides.yaml
index db1c37bb..b8bd082a 100644
--- a/helm-configs/gnocchi/gnocchi-helm-overrides.yaml
+++ b/helm-configs/gnocchi/gnocchi-helm-overrides.yaml
@@ -622,7 +622,7 @@ endpoints:
       username: gnocchi
       password: password
     hosts:
-      default: mariadb-galera-primary
+      default: mariadb-cluster-primary
     host_fqdn_override:
       default: null
     path: /gnocchi
diff --git a/helm-configs/heat/heat-helm-overrides.yaml b/helm-configs/heat/heat-helm-overrides.yaml
index d3e48903..04875b92 100644
--- a/helm-configs/heat/heat-helm-overrides.yaml
+++ b/helm-configs/heat/heat-helm-overrides.yaml
@@ -23,19 +23,19 @@ labels:
 
 images:
   tags:
-    bootstrap: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    db_init: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    db_drop: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    ks_user: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    ks_service: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    ks_endpoints: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    heat_db_sync: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    heat_api: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    heat_cfn: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    heat_cloudwatch: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    heat_engine: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    heat_engine_cleaner: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    heat_purge_deleted: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
+    bootstrap: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    db_init: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    db_drop: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_user: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_service: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_endpoints: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    heat_db_sync: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    heat_api: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    heat_cfn: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    heat_cloudwatch: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    heat_engine: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    heat_engine_cleaner: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    heat_purge_deleted: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
     test: docker.io/xrally/xrally-openstack:2.0.0
     rabbit_init: docker.io/rabbitmq:3.7-management
     dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
@@ -336,6 +336,9 @@
       memcache_security_strategy: ENCRYPT
       service_type: orchestration
     database:
+      idle_timeout: 3600
+      connection_recycle_time: 3600
+      pool_timeout: 60
       max_retries: -1
     trustee:
       auth_type: password
@@ -865,7 +868,7 @@ endpoints:
       username: heat
       password: password
     hosts:
-      default: mariadb-galera-primary
+      default: mariadb-cluster-primary
     host_fqdn_override:
       default: null
     path: /heat
diff --git a/helm-configs/horizon/horizon-helm-overrides.yaml b/helm-configs/horizon/horizon-helm-overrides.yaml
index 298f8238..73e77af5 100644
--- a/helm-configs/horizon/horizon-helm-overrides.yaml
+++ b/helm-configs/horizon/horizon-helm-overrides.yaml
@@ -18,10 +18,10 @@
 ---
 images:
   tags:
-    db_init: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
-    db_drop: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy
-    horizon_db_sync: ghcr.io/rackerlabs/genestack/horizon-rxt:2023.1
-    horizon: ghcr.io/rackerlabs/genestack/horizon-rxt:2023.1
+    db_init: docker.io/openstackhelm/heat:2024.1-ubuntu_jammy
+    db_drop: docker.io/openstackhelm/heat:2024.1-ubuntu_jammy
+    horizon_db_sync: docker.io/openstackhelm/horizon:2023.1-ubuntu_jammy
+    horizon: docker.io/openstackhelm/horizon:2023.1-ubuntu_jammy
     test: docker.io/openstackhelm/osh-selenium:latest-ubuntu_focal
     dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
     image_repo_sync: docker.io/docker:17.07.0
@@ -7242,7 +7242,7 @@ endpoints:
       username: horizon
       password: password
     hosts:
-      default: mariadb-galera-primary
+      default: mariadb-cluster-primary
     host_fqdn_override:
       default: null
     path: /horizon
diff --git a/helm-configs/keystone/keystone-helm-overrides.yaml b/helm-configs/keystone/keystone-helm-overrides.yaml
index b909f93f..54834567 100644
--- a/helm-configs/keystone/keystone-helm-overrides.yaml
+++ b/helm-configs/keystone/keystone-helm-overrides.yaml
@@ -18,21 +18,21 @@ helm3_hook: true
 
 images:
   tags:
-    bootstrap: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    db_init: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    db_drop: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    keystone_api: "ghcr.io/rackerlabs/genestack/keystone-rxt:2023.1-ubuntu_jammy"
-    keystone_bootstrap: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    keystone_credential_rotate: "ghcr.io/rackerlabs/genestack/keystone-rxt:2023.1-ubuntu_jammy"
-    keystone_credential_setup: "ghcr.io/rackerlabs/genestack/keystone-rxt:2023.1-ubuntu_jammy"
-    keystone_db_sync: "ghcr.io/rackerlabs/genestack/keystone-rxt:2023.1-ubuntu_jammy"
-    keystone_domain_manage: "ghcr.io/rackerlabs/genestack/keystone-rxt:2023.1-ubuntu_jammy"
-    keystone_fernet_rotate: "ghcr.io/rackerlabs/genestack/keystone-rxt:2023.1-ubuntu_jammy"
-    keystone_fernet_setup: "ghcr.io/rackerlabs/genestack/keystone-rxt:2023.1-ubuntu_jammy"
-    ks_user: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
+    bootstrap: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    db_init: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    db_drop: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    keystone_api: "docker.io/aedan/keystone:2024.1-ubuntu_jammy"
+    keystone_bootstrap: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    keystone_credential_rotate: "docker.io/aedan/keystone:2024.1-ubuntu_jammy"
+    keystone_credential_setup: "docker.io/aedan/keystone:2024.1-ubuntu_jammy"
+    keystone_db_sync: "docker.io/aedan/keystone:2024.1-ubuntu_jammy"
+    keystone_domain_manage: "docker.io/aedan/keystone:2024.1-ubuntu_jammy"
+    keystone_fernet_rotate: "docker.io/aedan/keystone:2024.1-ubuntu_jammy"
+    keystone_fernet_setup: "docker.io/aedan/keystone:2024.1-ubuntu_jammy"
+    ks_user: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
     test: docker.io/xrally/xrally-openstack:2.0.0
     rabbit_init: docker.io/rabbitmq:3.7-management
-    keystone_credential_cleanup: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
+    keystone_credential_cleanup: "docker.io/aedan/heat:2024.1-ubuntu_jammy"
     dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
     image_repo_sync: docker.io/docker:17.07.0
   pull_policy: "IfNotPresent"
@@ -513,6 +513,9 @@ conf:
     credential:
       key_repository: /etc/keystone/credential-keys/
     database:
+      idle_timeout: 3600
+      connection_recycle_time: 3600
+      pool_timeout: 60
       max_retries: -1
     cache:
       enabled: true
@@ -547,10 +550,10 @@
       # NOTE(vdrok): The following two options have effect only for SQL backend
       lockout_failure_attempts: 5
       lockout_duration: 1800
-    auth:
-      methods: password,token,application_credential,totp
-      password: rxt
-      totp: rxt
+#    auth:
+#      methods: password,token,application_credential,rxt
+#      password: rxt
+#      totp: rxt
 
     # NOTE(lamt) We can leverage multiple domains with different
     # configurations as outlined in
@@ -978,7 +981,7 @@ endpoints:
       username: keystone
       password: password
     hosts:
-      default: mariadb-galera-primary
+      default: mariadb-cluster-primary
     host_fqdn_override:
       default: null
     path: /keystone
diff --git a/helm-configs/neutron/neutron-helm-overrides.yaml b/helm-configs/neutron/neutron-helm-overrides.yaml
index 4c3ef005..77d67366 100644
--- a/helm-configs/neutron/neutron-helm-overrides.yaml
+++ b/helm-configs/neutron/neutron-helm-overrides.yaml
@@ -2,24 +2,24 @@
 release_group: null
 images:
   tags:
-    bootstrap: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    db_init: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    db_drop: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    ks_user: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    ks_service: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    ks_endpoints: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    neutron_db_sync: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy"
-    neutron_dhcp: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy"
-    neutron_l3: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy"
-    neutron_l2gw: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy"
-    neutron_linuxbridge_agent: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy"
-    neutron_metadata: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy"
-    neutron_ovn_metadata: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy"
-    neutron_openvswitch_agent: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy"
-    neutron_server: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy"
-    neutron_rpc_server: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy"
-    neutron_bagpipe_bgp: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy"
-    neutron_netns_cleanup_cron: "docker.io/openstackhelm/neutron:2023.1-ubuntu_jammy"
+    bootstrap: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    db_init: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    db_drop: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_user: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_service: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_endpoints: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    neutron_db_sync: "docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy"
+    neutron_dhcp: "docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy"
+    neutron_l3: "docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy"
+    neutron_l2gw: "docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy"
+    neutron_linuxbridge_agent: "docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy"
+    neutron_metadata: "docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy"
+    neutron_ovn_metadata: "docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy"
+    neutron_openvswitch_agent: "docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy"
+    neutron_server: "docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy"
+    neutron_rpc_server: "docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy"
+    neutron_bagpipe_bgp: "docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy"
+    neutron_netns_cleanup_cron: "docker.io/openstackhelm/neutron:2024.1-ubuntu_jammy"
     test: docker.io/xrally/xrally-openstack:2.0.0
     purge_test: docker.io/openstackhelm/ospurge:latest
     rabbit_init: docker.io/rabbitmq:3.7-management
@@ -1779,6 +1779,15 @@
     oslo_concurrency:
       lock_path: /var/lib/neutron/tmp
     database:
+      mysql_sql_mode: ""
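+      # NOTE (assumption): connection_debug 100 logs every SQL statement and
+      # connection_trace records the originating Python call site; these are
+      # verbose troubleshooting aids rather than steady-state settings.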
+      connection_debug: 100
+      connection_trace: true
+      use_db_reconnect: True
+      idle_timeout: 3600
+      connection_recycle_time: 3600
       max_retries: -1
     agent:
       availability_zone: nova
@@ -2205,7 +2214,7 @@ endpoints:
       username: neutron
       password: password
     hosts:
-      default: mariadb-galera-primary
+      default: mariadb-cluster-primary
     host_fqdn_override:
       default: null
     path: /neutron
diff --git a/helm-configs/nova/nova-helm-overrides.yaml b/helm-configs/nova/nova-helm-overrides.yaml
index f9a97a6d..7cadd6a0 100644
--- a/helm-configs/nova/nova-helm-overrides.yaml
+++ b/helm-configs/nova/nova-helm-overrides.yaml
@@ -17,7 +17,6 @@
 
 ---
 release_group: null
-
 labels:
   agent:
     compute:
@@ -50,32 +49,31 @@ labels:
   test:
     node_selector_key: openstack-control-plane
     node_selector_value: enabled
-
 images:
   pull_policy: IfNotPresent
   tags:
-    bootstrap: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    db_drop: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    db_init: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    ks_user: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    ks_service: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    ks_endpoints: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    nova_api: "docker.io/openstackhelm/nova:2023.1-ubuntu_jammy"
-    nova_cell_setup: "docker.io/openstackhelm/nova:2023.1-ubuntu_jammy"
-    nova_cell_setup_init: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy"
-    nova_compute: "ghcr.io/rackerlabs/genestack/nova-efi:2023.1-ubuntu_jammy"
-    nova_compute_ssh: "ghcr.io/rackerlabs/genestack/nova-efi:2023.1-ubuntu_jammy"
-    nova_conductor: "docker.io/openstackhelm/nova:2023.1-ubuntu_jammy"
-    nova_db_sync: "docker.io/openstackhelm/nova:2023.1-ubuntu_jammy"
-    nova_novncproxy: "docker.io/openstackhelm/nova:2023.1-ubuntu_jammy"
-    nova_novncproxy_assets: "docker.io/openstackhelm/nova:2023.1-ubuntu_jammy"
-    nova_scheduler: "docker.io/openstackhelm/nova:2023.1-ubuntu_jammy"
-    nova_spiceproxy: "docker.io/openstackhelm/nova:2023.1-ubuntu_jammy"
-    nova_spiceproxy_assets: "docker.io/openstackhelm/nova:2023.1-ubuntu_jammy"
+    bootstrap: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    db_drop: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    db_init: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_user: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_service: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    ks_endpoints: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    nova_api: "docker.io/openstackhelm/nova:2024.1-ubuntu_jammy"
+    nova_cell_setup: "docker.io/openstackhelm/nova:2024.1-ubuntu_jammy"
+    nova_cell_setup_init: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy"
+    nova_compute: "docker.io/openstackhelm/nova:2024.1-ubuntu_jammy"
+    nova_compute_ssh: "docker.io/openstackhelm/nova:2024.1-ubuntu_jammy"
+    nova_conductor: "docker.io/openstackhelm/nova:2024.1-ubuntu_jammy"
+    nova_db_sync: "docker.io/openstackhelm/nova:2024.1-ubuntu_jammy"
+    nova_novncproxy: "docker.io/openstackhelm/nova:2024.1-ubuntu_jammy"
+    nova_novncproxy_assets: "docker.io/openstackhelm/nova:2024.1-ubuntu_jammy"
+    nova_scheduler: "docker.io/openstackhelm/nova:2024.1-ubuntu_jammy"
+    nova_spiceproxy: "docker.io/openstackhelm/nova:2024.1-ubuntu_jammy"
+    nova_spiceproxy_assets: "docker.io/openstackhelm/nova:2024.1-ubuntu_jammy"
     nova_service_cleaner: "docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal"
     dep_check: 'quay.io/airshipit/kubernetes-entrypoint:v1.0.0'
     rabbit_init: docker.io/rabbitmq:3.7-management
-    nova_archive_deleted_rows: "docker.io/openstackhelm/nova:2023.1-ubuntu_jammy"
+    nova_archive_deleted_rows: "docker.io/openstackhelm/nova:2024.1-ubuntu_jammy"
     nova_compute_ironic: 'docker.io/kolla/ubuntu-source-nova-compute-ironic:wallaby'
     nova_storage_init: 'docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_focal'
     test: docker.io/xrally/xrally-openstack:2.0.0
@@ -86,7 +84,6 @@ images:
   exclude:
     - dep_check
     - image_repo_sync
-
 jobs:
   # NOTE(portdirect): When using cells new nodes will be added to the cell on the hour by default.
   # TODO(portdirect): Add a post-start action to nova compute pods that registers themselves.
@@ -115,7 +112,6 @@ jobs:
     history:
       success: 3
      failed: 1
-
 bootstrap:
   enabled: true
   ks_user: admin
@@ -201,7 +197,6 @@ bootstrap:
         sleep $SLEEP
       fi
     done
-
 network:
   # provide what type of network wiring will be used
   # possible options: openvswitch, linuxbridge, sriov
@@ -266,7 +261,6 @@ network:
     - ed25519
   private_key: 'null'
   public_key: 'null'
-
 dependencies:
   dynamic:
     common:
@@ -478,7 +472,6 @@ dependencies:
       services:
         - endpoint: internal
           service: local_image_registry
-
 console:
   # serial | spice | novnc | none
   console_kind: novnc
@@ -506,17 +499,14 @@ console:
     # or set network cidr
     vncserver_proxyclient_network_cidr: 0/0
     address_search_enabled: true
-
 ceph_client:
   configmap: ceph-etc
   user_secret_name: pvc-ceph-client-key
-
 rbd_pool:
   app_name: nova-vms
   replication: 3
   crush_rule: replicated_rule
   chunk_size: 8
-
 conf:
   security: |
     #
@@ -1567,7 +1557,6 @@ conf:
     before:
       enabled: false
      date: 'nil'
-
 # Names of secrets used by bootstrap and environmental checks
 secrets:
   identity:
@@ -1607,7 +1596,6 @@ secrets:
         internal: nova-spiceproxy-tls-proxy
   oci_image_registry:
     nova: nova-oci-image-registry
-
 # typically overridden by environmental
 # values, but should include all endpoints
 # required by this chart
@@ -1652,7 +1640,7 @@ endpoints:
       username: nova
       password: password
     hosts:
-      default: mariadb-galera-primary
+      default: mariadb-cluster-primary
     host_fqdn_override:
       default: null
     path: /nova
@@ -1669,7 +1657,7 @@
       username: nova
       password: password
     hosts:
-      default: mariadb-galera-primary
+      default: mariadb-cluster-primary
     host_fqdn_override:
      default: null
     path: /nova_api
@@ -1686,7 +1674,7 @@
       username: nova
       password: password
     hosts:
-      default: mariadb-galera-primary
+      default: mariadb-cluster-primary
     host_fqdn_override:
       default: null
     path: /nova_cell0
@@ -2016,7 +2004,6 @@ endpoints:
     port:
       ingress:
        default: 80
-
 pod:
   probes:
     rpc_timeout: 60
@@ -2252,12 +2239,12 @@ pod:
     nova:
       enabled: false
       tolerations:
-      - key: node-role.kubernetes.io/master
-        operator: Exists
-        effect: NoSchedule
-      - key: node-role.kubernetes.io/control-plane
-        operator: Exists
-        effect: NoSchedule
+        - key: node-role.kubernetes.io/master
+          operator: Exists
+          effect: NoSchedule
+        - key: node-role.kubernetes.io/control-plane
+          operator: Exists
+          effect: NoSchedule
   mounts:
     nova_compute:
       init_container: null
@@ -2511,7 +2498,6 @@ pod:
       limits:
         memory: "1024Mi"
        cpu: "2000m"
-
 network_policy:
   nova:
     # TODO(lamt): Need to tighten this ingress for security.
@@ -2519,20 +2505,16 @@ network_policy:
       - {}
     egress:
       - {}
-
 # NOTE(helm_hook): helm_hook might break for helm2 binary.
 # set helm3_hook: false when using the helm2 binary.
helm3_hook: true - health_probe: logging: level: ERROR - tls: identity: false oslo_messaging: false oslo_db: false - manifests: certificates: false configmap_bin: true diff --git a/helm-configs/octavia/octavia-helm-overrides.yaml b/helm-configs/octavia/octavia-helm-overrides.yaml index 1a30a9e2..eee143ad 100644 --- a/helm-configs/octavia/octavia-helm-overrides.yaml +++ b/helm-configs/octavia/octavia-helm-overrides.yaml @@ -21,13 +21,13 @@ labels: images: tags: test: docker.io/xrally/xrally-openstack:2.0.0 - bootstrap: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy" - db_init: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy" - db_drop: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy" + bootstrap: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy" + db_init: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy" + db_drop: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy" rabbit_init: docker.io/rabbitmq:3.7-management - ks_user: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy" - ks_service: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy" - ks_endpoints: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy" + ks_user: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy" + ks_service: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy" + ks_endpoints: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy" dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 image_repo_sync: docker.io/docker:17.07.0 octavia_db_sync: ghcr.io/rackerlabs/genestack/octavia-ovn:master-ubuntu_jammy @@ -35,7 +35,7 @@ images: octavia_worker: ghcr.io/rackerlabs/genestack/octavia-ovn:master-ubuntu_jammy octavia_housekeeping: ghcr.io/rackerlabs/genestack/octavia-ovn:master-ubuntu_jammy octavia_health_manager: ghcr.io/rackerlabs/genestack/octavia-ovn:master-ubuntu_jammy - octavia_health_manager_init: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy" + octavia_health_manager_init: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy" openvswitch_vswitchd: docker.io/kolla/centos-source-openvswitch-vswitchd:rocky pull_policy: "IfNotPresent" local_registry: @@ -466,7 +466,7 @@ endpoints: username: octavia password: password hosts: - default: mariadb-galera-primary + default: mariadb-cluster-primary host_fqdn_override: default: null path: /octavia diff --git a/helm-configs/placement/placement-helm-overrides.yaml b/helm-configs/placement/placement-helm-overrides.yaml index 9d85dd6e..b58a6e96 100644 --- a/helm-configs/placement/placement-helm-overrides.yaml +++ b/helm-configs/placement/placement-helm-overrides.yaml @@ -11,13 +11,13 @@ labels: images: pull_policy: IfNotPresent tags: - placement: "docker.io/openstackhelm/placement:2023.1-ubuntu_jammy" - ks_user: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy" - ks_service: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy" - ks_endpoints: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy" - db_init: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy" - db_drop: "docker.io/openstackhelm/heat:2023.1-ubuntu_jammy" - placement_db_sync: "docker.io/openstackhelm/placement:2023.1-ubuntu_jammy" + placement: "docker.io/openstackhelm/placement:2024.1-ubuntu_jammy" + ks_user: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy" + ks_service: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy" + ks_endpoints: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy" + db_init: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy" + db_drop: "docker.io/openstackhelm/heat:2024.1-ubuntu_jammy" + placement_db_sync: "docker.io/openstackhelm/placement:2024.1-ubuntu_jammy" dep_check: 
"quay.io/airshipit/kubernetes-entrypoint:v1.0.0" image_repo_sync: "docker.io/docker:17.07.0" local_registry: @@ -206,7 +206,7 @@ endpoints: username: nova password: password hosts: - default: mariadb-galera-primary + default: mariadb-cluster-primary host_fqdn_override: default: null path: /placement diff --git a/kustomize/cinder/base/cinder-mariadb-database.yaml b/kustomize/cinder/base/cinder-mariadb-database.yaml index 94076c21..1f579f45 100644 --- a/kustomize/cinder/base/cinder-mariadb-database.yaml +++ b/kustomize/cinder/base/cinder-mariadb-database.yaml @@ -10,7 +10,7 @@ spec: # If you want the database to be created with a different name than the resource name # name: data-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster characterSet: utf8 collate: utf8_general_ci retryInterval: 5s @@ -26,7 +26,7 @@ spec: # If you want the user to be created with a different name than the resource name # name: user-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster passwordSecretKeyRef: name: cinder-db-password key: password @@ -44,7 +44,7 @@ metadata: helm.sh/resource-policy: keep spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "ALL" database: "cinder" diff --git a/kustomize/designate/aio/kustomization.yaml b/kustomize/designate/aio/kustomization.yaml new file mode 100644 index 00000000..64d1224c --- /dev/null +++ b/kustomize/designate/aio/kustomization.yaml @@ -0,0 +1,14 @@ +bases: + - ../base + +patches: + - target: + kind: HorizontalPodAutoscaler + name: designate-api + patch: |- + - op: replace + path: /spec/minReplicas + value: 1 + - op: replace + path: /spec/maxReplicas + value: 1 diff --git a/kustomize/designate/base/designate-mariadb-database.yaml b/kustomize/designate/base/designate-mariadb-database.yaml new file mode 100644 index 00000000..0151497e --- /dev/null +++ b/kustomize/designate/base/designate-mariadb-database.yaml @@ -0,0 +1,55 @@ +--- +apiVersion: k8s.mariadb.com/v1alpha1 +kind: Database +metadata: + name: designate + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + # If you want the database to be created with a different name than the resource name + # name: data-custom + mariaDbRef: + name: mariadb-cluster + characterSet: utf8 + collate: utf8_general_ci + retryInterval: 5s +--- +apiVersion: k8s.mariadb.com/v1alpha1 +kind: User +metadata: + name: designate + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + # If you want the user to be created with a different name than the resource name + # name: user-custom + mariaDbRef: + name: mariadb-cluster + passwordSecretKeyRef: + name: designate-db-password + key: password + # This field is immutable and defaults to 10, 0 means unlimited. 
+ maxUserConnections: 0 + host: "%" + retryInterval: 5s +--- +apiVersion: k8s.mariadb.com/v1alpha1 +kind: Grant +metadata: + name: designate-grant + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + mariaDbRef: + name: mariadb-cluster + privileges: + - "ALL" + database: "designate" + table: "*" + username: designate + grantOption: true + host: "%" + retryInterval: 5s diff --git a/kustomize/designate/base/designate-rabbitmq-queue.yaml b/kustomize/designate/base/designate-rabbitmq-queue.yaml new file mode 100644 index 00000000..a7113d66 --- /dev/null +++ b/kustomize/designate/base/designate-rabbitmq-queue.yaml @@ -0,0 +1,81 @@ +--- +apiVersion: rabbitmq.com/v1beta1 +kind: User +metadata: + name: designate + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + tags: + - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' + - policymaker + rabbitmqClusterReference: + name: rabbitmq # rabbitmqCluster must exist in the same namespace as this resource + namespace: openstack + importCredentialsSecret: + name: designate-rabbitmq-password +--- +apiVersion: rabbitmq.com/v1beta1 +kind: Vhost +metadata: + name: designate-vhost + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + name: "designate" # vhost name; required and cannot be updated + defaultQueueType: quorum # default queue type for this vhost; requires RabbitMQ version 3.11.12 or above + rabbitmqClusterReference: + name: rabbitmq # rabbitmqCluster must exist in the same namespace as this resource + namespace: openstack +# status: +# conditions: +# - lastTransitionTime: "" +# status: "True" # true, false, or unknown +# type: Ready +# Reason: "SuccessfulCreateOrUpdate" # status false results in reason FailedCreateOrUpdate +# Message: "" # set when status is false +--- +apiVersion: rabbitmq.com/v1beta1 +kind: Queue +metadata: + name: designate-queue + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + name: designate-qq # name of the queue + vhost: "designate" # defaults to '/' if not provided + type: quorum # without providing a queue type, rabbitmq creates a classic queue + autoDelete: false + durable: true # setting 'durable' to false means this queue won't survive a server restart + rabbitmqClusterReference: + name: rabbitmq # rabbitmqCluster must exist in the same namespace as this resource + namespace: openstack +--- +apiVersion: rabbitmq.com/v1beta1 +kind: Permission +metadata: + name: designate-permission + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + vhost: "designate" # name of a vhost + userReference: + name: "designate" # name of a user.rabbitmq.com in the same namespace; must specify either spec.userReference or spec.user + permissions: + write: ".*" + configure: ".*" + read: ".*" + rabbitmqClusterReference: + name: rabbitmq # rabbitmqCluster must exist in the same namespace as this resource + namespace: openstack +# status: +# conditions: +# - lastTransitionTime: "" +# status: "True" # true, false, or unknown +# type: Ready +# Reason: "SuccessfulCreateOrUpdate" # status false results in reason FailedCreateOrUpdate +# Message: "" # set when status is false diff --git a/kustomize/designate/base/hpa-designate-api.yaml b/kustomize/designate/base/hpa-designate-api.yaml new file mode 100644 index 00000000..4eca66f1 --- /dev/null +++ b/kustomize/designate/base/hpa-designate-api.yaml @@ -0,0 +1,19 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: +
name: designate-api + namespace: openstack +spec: + maxReplicas: 9 + minReplicas: 3 + metrics: + - resource: + name: cpu + target: + averageUtilization: 50 + type: Utilization + type: Resource + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: designate-api diff --git a/kustomize/designate/base/kustomization.yaml b/kustomize/designate/base/kustomization.yaml new file mode 100644 index 00000000..a67aeb43 --- /dev/null +++ b/kustomize/designate/base/kustomization.yaml @@ -0,0 +1,5 @@ +resources: + - designate-mariadb-database.yaml + - designate-rabbitmq-queue.yaml + - all.yaml + - hpa-designate-api.yaml diff --git a/kustomize/designate/letsencrypt/kustomization.yaml b/kustomize/designate/letsencrypt/kustomization.yaml new file mode 100644 index 00000000..5fb919f6 --- /dev/null +++ b/kustomize/designate/letsencrypt/kustomization.yaml @@ -0,0 +1,13 @@ +bases: + - ../base + +patches: + - target: + kind: Ingress + name: designate-namespace-fqdn + patch: |- + - op: add + path: /metadata/annotations + value: + cert-manager.io/cluster-issuer: letsencrypt-prod + acme.cert-manager.io/http01-edit-in-place: "true" diff --git a/kustomize/glance/base/glance-mariadb-database.yaml b/kustomize/glance/base/glance-mariadb-database.yaml index 5f3f540b..721d45b6 100644 --- a/kustomize/glance/base/glance-mariadb-database.yaml +++ b/kustomize/glance/base/glance-mariadb-database.yaml @@ -10,7 +10,7 @@ spec: # If you want the database to be created with a different name than the resource name # name: data-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster characterSet: utf8 collate: utf8_general_ci retryInterval: 5s @@ -26,7 +26,7 @@ spec: # If you want the user to be created with a different name than the resource name # name: user-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster passwordSecretKeyRef: name: glance-db-password key: password @@ -44,7 +44,7 @@ metadata: helm.sh/resource-policy: keep spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "ALL" database: "glance" diff --git a/kustomize/grafana/base/grafana-database.yaml b/kustomize/grafana/base/grafana-database.yaml index f8e57070..d5a28552 100644 --- a/kustomize/grafana/base/grafana-database.yaml +++ b/kustomize/grafana/base/grafana-database.yaml @@ -11,7 +11,7 @@ spec: # If you want the database to be created with a different name than the resource name # name: data-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster characterSet: utf8 collate: utf8_general_ci requeueInterval: 30s @@ -28,7 +28,7 @@ spec: # If you want the user to be created with a different name than the resource name # name: user-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster passwordSecretKeyRef: name: grafana-db key: password @@ -47,7 +47,7 @@ metadata: helm.sh/resource-policy: keep spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "ALL" database: "grafana" diff --git a/kustomize/heat/base/heat-mariadb-database.yaml b/kustomize/heat/base/heat-mariadb-database.yaml index 76bd3eac..24d0b907 100644 --- a/kustomize/heat/base/heat-mariadb-database.yaml +++ b/kustomize/heat/base/heat-mariadb-database.yaml @@ -10,7 +10,7 @@ spec: # If you want the database to be created with a different name than the resource name # name: data-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster characterSet: utf8 collate: utf8_general_ci retryInterval: 5s @@ -26,7 +26,7 @@ spec: # If you want the user to be created with a different name than the resource name 
# name: user-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster passwordSecretKeyRef: name: heat-db-password key: password @@ -44,7 +44,7 @@ metadata: helm.sh/resource-policy: keep spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "ALL" database: "heat" diff --git a/kustomize/horizon/base/horizon-mariadb-database.yaml b/kustomize/horizon/base/horizon-mariadb-database.yaml index 2daf6706..c0c2bf7b 100644 --- a/kustomize/horizon/base/horizon-mariadb-database.yaml +++ b/kustomize/horizon/base/horizon-mariadb-database.yaml @@ -10,7 +10,7 @@ spec: # If you want the database to be created with a different name than the resource name # name: data-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster characterSet: utf8 collate: utf8_general_ci retryInterval: 5s @@ -26,7 +26,7 @@ spec: # If you want the user to be created with a different name than the resource name # name: user-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster passwordSecretKeyRef: name: horizon-db-password key: password @@ -44,7 +44,7 @@ metadata: helm.sh/resource-policy: keep spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "ALL" database: "horizon" diff --git a/kustomize/keystone/base/keystone-mariadb-database.yaml b/kustomize/keystone/base/keystone-mariadb-database.yaml index 94865021..1af8b1fc 100644 --- a/kustomize/keystone/base/keystone-mariadb-database.yaml +++ b/kustomize/keystone/base/keystone-mariadb-database.yaml @@ -10,7 +10,7 @@ spec: # If you want the database to be created with a different name than the resource name # name: data-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster characterSet: utf8 collate: utf8_general_ci retryInterval: 5s @@ -26,7 +26,7 @@ spec: # If you want the user to be created with a different name than the resource name # name: user-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster passwordSecretKeyRef: name: keystone-db-password key: password @@ -44,7 +44,7 @@ metadata: helm.sh/resource-policy: keep spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "ALL" database: "keystone" diff --git a/kustomize/libvirt/helm/libvirt-helm-overrides.yaml b/kustomize/libvirt/helm/libvirt-helm-overrides.yaml index 7e7af52c..ddc63363 100644 --- a/kustomize/libvirt/helm/libvirt-helm-overrides.yaml +++ b/kustomize/libvirt/helm/libvirt-helm-overrides.yaml @@ -1,14 +1,12 @@ release_group: null - labels: agent: libvirt: node_selector_key: openstack-compute-node node_selector_value: enabled - images: tags: - libvirt: docker.io/openstackhelm/libvirt:2023.2-ubuntu_jammy # We want to use jammy. 2023.2 is the latest version that supports jammy. + libvirt: docker.io/openstackhelm/libvirt:2023.2-ubuntu_jammy # We want to use jammy. 2023.2 is the latest version that supports jammy. 
libvirt_exporter: vexxhost/libvirtd-exporter:latest ceph_config_helper: 'docker.io/openstackhelm/ceph-config-helper:ubuntu_focal_18.2.0-1-20231013' dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 @@ -20,13 +18,11 @@ images: exclude: - dep_check - image_repo_sync - network: # provide what type of network wiring will be used # possible options: ovn, openvswitch, linuxbridge, sriov backend: - ovn - endpoints: cluster_domain_suffix: cluster.local local_image_registry: @@ -60,21 +56,18 @@ endpoints: port: metrics: default: 9474 - network_policy: libvirt: ingress: - {} egress: - {} - ceph_client: configmap: ceph-etc user_secret_name: pvc-ceph-client-key - conf: ceph: - enabled: false # Set to true when we has ceph support for openstack. + enabled: false # Set to true when we have ceph support for openstack. admin_keyring: null cinder: user: "cinder" @@ -158,7 +151,6 @@ conf: kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.tls\.crt}' | base64 -d > /tmp/${TYPE}.crt kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.tls\.key}' | base64 -d > /tmp/${TYPE}.key kubectl -n ${POD_NAMESPACE} get secret ${POD_NAME}-${TYPE} -o jsonpath='{.data.ca\.crt}' | base64 -d > /tmp/${TYPE}-ca.crt - pod: probes: libvirt: @@ -191,7 +183,6 @@ pod: privileged: true sidecars: libvirt_exporter: false - affinity: anti: type: @@ -204,12 +195,12 @@ pod: libvirt: enabled: false tolerations: - - key: node-role.kubernetes.io/master - operator: Exists - effect: NoSchedule - - key: node-role.kubernetes.io/control-plane - operator: Exists - effect: NoSchedule + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + operator: Exists + effect: NoSchedule dns_policy: "ClusterFirstWithHostNet" mounts: libvirt: @@ -244,7 +235,6 @@ pod: cpu: "100m" limits: memory: "256Mi" - dependencies: dynamic: common: @@ -257,11 +247,11 @@ dependencies: targeted: ovn: libvirt: - pod: [] # In a hybrid deployment, we don't want to run ovn-controller on the same node as libvirt - # - requireSameNode: true - # labels: - # application: ovn - # component: ovn-controller + pod: [] # In a hybrid deployment, we don't want to run ovn-controller on the same node as libvirt + # - requireSameNode: true + # labels: + # application: ovn + # component: ovn-controller openvswitch: libvirt: pod: @@ -290,7 +280,6 @@ dependencies: services: - endpoint: internal service: local_image_registry - manifests: configmap_bin: true configmap_etc: true @@ -299,7 +288,6 @@ manifests: network_policy: false role_cert_manager: false secret_registry: true - secrets: oci_image_registry: libvirt: libvirt-oci-image-registry-key diff --git a/kustomize/mariadb-cluster/aio/kustomization.yaml b/kustomize/mariadb-cluster/aio/kustomization.yaml index 13943e97..a4f71afb 100644 --- a/kustomize/mariadb-cluster/aio/kustomization.yaml +++ b/kustomize/mariadb-cluster/aio/kustomization.yaml @@ -4,11 +4,8 @@ bases: patches: - target: kind: MariaDB - name: mariadb-galera + name: mariadb-cluster patch: |- - - op: replace - path: /spec/affinity/enableAntiAffinity - value: false - op: replace path: /spec/replicas - value: 2 + value: 1 diff --git a/kustomize/mariadb-cluster/base/kustomization.yaml b/kustomize/mariadb-cluster/base/kustomization.yaml index f297b151..d1d6323a 100644 --- a/kustomize/mariadb-cluster/base/kustomization.yaml +++ b/kustomize/mariadb-cluster/base/kustomization.yaml @@ -1,4 +1,4 @@ resources: - mariadb-configmap.yaml - - mariadb-galera.yaml + -
mariadb-replication.yaml - mariadb-backup.yaml diff --git a/kustomize/mariadb-cluster/base/mariadb-backup.yaml b/kustomize/mariadb-cluster/base/mariadb-backup.yaml index da665b54..5c968d0f 100644 --- a/kustomize/mariadb-cluster/base/mariadb-backup.yaml +++ b/kustomize/mariadb-cluster/base/mariadb-backup.yaml @@ -5,7 +5,7 @@ metadata: namespace: openstack spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster namespace: openstack schedule: cron: "0 0 * * *" diff --git a/kustomize/mariadb-cluster/base/mariadb-configmap.yaml b/kustomize/mariadb-cluster/base/mariadb-configmap.yaml index 1742bde5..9a1581e9 100644 --- a/kustomize/mariadb-cluster/base/mariadb-configmap.yaml +++ b/kustomize/mariadb-cluster/base/mariadb-configmap.yaml @@ -1,7 +1,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: mariadb-galera + name: mariadb-cluster data: UMASK: "0660" UMASK_DIR: "0750" diff --git a/kustomize/mariadb-cluster/base/mariadb-galera.yaml b/kustomize/mariadb-cluster/base/mariadb-galera.yaml deleted file mode 100644 index 83bfae12..00000000 --- a/kustomize/mariadb-cluster/base/mariadb-galera.yaml +++ /dev/null @@ -1,158 +0,0 @@ -apiVersion: k8s.mariadb.com/v1alpha1 -kind: MariaDB -metadata: - name: mariadb-galera - namespace: openstack -spec: - rootPasswordSecretKeyRef: - name: mariadb - key: root-password - username: mariadb - database: mariadb - image: mariadb:10.11.7 - - storage: - size: 10Gi - storageClassName: general - resizeInUseVolumes: true - waitForVolumeResize: true - volumeClaimTemplate: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 10Gi - storageClassName: general - - replicas: 3 - podSecurityContext: - runAsUser: 0 - - galera: - enabled: true - primary: - podIndex: 0 - automaticFailover: true - sst: mariabackup - availableWhenDonor: false - galeraLibPath: /usr/lib/galera/libgalera_smm.so - replicaThreads: 1 - agent: - image: ghcr.io/mariadb-operator/mariadb-operator:v0.0.27 - port: 5555 - kubernetesAuth: - enabled: true - gracefulShutdownTimeout: 1s - recovery: - enabled: true - minClusterSize: 50% - clusterHealthyTimeout: 30s - clusterBootstrapTimeout: 10m - podRecoveryTimeout: 3m - podSyncTimeout: 3m - initContainer: - image: ghcr.io/mariadb-operator/mariadb-operator:v0.0.27 - config: - reuseStorageVolume: false - volumeClaimTemplate: - resources: - requests: - storage: 10Gi - accessModes: - - ReadWriteOnce - - service: - type: LoadBalancer - annotations: - metallb.universe.tf/address-pool: primary - connection: - secretName: mariadb-galera-conn - secretTemplate: - key: dsn - - primaryService: - type: LoadBalancer - annotations: - metallb.universe.tf/address-pool: primary - primaryConnection: - secretName: mariadb-galera-conn-primary - secretTemplate: - key: dsn - - secondaryService: - type: LoadBalancer - annotations: - metallb.universe.tf/address-pool: primary - secondaryConnection: - secretName: mariadb-galera-conn-secondary - secretTemplate: - key: dsn - - affinity: - enableAntiAffinity: true - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: node-role.kubernetes.io/worker - operator: In - values: - - worker - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: app.kubernetes.io/instance - operator: In - values: - - mariadb-galera - topologyKey: kubernetes.io/hostname - - tolerations: - - key: "k8s.mariadb.com/ha" - operator: "Exists" - effect: "NoSchedule" - - podDisruptionBudget: - maxUnavailable: 40% - - 
updateStrategy: - type: RollingUpdate - - myCnf: | - [mariadb] - bind-address=* - default_storage_engine=InnoDB - binlog_format=ROW - innodb_autoinc_lock_mode=2 - max_allowed_packet=256M - max_connections=10240 - open_files_limit=10240 - max-connect-errors=1000000 - wsrep_retry_autocommit=0 - innodb_rollback_on_timeout=1 - performance_schema=ON - innodb_log_buffer_size=33554432 - wsrep_slave_threads=144 - wsrep_sync_wait=0 - innodb_flush_log_at_trx_commit=0 - ignore-db-dir=lost+found - skip-name-resolve - innodb_buffer_pool_size=4G - innodb_doublewrite=0 - innodb_file_format=Barracuda - innodb_file_per_table=1 - innodb_flush_method=O_DIRECT - innodb_io_capacity=500 - innodb_locks_unsafe_for_binlog=1 - innodb_log_file_size=1G - innodb_old_blocks_time=1000 - innodb_read_io_threads=8 - innodb_write_io_threads=8 - - resources: - requests: - memory: 256Mi - - metrics: - enabled: true diff --git a/kustomize/mariadb-cluster/base/mariadb-replication.yaml b/kustomize/mariadb-cluster/base/mariadb-replication.yaml new file mode 100644 index 00000000..45481eb0 --- /dev/null +++ b/kustomize/mariadb-cluster/base/mariadb-replication.yaml @@ -0,0 +1,105 @@ +apiVersion: k8s.mariadb.com/v1alpha1 +kind: MariaDB +metadata: + name: mariadb-cluster + namespace: openstack +spec: + rootPasswordSecretKeyRef: + name: mariadb + key: root-password + generate: false + username: mariadb + database: mariadb + image: mariadb:10.11.7 + + storage: + size: 10Gi + storageClassName: general + resizeInUseVolumes: true + waitForVolumeResize: true + volumeClaimTemplate: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: general + + replicas: 3 + podSecurityContext: + runAsUser: 0 + + replication: + enabled: true + + service: + type: LoadBalancer + metadata: + annotations: + metallb.universe.tf/address-pool: primary + connection: + secretName: mariadb-cluster-conn + secretTemplate: + key: dsn + + primaryService: + type: LoadBalancer + metadata: + annotations: + metallb.universe.tf/address-pool: primary + primaryConnection: + secretName: mariadb-cluster-conn-primary + secretTemplate: + key: dsn + + secondaryService: + type: LoadBalancer + metadata: + annotations: + metallb.universe.tf/address-pool: primary + secondaryConnection: + secretName: mariadb-cluster-conn-secondary + secretTemplate: + key: dsn + + updateStrategy: + type: RollingUpdate + + myCnf: | + [mariadb] + bind-address=0.0.0.0 + default_storage_engine=InnoDB + binlog_format=ROW + innodb_autoinc_lock_mode=2 + max_allowed_packet=256M + max_connections=10240 + open_files_limit=10240 + max-connect-errors=1000000 + innodb_rollback_on_timeout=1 + performance_schema=ON + innodb_log_buffer_size=33554432 + innodb_flush_log_at_trx_commit=1 + ignore-db-dirs=lost+found + skip-name-resolve + innodb_buffer_pool_size=4G + innodb_doublewrite=0 + innodb_file_per_table=1 + innodb_flush_method=O_DIRECT + innodb_io_capacity=500 + innodb_log_file_size=1G + innodb_old_blocks_time=1000 + innodb_read_io_threads=8 + innodb_write_io_threads=8 + + wait_timeout=3600 + interactive_timeout=3600 + net_read_timeout=360 + net_write_timeout=360 + + sync_binlog=1 + thread_cache_size=16 + table_open_cache=2048 + table_definition_cache=1024 + + metrics: + enabled: true diff --git a/kustomize/mariadb-operator/kustomization.yaml b/kustomize/mariadb-operator/kustomization.yaml index dc9c9148..360fcdda 100644 --- a/kustomize/mariadb-operator/kustomization.yaml +++ b/kustomize/mariadb-operator/kustomization.yaml @@ -32,5 +32,5 @@ helmCharts: values: - worker 
includeCRDs: true - version: 0.27.0 + version: 0.28.1 namespace: mariadb-system diff --git a/kustomize/neutron/base/neutron-mariadb-database.yaml b/kustomize/neutron/base/neutron-mariadb-database.yaml index 36563b22..ae70ccb0 100644 --- a/kustomize/neutron/base/neutron-mariadb-database.yaml +++ b/kustomize/neutron/base/neutron-mariadb-database.yaml @@ -10,7 +10,7 @@ spec: # If you want the database to be created with a different name than the resource name # name: data-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster characterSet: utf8 collate: utf8_general_ci retryInterval: 5s @@ -26,7 +26,7 @@ spec: # If you want the user to be created with a different name than the resource name # name: user-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster passwordSecretKeyRef: name: neutron-db-password key: password @@ -44,7 +44,7 @@ metadata: helm.sh/resource-policy: keep spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "ALL" database: "neutron" diff --git a/kustomize/nova/base/nova-mariadb-database.yaml b/kustomize/nova/base/nova-mariadb-database.yaml index 150af95d..79b2f63e 100644 --- a/kustomize/nova/base/nova-mariadb-database.yaml +++ b/kustomize/nova/base/nova-mariadb-database.yaml @@ -10,7 +10,7 @@ spec: # If you want the database to be created with a different name than the resource name # name: data-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster characterSet: utf8 collate: utf8_general_ci retryInterval: 5s @@ -27,7 +27,7 @@ spec: # name: data-custom name: nova_api mariaDbRef: - name: mariadb-galera + name: mariadb-cluster characterSet: utf8 collate: utf8_general_ci retryInterval: 5s @@ -44,7 +44,7 @@ spec: # name: data-custom name: nova_cell0 mariaDbRef: - name: mariadb-galera + name: mariadb-cluster characterSet: utf8 collate: utf8_general_ci retryInterval: 5s @@ -60,7 +60,7 @@ spec: # If you want the user to be created with a different name than the resource name # name: user-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster passwordSecretKeyRef: name: nova-db-password key: password @@ -78,7 +78,7 @@ metadata: helm.sh/resource-policy: keep spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "ALL" database: "nova" @@ -97,7 +97,7 @@ metadata: helm.sh/resource-policy: keep spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "ALL" database: "nova_api" @@ -116,7 +116,7 @@ metadata: helm.sh/resource-policy: keep spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "ALL" database: "nova_cell0" diff --git a/kustomize/octavia/base/octavia-agent.yaml b/kustomize/octavia/base/octavia-agent.yaml index 94ace373..ba2bb5fc 100644 --- a/kustomize/octavia/base/octavia-agent.yaml +++ b/kustomize/octavia/base/octavia-agent.yaml @@ -81,7 +81,7 @@ spec: - name: PATH value: /usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/ - name: DEPENDENCY_SERVICE - value: "openstack:mariadb-galera-primary,openstack:keystone-api,openstack:rabbitmq-nodes,openstack:memcached,openstack:neutron-server" + value: "openstack:mariadb-cluster-primary,openstack:keystone-api,openstack:rabbitmq-nodes,openstack:memcached,openstack:neutron-server" - name: DEPENDENCY_JOBS value: "octavia-db-sync,octavia-ks-user,octavia-ks-endpoints" - name: DEPENDENCY_DAEMONSET diff --git a/kustomize/octavia/base/octavia-mariadb-database.yaml b/kustomize/octavia/base/octavia-mariadb-database.yaml index 66deeea2..9011233a 100644 --- 
a/kustomize/octavia/base/octavia-mariadb-database.yaml +++ b/kustomize/octavia/base/octavia-mariadb-database.yaml @@ -10,7 +10,7 @@ spec: # If you want the database to be created with a different name than the resource name # name: data-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster characterSet: utf8 collate: utf8_general_ci retryInterval: 5s @@ -26,7 +26,7 @@ spec: # If you want the user to be created with a different name than the resource name # name: user-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster passwordSecretKeyRef: name: octavia-db-password key: password @@ -44,7 +44,7 @@ metadata: helm.sh/resource-policy: keep spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "ALL" database: "octavia" diff --git a/kustomize/placement/base/placement-mariadb-database.yaml b/kustomize/placement/base/placement-mariadb-database.yaml index ff6e1c31..8dd236d1 100644 --- a/kustomize/placement/base/placement-mariadb-database.yaml +++ b/kustomize/placement/base/placement-mariadb-database.yaml @@ -10,7 +10,7 @@ spec: # If you want the database to be created with a different name than the resource name # name: data-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster characterSet: utf8 collate: utf8_general_ci retryInterval: 5s @@ -26,7 +26,7 @@ spec: # If you want the user to be created with a different name than the resource name # name: user-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster passwordSecretKeyRef: name: placement-db-password key: password @@ -44,7 +44,7 @@ metadata: helm.sh/resource-policy: keep spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "ALL" database: "placement" diff --git a/kustomize/prometheus-mysql-exporter/monitoring_user_create.yaml b/kustomize/prometheus-mysql-exporter/monitoring_user_create.yaml index ee2af569..870e03ec 100644 --- a/kustomize/prometheus-mysql-exporter/monitoring_user_create.yaml +++ b/kustomize/prometheus-mysql-exporter/monitoring_user_create.yaml @@ -5,7 +5,7 @@ metadata: spec: name: monitoring mariaDbRef: - name: mariadb-galera + name: mariadb-cluster passwordSecretKeyRef: name: mariadb-monitoring key: password diff --git a/kustomize/prometheus-mysql-exporter/monitoring_user_grant.yaml b/kustomize/prometheus-mysql-exporter/monitoring_user_grant.yaml index f7e6aa88..8e1a5fc5 100644 --- a/kustomize/prometheus-mysql-exporter/monitoring_user_grant.yaml +++ b/kustomize/prometheus-mysql-exporter/monitoring_user_grant.yaml @@ -4,7 +4,7 @@ metadata: name: monitoring-grant spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "SELECT" - "PROCESS" diff --git a/kustomize/prometheus-mysql-exporter/values.yaml b/kustomize/prometheus-mysql-exporter/values.yaml index 65be7878..6c3f11f5 100644 --- a/kustomize/prometheus-mysql-exporter/values.yaml +++ b/kustomize/prometheus-mysql-exporter/values.yaml @@ -169,7 +169,7 @@ collectors: # mysql connection params which build the my.cnf config mysql: db: "" - host: "mariadb-galera" + host: "mariadb-cluster" # config my.cnf https://dev.mysql.com/doc/c-api/8.0/en/mysql-options.html additionalConfig: # - connect-timeout=5 diff --git a/kustomize/prometheus/alerting_rules.yaml b/kustomize/prometheus/alerting_rules.yaml index b7be5d46..8e9085a4 100644 --- a/kustomize/prometheus/alerting_rules.yaml +++ b/kustomize/prometheus/alerting_rules.yaml @@ -1,10 +1,10 @@ additionalPrometheusRulesMap: rabbitmq-alerts: groups: - - name: RabbitMQ Alerts + - name: Prometheus Alerts rules: - alert: 
RabbitQueueSizeTooLarge - expr: rabbitmq_queuesTotal>1600 + expr: rabbitmq_queuesTotal>25 for: 5m labels: severity: critical @@ -46,29 +46,3 @@ additionalPrometheusRulesMap: annotations: summary: MySQL restarted (instance {{ $labels.instance }}) description: "MySQL has just been restarted, less than one minute ago on {{ $labels.instance }}.\n VALUE = {{ $value }}\n LABELS = {{ $labels }}" - blackbox-alerts: - groups: - - name: Blackbox Alerts - rules: - - alert: TLS certificate expiring - expr: (probe_ssl_earliest_cert_expiry - time())/86400 < 60 - labels: - severity: warning - annotations: - summary: "SSL certificate will expire soon on (instance {{ $labels.instance }})" - description: "SSL certificate expires within 60 days\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" - - alert: TLS certificate expiring - expr: (probe_ssl_earliest_cert_expiry - time())/86400 < 30 - labels: - severity: critical - annotations: - summary: "SSL certificate will expire soon on (instance {{ $labels.instance }})" - description: "SSL certificate expires within 30 days\n VALUE = {{ $value }}\n LABELS: {{ $labels }}" - - alert: Service Down - expr: probe_success == 0 - for: 2m - labels: - severity: critical - annotations: - summary: "Service probe has failed for more than two minutes on (instance {{ $labels.instance }})" - description: "Service probe has failed for more than two minutes \n LABELS: {{ $labels }}" diff --git a/kustomize/prometheus/values.yaml b/kustomize/prometheus/values.yaml index 18c97bfe..6e908e47 100644 --- a/kustomize/prometheus/values.yaml +++ b/kustomize/prometheus/values.yaml @@ -2016,16 +2016,9 @@ prometheus-node-exporter: ## jobLabel: node-exporter releaseLabel: true - extraHostVolumeMounts: - - name: text-file-collector - hostPath: /opt/node_exporter/textfile_collector - mountPath: /var/lib/node_exporter/textfile_collector - readOnly: true - mountPropagation: HostToContainer extraArgs: - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/) - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ - - --collector.textfile.directory=/var/lib/node_exporter/textfile_collector service: portName: http-metrics prometheus: diff --git a/kustomize/skyline/base/skyline-mariadb-database.yaml b/kustomize/skyline/base/skyline-mariadb-database.yaml index 37d5ded9..b0e2731c 100644 --- a/kustomize/skyline/base/skyline-mariadb-database.yaml +++ b/kustomize/skyline/base/skyline-mariadb-database.yaml @@ -8,7 +8,7 @@ spec: # If you want the database to be created with a different name than the resource name # name: data-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster characterSet: utf8 collate: utf8_general_ci retryInterval: 5s @@ -22,7 +22,7 @@ spec: # If you want the user to be created with a different name than the resource name # name: user-custom mariaDbRef: - name: mariadb-galera + name: mariadb-cluster passwordSecretKeyRef: name: skyline-apiserver-secrets key: db-password @@ -38,7 +38,7 @@ metadata: namespace: openstack spec: mariaDbRef: - name: mariadb-galera + name: mariadb-cluster privileges: - "ALL" database: "skyline"
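The hunks above replace the Galera-based `MariaDB` resource with an asynchronous primary/replica topology (`mariadb-replication.yaml`) and rename the cluster from `mariadb-galera` to `mariadb-cluster`. As a hedged smoke test, not part of the change set itself, something along these lines can confirm the operator rebuilt the cluster and that replication is flowing; it assumes the `openstack` namespace, the service and secret names defined above, and a MariaDB version that still reports the classic `Slave_*` status columns.

``` shell
# Confirm the renamed MariaDB custom resource exists and reports Ready.
kubectl -n openstack get mariadb mariadb-cluster

# The operator exposes one LoadBalancer service per role.
kubectl -n openstack get svc mariadb-cluster mariadb-cluster-primary mariadb-cluster-secondary

# Check replication health on a replica; both Slave_IO_Running and
# Slave_SQL_Running should report "Yes".
mysql -h $(kubectl -n openstack get service mariadb-cluster-secondary -o jsonpath='{.spec.clusterIP}') \
      -u root \
      -p$(kubectl --namespace openstack get secret mariadb -o jsonpath='{.data.root-password}' | base64 -d) \
      -e 'SHOW SLAVE STATUS\G' | grep -E 'Slave_(IO|SQL)_Running|Seconds_Behind_Master'
```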
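Because the rename touches docs, Helm overrides, and kustomize manifests alike, a single stale `mariadb-galera` reference would leave a chart pointing at a DNS name that no longer resolves. A minimal pre-merge sweep, assuming it runs from the repository root and that a `busybox` image is pullable in the cluster:

``` shell
# Any remaining hit is a reference the rename missed.
grep -rn 'mariadb-galera' docs/ helm-configs/ kustomize/ || echo "no stale references"

# From inside the cluster, the new primary endpoint should resolve.
kubectl -n openstack run dns-check --rm -it --restart=Never --image=busybox:1.36 -- \
  nslookup mariadb-cluster-primary.openstack.svc.cluster.local
```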
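The new designate manifests rely on the RabbitMQ messaging-topology CRDs (`User`, `Vhost`, `Queue`, `Permission`), each of which gains a `Ready` condition once the operator reconciles it. A sketch for verifying the vhost and quorum queue actually landed, assuming the resource names used above:

``` shell
# The topology operator sets a Ready condition on each resource;
# add -o yaml to inspect the condition details.
kubectl -n openstack get users.rabbitmq.com designate
kubectl -n openstack get vhosts.rabbitmq.com designate-vhost
kubectl -n openstack get queues.rabbitmq.com designate-queue
kubectl -n openstack get permissions.rabbitmq.com designate-permission
```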
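Similarly, the designate `aio` overlay pins the new `designate-api` HPA to a single replica while the base keeps a 3-9 range. Rendering both with kubectl's built-in kustomize is a cheap way to confirm the patches apply; this assumes the `all.yaml` referenced by the base kustomization is present in the tree:

``` shell
# Base should print minReplicas: 3 / maxReplicas: 9; aio should print 1 / 1.
kubectl kustomize kustomize/designate/base | grep -E 'minReplicas|maxReplicas'
kubectl kustomize kustomize/designate/aio | grep -E 'minReplicas|maxReplicas'
```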