diff --git a/helm-configs/designate/designate-helm-overrides.yaml b/helm-configs/designate/designate-helm-overrides.yaml new file mode 100644 index 00000000..b6744b41 --- /dev/null +++ b/helm-configs/designate/designate-helm-overrides.yaml @@ -0,0 +1,753 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default values for designate. +# This is a YAML-formatted file. +# Declare name/value pairs to be passed into your templates. +# name: value + +--- +release_group: null + +labels: + api: + node_selector_key: openstack-control-plane + node_selector_value: enabled + central: + node_selector_key: openstack-control-plane + node_selector_value: enabled + producer: + node_selector_key: openstack-control-plane + node_selector_value: enabled + worker: + node_selector_key: openstack-control-plane + node_selector_value: enabled + job: + node_selector_key: openstack-control-plane + node_selector_value: enabled + mdns: + node_selector_key: openstack-control-plane + node_selector_value: enabled + sink: + node_selector_key: openstack-control-plane + node_selector_value: enabled + +images: + tags: + bootstrap: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy + db_init: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy + db_drop: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy + rabbit_init: docker.io/rabbitmq:3.7-management + ks_user: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy + ks_service: docker.io/openstackhelm/heat:2023.1-ubuntu_jammy + ks_endpoints: 
docker.io/openstackhelm/heat:2023.1-ubuntu_jammy + dep_check: quay.io/airshipit/kubernetes-entrypoint:v1.0.0 + designate_db_sync: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy + designate_api: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy + designate_central: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy + designate_mdns: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy + designate_worker: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy + designate_producer: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy + designate_sink: docker.io/openstackhelm/designate:2024.1-ubuntu_jammy + image_repo_sync: docker.io/docker:17.07.0 + pull_policy: "IfNotPresent" + local_registry: + active: false + exclude: + - dep_check + - image_repo_sync + +pod: + affinity: + anti: + type: + default: preferredDuringSchedulingIgnoredDuringExecution + topologyKey: + default: kubernetes.io/hostname + mounts: + designate_api: + init_container: null + designate_api: + volumeMounts: + volumes: + designate_central: + init_container: null + designate_central: + volumeMounts: + volumes: + designate_mdns: + init_container: null + designate_mdns: + volumeMounts: + volumes: + designate_worker: + init_container: null + designate_worker: + volumeMounts: + volumes: + designate_producer: + init_container: null + designate_producer: + volumeMounts: + volumes: + designate_sink: + init_container: null + designate_sink: + volumeMounts: + volumes: + designate_db_sync: + designate_db_sync: + volumeMounts: + volumes: + replicas: + api: 1 + central: 1 + mdns: 1 + producer: 1 + sink: 1 + worker: 1 + lifecycle: + upgrades: + deployments: + revision_history: 3 + pod_replacement_strategy: RollingUpdate + rolling_update: + max_unavailable: 1 + max_surge: 3 + disruption_budget: + api: + min_available: 0 + central: + min_available: 0 + mdns: + min_available: 0 + worker: + min_available: 0 + producer: + min_available: 0 + sink: + min_available: 0 + termination_grace_period: + api: + 
timeout: 30 + mdns: + timeout: 30 + + resources: + enabled: false + api: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + jobs: + bootstrap: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + db_init: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + db_sync: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + ks_endpoints: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + ks_service: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + ks_user: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + rabbit_init: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + tests: + requests: + memory: "128Mi" + cpu: "100m" + limits: + memory: "1024Mi" + cpu: "2000m" + +network: + api: + ingress: + public: true + classes: + namespace: "nginx" + cluster: "nginx-cluster" + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / + external_policy_local: false + node_port: + enabled: false + port: 9001 + mdns: + name: "designate-mdns" + proto: "http" + external_policy_local: false + node_port: + enabled: true + port: 5354 + +bootstrap: + enabled: false + script: | + openstack token issue + +dependencies: + dynamic: + common: + local_image_registry: + jobs: + - designate-image-repo-sync + services: + - endpoint: node + service: local_image_registry + job_rabbit_init: + api: + jobs: + - designate-rabbit-init + sink: + jobs: + - designate-rabbit-init + central: + jobs: + - designate-rabbit-init + worker: + jobs: + - designate-rabbit-init + static: + db_init: + services: + - service: oslo_db + endpoint: internal + db_sync: + jobs: + - designate-db-init + services: + - service: oslo_db + endpoint: internal + ks_user: + services: + - service: identity + endpoint: internal + 
ks_service: + services: + - service: identity + endpoint: internal + ks_endpoints: + jobs: + - designate-ks-service + services: + - service: identity + endpoint: internal + rabbit_init: + services: + - service: oslo_messaging + endpoint: internal + api: + jobs: + - designate-db-sync + - designate-ks-user + - designate-ks-endpoints + services: + - service: oslo_db + endpoint: internal + - service: identity + endpoint: internal + - service: oslo_messaging + endpoint: internal + central: + jobs: + - designate-db-sync + - designate-ks-user + - designate-ks-endpoints + services: + - service: oslo_db + endpoint: internal + - service: identity + endpoint: internal + - service: oslo_messaging + endpoint: internal + worker: + jobs: + - designate-db-sync + - designate-ks-user + - designate-ks-endpoints + services: + - service: oslo_db + endpoint: internal + - service: identity + endpoint: internal + - service: mdns + endpoint: internal + mdns: + jobs: + - designate-db-sync + - designate-ks-user + - designate-ks-endpoints + services: + - service: oslo_db + endpoint: internal + - service: identity + endpoint: internal + producer: + jobs: + - designate-db-sync + - designate-ks-user + - designate-ks-endpoints + services: + - service: oslo_db + endpoint: internal + - service: identity + endpoint: internal + sink: + jobs: + - designate-db-sync + - designate-ks-user + - designate-ks-endpoints + services: + - service: oslo_db + endpoint: internal + - service: identity + endpoint: internal + +conf: + pools: | + - name: default + # The name is immutable. There will be no option to change the name after + # creation and the only way to change it will be to delete it + # (and all zones associated with it) and recreate it. + description: Default Pool + + attributes: {} + + # List out the NS records for zones hosted within this pool + # This should be a record that is created outside of designate, that + # points to the public IP of the controller node. 
+ ns_records: + - hostname: {{ printf "ns.%s.svc.%s." .Release.Namespace .Values.endpoints.cluster_domain_suffix }} + priority: 1 + + # List out the nameservers for this pool. These are the actual DNS servers. + # We use these to verify changes have propagated to all nameservers. + nameservers: + - host: 127.0.0.1 # enter your own values ${POWERDNS_SERVICE_HOST} + port: {{ tuple "powerdns" "internal" "powerdns" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }} + + # List out the targets for this pool. For BIND there will be one + # entry for each BIND server, as we have to run rndc command on each server + targets: + - type: pdns4 + description: PowerDNS Server + + # List out the designate-mdns servers from which PowerDNS servers should + # request zone transfers (AXFRs) from. + # This should be the IP of the controller node. + # If you have multiple controllers you can add multiple masters + # by running designate-mdns on them, and adding them here. + masters: + - host: 127.0.0.1 # enter your own values ${MINIDNS_SERVICE_HOST} + port: {{ tuple "mdns" "internal" "ipc" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }} + + # PowerDNS Configuration options + options: + host: 127.0.0.1 # enter your own values ${POWERDNS_SERVICE_HOST} + port: {{ tuple "powerdns" "internal" "powerdns" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }} + api_endpoint: http://${POWERDNS_SERVICE_HOST}:{{ tuple "powerdns" "internal" "powerdns_api" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }} + api_token: {{ tuple "powerdns" "service" . 
| include "helm-toolkit.endpoints.endpoint_token_lookup" }} + paste: + composite:osapi_dns: + use: egg:Paste#urlmap + /: osapi_dns_versions + /v2: osapi_dns_v2 + /admin: osapi_dns_admin + composite:osapi_dns_versions: + use: call:designate.api.middleware:auth_pipeline_factory + noauth: http_proxy_to_wsgi cors maintenance faultwrapper osapi_dns_app_versions + keystone: http_proxy_to_wsgi cors maintenance faultwrapper osapi_dns_app_versions + app:osapi_dns_app_versions: + paste.app_factory: designate.api.versions:factory + composite:osapi_dns_v2: + use: call:designate.api.middleware:auth_pipeline_factory + noauth: http_proxy_to_wsgi cors request_id faultwrapper validation_API_v2 noauthcontext maintenance normalizeuri osapi_dns_app_v2 + keystone: http_proxy_to_wsgi cors request_id faultwrapper validation_API_v2 authtoken keystonecontext maintenance normalizeuri osapi_dns_app_v2 + app:osapi_dns_app_v2: + paste.app_factory: designate.api.v2:factory + composite:osapi_dns_admin: + use: call:designate.api.middleware:auth_pipeline_factory + noauth: http_proxy_to_wsgi cors request_id faultwrapper noauthcontext maintenance normalizeuri osapi_dns_app_admin + keystone: http_proxy_to_wsgi cors request_id faultwrapper authtoken keystonecontext maintenance normalizeuri osapi_dns_app_admin + app:osapi_dns_app_admin: + paste.app_factory: designate.api.admin:factory + filter:cors: + paste.filter_factory: oslo_middleware.cors:filter_factory + oslo_config_project: designate + filter:request_id: + paste.filter_factory: oslo_middleware:RequestId.factory + filter:http_proxy_to_wsgi: + paste.filter_factory: oslo_middleware:HTTPProxyToWSGI.factory + filter:noauthcontext: + paste.filter_factory: designate.api.middleware:NoAuthContextMiddleware.factory + filter:authtoken: + paste.filter_factory: keystonemiddleware.auth_token:filter_factory + filter:keystonecontext: + paste.filter_factory: designate.api.middleware:KeystoneContextMiddleware.factory + filter:maintenance: + paste.filter_factory: 
designate.api.middleware:MaintenanceMiddleware.factory + filter:normalizeuri: + paste.filter_factory: designate.api.middleware:NormalizeURIMiddleware.factory + filter:faultwrapper: + paste.filter_factory: designate.api.middleware:FaultWrapperMiddleware.factory + filter:validation_API_v2: + paste.filter_factory: designate.api.middleware:APIv2ValidationErrorMiddleware.factory + policy: {} + designate: + DEFAULT: + debug: false + log_config_append: /etc/designate/logging.conf + service:api: + auth_strategy: keystone + enable_api_v2: true + enable_api_admin: true + enabled_extensions_v2: quotas,reports + workers: 2 + service:worker: + enabled: true + notify: false + oslo_middleware: + enable_proxy_headers_parsing: true + oslo_policy: + policy_file: /etc/designate/policy.yaml + database: + max_retries: -1 + storage:sqlalchemy: + max_retries: -1 + keystone_authtoken: + service_token_roles: service + service_token_roles_required: true + auth_type: password + auth_version: v3 + memcache_security_strategy: ENCRYPT + service_type: dns + logging: + loggers: + keys: + - root + - designate + handlers: + keys: + - stdout + - stderr + - "null" + formatters: + keys: + - context + - default + logger_root: + level: WARNING + handlers: 'null' + logger_designate: + level: INFO + handlers: + - stdout + qualname: designate + logger_amqp: + level: WARNING + handlers: stderr + qualname: amqp + logger_amqplib: + level: WARNING + handlers: stderr + qualname: amqplib + logger_eventletwsgi: + level: WARNING + handlers: stderr + qualname: eventlet.wsgi.server + logger_sqlalchemy: + level: WARNING + handlers: stderr + qualname: sqlalchemy + logger_boto: + level: WARNING + handlers: stderr + qualname: boto + handler_null: + class: logging.NullHandler + formatter: default + args: () + handler_stdout: + class: StreamHandler + args: (sys.stdout,) + formatter: context + handler_stderr: + class: StreamHandler + args: (sys.stderr,) + formatter: context + formatter_context: + class: 
oslo_log.formatters.ContextFormatter + datefmt: "%Y-%m-%d %H:%M:%S" + formatter_default: + format: "%(message)s" + datefmt: "%Y-%m-%d %H:%M:%S" + +# Names of secrets used by bootstrap and environmental checks +secrets: + identity: + admin: designate-keystone-admin + designate: designate-keystone-user + test: designate-keystone-test + oslo_db: + admin: designate-db-admin + designate: designate-db-user + oslo_messaging: + admin: designate-rabbitmq-admin + designate: designate-rabbitmq-user + tls: + dns: + api: + public: designate-tls-public + oci_image_registry: + designate: designate-oci-image-registry + +endpoints: + cluster_domain_suffix: cluster.local + local_image_registry: + name: docker-registry + namespace: docker-registry + hosts: + default: localhost + internal: docker-registry + node: localhost + host_fqdn_override: + default: null + port: + registry: + node: 5000 + oci_image_registry: + name: oci-image-registry + namespace: oci-image-registry + auth: + enabled: false + designate: + username: designate + password: password + hosts: + default: localhost + host_fqdn_override: + default: null + port: + registry: + default: null + identity: + name: keystone + auth: + admin: + region_name: RegionOne + username: admin + password: password + project_name: admin + user_domain_name: default + project_domain_name: default + designate: + role: admin + region_name: RegionOne + username: designate + password: password + project_name: service + user_domain_name: service + project_domain_name: service + test: + role: admin + region_name: RegionOne + username: designate-test + password: password + project_name: test + user_domain_name: service + project_domain_name: service + hosts: + default: keystone + internal: keystone-api + host_fqdn_override: + default: null + path: + default: /v3 + scheme: + default: http + port: + api: + default: 80 + internal: 5000 + dns: + name: designate + hosts: + default: designate-api + public: designate + host_fqdn_override: + default: null 
+ path: + default: / + scheme: + default: 'http' + port: + api: + default: 9001 + public: 80 + mdns: + name: minidns + hosts: + default: minidns + public: designate-mdns + host_fqdn_override: + default: null + path: + default: null + scheme: + default: 'tcp' + port: + ipc: + default: 5354 + oslo_db: + auth: + admin: + username: root + password: password + secret: + tls: + internal: mariadb-tls-direct + hosts: + default: mariadb-cluster-primary + host_fqdn_override: + default: null + path: /designate + scheme: mysql+pymysql + port: + mysql: + default: 3306 + oslo_cache: + hosts: + default: memcached + host_fqdn_override: + default: null + port: + memcache: + default: 11211 + auth: + # NOTE: this is used to define the value for keystone + # authtoken cache encryption key, if not set it will be populated + # automatically with a random value, but to take advantage of + # this feature all services should be set to use the same key, + # and memcache service. + memcache_secret_key: null + oslo_messaging: + auth: + admin: + username: rabbitmq + password: password + secret: + tls: + internal: rabbitmq-tls-direct + designate: + username: designate + password: password + statefulset: + replicas: 3 + name: rabbitmq-server + hosts: + default: rabbitmq-nodes + host_fqdn_override: + default: null + path: /designate + scheme: rabbit + port: + amqp: + default: 5672 + http: + default: 15672 + powerdns: + auth: + service: + token: chiave_segreta + hosts: + default: 8.8.8.8 + host_fqdn_override: + default: null + port: + powerdns_api: + default: 8081 + powerdns: + default: 53 + +manifests: + configmap_bin: true + configmap_etc: true + deployment_api: true + deployment_central: true + deployment_worker: true + deployment_producer: true + deployment_mdns: true + deployment_sink: false + ingress_api: true + job_bootstrap: true + job_db_init: true + job_db_sync: true + job_ks_endpoints: true + job_ks_service: true + job_ks_user: true + job_rabbit_init: false + pdb_api: true + 
pdb_producer: true + pdb_central: true + pdb_worker: true + pdb_mdns: true + pdb_sink: false + secret_db: true + secret_ingress_tls: true + secret_keystone: true + secret_rabbitmq: true + secret_registry: true + service_api: true + service_mdns: true + service_ingress_api: true +... diff --git a/kustomize/designate/aio/kustomization.yaml b/kustomize/designate/aio/kustomization.yaml new file mode 100644 index 00000000..64d1224c --- /dev/null +++ b/kustomize/designate/aio/kustomization.yaml @@ -0,0 +1,14 @@ +bases: + - ../base + +patches: + - target: + kind: HorizontalPodAutoscaler + name: designate-api + patch: |- + - op: replace + path: /spec/minReplicas + value: 1 + - op: replace + path: /spec/maxReplicas + value: 1 diff --git a/kustomize/designate/base/designate-mariadb-database.yaml b/kustomize/designate/base/designate-mariadb-database.yaml new file mode 100644 index 00000000..0151497e --- /dev/null +++ b/kustomize/designate/base/designate-mariadb-database.yaml @@ -0,0 +1,55 @@ +--- +apiVersion: k8s.mariadb.com/v1alpha1 +kind: Database +metadata: + name: designate + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + # If you want the database to be created with a different name than the resource name + # name: data-custom + mariaDbRef: + name: mariadb-cluster + characterSet: utf8 + collate: utf8_general_ci + retryInterval: 5s +--- +apiVersion: k8s.mariadb.com/v1alpha1 +kind: User +metadata: + name: designate + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + # If you want the user to be created with a different name than the resource name + # name: user-custom + mariaDbRef: + name: mariadb-cluster + passwordSecretKeyRef: + name: designate-db-password + key: password + # This field is immutable and defaults to 10, 0 means unlimited. 
+ maxUserConnections: 0 + host: "%" + retryInterval: 5s +--- +apiVersion: k8s.mariadb.com/v1alpha1 +kind: Grant +metadata: + name: designate-grant + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + mariaDbRef: + name: mariadb-cluster + privileges: + - "ALL" + database: "designate" + table: "*" + username: designate + grantOption: true + host: "%" + retryInterval: 5s diff --git a/kustomize/designate/base/designate-rabbitmq-queue.yaml b/kustomize/designate/base/designate-rabbitmq-queue.yaml new file mode 100644 index 00000000..a7113d66 --- /dev/null +++ b/kustomize/designate/base/designate-rabbitmq-queue.yaml @@ -0,0 +1,81 @@ +--- +apiVersion: rabbitmq.com/v1beta1 +kind: User +metadata: + name: designate + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + tags: + - management # available tags are 'management', 'policymaker', 'monitoring' and 'administrator' + - policymaker + rabbitmqClusterReference: + name: rabbitmq # rabbitmqCluster must exist in the same namespace as this resource + namespace: openstack + importCredentialsSecret: + name: designate-rabbitmq-password +--- +apiVersion: rabbitmq.com/v1beta1 +kind: Vhost +metadata: + name: designate-vhost + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + name: "designate" # vhost name; required and cannot be updated + defaultQueueType: quorum # default queue type for this vhost; require RabbitMQ version 3.11.12 or above + rabbitmqClusterReference: + name: rabbitmq # rabbitmqCluster must exist in the same namespace as this resource + namespace: openstack +# status: +# conditions: +# - lastTransitionTime: "" +# status: "True" # true, false, or unknown +# type: Ready +# Reason: "SuccessfulCreateOrUpdate" # status false result in reason FailedCreateOrUpdate +# Message: "" # set when status is false +--- +apiVersion: rabbitmq.com/v1beta1 +kind: Queue +metadata: + name: designate-queue + namespace: openstack + annotations: + 
helm.sh/resource-policy: keep +spec: + name: designate-qq # name of the queue + vhost: "designate" # defaults to '/' if not provided + type: quorum # without providing a queue type, rabbitmq creates a classic queue + autoDelete: false + durable: true # setting 'durable' to false means this queue won't survive a server restart + rabbitmqClusterReference: + name: rabbitmq # rabbitmqCluster must exist in the same namespace as this resource + namespace: openstack +--- +apiVersion: rabbitmq.com/v1beta1 +kind: Permission +metadata: + name: designate-permission + namespace: openstack + annotations: + helm.sh/resource-policy: keep +spec: + vhost: "designate" # name of a vhost + userReference: + name: "designate" # name of a user.rabbitmq.com in the same namespace; must specify either spec.userReference or spec.user + permissions: + write: ".*" + configure: ".*" + read: ".*" + rabbitmqClusterReference: + name: rabbitmq # rabbitmqCluster must exist in the same namespace as this resource + namespace: openstack +# status: +# conditions: +# - lastTransitionTime: "" +# status: "True" # true, false, or unknown +# type: Ready +# Reason: "SuccessfulCreateOrUpdate" # status false result in reason FailedCreateOrUpdate +# Message: "" # set when status is false diff --git a/kustomize/designate/base/hpa-designate-api.yaml new file mode 100644 index 00000000..4eca66f1 --- /dev/null +++ b/kustomize/designate/base/hpa-designate-api.yaml @@ -0,0 +1,19 @@ +apiVersion: autoscaling/v2 +kind: HorizontalPodAutoscaler +metadata: + name: designate-api + namespace: openstack +spec: + maxReplicas: 9 + minReplicas: 3 + metrics: + - resource: + name: cpu + target: + averageUtilization: 50 + type: Utilization + type: Resource + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: designate-api diff --git a/kustomize/designate/base/kustomization.yaml new file mode 100644 index 00000000..a67aeb43 --- 
/dev/null +++ b/kustomize/designate/base/kustomization.yaml @@ -0,0 +1,5 @@ +resources: + - designate-mariadb-database.yaml + - designate-rabbitmq-queue.yaml + - all.yaml + - hpa-designate-api.yaml diff --git a/kustomize/designate/letsencrypt/kustomization.yaml b/kustomize/designate/letsencrypt/kustomization.yaml new file mode 100644 index 00000000..5fb919f6 --- /dev/null +++ b/kustomize/designate/letsencrypt/kustomization.yaml @@ -0,0 +1,13 @@ +bases: + - ../base + +patches: + - target: + kind: Ingress + name: designate-namespace-fqdn + patch: |- + - op: add + path: /metadata/annotations + value: + cert-manager.io/cluster-issuer: letsencrypt-prod + acme.cert-manager.io/http01-edit-in-place: "true"