diff --git a/cmd/metrics_exporter/app/metrics_exporter.go b/cmd/metrics_exporter/app/metrics_exporter.go
index 89314ced3..33b8334f3 100644
--- a/cmd/metrics_exporter/app/metrics_exporter.go
+++ b/cmd/metrics_exporter/app/metrics_exporter.go
@@ -46,6 +46,12 @@ var (
 	// chopConfigFile defines path to clickhouse-operator config file to be used
 	chopConfigFile string
 
+	// kubeConfigFile defines path to kube config file to be used
+	kubeConfigFile string
+
+	// masterURL defines URL of kubernetes master to be used
+	masterURL string
+
 	// metricsEP defines metrics end-point IP address
 	metricsEP string
 
@@ -55,6 +61,8 @@ var (
 func init() {
 	flag.BoolVar(&versionRequest, "version", false, "Display clickhouse-operator version and exit")
 	flag.StringVar(&chopConfigFile, "config", "", "Path to clickhouse-operator config file.")
+	flag.StringVar(&kubeConfigFile, "kubeconfig", "", "Path to custom kubernetes config file. Makes sense only when running outside of the cluster.")
+	flag.StringVar(&masterURL, "master", "", "The address of custom Kubernetes API server. Makes sense only when running outside of the cluster and the address is not specified in the kubeconfig file.")
 	flag.StringVar(&metricsEP, "metrics-endpoint", defaultMetricsEndpoint, "The Prometheus exporter endpoint.")
 	flag.StringVar(&chiListEP, "chi-list-endpoint", defaultChiListEP, "The CHI list endpoint.")
 	flag.Parse()
@@ -78,18 +86,15 @@ func Run() {
 		os.Exit(1)
 	}()
 
-	//
-	// Create operator instance
-	//
-	chop := chop.NewCHOp(version.Version, nil, chopConfigFile)
-	if err := chop.Init(); err != nil {
-		glog.Fatalf("Unable to init CHOp instance %v\n", err)
-		os.Exit(1)
-	}
+	glog.V(1).Infof("Starting metrics exporter. Version:%s GitSHA:%s\n", version.Version, version.GitSHA)
+
+	// Initialize k8s API clients
+	_, chopClient := chop.GetClientset(kubeConfigFile, masterURL)
 
-	glog.V(1).Info("Starting metrics exporter\n")
+	// Create operator instance
+	chop := chop.GetCHOp(chopClient, chopConfigFile)
 
-	metrics.StartMetricsREST(
+	exporter := metrics.StartMetricsREST(
 		metrics.NewCHAccessInfo(
 			chop.Config().CHUsername,
 			chop.Config().CHPassword,
@@ -103,5 +108,7 @@ func Run() {
 		chiListPath,
 	)
 
+	exporter.DiscoveryWatchedCHIs(chop, chopClient)
+
 	<-ctx.Done()
 }
diff --git a/cmd/operator/app/clickhouse_operator.go b/cmd/operator/app/clickhouse_operator.go
index ce4fed32d..9a3f72493 100644
--- a/cmd/operator/app/clickhouse_operator.go
+++ b/cmd/operator/app/clickhouse_operator.go
@@ -20,8 +20,6 @@ import (
 	"fmt"
 	"os"
 	"os/signal"
-	"os/user"
-	"path/filepath"
 	"sync"
 	"syscall"
 	"time"
@@ -30,15 +28,10 @@ import (
 	"github.com/altinity/clickhouse-operator/pkg/controller/chi"
 	"github.com/altinity/clickhouse-operator/pkg/version"
 
-	chopclientset "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned"
 	chopinformers "github.com/altinity/clickhouse-operator/pkg/client/informers/externalversions"
 
-	kubeinformers "k8s.io/client-go/informers"
-	kube "k8s.io/client-go/kubernetes"
-	kuberest "k8s.io/client-go/rest"
-	kubeclientcmd "k8s.io/client-go/tools/clientcmd"
-
 	"github.com/golang/glog"
+	kubeinformers "k8s.io/client-go/informers"
 )
 
 // Prometheus exporter defaults
@@ -89,60 +82,12 @@ func init() {
 	flag.BoolVar(&versionRequest, "version", false, "Display clickhouse-operator version and exit")
 	flag.BoolVar(&debugRequest, "debug", false, "Debug run")
 	flag.StringVar(&chopConfigFile, "config", "", "Path to clickhouse-operator config file.")
-	flag.StringVar(&kubeConfigFile, "kubeconfig", "", "Path to kubernetes config file. Only required if called outside of the cluster.")
-	flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Only required if called outside of the cluster and not being specified in kube config file.")
+	flag.StringVar(&kubeConfigFile, "kubeconfig", "", "Path to custom kubernetes config file. Makes sense only when running outside of the cluster.")
+	flag.StringVar(&masterURL, "master", "", "The address of custom Kubernetes API server. Makes sense only when running outside of the cluster and the address is not specified in the kubeconfig file.")
 	flag.StringVar(&metricsEP, "metrics-endpoint", defaultMetricsEndpoint, "The Prometheus exporter endpoint.")
 	flag.Parse()
 }
 
-// getKubeConfig creates kuberest.Config object based on current environment
-func getKubeConfig(kubeConfigFile, masterURL string) (*kuberest.Config, error) {
-	if len(kubeConfigFile) > 0 {
-		// kube config file specified as CLI flag
-		return kubeclientcmd.BuildConfigFromFlags(masterURL, kubeConfigFile)
-	}
-
-	if len(os.Getenv("KUBECONFIG")) > 0 {
-		// kube config file specified as ENV var
-		return kubeclientcmd.BuildConfigFromFlags(masterURL, os.Getenv("KUBECONFIG"))
-	}
-
-	if conf, err := kuberest.InClusterConfig(); err == nil {
-		// in-cluster configuration found
-		return conf, nil
-	}
-
-	usr, err := user.Current()
-	if err != nil {
-		return nil, fmt.Errorf("user not found")
-	}
-
-	// OS user found. Parse ~/.kube/config file
-	conf, err := kubeclientcmd.BuildConfigFromFlags("", filepath.Join(usr.HomeDir, ".kube", "config"))
-	if err != nil {
-		return nil, fmt.Errorf("~/.kube/config not found")
-	}
-
-	// ~/.kube/config found
-	return conf, nil
-}
-
-// createClientsets creates Clientset objects
-func createClientsets(config *kuberest.Config) (*kube.Clientset, *chopclientset.Clientset) {
-
-	kubeClientset, err := kube.NewForConfig(config)
-	if err != nil {
-		glog.Fatalf("Unable to initialize kubernetes API clientset: %s", err.Error())
-	}
-
-	chopClientset, err := chopclientset.NewForConfig(config)
-	if err != nil {
-		glog.Fatalf("Unable to initialize clickhouse-operator API clientset: %s", err.Error())
-	}
-
-	return kubeClientset, chopClientset
-}
-
 // Run is an entry point of the application
 func Run() {
 	if versionRequest {
@@ -157,28 +102,13 @@ func Run() {
 	glog.V(1).Infof("Starting clickhouse-operator. Version:%s GitSHA:%s\n", version.Version, version.GitSHA)
 
-	//
 	// Initialize k8s API clients
-	//
-	kubeConfig, err := getKubeConfig(kubeConfigFile, masterURL)
-	if err != nil {
-		glog.Fatalf("Unable to build kubeconf: %s", err.Error())
-		os.Exit(1)
-	}
-	kubeClient, chopClient := createClientsets(kubeConfig)
+	kubeClient, chopClient := chop.GetClientset(kubeConfigFile, masterURL)
 
-	//
 	// Create operator instance
-	//
-	chop := chop.NewCHOp(version.Version, chopClient, chopConfigFile)
-	if err := chop.Init(); err != nil {
-		glog.Fatalf("Unable to init CHOP instance %v\n", err)
-		os.Exit(1)
-	}
+	chop := chop.GetCHOp(chopClient, chopConfigFile)
 
-	//
 	// Create Informers
-	//
 	kubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(
 		kubeClient,
 		kubeInformerFactoryResyncPeriod,
@@ -190,9 +120,7 @@ func Run() {
 		chopinformers.WithNamespace(chop.Config().GetInformerNamespace()),
 	)
 
-	//
 	// Create Controller
-	//
 	chiController := chi.NewController(
 		chop,
 		chopClient,
diff --git a/config/config-dev.yaml b/config/config-dev.yaml
index fdb4f5336..757e07241 100644
--- a/config/config-dev.yaml
+++ b/config/config-dev.yaml
@@ -90,7 +90,7 @@ ChConfigNetworksHostRegexpTemplate: "chi-{chi}-[^.]+\\d+-\\d+\\.{namespace}.svc.
 # 1. Metrics requests
 # 2. Schema maintenance
 # 3. DROP DNS CACHE
-# User with such credentials credentials can be specified in additional ClickHouse .xml config files,
+# User with such credentials can be specified in additional ClickHouse .xml config files,
 # located in `chUsersConfigsPath` folder
 chUsername: clickhouse_operator
 chPassword: clickhouse_operator_password
diff --git a/config/config.yaml b/config/config.yaml
index 160026150..acf22eea2 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -6,8 +6,9 @@
 
 # List of namespaces where clickhouse-operator watches for events.
 # Concurrently running operators should watch on different namespaces
-# watchNamespaces:
+#watchNamespaces:
 # - dev
+# - test
 # - info
 # - onemore
 
@@ -89,7 +90,7 @@ ChConfigNetworksHostRegexpTemplate: "chi-{chi}-[^.]+\\d+-\\d+\\.{namespace}.svc.
 # 1. Metrics requests
 # 2. Schema maintenance
 # 3. DROP DNS CACHE
-# User with such credentials credentials can be specified in additional ClickHouse .xml config files,
+# User with such credentials can be specified in additional ClickHouse .xml config files,
 # located in `chUsersConfigsPath` folder
 chUsername: clickhouse_operator
 chPassword: clickhouse_operator_password
diff --git a/deploy/dev/clickhouse-operator-install-dev.yaml b/deploy/dev/clickhouse-operator-install-dev.yaml
index 03e53cbd6..ef2468ebf 100644
--- a/deploy/dev/clickhouse-operator-install-dev.yaml
+++ b/deploy/dev/clickhouse-operator-install-dev.yaml
@@ -1,10 +1,3 @@
-# Possible Template Parameters:
-#
-# dev
-# altinity/clickhouse-operator:0.9.0
-#
-# Setup CustomResourceDefinition(s)
-# CustomResourceDefinition is namespace-less and must have unique name
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
@@ -1288,7 +1281,7 @@ subjects:
 # Possible Template Parameters:
 #
 # dev
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-files
 #
 apiVersion: v1
@@ -1308,8 +1301,9 @@ data:
 
     # List of namespaces where clickhouse-operator watches for events.
    # Concurrently running operators should watch on different namespaces
-    # watchNamespaces:
+    #watchNamespaces:
     # - dev
+    # - test
     # - info
     # - onemore
 
@@ -1391,7 +1385,7 @@ data:
    # 1. Metrics requests
    # 2. Schema maintenance
    # 3. DROP DNS CACHE
-    # User with such credentials credentials can be specified in additional ClickHouse .xml config files,
+    # User with such credentials can be specified in additional ClickHouse .xml config files,
    # located in `chUsersConfigsPath` folder
    chUsername: clickhouse_operator
    chPassword: clickhouse_operator_password
@@ -1401,7 +1395,7 @@ data:
 # Possible Template Parameters:
 #
 # dev
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-confd-files
 #
 apiVersion: v1
@@ -1416,7 +1410,7 @@ data:
 # Possible Template Parameters:
 #
 # dev
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-configd-files
 #
 apiVersion: v1
@@ -1462,7 +1456,7 @@ data:
 # Possible Template Parameters:
 #
 # dev
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-templatesd-files
 #
 apiVersion: v1
@@ -1561,7 +1555,7 @@ data:
 # Possible Template Parameters:
 #
 # dev
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-usersd-files
 #
 apiVersion: v1
@@ -1610,8 +1604,8 @@ data:
 # Possible Template Parameters:
 #
 # dev
-# altinity/clickhouse-operator:0.9.0
-# altinity/metrics-exporter:0.9.0
+# altinity/clickhouse-operator:0.9.1
+# altinity/metrics-exporter:0.9.1
 #
 # Setup Deployment for clickhouse-operator
 # Deployment would be created in kubectl-specified namespace
@@ -1651,7 +1645,7 @@ spec:
           name: etc-clickhouse-operator-usersd-files
      containers:
        - name: clickhouse-operator
-          image: altinity/clickhouse-operator:0.9.0
+          image: altinity/clickhouse-operator:0.9.1
          imagePullPolicy: Always
          volumeMounts:
            - name: etc-clickhouse-operator-folder
@@ -1716,7 +1710,7 @@ spec:
              resource: limits.memory
 
        - name: metrics-exporter
-          image: altinity/metrics-exporter:0.9.0
+          image: altinity/metrics-exporter:0.9.1
          imagePullPolicy: Always
          volumeMounts:
            - name: etc-clickhouse-operator-folder
diff --git a/deploy/dev/clickhouse-operator-install-yaml-template-01-section-crd-01-chi.yaml b/deploy/dev/clickhouse-operator-install-yaml-template-01-section-crd-01-chi.yaml
index 4a2018429..ce50ffdd9 100644
--- a/deploy/dev/clickhouse-operator-install-yaml-template-01-section-crd-01-chi.yaml
+++ b/deploy/dev/clickhouse-operator-install-yaml-template-01-section-crd-01-chi.yaml
@@ -1,10 +1,3 @@
-# Possible Template Parameters:
-#
-# ${OPERATOR_NAMESPACE}
-# ${OPERATOR_IMAGE}
-#
-# Setup CustomResourceDefinition(s)
-# CustomResourceDefinition is namespace-less and must have unique name
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
diff --git a/deploy/operator/clickhouse-operator-install-crd.yaml b/deploy/operator/clickhouse-operator-install-crd.yaml
index 2a483fa58..3083effb4 100644
--- a/deploy/operator/clickhouse-operator-install-crd.yaml
+++ b/deploy/operator/clickhouse-operator-install-crd.yaml
@@ -1,10 +1,3 @@
-# Possible Template Parameters:
-#
-# -
-# altinity/clickhouse-operator:0.9.0
-#
-# Setup CustomResourceDefinition(s)
-# CustomResourceDefinition is namespace-less and must have unique name
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
diff --git a/deploy/operator/clickhouse-operator-install-deployment.yaml b/deploy/operator/clickhouse-operator-install-deployment.yaml
index e2690a988..76e36b670 100644
--- a/deploy/operator/clickhouse-operator-install-deployment.yaml
+++ b/deploy/operator/clickhouse-operator-install-deployment.yaml
@@ -1,7 +1,7 @@
 # Possible Template Parameters:
 #
 # -
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-files
 #
 apiVersion: v1
@@ -20,8 +20,9 @@ data:
 
    # List of namespaces where clickhouse-operator watches for events.
    # Concurrently running operators should watch on different namespaces
-    # watchNamespaces:
+    #watchNamespaces:
    # - dev
+    # - test
    # - info
    # - onemore
 
@@ -103,7 +104,7 @@ data:
    # 1. Metrics requests
    # 2. Schema maintenance
    # 3. DROP DNS CACHE
-    # User with such credentials credentials can be specified in additional ClickHouse .xml config files,
+    # User with such credentials can be specified in additional ClickHouse .xml config files,
    # located in `chUsersConfigsPath` folder
    chUsername: clickhouse_operator
    chPassword: clickhouse_operator_password
@@ -113,7 +114,7 @@ data:
 # Possible Template Parameters:
 #
 # -
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-confd-files
 #
 apiVersion: v1
@@ -127,7 +128,7 @@ data:
 # Possible Template Parameters:
 #
 # -
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-configd-files
 #
 apiVersion: v1
@@ -172,7 +173,7 @@ data:
 # Possible Template Parameters:
 #
 # -
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-templatesd-files
 #
 apiVersion: v1
@@ -270,7 +271,7 @@ data:
 # Possible Template Parameters:
 #
 # -
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-usersd-files
 #
 apiVersion: v1
@@ -318,8 +319,8 @@ data:
 # Possible Template Parameters:
 #
 # -
-# altinity/clickhouse-operator:0.9.0
-# altinity/metrics-exporter:0.9.0
+# altinity/clickhouse-operator:0.9.1
+# altinity/metrics-exporter:0.9.1
 #
 # Setup Deployment for clickhouse-operator
 # Deployment would be created in kubectl-specified namespace
@@ -358,7 +359,7 @@ spec:
          name: etc-clickhouse-operator-usersd-files
      containers:
        - name: clickhouse-operator
-          image: altinity/clickhouse-operator:0.9.0
+          image: altinity/clickhouse-operator:0.9.1
          imagePullPolicy: Always
          volumeMounts:
            - name: etc-clickhouse-operator-folder
@@ -423,7 +424,7 @@ spec:
              resource: limits.memory
 
        - name: metrics-exporter
-          image: altinity/metrics-exporter:0.9.0
+          image: altinity/metrics-exporter:0.9.1
          imagePullPolicy: Always
          volumeMounts:
            - name: etc-clickhouse-operator-folder
diff --git a/deploy/operator/clickhouse-operator-install-template-crd.yaml b/deploy/operator/clickhouse-operator-install-template-crd.yaml
index 8f3b712f3..3083effb4 100644
--- a/deploy/operator/clickhouse-operator-install-template-crd.yaml
+++ b/deploy/operator/clickhouse-operator-install-template-crd.yaml
@@ -1,10 +1,3 @@
-# Possible Template Parameters:
-#
-# $OPERATOR_NAMESPACE
-# $OPERATOR_IMAGE
-#
-# Setup CustomResourceDefinition(s)
-# CustomResourceDefinition is namespace-less and must have unique name
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
diff --git a/deploy/operator/clickhouse-operator-install-template-deployment.yaml b/deploy/operator/clickhouse-operator-install-template-deployment.yaml
index b01f203c6..06831214a 100644
--- a/deploy/operator/clickhouse-operator-install-template-deployment.yaml
+++ b/deploy/operator/clickhouse-operator-install-template-deployment.yaml
@@ -21,8 +21,9 @@ data:
 
    # List of namespaces where clickhouse-operator watches for events.
    # Concurrently running operators should watch on different namespaces
-    # watchNamespaces:
+    #watchNamespaces:
    # - dev
+    # - test
    # - info
    # - onemore
 
@@ -104,7 +105,7 @@ data:
    # 1. Metrics requests
    # 2. Schema maintenance
    # 3. DROP DNS CACHE
-    # User with such credentials credentials can be specified in additional ClickHouse .xml config files,
+    # User with such credentials can be specified in additional ClickHouse .xml config files,
    # located in `chUsersConfigsPath` folder
    chUsername: clickhouse_operator
    chPassword: clickhouse_operator_password
diff --git a/deploy/operator/clickhouse-operator-install-template.yaml b/deploy/operator/clickhouse-operator-install-template.yaml
index 9f0232b13..9ba01e941 100644
--- a/deploy/operator/clickhouse-operator-install-template.yaml
+++ b/deploy/operator/clickhouse-operator-install-template.yaml
@@ -1,10 +1,3 @@
-# Possible Template Parameters:
-#
-# $OPERATOR_NAMESPACE
-# $OPERATOR_IMAGE
-#
-# Setup CustomResourceDefinition(s)
-# CustomResourceDefinition is namespace-less and must have unique name
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
@@ -1308,8 +1301,9 @@ data:
 
    # List of namespaces where clickhouse-operator watches for events.
    # Concurrently running operators should watch on different namespaces
-    # watchNamespaces:
+    #watchNamespaces:
    # - dev
+    # - test
    # - info
    # - onemore
 
@@ -1391,7 +1385,7 @@ data:
    # 1. Metrics requests
    # 2. Schema maintenance
    # 3. DROP DNS CACHE
-    # User with such credentials credentials can be specified in additional ClickHouse .xml config files,
+    # User with such credentials can be specified in additional ClickHouse .xml config files,
    # located in `chUsersConfigsPath` folder
    chUsername: clickhouse_operator
    chPassword: clickhouse_operator_password
diff --git a/deploy/operator/clickhouse-operator-install.yaml b/deploy/operator/clickhouse-operator-install.yaml
index 949169044..28b64991d 100644
--- a/deploy/operator/clickhouse-operator-install.yaml
+++ b/deploy/operator/clickhouse-operator-install.yaml
@@ -1,10 +1,3 @@
-# Possible Template Parameters:
-#
-# kube-system
-# altinity/clickhouse-operator:0.9.0
-#
-# Setup CustomResourceDefinition(s)
-# CustomResourceDefinition is namespace-less and must have unique name
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
@@ -1288,7 +1281,7 @@ subjects:
 # Possible Template Parameters:
 #
 # kube-system
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-files
 #
 apiVersion: v1
@@ -1308,8 +1301,9 @@ data:
 
    # List of namespaces where clickhouse-operator watches for events.
    # Concurrently running operators should watch on different namespaces
-    # watchNamespaces:
+    #watchNamespaces:
    # - dev
+    # - test
    # - info
    # - onemore
 
@@ -1391,7 +1385,7 @@ data:
    # 1. Metrics requests
    # 2. Schema maintenance
    # 3. DROP DNS CACHE
-    # User with such credentials credentials can be specified in additional ClickHouse .xml config files,
+    # User with such credentials can be specified in additional ClickHouse .xml config files,
    # located in `chUsersConfigsPath` folder
    chUsername: clickhouse_operator
    chPassword: clickhouse_operator_password
@@ -1401,7 +1395,7 @@ data:
 # Possible Template Parameters:
 #
 # kube-system
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-confd-files
 #
 apiVersion: v1
@@ -1416,7 +1410,7 @@ data:
 # Possible Template Parameters:
 #
 # kube-system
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-configd-files
 #
 apiVersion: v1
@@ -1462,7 +1456,7 @@ data:
 # Possible Template Parameters:
 #
 # kube-system
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-templatesd-files
 #
 apiVersion: v1
@@ -1561,7 +1555,7 @@ data:
 # Possible Template Parameters:
 #
 # kube-system
-# altinity/clickhouse-operator:0.9.0
+# altinity/clickhouse-operator:0.9.1
 # etc-clickhouse-operator-usersd-files
 #
 apiVersion: v1
@@ -1610,8 +1604,8 @@ data:
 # Possible Template Parameters:
 #
 # kube-system
-# altinity/clickhouse-operator:0.9.0
-# altinity/metrics-exporter:0.9.0
+# altinity/clickhouse-operator:0.9.1
+# altinity/metrics-exporter:0.9.1
 #
 # Setup Deployment for clickhouse-operator
 # Deployment would be created in kubectl-specified namespace
@@ -1651,7 +1645,7 @@ spec:
          name: etc-clickhouse-operator-usersd-files
      containers:
        - name: clickhouse-operator
-          image: altinity/clickhouse-operator:0.9.0
+          image: altinity/clickhouse-operator:0.9.1
          imagePullPolicy: Always
          volumeMounts:
            - name: etc-clickhouse-operator-folder
@@ -1716,7 +1710,7 @@ spec:
              resource: limits.memory
 
        - name: metrics-exporter
-          image: altinity/metrics-exporter:0.9.0
+          image: altinity/metrics-exporter:0.9.1
          imagePullPolicy: Always
          volumeMounts:
            - name: etc-clickhouse-operator-folder
diff --git a/dev/go_build_config.sh b/dev/go_build_config.sh
index 9d3e4fed4..7f344687a 100755
--- a/dev/go_build_config.sh
+++ b/dev/go_build_config.sh
@@ -18,3 +18,5 @@ OPERATOR_BIN="${OPERATOR_BIN:-${SRC_ROOT}/dev/bin/clickhouse-operator}"
 # Metrics exporter binary name can be specified externally
 # Default - put 'metrics-exporter' into cur dir
 METRICS_EXPORTER_BIN="${METRICS_EXPORTER_BIN:-${SRC_ROOT}/dev/bin/metrics-exporter}"
+
+MODULES_DIR=vendor
diff --git a/dev/go_build_metrics_exporter.sh b/dev/go_build_metrics_exporter.sh
index d3b40fc73..9764300db 100755
--- a/dev/go_build_metrics_exporter.sh
+++ b/dev/go_build_metrics_exporter.sh
@@ -11,7 +11,6 @@ source "${CUR_DIR}/go_build_config.sh"
 "${MANIFESTS_ROOT}/operator/build-clickhouse-operator-install-yaml.sh"
 
 # Prepare modules
-MODULES_DIR=vendor
 GO111MODULE=on go mod tidy
 GO111MODULE=on go mod "${MODULES_DIR}"
 
diff --git a/dev/go_build_operator.sh b/dev/go_build_operator.sh
index b43e24ee0..cc009c4fa 100755
--- a/dev/go_build_operator.sh
+++ b/dev/go_build_operator.sh
@@ -11,7 +11,6 @@ source "${CUR_DIR}/go_build_config.sh"
 "${MANIFESTS_ROOT}/operator/build-clickhouse-operator-install-yaml.sh"
 
 # Prepare modules
-MODULES_DIR=vendor
 GO111MODULE=on go mod tidy
 GO111MODULE=on go mod "${MODULES_DIR}"
 
diff --git a/dev/image_build_metrics_exporter_dev.sh b/dev/image_build_metrics_exporter_dev.sh
index 9f83dd841..ed91cd9cf 100755
--- a/dev/image_build_metrics_exporter_dev.sh
+++ b/dev/image_build_metrics_exporter_dev.sh
@@ -8,7 +8,7 @@ source "${CUR_DIR}/go_build_config.sh"
 
 # Externally configurable build-dependent options
 TAG="${TAG:-sunsingerus/metrics-exporter:dev}"
-DOCKERHUB_LOGIN="${DOCKERHUB_LOGIN:-sunsingerus}"
+DOCKERHUB_LOGIN="${DOCKERHUB_LOGIN}"
 DOCKERHUB_PUBLISH="${DOCKERHUB_PUBLISH:-yes}"
 MINIKUBE="${MINIKUBE:-no}"
 
diff --git a/dev/update-codegen.sh b/dev/update-codegen.sh
index 7e6c13e01..736e77201 100755
--- a/dev/update-codegen.sh
+++ b/dev/update-codegen.sh
@@ -7,14 +7,17 @@ set -o nounset
 # Only exit with zero if all commands of the pipeline exit successfully
 set -o pipefail
 
-PROJECT_ROOT=$(realpath "$(dirname "${BASH_SOURCE}")/..")
-CODEGEN_PKG=$(realpath "${CODEGEN_PKG:-$(cd "${PROJECT_ROOT}"; ls -d -1 "${PROJECT_ROOT}/vendor/k8s.io/code-generator" 2>/dev/null || echo "${GOPATH}/src/k8s.io/code-generator")}")
+# Source configuration
+CUR_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+source "${CUR_DIR}/go_build_config.sh"
+
+CODEGEN_PKG=$(realpath "${CODEGEN_PKG:-$(cd "${SRC_ROOT}"; ls -d -1 "${SRC_ROOT}/${MODULES_DIR}/k8s.io/code-generator" 2>/dev/null || echo "${GOPATH}/src/k8s.io/code-generator")}")
 
 #echo "Generating code with the following options:"
 #echo "PROJECT_ROOT=${PROJECT_ROOT}"
 #echo "CODEGEN_PKG==${CODEGEN_PKG}"
 
-"${PROJECT_ROOT}/vendor/k8s.io/code-generator/generate-groups.sh" \
+bash "${CODEGEN_PKG}/generate-groups.sh" \
  all \
  github.com/altinity/clickhouse-operator/pkg/client \
  github.com/altinity/clickhouse-operator/pkg/apis \
diff --git a/docs/chi-examples/00-custom-ports.yaml b/docs/chi-examples/02-templates-01-custom-ports.yaml
similarity index 95%
rename from docs/chi-examples/00-custom-ports.yaml
rename to docs/chi-examples/02-templates-01-custom-ports.yaml
index 113021f02..1469dbaa4 100644
--- a/docs/chi-examples/00-custom-ports.yaml
+++ b/docs/chi-examples/02-templates-01-custom-ports.yaml
@@ -1,11 +1,16 @@
 apiVersion: "clickhouse.altinity.com/v1"
 kind: "ClickHouseInstallation"
 metadata:
-  name: clickhouse-custom-ports
+  name: "template-custom-ports"
 spec:
  defaults:
    templates:
      podTemplate: default
+  configuration:
+    settings:
+      http_port: 8124
+      tcp_port: 9001
+      interserver_http_port: 9010
  templates:
    podTemplates:
      - name: default
@@ -20,9 +25,3 @@ spec:
            containerPort: 9001
          - name: interserver
            containerPort: 9010
-  configuration:
-    settings:
-      http_port: 8124
-      tcp_port: 9001
-      interserver_http_port: 9010
-
diff --git a/docs/chi-examples/03-templates-01-pod-resources-limit.yaml b/docs/chi-examples/02-templates-02-pod-resources-limit.yaml
similarity index 98%
rename from docs/chi-examples/03-templates-01-pod-resources-limit.yaml
rename to docs/chi-examples/02-templates-02-pod-resources-limit.yaml
index cd6aafe8a..8e1f9fd5a 100644
--- a/docs/chi-examples/03-templates-01-pod-resources-limit.yaml
+++ b/docs/chi-examples/02-templates-02-pod-resources-limit.yaml
@@ -1,7 +1,7 @@
 apiVersion: "clickhouse.altinity.com/v1"
 kind: "ClickHouseInstallation"
 metadata:
-  name: "template-01"
+  name: "template-pod-res"
 spec:
  configuration:
    clusters:
diff --git a/docs/chi-examples/03-templates-02-override.yaml b/docs/chi-examples/02-templates-03-override.yaml
similarity index 100%
rename from docs/chi-examples/03-templates-02-override.yaml
rename to docs/chi-examples/02-templates-03-override.yaml
diff --git a/docs/chi-examples/03-templates-03-useTemplates-all.yaml b/docs/chi-examples/02-templates-04-useTemplates-all.yaml
similarity index 100%
rename from docs/chi-examples/03-templates-03-useTemplates-all.yaml
rename to docs/chi-examples/02-templates-04-useTemplates-all.yaml
diff --git a/docs/chi-examples/02-persistent-volume-01-default-volume.yaml b/docs/chi-examples/03-persistent-volume-01-default-volume.yaml
similarity index 100%
rename from docs/chi-examples/02-persistent-volume-01-default-volume.yaml
rename to docs/chi-examples/03-persistent-volume-01-default-volume.yaml
diff --git a/docs/chi-examples/02-persistent-volume-02-pod-template.yaml b/docs/chi-examples/03-persistent-volume-02-pod-template.yaml
similarity index 95%
rename from docs/chi-examples/02-persistent-volume-02-pod-template.yaml
rename to docs/chi-examples/03-persistent-volume-02-pod-template.yaml
index 1c7efffc4..3fb5798de 100644
--- a/docs/chi-examples/02-persistent-volume-02-pod-template.yaml
+++ b/docs/chi-examples/03-persistent-volume-02-pod-template.yaml
@@ -10,8 +10,8 @@ spec:
        templates:
          podTemplate: pod-template-with-volumes
        layout:
-          shardsCount: 1
-          replicasCount: 1
+          shardsCount: 2
+          replicasCount: 2
 
  templates:
    podTemplates:
diff --git a/docs/chi-examples/02-persistent-volume-03-encrypted-volume.yaml b/docs/chi-examples/03-persistent-volume-03-encrypted-volume.yaml
similarity index 100%
rename from docs/chi-examples/02-persistent-volume-03-encrypted-volume.yaml
rename to docs/chi-examples/03-persistent-volume-03-encrypted-volume.yaml
diff --git a/docs/chi-examples/06-advanced-layout-01-shards.yaml b/docs/chi-examples/06-advanced-layout-01-shards.yaml
index b04385380..98b518ac0 100644
--- a/docs/chi-examples/06-advanced-layout-01-shards.yaml
+++ b/docs/chi-examples/06-advanced-layout-01-shards.yaml
@@ -1,7 +1,7 @@
 apiVersion: "clickhouse.altinity.com/v1"
 kind: "ClickHouseInstallation"
 metadata:
-  name: "adv-layout-01"
+  name: "adv-layout-shard"
 spec:
  configuration:
    clusters:
diff --git a/docs/chi-examples/06-advanced-layout-02-replicas.yaml b/docs/chi-examples/06-advanced-layout-02-replicas.yaml
index e5a52f4d3..effa281cf 100644
--- a/docs/chi-examples/06-advanced-layout-02-replicas.yaml
+++ b/docs/chi-examples/06-advanced-layout-02-replicas.yaml
@@ -1,7 +1,7 @@
 apiVersion: "clickhouse.altinity.com/v1"
 kind: "ClickHouseInstallation"
 metadata:
-  name: "adv-layout-02"
+  name: "adv-layout-repl"
 spec:
  configuration:
    clusters:
@@ -11,10 +11,19 @@ spec:
          replicas:
            - templates:
                podTemplate: clickhouse:19.1.10
+              httpPort: 8000
+              tcpPort: 8001
+              interserverHTTPPort: 8002
            - templates:
                podTemplate: clickhouse:19.3.5
+              httpPort: 9000
+              tcpPort: 9001
+              interserverHTTPPort: 9002
            - templates:
                podTemplate: clickhouse:19.3.7
+              httpPort: 10000
+              tcpPort: 10001
+              interserverHTTPPort: 10002
 
  templates:
    podTemplates:
diff --git a/docs/chi-examples/06-advanced-layout-03-multiple-clusters.yaml b/docs/chi-examples/06-advanced-layout-03-multiple-clusters.yaml
index 576b07911..fd2b98b1b 100644
--- a/docs/chi-examples/06-advanced-layout-03-multiple-clusters.yaml
+++ b/docs/chi-examples/06-advanced-layout-03-multiple-clusters.yaml
@@ -1,7 +1,7 @@
 apiVersion: "clickhouse.altinity.com/v1"
 kind: "ClickHouseInstallation"
 metadata:
-  name: "adv-layout-03"
+  name: "adv-layout-mul"
 spec:
  configuration:
    clusters:
diff --git a/docs/chi-examples/13-distribution-01-3x3-simple.yaml b/docs/chi-examples/13-distribution-01-3x3-simple.yaml
new file mode 100644
index 000000000..83a995a75
--- /dev/null
+++ b/docs/chi-examples/13-distribution-01-3x3-simple.yaml
@@ -0,0 +1,23 @@
+apiVersion: "clickhouse.altinity.com/v1"
+kind: "ClickHouseInstallation"
+metadata:
+  name: "dist_simple"
+spec:
+  defaults:
+    templates:
+      podTemplate: dist-template
+  configuration:
+    clusters:
+      - name: "dist_simple"
+        layout:
+          shardsCount: 3
+          replicasCount: 3
+  templates:
+    podTemplates:
+      - name: dist-template
+        podDistribution:
+          - type: CircularReplication
+        spec:
+          containers:
+            - name: clickhouse
+              image: yandex/clickhouse-server:19.16
diff --git a/docs/chi-examples/13-distribution-01-3x3.yaml b/docs/chi-examples/13-distribution-02-3x3-detailed.yaml
similarity index 91%
rename from docs/chi-examples/13-distribution-01-3x3.yaml
rename to docs/chi-examples/13-distribution-02-3x3-detailed.yaml
index cdd449c8b..028e4ea18 100644
--- a/docs/chi-examples/13-distribution-01-3x3.yaml
+++ b/docs/chi-examples/13-distribution-02-3x3-detailed.yaml
@@ -1,14 +1,14 @@
 apiVersion: "clickhouse.altinity.com/v1"
 kind: "ClickHouseInstallation"
 metadata:
-  name: "dist3x3"
+  name: "dist_exp"
 spec:
  defaults:
    templates:
      podTemplate: dist-template
  configuration:
    clusters:
-      - name: "dist3x3"
+      - name: "dist_exp"
        layout:
          shardsCount: 3
          replicasCount: 3
@@ -25,7 +25,6 @@ spec:
          - type: ClusterAffinity
          - type: PreviousTailAffinity
        spec:
-          #hostNetwork: true
          containers:
            - name: clickhouse
              image: yandex/clickhouse-server:19.16
diff --git a/docs/quick_start.md b/docs/quick_start.md
index 7473aa496..05db649ed 100644
--- a/docs/quick_start.md
+++ b/docs/quick_start.md
@@ -184,7 +184,7 @@ Connected to ClickHouse server version 19.4.3 revision 54416.
 ## Simple Persistent Volume Example
 
 In case of having Dynamic Volume Provisioning available - ex.: running on AWS - we are able to use PersistentVolumeClaims
-Manifest is [available in examples](./chi-examples/02-persistent-volume-01-default-volume.yaml)
+Manifest is [available in examples](./chi-examples/03-persistent-volume-01-default-volume.yaml)
 
 ```yaml
 apiVersion: "clickhouse.altinity.com/v1"
@@ -227,7 +227,7 @@ Let's install more complex example with:
 1. Pod template
 1. VolumeClaim template
 
-Manifest is [available in examples](./chi-examples/02-persistent-volume-02-pod-template.yaml)
+Manifest is [available in examples](./chi-examples/03-persistent-volume-02-pod-template.yaml)
 
 ```yaml
 apiVersion: "clickhouse.altinity.com/v1"
diff --git a/docs/storage.md b/docs/storage.md
index 0b0b66eef..e1bd74946 100644
--- a/docs/storage.md
+++ b/docs/storage.md
@@ -1,8 +1,8 @@
 # Storage
 
 Examples are available in [examples](./chi-examples) folder:
-1. [Simple Default Persistent Volume](./chi-examples/02-persistent-volume-01-default-volume.yaml)
-1. [Pod Template with Persistent Volume](./chi-examples/02-persistent-volume-02-pod-template.yaml)
+1. [Simple Default Persistent Volume](./chi-examples/03-persistent-volume-01-default-volume.yaml)
+1. [Pod Template with Persistent Volume](./chi-examples/03-persistent-volume-02-pod-template.yaml)
 1. AWS-based cluster with data replication and Persistent Volumes [minimal](./chi-examples/04-replication-zookeeper-03-minimal-AWS-persistent-volume.yaml) and [medium](./chi-examples/04-replication-zookeeper-04-medium-AWS-persistent-volume.yaml) Zookeeper installations
diff --git a/go.mod b/go.mod
index 690bd402a..170a4dc0b 100644
--- a/go.mod
+++ b/go.mod
@@ -34,6 +34,5 @@ replace (
 	k8s.io/api => k8s.io/api v0.0.0-20190226173710-145d52631d00
 	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190221084156-01f179d85dbc
 	k8s.io/client-go => k8s.io/client-go v0.0.0-20190226174127-78295b709ec6
-	k8s.io/code-generator => k8s.io/code-generator v0.0.0-20181128191024-b1289fc74931
 	k8s.io/gengo => k8s.io/gengo v0.0.0-20190308184658-b90029ef6cd8
 )
diff --git a/go.sum b/go.sum
index 2c167de92..6659aabb5 100644
--- a/go.sum
+++ b/go.sum
@@ -115,6 +115,7 @@ github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a h1:2v4Ipjxa3sh+xn6Gvtg
 github.com/r3labs/diff v0.0.0-20191120142937-b4ed99a31f5a/go.mod h1:ozniNEFS3j1qCwHKdvraMn1WJOsUxHd7lYfukEIS4cs=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff h1:VARhShG49tiji6mdRNp7JTNDtJ0FhuprF93GBQ37xGU=
 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
diff --git a/hack/tools.go b/hack/tools.go
new file mode 100644
index 000000000..06f483793
--- /dev/null
+++ b/hack/tools.go
@@ -0,0 +1,6 @@
+// +build tools
+
+// This package imports things required by build scripts, to force `go mod` to see them as dependencies
+package tools
+
+//import _ "k8s.io/code-generator"
diff --git a/pkg/apis/clickhouse.altinity.com/v1/env_vars.go b/pkg/apis/clickhouse.altinity.com/v1/env_vars.go
new file mode 100644
index 000000000..a4836615e
--- /dev/null
+++ b/pkg/apis/clickhouse.altinity.com/v1/env_vars.go
@@ -0,0 +1,48 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +k8s:deepcopy-gen=package,register
+// +groupName=clickhouse.altinity.com
+
+// Package v1 defines version 1 of the API used with ClickHouse Installation Custom Resources.
+package v1
+
+const (
+	// spec.nodeName: ip-172-20-52-62.ec2.internal
+	OPERATOR_POD_NODE_NAME = "OPERATOR_POD_NODE_NAME"
+	// metadata.name: clickhouse-operator-6f87589dbb-ftcsf
+	OPERATOR_POD_NAME = "OPERATOR_POD_NAME"
+	// metadata.namespace: kube-system
+	OPERATOR_POD_NAMESPACE = "OPERATOR_POD_NAMESPACE"
+	// status.podIP: 100.96.3.2
+	OPERATOR_POD_IP = "OPERATOR_POD_IP"
+	// spec.serviceAccount: clickhouse-operator
+	// spec.serviceAccountName: clickhouse-operator
+	OPERATOR_POD_SERVICE_ACCOUNT = "OPERATOR_POD_SERVICE_ACCOUNT"
+
+	// .containers.resources.requests.cpu
+	OPERATOR_CONTAINER_CPU_REQUEST = "OPERATOR_CONTAINER_CPU_REQUEST"
+	// .containers.resources.limits.cpu
+	OPERATOR_CONTAINER_CPU_LIMIT = "OPERATOR_CONTAINER_CPU_LIMIT"
+	// .containers.resources.requests.memory
+	OPERATOR_CONTAINER_MEM_REQUEST = "OPERATOR_CONTAINER_MEM_REQUEST"
+	// .containers.resources.limits.memory
+	OPERATOR_CONTAINER_MEM_LIMIT = "OPERATOR_CONTAINER_MEM_LIMIT"
+
+	// What namespaces to watch
+	WATCH_NAMESPACE = "WATCH_NAMESPACE"
+	WATCH_NAMESPACES = "WATCH_NAMESPACES"
+
+	CHOP_CONFIG = "CHOP_CONFIG"
+)
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
index c87246d67..eae9f0417 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_chi.go
@@ -20,7 +20,7 @@ import (
 )
 
 // fillStatus fills .Status
-func (chi *ClickHouseInstallation) FillStatus(endpoint string, pods []string) {
+func (chi *ClickHouseInstallation) FillStatus(endpoint string, pods, fqdns []string) {
 	chi.Status.Version = version.Version
 	chi.Status.ClustersCount = chi.ClustersCount()
 	chi.Status.ShardsCount = chi.ShardsCount()
@@ -29,6 +29,7 @@ func (chi *ClickHouseInstallation) FillStatus(endpoint string, pods []string) {
 	chi.Status.DeleteHostsCount = 0
 	chi.Status.DeletedHostsCount = 0
 	chi.Status.Pods = pods
+	chi.Status.FQDNs = fqdns
 	chi.Status.Endpoint = endpoint
 }
 
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_config.go b/pkg/apis/clickhouse.altinity.com/v1/type_config.go
index d822a5df3..8a0788e05 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_config.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_config.go
@@ -66,43 +66,43 @@ func (config *OperatorConfig) MergeFrom(from *OperatorConfig, _type MergeType) {
 	}
 }
 
-// readChiTemplates build OperatorConfig.CHITemplate from template files content
-func (config *OperatorConfig) readChiTemplates() {
+// readCHITemplates builds OperatorConfig.CHITemplate from template files content
+func (config *OperatorConfig) readCHITemplates() {
 	// Read CHI template files
-	config.CHITemplateFiles = readConfigFiles(config.CHITemplatesPath, config.isChiTemplateExt)
+	config.CHITemplateFiles = readConfigFiles(config.CHITemplatesPath, config.isCHITemplateExt)
 
 	// Produce map of CHI templates out of CHI template files
 	for filename := range config.CHITemplateFiles {
 		template := new(ClickHouseInstallation)
 		if err := yaml.Unmarshal([]byte(config.CHITemplateFiles[filename]), template); err != nil {
 			// Unable to unmarshal - skip incorrect template
-			glog.V(1).Infof("FAIL readChiTemplates() unable to unmarshal file %s Error: %q", filename, err)
+			glog.V(1).Infof("FAIL readCHITemplates() unable to unmarshal file %s Error: %q", filename, err)
 			continue
 		}
-		config.enlistChiTemplate(template)
+		config.enlistCHITemplate(template)
 	}
 }
 
-// enlistChiTemplate inserts template into templates catalog
-func (config *OperatorConfig) enlistChiTemplate(template *ClickHouseInstallation) {
+// enlistCHITemplate inserts template into templates catalog
+func (config *OperatorConfig) enlistCHITemplate(template *ClickHouseInstallation) {
 	if config.CHITemplates == nil {
 		config.CHITemplates = make([]*ClickHouseInstallation, 0)
 	}
 	config.CHITemplates = append(config.CHITemplates, template)
-	glog.V(1).Infof("enlistChiTemplate(%s/%s)", template.Namespace, template.Name)
+	glog.V(1).Infof("enlistCHITemplate(%s/%s)", template.Namespace, template.Name)
 }
 
-// unlistChiTemplate removes template from templates catalog
-func (config *OperatorConfig) unlistChiTemplate(template *ClickHouseInstallation) {
+// unlistCHITemplate removes template from templates catalog
+func (config *OperatorConfig) unlistCHITemplate(template *ClickHouseInstallation) {
 	if config.CHITemplates == nil {
 		return
 	}
 
-	glog.V(1).Infof("unlistChiTemplate(%s/%s)", template.Namespace, template.Name)
+	glog.V(1).Infof("unlistCHITemplate(%s/%s)", template.Namespace, template.Name)
 
 	// Nullify found template entry
 	for _, _template := range config.CHITemplates {
 		if (_template.Name == template.Name) && (_template.Namespace == template.Namespace) {
-			glog.V(1).Infof("unlistChiTemplate(%s/%s) - found, unlisting", template.Namespace, template.Name)
+			glog.V(1).Infof("unlistCHITemplate(%s/%s) - found, unlisting", template.Namespace, template.Name)
 			// TODO normalize
 			//config.CHITemplates[i] = nil
 			_template.Name = ""
@@ -147,8 +147,8 @@ func (config *OperatorConfig) FindTemplate(use *ChiUseTemplate, namespace string
 	return nil
 }
 
-// buildUnifiedChiTemplate builds combined CHI Template from templates catalog
-func (config *OperatorConfig) buildUnifiedChiTemplate() {
+// buildUnifiedCHITemplate builds combined CHI Template from templates catalog
+func (config *OperatorConfig) buildUnifiedCHITemplate() {
 	return
 
 	/*
@@ -189,26 +189,26 @@ func (config *OperatorConfig) buildUnifiedChiTemplate() {
 	*/
 }
 
-func (config *OperatorConfig) AddChiTemplate(template *ClickHouseInstallation) {
-	config.enlistChiTemplate(template)
-	config.buildUnifiedChiTemplate()
+func (config *OperatorConfig) AddCHITemplate(template *ClickHouseInstallation) {
+	config.enlistCHITemplate(template)
+	config.buildUnifiedCHITemplate()
 }
 
-func (config *OperatorConfig) UpdateChiTemplate(template *ClickHouseInstallation) {
-	config.enlistChiTemplate(template)
-	config.buildUnifiedChiTemplate()
+func (config *OperatorConfig) UpdateCHITemplate(template *ClickHouseInstallation) {
+	config.enlistCHITemplate(template)
+	config.buildUnifiedCHITemplate()
 }
 
-func (config *OperatorConfig) DeleteChiTemplate(template *ClickHouseInstallation) {
-	config.unlistChiTemplate(template)
-	config.buildUnifiedChiTemplate()
+func (config *OperatorConfig) DeleteCHITemplate(template *ClickHouseInstallation) {
+	config.unlistCHITemplate(template)
+	config.buildUnifiedCHITemplate()
 }
 
 func (config *OperatorConfig) Postprocess() {
 	config.normalize()
 	config.readClickHouseCustomConfigFiles()
-	config.readChiTemplates()
-	config.buildUnifiedChiTemplate()
+	config.readCHITemplates()
+	config.buildUnifiedCHITemplate()
 	config.applyEnvVarParams()
 	config.applyDefaultWatchNamespace()
 }
@@ -285,11 +285,12 @@ func (config *OperatorConfig) normalize() {
 
 // applyEnvVarParams applies ENV VARS over config
 func (config *OperatorConfig) applyEnvVarParams() {
-	if ns := os.Getenv("WATCH_NAMESPACE"); len(ns) > 0 {
+	if ns := os.Getenv(WATCH_NAMESPACE); len(ns) > 0 {
 		// We have WATCH_NAMESPACE explicitly specified
 		config.WatchNamespaces = []string{ns}
 	}
-	if nss := os.Getenv("WATCH_NAMESPACES"); len(nss) > 0 {
+
+	if nss := os.Getenv(WATCH_NAMESPACES); len(nss) > 0 {
 		// We have WATCH_NAMESPACES explicitly specified
 		namespaces := strings.FieldsFunc(nss, func(r rune) bool {
 			return r == ':' || r == ','
@@ -319,7 +320,7 @@ func (config *OperatorConfig) applyDefaultWatchNamespace() {
 
 	// No namespaces specified
-	namespace := os.Getenv("OPERATOR_POD_NAMESPACE")
+	namespace := os.Getenv(OPERATOR_POD_NAMESPACE)
 
 	if namespace == "kube-system" {
 		// Do nothing, we already have len(config.WatchNamespaces) == 0
 	} else {
@@ -363,13 +364,13 @@ func (config *OperatorConfig) relativeToConfigFolderPath(relativePath string) st
 
 // readClickHouseCustomConfigFiles reads all extra user-specified ClickHouse config files
 func (config *OperatorConfig) readClickHouseCustomConfigFiles() {
-	config.CHCommonConfigs = readConfigFiles(config.CHCommonConfigsPath, config.isChConfigExt)
-	config.CHHostConfigs = readConfigFiles(config.CHHostConfigsPath, config.isChConfigExt)
-	config.CHUsersConfigs = readConfigFiles(config.CHUsersConfigsPath, config.isChConfigExt)
+	config.CHCommonConfigs = readConfigFiles(config.CHCommonConfigsPath, config.isCHConfigExt)
+	config.CHHostConfigs = readConfigFiles(config.CHHostConfigsPath, config.isCHConfigExt)
+	config.CHUsersConfigs = readConfigFiles(config.CHUsersConfigsPath, config.isCHConfigExt)
 }
 
-// isChConfigExt returns true in case specified file has proper extension for a ClickHouse config file
-func (config *OperatorConfig) isChConfigExt(file string) bool {
+// isCHConfigExt returns true in case specified file has proper extension for a ClickHouse config file
+func (config *OperatorConfig) isCHConfigExt(file string) bool {
 	switch util.ExtToLower(file) {
 	case ".xml":
 		return true
@@ -377,8 +378,8 @@ func (config *OperatorConfig) isChConfigExt(file string) bool {
 	return false
 }
 
-// isChiTemplateExt returns true in case specified file has proper extension for a CHI template config file
-func (config *OperatorConfig) isChiTemplateExt(file string) bool {
+// isCHITemplateExt returns true in case specified file has proper extension for a CHI template config file
+func (config *OperatorConfig) isCHITemplateExt(file string) bool {
 	switch util.ExtToLower(file) {
 	case ".yaml":
 		return true
@@ -388,16 +389,6 @@ func (config *OperatorConfig) isChiTemplateExt(file string) bool {
 	return false
 }
 
-// IsWatchedNamespace returns whether specified namespace is in a list of watched
-func (config *OperatorConfig) IsWatchedNamespace(namespace string) bool {
-	// In case no namespaces specified - watch all namespaces
-	if len(config.WatchNamespaces) == 0 {
-		return true
-	}
-
-	return util.InArray(namespace, config.WatchNamespaces)
-}
-
 // String returns string representation of a OperatorConfig
 func (config *OperatorConfig) String() string {
 	b := &bytes.Buffer{}
@@ -476,8 +467,25 @@ func (config *OperatorConfig) stringMap(name string, m map[string]string) string
 	return b.String()
 }
 
+// TODO unify with GetInformerNamespace
+// IsWatchedNamespace returns whether specified namespace is in a list of watched
+func (config *OperatorConfig) IsWatchedNamespace(namespace string) bool {
+	// In case no namespaces specified - watch all namespaces
+	if len(config.WatchNamespaces) == 0 {
+		return true
+	}
+
+	return util.InArray(namespace, config.WatchNamespaces)
+}
+
+// TODO unify with IsWatchedNamespace
+// TODO unify approaches to multiple namespaces support
 // GetInformerNamespace is a TODO stub
 // Namespace where informers would watch notifications from
+// The thing is that InformerFactory can accept only one parameter as watched namespace,
+// be it an explicitly specified namespace or an empty string for "all namespaces".
+// This conflicts with CHOp's approach of specifying a list of namespaces to watch:
+// a slice of namespaces (CHOp's approach) is incompatible with the "one namespace name" approach
 func (config *OperatorConfig) GetInformerNamespace() string {
 	// Namespace where informers would watch notifications from
 	namespace := metav1.NamespaceAll
@@ -496,7 +504,7 @@ func (config *OperatorConfig) GetInformerNamespace() string {
 
 // readConfigFiles reads config files from specified path into "file name->file content" map
 // path - folder where to look for files
-// isChConfigExt - accepts path to file return bool whether this file has config extension
+// isCHConfigExt - accepts path to a file and returns whether this file has a config extension
 func readConfigFiles(path string, isConfigExt func(string) bool) map[string]string {
 	return util.ReadFilesIntoMap(path, isConfigExt)
 }
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_hosts_field.go b/pkg/apis/clickhouse.altinity.com/v1/type_hosts_field.go
index 66b05c048..5b24579aa 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_hosts_field.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_hosts_field.go
@@ -17,7 +17,7 @@ package v1
 type HostsField struct {
 	ShardsCount   int
 	ReplicasCount int
-	Field [][]*ChiHost
+	Field         [][]*ChiHost
 }
 
 func NewHostsField(shards, replicas int) *HostsField {
@@ -50,7 +50,6 @@ func (hf *HostsField) GetOrCreate(shard, replica int) *ChiHost {
 	return hf.Field[shard][replica]
 }
 
-
 func (hf *HostsField) WalkHosts(
 	f func(shard, replica int, host *ChiHost) error,
 ) []error {
diff --git a/pkg/apis/clickhouse.altinity.com/v1/type_settings.go b/pkg/apis/clickhouse.altinity.com/v1/type_settings.go
index 3f90a5277..95aa1b8e6 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/type_settings.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/type_settings.go
@@ -84,11 +84,10 @@ func (settings *Settings) MergeFrom(src Settings) {
 }
 
 func (settings Settings) GetStringMap() map[string]string {
-	var m map[string]string
+	m := make(map[string]string)
 
 	for key := range settings {
-		stringValue, ok := settings[key].(string)
-		if ok {
+		if stringValue, ok := settings[key].(string); ok {
 			m[key] = stringValue
 		}
 	}
diff --git a/pkg/apis/clickhouse.altinity.com/v1/types.go b/pkg/apis/clickhouse.altinity.com/v1/types.go
index 31db7626c..27994f8d7 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/types.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/types.go
@@ -83,6 +83,7 @@ type ChiStatus struct {
 	DeletedHostsCount int      `json:"deleted"`
 	DeleteHostsCount  int      `json:"delete"`
 	Pods              []string `json:"pods"`
+	FQDNs             []string `json:"fqdns"`
 	Endpoint          string   `json:"endpoint"`
 }
 
diff --git a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
index fc354b426..e647e7c5f 100644
--- a/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/clickhouse.altinity.com/v1/zz_generated.deepcopy.go
@@ -516,6 +516,11 @@ func (in *ChiStatus) DeepCopyInto(out *ChiStatus) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
+	if in.FQDNs != nil {
+		in, out := &in.FQDNs, &out.FQDNs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	return
 }
 
diff --git a/pkg/apis/metrics/clickhouse_fetcher.go b/pkg/apis/metrics/clickhouse_fetcher.go
index b108c42b8..c28886663 100644
--- a/pkg/apis/metrics/clickhouse_fetcher.go
+++ b/pkg/apis/metrics/clickhouse_fetcher.go
@@ -16,6 +16,7 @@ package metrics
 
 import (
 	sqlmodule "database/sql"
+
 	"github.com/MakeNowJust/heredoc"
 
 	"github.com/altinity/clickhouse-operator/pkg/model/clickhouse"
@@ -126,10 +127,10 @@ func (f *ClickHouseFetcher) newConn() *clickhouse.Conn {
 	return clickhouse.New(f.Hostname, f.Username, f.Password, f.Port)
 }
 
-// clickHouseQueryMetrics requests metrics data from ClickHouse
-func (f *ClickHouseFetcher) clickHouseQueryMetrics() ([][]string, error) {
+// getClickHouseQueryMetrics requests metrics data from ClickHouse
+func (f *ClickHouseFetcher) getClickHouseQueryMetrics() ([][]string, error) {
 	return f.clickHouseQueryScanRows(
-		heredoc.Doc(queryMetricsSQL),
+		queryMetricsSQL,
 		func(rows *sqlmodule.Rows, data *[][]string) error {
 			var metric, value, description, _type string
 			if err := rows.Scan(&metric, &value, &description, &_type); err == nil {
@@ -140,10 +141,10 @@ func (f *ClickHouseFetcher) clickHouseQueryMetrics() ([][]string, error) {
 	)
 }
 
-// clickHouseQueryTableSizes requests data sizes from ClickHouse
-func (f *ClickHouseFetcher) clickHouseQueryTableSizes() ([][]string, error) {
+// getClickHouseQueryTableSizes requests data sizes from ClickHouse
+func (f *ClickHouseFetcher) getClickHouseQueryTableSizes() ([][]string, error) {
 	return f.clickHouseQueryScanRows(
-		heredoc.Doc(queryTableSizesSQL),
+		queryTableSizesSQL,
 		func(rows *sqlmodule.Rows, data *[][]string) error {
 			var database, table, partitions, parts, bytes, uncompressed, _rows string
 			if err := rows.Scan(&database, &table, &partitions, &parts, &bytes, &uncompressed, &_rows); err == nil {
@@ -154,10 +155,10 @@ func (f *ClickHouseFetcher) clickHouseQueryTableSizes() ([][]string, error) {
 	)
 }
 
-// clickHouseQuerySystemReplicas requests replica information from ClickHouse
-func (f *ClickHouseFetcher) clickHouseQuerySystemReplicas() ([][]string, error) {
+// getClickHouseQuerySystemReplicas requests replica information from ClickHouse
+func (f *ClickHouseFetcher) getClickHouseQuerySystemReplicas() ([][]string, error) {
 	return f.clickHouseQueryScanRows(
-		heredoc.Doc(querySystemReplicasSQL),
+		querySystemReplicasSQL,
 		func(rows *sqlmodule.Rows, data *[][]string) error {
 			var database, table, isSessionExpired string
 			if err := rows.Scan(&database, &table, &isSessionExpired); err == nil {
@@ -169,9 +170,9 @@ func (f *ClickHouseFetcher) clickHouseQuerySystemReplicas() ([][]string, error)
 }
 
 // clickHouseQuerySystemMutations requests mutations information from ClickHouse
-func (f *ClickHouseFetcher) clickHouseQueryMutations() ([][]string, error) {
+func (f *ClickHouseFetcher) getClickHouseQueryMutations() ([][]string, error) {
 	return f.clickHouseQueryScanRows(
-		heredoc.Doc(queryMutationsSQL),
+		queryMutationsSQL,
 		func(rows *sqlmodule.Rows, data *[][]string) error {
 			var database, table, mutations, parts_to_do string
 			if err := rows.Scan(&database, &table, &mutations, &parts_to_do); err == nil {
@@ -183,15 +184,20 @@ func (f *ClickHouseFetcher) clickHouseQueryMutations() ([][]string, error) {
 }
 
 // clickHouseQueryScanRows scan all rows by external scan function
-func (f *ClickHouseFetcher) clickHouseQueryScanRows(sql string, scan func(rows *sqlmodule.Rows, data *[][]string) error) ([][]string, error) {
-	data := make([][]string, 0)
-	conn := f.newConn()
-	if rows, err := conn.Query(heredoc.Doc(sql)); err != nil {
+func (f *ClickHouseFetcher) clickHouseQueryScanRows(
+	sql string,
+	scan func(
+		rows *sqlmodule.Rows,
+		data *[][]string,
+	) error,
+) ([][]string, error) {
+	if rows, err := f.newConn().Query(heredoc.Doc(sql)); err != nil {
 		return nil, err
 	} else {
+		data := make([][]string, 0)
 		for rows.Next() {
 			_ = scan(rows, &data)
 		}
+		return data, nil
 	}
-	return data, nil
 }
diff --git a/pkg/apis/metrics/exporter.go b/pkg/apis/metrics/exporter.go
index a7eed0e1d..e10fb484e 100644
--- a/pkg/apis/metrics/exporter.go
+++ b/pkg/apis/metrics/exporter.go
@@ -16,11 +16,16 @@ package metrics
 
 import (
 	"encoding/json"
+	"fmt"
+	"github.com/altinity/clickhouse-operator/pkg/chop"
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
 	"net/http"
 	"sync"
 
 	"github.com/golang/glog"
 	"github.com/prometheus/client_golang/prometheus"
+
+	chopclientset "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned"
 )
 
 // Exporter implements prometheus.Collector interface
@@ -156,13 +161,13 @@ func (e *Exporter) UpdateWatch(namespace, chiName string, hostnames []string) {
 	e.updateWatched(chi)
 }
 
-// collectFromHost collect metrics from one host and write inito chan
+// collectFromHost collects metrics from one host and writes them into chan
 func (e *Exporter) collectFromHost(chi *WatchedCHI, hostname string, c chan<- prometheus.Metric) {
 	fetcher := e.newFetcher(hostname)
 	writer := NewPrometheusWriter(c, chi, hostname)
 
 	glog.Infof("Querying metrics for %s\n", hostname)
-	if metrics, err := fetcher.clickHouseQueryMetrics(); err == nil {
+	if metrics, err := fetcher.getClickHouseQueryMetrics(); err == nil {
 		glog.Infof("Extracted %d metrics for %s\n", len(metrics), hostname)
 		writer.WriteMetrics(metrics)
 		writer.WriteOKFetch("system.metrics")
@@ -175,7 +180,7 @@ func (e *Exporter) collectFromHost(chi *WatchedCHI, hostname string, c chan<- pr
 	}
 
 	glog.Infof("Querying table sizes for %s\n", hostname)
-	if tableSizes, err := fetcher.clickHouseQueryTableSizes(); err == nil {
+	if tableSizes, err := fetcher.getClickHouseQueryTableSizes(); err == nil {
 		glog.Infof("Extracted %d table sizes for %s\n", len(tableSizes), hostname)
 		writer.WriteTableSizes(tableSizes)
 		writer.WriteOKFetch("table sizes")
@@ -188,7 +193,7 @@ func (e *Exporter) collectFromHost(chi *WatchedCHI, hostname string, c chan<- pr
 	}
 
 	glog.Infof("Querying system replicas for %s\n", hostname)
-	if systemReplicas, err := fetcher.clickHouseQuerySystemReplicas(); err == nil {
+	if systemReplicas, err := fetcher.getClickHouseQuerySystemReplicas(); err == nil {
 		glog.Infof("Extracted %d system replicas for %s\n", len(systemReplicas), hostname)
 		writer.WriteSystemReplicas(systemReplicas)
 		writer.WriteOKFetch("system.replicas")
@@ -201,7 +206,7 @@ func (e *Exporter) collectFromHost(chi *WatchedCHI, hostname string, c chan<- pr
 	}
 
 	glog.Infof("Querying mutations for %s\n", hostname)
-	if mutations, err := fetcher.clickHouseQueryMutations(); err == nil {
+	if mutations, err := fetcher.getClickHouseQueryMutations(); err == nil {
 		glog.Infof("Extracted %d mutations for %s\n", len(mutations), hostname)
 		writer.WriteMutations(mutations)
 		writer.WriteOKFetch("system.mutations")
@@ -220,30 +225,60 @@ func (e *Exporter) getWatchedCHI(w http.ResponseWriter, r *http.Request) {
 	_ = json.NewEncoder(w).Encode(e.getWatchedCHIs())
 }
 
-// updateWatchedCHI serves HTTPS request to add CHI to the list of watched CHIs
-func (e *Exporter) updateWatchedCHI(w http.ResponseWriter, r *http.Request) {
-	w.Header().Set("Content-Type", "application/json")
+// fetchCHI decodes chi from request
+func (e *Exporter) fetchCHI(r *http.Request) (*WatchedCHI, error) {
 	chi := &WatchedCHI{}
 	if err := json.NewDecoder(r.Body).Decode(chi); err == nil {
 		if chi.isValid() {
-			e.updateWatched(chi)
-			return
+			return chi, nil
 		}
 	}
 
-	http.Error(w, "Unable to parse CHI.", http.StatusNotAcceptable)
+	return nil, fmt.Errorf("unable to parse CHI from request")
parse CHI from request") +} + +// updateWatchedCHI serves HTTPS request to add CHI to the list of watched CHIs +func (e *Exporter) updateWatchedCHI(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + if chi, err := e.fetchCHI(r); err == nil { + e.updateWatched(chi) + } else { + http.Error(w, err.Error(), http.StatusNotAcceptable) + } } // deleteWatchedCHI serves HTTP request to delete CHI from the list of watched CHIs func (e *Exporter) deleteWatchedCHI(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") - chi := &WatchedCHI{} - if err := json.NewDecoder(r.Body).Decode(chi); err == nil { - if chi.isValid() { - e.enqueueToRemoveFromWatched(chi) - return - } + if chi, err := e.fetchCHI(r); err == nil { + e.enqueueToRemoveFromWatched(chi) + } else { + http.Error(w, err.Error(), http.StatusNotAcceptable) } +} - http.Error(w, "Unable to parse CHI.", http.StatusNotAcceptable) +// DiscoveryWatchedCHIs discovers all ClickHouseInstallation objects available for monitoring and adds them to watched list +func (e *Exporter) DiscoveryWatchedCHIs(chop *chop.CHOp, chopClient *chopclientset.Clientset) { + // Get all CHI objects from watched namespace(s) + watchedNamespace := chop.Config().GetInformerNamespace() + list, err := chopClient.ClickhouseV1().ClickHouseInstallations(watchedNamespace).List(v1.ListOptions{}) + if err != nil { + glog.V(1).Infof("Error read ClickHouseInstallations %v", err) + return + } + if list == nil { + return + } + + // Walk over the list of ClickHouseInstallation objects and add them as watched + for i := range list.Items { + chi := &list.Items[i] + glog.Infof("Adding explicitly found CHI %s/%s with %d hosts\n", chi.Namespace, chi.Name, len(chi.Status.FQDNs)) + watchedCHI := &WatchedCHI{ + Namespace: chi.Namespace, + Name: chi.Name, + Hostnames: chi.Status.FQDNs, + } + e.updateWatched(watchedCHI) + } } diff --git a/pkg/apis/metrics/rest_client.go b/pkg/apis/metrics/rest_client.go index 539d06ed3..292164882 100644 --- a/pkg/apis/metrics/rest_client.go +++ b/pkg/apis/metrics/rest_client.go @@ -14,7 +14,7 @@ package metrics -func ReportToMetricsExporterWatchedCHI(namespace, chiName string, hostnames []string) error { +func InformMetricsExporterAboutWatchedCHI(namespace, chiName string, hostnames []string) error { chi := &WatchedCHI{ Namespace: namespace, Name: chiName, diff --git a/pkg/apis/metrics/rest_server.go b/pkg/apis/metrics/rest_server.go index 4f179578f..3cc4f75c1 100644 --- a/pkg/apis/metrics/rest_server.go +++ b/pkg/apis/metrics/rest_server.go @@ -33,7 +33,6 @@ func StartMetricsREST( chiListAddress string, chiListPath string, ) *Exporter { - // Initializing Prometheus Metrics Exporter glog.V(1).Infof("Starting metrics exporter at '%s%s'\n", metricsAddress, metricsPath) exporter = NewExporter(chAccess) diff --git a/pkg/chop/chop.go b/pkg/chop/chop.go index 684fe8d21..5b8c58d20 100644 --- a/pkg/chop/chop.go +++ b/pkg/chop/chop.go @@ -28,7 +28,6 @@ func NewCHOp( version string, chopClient *chopclientset.Clientset, initConfigFilePath string, - ) *CHOp { return &CHOp{ Version: version, diff --git a/pkg/chop/config_manager.go b/pkg/chop/config_manager.go index ed41da6c0..c43e9bbd5 100644 --- a/pkg/chop/config_manager.go +++ b/pkg/chop/config_manager.go @@ -32,21 +32,25 @@ type ConfigManager struct { chopClient *chopclientset.Clientset chopConfigList *chiv1.ClickHouseOperatorConfigurationList + // initConfigFilePath is path to the configuration file, which will be used as initial/seed + // to 
diff --git a/pkg/apis/metrics/rest_server.go b/pkg/apis/metrics/rest_server.go
index 4f179578f..3cc4f75c1 100644
--- a/pkg/apis/metrics/rest_server.go
+++ b/pkg/apis/metrics/rest_server.go
@@ -33,7 +33,6 @@ func StartMetricsREST(
 	chiListAddress string,
 	chiListPath string,
 ) *Exporter {
-	// Initializing Prometheus Metrics Exporter
 	glog.V(1).Infof("Starting metrics exporter at '%s%s'\n", metricsAddress, metricsPath)
 	exporter = NewExporter(chAccess)
diff --git a/pkg/chop/chop.go b/pkg/chop/chop.go
index 684fe8d21..5b8c58d20 100644
--- a/pkg/chop/chop.go
+++ b/pkg/chop/chop.go
@@ -28,7 +28,6 @@ func NewCHOp(
 	version string,
 	chopClient *chopclientset.Clientset,
 	initConfigFilePath string,
-
 ) *CHOp {
 	return &CHOp{
 		Version: version,
diff --git a/pkg/chop/config_manager.go b/pkg/chop/config_manager.go
index ed41da6c0..c43e9bbd5 100644
--- a/pkg/chop/config_manager.go
+++ b/pkg/chop/config_manager.go
@@ -32,21 +32,25 @@ type ConfigManager struct {
 	chopClient     *chopclientset.Clientset
 	chopConfigList *chiv1.ClickHouseOperatorConfigurationList
 
+	// initConfigFilePath is the path to the configuration file which is used as the initial/seed config
+	// to build the final config that is consumed by users
 	initConfigFilePath string
 
-	// fileConfig is a file-based config
+	// fileConfig is a prepared file-based config
 	fileConfig *chiv1.OperatorConfig
 
-	// crConfigs is a slice of Custom Resource based configs
+	// crConfigs is a slice of prepared Custom Resource based configs
 	crConfigs []*chiv1.OperatorConfig
 
-	// config is the final, unified config
+	// config is the final config,
+	// built as a merge of all available configs and ready to be consumed by users
 	config *chiv1.OperatorConfig
 
+	// runtimeParams is a set/map of runtime params that influence the configuration
 	runtimeParams map[string]string
 }
 
-// NewConfigManager creates new Manager
+// NewConfigManager creates a new ConfigManager
 func NewConfigManager(
 	chopClient *chopclientset.Clientset,
 	initConfigFilePath string,
@@ -73,13 +77,14 @@ func (cm *ConfigManager) Init() error {
 	glog.V(1).Info("File-based ClickHouseOperatorConfigurations")
 	cm.fileConfig.WriteToLog()
 
-	// Read config all Custom Resources
+	// Get configs from all config Custom Resources
 	watchedNamespace := cm.fileConfig.GetInformerNamespace()
 	cm.getCRBasedConfigs(watchedNamespace)
 	cm.logCRBasedConfigs()
 
-	// Prepare one unified config
+	// Prepare one unified config from all available config pieces
 	cm.buildUnifiedConfig()
+	// From now on we have one unified CHOP config
 	glog.V(1).Info("Unified (but not post-processed yet) CHOP config")
 	cm.config.WriteToLog()
 
@@ -101,10 +106,12 @@ func (cm *ConfigManager) Config() *chiv1.OperatorConfig {
 
 // getCRBasedConfigs reads all ClickHouseOperatorConfiguration objects in specified namespace
 func (cm *ConfigManager) getCRBasedConfigs(namespace string) {
+	// We need to have the chop kube client available in order to fetch ClickHouseOperatorConfiguration objects
 	if cm.chopClient == nil {
 		return
 	}
 
+	// Get the list of ClickHouseOperatorConfiguration objects
 	var err error
 	if cm.chopConfigList, err = cm.chopClient.ClickhouseV1().ClickHouseOperatorConfigurations(namespace).List(metav1.ListOptions{}); err != nil {
 		glog.V(1).Infof("Error read ClickHouseOperatorConfigurations %v", err)
@@ -115,7 +122,7 @@ func (cm *ConfigManager) getCRBasedConfigs(namespace string) {
 		return
 	}
 
-	// Get sorted names of ClickHouseOperatorConfiguration object
+	// Get sorted names of ClickHouseOperatorConfiguration objects from the list of objects
 	var names []string
 	for i := range cm.chopConfigList.Items {
 		chOperatorConfiguration := &cm.chopConfigList.Items[i]
@@ -126,9 +133,9 @@ func (cm *ConfigManager) getCRBasedConfigs(namespace string) {
 	// Build sorted slice of configs
 	for _, name := range names {
 		for i := range cm.chopConfigList.Items {
+			// Convenience wrapper
 			chOperatorConfiguration := &cm.chopConfigList.Items[i]
 			if chOperatorConfiguration.Name == name {
-				// Save location info into OperatorConfig itself
 				chOperatorConfiguration.Spec.ConfigFolderPath = namespace
 				chOperatorConfiguration.Spec.ConfigFilePath = name
 
@@ -189,9 +196,9 @@ func (cm *ConfigManager) getFileBasedConfig(configFilePath string) (*chiv1.Opera
 	}
 
 	// No file specified - look for ENV var config file path specification
-	if len(os.Getenv("CHOP_CONFIG")) > 0 {
+	if len(os.Getenv(chiv1.CHOP_CONFIG)) > 0 {
 		// Config file explicitly specified as ENV var
-		if conf, err := cm.buildConfigFromFile(os.Getenv("CHOP_CONFIG")); err == nil {
+		if conf, err := cm.buildConfigFromFile(os.Getenv(chiv1.CHOP_CONFIG)); err == nil {
 			return conf, nil
 		} else {
 			return nil, err
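The Init() flow above fixes a merge order: the file-based config is taken first, then CR-based configs are applied in sorted-name order. A toy sketch of those semantics with a stand-in struct; mergeFrom is a hypothetical helper, and the assumption that non-empty fields of a later config win may differ from OperatorConfig's real merge rules.

// config_merge_sketch.go: illustrates the unification order only
package main

import "fmt"

// operatorConfig stands in for chiv1.OperatorConfig; two fields for brevity.
type operatorConfig struct {
	CHUsername string
	CHPassword string
}

// mergeFrom is hypothetical: assume non-empty fields of src overwrite c.
func (c *operatorConfig) mergeFrom(src *operatorConfig) {
	if src.CHUsername != "" {
		c.CHUsername = src.CHUsername
	}
	if src.CHPassword != "" {
		c.CHPassword = src.CHPassword
	}
}

// buildUnified mirrors the order used by ConfigManager.Init():
// start from the file-based config, then apply CR-based configs
// (already sorted by name) on top.
func buildUnified(file *operatorConfig, crs []*operatorConfig) *operatorConfig {
	unified := *file
	for _, cr := range crs {
		unified.mergeFrom(cr)
	}
	return &unified
}

func main() {
	file := &operatorConfig{CHUsername: "clickhouse_operator"}
	crs := []*operatorConfig{{CHPassword: "from-cr"}}
	fmt.Printf("%+v\n", *buildUnified(file, crs)) // {CHUsername:clickhouse_operator CHPassword:from-cr}
}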
@@ -254,30 +261,19 @@ func (cm *ConfigManager) buildDefaultConfig() (*chiv1.OperatorConfig, error) {
 func (cm *ConfigManager) getEnvVarParamNames() []string {
 	// This list of ENV VARS is specified in operator .yaml manifest, section "kind: Deployment"
 	return []string{
-		// spec.nodeName: ip-172-20-52-62.ec2.internal
-		"OPERATOR_POD_NODE_NAME",
-		// metadata.name: clickhouse-operator-6f87589dbb-ftcsf
-		"OPERATOR_POD_NAME",
-		// metadata.namespace: kube-system
-		"OPERATOR_POD_NAMESPACE",
-		// status.podIP: 100.96.3.2
-		"OPERATOR_POD_IP",
-		// spec.serviceAccount: clickhouse-operator
-		// spec.serviceAccountName: clickhouse-operator
-		"OPERATOR_POD_SERVICE_ACCOUNT",
-
-		// .containers.resources.requests.cpu
-		"OPERATOR_CONTAINER_CPU_REQUEST",
-		// .containers.resources.limits.cpu
-		"OPERATOR_CONTAINER_CPU_LIMIT",
-		// .containers.resources.requests.memory
-		"OPERATOR_CONTAINER_MEM_REQUEST",
-		// .containers.resources.limits.memory
-		"OPERATOR_CONTAINER_MEM_LIMIT",
-
-		// What namespaces to watch
-		"WATCH_NAMESPACE",
-		"WATCH_NAMESPACES",
+		chiv1.OPERATOR_POD_NODE_NAME,
+		chiv1.OPERATOR_POD_NAME,
+		chiv1.OPERATOR_POD_NAMESPACE,
+		chiv1.OPERATOR_POD_IP,
+		chiv1.OPERATOR_POD_SERVICE_ACCOUNT,
+
+		chiv1.OPERATOR_CONTAINER_CPU_REQUEST,
+		chiv1.OPERATOR_CONTAINER_CPU_LIMIT,
+		chiv1.OPERATOR_CONTAINER_MEM_REQUEST,
+		chiv1.OPERATOR_CONTAINER_MEM_LIMIT,
+
+		chiv1.WATCH_NAMESPACE,
+		chiv1.WATCH_NAMESPACES,
 	}
 }
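This hunk only lists the names; how they become runtimeParams is not shown here. A plausible sketch, assuming ConfigManager simply snapshots each variable via os.Getenv:

// runtime_params_sketch.go
package main

import (
	"fmt"
	"os"
)

// collectRuntimeParams snapshots the given ENV VARs into a map; that a plain
// os.Getenv pass is how ConfigManager fills runtimeParams is an assumption.
func collectRuntimeParams(names []string) map[string]string {
	params := make(map[string]string, len(names))
	for _, name := range names {
		params[name] = os.Getenv(name)
	}
	return params
}

func main() {
	// A subset of the names returned by getEnvVarParamNames() above
	params := collectRuntimeParams([]string{"OPERATOR_POD_NAME", "OPERATOR_POD_NAMESPACE", "WATCH_NAMESPACE"})
	for name, value := range params {
		fmt.Printf("%s=%q\n", name, value)
	}
}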
diff --git a/pkg/chop/kube_machinery.go b/pkg/chop/kube_machinery.go
new file mode 100644
index 000000000..a02ed62fb
--- /dev/null
+++ b/pkg/chop/kube_machinery.go
@@ -0,0 +1,96 @@
+// Copyright 2019 Altinity Ltd and/or its affiliates. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package chop
+
+import (
+	"fmt"
+	"os"
+	"os/user"
+	"path/filepath"
+
+	"github.com/golang/glog"
+
+	chopclientset "github.com/altinity/clickhouse-operator/pkg/client/clientset/versioned"
+	"github.com/altinity/clickhouse-operator/pkg/version"
+
+	kube "k8s.io/client-go/kubernetes"
+	kuberest "k8s.io/client-go/rest"
+	kubeclientcmd "k8s.io/client-go/tools/clientcmd"
+)
+
+// getKubeConfig creates kuberest.Config object based on current environment
+func getKubeConfig(kubeConfigFile, masterURL string) (*kuberest.Config, error) {
+	if len(kubeConfigFile) > 0 {
+		// kube config file specified as CLI flag
+		return kubeclientcmd.BuildConfigFromFlags(masterURL, kubeConfigFile)
+	}
+
+	if len(os.Getenv("KUBECONFIG")) > 0 {
+		// kube config file specified as ENV var
+		return kubeclientcmd.BuildConfigFromFlags(masterURL, os.Getenv("KUBECONFIG"))
+	}
+
+	if conf, err := kuberest.InClusterConfig(); err == nil {
+		// in-cluster configuration found
+		return conf, nil
+	}
+
+	usr, err := user.Current()
+	if err != nil {
+		return nil, fmt.Errorf("user not found")
+	}
+
+	// OS user found. Parse ~/.kube/config file
+	conf, err := kubeclientcmd.BuildConfigFromFlags("", filepath.Join(usr.HomeDir, ".kube", "config"))
+	if err != nil {
+		return nil, fmt.Errorf("~/.kube/config not found")
+	}
+
+	// ~/.kube/config found
+	return conf, nil
+}
+
+// GetClientset gets k8s API clients - both kube native client and our custom client
+func GetClientset(kubeConfigFile, masterURL string) (*kube.Clientset, *chopclientset.Clientset) {
+	kubeConfig, err := getKubeConfig(kubeConfigFile, masterURL)
+	if err != nil {
+		glog.Fatalf("Unable to build kubeconfig: %s", err.Error())
+		os.Exit(1)
+	}
+
+	kubeClientset, err := kube.NewForConfig(kubeConfig)
+	if err != nil {
+		glog.Fatalf("Unable to initialize kubernetes API clientset: %s", err.Error())
+	}
+
+	chopClientset, err := chopclientset.NewForConfig(kubeConfig)
+	if err != nil {
+		glog.Fatalf("Unable to initialize clickhouse-operator API clientset: %s", err.Error())
+	}
+
+	return kubeClientset, chopClientset
+}
+
+// GetCHOp builds a CHOp instance. chopClient can be nil; in this case CHOp will not be able to use any ConfigMap(s) with configuration
+func GetCHOp(chopClient *chopclientset.Clientset, initCHOpConfigFilePath string) *CHOp {
+	// Create operator instance
+	chop := NewCHOp(version.Version, chopClient, initCHOpConfigFilePath)
+	if err := chop.Init(); err != nil {
+		glog.Fatalf("Unable to init CHOp instance %v\n", err)
+		os.Exit(1)
+	}
+
+	return chop
+}
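Spelled out, getKubeConfig above resolves a cluster config with the following precedence; the sample invocation at the end is illustrative only (binary name and paths are examples):

// 1. an explicitly passed kubeconfig file path (wired to a CLI flag by callers),
//    combined with the optional master URL
// 2. the KUBECONFIG environment variable
// 3. the in-cluster service-account config (when running as a Pod)
// 4. ~/.kube/config of the current OS user
//
// Example, running out-of-cluster against a specific API server:
//   clickhouse-operator --kubeconfig=$HOME/.kube/dev-config --master=https://k8s.example.com:6443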
"github.com/altinity/clickhouse-operator/pkg/model" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -47,8 +50,8 @@ func (c *Controller) labelMyObjectsTree() { // uid: a275a8a0-83ae-11e9-b92d-0208b778ea1a // Label operator's Pod with version label - podName, ok1 := c.chop.ConfigManager.GetRuntimeParam("OPERATOR_POD_NAME") - namespace, ok2 := c.chop.ConfigManager.GetRuntimeParam("OPERATOR_POD_NAMESPACE") + podName, ok1 := c.chop.ConfigManager.GetRuntimeParam(chiv1.OPERATOR_POD_NAME) + namespace, ok2 := c.chop.ConfigManager.GetRuntimeParam(chiv1.OPERATOR_POD_NAMESPACE) if !ok1 || !ok2 { glog.V(1).Infof("ERROR fetch Pod name out of %s/%s", namespace, podName) @@ -63,7 +66,7 @@ func (c *Controller) labelMyObjectsTree() { } // Put label on the Pod - pod.Labels["version"] = c.chop.Version + c.addLabels(&pod.ObjectMeta) if _, err := c.kubeClient.CoreV1().Pods(namespace).Update(pod); err != nil { glog.V(1).Infof("ERROR put label on Pod %s/%s %v", namespace, podName, err) } @@ -93,7 +96,7 @@ func (c *Controller) labelMyObjectsTree() { } // Put label on the ReplicaSet - replicaSet.Labels["version"] = c.chop.Version + c.addLabels(&replicaSet.ObjectMeta) if _, err := c.kubeClient.AppsV1().ReplicaSets(namespace).Update(replicaSet); err != nil { glog.V(1).Infof("ERROR put label on ReplicaSet %s/%s %v", namespace, replicaSetName, err) } @@ -123,8 +126,13 @@ func (c *Controller) labelMyObjectsTree() { } // Put label on the Deployment - deployment.Labels["version"] = c.chop.Version + c.addLabels(&deployment.ObjectMeta) if _, err := c.kubeClient.AppsV1().Deployments(namespace).Update(deployment); err != nil { glog.V(1).Infof("ERROR put label on Deployment %s/%s %v", namespace, deploymentName, err) } } + +func (c *Controller) addLabels(meta *v1.ObjectMeta) { + meta.Labels[model.LabelAppName] = model.LabelAppValue + meta.Labels[model.LabelChop] = c.chop.Version +} diff --git a/pkg/model/creator.go b/pkg/model/creator.go index a3d222f65..d8a47528f 100644 --- a/pkg/model/creator.go +++ b/pkg/model/creator.go @@ -416,7 +416,7 @@ func (c *Creator) setupConfigMapVolumes(statefulSetObject *apps.StatefulSet, hos } // setupStatefulSetApplyVolumeMounts applies `volumeMounts` of a `container` -func (c *Creator) setupStatefulSetApplyVolumeMounts(statefulSet *apps.StatefulSet) { +func (c *Creator) setupStatefulSetApplyVolumeMounts(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) { // Deal with `volumeMounts` of a `container`, a.k.a. 
diff --git a/pkg/model/creator.go b/pkg/model/creator.go
index a3d222f65..d8a47528f 100644
--- a/pkg/model/creator.go
+++ b/pkg/model/creator.go
@@ -416,7 +416,7 @@ func (c *Creator) setupConfigMapVolumes(statefulSetObject *apps.StatefulSet, hos
 }
 
 // setupStatefulSetApplyVolumeMounts applies `volumeMounts` of a `container`
-func (c *Creator) setupStatefulSetApplyVolumeMounts(statefulSet *apps.StatefulSet) {
+func (c *Creator) setupStatefulSetApplyVolumeMounts(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
 	// Deal with `volumeMounts` of a `container`, a.k.a.
 	// .spec.templates.podTemplates.*.spec.containers.volumeMounts.*
 	// VolumeClaimTemplates, that are referenced in Containers' VolumeMount object(s)
@@ -429,7 +429,7 @@ func (c *Creator) setupStatefulSetApplyVolumeMounts(statefulSet *apps.StatefulSe
 			volumeMount := &container.VolumeMounts[j]
 			if volumeClaimTemplate, ok := c.chi.GetVolumeClaimTemplate(volumeMount.Name); ok {
 				// Found VolumeClaimTemplate to mount by VolumeMount
-				statefulSetAppendVolumeClaimTemplate(statefulSet, volumeClaimTemplate)
+				c.statefulSetAppendVolumeClaimTemplate(host, statefulSet, volumeClaimTemplate)
 			}
 		}
 	}
@@ -441,13 +441,14 @@ func (c *Creator) setupStatefulSetApplyVolumeClaimTemplates(statefulSet *apps.St
 	for i := range statefulSet.Spec.Template.Spec.Containers {
 		// Convenience wrapper
 		container := &statefulSet.Spec.Template.Spec.Containers[i]
-		_ = c.setupStatefulSetApplyVolumeClaimTemplate(statefulSet, container.Name, host.Templates.DataVolumeClaimTemplate, dirPathClickHouseData)
-		_ = c.setupStatefulSetApplyVolumeClaimTemplate(statefulSet, container.Name, host.Templates.LogVolumeClaimTemplate, dirPathClickHouseLog)
+		_ = c.setupStatefulSetApplyVolumeClaimTemplate(host, statefulSet, container.Name, host.Templates.DataVolumeClaimTemplate, dirPathClickHouseData)
+		_ = c.setupStatefulSetApplyVolumeClaimTemplate(host, statefulSet, container.Name, host.Templates.LogVolumeClaimTemplate, dirPathClickHouseLog)
 	}
 }
 
 // setupStatefulSetApplyVolumeClaimTemplate applies .templates.volumeClaimTemplates.* to a StatefulSet
 func (c *Creator) setupStatefulSetApplyVolumeClaimTemplate(
+	host *chiv1.ChiHost,
 	statefulSet *apps.StatefulSet,
 	containerName string,
 	volumeClaimTemplateName string,
@@ -455,41 +456,49 @@ func (c *Creator) setupStatefulSetApplyVolumeClaimTemplate(
 ) error {
 	// Sanity checks
 
-	if volumeClaimTemplateName == "" {
-		// No VolumeClaimTemplate specified
-		return nil
-	}
+	// 1. mountPath has to be reasonable
 	if mountPath == "" {
 		// No mount path specified
 		return nil
 	}
 
-	// Mount specified (by volumeClaimTemplateName) VolumeClaimTemplate into mountPath (say into '/var/lib/clickhouse')
-	//
-	// A container wants to have this VolumeClaimTemplate mounted into `mountPath` in case:
-	// 1. This VolumeClaimTemplate is not already mounted in the container with any VolumeMount (to avoid double-mount of a VolumeClaimTemplate)
-	// 2. And specified `mountPath` (say '/var/lib/clickhouse') is not already mounted with any VolumeMount (to avoid double-mount into `mountPath`)
+	// 2. volumeClaimTemplateName has to be reasonable
+	if volumeClaimTemplateName == "" {
+		// No VolumeClaimTemplate specified
+		return nil
+	}
 
+	// 3. Specified (by volumeClaimTemplateName) VolumeClaimTemplate has to be available as well
 	if _, ok := c.chi.GetVolumeClaimTemplate(volumeClaimTemplateName); !ok {
 		// Incorrect/unknown .templates.VolumeClaimTemplate specified
 		glog.V(1).Infof("Can not find volumeClaimTemplate %s. Volume claim can not be mounted", volumeClaimTemplateName)
 		return nil
 	}
 
+	// 4. Specified container has to be available
 	container := getContainerByName(statefulSet, containerName)
 	if container == nil {
 		glog.V(1).Infof("Can not find container %s. Volume claim can not be mounted", containerName)
 		return nil
 	}
 
+	// Looks like all components are in place
+
+	// Mount specified (by volumeClaimTemplateName) VolumeClaimTemplate into mountPath (say into '/var/lib/clickhouse')
+	//
+	// A container wants to have this VolumeClaimTemplate mounted into `mountPath` in case:
+	// 1. This VolumeClaimTemplate is NOT already mounted in the container with any VolumeMount (to avoid double-mount of a VolumeClaimTemplate)
+	// 2. And specified `mountPath` (say '/var/lib/clickhouse') is NOT already mounted with any VolumeMount (to avoid double-mount/rewrite into single `mountPath`)
+
 	// 1. Check whether this VolumeClaimTemplate is already listed in VolumeMount of this container
 	for i := range container.VolumeMounts {
 		// Convenience wrapper
 		volumeMount := &container.VolumeMounts[i]
 		if volumeMount.Name == volumeClaimTemplateName {
 			// This .templates.VolumeClaimTemplate is already used in VolumeMount
-			glog.V(1).Infof("setupStatefulSetApplyVolumeClaim(%s) container %s volumeClaimTemplateName %s already used",
+			glog.V(1).Infof(
+				"setupStatefulSetApplyVolumeClaim(%s) container %s volumeClaimTemplateName %s already used",
 				statefulSet.Name,
 				container.Name,
 				volumeMount.Name,
@@ -500,7 +509,7 @@ func (c *Creator) setupStatefulSetApplyVolumeClaimTemplate(
 
 	// This VolumeClaimTemplate is not used explicitly by name in a container
 	// So we want to mount it to `mountPath` (say '/var/lib/clickhouse') even more now, because it is unused.
-	// However, `mountPath` (say /var/lib/clickhouse) may be used already by a VolumeMount. Need to check this
+	// However, `mountPath` (say /var/lib/clickhouse) may be used already by a VolumeMount. Need to check this out.
 
 	// 2. Check whether `mountPath` (say '/var/lib/clickhouse') is already mounted
 	for i := range container.VolumeMounts {
@@ -508,7 +517,8 @@ func (c *Creator) setupStatefulSetApplyVolumeClaimTemplate(
 		volumeMount := &container.VolumeMounts[i]
 		if volumeMount.MountPath == mountPath {
 			// `mountPath` (say /var/lib/clickhouse) is already mounted
-			glog.V(1).Infof("setupStatefulSetApplyVolumeClaim(%s) container %s mountPath %s already used",
+			glog.V(1).Infof(
+				"setupStatefulSetApplyVolumeClaim(%s) container %s mountPath %s already used",
 				statefulSet.Name,
 				container.Name,
 				mountPath,
@@ -521,7 +531,7 @@ func (c *Creator) setupStatefulSetApplyVolumeClaimTemplate(
 	// Let's mount this VolumeClaimTemplate into `mountPath` (say '/var/lib/clickhouse') of a container
 	if template, ok := c.chi.GetVolumeClaimTemplate(volumeClaimTemplateName); ok {
 		// Add VolumeClaimTemplate to StatefulSet
-		statefulSetAppendVolumeClaimTemplate(statefulSet, template)
+		c.statefulSetAppendVolumeClaimTemplate(host, statefulSet, template)
 		// Add VolumeMount to ClickHouse container to `mountPath` point
 		container.VolumeMounts = append(
 			container.VolumeMounts,
@@ -541,7 +551,7 @@ func (c *Creator) setupStatefulSetApplyVolumeClaimTemplate(
 
 // setupStatefulSetVolumeClaimTemplates performs VolumeClaimTemplate setup for Containers in PodTemplate of a StatefulSet
 func (c *Creator) setupStatefulSetVolumeClaimTemplates(statefulSet *apps.StatefulSet, host *chiv1.ChiHost) {
-	c.setupStatefulSetApplyVolumeMounts(statefulSet)
+	c.setupStatefulSetApplyVolumeMounts(statefulSet, host)
 	c.setupStatefulSetApplyVolumeClaimTemplates(statefulSet, host)
 }
 
@@ -582,30 +592,39 @@ func ensurePortByName(container *corev1.Container, name string, port int32) {
 }
 
 // statefulSetAppendVolumeClaimTemplate appends to StatefulSet.Spec.VolumeClaimTemplates new entry with data from provided 'src' ChiVolumeClaimTemplate
-func statefulSetAppendVolumeClaimTemplate(statefulSet *apps.StatefulSet, volumeClaimTemplate *chiv1.ChiVolumeClaimTemplate) {
+func (c *Creator) statefulSetAppendVolumeClaimTemplate(
+	host *chiv1.ChiHost,
+	statefulSet *apps.StatefulSet,
+	volumeClaimTemplate *chiv1.ChiVolumeClaimTemplate,
+) {
 	// Ensure VolumeClaimTemplates slice is in place
 	if statefulSet.Spec.VolumeClaimTemplates == nil {
 		statefulSet.Spec.VolumeClaimTemplates = make([]corev1.PersistentVolumeClaim, 0, 0)
 	}
 
+	// Check whether this VolumeClaimTemplate is already listed in statefulSet.Spec.VolumeClaimTemplates
 	for i := range statefulSet.Spec.VolumeClaimTemplates {
 		// Convenience wrapper
 		volumeClaimTemplates := &statefulSet.Spec.VolumeClaimTemplates[i]
 		if volumeClaimTemplates.Name == volumeClaimTemplate.Name {
-			// This VolumeClaimTemplate already listed in statefulSet.Spec.VolumeClaimTemplates
+			// This VolumeClaimTemplate is already listed in statefulSet.Spec.VolumeClaimTemplates
 			// No need to add it second time
 			return
 		}
 	}
 
-	// Volume claim template is not listed in StatefulSet
+	// VolumeClaimTemplate is not listed in statefulSet.Spec.VolumeClaimTemplates
 	// Append copy of PersistentVolumeClaimSpec
-	statefulSet.Spec.VolumeClaimTemplates = append(statefulSet.Spec.VolumeClaimTemplates, corev1.PersistentVolumeClaim{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: volumeClaimTemplate.Name,
+	statefulSet.Spec.VolumeClaimTemplates = append(
+		statefulSet.Spec.VolumeClaimTemplates,
+		corev1.PersistentVolumeClaim{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:   volumeClaimTemplate.Name,
+				Labels: c.labeler.getLabelsHostScope(host, false),
+			},
+			Spec: *volumeClaimTemplate.Spec.DeepCopy(),
 		},
-		Spec: *volumeClaimTemplate.Spec.DeepCopy(),
-	})
+	)
 }
 
 // newDefaultHostTemplate returns default Host Template to be used with StatefulSet
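For a picture of what statefulSetAppendVolumeClaimTemplate now appends, here is a runnable sketch; the label map stands in for the output of getLabelsHostScope(host, false), and all names and values are examples only:

// pvc_labels_sketch.go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// An appended VolumeClaimTemplate entry now carries host-scope labels,
	// so the PVCs created from it can be selected per host.
	pvc := corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name: "data-volume-template", // example VolumeClaimTemplate name
			Labels: map[string]string{
				"clickhouse.altinity.com/app":       "chop",
				"clickhouse.altinity.com/namespace": "dev",
				"clickhouse.altinity.com/chi":       "demo",
			},
		},
		// Spec would be *volumeClaimTemplate.Spec.DeepCopy() in the real code
	}
	fmt.Println(pvc.Name, pvc.Labels)
}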
diff --git a/pkg/model/labeler.go b/pkg/model/labeler.go
index 6cf497695..8f9d40a08 100644
--- a/pkg/model/labeler.go
+++ b/pkg/model/labeler.go
@@ -28,7 +28,7 @@ import (
 const (
 	// Kubernetes labels
 	LabelAppName = clickhousealtinitycom.GroupName + "/app"
-	labelAppValue = "chop"
+	LabelAppValue = "chop"
 	LabelChop = clickhousealtinitycom.GroupName + "/chop"
 	LabelNamespace = clickhousealtinitycom.GroupName + "/namespace"
 	LabelChiName = clickhousealtinitycom.GroupName + "/chi"
@@ -123,7 +123,7 @@ func (l *Labeler) getLabelsChiScope() map[string]string {
 	// Combine generated labels and CHI-provided labels
 	return l.appendChiLabels(map[string]string{
 		LabelNamespace: l.namer.getNamePartNamespace(l.chi),
-		LabelAppName: labelAppValue,
+		LabelAppName: LabelAppValue,
 		LabelChop: l.chop.Version,
 		LabelChiName: l.namer.getNamePartChiName(l.chi),
 	})
@@ -133,7 +133,7 @@ func (l *Labeler) getLabelsChiScope() map[string]string {
 func (l *Labeler) getSelectorChiScope() map[string]string {
 	// Do not include CHI-provided labels
 	return map[string]string{
-		LabelAppName: labelAppValue,
+		LabelAppName: LabelAppValue,
 		// Skip chop
 		LabelChiName: l.namer.getNamePartChiName(l.chi),
 	}
@@ -144,7 +144,7 @@ func (l *Labeler) getLabelsClusterScope(cluster *chi.ChiCluster) map[string]stri
 	// Combine generated labels and CHI-provided labels
 	return l.appendChiLabels(map[string]string{
 		LabelNamespace: l.namer.getNamePartNamespace(cluster),
-		LabelAppName: labelAppValue,
+		LabelAppName: LabelAppValue,
 		LabelChop: l.chop.Version,
 		LabelChiName: l.namer.getNamePartChiName(cluster),
 		LabelClusterName: l.namer.getNamePartClusterName(cluster),
@@ -155,7 +155,7 @@ func (l *Labeler) getLabelsClusterScope(cluster *chi.ChiCluster) map[string]stri
 func (l *Labeler) getSelectorClusterScope(cluster *chi.ChiCluster) map[string]string {
 	// Do not include CHI-provided labels
 	return map[string]string{
-		LabelAppName: labelAppValue,
+		LabelAppName: LabelAppValue,
 		// Skip chop
 		LabelChiName: l.namer.getNamePartChiName(cluster),
 		LabelClusterName: l.namer.getNamePartClusterName(cluster),
@@ -167,7 +167,7 @@ func (l *Labeler) getLabelsShardScope(shard *chi.ChiShard) map[string]string {
 	// Combine generated labels and CHI-provided labels
 	return l.appendChiLabels(map[string]string{
 		LabelNamespace: l.namer.getNamePartNamespace(shard),
-		LabelAppName: labelAppValue,
+		LabelAppName: LabelAppValue,
 		LabelChop: l.chop.Version,
 		LabelChiName: l.namer.getNamePartChiName(shard),
 		LabelClusterName: l.namer.getNamePartClusterName(shard),
@@ -179,7 +179,7 @@ func (l *Labeler) getLabelsShardScope(shard *chi.ChiShard) map[string]string {
 func (l *Labeler) getSelectorShardScope(shard *chi.ChiShard) map[string]string {
 	// Do not include CHI-provided labels
 	return map[string]string{
-		LabelAppName: labelAppValue,
+		LabelAppName: LabelAppValue,
 		// Skip chop
 		LabelChiName: l.namer.getNamePartChiName(shard),
 		LabelClusterName: l.namer.getNamePartClusterName(shard),
@@ -192,7 +192,7 @@ func (l *Labeler) getLabelsHostScope(host *chi.ChiHost, applySupplementaryServic
 	// Combine generated labels and CHI-provided labels
 	labels := map[string]string{
 		LabelNamespace: l.namer.getNamePartNamespace(host),
-		LabelAppName: labelAppValue,
+		LabelAppName: LabelAppValue,
 		LabelChop: l.chop.Version,
 		LabelChiName: l.namer.getNamePartChiName(host),
 		LabelClusterName: l.namer.getNamePartClusterName(host),
@@ -225,7 +225,7 @@ func (l *Labeler) appendChiLabels(dst map[string]string) map[string]string {
 func (l *Labeler) GetSelectorHostScope(host *chi.ChiHost) map[string]string {
 	// Do not include CHI-provided labels
 	return map[string]string{
-		LabelAppName: labelAppValue,
+		LabelAppName: LabelAppValue,
 		// skip chop
 		LabelChiName: l.namer.getNamePartChiName(host),
 		LabelClusterName: l.namer.getNamePartClusterName(host),
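Since labelAppValue is now the exported LabelAppValue, code outside pkg/model can rebuild the operator's selectors. A small sketch using k8s.io/apimachinery; it assumes GroupName resolves to clickhouse.altinity.com and uses "demo" as an example CHI name:

// selector_sketch.go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Mirrors getSelectorChiScope(): app label plus CHI name, chop version skipped
	selector := labels.Set{
		"clickhouse.altinity.com/app": "chop", // LabelAppName: LabelAppValue
		"clickhouse.altinity.com/chi": "demo", // LabelChiName: example CHI
	}.AsSelector()
	fmt.Println(selector.String())
	// Output: clickhouse.altinity.com/app=chop,clickhouse.altinity.com/chi=demo
}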
diff --git a/pkg/model/normalizer.go b/pkg/model/normalizer.go
index 15981233d..77a4273dd 100644
--- a/pkg/model/normalizer.go
+++ b/pkg/model/normalizer.go
@@ -232,11 +232,13 @@ func hostApplyHostTemplate(host *chiv1.ChiHost, template *chiv1.ChiHostTemplate)
 func (n *Normalizer) fillStatus() {
 	endpoint := CreateChiServiceFQDN(n.chi)
 	pods := make([]string, 0)
+	fqdns := make([]string, 0)
 	n.chi.WalkHosts(func(host *chiv1.ChiHost) error {
 		pods = append(pods, CreatePodName(host))
+		fqdns = append(fqdns, CreatePodFQDN(host))
 		return nil
 	})
-	n.chi.FillStatus(endpoint, pods)
+	n.chi.FillStatus(endpoint, pods, fqdns)
 }
 
 // normalizeStop normalizes .spec.stop
@@ -667,7 +669,7 @@ func (n *Normalizer) newPodAntiAffinity(template *chiv1.ChiPodTemplate) *v1.PodA
 		podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = n.addPodAffinityTermWithMatchLabels(
 			podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
 			map[string]string{
-				LabelAppName: labelAppValue,
+				LabelAppName: LabelAppValue,
 			},
 		)
 	}
@@ -680,7 +682,7 @@ func (n *Normalizer) newPodAntiAffinity(template *chiv1.ChiPodTemplate) *v1.PodA
 		podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = n.addPodAffinityTermWithMatchLabels(
 			podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
 			map[string]string{
-				LabelAppName: labelAppValue,
+				LabelAppName: LabelAppValue,
 			},
 		)
 	case chiv1.PodDistributionMaxNumberPerNode:
@@ -822,7 +824,7 @@ func (n *Normalizer) addPodAffinityTermWithMatchLabels(terms []v1.PodAffinityTer
 				//	Key: LabelAppName,
 				//	Operator: v12.LabelSelectorOpIn,
 				//	Values: []string{
-				//		labelAppValue,
+				//		LabelAppValue,
 				//	},
 				//},
@@ -845,7 +847,7 @@ func (n *Normalizer) addPodAffinityTermWithMatchExpressions(terms []v1.PodAffini
 				//	Key: LabelAppName,
 				//	Operator: v12.LabelSelectorOpIn,
 				//	Values: []string{
-				//		labelAppValue,
+				//		LabelAppValue,
 				//	},
 				//},
@@ -877,7 +879,7 @@ func (n *Normalizer) addWeightedPodAffinityTermWithMatchLabels(
 				//	Key: LabelAppName,
 				//	Operator: v12.LabelSelectorOpIn,
 				//	Values: []string{
-				//		labelAppValue,
+				//		LabelAppValue,
 				//	},
 				//},
diff --git a/release b/release
index ac39a106c..f374f6662 100644
--- a/release
+++ b/release
@@ -1 +1 @@
-0.9.0
+0.9.1
diff --git a/tests/test_examples.py b/tests/test_examples.py
index 20834a341..0a2a41ecb 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -16,14 +16,14 @@ def test_examples01_2():
 @TestScenario
 @Name("Persistent volume mapping via defaults")
 def test_examples02_1():
-    create_and_check("../docs/chi-examples/02-persistent-volume-01-default-volume.yaml",
+    create_and_check("../docs/chi-examples/03-persistent-volume-01-default-volume.yaml",
                      {"pod_count": 1,
                       "pod_volumes": {"/var/lib/clickhouse",
                                       "/var/log/clickhouse-server"}})
 
 @TestScenario
 @Name("Persistent volume mapping via podTemplate")
 def test_examples02_2():
-    create_and_check("../docs/chi-examples/02-persistent-volume-02-pod-template.yaml",
+    create_and_check("../docs/chi-examples/03-persistent-volume-02-pod-template.yaml",
                      {"pod_count": 1,
                       "pod_image": "yandex/clickhouse-server:19.3.7",
                       "pod_volumes": {"/var/lib/clickhouse",
                                       "/var/log/clickhouse-server"}})