Indexing jobSummary for index job (#81)
Signed-off-by: Vishnu Challa <[email protected]>
Co-authored-by: Vishnu Challa <[email protected]>
vishnuchalla and Vishnu Challa authored Jul 9, 2024
1 parent bc4afa7 commit e2ec5cc
Showing 16 changed files with 60 additions and 105 deletions.
59 changes: 4 additions & 55 deletions README.md
@@ -51,6 +51,10 @@ Flags:
-h, --help help for kube-burner-ocp
```

## Documentation

Documentation is [available here](https://kube-burner.github.io/kube-burner-ocp/)

## Usage

Some of the benefits the OCP wrapper provides are:
@@ -336,58 +340,3 @@ alerts.yml metrics.yml node-density.yml pod.yml metrics-report.yml
$ vi node-density.yml # Perform modifications accordingly
$ kube-burner-ocp node-density --pods-per-node=100 # Run workload
```

## Cluster metadata

When the benchmark finishes, kube-burner will index the cluster metadata in the configured indexer. Currently, this is based on the following Golang struct:

```golang
type BenchmarkMetadata struct {
ocpmetadata.ClusterMetadata
UUID string `json:"uuid"`
Benchmark string `json:"benchmark"`
Timestamp time.Time `json:"timestamp"`
EndDate time.Time `json:"endDate"`
Passed bool `json:"passed"`
UserMetadata map[string]interface{} `json:"metadata,omitempty"`
}
```

Where `ocpmetadata.ClusterMetadata` is an embedded struct inherited from the [go-commons library](https://github.com/cloud-bulldozer/go-commons/blob/main/ocp-metadata/types.go), which has the following fields:

```golang
// Type to store cluster metadata
type ClusterMetadata struct {
MetricName string `json:"metricName,omitempty"`
Platform string `json:"platform"`
OCPVersion string `json:"ocpVersion"`
OCPMajorVersion string `json:"ocpMajorVersion"`
K8SVersion string `json:"k8sVersion"`
MasterNodesType string `json:"masterNodesType"`
WorkerNodesType string `json:"workerNodesType"`
MasterNodesCount int `json:"masterNodesCount"`
InfraNodesType string `json:"infraNodesType"`
WorkerNodesCount int `json:"workerNodesCount"`
InfraNodesCount int `json:"infraNodesCount"`
TotalNodes int `json:"totalNodes"`
SDNType string `json:"sdnType"`
ClusterName string `json:"clusterName"`
Region string `json:"region"`
ExecutionErrors string `json:"executionErrors"`
}
```

MetricName is hardcoded to `clusterMetadata`

<!-- markdownlint-disable -->
!!! Info
    It's important to note that every document indexed when using an OCP wrapper workload will include a small subset of the previous fields:
```yaml
platform
ocpVersion
ocpMajorVersion
k8sVersion
totalNodes
sdnType
```
<!-- markdownlint-restore -->
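
For illustration, a minimal, self-contained sketch of how that subset could be merged into an arbitrary metrics document before indexing; the contents of `doc` and all field values are hypothetical examples rather than output from a real run:

```golang
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical metrics document produced by a workload run.
	doc := map[string]interface{}{
		"metricName":   "podLatencyQuantilesMeasurement",
		"quantileName": "Ready",
		"p99":          4000,
	}
	// Cluster metadata subset attached to every indexed document
	// (values shown here are examples only).
	subset := map[string]interface{}{
		"platform":        "AWS",
		"ocpVersion":      "4.16.0",
		"ocpMajorVersion": "4.16",
		"k8sVersion":      "v1.29.5",
		"totalNodes":      9,
		"sdnType":         "OVNKubernetes",
	}
	for k, v := range subset {
		doc[k] = v
	}
	out, _ := json.MarshalIndent(doc, "", "  ")
	fmt.Println(string(out))
}
```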
1 change: 0 additions & 1 deletion cluster-density.go
@@ -36,7 +36,6 @@ func NewClusterDensity(wh *workloads.WorkloadHelper, variant string) *cobra.Comm
Use: variant,
Short: fmt.Sprintf("Runs %v workload", variant),
PreRun: func(cmd *cobra.Command, args []string) {
wh.Metadata.Benchmark = cmd.Name()
os.Setenv("JOB_ITERATIONS", fmt.Sprint(iterations))
os.Setenv("CHURN", fmt.Sprint(churn))
os.Setenv("CHURN_CYCLES", fmt.Sprintf("%v", churnCycles))
24 changes: 10 additions & 14 deletions common.go
@@ -18,15 +18,13 @@ import (
"fmt"
"os"
"strings"
"time"

ocpmetadata "github.com/cloud-bulldozer/go-commons/ocp-metadata"
"github.com/kube-burner/kube-burner/pkg/config"
"github.com/kube-burner/kube-burner/pkg/workloads"
"github.com/spf13/cobra"
)

const clusterMetadataMetric = "clusterMetadata"

func setMetrics(cmd *cobra.Command, metricsProfile string) {
var metricsProfiles []string
@@ -52,26 +50,24 @@ func GatherMetadata(wh *workloads.WorkloadHelper, alerting bool) error {
return err
}
// When either indexing or alerting are enabled
if alerting && wh.MetricsEndpoint == "" {
wh.PrometheusURL, wh.PrometheusToken, err = wh.MetadataAgent.GetPrometheus()
if alerting && wh.Config.MetricsEndpoint == "" {
wh.Config.PrometheusURL, wh.Config.PrometheusToken, err = wh.MetadataAgent.GetPrometheus()
if err != nil {
return fmt.Errorf("error obtaining Prometheus information: %v", err)
}
}
wh.Metadata.ClusterMetadata, err = wh.MetadataAgent.GetClusterMetadata()
wh.ClusterMetadata, err = wh.MetadataAgent.GetClusterMetadata()
if err != nil {
return err
}
wh.Metadata.UUID = wh.UUID
wh.Metadata.Timestamp = time.Now().UTC()
wh.Metadata.MetricName = clusterMetadataMetric
wh.Config.UUID = wh.UUID
wh.MetricsMetadata = map[string]interface{}{
"platform": wh.Metadata.Platform,
"ocpVersion": wh.Metadata.OCPVersion,
"ocpMajorVersion": wh.Metadata.OCPMajorVersion,
"k8sVersion": wh.Metadata.K8SVersion,
"totalNodes": wh.Metadata.TotalNodes,
"sdnType": wh.Metadata.SDNType,
"platform": wh.ClusterMetadata.Platform,
"ocpVersion": wh.ClusterMetadata.OCPVersion,
"ocpMajorVersion": wh.ClusterMetadata.OCPMajorVersion,
"k8sVersion": wh.ClusterMetadata.K8SVersion,
"totalNodes": wh.ClusterMetadata.TotalNodes,
"sdnType": wh.ClusterMetadata.SDNType,
}
return nil
}
1 change: 0 additions & 1 deletion crd-scale.go
@@ -30,7 +30,6 @@ func NewCrdScale(wh *workloads.WorkloadHelper) *cobra.Command {
Short: "Runs crd-scale workload",
SilenceUsage: true,
PreRun: func(cmd *cobra.Command, args []string) {
wh.Metadata.Benchmark = cmd.Name()
os.Setenv("JOB_ITERATIONS", fmt.Sprint(iterations))
},
Run: func(cmd *cobra.Command, args []string) {
3 changes: 0 additions & 3 deletions custom-workload.go
@@ -28,9 +28,6 @@ func CustomWorkload(wh *workloads.WorkloadHelper) *cobra.Command {
cmd := &cobra.Command{
Use: "init",
Short: "Runs custom workload",
PreRun: func(cmd *cobra.Command, args []string) {
wh.Metadata.Benchmark = benchmarkName
},
Run: func(cmd *cobra.Command, args []string) {
if _, err := os.Stat(configFile); err != nil {
log.Fatalf("Error reading custom configuration file: %v", err.Error())
1 change: 0 additions & 1 deletion egressip.go
@@ -156,7 +156,6 @@ func NewEgressIP(wh *workloads.WorkloadHelper, variant string) *cobra.Command {
Use: variant,
Short: fmt.Sprintf("Runs %v workload", variant),
PreRun: func(cmd *cobra.Command, args []string) {
wh.Metadata.Benchmark = cmd.Name()
os.Setenv("JOB_ITERATIONS", fmt.Sprint(iterations))
os.Setenv("POD_READY_THRESHOLD", fmt.Sprintf("%v", podReadyThreshold))
os.Setenv("ADDRESSES_PER_ITERATION", fmt.Sprint(addressesPerIteration))
6 changes: 3 additions & 3 deletions go.mod
@@ -5,11 +5,12 @@ go 1.22.0
toolchain go1.22.3

require (
github.com/cloud-bulldozer/go-commons v1.0.15
github.com/cloud-bulldozer/go-commons v1.0.16
github.com/google/uuid v1.6.0
github.com/kube-burner/kube-burner v1.9.8
github.com/kube-burner/kube-burner v1.10.3
github.com/openshift/api v0.0.0-20240516090725-a20192e21ba6
github.com/openshift/client-go v0.0.0-20240510131258-f646d5f29250
github.com/praserx/ipconv v1.2.1
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cobra v1.8.0
k8s.io/apimachinery v0.30.1
@@ -50,7 +51,6 @@ require (
github.com/opensearch-project/opensearch-go v1.1.0 // indirect
github.com/openshift/custom-resource-status v1.1.2 // indirect
github.com/pborman/uuid v1.2.0 // indirect
github.com/praserx/ipconv v1.2.1 // indirect
github.com/prometheus/client_golang v1.19.0 // indirect
github.com/prometheus/client_model v0.6.0 // indirect
github.com/prometheus/common v0.52.3 // indirect
8 changes: 4 additions & 4 deletions go.sum
@@ -95,8 +95,8 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloud-bulldozer/go-commons v1.0.15 h1:tqd73a18d+ES96x1oX3To+bJjHnXFuvag9QtcLvSGd0=
github.com/cloud-bulldozer/go-commons v1.0.15/go.mod h1:dUXxFH2mosY5OYY+cFPS3XvCekUTZRtMPuK/ni8Azq8=
github.com/cloud-bulldozer/go-commons v1.0.16 h1:eVjOyFl7RVH3oQni0ssNT6Zjl7iovG6km11YZM1H8Ww=
github.com/cloud-bulldozer/go-commons v1.0.16/go.mod h1:GF5G/9qiKJqUYIsqPgW5gFXzeSYjZn0tTScj7E/6ka4=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -354,8 +354,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kube-burner/kube-burner v1.9.8 h1:sFiVWNXppdVdJtNb2TZbUPoYbAueqSXSxUeI3/k0ySI=
github.com/kube-burner/kube-burner v1.9.8/go.mod h1:44exUdvnBXNbjfNn6hhVbnfFWcPtHiC171giwIL2uXY=
github.com/kube-burner/kube-burner v1.10.3 h1:+mnZTt9ldM8MaPCYJWw8ySt94rIAq2Jl9JSeFAUXlfI=
github.com/kube-burner/kube-burner v1.10.3/go.mod h1:SrXPMLVcgHreF9WzjfakZ5i0Njg3p/2Im6IwFkMxFLQ=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
36 changes: 29 additions & 7 deletions index.go
@@ -17,11 +17,15 @@ package ocp
import (
"os"
"strings"
"encoding/json"
"time"
"fmt"

"github.com/cloud-bulldozer/go-commons/indexers"
"github.com/cloud-bulldozer/go-commons/version"
ocpmetadata "github.com/cloud-bulldozer/go-commons/ocp-metadata"
"github.com/kube-burner/kube-burner/pkg/config"
"github.com/kube-burner/kube-burner/pkg/burner"
"github.com/kube-burner/kube-burner/pkg/prometheus"
"github.com/kube-burner/kube-burner/pkg/util/metrics"
"github.com/kube-burner/kube-burner/pkg/workloads"
@@ -40,6 +44,7 @@ func NewIndex(metricsEndpoint *string, ocpMetaAgent *ocpmetadata.Metadata) *cobr
var prometheusURL, prometheusToken string
var tarballName string
var indexer config.MetricsEndpoint
var clusterMetadataMap map[string]interface{}
cmd := &cobra.Command{
Use: "index",
Short: "Runs index sub-command",
@@ -92,13 +97,11 @@
}
}

metadata := map[string]interface{}{
"platform": clusterMetadata.Platform,
"ocpVersion": clusterMetadata.OCPVersion,
"ocpMajorVersion": clusterMetadata.OCPMajorVersion,
"k8sVersion": clusterMetadata.K8SVersion,
"totalNodes": clusterMetadata.TotalNodes,
"sdnType": clusterMetadata.SDNType,
metadata := make(map[string]interface{})
jsonData, _ := json.Marshal(clusterMetadata)
json.Unmarshal(jsonData, &clusterMetadataMap)
for k, v := range clusterMetadataMap {
metadata[k] = v
}
workloads.ConfigSpec.MetricsEndpoints = append(workloads.ConfigSpec.MetricsEndpoints, indexer)
metricsScraper := metrics.ProcessMetricsScraperConfig(metrics.ScraperConfig{
@@ -124,6 +127,25 @@
log.Fatal(err)
}
}
var indexerValue indexers.Indexer
for _, value := range metricsScraper.IndexerList {
indexerValue = value
break
}
jobSummary := burner.JobSummary{
Timestamp: time.Unix(start, 0).UTC(),
EndTimestamp: time.Unix(end, 0).UTC(),
ElapsedTime: time.Unix(end, 0).UTC().Sub(time.Unix(start, 0).UTC()).Round(time.Second).Seconds(),
UUID: uuid,
JobConfig: config.Job{
Name: jobName,
},
Metadata: metricsScraper.Metadata,
MetricName: "jobSummary",
Version: fmt.Sprintf("%v@%v", version.Version, version.GitCommit),
Passed: rc == 0,
}
burner.IndexJobSummary([]burner.JobSummary{jobSummary}, indexerValue)
},
}
cmd.Flags().StringVarP(&metricsProfile, "metrics-profile", "m", "metrics.yml", "comma-separated list of metric profiles")
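
The `index` sub-command now flattens the cluster metadata struct into a generic map through a JSON marshal/unmarshal round-trip rather than copying fields by hand. A standalone sketch of that technique, using a stand-in struct instead of the real `ocpmetadata.ClusterMetadata`:

```golang
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Stand-in for ocpmetadata.ClusterMetadata; the real type carries many more fields.
type clusterMetadata struct {
	Platform   string `json:"platform"`
	OCPVersion string `json:"ocpVersion"`
	TotalNodes int    `json:"totalNodes"`
}

func main() {
	cm := clusterMetadata{Platform: "AWS", OCPVersion: "4.16.0", TotalNodes: 9}
	// Marshal the struct and unmarshal it back into a map so that every
	// json-tagged field becomes a key without enumerating the fields by hand.
	raw, err := json.Marshal(cm)
	if err != nil {
		log.Fatal(err)
	}
	var asMap map[string]interface{}
	if err := json.Unmarshal(raw, &asMap); err != nil {
		log.Fatal(err)
	}
	metadata := make(map[string]interface{})
	for k, v := range asMap {
		metadata[k] = v
	}
	// Note: numeric fields come back as float64 after the JSON round-trip.
	fmt.Println(metadata)
}
```

One side effect of the round-trip is that integer fields arrive as `float64` in the resulting map; this is normally harmless for indexing, but worth keeping in mind if the values are consumed programmatically.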
1 change: 0 additions & 1 deletion networkpolicy.go
@@ -33,7 +33,6 @@ func NewNetworkPolicy(wh *workloads.WorkloadHelper, variant string) *cobra.Comma
Use: variant,
Short: fmt.Sprintf("Runs %v workload", variant),
PreRun: func(cmd *cobra.Command, args []string) {
wh.Metadata.Benchmark = cmd.Name()
os.Setenv("JOB_ITERATIONS", fmt.Sprint(iterations))
os.Setenv("CHURN", fmt.Sprint(churn))
os.Setenv("CHURN_CYCLES", fmt.Sprintf("%v", churnCycles))
3 changes: 1 addition & 2 deletions node-density-cni.go
@@ -37,8 +37,7 @@ func NewNodeDensityCNI(wh *workloads.WorkloadHelper) *cobra.Command {
Short: "Runs node-density-cni workload",
SilenceUsage: true,
PreRun: func(cmd *cobra.Command, args []string) {
wh.Metadata.Benchmark = cmd.Name()
totalPods := wh.Metadata.WorkerNodesCount * podsPerNode
totalPods := wh.ClusterMetadata.WorkerNodesCount * podsPerNode
podCount, err := wh.MetadataAgent.GetCurrentPodCount()
if err != nil {
log.Fatal(err)
3 changes: 1 addition & 2 deletions node-density-heavy.go
@@ -36,8 +36,7 @@ func NewNodeDensityHeavy(wh *workloads.WorkloadHelper) *cobra.Command {
Short: "Runs node-density-heavy workload",
SilenceUsage: true,
PreRun: func(cmd *cobra.Command, args []string) {
wh.Metadata.Benchmark = cmd.Name()
totalPods := wh.Metadata.WorkerNodesCount * podsPerNode
totalPods := wh.ClusterMetadata.WorkerNodesCount * podsPerNode
podCount, err := wh.MetadataAgent.GetCurrentPodCount()
if err != nil {
log.Fatal(err)
3 changes: 1 addition & 2 deletions node-density.go
@@ -35,8 +35,7 @@ func NewNodeDensity(wh *workloads.WorkloadHelper) *cobra.Command {
Short: "Runs node-density workload",
SilenceUsage: true,
PreRun: func(cmd *cobra.Command, args []string) {
wh.Metadata.Benchmark = cmd.Name()
totalPods := wh.Metadata.WorkerNodesCount * podsPerNode
totalPods := wh.ClusterMetadata.WorkerNodesCount * podsPerNode
podCount, err := wh.MetadataAgent.GetCurrentPodCount()
if err != nil {
log.Fatal(err.Error())
1 change: 0 additions & 1 deletion pvc-density.go
@@ -49,7 +49,6 @@ func NewPVCDensity(wh *workloads.WorkloadHelper) *cobra.Command {
Short: "Runs pvc-density workload",
SilenceUsage: true,
PreRun: func(cmd *cobra.Command, args []string) {
wh.Metadata.Benchmark = cmd.Name()
os.Setenv("JOB_ITERATIONS", fmt.Sprint(iterations))
os.Setenv("CONTAINER_IMAGE", containerImage)
os.Setenv("CLAIM_SIZE", fmt.Sprint(claimSize))
14 changes: 7 additions & 7 deletions test/test-ocp.bats
@@ -31,32 +31,32 @@ teardown_file() {

@test "custom-workload as node-density" {
run_cmd kube-burner-ocp init --config=custom-workload.yml ${COMMON_FLAGS} --metrics-endpoint metrics-endpoints.yaml
check_metric_value clusterMetadata jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
check_metric_value jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
}

@test "node-density: es-indexing=true" {
run_cmd kube-burner-ocp node-density --pods-per-node=75 --pod-ready-threshold=10s ${COMMON_FLAGS}
check_metric_value etcdVersion clusterMetadata jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
check_metric_value etcdVersion jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
}

@test "node-density-heavy: gc-metrics=true; local-indexing=true" {
run_cmd kube-burner-ocp node-density-heavy --pods-per-node=75 --uuid=abcd --local-indexing --gc-metrics=true
check_file_list collected-metrics-abcd/etcdVersion.json collected-metrics-abcd/clusterMetadata.json collected-metrics-abcd/jobSummary-node-density-heavy.json collected-metrics-abcd/jobSummary-garbage-collection.json collected-metrics-abcd/podLatencyMeasurement-node-density-heavy.json collected-metrics-abcd/podLatencyQuantilesMeasurement-node-density-heavy.json
check_file_list collected-metrics-abcd/etcdVersion.json collected-metrics-abcd/jobSummary.json collected-metrics-abcd/podLatencyMeasurement-node-density-heavy.json collected-metrics-abcd/podLatencyQuantilesMeasurement-node-density-heavy.json
}

@test "cluster-density-ms: metrics-endpoint=true; es-indexing=true" {
run_cmd kube-burner-ocp cluster-density-ms --iterations=1 --churn=false --metrics-endpoint metrics-endpoints.yaml ${COMMON_FLAGS}
check_metric_value clusterMetadata jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
check_metric_value jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
}

@test "cluster-density-v2: profile-type=both; user-metadata=true; es-indexing=true; churning=true; svcLatency=true" {
run_cmd kube-burner-ocp cluster-density-v2 --iterations=5 --churn-duration=1m --churn-delay=5s --profile-type=both ${COMMON_FLAGS} --user-metadata=user-metadata.yml --service-latency
check_metric_value cpu-kubelet clusterMetadata jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement svcLatencyMeasurement svcLatencyQuantilesMeasurement etcdVersion
check_metric_value cpu-kubelet jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement svcLatencyMeasurement svcLatencyQuantilesMeasurement etcdVersion
}

@test "cluster-density-v2: churn-deletion-strategy=gvr" {
run_cmd kube-burner-ocp cluster-density-v2 --iterations=2 --churn=true --churn-duration=1m --churn-delay=10s --churn-deletion-strategy=gvr ${COMMON_FLAGS}
check_metric_value etcdVersion clusterMetadata jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
check_metric_value etcdVersion jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
}

@test "cluster-density-v2: indexing=false; churning=false" {
@@ -90,7 +90,7 @@ teardown_file() {
@test "pvc-density" {
# Since 'aws' is the chosen storage provisioner, this will only execute successfully if the ocp environment is aws
run_cmd kube-burner-ocp pvc-density --iterations=2 --provisioner=aws ${COMMON_FLAGS}
check_metric_value clusterMetadata jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
check_metric_value jobSummary podLatencyMeasurement podLatencyQuantilesMeasurement
}

@test "web-burner-node-density" {
1 change: 0 additions & 1 deletion web-burner.go
@@ -33,7 +33,6 @@ func NewWebBurner(wh *workloads.WorkloadHelper, variant string) *cobra.Command {
Use: variant,
Short: fmt.Sprintf("Runs %v workload", variant),
PreRun: func(cmd *cobra.Command, args []string) {
wh.Metadata.Benchmark = cmd.Name()
os.Setenv("BFD", fmt.Sprint(bfd))
os.Setenv("BRIDGE", fmt.Sprint(bridge))
os.Setenv("CRD", fmt.Sprintf("%v", crd))
