diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 38b3100fd6b..57d58cb3e34 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -52,6 +52,7 @@ func main() { opts := &pipeline.Options{} flag.StringVar(&opts.Images.EntrypointImage, "entrypoint-image", "", "The container image containing our entrypoint binary.") + flag.StringVar(&opts.Images.SidecarLogResultsImage, "sidecarlogresults-image", "", "The container image containing the binary for accessing results.") flag.StringVar(&opts.Images.NopImage, "nop-image", "", "The container image used to stop sidecars") flag.StringVar(&opts.Images.GitImage, "git-image", "", "The container image containing our Git binary.") flag.StringVar(&opts.Images.KubeconfigWriterImage, "kubeconfig-writer-image", "", "The container image containing our kubeconfig writer binary.") diff --git a/cmd/entrypoint/main.go b/cmd/entrypoint/main.go index f138c46e2e6..6eb851dabd5 100644 --- a/cmd/entrypoint/main.go +++ b/cmd/entrypoint/main.go @@ -52,9 +52,10 @@ var ( breakpointOnFailure = flag.Bool("breakpoint_on_failure", false, "If specified, expect steps to not skip on failure") onError = flag.String("on_error", "", "Set to \"continue\" to ignore an error and continue when a container terminates with a non-zero exit code."+ " Set to \"stopAndFail\" to declare a failure with a step error and stop executing the rest of the steps.") - stepMetadataDir = flag.String("step_metadata_dir", "", "If specified, create directory to store the step metadata e.g. /tekton/steps//") - enableSpire = flag.Bool("enable_spire", false, "If specified by configmap, this enables spire signing and verification") - socketPath = flag.String("spire_socket_path", "unix:///spiffe-workload-api/spire-agent.sock", "Experimental: The SPIRE agent socket for SPIFFE workload API.") + stepMetadataDir = flag.String("step_metadata_dir", "", "If specified, create directory to store the step metadata e.g. /tekton/steps//") + enableSpire = flag.Bool("enable_spire", false, "If specified by configmap, this enables spire signing and verification") + socketPath = flag.String("spire_socket_path", "unix:///spiffe-workload-api/spire-agent.sock", "Experimental: The SPIRE agent socket for SPIFFE workload API.") + dontSendResultsToTerminationPath = flag.Bool("dont_send_results_to_termination_path", false, "If specified, dont send results to the termination path.") ) const ( @@ -154,13 +155,14 @@ func main() { stdoutPath: *stdoutPath, stderrPath: *stderrPath, }, - PostWriter: &realPostWriter{}, - Results: strings.Split(*results, ","), - Timeout: timeout, - BreakpointOnFailure: *breakpointOnFailure, - OnError: *onError, - StepMetadataDir: *stepMetadataDir, - SpireWorkloadAPI: spireWorkloadAPI, + PostWriter: &realPostWriter{}, + Results: strings.Split(*results, ","), + Timeout: timeout, + BreakpointOnFailure: *breakpointOnFailure, + OnError: *onError, + StepMetadataDir: *stepMetadataDir, + SpireWorkloadAPI: spireWorkloadAPI, + DontSendResultsToTerminationPath: *dontSendResultsToTerminationPath, } // Copy any creds injected by the controller into the $HOME directory of the current diff --git a/cmd/sidecarlogresults/main.go b/cmd/sidecarlogresults/main.go new file mode 100644 index 00000000000..b2bef7b5ca2 --- /dev/null +++ b/cmd/sidecarlogresults/main.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "flag" + "log" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + "github.com/tektoncd/pipeline/pkg/sidecarlogresults" +) + +func main() { + var resultsDir string + var resultNames string + flag.StringVar(&resultsDir, "results-dir", pipeline.DefaultResultPath, "Path to the results directory. Default is /tekton/results") + flag.StringVar(&resultNames, "result-names", "", "comma separated result names to expect from the steps running in the pod. eg. foo,bar,baz") + flag.Parse() + if resultNames == "" { + log.Fatal("result-names were not provided") + } + err := sidecarlogresults.LookForResults(resultsDir, resultNames) + if err != nil { + log.Fatal(err) + } +} diff --git a/config/controller.yaml b/config/controller.yaml index 4883d1c4865..174b6f1d2c8 100644 --- a/config/controller.yaml +++ b/config/controller.yaml @@ -68,6 +68,7 @@ spec: "-kubeconfig-writer-image", "ko://github.com/tektoncd/pipeline/cmd/kubeconfigwriter", "-git-image", "ko://github.com/tektoncd/pipeline/cmd/git-init", "-entrypoint-image", "ko://github.com/tektoncd/pipeline/cmd/entrypoint", + "-sidecarlogresults-image", "ko://github.com/tektoncd/pipeline/cmd/sidecarlogresults", "-nop-image", "ko://github.com/tektoncd/pipeline/cmd/nop", "-imagedigest-exporter-image", "ko://github.com/tektoncd/pipeline/cmd/imagedigestexporter", "-pr-image", "ko://github.com/tektoncd/pipeline/cmd/pullrequest-init", diff --git a/config/enable-log-access-to-controller/clusterrole.yaml b/config/enable-log-access-to-controller/clusterrole.yaml new file mode 100644 index 00000000000..5fa2818f5a2 --- /dev/null +++ b/config/enable-log-access-to-controller/clusterrole.yaml @@ -0,0 +1,13 @@ +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-pipelines-controller-pod-log-access + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +rules: + - apiGroups: [""] + # Controller needs to get the logs of the results sidecar created by TaskRuns to extract results. + resources: ["pods/log"] + verbs: ["get"] diff --git a/config/enable-log-access-to-controller/clusterrolebinding.yaml b/config/enable-log-access-to-controller/clusterrolebinding.yaml new file mode 100644 index 00000000000..1b63980d177 --- /dev/null +++ b/config/enable-log-access-to-controller/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: tekton-pipelines-controller-pod-log-access + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines +subjects: + - kind: ServiceAccount + name: tekton-pipelines-controller + namespace: tekton-pipelines +roleRef: + kind: ClusterRole + name: tekton-pipelines-controller-pod-log-access + apiGroup: rbac.authorization.k8s.io diff --git a/docs/install.md b/docs/install.md index 8bf025cf8f6..2138f98abd9 100644 --- a/docs/install.md +++ b/docs/install.md @@ -24,6 +24,7 @@ This guide explains how to install Tekton Pipelines. 
It covers the following top
 - [Customizing the Pipelines Controller behavior](#customizing-the-pipelines-controller-behavior)
 - [Alpha Features](#alpha-features)
 - [Beta Features](#beta-features)
+- [Enabling larger results using sidecar logs](#enabling-larger-results-using-sidecar-logs)
 - [Configuring High Availability](#configuring-high-availability)
 - [Configuring tekton pipeline controller performance](#configuring-tekton-pipeline-controller-performance)
 - [Creating a custom release of Tekton Pipelines](#creating-a-custom-release-of-tekton-pipelines)
@@ -421,6 +422,7 @@ features](#alpha-features) to be used.
   do both. For more information, see [Configuring usage of `TaskRun` and `Run` embedded statuses](pipelineruns.md#configuring-usage-of-taskrun-and-run-embedded-statuses).
 - `resource-verification-mode`: Setting this flag to "enforce" will enforce verification of tasks/pipeline. Failing to verify will fail the taskrun/pipelinerun. "warn" will only log the err message and "skip" will skip the whole verification.
+- `enable-sidecar-logs-results`: Set this flag to "true" to extract results from the logs of a results sidecar instead of the termination message, allowing results larger than the termination message permits. While the termination message restricts the combined size of results to 4K per pod, enabling this feature allows 1K per result, with as many results as required.

 For example:

@@ -470,6 +472,55 @@ the `feature-flags` ConfigMap alongside your Tekton Pipelines deployment via
 For beta versions of Tekton CRDs, setting `enable-api-fields` to "beta" is the same as
 setting it to "stable".

+## Enabling larger results using sidecar logs
+
+**Note**: The maximum size of a `Task`'s results is limited by the container termination message feature of Kubernetes, as results are passed back to the controller via this mechanism. At present, the limit is 4096 bytes.
+
+To exceed this limit of 4096 bytes, you can enable larger results using sidecar logs. With this feature enabled, each result is limited to 1024 bytes, but there is no restriction on the number of results.
+
+**Note**: To enable this feature, you need to grant the Tekton pipelines controller `get` access to `pods/log`. This means that the controller is able to read the logs of all pods.
+
+1. Create a cluster role by applying the following spec:
+
+```yaml
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: tekton-pipelines-controller-pod-log-access
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: default
+    app.kubernetes.io/part-of: tekton-pipelines
+rules:
+  - apiGroups: [""]
+    # Controller needs to get the logs of the results sidecar created by TaskRuns to extract results.
+    resources: ["pods/log"]
+    verbs: ["get"]
+```
+
+2. Create a cluster role binding by applying the following spec:
+
+```yaml
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: tekton-pipelines-controller-pod-log-access
+  labels:
+    app.kubernetes.io/component: controller
+    app.kubernetes.io/instance: default
+    app.kubernetes.io/part-of: tekton-pipelines
+subjects:
+  - kind: ServiceAccount
+    name: tekton-pipelines-controller
+    namespace: tekton-pipelines
+roleRef:
+  kind: ClusterRole
+  name: tekton-pipelines-controller-pod-log-access
+  apiGroup: rbac.authorization.k8s.io
+```
+
+3. Enable the feature flag to use sidecar logs by setting `enable-sidecar-logs-results: "true"` in the [configMap](#customizing-the-pipelines-controller-behavior).
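+
+For example, you can enable the flag by patching the `feature-flags` ConfigMap in place (a minimal sketch, assuming the default `tekton-pipelines` installation namespace):
+
+```bash
+kubectl patch configmap feature-flags -n tekton-pipelines \
+  -p '{"data": {"enable-sidecar-logs-results": "true"}}'
+```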
+
 ## Configuring High Availability

 If you want to run Tekton Pipelines in a way so that webhooks are resiliant against failures and support
diff --git a/docs/tasks.md b/docs/tasks.md
index 3782ed7cc67..810f683219c 100644
--- a/docs/tasks.md
+++ b/docs/tasks.md
@@ -23,6 +23,7 @@ weight: 200
 - [Specifying `Resources`](#specifying-resources)
 - [Specifying `Workspaces`](#specifying-workspaces)
 - [Emitting `Results`](#emitting-results)
+  - [Larger `Results` using sidecar logs](#larger-results-using-sidecar-logs)
 - [Specifying `Volumes`](#specifying-volumes)
 - [Specifying a `Step` template](#specifying-a-step-template)
 - [Specifying `Sidecars`](#specifying-sidecars)
@@ -835,7 +836,7 @@ This also means that the number of Steps in a Task affects the maximum size of
 a as each Step is implemented as a container in the TaskRun's pod. The more containers we have in our pod, *the smaller the allowed size of each container's message*, meaning that the **more steps you have in a Task, the smaller the result for each step can be**.
-For example, if you have 10 steps, the size of each step's Result will have a maximum of less than 1KB*.
+For example, if you have 10 steps, the size of each step's Result will have a maximum of less than 1KB.
 If your `Task` writes a large number of small results, you can work around this limitation by writing each result from a separate `Step` so that each `Step` has its own termination message.
@@ -847,6 +848,15 @@ available size will less than 4096 bytes.
 As a general rule-of-thumb, if a result needs to be larger than a kilobyte, you should likely use a [`Workspace`](#specifying-workspaces) to store and pass it between `Tasks` within a `Pipeline`.

+#### Larger `Results` using sidecar logs
+
+This is an experimental feature. The `enable-sidecar-logs-results` feature flag must be [set to `"true"`](./install.md#enabling-larger-results-using-sidecar-logs).
+
+Instead of using the termination message to store results, the TaskRun controller injects a sidecar container that monitors the results of all the steps. The sidecar mounts the volume where the steps write their results and, as soon as it finds a new result, logs it to stdout. The controller then reads the results from the sidecar container's logs (caution: this requires granting the controller access to [Kubernetes pods/log](./install.md#enabling-larger-results-using-sidecar-logs)).
+
+**Note**: This feature allows users to store up to `1 KB per result`. Because results are no longer limited by the size of the termination message, users can have as many results as they require, where each result can be up to 1 KB in size.
If the size of a result exceeds 1KB, then the TaskRun will be placed into a failed state with the following message: `Result exceeded the maximum allowed limit of 1024 bytes.` + + ### Specifying `Volumes` Specifies one or more [`Volumes`](https://kubernetes.io/docs/concepts/storage/volumes/) that the `Steps` in your diff --git a/examples/v1beta1/pipelineruns/4808-regression.yaml b/examples/v1beta1/pipelineruns/4808-regression.yaml index df4502a8a88..e7de89f8499 100644 --- a/examples/v1beta1/pipelineruns/4808-regression.yaml +++ b/examples/v1beta1/pipelineruns/4808-regression.yaml @@ -92,4 +92,4 @@ spec: name: result-test params: - name: RESULT_STRING_LENGTH - value: "3000" + value: "1000" diff --git a/examples/v1beta1/pipelineruns/alpha/pipelinerun-large-results.yaml b/examples/v1beta1/pipelineruns/alpha/pipelinerun-large-results.yaml new file mode 100644 index 00000000000..274ca739255 --- /dev/null +++ b/examples/v1beta1/pipelineruns/alpha/pipelinerun-large-results.yaml @@ -0,0 +1,41 @@ +apiVersion: tekton.dev/v1beta1 +kind: Task +metadata: + name: large-result-task +spec: + results: + - name: result1 + - name: result2 + - name: result3 + - name: result4 + - name: result5 + steps: + - name: step1 + image: alpine + script: | + cat /dev/urandom | head -c 750 | base64 | tee $(results.result1.path); + cat /dev/urandom | head -c 750 | base64 | tee $(results.result2.path); + cat /dev/urandom | head -c 750 | base64 | tee $(results.result3.path); + cat /dev/urandom | head -c 750 | base64 | tee $(results.result4.path); + cat /dev/urandom | head -c 750 | base64 | tee $(results.result5.path); +--- +apiVersion: tekton.dev/v1beta1 +kind: Pipeline +metadata: + name: large-result-pipeline +spec: + tasks: + - name: large-task + taskRef: + name: large-result-task + results: + - name: large-result + value: $(tasks.large-task.results.result1) +--- +apiVersion: tekton.dev/v1beta1 +kind: PipelineRun +metadata: + name: large-result-pipeline-run +spec: + pipelineRef: + name: large-result-pipeline diff --git a/examples/v1beta1/taskruns/alpha/large-task-result.yaml b/examples/v1beta1/taskruns/alpha/large-task-result.yaml new file mode 100644 index 00000000000..8c2899beab2 --- /dev/null +++ b/examples/v1beta1/taskruns/alpha/large-task-result.yaml @@ -0,0 +1,28 @@ +apiVersion: tekton.dev/v1beta1 +kind: TaskRun +metadata: + generateName: larger-results- +spec: + taskSpec: + description: | + A task that creates results > termination message limit of 4K per pod! + results: + - name: result1 + - name: result2 + - name: result3 + - name: result4 + - name: result5 + steps: + - name: step1 + image: bash:latest + script: | + #!/usr/bin/env bash + cat /dev/urandom | head -c 750 | base64 | tee /tekton/results/result1 #about 1 K result + cat /dev/urandom | head -c 750 | base64 | tee /tekton/results/result2 #about 1 K result + - name: step2 + image: bash:latest + script: | + #!/usr/bin/env bash + cat /dev/urandom | head -c 750 | base64 | tee /tekton/results/result3 #about 1 K result + cat /dev/urandom | head -c 750 | base64 | tee /tekton/results/result4 #about 1 K result + cat /dev/urandom | head -c 750 | base64 | tee /tekton/results/result5 #about 1 K result diff --git a/pkg/apis/config/feature_flags.go b/pkg/apis/config/feature_flags.go index 4707280092f..0d3239676dd 100644 --- a/pkg/apis/config/feature_flags.go +++ b/pkg/apis/config/feature_flags.go @@ -74,6 +74,8 @@ const ( DefaultEnableSpire = false // DefaultResourceVerificationMode is the default value for "resource-verification-mode". 
DefaultResourceVerificationMode = SkipResourceVerificationMode + // DefaultSidecarLogsResults is the default value for "enable-larger-results". + DefaultSidecarLogsResults = false disableAffinityAssistantKey = "disable-affinity-assistant" disableCredsInitKey = "disable-creds-init" @@ -87,6 +89,7 @@ const ( embeddedStatus = "embedded-status" enableSpire = "enable-spire" verificationMode = "resource-verification-mode" + enableSidecarLogsResults = "enable-sidecar-logs-results" ) // FeatureFlags holds the features configurations @@ -105,6 +108,7 @@ type FeatureFlags struct { EmbeddedStatus string EnableSpire bool ResourceVerificationMode string + EnableSidecarLogsResults bool } // GetFeatureFlagsConfigName returns the name of the configmap containing all @@ -159,6 +163,9 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if err := setResourceVerificationMode(cfgMap, DefaultResourceVerificationMode, &tc.ResourceVerificationMode); err != nil { return nil, err } + if err := setFeature(enableSidecarLogsResults, DefaultSidecarLogsResults, &tc.EnableSidecarLogsResults); err != nil { + return nil, err + } // Given that they are alpha features, Tekton Bundles and Custom Tasks should be switched on if // enable-api-fields is "alpha". If enable-api-fields is not "alpha" then fall back to the value of diff --git a/pkg/apis/pipeline/images.go b/pkg/apis/pipeline/images.go index e40ebfe5639..8c74410e8bc 100644 --- a/pkg/apis/pipeline/images.go +++ b/pkg/apis/pipeline/images.go @@ -26,6 +26,8 @@ import ( type Images struct { // EntrypointImage is container image containing our entrypoint binary. EntrypointImage string + // SidecarLogResultsImage is container image containing the binary that fetches results from the steps and logs it to stdout. + SidecarLogResultsImage string // NopImage is the container image used to kill sidecars. NopImage string // GitImage is the container image with Git that we use to implement the Git source step. @@ -55,6 +57,7 @@ func (i Images) Validate() error { v, name string }{ {i.EntrypointImage, "entrypoint-image"}, + {i.SidecarLogResultsImage, "sidecarlogresults-image"}, {i.NopImage, "nop-image"}, {i.GitImage, "git-image"}, {i.KubeconfigWriterImage, "kubeconfig-writer-image"}, diff --git a/pkg/apis/pipeline/images_test.go b/pkg/apis/pipeline/images_test.go index 8f66cf31985..87222aa1d2e 100644 --- a/pkg/apis/pipeline/images_test.go +++ b/pkg/apis/pipeline/images_test.go @@ -9,6 +9,7 @@ import ( func TestValidate(t *testing.T) { valid := pipeline.Images{ EntrypointImage: "set", + SidecarLogResultsImage: "set", NopImage: "set", GitImage: "set", KubeconfigWriterImage: "set", @@ -25,6 +26,7 @@ func TestValidate(t *testing.T) { invalid := pipeline.Images{ EntrypointImage: "set", + SidecarLogResultsImage: "set", NopImage: "set", GitImage: "", // unset! 
KubeconfigWriterImage: "set", diff --git a/pkg/apis/pipeline/v1beta1/taskrun_types.go b/pkg/apis/pipeline/v1beta1/taskrun_types.go index 76b3dee6474..1b23e57777b 100644 --- a/pkg/apis/pipeline/v1beta1/taskrun_types.go +++ b/pkg/apis/pipeline/v1beta1/taskrun_types.go @@ -181,6 +181,8 @@ const ( TaskRunReasonsResultsVerificationFailed TaskRunReason = "TaskRunResultsVerificationFailed" // AwaitingTaskRunResults is the reason set when waiting upon `TaskRun` results and signatures to verify AwaitingTaskRunResults TaskRunReason = "AwaitingTaskRunResults" + // TaskRunReasonResultLargerThanAllowedLimit is the reason set when one of the results exceeds its maximum allowed limit of 1 KB + TaskRunReasonResultLargerThanAllowedLimit TaskRunReason = "TaskRunResultLargerThanAllowedLimit" ) func (t TaskRunReason) String() string { diff --git a/pkg/entrypoint/entrypointer.go b/pkg/entrypoint/entrypointer.go index 5fc8c59f429..f5d1ea5a7cf 100644 --- a/pkg/entrypoint/entrypointer.go +++ b/pkg/entrypoint/entrypointer.go @@ -85,6 +85,8 @@ type Entrypointer struct { SpireWorkloadAPI spire.EntrypointerAPIClient // ResultsDirectory is the directory to find results, defaults to pipeline.DefaultResultPath ResultsDirectory string + // Dont Send results to the termination path + DontSendResultsToTerminationPath bool } // Waiter encapsulates waiting for files to exist. @@ -189,7 +191,7 @@ func (e Entrypointer) Go() error { // strings.Split(..) with an empty string returns an array that contains one element, an empty string. // This creates an error when trying to open the result folder as a file. - if len(e.Results) >= 1 && e.Results[0] != "" { + if !e.DontSendResultsToTerminationPath && len(e.Results) >= 1 && e.Results[0] != "" { resultPath := pipeline.DefaultResultPath if e.ResultsDirectory != "" { resultPath = e.ResultsDirectory diff --git a/pkg/pod/entrypoint_test.go b/pkg/pod/entrypoint_test.go index a18c8838e27..9b2795041e2 100644 --- a/pkg/pod/entrypoint_test.go +++ b/pkg/pod/entrypoint_test.go @@ -102,6 +102,74 @@ func TestOrderContainers(t *testing.T) { } } +func TestOrderContainersWithResultsSidecarLogs(t *testing.T) { + steps := []corev1.Container{{ + Image: "step-1", + Command: []string{"cmd"}, + Args: []string{"arg1", "arg2"}, + }, { + Image: "step-2", + Command: []string{"cmd1", "cmd2", "cmd3"}, // multiple cmd elements + Args: []string{"arg1", "arg2"}, + VolumeMounts: []corev1.VolumeMount{volumeMount}, // pre-existing volumeMount + }, { + Image: "step-3", + Command: []string{"cmd"}, + Args: []string{"arg1", "arg2"}, + }} + want := []corev1.Container{{ + Image: "step-1", + Command: []string{entrypointBinary}, + Args: []string{ + "-wait_file", "/tekton/downward/ready", + "-wait_file_content", + "-post_file", "/tekton/run/0/out", + "-termination_path", "/tekton/termination", + "-step_metadata_dir", "/tekton/run/0/status", + "-dont_send_results_to_termination_path", + "-entrypoint", "cmd", "--", + "arg1", "arg2", + }, + VolumeMounts: []corev1.VolumeMount{downwardMount}, + TerminationMessagePath: "/tekton/termination", + }, { + Image: "step-2", + Command: []string{entrypointBinary}, + Args: []string{ + "-wait_file", "/tekton/run/0/out", + "-post_file", "/tekton/run/1/out", + "-termination_path", "/tekton/termination", + "-step_metadata_dir", "/tekton/run/1/status", + "-dont_send_results_to_termination_path", + "-entrypoint", "cmd1", "--", + "cmd2", "cmd3", + "arg1", "arg2", + }, + VolumeMounts: []corev1.VolumeMount{volumeMount}, + TerminationMessagePath: "/tekton/termination", + }, { + Image: "step-3", + 
Command: []string{entrypointBinary}, + Args: []string{ + "-wait_file", "/tekton/run/1/out", + "-post_file", "/tekton/run/2/out", + "-termination_path", "/tekton/termination", + "-step_metadata_dir", "/tekton/run/2/status", + "-dont_send_results_to_termination_path", + "-entrypoint", "cmd", "--", + "arg1", "arg2", + }, + TerminationMessagePath: "/tekton/termination", + }} + got, err := orderContainers([]string{"-dont_send_results_to_termination_path"}, steps, nil, nil, true) + if err != nil { + t.Fatalf("orderContainers: %v", err) + } + if d := cmp.Diff(want, got); d != "" { + t.Errorf("Diff %s", diff.PrintWantGot(d)) + } +} + func TestOrderContainersWithNoWait(t *testing.T) { steps := []corev1.Container{{ Image: "step-1", diff --git a/pkg/pod/pod.go b/pkg/pod/pod.go index 30bf9d8fa1c..9828213c16e 100644 --- a/pkg/pod/pod.go +++ b/pkg/pod/pod.go @@ -119,6 +119,7 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec implicitEnvVars := []corev1.EnvVar{} featureFlags := config.FromContextOrDefaults(ctx).FeatureFlags alphaAPIEnabled := featureFlags.EnableAPIFields == config.AlphaAPIFields + sidecarLogsResultsEnabled := config.FromContextOrDefaults(ctx).FeatureFlags.EnableSidecarLogsResults // Add our implicit volumes first, so they can be overridden by the user if they prefer. volumes = append(volumes, implicitVolumes...) @@ -127,10 +128,12 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec // Create Volumes and VolumeMounts for any credentials found in annotated // Secrets, along with any arguments needed by Step entrypoints to process // those secrets. + commonExtraEntrypointArgs := []string{} credEntrypointArgs, credVolumes, credVolumeMounts, err := credsInit(ctx, taskRun.Spec.ServiceAccountName, taskRun.Namespace, b.KubeClient) if err != nil { return nil, err } + commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, credEntrypointArgs...) volumes = append(volumes, credVolumes...) volumeMounts = append(volumeMounts, credVolumeMounts...) 
@@ -147,6 +150,12 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec if alphaAPIEnabled && taskRun.Spec.ComputeResources != nil { tasklevel.ApplyTaskLevelComputeResources(steps, taskRun.Spec.ComputeResources) } + if sidecarLogsResultsEnabled { + // create a results sidecar + resultsSidecar := createResultsSidecar(taskSpec, b.Images.SidecarLogResultsImage) + taskSpec.Sidecars = append(taskSpec.Sidecars, resultsSidecar) + commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, "-dont_send_results_to_termination_path") + } sidecars, err := v1beta1.MergeSidecarsWithOverrides(taskSpec.Sidecars, taskRun.Spec.SidecarOverrides) if err != nil { return nil, err @@ -192,9 +201,9 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec readyImmediately := isPodReadyImmediately(*featureFlags, taskSpec.Sidecars) if alphaAPIEnabled { - stepContainers, err = orderContainers(credEntrypointArgs, stepContainers, &taskSpec, taskRun.Spec.Debug, !readyImmediately) + stepContainers, err = orderContainers(commonExtraEntrypointArgs, stepContainers, &taskSpec, taskRun.Spec.Debug, !readyImmediately) } else { - stepContainers, err = orderContainers(credEntrypointArgs, stepContainers, &taskSpec, nil, !readyImmediately) + stepContainers, err = orderContainers(commonExtraEntrypointArgs, stepContainers, &taskSpec, nil, !readyImmediately) } if err != nil { return nil, err @@ -259,6 +268,25 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec stepContainers[i].VolumeMounts = vms } + if sidecarLogsResultsEnabled { + // Mount implicit volumes onto sidecarContainers + // so that they can access /tekton/results. + for i, s := range sidecarContainers { + requestedVolumeMounts := map[string]bool{} + for _, vm := range s.VolumeMounts { + requestedVolumeMounts[filepath.Clean(vm.MountPath)] = true + } + var toAdd []corev1.VolumeMount + for _, imp := range volumeMounts { + if !requestedVolumeMounts[filepath.Clean(imp.MountPath)] { + toAdd = append(toAdd, imp) + } + } + vms := append(s.VolumeMounts, toAdd...) //nolint + sidecarContainers[i].VolumeMounts = vms + } + } + // This loop: // - sets container name to add "step-" prefix or "step-unnamed-#" if not specified. // TODO(#1605): Remove this loop and make each transformation in @@ -435,3 +463,21 @@ func entrypointInitContainer(image string, steps []v1beta1.Step) corev1.Containe } return prepareInitContainer } + +// createResultsSidecar creates a sidecar that will run the sidecarlogresults binary. 
+func createResultsSidecar(taskSpec v1beta1.TaskSpec, image string) v1beta1.Sidecar { + resultsStr := "" + comma := "" + for i, tres := range taskSpec.Results { + if i > 0 { + comma = "," + } + resultsStr = fmt.Sprintf("%s%s%s", resultsStr, comma, tres.Name) + } + command := []string{"/ko-app/sidecarlogresults", "-results-dir", pipeline.DefaultResultPath, "-result-names", resultsStr} + return v1beta1.Sidecar{ + Name: "results", + Image: image, + Command: command, + } +} diff --git a/pkg/pod/pod_test.go b/pkg/pod/pod_test.go index ee9bcc31e71..b80a1bc9578 100644 --- a/pkg/pod/pod_test.go +++ b/pkg/pod/pod_test.go @@ -38,6 +38,7 @@ import ( "github.com/tektoncd/pipeline/test/diff" "github.com/tektoncd/pipeline/test/names" corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" fakek8s "k8s.io/client-go/kubernetes/fake" @@ -1786,6 +1787,74 @@ _EOF_ }}, ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, }, + }, { + desc: "sidecar logs enabled", + featureFlags: map[string]string{"enable-sidecar-logs-results": "true"}, + ts: v1beta1.TaskSpec{ + Results: []v1beta1.TaskResult{{ + Name: "foo", + Type: v1beta1.ResultsTypeString, + }}, + Steps: []v1beta1.Step{{ + Name: "name", + Image: "image", + Command: []string{"cmd"}, // avoid entrypoint lookup. + }}, + }, + want: &corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + InitContainers: []corev1.Container{ + entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}}), + }, + Containers: []corev1.Container{{ + Name: "step-name", + Image: "image", + Command: []string{"/tekton/bin/entrypoint"}, + Args: []string{ + "-wait_file", + "/tekton/downward/ready", + "-wait_file_content", + "-post_file", + "/tekton/run/0/out", + "-termination_path", + "/tekton/termination", + "-step_metadata_dir", + "/tekton/run/0/status", + "-dont_send_results_to_termination_path", + "-results", + "foo", + "-entrypoint", + "cmd", + "--", + }, + VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, { + Name: "tekton-creds-init-home-0", + MountPath: "/tekton/creds", + }}, implicitVolumeMounts...), + TerminationMessagePath: "/tekton/termination", + }, { + Name: "sidecar-results", + Image: "", + Command: []string{ + "/ko-app/sidecarlogresults", + "-results-dir", + "/tekton/results", + "-result-names", + "foo", + }, + Resources: corev1.ResourceRequirements{ + Requests: nil, + }, + VolumeMounts: append([]v1.VolumeMount{ + {Name: "tekton-internal-bin", ReadOnly: true, MountPath: "/tekton/bin"}, + }, implicitVolumeMounts...), + }}, + Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{ + Name: "tekton-creds-init-home-0", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, + }), + ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, + }, }} { t.Run(c.desc, func(t *testing.T) { names.TestingSeed() diff --git a/pkg/pod/status.go b/pkg/pod/status.go index 0a0894981eb..cdc232fe62e 100644 --- a/pkg/pod/status.go +++ b/pkg/pod/status.go @@ -17,18 +17,25 @@ limitations under the License. 
package pod import ( + "bufio" + "context" "encoding/json" + "errors" "fmt" + "io" "strconv" "strings" "time" "github.com/hashicorp/go-multierror" + "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/termination" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "knative.dev/pkg/apis" ) @@ -74,10 +81,16 @@ const ( // timeFormat is RFC3339 with millisecond timeFormat = "2006-01-02T15:04:05.000Z07:00" + + // maxResultLimit is the Max allowed size of the Result : Setting to 1.0K + maxResultLimit = 1024 ) const oomKilled = "OOMKilled" +// ErrorReasonMaxResultSizeExceeded indicates that the result exceeded its maximum allowed size +var ErrorReasonMaxResultSizeExceeded = fmt.Errorf("%s", "MaxResultSizeExceeded") + // SidecarsReady returns true if all of the Pod's sidecars are Ready or // Terminated. func SidecarsReady(podStatus corev1.PodStatus) bool { @@ -104,7 +117,7 @@ func SidecarsReady(podStatus corev1.PodStatus) bool { } // MakeTaskRunStatus returns a TaskRunStatus based on the Pod's status. -func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev1.Pod) (v1beta1.TaskRunStatus, error) { +func MakeTaskRunStatus(ctx context.Context, logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev1.Pod) (v1beta1.TaskRunStatus, error) { trs := &tr.Status if trs.GetCondition(apis.ConditionSucceeded) == nil || trs.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown { // If the taskRunStatus doesn't exist yet, it's because we just started running @@ -136,7 +149,7 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev } var merr *multierror.Error - if err := setTaskRunStatusBasedOnStepStatus(logger, stepStatuses, &tr); err != nil { + if err := setTaskRunStatusBasedOnStepStatus(ctx, logger, stepStatuses, &tr); err != nil { merr = multierror.Append(merr, err) } @@ -147,15 +160,98 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev return *trs, merr.ErrorOrNil() } -func setTaskRunStatusBasedOnStepStatus(logger *zap.SugaredLogger, stepStatuses []corev1.ContainerStatus, tr *v1beta1.TaskRun) *multierror.Error { +func getResultsFromSidecarLogs(ctx context.Context, clientset kubernetes.Interface, namespace string, name string, container string, stepResults []v1beta1.PipelineResourceResult) ([]v1beta1.PipelineResourceResult, error) { + podLogOpts := corev1.PodLogOptions{Container: container} + req := clientset.CoreV1().Pods(namespace).GetLogs(name, &podLogOpts) + sidecarLogs, err := req.Stream(ctx) + if err != nil { + podInitializingError := fmt.Sprintf(`container "%v" in pod "%v" is waiting to start: PodInitializing`, container, name) + if err.Error() == podInitializingError { + return stepResults, nil + } + return stepResults, err + } + defer sidecarLogs.Close() + stepResults, err = extractResultsFromLogs(sidecarLogs, stepResults) + return stepResults, err +} + +func extractResultsFromLogs(logs io.Reader, stepResults []v1beta1.PipelineResourceResult) ([]v1beta1.PipelineResourceResult, error) { + scanner := bufio.NewScanner(logs) + buf := make([]byte, 2) + scanner.Buffer(buf, 2*maxResultLimit) + resStr := "" + for scanner.Scan() { + text := scanner.Text() + if text != "---" { + resStr += text + } else { + result, err := parseResults(resStr) + if err != nil { + return stepResults, err + } + stepResults = append(stepResults, 
result) + resStr = "" + } + } + + if err := scanner.Err(); err != nil { + if errors.Is(err, bufio.ErrTooLong) { + return stepResults, ErrorReasonMaxResultSizeExceeded + } + return stepResults, err + } + return stepResults, nil +} + +func parseResults(resultStr string) (v1beta1.PipelineResourceResult, error) { + kv := strings.SplitN(resultStr, " ", 2) + result := v1beta1.PipelineResourceResult{} + if len(kv) == 2 { + key := kv[0] + value := kv[1] + if len(value) > maxResultLimit { + return result, ErrorReasonMaxResultSizeExceeded + } + result = v1beta1.PipelineResourceResult{ + Key: key, + Value: value, + ResultType: v1beta1.TaskRunResultType, + } + return result, nil + } + return result, fmt.Errorf("Invalid result %v", resultStr) +} + +func setTaskRunStatusBasedOnStepStatus(ctx context.Context, logger *zap.SugaredLogger, stepStatuses []corev1.ContainerStatus, tr *v1beta1.TaskRun) *multierror.Error { trs := &tr.Status var merr *multierror.Error + stepResults := []v1beta1.PipelineResourceResult{} + sidecarLogsResultsEnabled := config.FromContextOrDefaults(ctx).FeatureFlags.EnableSidecarLogsResults + if sidecarLogsResultsEnabled { + // creates the clientset + config, err := rest.InClusterConfig() + if err != nil { + merr = multierror.Append(merr, err) + } + if config != nil { + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + merr = multierror.Append(merr, err) + } + stepResults, err = getResultsFromSidecarLogs(ctx, clientset, tr.Namespace, tr.Status.PodName, "sidecar-results", stepResults) + if err != nil { + merr = multierror.Append(merr, err) + } + } + } for _, s := range stepStatuses { if s.State.Terminated != nil && len(s.State.Terminated.Message) != 0 { msg := s.State.Terminated.Message results, err := termination.ParseMessage(logger, msg) + results = append(results, stepResults...) if err != nil { logger.Errorf("termination message could not be parsed as JSON: %v", err) merr = multierror.Append(merr, err) @@ -175,12 +271,14 @@ func setTaskRunStatusBasedOnStepStatus(logger *zap.SugaredLogger, stepStatuses [ trs.TaskRunResults = append(trs.TaskRunResults, taskResults...) trs.ResourcesResult = append(trs.ResourcesResult, pipelineResourceResults...) } - msg, err = createMessageFromResults(filteredResults) - if err != nil { - logger.Errorf("%v", err) - merr = multierror.Append(merr, err) - } else { - s.State.Terminated.Message = msg + if !sidecarLogsResultsEnabled { + msg, err = createMessageFromResults(filteredResults) + if err != nil { + logger.Errorf("%v", err) + merr = multierror.Append(merr, err) + } else { + s.State.Terminated.Message = msg + } } if time != nil { s.State.Terminated.StartedAt = *time diff --git a/pkg/pod/status_test.go b/pkg/pod/status_test.go index abb4842badc..870fca8d5a7 100644 --- a/pkg/pod/status_test.go +++ b/pkg/pod/status_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package pod import ( + "context" "strings" "testing" "time" @@ -63,7 +64,8 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) { Message: `[{"key":"resultName","value":"resultValue", "type":1}, {"key":"digest","value":"sha256:1234` + strings.Repeat("a", 3072) + `","resourceName":"source-image"}]`, }, }, - }}, + }, + }, }} { t.Run(c.desc, func(t *testing.T) { startTime := time.Date(2010, 1, 1, 1, 1, 1, 1, time.UTC) @@ -80,7 +82,7 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) { } logger, _ := logging.NewLogger("", "status") - merr := setTaskRunStatusBasedOnStepStatus(logger, c.ContainerStatuses, &tr) + merr := setTaskRunStatusBasedOnStepStatus(context.Background(), logger, c.ContainerStatuses, &tr) if merr != nil { t.Errorf("setTaskRunStatusBasedOnStepStatus: %s", merr) } @@ -1061,7 +1063,7 @@ func TestMakeTaskRunStatus(t *testing.T) { }, } logger, _ := logging.NewLogger("", "status") - got, err := MakeTaskRunStatus(logger, tr, &c.pod) + got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod) if err != nil { t.Errorf("MakeTaskRunResult: %s", err) } @@ -1275,7 +1277,7 @@ func TestMakeTaskRunStatusAlpha(t *testing.T) { }, } logger, _ := logging.NewLogger("", "status") - got, err := MakeTaskRunStatus(logger, tr, &c.pod) + got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod) if err != nil { t.Errorf("MakeTaskRunResult: %s", err) } @@ -1396,7 +1398,7 @@ func TestMakeRunStatusJSONError(t *testing.T) { } logger, _ := logging.NewLogger("", "status") - gotTr, err := MakeTaskRunStatus(logger, tr, pod) + gotTr, err := MakeTaskRunStatus(context.Background(), logger, tr, pod) if err == nil { t.Error("Expected error, got nil") } @@ -1660,3 +1662,64 @@ func TestSortPodContainerStatuses(t *testing.T) { } } } + +func TestExtractResultsFromLogs(t *testing.T) { + logs := strings.NewReader(` +result1 foo +--- +result2 bar +--- +`) + + results, err := extractResultsFromLogs(logs, []v1beta1.PipelineResourceResult{}) + if err != nil { + t.Error(err) + } + want := []v1beta1.PipelineResourceResult{ + { + Key: "result1", + Value: "foo", + ResultType: 1, + }, { + Key: "result2", + Value: "bar", + ResultType: 1, + }, + } + if d := cmp.Diff(want, results); d != "" { + t.Error(diff.PrintWantGot(d)) + } +} + +func TestParseResults(t *testing.T) { + podLogs := []string{ + `result1 foo`, + `result2 {"IMAGE_URL":"ar.com", "IMAGE_DIGEST":"sha234"}`, + `result3 ["hello","world"]`, + } + want := []v1beta1.PipelineResourceResult{{ + Key: "result1", + Value: "foo", + ResultType: 1, + }, { + Key: "result2", + Value: `{"IMAGE_URL":"ar.com", "IMAGE_DIGEST":"sha234"}`, + ResultType: 1, + }, { + Key: "result3", + Value: `["hello","world"]`, + ResultType: 1, + }} + stepResults := []v1beta1.PipelineResourceResult{} + for _, log := range podLogs { + res, err := parseResults(log) + if err != nil { + t.Error(err) + } + stepResults = append(stepResults, res) + } + if d := cmp.Diff(want, stepResults); d != "" { + t.Error(diff.PrintWantGot(d)) + } + +} diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go index acde4d95d39..ec15abda8ad 100644 --- a/pkg/reconciler/taskrun/taskrun.go +++ b/pkg/reconciler/taskrun/taskrun.go @@ -185,6 +185,11 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg // updates regardless of whether the reconciliation errored out. 
if err = c.reconcile(ctx, tr, rtr); err != nil { logger.Errorf("Reconcile: %v", err.Error()) + if errors.Is(err, podconvert.ErrorReasonMaxResultSizeExceeded) { + message := fmt.Sprintf("TaskRun %q failed to finish because atleast one of its results exceeded the max allowed limit of 1K.", tr.Name) + err := c.failTaskRun(ctx, tr, v1beta1.TaskRunReasonResultLargerThanAllowedLimit, message) + return c.finishReconcileUpdateEmitEvents(ctx, tr, before, err) + } } // Emit events (only when ConditionSucceeded was changed) @@ -249,9 +254,8 @@ func (c *Reconciler) stopSidecars(ctx context.Context, tr *v1beta1.TaskRun) erro if tr.Status.PodName == "" { return nil } - // do not continue if the TaskSpec had no sidecars - if tr.Status.TaskSpec != nil && len(tr.Status.TaskSpec.Sidecars) == 0 { + if tr.Status.TaskSpec != nil && len(tr.Status.TaskSpec.Sidecars) == 0 && len(tr.Status.Sidecars) == 0 { return nil } @@ -540,7 +544,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re } // Convert the Pod's status to the equivalent TaskRun Status. - tr.Status, err = podconvert.MakeTaskRunStatus(logger, *tr, pod) + tr.Status, err = podconvert.MakeTaskRunStatus(ctx, logger, *tr, pod) if err != nil { return err } diff --git a/pkg/sidecarlogresults/sidecarlogresults.go b/pkg/sidecarlogresults/sidecarlogresults.go new file mode 100644 index 00000000000..8a76df5c76b --- /dev/null +++ b/pkg/sidecarlogresults/sidecarlogresults.go @@ -0,0 +1,105 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sidecarlogresults + +import ( + "fmt" + "os" + "reflect" + "strings" + "time" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// SidecarLogResult holds fields for storing extracted results +type SidecarLogResult struct { + Name string + Value string +} + +func (result *SidecarLogResult) isValid() bool { + if len(result.Value) > 1024 { + return false + } + return true +} + +func (result *SidecarLogResult) log() { + fmt.Printf("%v %v", result.Name, result.Value) + fmt.Printf("\n---\n") +} + +// LookForResults waits for results to be written out by the steps +// in their results path and prints them in a structured way to its +// stdout so that the reconciler can parse those logs. 
+func LookForResults(resultsDir string, resultNames string) error { + expectedResults := sets.NewString() + for _, s := range strings.Split(resultNames, ",") { + expectedResults.Insert(s) + } + resultsFetched := sets.NewString() + results := map[string]string{} + resultParsed := map[string]bool{} + + for true { + f, err := os.Open(resultsDir) + if err != nil { + return err + } + files, err := f.Readdir(0) + if err != nil { + return err + } + defer f.Close() + for _, v := range files { + if _, ok := resultParsed[v.Name()]; !ok { + resultParsed[v.Name()] = false + } + if expectedResults.Has(v.Name()) && !resultsFetched.Has(v.Name()) { + value, err := os.ReadFile(fmt.Sprintf("%v/%v", resultsDir, v.Name())) + if err != nil { + return err + } + newResult := SidecarLogResult{ + Name: v.Name(), + Value: string(value), + } + if val, ok := results[newResult.Name]; ok { + if val == newResult.Value { + // value is not changing anymore. + resultParsed[newResult.Name] = true + } + } + results[newResult.Name] = newResult.Value + + if !newResult.isValid() { + return fmt.Errorf("The value for result %v is larger than the maximum allowed limit of 1024 bytes", v.Name()) + } + if resultParsed[newResult.Name] == true { + newResult.log() + resultsFetched.Insert(v.Name()) + } + } + } + time.Sleep(10 * time.Millisecond) + if reflect.DeepEqual(resultsFetched, expectedResults) { + break + } + } + return nil +} diff --git a/pkg/sidecarlogresults/sidecarlogresults_test.go b/pkg/sidecarlogresults/sidecarlogresults_test.go new file mode 100644 index 00000000000..d8a558c8b9a --- /dev/null +++ b/pkg/sidecarlogresults/sidecarlogresults_test.go @@ -0,0 +1,73 @@ +package sidecarlogresults + +import ( + "fmt" + "log" + "os" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/tektoncd/pipeline/test/diff" +) + +func TestLookForResults(t *testing.T) { + for _, c := range []struct { + desc string + resultName string + resultValue string + generateResultDir bool + generateResult bool + want string + }{{ + desc: "missing results directory", + resultName: "foo", + resultValue: "bar", + generateResultDir: false, + generateResult: false, + want: "open does not exist: no such file or directory", + }, { + desc: "result too large", + resultName: "foo", + resultValue: strings.Repeat("a", 1025), + generateResultDir: true, + generateResult: true, + want: "The value for result foo is larger than the maximum allowed limit of 1024 bytes", + }, { + desc: "good result", + resultName: "foo", + resultValue: "bar", + generateResultDir: true, + generateResult: true, + want: "", + }} { + t.Run(c.desc, func(t *testing.T) { + dir := "does not exist" + if c.generateResultDir { + dir = t.TempDir() + if c.generateResult { + createResult(dir, c.resultName, c.resultValue) + } + } + err := LookForResults(dir, c.resultName) + log.Println(c.want, err) + if c.want == "" { + if err != nil { + t.Errorf("Did not expect any error but got: %v", err) + } + } else { + if d := cmp.Diff(c.want, err.Error()); d != "" { + t.Errorf("SidecarLogResult error diff %s", diff.PrintWantGot(d)) + } + } + }) + } +} + +func createResult(dir string, resultName string, resultValue string) { + resultFile := fmt.Sprintf("%v/%v", dir, resultName) + err := os.WriteFile(resultFile, []byte(resultValue), 0660) + if err != nil { + log.Fatal(err) + } +} diff --git a/test/e2e-common.sh b/test/e2e-common.sh index 5ef47f77e81..646c7092a85 100755 --- a/test/e2e-common.sh +++ b/test/e2e-common.sh @@ -29,6 +29,7 @@ function install_pipeline_crd() { verify_pipeline_installation 
verify_resolvers_installation + verify_log_access_enabled export SYSTEM_NAMESPACE=tekton-pipelines } @@ -62,6 +63,19 @@ function verify_resolvers_installation() { wait_until_pods_running tekton-pipelines-resolvers || fail_test "Tekton Pipeline Resolvers did not come up" } +function verify_log_access_enabled() { + var=$(kubectl get clusterroles | grep tekton-pipelines-controller-pod-log-access) + if [ -z "$var" ] + then + fail_test "Failed to create clusterrole granting pod/logs access to the tekton controller." + fi + var=$(kubectl get clusterrolebindings | grep tekton-pipelines-controller-pod-log-access) + if [ -z "$var" ] + then + fail_test "Failed to create clusterrole binding granting pod/logs access to the tekton controller." + fi +} + function uninstall_pipeline_crd() { echo ">> Uninstalling Tekton Pipelines" ko delete --ignore-not-found=true -R -f config/ diff --git a/test/e2e-tests-kind-prow-alpha.env b/test/e2e-tests-kind-prow-alpha.env index eacb0ef5eb6..00f55752feb 100644 --- a/test/e2e-tests-kind-prow-alpha.env +++ b/test/e2e-tests-kind-prow-alpha.env @@ -4,3 +4,4 @@ EMBEDDED_STATUS_GATE=minimal RUN_YAML_TESTS=true KO_DOCKER_REPO=registry.local:5000 E2E_GO_TEST_TIMEOUT=40m +SIDECAR_LOGS_RESULTS=true diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh index 44c1c2ed94b..5203099771a 100755 --- a/test/e2e-tests.sh +++ b/test/e2e-tests.sh @@ -27,6 +27,8 @@ SKIP_INITIALIZE=${SKIP_INITIALIZE:="false"} RUN_YAML_TESTS=${RUN_YAML_TESTS:="true"} SKIP_GO_E2E_TESTS=${SKIP_GO_E2E_TESTS:="false"} E2E_GO_TEST_TIMEOUT=${E2E_GO_TEST_TIMEOUT:="20m"} +SIDECAR_LOGS_RESULTS=${SIDECAR_LOGS_RESULTS:="false"} + failed=0 # Script entry point. @@ -75,6 +77,18 @@ function set_embedded_status() { kubectl patch configmap feature-flags -n tekton-pipelines -p "$jsonpatch" } +function set_sidecar_logs_results() { + local enable="$1" + if [ "$enable" != "true" ] && [ "$enable" != "false" ] ; then + printf "Invalid value for enable-sidecar-logs-results %s\n" ${enable} + exit 255 + fi + printf "Setting enable-sidecar-logs-results to %s\n", ${enable} + jsonpatch=$(printf "{\"data\": {\"enable-sidecar-logs-results\": \"%s\"}}" $1) + echo "feature-flags ConfigMap patch: ${jsonpatch}" + kubectl patch configmap feature-flags -n tekton-pipelines -p "$jsonpatch" +} + function run_e2e() { # Run the integration tests header "Running Go e2e tests" @@ -93,6 +107,7 @@ function run_e2e() { set_feature_gate "$PIPELINE_FEATURE_GATE" set_embedded_status "$EMBEDDED_STATUS_GATE" +set_sidecar_logs_results "$SIDECAR_LOGS_RESULTS" run_e2e (( failed )) && fail_test
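
To exercise the new toggle locally, the e2e entrypoint reads the `SIDECAR_LOGS_RESULTS` environment variable (default `"false"`). A minimal sketch, assuming a cluster that already has Tekton Pipelines installed by these scripts:

```bash
# Run the e2e suite with sidecar log results enabled; the script patches the
# feature-flags ConfigMap via set_sidecar_logs_results before the tests start.
SIDECAR_LOGS_RESULTS=true ./test/e2e-tests.sh
```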