diff --git a/.golangci.yml b/.golangci.yml
index cd89b614a7..695d2b979a 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -105,6 +105,7 @@ linters-settings:
       - golang.zx2c4.com/wireguard
       - golang.zx2c4.com/wireguard/wgctrl
       - cloud.google.com/go
+      - kernel.org/pub/linux/libs/security/libcap/cap
       # fd-leak related replacements: https://github.com/siderolabs/talos/issues/9412
       - github.com/insomniacslk/dhcp
      - github.com/safchain/ethtool
diff --git a/Dockerfile b/Dockerfile
index 6081963314..eacac4c0ff 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -658,8 +658,6 @@ RUN <
diff --git a/go.mod b/go.mod
 	github.com/unix4ever/yaml v0.0.0-20220527175918-f17b0f05cf2c
+
+	// improved error logging
+	kernel.org/pub/linux/libs/security/libcap/cap => github.com/dsseng/go-libcap/cap v0.0.0-20241015195416-c3ab072bd718
 )
 
 // fd-leak related replacements: https://github.com/siderolabs/talos/issues/9412
diff --git a/go.sum b/go.sum
index 63d07601fc..bfed2449da 100644
--- a/go.sum
+++ b/go.sum
@@ -186,6 +186,8 @@ github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj
 github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/dsseng/go-libcap/cap v0.0.0-20241015195416-c3ab072bd718 h1:0rpLmrwXjXf7ySDkCDOmzlKFi8w2d1fhQ7Z9BCXXIQU=
+github.com/dsseng/go-libcap/cap v0.0.0-20241015195416-c3ab072bd718/go.mod h1:/iBwcj9nbLejQitYvUm9caurITQ6WyNHibJk6Q9fiS4=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/ecks/uefi v0.0.0-20221116212947-caef65d070eb h1:LZBZtPpqHDydudNAs2sHmo4Zp9bxEyxHdGCk3Fr6tv8=
@@ -1065,8 +1067,6 @@ k8s.io/pod-security-admission v0.31.1 h1:j++ISpfQU0mWpKhoS4tY06Wm5EKdn65teL4lPJh
 k8s.io/pod-security-admission v0.31.1/go.mod h1:0aE5T6MGm/50Nr/diBrC6+wwpxsT2E7NECe+TepUuEg=
 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-kernel.org/pub/linux/libs/security/libcap/cap v1.2.70 h1:QnLPkuDWWbD5C+3DUA2IUXai5TK6w2zff+MAGccqdsw=
-kernel.org/pub/linux/libs/security/libcap/cap v1.2.70/go.mod h1:/iBwcj9nbLejQitYvUm9caurITQ6WyNHibJk6Q9fiS4=
 kernel.org/pub/linux/libs/security/libcap/psx v1.2.70 h1:HsB2G/rEQiYyo1bGoQqHZ/Bvd6x1rERQTNdPr1FyWjI=
 kernel.org/pub/linux/libs/security/libcap/psx v1.2.70/go.mod h1:+l6Ee2F59XiJ2I6WR5ObpC1utCQJZ/VLsEbQCD8RG24=
 rsc.io/qr v0.2.0 h1:6vBLea5/NRMVTz8V66gipeLycZMl/+UlFmk8DvqQ6WY=
diff --git a/internal/app/machined/main.go b/internal/app/machined/main.go
index f6c8ff2af9..eba4c9c72a 100644
--- a/internal/app/machined/main.go
+++ b/internal/app/machined/main.go
@@ -35,7 +35,6 @@ import (
 	"github.com/siderolabs/talos/internal/app/maintenance"
 	"github.com/siderolabs/talos/internal/app/poweroff"
 	"github.com/siderolabs/talos/internal/app/trustd"
-	"github.com/siderolabs/talos/internal/app/wrapperd"
 	"github.com/siderolabs/talos/internal/pkg/mount"
 	"github.com/siderolabs/talos/pkg/httpdefaults"
 	"github.com/siderolabs/talos/pkg/machinery/api/common"
@@ -313,10 +312,6 @@ func main() {
 	case "poweroff", "shutdown":
 		poweroff.Main(os.Args)
 
 		return
-	case "wrapperd":
-		wrapperd.Main()
-
-		return
 	case "dashboard":
 		dashboard.Main()
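The process runner rework below replaces the /sbin/wrapperd re-exec with libcap's cap.Launcher (swapped to the dsseng fork by the go.mod replace above). As a minimal sketch of the launch flow — illustrative only, not the PR's exact code, and requiring root to actually change UID and capability state:

package main

import (
	"fmt"

	"kernel.org/pub/linux/libs/security/libcap/cap"
)

func main() {
	// Configure a direct fork/exec of the target binary — no wrapper process.
	launcher := cap.NewLauncher("/bin/true", []string{"true"}, []string{"PATH=/bin"})
	launcher.SetUID(50) // switch to a non-root UID before exec

	// Drop cap_sys_boot from the child's bounding set via an IAB tuple;
	// raising a value in the Bound vector marks it for removal.
	boot, err := cap.FromName("cap_sys_boot")
	if err != nil {
		panic(err)
	}

	iab := cap.IABGetProc()
	if err := iab.SetVector(cap.Bound, true, boot); err != nil {
		panic(err)
	}

	launcher.SetIAB(iab)

	pid, err := launcher.Launch(nil)
	if err != nil {
		panic(err)
	}

	fmt.Println("child PID:", pid)
}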
diff --git a/internal/app/machined/pkg/system/runner/process/process.go b/internal/app/machined/pkg/system/runner/process/process.go
index 47fc3bfea6..2f68520b91 100644
--- a/internal/app/machined/pkg/system/runner/process/process.go
+++ b/internal/app/machined/pkg/system/runner/process/process.go
@@ -8,12 +8,19 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"os/exec"
+	"slices"
 	"strings"
 	"syscall"
 	"time"
 
+	"github.com/containerd/cgroups/v3"
+	"github.com/containerd/cgroups/v3/cgroup1"
+	"github.com/containerd/cgroups/v3/cgroup2"
+	"github.com/containerd/containerd/v2/pkg/sys"
+	"github.com/siderolabs/gen/optional"
+	"github.com/siderolabs/gen/xslices"
 	"github.com/siderolabs/go-cmd/pkg/cmd/proc/reaper"
+	"kernel.org/pub/linux/libs/security/libcap/cap"
 
 	"github.com/siderolabs/talos/internal/app/machined/pkg/system/events"
 	"github.com/siderolabs/talos/internal/app/machined/pkg/system/runner"
@@ -78,27 +85,85 @@ func (p *processRunner) Close() error {
 }
 
 type commandWrapper struct {
-	cmd *exec.Cmd
+	launcher *cap.Launcher
+	ctty     optional.Optional[int]
+	stdin    uintptr
+	stdout   uintptr
+	stderr   uintptr
 
 	afterStart       func()
 	afterTermination func() error
 }
 
+func dropCaps(droppedCapabilities []string, launcher *cap.Launcher) error {
+	droppedCaps := strings.Join(droppedCapabilities, ",")
+
+	if droppedCaps != "" {
+		caps := strings.Split(droppedCaps, ",")
+		dropCaps := xslices.Map(caps, func(c string) cap.Value {
+			capability, capErr := cap.FromName(c)
+			if capErr != nil {
+				fmt.Printf("failed to parse capability: %s", capErr)
+			}
+
+			return capability
+		})
+
+		iab := cap.IABGetProc()
+		if err := iab.SetVector(cap.Bound, true, dropCaps...); err != nil {
+			return fmt.Errorf("failed to set capabilities: %w", err)
+		}
+
+		launcher.SetIAB(iab)
+	}
+
+	return nil
+}
+
+// This callback is run in the thread before executing the child process.
+func beforeExecCallback(pa *syscall.ProcAttr, data interface{}) error {
+	wrapper, ok := data.(*commandWrapper)
+	if !ok {
+		return fmt.Errorf("failed to get command info")
+	}
+
+	ctty, cttySet := wrapper.ctty.Get()
+	if cttySet {
+		if pa.Sys == nil {
+			pa.Sys = &syscall.SysProcAttr{}
+		}
+
+		pa.Sys.Ctty = ctty
+		pa.Sys.Setsid = true
+		pa.Sys.Setctty = true
+	}
+
+	pa.Files = []uintptr{
+		wrapper.stdin,
+		wrapper.stdout,
+		wrapper.stderr,
+	}
+
+	// TODO: use pa.Sys.CgroupFD here when we can be sure clone3 is available
+	return nil
+}
+
 //nolint:gocyclo
 func (p *processRunner) build() (commandWrapper, error) {
-	args := []string{
-		fmt.Sprintf("-name=%s", p.args.ID),
-		fmt.Sprintf("-dropped-caps=%s", strings.Join(p.opts.DroppedCapabilities, ",")),
-		fmt.Sprintf("-cgroup-path=%s", cgroup.Path(p.opts.CgroupPath)),
-		fmt.Sprintf("-oom-score=%d", p.opts.OOMScoreAdj),
-		fmt.Sprintf("-uid=%d", p.opts.UID),
-	}
+	wrapper := commandWrapper{}
+
+	env := slices.Concat([]string{"PATH=" + constants.PATH}, p.opts.Env, os.Environ())
+	launcher := cap.NewLauncher(p.args.ProcessArgs[0], p.args.ProcessArgs, env)
 
-	args = append(args, p.args.ProcessArgs...)
+	if p.opts.UID > 0 {
+		launcher.SetUID(int(p.opts.UID))
+	}
 
-	cmd := exec.Command("/sbin/wrapperd", args...)
+	// reduce capabilities and assign them to the launcher
+	if err := dropCaps(p.opts.DroppedCapabilities, launcher); err != nil {
+		return commandWrapper{}, err
+	}
 
-	// Set the environment for the service.
-	cmd.Env = append([]string{fmt.Sprintf("PATH=%s", constants.PATH)}, p.opts.Env...)
+	launcher.Callback(beforeExecCallback)
 
 	// Setup logging.
 	w, err := p.opts.LoggingManager.ServiceLog(p.args.ID).Writer()
@@ -113,12 +178,40 @@ func (p *processRunner) build() (commandWrapper, error) {
 		writer = w
 	}
 
+	// As MultiWriter is not a file, we need to create a pipe.
+	// The pipe writer is passed to the child process while we read from the read side.
+	pr, pw, err := os.Pipe()
+	if err != nil {
+		return commandWrapper{}, err
+	}
+
+	go io.Copy(writer, pr) //nolint:errcheck
+
 	// close the writer if we exit early due to an error
 	closeWriter := true
 
+	closeLogging := func() (e error) {
+		err := w.Close()
+		if err != nil {
+			e = err
+		}
+
+		err = pr.Close()
+		if err != nil {
+			e = err
+		}
+
+		err = pw.Close()
+		if err != nil {
+			e = err
+		}
+
+		return e
+	}
+
 	defer func() {
 		if closeWriter {
-			w.Close() //nolint:errcheck
+			closeLogging() //nolint:errcheck
 		}
 	}()
 
@@ -130,7 +223,7 @@ func (p *processRunner) build() (commandWrapper, error) {
 			return commandWrapper{}, err
 		}
 
-		cmd.Stdin = stdin
+		wrapper.stdin = stdin.Fd()
 
 		afterStartFuncs = append(afterStartFuncs, func() {
 			stdin.Close() //nolint:errcheck
@@ -143,13 +236,13 @@ func (p *processRunner) build() (commandWrapper, error) {
 			return commandWrapper{}, err
 		}
 
-		cmd.Stdout = stdout
+		wrapper.stdout = stdout.Fd()
 
 		afterStartFuncs = append(afterStartFuncs, func() {
 			stdout.Close() //nolint:errcheck
 		})
 	} else {
-		cmd.Stdout = writer
+		wrapper.stdout = pw.Fd()
 	}
 
 	if p.opts.StderrFile != "" {
@@ -158,37 +251,60 @@ func (p *processRunner) build() (commandWrapper, error) {
 			return commandWrapper{}, err
 		}
 
-		cmd.Stderr = stderr
+		wrapper.stderr = stderr.Fd()
 
 		afterStartFuncs = append(afterStartFuncs, func() {
 			stderr.Close() //nolint:errcheck
 		})
 	} else {
-		cmd.Stderr = writer
+		wrapper.stderr = pw.Fd()
 	}
 
-	ctty, cttySet := p.opts.Ctty.Get()
-	if cttySet {
-		cmd.SysProcAttr = &syscall.SysProcAttr{
-			Setsid:  true,
-			Setctty: true,
-			Ctty:    ctty,
+	closeWriter = false
+
+	wrapper.launcher = launcher
+	wrapper.afterStart = func() {
+		for _, f := range afterStartFuncs {
+			f()
 		}
 	}
+	wrapper.afterTermination = closeLogging
+	wrapper.ctty = p.opts.Ctty
 
-	closeWriter = false
+	return wrapper, nil
+}
 
-	return commandWrapper{
-		cmd: cmd,
-		afterStart: func() {
-			for _, f := range afterStartFuncs {
-				f()
-			}
-		},
-		afterTermination: func() error {
-			return w.Close()
-		},
-	}, nil
+
+// Apply cgroup and OOM score after the process is launched.
+func applyProperties(p *processRunner, pid int) error {
+	path := cgroup.Path(p.opts.CgroupPath)
+
+	if cgroups.Mode() == cgroups.Unified {
+		cgv2, err := cgroup2.Load(path)
+		if err != nil {
+			return fmt.Errorf("failed to load cgroup %s: %w", path, err)
+		}
+
+		if err := cgv2.AddProc(uint64(pid)); err != nil {
+			return fmt.Errorf("failed to move process %s to cgroup: %w", p, err)
+		}
+	} else {
+		cgv1, err := cgroup1.Load(cgroup1.StaticPath(path))
+		if err != nil {
+			return fmt.Errorf("failed to load cgroup %s: %w", path, err)
+		}
+
+		if err := cgv1.Add(cgroup1.Process{
+			Pid: pid,
+		}); err != nil {
+			return fmt.Errorf("failed to move process %s to cgroup: %w", p, err)
+		}
+	}
+
+	if err := sys.AdjustOOMScore(pid, p.opts.OOMScoreAdj); err != nil {
+		return fmt.Errorf("failed to change OOMScoreAdj of process %s to %d: %w", p, p.opts.OOMScoreAdj, err)
+	}
+
+	return nil
 }
 
 func (p *processRunner) run(eventSink events.Recorder) error {
@@ -206,20 +322,29 @@ func (p *processRunner) run(eventSink events.Recorder) error {
 		defer reaper.Stop(notifyCh)
 	}
 
-	err = cmdWrapper.cmd.Start()
+	pid, err := cmdWrapper.launcher.Launch(&cmdWrapper)
+	if err != nil {
+		return fmt.Errorf("error starting process: %w", err)
+	}
+
+	if err := applyProperties(p, pid); err != nil {
+		return err
+	}
 
 	cmdWrapper.afterStart()
 
+	eventSink(events.StateRunning, "Process %s started with PID %d", p, pid)
+
+	process, err := os.FindProcess(pid)
 	if err != nil {
-		return fmt.Errorf("error starting process: %w", err)
+		return fmt.Errorf("could not find process: %w", err)
 	}
 
-	eventSink(events.StateRunning, "Process %s started with PID %d", p, cmdWrapper.cmd.Process.Pid)
-
 	waitCh := make(chan error)
 
 	go func() {
-		waitCh <- reaper.WaitWrapper(usingReaper, notifyCh, cmdWrapper.cmd)
+		_, err := process.Wait()
+		waitCh <- err
 	}()
 
 	select {
@@ -231,7 +356,7 @@ func (p *processRunner) run(eventSink events.Recorder) error {
 		eventSink(events.StateStopping, "Sending SIGTERM to %s", p)
 
 		//nolint:errcheck
-		_ = cmdWrapper.cmd.Process.Signal(syscall.SIGTERM)
+		_ = process.Signal(syscall.SIGTERM)
 	}
 
 	select {
@@ -243,7 +368,7 @@ func (p *processRunner) run(eventSink events.Recorder) error {
 		eventSink(events.StateStopping, "Sending SIGKILL to %s", p)
 
 		//nolint:errcheck
-		_ = cmdWrapper.cmd.Process.Signal(syscall.SIGKILL)
+		_ = process.Signal(syscall.SIGKILL)
 	}
 
 	// wait for process to terminate
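A note on the fd plumbing above: beforeExecCallback relies on the syscall.ProcAttr contract that the index into Files becomes the fd number in the child (0 = stdin, 1 = stdout, 2 = stderr), which is why the runner passes raw descriptors rather than io.Writers. A self-contained demo of that contract (a hypothetical sketch, not part of the change):

package main

import (
	"fmt"
	"io"
	"os"
	"syscall"
)

func main() {
	pr, pw, err := os.Pipe()
	if err != nil {
		panic(err)
	}

	// The index in Files selects the fd number in the child:
	// child fd 0 = our stdin, child fd 1 = the pipe writer, child fd 2 = our stderr.
	pid, err := syscall.ForkExec("/bin/echo", []string{"echo", "hello"}, &syscall.ProcAttr{
		Files: []uintptr{os.Stdin.Fd(), pw.Fd(), os.Stderr.Fd()},
	})
	if err != nil {
		panic(err)
	}

	pw.Close() // drop the parent's copy so ReadAll sees EOF once the child exits

	out, err := io.ReadAll(pr)
	if err != nil {
		panic(err)
	}

	fmt.Printf("child %d wrote: %s", pid, out)

	var status syscall.WaitStatus
	syscall.Wait4(pid, &status, 0, nil) //nolint:errcheck
}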
diff --git a/internal/app/machined/pkg/system/runner/process/process_test.go b/internal/app/machined/pkg/system/runner/process/process_test.go
index 3ebb38a34c..dfcc90ff5a 100644
--- a/internal/app/machined/pkg/system/runner/process/process_test.go
+++ b/internal/app/machined/pkg/system/runner/process/process_test.go
@@ -224,6 +224,7 @@ func TestProcessSuite(t *testing.T) {
 		t.Skip("wrapperd not found")
 	}
 
+	// What's the purpose of this test? Should it be replaced for the new subprocess start method?
 	for _, runReaper := range []bool{true, false} {
 		func(runReaper bool) {
 			t.Run(fmt.Sprintf("runReaper=%v", runReaper), func(t *testing.T) { suite.Run(t, &ProcessSuite{runReaper: runReaper}) })
diff --git a/internal/app/machined/pkg/system/services/dashboard.go b/internal/app/machined/pkg/system/services/dashboard.go
index 0f5100a6d8..696c31785e 100644
--- a/internal/app/machined/pkg/system/services/dashboard.go
+++ b/internal/app/machined/pkg/system/services/dashboard.go
@@ -66,7 +66,7 @@ func (d *Dashboard) Runner(r runtime.Runtime) (runner.Runner, error) {
 		}),
 		runner.WithStdinFile(tty),
 		runner.WithStdoutFile(tty),
-		runner.WithCtty(1),
+		runner.WithCtty(0),
 		runner.WithOOMScoreAdj(-400),
 		runner.WithDroppedCapabilities(capability.AllCapabilitiesSetLowercase()),
 		runner.WithCgroupPath(constants.CgroupDashboard),
diff --git a/internal/app/wrapperd/main.go b/internal/app/wrapperd/main.go
deleted file mode 100644
index f2b64fb958..0000000000
--- a/internal/app/wrapperd/main.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this
-// file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// Package wrapperd provides a wrapper for running services.
-package wrapperd
-
-import (
-	"flag"
-	"log"
-	"os"
-	"strings"
-
-	"github.com/containerd/cgroups/v3"
-	"github.com/containerd/cgroups/v3/cgroup1"
-	"github.com/containerd/cgroups/v3/cgroup2"
-	"github.com/containerd/containerd/v2/pkg/sys"
-	"github.com/siderolabs/gen/xslices"
-	"golang.org/x/sys/unix"
-	"kernel.org/pub/linux/libs/security/libcap/cap"
-
-	krnl "github.com/siderolabs/talos/pkg/kernel"
-	"github.com/siderolabs/talos/pkg/machinery/kernel"
-)
-
-var (
-	name        string
-	droppedCaps string
-	cgroupPath  string
-	oomScore    int
-	uid         int
-)
-
-// Main is the entrypoint into /sbin/wrapperd.
-//
-//nolint:gocyclo
-func Main() {
-	flag.StringVar(&name, "name", "", "process name")
-	flag.StringVar(&droppedCaps, "dropped-caps", "", "comma-separated list of capabilities to drop")
-	flag.StringVar(&cgroupPath, "cgroup-path", "", "cgroup path to use")
-	flag.IntVar(&oomScore, "oom-score", 0, "oom score to set")
-	flag.IntVar(&uid, "uid", 0, "uid to set for the process")
-	flag.Parse()
-
-	currentPid := os.Getpid()
-
-	if oomScore != 0 {
-		if err := sys.AdjustOOMScore(currentPid, oomScore); err != nil {
-			log.Fatalf("Failed to change OOMScoreAdj of process %s to %d", name, oomScore)
-		}
-	}
-
-	// load the cgroup and put the process into the cgroup
-	if cgroupPath != "" {
-		if cgroups.Mode() == cgroups.Unified {
-			cgv2, err := cgroup2.Load(cgroupPath)
-			if err != nil {
-				log.Fatalf("failed to load cgroup %s: %v", cgroupPath, err)
-			}
-
-			if err := cgv2.AddProc(uint64(currentPid)); err != nil {
-				log.Fatalf("Failed to move process %s to cgroup: %v", name, err)
-			}
-		} else {
-			cgv1, err := cgroup1.Load(cgroup1.StaticPath(cgroupPath))
-			if err != nil {
-				log.Fatalf("failed to load cgroup %s: %v", cgroupPath, err)
-			}
-
-			if err := cgv1.Add(cgroup1.Process{
-				Pid: currentPid,
-			}); err != nil {
-				log.Fatalf("Failed to move process %s to cgroup: %v", name, err)
-			}
-		}
-	}
-
-	prop, err := krnl.ReadParam(&kernel.Param{Key: "proc.sys.kernel.kexec_load_disabled"})
-	if v := strings.TrimSpace(string(prop)); err == nil && v != "0" {
-		log.Printf("kernel.kexec_load_disabled is %v, skipping dropping capabilities", v)
-	} else if droppedCaps != "" {
-		caps := strings.Split(droppedCaps, ",")
-		dropCaps := xslices.Map(caps, func(c string) cap.Value {
-			capability, capErr := cap.FromName(c)
-			if capErr != nil {
-				log.Fatalf("failed to parse capability: %v", capErr)
-			}
-
-			return capability
-		})
-
-		// drop capabilities
-		iab := cap.IABGetProc()
-		if err = iab.SetVector(cap.Bound, true, dropCaps...); err != nil {
-			log.Fatalf("failed to set capabilities: %v", err)
-		}
-
-		if err = iab.SetProc(); err != nil {
-			log.Fatalf("failed to apply capabilities: %v", err)
-		}
-	}
-
-	if uid > 0 {
-		err = unix.Setuid(uid)
-		if err != nil {
-			log.Fatalf("failed to setuid: %v", err)
-		}
-	}
-
-	if err := unix.Exec(flag.Args()[0], flag.Args()[0:], os.Environ()); err != nil {
-		log.Fatalf("failed to exec: %v", err)
-	}
-}
diff --git a/internal/integration/api/process.go b/internal/integration/api/process.go
new file mode 100644
index 0000000000..4f09a69887
--- /dev/null
+++ b/internal/integration/api/process.go
@@ -0,0 +1,173 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+//go:build integration_api
+
+package api
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/siderolabs/talos/internal/integration/base"
+	"github.com/siderolabs/talos/pkg/machinery/client"
+)
+
+// ProcessSuite ...
+type ProcessSuite struct {
+	base.APISuite
+
+	ctx       context.Context //nolint:containedctx
+	ctxCancel context.CancelFunc
+}
+
+// SuiteName ...
+func (suite *ProcessSuite) SuiteName() string {
+	return "api.ProcessSuite"
+}
+
+// SetupTest ...
+func (suite *ProcessSuite) SetupTest() {
+	suite.ctx, suite.ctxCancel = context.WithTimeout(context.Background(), 15*time.Second)
+
+	if suite.Cluster == nil || suite.Cluster.Provisioner() != base.ProvisionerQEMU {
+		// TODO: should we test caps and cgroups in Docker?
+		suite.T().Skip("skipping process test since provisioner is not qemu")
+	}
+}
+
+// TearDownTest ...
+func (suite *ProcessSuite) TearDownTest() {
+	if suite.ctxCancel != nil {
+		suite.ctxCancel()
+	}
+}
+
+func (suite *ProcessSuite) readProcfs(nodeCtx context.Context, pid int32, property string) string {
+	r, err := suite.Client.Read(nodeCtx, filepath.Join("/proc", strconv.Itoa(int(pid)), property))
+	suite.Require().NoError(err)
+
+	value, err := io.ReadAll(r)
+	suite.Require().NoError(err)
+
+	suite.Require().NoError(r.Close())
+
+	return string(bytes.TrimSpace(value))
+}
+
+// TestProcessCapabilities reads capabilities of processes from procfs
+// and validates system services get necessary capabilities dropped.
+func (suite *ProcessSuite) TestProcessCapabilities() {
+	nodes := suite.DiscoverNodeInternalIPs(suite.ctx)
+
+	for _, node := range nodes {
+		nodeCtx := client.WithNode(suite.ctx, node)
+
+		r, err := suite.Client.Processes(nodeCtx)
+		suite.Require().NoError(err)
+
+		found := 0
+
+		for _, msg := range r.Messages {
+			procs := msg.Processes
+
+			for _, p := range procs {
+				switch p.Command {
+				case "systemd-udevd":
+					found++
+
+					// All but cap_sys_boot
+					suite.Require().Contains(
+						suite.readProcfs(nodeCtx, p.Pid, "status"),
+						"CapPrm:\t000001ffffbfffff\nCapEff:\t000001ffffbfffff\nCapBnd:\t000001ffffbfffff",
+					)
+					suite.Require().Equal(
+						suite.readProcfs(nodeCtx, p.Pid, "cgroup"),
+						"0::/system/udevd",
+					)
+					suite.Require().Contains(
+						suite.readProcfs(nodeCtx, p.Pid, "environ"),
+						"XDG_RUNTIME_DIR=/run",
+					)
+					suite.Require().Contains(
+						suite.readProcfs(nodeCtx, p.Pid, "status"),
+						"Uid:\t0",
+					)
+				case "dashboard":
+					found++
+
+					// None
+					suite.Require().Contains(
+						suite.readProcfs(nodeCtx, p.Pid, "status"),
+						"CapPrm:\t0000000000000000\nCapEff:\t0000000000000000\nCapBnd:\t0000000000000000",
+					)
+					suite.Require().Equal(
+						suite.readProcfs(nodeCtx, p.Pid, "cgroup"),
+						"0::/system/dashboard",
+					)
+					suite.Require().Equal(
+						suite.readProcfs(nodeCtx, p.Pid, "oom_score_adj"),
+						"-400",
+					)
+					suite.Require().Contains(
+						suite.readProcfs(nodeCtx, p.Pid, "environ"),
+						"TERM=linux",
+					)
+					suite.Require().Contains(
+						suite.readProcfs(nodeCtx, p.Pid, "status"),
+						"Uid:\t50",
+					)
+				case "containerd":
+					found++
+
+					// All but cap_sys_boot, cap_sys_module
+					suite.Require().Contains(
+						suite.readProcfs(nodeCtx, p.Pid, "status"),
+						"CapPrm:\t000001ffffbeffff\nCapEff:\t000001ffffbeffff\nCapBnd:\t000001ffffbeffff",
+					)
+
+					if strings.Contains(p.Args, "/system/run/containerd") {
+						suite.Require().Equal(
+							suite.readProcfs(nodeCtx, p.Pid, "cgroup"),
+							"0::/system/runtime",
+						)
+						suite.Require().Equal(
+							suite.readProcfs(nodeCtx, p.Pid, "oom_score_adj"),
+							"-999",
+						)
+					} else {
+						suite.Require().Equal(
+							suite.readProcfs(nodeCtx, p.Pid, "cgroup"),
+							"0::/podruntime/runtime",
+						)
+						suite.Require().Equal(
+							suite.readProcfs(nodeCtx, p.Pid, "oom_score_adj"),
+							"-500",
+						)
+					}
+
+					suite.Require().Contains(
+						suite.readProcfs(nodeCtx, p.Pid, "environ"),
+						"XDG_RUNTIME_DIR=/run",
+					)
+					suite.Require().Contains(
+						suite.readProcfs(nodeCtx, p.Pid, "status"),
+						"Uid:\t0",
+					)
+				}
+			}
+		}
+
+		suite.Require().Equal(4, found, "Not all processes found")
+	}
+}
+
+func init() {
+	allSuites = append(allSuites, new(ProcessSuite))
+}
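For reference, the hex masks asserted above can be decoded with the same libcap bindings the runner uses: 000001ffffbfffff sets every capability bit except cap_sys_boot (bit 22), and 000001ffffbeffff additionally clears cap_sys_module (bit 16). A throwaway decoder sketch (illustrative, not part of the suite; assumes the running kernel reports at least 41 capability bits):

package main

import (
	"fmt"
	"strconv"

	"kernel.org/pub/linux/libs/security/libcap/cap"
)

func main() {
	// CapBnd value asserted for systemd-udevd above.
	mask, err := strconv.ParseUint("000001ffffbfffff", 16, 64)
	if err != nil {
		panic(err)
	}

	// Print every capability whose bit is clear in the mask.
	for v := cap.Value(0); v < cap.MaxBits(); v++ {
		if mask&(1<<uint(v)) == 0 {
			fmt.Println("dropped:", v) // prints: dropped: cap_sys_boot
		}
	}
}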