From 31f065740730665b02be4ab3e304fbd65d5809c8 Mon Sep 17 00:00:00 2001 From: Jens Elkner Date: Fri, 21 Mar 2025 04:46:32 +0100 Subject: [PATCH 1/2] process metrics linux: better performance + tests --- process_metrics_linux.go | 291 ++++++++++++++++++++++++-------- process_metrics_linux_test.go | 133 ++++++++++----- testdata/limits | 17 -- testdata/linux.fd_metrics.out | 6 + testdata/linux.proc_metrics.out | 60 +++++++ testdata/linux.ps_io | 57 +++++++ testdata/linux.ps_limits | 17 ++ testdata/linux.ps_stat | 1 + testdata/linux.ps_status | 57 +++++++ 9 files changed, 513 insertions(+), 126 deletions(-) delete mode 100644 testdata/limits create mode 100644 testdata/linux.fd_metrics.out create mode 100644 testdata/linux.proc_metrics.out create mode 100644 testdata/linux.ps_io create mode 100644 testdata/linux.ps_limits create mode 100644 testdata/linux.ps_stat create mode 100644 testdata/linux.ps_status diff --git a/process_metrics_linux.go b/process_metrics_linux.go index e4587b7..ede0fe6 100644 --- a/process_metrics_linux.go +++ b/process_metrics_linux.go @@ -4,13 +4,14 @@ import ( "bytes" "fmt" "io" - "io/ioutil" "log" + "math" "os" "strconv" "strings" - "sync/atomic" + "syscall" "time" + "unsafe" ) // See https://github.com/prometheus/procfs/blob/a4ac0826abceb44c40fc71daed2b301db498b93e/proc_stat.go#L40 . @@ -47,29 +48,163 @@ type procStat struct { Rss int } -func writeProcessMetrics(w io.Writer) { - statFilepath := "/proc/self/stat" - data, err := ioutil.ReadFile(statFilepath) +type ProcFd uint32 + +const ( + FD_LIMITS ProcFd = iota + FD_STAT + FD_IO + FD_MEM + FD_COUNT +) + +// Testfiles in the same order as above. 
+var testfiles = [FD_COUNT]string{ + "/linux.ps_limits", + "/linux.ps_stat", + "/linux.ps_io", + "/linux.ps_status", +} + +/* +process metrics related file descriptors for files we always need, and + + do not want to open/close all the time +*/ +var pm_fd [FD_COUNT]int + +/* +to avaid, that go closes the files in the background, which makes the FDs + + above useless, we need to keep the reference to them as well +*/ +var pm_file [FD_COUNT]*os.File + +/* path used to count open FDs */ +var fd_path string + +/* path to get fd limits */ +var limits_path string + +/* Max open files soft limit for this process */ +var maxOpenFDs float64 = 0 + +var STAT_START = 0 +var NO_OUTPUT = false + +func init2() { + var testdata_dir = "" + var onTest = len(os.Args) > 1 && strings.HasSuffix(os.Args[0], ".test") + if onTest { + cwd, err := os.Getwd() + if err != nil { + panic("Unknown current working directory: " + err.Error()) + } + testdata_dir = cwd + "/testdata" + fmt.Printf("Using test data in %s ...\n", testdata_dir) + } + for i := 0; i < int(FD_COUNT); i++ { + pm_fd[i] = -1 + } + if onTest { + fd_path = testdata_dir + "/fd" + limits_path = testdata_dir + testfiles[FD_LIMITS] + } else { + fd_path = "/proc/self/fd" + limits_path = "/proc/self/limits" + } + maxOpenFDs = float64(getMaxFilesLimit()) + + // files to keep open + var path string + if onTest { + path = testdata_dir + testfiles[FD_STAT] + } else { + path = "/proc/self/stat" + } + f, err := os.OpenFile(path, os.O_RDONLY, 0) if err != nil { - log.Printf("ERROR: metrics: cannot open %s: %s", statFilepath, err) - return + log.Printf("WARN: Unable to open %s (%v).", path, err) + } else { + // pid and "comm" field do not change over this process lifetime, so lets + // precompute the number of bytes that can always be skipped (max 8+17+2). 
+ var data [32]byte + pm_file[FD_STAT] = f + pm_fd[FD_STAT] = int(f.Fd()) + n, err := syscall.Pread(pm_fd[FD_STAT], + (*(*[unsafe.Sizeof(data) - 1]byte)(unsafe.Pointer(&data)))[:], 0) + if err != nil { + log.Printf("WARN: %s read error (%s).", path, err) + pm_fd[FD_STAT] = -1 + f.Close() + } else { + for i := 0; i < n; i++ { + // lookup the ') ' suffix for the 2nd field. If someone renames it + // to something stupid, it does not deserve getting stats ;-) + if data[i] == 0x29 && data[i+1] == 0x20 { + STAT_START = i + 2 + break + } + } + if STAT_START == 0 { + pm_fd[FD_STAT] = -1 // should never happen + f.Close() + } + } } - // Search for the end of command. - n := bytes.LastIndex(data, []byte(") ")) - if n < 0 { - log.Printf("ERROR: metrics: cannot find command in parentheses in %q read from %s", data, statFilepath) - return + if onTest { + path = testdata_dir + testfiles[FD_IO] + } else { + path = "/proc/self/io" + } + f, err = os.OpenFile(path, os.O_RDONLY, 0) + if err != nil { + log.Printf("WARN: Unable to open %s (%v).", path, err) + } else { + pm_file[FD_IO] = f + pm_fd[FD_IO] = int(f.Fd()) + } + + if onTest { + path = testdata_dir + testfiles[FD_MEM] + } else { + path = "/proc/self/status" + } + f, err = os.OpenFile(path, os.O_RDONLY, 0) + if err != nil { + log.Printf("WARN: Unable to open %s (%v).", path, err) + } else { + pm_file[FD_MEM] = f + pm_fd[FD_MEM] = int(f.Fd()) } - data = data[n+2:] +} +func init() { + init2() +} + +func writeProcessMetrics(w io.Writer) { + writeProcessMemMetrics(w) + writeIOMetrics(w) + var data [512]byte + if pm_fd[FD_STAT] < 0 { + return + } + n, err := syscall.Pread(pm_fd[FD_STAT], + (*(*[unsafe.Sizeof(data) - 1]byte)(unsafe.Pointer(&data)))[:], 0) + if err != nil { + log.Printf("WARN: %s read error (%s).", pm_file[FD_STAT].Name(), err) + return + } + data[n] = 0 var p procStat - bb := bytes.NewBuffer(data) - _, err = fmt.Fscanf(bb, "%c %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d", + _, err = 
fmt.Fscanf(bytes.NewReader(data[STAT_START:n]), + "%c %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d", &p.State, &p.Ppid, &p.Pgrp, &p.Session, &p.TtyNr, &p.Tpgid, &p.Flags, &p.Minflt, &p.Cminflt, &p.Majflt, &p.Cmajflt, &p.Utime, &p.Stime, &p.Cutime, &p.Cstime, &p.Priority, &p.Nice, &p.NumThreads, &p.ItrealValue, &p.Starttime, &p.Vsize, &p.Rss) if err != nil { - log.Printf("ERROR: metrics: cannot parse %q read from %s: %s", data, statFilepath, err) + log.Printf("WARN: %s parse error in '%q' (%s).", pm_file[FD_STAT].Name(), data, err) return } @@ -88,39 +223,35 @@ func writeProcessMetrics(w io.Writer) { WriteGaugeUint64(w, "process_resident_memory_bytes", uint64(p.Rss)*pageSizeBytes) WriteGaugeUint64(w, "process_start_time_seconds", uint64(startTimeSeconds)) WriteGaugeUint64(w, "process_virtual_memory_bytes", uint64(p.Vsize)) - writeProcessMemMetrics(w) - writeIOMetrics(w) } -var procSelfIOErrLogged uint32 - func writeIOMetrics(w io.Writer) { - ioFilepath := "/proc/self/io" - data, err := ioutil.ReadFile(ioFilepath) + var data [256]byte // 83 + 7*20 = 223 + if pm_fd[FD_IO] < 0 { + return + } + n, err := syscall.Pread(pm_fd[FD_IO], + (*(*[unsafe.Sizeof(data) - 1]byte)(unsafe.Pointer(&data)))[:], 0) if err != nil { - // Do not spam the logs with errors - this error cannot be fixed without process restart. 
- // See https://github.com/VictoriaMetrics/metrics/issues/42 - if atomic.CompareAndSwapUint32(&procSelfIOErrLogged, 0, 1) { - log.Printf("ERROR: metrics: cannot read process_io_* metrics from %q, so these metrics won't be updated until the error is fixed; "+ - "see https://github.com/VictoriaMetrics/metrics/issues/42 ; The error: %s", ioFilepath, err) - } + log.Printf("WARN: %s read error (%s)", pm_file[FD_IO].Name(), err) + return } - + data[n] = 0 getInt := func(s string) int64 { n := strings.IndexByte(s, ' ') if n < 0 { - log.Printf("ERROR: metrics: cannot find whitespace in %q at %q", s, ioFilepath) + log.Printf("WARN: %s no whitespace in '%q'.", pm_file[FD_IO].Name(), s) return 0 } v, err := strconv.ParseInt(s[n+1:], 10, 64) if err != nil { - log.Printf("ERROR: metrics: cannot parse %q at %q: %s", s, ioFilepath, err) + log.Printf("WARN: %s parse error in '%q' (%s)", pm_file[FD_IO].Name(), s, err) return 0 } return v } var rchar, wchar, syscr, syscw, readBytes, writeBytes int64 - lines := strings.Split(string(data), "\n") + lines := strings.Split(string(data[:n]), "\n") for _, s := range lines { s = strings.TrimSpace(s) switch { @@ -146,48 +277,53 @@ func writeIOMetrics(w io.Writer) { WriteGaugeUint64(w, "process_io_storage_written_bytes_total", uint64(writeBytes)) } +// In Linux the startime shown in /proc//stat field 22 is in ticks since +// boot and thus the exact starttime since epoch in seconds would be: +// +// Now() - $( 0 { + WriteGaugeUint64(w, "process_open_fds", uint64(totalOpenFDs)) } - WriteGaugeUint64(w, "process_max_fds", maxOpenFDs) - WriteGaugeUint64(w, "process_open_fds", totalOpenFDs) } -func getOpenFDsCount(path string) (uint64, error) { - f, err := os.Open(path) +/** return 0 on error, the number of open files otherwise */ +func getOpenFDsCount() int32 { + f, err := os.Open(fd_path) if err != nil { - return 0, err + return 0 } defer f.Close() - var totalOpenFDs uint64 + var totalOpenFDs = 0 for { names, err := f.Readdirnames(512) if err == 
io.EOF { break } if err != nil { - return 0, fmt.Errorf("unexpected error at Readdirnames: %s", err) + log.Printf("WARN: %s read error (%s)", fd_path, err) + } else { + totalOpenFDs += len(names) } - totalOpenFDs += uint64(len(names)) } - return totalOpenFDs, nil + return int32(totalOpenFDs) } -func getMaxFilesLimit(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) +/* returns 0 on error, -1 for unlimited, the limit otherwise */ +func getMaxFilesLimit() int32 { + data, err := os.ReadFile(limits_path) if err != nil { - return 0, err + return 0 } lines := strings.Split(string(data), "\n") const prefix = "Max open files" @@ -199,19 +335,22 @@ func getMaxFilesLimit(path string) (uint64, error) { // Extract soft limit. n := strings.IndexByte(text, ' ') if n < 0 { - return 0, fmt.Errorf("cannot extract soft limit from %q", s) + log.Printf("WARN: %s no soft limit found in '%q'", limits_path, s) + return 0 } text = text[:n] if text == "unlimited" { - return 1<<64 - 1, nil + return -1 } - limit, err := strconv.ParseUint(text, 10, 64) - if err != nil { - return 0, fmt.Errorf("cannot parse soft limit from %q: %s", s, err) + limit, err := strconv.ParseInt(text, 10, 64) + if err != nil || limit < 0 || limit > math.MaxInt32 { + log.Printf("WARN: %s no valid soft limit in '%q' (%s).", limits_path, s, err) + return 0 } - return limit, nil + return int32(limit) } - return 0, fmt.Errorf("cannot find max open files limit") + log.Printf("WARN: %s no max open files limit found", limits_path) + return 0 } // https://man7.org/linux/man-pages/man5/procfs.5.html @@ -224,9 +363,8 @@ type memStats struct { } func writeProcessMemMetrics(w io.Writer) { - ms, err := getMemStats("/proc/self/status") - if err != nil { - log.Printf("ERROR: metrics: cannot determine memory status: %s", err) + ms := getMemStats() + if ms == nil { return } WriteGaugeUint64(w, "process_virtual_memory_peak_bytes", ms.vmPeak) @@ -237,13 +375,20 @@ func writeProcessMemMetrics(w io.Writer) { } -func 
getMemStats(path string) (*memStats, error) { - data, err := ioutil.ReadFile(path) +func getMemStats() *memStats { + var data [2048]byte // 571 + 2*57 + 57*20 = 1825 so 2048 should be safe + if pm_fd[FD_MEM] < 0 { + return nil + } + n, err := syscall.Pread(pm_fd[FD_MEM], + (*(*[unsafe.Sizeof(data) - 1]byte)(unsafe.Pointer(&data)))[:], 0) if err != nil { - return nil, err + log.Printf("WARN: %s read error (%s).", pm_file[FD_MEM].Name(), err) + return nil } + data[n] = 0 var ms memStats - lines := strings.Split(string(data), "\n") + lines := strings.Split(string(data[:n]), "\n") for _, s := range lines { if !strings.HasPrefix(s, "Vm") && !strings.HasPrefix(s, "Rss") { continue @@ -251,18 +396,22 @@ func getMemStats(path string) (*memStats, error) { // Extract key value. line := strings.Fields(s) if len(line) != 3 { - return nil, fmt.Errorf("unexpected number of fields found in %q; got %d; want %d", s, len(line), 3) + log.Printf("WARN: %s unexpected number of fields in '%q' (%d != %d).", + pm_file[FD_MEM].Name(), s, len(line), 3) + return nil } memStatName := line[0] memStatValue := line[1] value, err := strconv.ParseUint(memStatValue, 10, 64) if err != nil { - return nil, fmt.Errorf("cannot parse number from %q: %w", s, err) + log.Printf("WARN: %s number parse error in '%q' (%s)", pm_file[FD_MEM].Name(), s, err) + return nil } if line[2] != "kB" { - return nil, fmt.Errorf("expecting kB value in %q; got %q", s, line[2]) + log.Printf("WARN: %s expecting kB value in '%q' (got '%q')", pm_file[FD_MEM].Name(), s, line[2]) + return nil } - value *= 1024 + value <<= 10 switch memStatName { case "VmPeak:": ms.vmPeak = value @@ -276,5 +425,5 @@ func getMemStats(path string) (*memStats, error) { ms.rssShmem = value } } - return &ms, nil + return &ms } diff --git a/process_metrics_linux_test.go b/process_metrics_linux_test.go index 88dac03..5c46a24 100644 --- a/process_metrics_linux_test.go +++ b/process_metrics_linux_test.go @@ -1,51 +1,108 @@ package metrics -import "testing" 
- -func TestGetMaxFilesLimit(t *testing.T) { - f := func(want uint64, path string, wantErr bool) { - t.Helper() - got, err := getMaxFilesLimit(path) - if err != nil && !wantErr { - t.Fatalf("unexpected error: %v", err) - } - if got != want { - t.Fatalf("unexpected result: %d, want: %d at getMaxFilesLimit", got, want) - } +import ( + "bytes" + "fmt" + "io" + "os" + "regexp" + "strings" + "testing" +) +var testdir string + +func init() { + testdir, _ = os.Getwd() + testdir += "/testdata/" +} + +func getTestData(filename string, t *testing.T) string { + data, err := os.ReadFile(testdir + filename) + if err != nil { + t.Fatalf("%v", err) + } + s := string(data) + if filename == "linux.proc_metrics.out" { + // since linux stat.starttime is relative to boot, we need to adjust + // the expected results regarding this. + m := regexp.MustCompile("process_start_time_seconds [0-9]+") + n := fmt.Sprintf("process_start_time_seconds %d", startTimeSeconds) + return m.ReplaceAllString(s, n) } - f(1024, "testdata/limits", false) - f(0, "testdata/bad_path", true) - f(0, "testdata/limits_bad", true) + return s } -func TestGetOpenFDsCount(t *testing.T) { - f := func(want uint64, path string, wantErr bool) { - t.Helper() - got, err := getOpenFDsCount(path) - if (err != nil && !wantErr) || (err == nil && wantErr) { - t.Fatalf("unexpected error: %v", err) - } - if got != want { - t.Fatalf("unexpected result: %d, want: %d at getOpenFDsCount", got, want) +func stripComments(input string) string { + var builder strings.Builder + lines := strings.Split(input, "\n") + for _, line := range lines { + s := strings.TrimSpace(line) + if strings.HasPrefix(s, "#") || s == "" { + continue } + builder.WriteString(line + "\n") } - f(5, "testdata/fd/", false) - f(0, "testdata/fd/0", true) - f(0, "testdata/limits", true) + return builder.String() } -func TestGetMemStats(t *testing.T) { - f := func(want memStats, path string, wantErr bool) { - t.Helper() - got, err := getMemStats(path) - if (err != nil && 
!wantErr) || (err == nil && wantErr) { - t.Fatalf("unexpected error: %v", err) - } - if got != nil && *got != want { - t.Fatalf("unexpected result: %d, want: %d at getMemStats", *got, want) +func Test_processMetrics(t *testing.T) { + diffFormat := "Test %s:\n\tgot:\n'%v'\n\twant:\n'%v'" + tests := []struct { + name string + wantW string + fn func(w io.Writer) + }{ + {"pm", getTestData("linux.proc_metrics.out", t), writeProcessMetrics}, + {"fdm", getTestData("linux.fd_metrics.out", t), writeFDMetrics}, + } + for _, compact := range []bool{true, false} { + ExposeMetadata(!compact) + for _, tt := range tests { + want := tt.wantW + if compact { + want = stripComments(want) + } + t.Run(tt.name, func(t *testing.T) { + w := &bytes.Buffer{} + tt.fn(w) + if gotW := w.String(); gotW != want { + t.Errorf(diffFormat, tt.name, gotW, want) + } + }) } } - f(memStats{vmPeak: 2130489344, rssPeak: 200679424, rssAnon: 121602048, rssFile: 11362304}, "testdata/status", false) - f(memStats{}, "testdata/status_bad", true) + + // missing /proc//io file - just omit the process_io_* metric entries + // see https://github.com/VictoriaMetrics/metrics/issues/42 + tt := tests[0] + want := stripComments(tt.wantW) + m := regexp.MustCompile("process_io_[_a-z]+ [0-9]+\n") + wantW := m.ReplaceAllString(want, "") + testfiles[FD_IO] = "/doesNotExist" + ExposeMetadata(false) // no need to check comments again + init2() + t.Run(tt.name, func(t *testing.T) { + w := &bytes.Buffer{} + tt.fn(w) + if gotW := w.String(); gotW != wantW { + t.Errorf(diffFormat, tt.name, gotW, wantW) + } + }) + + // bad limits: just omit the process_max_fds metric entry + tt = tests[1] + want = stripComments(tt.wantW) + m = regexp.MustCompile("process_max_fds [0-9]+\n") + wantW = m.ReplaceAllString(want, "") + testfiles[FD_LIMITS] = "/limits_bad" + init2() + t.Run(tt.name, func(t *testing.T) { + w := &bytes.Buffer{} + tt.fn(w) + if gotW := w.String(); gotW != wantW { + t.Errorf(diffFormat, tt.name, gotW, wantW) + } + }) + } diff 
--git a/testdata/limits b/testdata/limits deleted file mode 100644 index fb520d3..0000000 --- a/testdata/limits +++ /dev/null @@ -1,17 +0,0 @@ -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 127458 127458 processes -Max open files 1024 1048576 files -Max locked memory 67108864 67108864 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 127458 127458 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us \ No newline at end of file diff --git a/testdata/linux.fd_metrics.out b/testdata/linux.fd_metrics.out new file mode 100644 index 0000000..54905a2 --- /dev/null +++ b/testdata/linux.fd_metrics.out @@ -0,0 +1,6 @@ +# HELP process_max_fds +# TYPE process_max_fds gauge +process_max_fds 2048 +# HELP process_open_fds +# TYPE process_open_fds gauge +process_open_fds 5 diff --git a/testdata/linux.proc_metrics.out b/testdata/linux.proc_metrics.out new file mode 100644 index 0000000..34655be --- /dev/null +++ b/testdata/linux.proc_metrics.out @@ -0,0 +1,60 @@ +# HELP process_virtual_memory_peak_bytes +# TYPE process_virtual_memory_peak_bytes gauge +process_virtual_memory_peak_bytes 25751552 +# HELP process_resident_memory_peak_bytes +# TYPE process_resident_memory_peak_bytes gauge +process_resident_memory_peak_bytes 4231168 +# HELP process_resident_memory_anon_bytes +# TYPE process_resident_memory_anon_bytes gauge +process_resident_memory_anon_bytes 1626112 +# HELP process_resident_memory_file_bytes +# TYPE process_resident_memory_file_bytes gauge +process_resident_memory_file_bytes 2605056 +# HELP process_resident_memory_shared_bytes +# TYPE process_resident_memory_shared_bytes gauge 
+process_resident_memory_shared_bytes 0 +# HELP process_io_read_bytes_total +# TYPE process_io_read_bytes_total gauge +process_io_read_bytes_total 0 +# HELP process_io_written_bytes_total +# TYPE process_io_written_bytes_total gauge +process_io_written_bytes_total 0 +# HELP process_io_read_syscalls_total +# TYPE process_io_read_syscalls_total gauge +process_io_read_syscalls_total 0 +# HELP process_io_write_syscalls_total +# TYPE process_io_write_syscalls_total gauge +process_io_write_syscalls_total 0 +# HELP process_io_storage_read_bytes_total +# TYPE process_io_storage_read_bytes_total gauge +process_io_storage_read_bytes_total 0 +# HELP process_io_storage_written_bytes_total +# TYPE process_io_storage_written_bytes_total gauge +process_io_storage_written_bytes_total 0 +# HELP process_cpu_seconds_system_total +# TYPE process_cpu_seconds_system_total counter +process_cpu_seconds_system_total 0.08 +# HELP process_cpu_seconds_total +# TYPE process_cpu_seconds_total counter +process_cpu_seconds_total 0.18 +# HELP process_cpu_seconds_user_total +# TYPE process_cpu_seconds_user_total counter +process_cpu_seconds_user_total 0.1 +# HELP process_major_pagefaults_total +# TYPE process_major_pagefaults_total counter +process_major_pagefaults_total 0 +# HELP process_minor_pagefaults_total +# TYPE process_minor_pagefaults_total counter +process_minor_pagefaults_total 3111 +# HELP process_num_threads +# TYPE process_num_threads gauge +process_num_threads 1 +# HELP process_resident_memory_bytes +# TYPE process_resident_memory_bytes gauge +process_resident_memory_bytes 4231168 +# HELP process_start_time_seconds +# TYPE process_start_time_seconds gauge +process_start_time_seconds 1742515804 +# HELP process_virtual_memory_bytes +# TYPE process_virtual_memory_bytes gauge +process_virtual_memory_bytes 25751552 diff --git a/testdata/linux.ps_io b/testdata/linux.ps_io new file mode 100644 index 0000000..ef90e6f --- /dev/null +++ b/testdata/linux.ps_io @@ -0,0 +1,57 @@ +Name: tcsh 
+Umask: 0022 +State: S (sleeping) +Tgid: 847024 +Ngid: 0 +Pid: 847024 +PPid: 847023 +TracerPid: 0 +Uid: 6018 6018 6018 6018 +Gid: 1502 1502 1502 1502 +FDSize: 64 +Groups: 14 1501 1502 1504 1510 1520 1530 +NStgid: 847024 +NSpid: 847024 +NSpgid: 847024 +NSsid: 847024 +VmPeak: 25148 kB +VmSize: 25148 kB +VmLck: 0 kB +VmPin: 0 kB +VmHWM: 4132 kB +VmRSS: 4132 kB +RssAnon: 1588 kB +RssFile: 2544 kB +RssShmem: 0 kB +VmData: 2584 kB +VmStk: 132 kB +VmExe: 304 kB +VmLib: 1972 kB +VmPTE: 68 kB +VmSwap: 0 kB +HugetlbPages: 0 kB +CoreDumping: 0 +THP_enabled: 1 +Threads: 1 +SigQ: 0/254739 +SigPnd: 0000000000000000 +ShdPnd: 0000000000000000 +SigBlk: 0000000000000002 +SigIgn: 0000000000384004 +SigCgt: 0000000009812003 +CapInh: 0000000000000000 +CapPrm: 0000000000000000 +CapEff: 0000000000000000 +CapBnd: 000001ffffffffff +CapAmb: 0000000000000000 +NoNewPrivs: 0 +Seccomp: 0 +Seccomp_filters: 0 +Speculation_Store_Bypass: thread vulnerable +SpeculationIndirectBranch: conditional enabled +Cpus_allowed: ffffff +Cpus_allowed_list: 0-23 +Mems_allowed: 00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000001 +Mems_allowed_list: 0 +voluntary_ctxt_switches: 821 +nonvoluntary_ctxt_switches: 6 diff --git a/testdata/linux.ps_limits b/testdata/linux.ps_limits new file mode 100644 index 0000000..6d72e94 --- /dev/null +++ b/testdata/linux.ps_limits @@ -0,0 +1,17 @@ +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size unlimited unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 254739 254739 processes +Max open files 2048 1048576 files +Max locked memory 8351289344 8351289344 bytes 
+Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 254739 254739 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us diff --git a/testdata/linux.ps_stat b/testdata/linux.ps_stat new file mode 100644 index 0000000..a937f6c --- /dev/null +++ b/testdata/linux.ps_stat @@ -0,0 +1 @@ +847024 (tcsh) S 847023 847024 847024 34820 946306 4194304 3111 8084 0 5 10 8 13 10 20 0 1 0 70598402 25751552 1033 18446744073709551615 94248001904640 94248002214501 140727550087072 0 0 0 2 3686404 159457283 1 0 0 17 14 0 0 0 0 0 94248002292016 94248002313406 94249019904000 140727550090699 140727550090705 140727550090705 140727550091246 0 diff --git a/testdata/linux.ps_status b/testdata/linux.ps_status new file mode 100644 index 0000000..27c83cd --- /dev/null +++ b/testdata/linux.ps_status @@ -0,0 +1,57 @@ +Name: tcsh +Umask: 0022 +State: S (sleeping) +Tgid: 847024 +Ngid: 0 +Pid: 847024 +PPid: 847023 +TracerPid: 0 +Uid: 6018 6018 6018 6018 +Gid: 1502 1502 1502 1502 +FDSize: 64 +Groups: 14 1501 1502 1504 1510 1520 1530 +NStgid: 847024 +NSpid: 847024 +NSpgid: 847024 +NSsid: 847024 +VmPeak: 25148 kB +VmSize: 25148 kB +VmLck: 0 kB +VmPin: 0 kB +VmHWM: 4132 kB +VmRSS: 4132 kB +RssAnon: 1588 kB +RssFile: 2544 kB +RssShmem: 0 kB +VmData: 2584 kB +VmStk: 132 kB +VmExe: 304 kB +VmLib: 1972 kB +VmPTE: 68 kB +VmSwap: 0 kB +HugetlbPages: 0 kB +CoreDumping: 0 +THP_enabled: 1 +Threads: 1 +SigQ: 0/254739 +SigPnd: 0000000000000000 +ShdPnd: 0000000000000000 +SigBlk: 0000000000000002 +SigIgn: 0000000000384004 +SigCgt: 0000000009812003 +CapInh: 0000000000000000 +CapPrm: 0000000000000000 +CapEff: 0000000000000000 +CapBnd: 000001ffffffffff +CapAmb: 0000000000000000 +NoNewPrivs: 0 +Seccomp: 0 +Seccomp_filters: 0 +Speculation_Store_Bypass: thread vulnerable +SpeculationIndirectBranch: conditional enabled +Cpus_allowed: ffffff +Cpus_allowed_list: 0-23 +Mems_allowed: 
00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000000,00000001 +Mems_allowed_list: 0 +voluntary_ctxt_switches: 810 +nonvoluntary_ctxt_switches: 6 From 5f3c208bfa603c91fa58eadfadc406cb27e2fd56 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sun, 3 Aug 2025 20:51:33 +0200 Subject: [PATCH 2/2] Apply suggestions from code review Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- process_metrics_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/process_metrics_linux.go b/process_metrics_linux.go index ede0fe6..125a2f1 100644 --- a/process_metrics_linux.go +++ b/process_metrics_linux.go @@ -74,7 +74,7 @@ process metrics related file descriptors for files we always need, and var pm_fd [FD_COUNT]int /* -to avaid, that go closes the files in the background, which makes the FDs +to avoid, that go closes the files in the background, which makes the FDs above useless, we need to keep the reference to them as well */