diff --git a/pkg/collector/cgroup.go b/pkg/collector/cgroup.go
index 42a11e0e..be28ad00 100644
--- a/pkg/collector/cgroup.go
+++ b/pkg/collector/cgroup.go
@@ -98,9 +98,11 @@ var (
 	For v2 possibilities are
 	/machine.slice/machine-qemu\x2d2\x2dinstance\x2d00000001.scope
 	/machine.slice/machine-qemu\x2d2\x2dinstance\x2d00000001.scope/libvirt
+
+	For non-systemd layouts: machine/qemu-1-instance1.libvirt-qemu
 */
 var (
-	libvirtCgroupPathRegex = regexp.MustCompile("^.*/(?:.+?)-qemu-(?:[0-9]+)-(?P<uuid>instance-[0-9a-f]+)(?:.*$)")
+	libvirtCgroupPathRegex = regexp.MustCompile("^.*/(?:.+?)qemu-(?:[0-9]+)-(?P<uuid>instance-[0-9a-f]+)(?:.*$)")
 )
 
 // Ref: https://linuxera.org/cpu-memory-management-kubernetes-cgroupsv2/
@@ -199,6 +201,7 @@ type cgroupManager struct {
 	idRegex          *regexp.Regexp    // Regular expression to capture cgroup ID set by resource manager
 	isChild          func(string) bool // Function to identify child cgroup paths. Function must return true if cgroup is a child to root cgroup
 	ignoreProc       func(string) bool // Function to filter processes in cgroup based on cmdline. Function must return true if process must be ignored
+	nonSystemdLayout bool              // Libvirt collector only. Whether Libvirt is using a non-systemd cgroup layout
 }
 
 // NewCgroupManager returns an instance of cgroupManager based on resource manager.
@@ -311,7 +314,7 @@ func NewCgroupManager(name manager, logger *slog.Logger) (*cgroupManager, error)
 			fs:     fs,
 			mode:   cgroups.Unified,
 			root:   *cgroupfsPath,
-			slices: []string{"machine.slice"},
+			slices: []string{},
 		}
 	} else {
 		var mode cgroups.CGMode
@@ -330,7 +333,28 @@ func NewCgroupManager(name manager, logger *slog.Logger) (*cgroupManager, error)
 			mode:             mode,
 			root:             *cgroupfsPath,
 			activeController: activeSubsystem,
-			slices:           []string{"machine.slice"},
+			slices:           []string{},
+		}
+	}
+
+	// Discover cgroup layout depending on whether nova-libvirt uses systemd
+	var slicesPrefix string
+
+	switch manager.mode { //nolint:exhaustive
+	case cgroups.Unified:
+		slicesPrefix = *cgroupfsPath
+	default:
+		slicesPrefix = filepath.Join(*cgroupfsPath, manager.activeController)
+	}
+
+	for _, slice := range []string{"machine", "machine.slice"} {
+		if _, err := os.Stat(filepath.Join(slicesPrefix, slice)); err == nil {
+			manager.slices = append(manager.slices, slice)
+			if slice == "machine" {
+				manager.nonSystemdLayout = true
+			}
+
+			break // This should be fine as at most one of machine or machine.slice will exist at any given time
 		}
 	}
 
@@ -342,7 +366,7 @@ func NewCgroupManager(name manager, logger *slog.Logger) (*cgroupManager, error)
 	manager.idRegex = libvirtCgroupPathRegex
 
 	// Identify child cgroup
-	// In cgroups v1, all the child cgroups like emulator, vcpu* are flat whereas
+	// In cgroups v1 or on a non-systemd host, all the child cgroups like emulator, vcpu* are flat whereas
 	// in v2 they are all inside libvirt child
 	manager.isChild = func(p string) bool {
 		return strings.Contains(p, "/libvirt") || strings.Contains(p, "/emulator") || strings.Contains(p, "/vcpu")
@@ -1078,10 +1102,9 @@ func (c *cgroupCollector) cpusFromChildren(path string) (int, error) {
 	// In cgroup v1, they are flat whereas in cgroup v2 they are inside libvirt folder
 	var vcpuPath string
 
-	switch c.cgroupManager.mode { //nolint:exhaustive
-	case cgroups.Unified:
+	if c.cgroupManager.mode == cgroups.Unified && !c.cgroupManager.nonSystemdLayout {
 		vcpuPath = fmt.Sprintf("%s%s/libvirt/vcpu*", c.cgroupManager.root, path)
-	default:
+	} else {
 		vcpuPath = fmt.Sprintf("%s%s/vcpu*", c.cgroupManager.root, path)
 	}
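
A quick standalone sketch (not part of the patch) of why the leading hyphen was dropped from the pattern. The named capture group's identifier was garbled in the hunk above, so "uuid" is an assumed name here; the sample paths assume systemd's \x2d escapes have already been unescaped to "-" before matching, and use a Nova-style domain name (instance-00000001), since the character class only matches that form:

package main

import (
	"fmt"
	"regexp"
)

var (
	// Old pattern: requires a literal "-qemu-", which never occurs in the
	// non-systemd layout (".../machine/qemu-1-...").
	oldRegex = regexp.MustCompile("^.*/(?:.+?)-qemu-(?:[0-9]+)-(?P<uuid>instance-[0-9a-f]+)(?:.*$)")
	// New pattern: "(?:.+?)" absorbs whatever precedes "qemu-", whether
	// "machine-" (systemd) or "machine/" (non-systemd).
	// NOTE: the group name "uuid" is an assumption; the original was lost.
	newRegex = regexp.MustCompile("^.*/(?:.+?)qemu-(?:[0-9]+)-(?P<uuid>instance-[0-9a-f]+)(?:.*$)")
)

func main() {
	paths := []string{
		// systemd layout (cgroups v2 scope), \x2d already unescaped to "-"
		"/machine.slice/machine-qemu-2-instance-00000001.scope/libvirt",
		// non-systemd layout
		"/machine/qemu-1-instance-00000001.libvirt-qemu",
	}
	regexes := []struct {
		name string
		re   *regexp.Regexp
	}{{"old", oldRegex}, {"new", newRegex}}

	for _, p := range paths {
		for _, r := range regexes {
			if m := r.re.FindStringSubmatch(p); m != nil {
				fmt.Printf("%s: %s -> %s\n", r.name, p, m[1])
			} else {
				fmt.Printf("%s: %s -> no match\n", r.name, p)
			}
		}
	}
}

The old pattern misses the non-systemd path entirely, while the new one captures "instance-00000001" from both layouts.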