forgejo-runner-optimiser/internal/summary/accumulator.go
Waldemar Kindler 7201a527d8
All checks were successful
ci / build (push) Successful in 1m39s
feat(collector): Summarizes metrics at the end of the process
2026-02-04 16:21:17 +01:00

138 lines
3.5 KiB
Go

// ABOUTME: Accumulates system metrics samples across a collection run.
// ABOUTME: Computes peak, average, and P95 statistics for CPU and memory on demand.
package summary
import (
"fmt"
"sort"
"time"
"edp.buildth.ing/DevFW-CICD/forgejo-runner-resource-collector/internal/metrics"
)
// Accumulator collects metric samples and computes run-level statistics.
// It keeps every per-sample CPU/memory value so percentiles can be computed
// at the end of the run, plus a running peak per observed process.
// No internal locking: not safe for concurrent use; callers must serialize
// Add and Summarize.
type Accumulator struct {
	topN           int                     // max processes returned in each top list (see topProcesses)
	cpuValues      []float64               // total CPU percent, one entry per sample
	memBytesValues []float64               // memory used in bytes, one entry per sample
	memPctValues   []float64               // memory used percent, one entry per sample
	processPeaks   map[string]*ProcessPeak // keyed "<pid>:<name>"; peak CPU/RSS per process
	startTime      time.Time               // timestamp of the first sample added
	endTime        time.Time               // timestamp of the most recent sample added
	sampleCount    int                     // number of Add calls recorded
}
// NewAccumulator constructs an empty Accumulator whose summaries will
// report at most topN processes per top list.
func NewAccumulator(topN int) *Accumulator {
	acc := &Accumulator{processPeaks: map[string]*ProcessPeak{}}
	acc.topN = topN
	return acc
}
// Add records a single metrics sample, extending the per-sample value
// series and folding the sample's process lists into the peak tracker.
func (a *Accumulator) Add(m *metrics.SystemMetrics) {
	// The very first sample fixes the run's start; every sample moves the end.
	if a.sampleCount == 0 {
		a.startTime = m.Timestamp
	}
	a.sampleCount++
	a.endTime = m.Timestamp

	a.cpuValues = append(a.cpuValues, m.CPU.TotalPercent)
	a.memBytesValues = append(a.memBytesValues, float64(m.Memory.UsedBytes))
	a.memPctValues = append(a.memPctValues, m.Memory.UsedPercent)

	// Both top lists feed the same peak map; duplicates are harmless
	// because updateProcessPeak only ever raises recorded peaks.
	for _, group := range [][]metrics.ProcessMetrics{m.TopCPU, m.TopMemory} {
		for _, proc := range group {
			a.updateProcessPeak(proc)
		}
	}
}
// Summarize computes and returns the run summary, or nil if no samples
// were added.
func (a *Accumulator) Summarize() *RunSummary {
	if a.sampleCount < 1 {
		return nil
	}
	elapsed := a.endTime.Sub(a.startTime)
	summary := &RunSummary{
		StartTime:       a.startTime,
		EndTime:         a.endTime,
		DurationSeconds: elapsed.Seconds(),
		SampleCount:     a.sampleCount,
		CPUTotal:        computeStats(a.cpuValues),
		MemUsedBytes:    computeStats(a.memBytesValues),
		MemUsedPercent:  computeStats(a.memPctValues),
		// Rank by peak CPU and peak RSS respectively; RSS is widened to
		// float64 so both lists share one ranking function.
		TopCPUProcesses: a.topProcesses(func(p *ProcessPeak) float64 { return p.PeakCPU }),
		TopMemProcesses: a.topProcesses(func(p *ProcessPeak) float64 { return float64(p.PeakMem) }),
	}
	return summary
}
// SampleCount reports how many samples have been recorded via Add.
func (a *Accumulator) SampleCount() int {
	n := a.sampleCount
	return n
}
// computeStats derives peak, average, and 95th-percentile values from the
// given series. Sorting happens on a private copy so the caller's slice is
// left untouched; an empty input yields the zero StatSummary.
func computeStats(values []float64) StatSummary {
	if len(values) == 0 {
		return StatSummary{}
	}

	sorted := append([]float64(nil), values...)
	sort.Float64s(sorted)

	total := 0.0
	for _, v := range values {
		total += v
	}

	// Nearest-rank (low) percentile: index truncates toward zero, so for
	// small n this picks a value at or below the true 95th percentile.
	last := len(sorted) - 1
	return StatSummary{
		Peak: sorted[last],
		Avg:  total / float64(len(sorted)),
		P95:  sorted[int(float64(last) * 0.95)],
	}
}
// updateProcessPeak merges one process observation into the peak map.
// Entries are keyed "<pid>:<name>" so a reused PID running a different
// binary is tracked as a distinct process.
func (a *Accumulator) updateProcessPeak(p metrics.ProcessMetrics) {
	key := fmt.Sprintf("%d:%s", p.PID, p.Name)
	if peak, seen := a.processPeaks[key]; seen {
		// Known process: only ever raise the recorded peaks.
		if peak.PeakCPU < p.CPUPercent {
			peak.PeakCPU = p.CPUPercent
		}
		if peak.PeakMem < p.MemRSS {
			peak.PeakMem = p.MemRSS
		}
		return
	}
	// First sighting: the observation itself is the peak so far.
	a.processPeaks[key] = &ProcessPeak{
		PID:     p.PID,
		Name:    p.Name,
		PeakCPU: p.CPUPercent,
		PeakMem: p.MemRSS,
	}
}
// topProcesses ranks every tracked process by keyFn (highest value first)
// and returns at most a.topN of them as value copies.
func (a *Accumulator) topProcesses(keyFn func(*ProcessPeak) float64) []ProcessPeak {
	ranked := make([]ProcessPeak, 0, len(a.processPeaks))
	for _, peak := range a.processPeaks {
		ranked = append(ranked, *peak)
	}
	sort.Slice(ranked, func(i, j int) bool {
		return keyFn(&ranked[i]) > keyFn(&ranked[j])
	})
	if a.topN < len(ranked) {
		return ranked[:a.topN]
	}
	return ranked
}