forgejo-runner-optimiser/internal/receiver/sizing.go
Martin McCaffery d0aea88a5b
Some checks failed
ci / build (push) Failing after 58s
refactor: Rename recommender to sizer
2026-02-13 16:42:37 +01:00

221 lines
5.6 KiB
Go

// ABOUTME: Computes ideal container sizes from historical run data.
// ABOUTME: Provides Kubernetes-style resource sizes.
package receiver
import (
"encoding/json"
"fmt"
"math"
"sort"
"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary"
)
// ResourceSize holds Kubernetes-formatted resource values
// (quantity strings such as "250m" for CPU or "64Mi" for memory).
type ResourceSize struct {
// Request is the recommended resource request quantity.
Request string `json:"request"`
// Limit is the recommended resource limit quantity.
Limit string `json:"limit"`
}
// ContainerSizing holds computed sizing for a single container
type ContainerSizing struct {
// Name is the container name as it appears in the run summaries.
Name string `json:"name"`
// CPU is the request/limit pair in Kubernetes CPU quantities (cores or millicores).
CPU ResourceSize `json:"cpu"`
// Memory is the request/limit pair in Kubernetes memory quantities (Mi).
Memory ResourceSize `json:"memory"`
}
// SizingMeta provides context about the sizing calculation
type SizingMeta struct {
// RunsAnalyzed is the number of metric records the calculation was based on.
RunsAnalyzed int `json:"runs_analyzed"`
// BufferPercent is the headroom percentage applied on top of observed usage.
BufferPercent int `json:"buffer_percent"`
// CPUPercentile names the CPU statistic used for sizing
// ("peak", "p99", "p95", "p75", "p50", or "avg").
CPUPercentile string `json:"cpu_percentile"`
}
// SizingResponse is the API response for the sizing endpoint
type SizingResponse struct {
// Containers holds per-container sizing, sorted by container name.
Containers []ContainerSizing `json:"containers"`
// Total sums the buffered per-container values across all containers.
Total struct {
CPU ResourceSize `json:"cpu"`
Memory ResourceSize `json:"memory"`
} `json:"total"`
// Meta describes how the sizing was computed.
Meta SizingMeta `json:"meta"`
}
// validPercentiles enumerates the CPU percentile selectors the API accepts.
var validPercentiles = map[string]bool{
	"peak": true,
	"p99":  true,
	"p95":  true,
	"p75":  true,
	"p50":  true,
	"avg":  true,
}

// IsValidPercentile reports whether p names a supported percentile selector.
func IsValidPercentile(p string) bool {
	_, ok := validPercentiles[p]
	return ok
}
// selectCPUValue picks the CPU statistic matching the requested percentile
// selector from a run's StatSummary. Unknown selectors fall back to p95.
func selectCPUValue(stats summary.StatSummary, percentile string) float64 {
	byName := map[string]float64{
		"peak": stats.Peak,
		"p99":  stats.P99,
		"p95":  stats.P95,
		"p75":  stats.P75,
		"p50":  stats.P50,
		"avg":  stats.Avg,
	}
	if v, ok := byName[percentile]; ok {
		return v
	}
	// Default to p95 for unrecognized selectors.
	return stats.P95
}
// formatMemoryK8s renders a byte count as a Kubernetes memory quantity in
// whole mebibytes, rounding up (e.g. 1.5 MiB -> "2Mi").
func formatMemoryK8s(bytes float64) string {
	const bytesPerMi = 1 << 20
	mi := math.Ceil(bytes / bytesPerMi)
	return fmt.Sprintf("%.0fMi", mi)
}
// formatCPUK8s renders a core count as a Kubernetes CPU quantity: exact whole
// multiples of one core are printed bare ("2"), everything else as millicores
// rounded up ("1250m").
func formatCPUK8s(cores float64) string {
	m := cores * 1000
	isWholeCores := m >= 1000 && math.Mod(m, 1000) == 0
	if isWholeCores {
		return fmt.Sprintf("%.0f", cores)
	}
	return fmt.Sprintf("%.0fm", math.Ceil(m))
}
// roundUpMemoryLimit rounds a byte count up to the next power of 2 in Mi,
// with a floor of 1Mi for zero/negative or sub-Mi inputs.
func roundUpMemoryLimit(bytes float64) float64 {
	const mi = 1024 * 1024
	scaled := bytes / mi
	if scaled <= 1 {
		// Covers bytes <= 0 as well: enforce the 1Mi minimum.
		return mi
	}
	// Double from 1Mi until we reach or pass the input.
	rounded := 1.0
	for rounded < scaled {
		rounded *= 2
	}
	return rounded * mi
}
// roundUpCPULimit rounds a core count up to the next 0.5-core increment,
// with a floor of 0.5 cores for zero/negative inputs.
func roundUpCPULimit(cores float64) float64 {
	halves := math.Ceil(cores * 2)
	if halves < 1 {
		// cores <= 0: enforce the half-core minimum.
		halves = 1
	}
	return halves / 2
}
// containerAggregation holds accumulated stats for a single container across runs
type containerAggregation struct {
// cpuValues collects the selected CPU percentile value from each run.
cpuValues []float64
// memoryPeaks collects the peak memory usage (bytes) from each run.
memoryPeaks []float64
}
// computeSizing calculates ideal per-container and total resource sizes from
// stored run metrics.
//
// For each container it takes the maximum of the selected CPU percentile
// values and the maximum memory peak observed across all runs, applies the
// percentage buffer, and formats the results as Kubernetes quantities.
// Requests carry the buffered value directly; limits are additionally rounded
// up (CPU to the next 0.5 cores, memory to the next power-of-two Mi).
//
// Metrics whose payload fails to unmarshal are skipped, and Meta.RunsAnalyzed
// reports only the runs that actually contributed (the original reported
// len(metrics), overstating the sample when payloads were invalid). Returns
// an error when no metrics are provided or none could be parsed.
func computeSizing(metrics []Metric, bufferPercent int, cpuPercentile string) (*SizingResponse, error) {
	if len(metrics) == 0 {
		return nil, fmt.Errorf("no metrics provided")
	}

	containerStats, runsAnalyzed := aggregateRuns(metrics, cpuPercentile)
	if runsAnalyzed == 0 {
		// Every payload was invalid; a sizing built from nothing is meaningless.
		return nil, fmt.Errorf("none of the %d metric payloads could be parsed", len(metrics))
	}

	bufferMultiplier := 1.0 + float64(bufferPercent)/100.0

	// Sort container names for deterministic output ordering.
	names := make([]string, 0, len(containerStats))
	for name := range containerStats {
		names = append(names, name)
	}
	sort.Strings(names)

	var containers []ContainerSizing
	var totalCPU, totalMemory float64
	for _, name := range names {
		agg := containerStats[name]
		// CPU: max of selected percentile values across runs, plus buffer.
		cpuWithBuffer := maxOf(agg.cpuValues) * bufferMultiplier
		// Memory: peak of per-run peaks, plus buffer.
		memoryWithBuffer := maxOf(agg.memoryPeaks) * bufferMultiplier
		containers = append(containers, ContainerSizing{
			Name: name,
			CPU: ResourceSize{
				Request: formatCPUK8s(cpuWithBuffer),
				Limit:   formatCPUK8s(roundUpCPULimit(cpuWithBuffer)),
			},
			Memory: ResourceSize{
				Request: formatMemoryK8s(memoryWithBuffer),
				Limit:   formatMemoryK8s(roundUpMemoryLimit(memoryWithBuffer)),
			},
		})
		totalCPU += cpuWithBuffer
		totalMemory += memoryWithBuffer
	}

	response := &SizingResponse{
		Containers: containers,
		Meta: SizingMeta{
			RunsAnalyzed:  runsAnalyzed,
			BufferPercent: bufferPercent,
			CPUPercentile: cpuPercentile,
		},
	}
	response.Total.CPU = ResourceSize{
		Request: formatCPUK8s(totalCPU),
		Limit:   formatCPUK8s(roundUpCPULimit(totalCPU)),
	}
	response.Total.Memory = ResourceSize{
		Request: formatMemoryK8s(totalMemory),
		Limit:   formatMemoryK8s(roundUpMemoryLimit(totalMemory)),
	}
	return response, nil
}

// aggregateRuns collects, per container name, the selected CPU percentile
// value and the peak memory from every parseable run payload. It returns the
// aggregation map and the number of payloads that parsed successfully.
func aggregateRuns(metrics []Metric, cpuPercentile string) (map[string]*containerAggregation, int) {
	containerStats := make(map[string]*containerAggregation)
	parsed := 0
	for _, m := range metrics {
		var runSummary summary.RunSummary
		if err := json.Unmarshal([]byte(m.Payload), &runSummary); err != nil {
			continue // skip invalid payloads; excluded from runs_analyzed
		}
		parsed++
		for _, c := range runSummary.Containers {
			agg, ok := containerStats[c.Name]
			if !ok {
				agg = &containerAggregation{}
				containerStats[c.Name] = agg
			}
			agg.cpuValues = append(agg.cpuValues, selectCPUValue(c.CPUCores, cpuPercentile))
			agg.memoryPeaks = append(agg.memoryPeaks, c.MemoryBytes.Peak)
		}
	}
	return containerStats, parsed
}

// maxOf returns the largest value in vs, or 0 for an empty slice
// (matching the original accumulator that started at 0).
func maxOf(vs []float64) float64 {
	m := 0.0
	for _, v := range vs {
		if v > m {
			m = v
		}
	}
	return m
}