feat: add first iteration of the sizing recommender
This commit is contained in:
parent
e1a4e9c579
commit
8101e9b20e
4 changed files with 802 additions and 0 deletions
|
|
@ -5,6 +5,7 @@ package receiver
|
|||
import (
	"crypto/subtle"
	"encoding/json"
	"fmt"
	"log/slog"
	"net/http"
	"strconv"
	"strings"
|
|
@ -30,6 +31,7 @@ func (h *Handler) RegisterRoutes(mux *http.ServeMux) {
|
|||
mux.HandleFunc("POST /api/v1/metrics", h.handleReceiveMetrics)
|
||||
mux.HandleFunc("POST /api/v1/token", h.handleGenerateToken)
|
||||
mux.HandleFunc("GET /api/v1/metrics/repo/{org}/{repo}/{workflow}/{job}", h.handleGetByWorkflowJob)
|
||||
mux.HandleFunc("GET /api/v1/sizing/repo/{org}/{repo}/{workflow}/{job}", h.handleGetSizing)
|
||||
mux.HandleFunc("GET /health", h.handleHealth)
|
||||
}
|
||||
|
||||
|
|
@ -194,3 +196,71 @@ func (h *Handler) handleHealth(w http.ResponseWriter, r *http.Request) {
|
|||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(map[string]string{"status": "ok"})
|
||||
}
|
||||
|
||||
func (h *Handler) handleGetSizing(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.validateReadToken(w, r) {
|
||||
return
|
||||
}
|
||||
|
||||
org := r.PathValue("org")
|
||||
repo := r.PathValue("repo")
|
||||
workflow := r.PathValue("workflow")
|
||||
job := r.PathValue("job")
|
||||
if org == "" || repo == "" || workflow == "" || job == "" {
|
||||
http.Error(w, "org, repo, workflow and job are required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse query parameters with defaults
|
||||
runs := parseIntQueryParam(r, "runs", 5, 1, 100)
|
||||
buffer := parseIntQueryParam(r, "buffer", 20, 0, 100)
|
||||
cpuPercentile := r.URL.Query().Get("cpu_percentile")
|
||||
if cpuPercentile == "" {
|
||||
cpuPercentile = "p95"
|
||||
}
|
||||
if !IsValidPercentile(cpuPercentile) {
|
||||
http.Error(w, "invalid cpu_percentile: must be one of peak, p99, p95, p75, p50, avg", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
metrics, err := h.store.GetRecentMetricsByWorkflowJob(org, repo, workflow, job, runs)
|
||||
if err != nil {
|
||||
h.logger.Error("failed to get metrics", slog.String("error", err.Error()))
|
||||
http.Error(w, "failed to get metrics", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
if len(metrics) == 0 {
|
||||
http.Error(w, "no metrics found for this workflow/job", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
response, err := computeSizing(metrics, buffer, cpuPercentile)
|
||||
if err != nil {
|
||||
h.logger.Error("failed to compute sizing", slog.String("error", err.Error()))
|
||||
http.Error(w, "failed to compute sizing", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(response)
|
||||
}
|
||||
|
||||
// parseIntQueryParam parses an integer query parameter with default, min, and max values
|
||||
func parseIntQueryParam(r *http.Request, name string, defaultVal, minVal, maxVal int) int {
|
||||
strVal := r.URL.Query().Get(name)
|
||||
if strVal == "" {
|
||||
return defaultVal
|
||||
}
|
||||
var val int
|
||||
if _, err := fmt.Sscanf(strVal, "%d", &val); err != nil {
|
||||
return defaultVal
|
||||
}
|
||||
if val < minVal {
|
||||
return minVal
|
||||
}
|
||||
if val > maxVal {
|
||||
return maxVal
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
|
|
|||
228
internal/receiver/sizing.go
Normal file
228
internal/receiver/sizing.go
Normal file
|
|
@ -0,0 +1,228 @@
|
|||
// ABOUTME: Computes ideal container sizes from historical run data.
|
||||
// ABOUTME: Provides Kubernetes-style resource recommendations.
|
||||
package receiver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
|
||||
"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary"
|
||||
)
|
||||
|
||||
// ResourceSize holds Kubernetes-formatted resource values (e.g. "500m",
// "512Mi"), suitable for a pod spec's requests/limits fields.
type ResourceSize struct {
	Request string `json:"request"`
	Limit   string `json:"limit"`
}

// ContainerSizing holds computed sizing for a single container.
type ContainerSizing struct {
	Name   string       `json:"name"`
	CPU    ResourceSize `json:"cpu"`
	Memory ResourceSize `json:"memory"`
}

// SizingMeta provides context about the sizing calculation:
// how many runs were aggregated and with what parameters.
type SizingMeta struct {
	RunsAnalyzed  int    `json:"runs_analyzed"`
	BufferPercent int    `json:"buffer_percent"`
	CPUPercentile string `json:"cpu_percentile"`
}

// SizingResponse is the API response for the sizing endpoint.
// Total is the sum of all per-container buffered values, formatted
// with the same request/limit rounding rules as each container.
type SizingResponse struct {
	Containers []ContainerSizing `json:"containers"`
	Total      struct {
		CPU    ResourceSize `json:"cpu"`
		Memory ResourceSize `json:"memory"`
	} `json:"total"`
	Meta SizingMeta `json:"meta"`
}
|
||||
|
||||
// validPercentiles enumerates the accepted cpu_percentile query values.
var validPercentiles = map[string]bool{
	"peak": true,
	"p99":  true,
	"p95":  true,
	"p75":  true,
	"p50":  true,
	"avg":  true,
}

// IsValidPercentile reports whether p names a supported percentile.
func IsValidPercentile(p string) bool {
	_, ok := validPercentiles[p]
	return ok
}
|
||||
|
||||
// selectCPUValue extracts the appropriate value from StatSummary based on percentile.
// Callers are expected to validate percentile with IsValidPercentile first;
// any unrecognized value silently falls back to the P95 statistic.
func selectCPUValue(stats summary.StatSummary, percentile string) float64 {
	switch percentile {
	case "peak":
		return stats.Peak
	case "p99":
		return stats.P99
	case "p95":
		return stats.P95
	case "p75":
		return stats.P75
	case "p50":
		return stats.P50
	case "avg":
		return stats.Avg
	default:
		return stats.P95 // default to p95
	}
}
|
||||
|
||||
// formatMemoryK8s renders a byte count as a Kubernetes memory quantity,
// using Gi for values of one gibibyte or more and Mi otherwise.
// Fractional units are rounded up to the next whole unit.
func formatMemoryK8s(bytes float64) string {
	const (
		Mi = 1 << 20
		Gi = 1 << 30
	)
	unit, suffix := float64(Mi), "Mi"
	if bytes >= Gi {
		unit, suffix = float64(Gi), "Gi"
	}
	return fmt.Sprintf("%.0f%s", math.Ceil(bytes/unit), suffix)
}
|
||||
|
||||
// formatCPUK8s renders a core count as a Kubernetes CPU quantity:
// exact whole-core multiples become "1", "2", ...; everything else is
// expressed in millicores, rounded up (e.g. 0.96 -> "960m").
func formatCPUK8s(cores float64) string {
	m := cores * 1000
	// Whole core counts read better without the "m" suffix.
	wholeCores := m >= 1000 && math.Mod(m, 1000) == 0
	if wholeCores {
		return fmt.Sprintf("%.0f", cores)
	}
	return fmt.Sprintf("%.0fm", math.Ceil(m))
}
|
||||
|
||||
// roundUpMemoryLimit rounds bytes up to the next power-of-two number of
// mebibytes, with a floor of 1Mi for zero, negative, or sub-Mi input.
func roundUpMemoryLimit(bytes float64) float64 {
	const Mi = 1 << 20
	mi := bytes / Mi
	if mi <= 1 {
		// Also covers zero/negative input: never recommend less than 1Mi.
		return Mi
	}
	return math.Exp2(math.Ceil(math.Log2(mi))) * Mi
}
|
||||
|
||||
// roundUpCPULimit snaps a core count up to the next half-core step,
// with a floor of half a core for non-positive input.
func roundUpCPULimit(cores float64) float64 {
	if cores > 0 {
		return math.Ceil(cores*2) / 2
	}
	return 0.5
}
|
||||
|
||||
// containerAggregation holds accumulated stats for a single container across runs.
type containerAggregation struct {
	cpuValues   []float64 // one entry per run: the selected CPU percentile value
	memoryPeaks []float64 // one entry per run: peak memory usage in bytes
}
|
||||
|
||||
// computeSizing calculates ideal container sizes from metrics
|
||||
func computeSizing(metrics []Metric, bufferPercent int, cpuPercentile string) (*SizingResponse, error) {
|
||||
if len(metrics) == 0 {
|
||||
return nil, fmt.Errorf("no metrics provided")
|
||||
}
|
||||
|
||||
// Aggregate container stats across all runs
|
||||
containerStats := make(map[string]*containerAggregation)
|
||||
|
||||
for _, m := range metrics {
|
||||
var runSummary summary.RunSummary
|
||||
if err := json.Unmarshal([]byte(m.Payload), &runSummary); err != nil {
|
||||
continue // skip invalid payloads
|
||||
}
|
||||
|
||||
for _, c := range runSummary.Containers {
|
||||
if _, exists := containerStats[c.Name]; !exists {
|
||||
containerStats[c.Name] = &containerAggregation{
|
||||
cpuValues: make([]float64, 0),
|
||||
memoryPeaks: make([]float64, 0),
|
||||
}
|
||||
}
|
||||
agg := containerStats[c.Name]
|
||||
agg.cpuValues = append(agg.cpuValues, selectCPUValue(c.CPUCores, cpuPercentile))
|
||||
agg.memoryPeaks = append(agg.memoryPeaks, c.MemoryBytes.Peak)
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate sizing for each container
|
||||
bufferMultiplier := 1.0 + float64(bufferPercent)/100.0
|
||||
var containers []ContainerSizing
|
||||
var totalCPU, totalMemory float64
|
||||
|
||||
// Sort container names for consistent output
|
||||
names := make([]string, 0, len(containerStats))
|
||||
for name := range containerStats {
|
||||
names = append(names, name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
|
||||
for _, name := range names {
|
||||
agg := containerStats[name]
|
||||
|
||||
// CPU: max of selected percentile values across runs
|
||||
maxCPU := 0.0
|
||||
for _, v := range agg.cpuValues {
|
||||
if v > maxCPU {
|
||||
maxCPU = v
|
||||
}
|
||||
}
|
||||
|
||||
// Memory: peak of peaks
|
||||
maxMemory := 0.0
|
||||
for _, v := range agg.memoryPeaks {
|
||||
if v > maxMemory {
|
||||
maxMemory = v
|
||||
}
|
||||
}
|
||||
|
||||
// Apply buffer
|
||||
cpuWithBuffer := maxCPU * bufferMultiplier
|
||||
memoryWithBuffer := maxMemory * bufferMultiplier
|
||||
|
||||
containers = append(containers, ContainerSizing{
|
||||
Name: name,
|
||||
CPU: ResourceSize{
|
||||
Request: formatCPUK8s(cpuWithBuffer),
|
||||
Limit: formatCPUK8s(roundUpCPULimit(cpuWithBuffer)),
|
||||
},
|
||||
Memory: ResourceSize{
|
||||
Request: formatMemoryK8s(memoryWithBuffer),
|
||||
Limit: formatMemoryK8s(roundUpMemoryLimit(memoryWithBuffer)),
|
||||
},
|
||||
})
|
||||
|
||||
totalCPU += cpuWithBuffer
|
||||
totalMemory += memoryWithBuffer
|
||||
}
|
||||
|
||||
response := &SizingResponse{
|
||||
Containers: containers,
|
||||
Meta: SizingMeta{
|
||||
RunsAnalyzed: len(metrics),
|
||||
BufferPercent: bufferPercent,
|
||||
CPUPercentile: cpuPercentile,
|
||||
},
|
||||
}
|
||||
|
||||
response.Total.CPU = ResourceSize{
|
||||
Request: formatCPUK8s(totalCPU),
|
||||
Limit: formatCPUK8s(roundUpCPULimit(totalCPU)),
|
||||
}
|
||||
response.Total.Memory = ResourceSize{
|
||||
Request: formatMemoryK8s(totalMemory),
|
||||
Limit: formatMemoryK8s(roundUpMemoryLimit(totalMemory)),
|
||||
}
|
||||
|
||||
return response, nil
|
||||
}
|
||||
494
internal/receiver/sizing_test.go
Normal file
494
internal/receiver/sizing_test.go
Normal file
|
|
@ -0,0 +1,494 @@
|
|||
package receiver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary"
|
||||
)
|
||||
|
||||
// TestFormatMemoryK8s verifies that byte counts render as Mi/Gi quantities,
// rounding fractional units up.
func TestFormatMemoryK8s(t *testing.T) {
	tests := []struct {
		bytes float64
		want  string
	}{
		{0, "0Mi"},
		{1024 * 1024, "1Mi"},
		{256 * 1024 * 1024, "256Mi"},
		{512 * 1024 * 1024, "512Mi"},
		{1024 * 1024 * 1024, "1Gi"},
		{2 * 1024 * 1024 * 1024, "2Gi"},
		{1.5 * 1024 * 1024 * 1024, "2Gi"}, // rounds up
		{100 * 1024 * 1024, "100Mi"},
	}

	for _, tt := range tests {
		got := formatMemoryK8s(tt.bytes)
		if got != tt.want {
			t.Errorf("formatMemoryK8s(%v) = %q, want %q", tt.bytes, got, tt.want)
		}
	}
}
|
||||
|
||||
// TestFormatCPUK8s verifies millicore formatting and that exact whole-core
// multiples drop the "m" suffix.
func TestFormatCPUK8s(t *testing.T) {
	tests := []struct {
		cores float64
		want  string
	}{
		{0, "0m"},
		{0.1, "100m"},
		{0.5, "500m"},
		{1.0, "1"},
		{1.5, "1500m"},
		{2.0, "2"},
		{2.5, "2500m"},
		{0.123, "123m"},
	}

	for _, tt := range tests {
		got := formatCPUK8s(tt.cores)
		if got != tt.want {
			t.Errorf("formatCPUK8s(%v) = %q, want %q", tt.cores, got, tt.want)
		}
	}
}
|
||||
|
||||
// TestRoundUpMemoryLimit verifies the power-of-two rounding and the 1Mi floor.
func TestRoundUpMemoryLimit(t *testing.T) {
	Mi := float64(1024 * 1024)
	tests := []struct {
		bytes float64
		want  float64
	}{
		{0, Mi},   // minimum 1Mi
		{100, Mi}, // rounds up to 1Mi
		{Mi, Mi},  // exactly 1Mi stays 1Mi
		{1.5 * Mi, 2 * Mi},
		{200 * Mi, 256 * Mi},
		{300 * Mi, 512 * Mi},
		{600 * Mi, 1024 * Mi},
	}

	for _, tt := range tests {
		got := roundUpMemoryLimit(tt.bytes)
		if got != tt.want {
			t.Errorf("roundUpMemoryLimit(%v) = %v, want %v", tt.bytes, got, tt.want)
		}
	}
}
|
||||
|
||||
// TestRoundUpCPULimit verifies rounding up to half-core steps and the 0.5 floor.
func TestRoundUpCPULimit(t *testing.T) {
	tests := []struct {
		cores float64
		want  float64
	}{
		{0, 0.5}, // minimum 0.5
		{0.1, 0.5},
		{0.5, 0.5},
		{0.6, 1.0},
		{1.0, 1.0},
		{1.1, 1.5},
		{1.5, 1.5},
		{2.0, 2.0},
		{2.3, 2.5},
	}

	for _, tt := range tests {
		got := roundUpCPULimit(tt.cores)
		if got != tt.want {
			t.Errorf("roundUpCPULimit(%v) = %v, want %v", tt.cores, got, tt.want)
		}
	}
}
|
||||
|
||||
// TestSelectCPUValue verifies each percentile selector maps to the matching
// StatSummary field, and that unknown names fall back to P95.
func TestSelectCPUValue(t *testing.T) {
	stats := summary.StatSummary{
		Peak: 10.0,
		P99:  9.0,
		P95:  8.0,
		P75:  6.0,
		P50:  5.0,
		Avg:  4.0,
	}

	tests := []struct {
		percentile string
		want       float64
	}{
		{"peak", 10.0},
		{"p99", 9.0},
		{"p95", 8.0},
		{"p75", 6.0},
		{"p50", 5.0},
		{"avg", 4.0},
		{"invalid", 8.0}, // defaults to p95
	}

	for _, tt := range tests {
		got := selectCPUValue(stats, tt.percentile)
		if got != tt.want {
			t.Errorf("selectCPUValue(stats, %q) = %v, want %v", tt.percentile, got, tt.want)
		}
	}
}
|
||||
|
||||
// TestIsValidPercentile checks the accepted percentile names and a sample of
// rejected ones (including the empty string).
func TestIsValidPercentile(t *testing.T) {
	valid := []string{"peak", "p99", "p95", "p75", "p50", "avg"}
	for _, p := range valid {
		if !IsValidPercentile(p) {
			t.Errorf("IsValidPercentile(%q) = false, want true", p)
		}
	}

	invalid := []string{"p80", "p90", "max", ""}
	for _, p := range invalid {
		if IsValidPercentile(p) {
			t.Errorf("IsValidPercentile(%q) = true, want false", p)
		}
	}
}
|
||||
|
||||
// TestComputeSizing_SingleRun checks end-to-end sizing math for one container
// in one run: 20% buffer applied to p95 CPU and peak memory, plus limit
// rounding (next half-core / next power-of-two Mi).
func TestComputeSizing_SingleRun(t *testing.T) {
	runSummary := summary.RunSummary{
		Containers: []summary.ContainerSummary{
			{
				Name:        "runner",
				CPUCores:    summary.StatSummary{Peak: 1.0, P99: 0.9, P95: 0.8, P75: 0.6, P50: 0.5, Avg: 0.4},
				MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024}, // 512Mi
			},
		},
	}

	payload, _ := json.Marshal(runSummary)
	metrics := []Metric{{Payload: string(payload)}}

	resp, err := computeSizing(metrics, 20, "p95")
	if err != nil {
		t.Fatalf("computeSizing() error = %v", err)
	}

	if len(resp.Containers) != 1 {
		t.Fatalf("got %d containers, want 1", len(resp.Containers))
	}

	c := resp.Containers[0]
	if c.Name != "runner" {
		t.Errorf("container name = %q, want %q", c.Name, "runner")
	}

	// CPU: 0.8 * 1.2 = 0.96 -> 960m request, 1 limit
	if c.CPU.Request != "960m" {
		t.Errorf("CPU request = %q, want %q", c.CPU.Request, "960m")
	}
	if c.CPU.Limit != "1" {
		t.Errorf("CPU limit = %q, want %q", c.CPU.Limit, "1")
	}

	// Memory: 512Mi * 1.2 = 614.4Mi -> 615Mi request, 1Gi limit (1024Mi = 1Gi)
	if c.Memory.Request != "615Mi" {
		t.Errorf("Memory request = %q, want %q", c.Memory.Request, "615Mi")
	}
	if c.Memory.Limit != "1Gi" {
		t.Errorf("Memory limit = %q, want %q", c.Memory.Limit, "1Gi")
	}

	if resp.Meta.RunsAnalyzed != 1 {
		t.Errorf("runs_analyzed = %d, want 1", resp.Meta.RunsAnalyzed)
	}
	if resp.Meta.BufferPercent != 20 {
		t.Errorf("buffer_percent = %d, want 20", resp.Meta.BufferPercent)
	}
	if resp.Meta.CPUPercentile != "p95" {
		t.Errorf("cpu_percentile = %q, want %q", resp.Meta.CPUPercentile, "p95")
	}
}
|
||||
|
||||
// TestComputeSizing_MultipleRuns checks that the worst (maximum) CPU and
// memory values across runs win; buffer is 0 to keep the math exact.
func TestComputeSizing_MultipleRuns(t *testing.T) {
	// Run 1: lower values
	run1 := summary.RunSummary{
		Containers: []summary.ContainerSummary{
			{
				Name:        "runner",
				CPUCores:    summary.StatSummary{Peak: 0.5, P95: 0.4},
				MemoryBytes: summary.StatSummary{Peak: 256 * 1024 * 1024},
			},
		},
	}
	// Run 2: higher values (should be used)
	run2 := summary.RunSummary{
		Containers: []summary.ContainerSummary{
			{
				Name:        "runner",
				CPUCores:    summary.StatSummary{Peak: 1.0, P95: 0.8},
				MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024},
			},
		},
	}

	payload1, _ := json.Marshal(run1)
	payload2, _ := json.Marshal(run2)
	metrics := []Metric{
		{Payload: string(payload1)},
		{Payload: string(payload2)},
	}

	resp, err := computeSizing(metrics, 0, "p95") // no buffer for easier math
	if err != nil {
		t.Fatalf("computeSizing() error = %v", err)
	}

	c := resp.Containers[0]

	// CPU: max(0.4, 0.8) = 0.8
	if c.CPU.Request != "800m" {
		t.Errorf("CPU request = %q, want %q", c.CPU.Request, "800m")
	}

	// Memory: max(256, 512) = 512Mi
	if c.Memory.Request != "512Mi" {
		t.Errorf("Memory request = %q, want %q", c.Memory.Request, "512Mi")
	}

	if resp.Meta.RunsAnalyzed != 2 {
		t.Errorf("runs_analyzed = %d, want 2", resp.Meta.RunsAnalyzed)
	}
}
|
||||
|
||||
// TestComputeSizing_MultipleContainers checks per-container output order
// (alphabetical) and that the Total section sums the buffered values.
func TestComputeSizing_MultipleContainers(t *testing.T) {
	runSummary := summary.RunSummary{
		Containers: []summary.ContainerSummary{
			{
				Name:        "runner",
				CPUCores:    summary.StatSummary{P95: 1.0},
				MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024},
			},
			{
				Name:        "dind",
				CPUCores:    summary.StatSummary{P95: 0.5},
				MemoryBytes: summary.StatSummary{Peak: 256 * 1024 * 1024},
			},
		},
	}

	payload, _ := json.Marshal(runSummary)
	metrics := []Metric{{Payload: string(payload)}}

	resp, err := computeSizing(metrics, 0, "p95")
	if err != nil {
		t.Fatalf("computeSizing() error = %v", err)
	}

	if len(resp.Containers) != 2 {
		t.Fatalf("got %d containers, want 2", len(resp.Containers))
	}

	// Containers should be sorted alphabetically
	if resp.Containers[0].Name != "dind" {
		t.Errorf("first container = %q, want %q", resp.Containers[0].Name, "dind")
	}
	if resp.Containers[1].Name != "runner" {
		t.Errorf("second container = %q, want %q", resp.Containers[1].Name, "runner")
	}

	// Total should be sum
	if resp.Total.CPU.Request != "1500m" {
		t.Errorf("total CPU request = %q, want %q", resp.Total.CPU.Request, "1500m")
	}
	if resp.Total.Memory.Request != "768Mi" {
		t.Errorf("total memory request = %q, want %q", resp.Total.Memory.Request, "768Mi")
	}
}
|
||||
|
||||
// TestComputeSizing_NoMetrics checks that an empty input slice is rejected.
func TestComputeSizing_NoMetrics(t *testing.T) {
	_, err := computeSizing([]Metric{}, 20, "p95")
	if err == nil {
		t.Error("computeSizing() with no metrics should return error")
	}
}
|
||||
|
||||
// TestHandler_GetSizing exercises the happy path of the sizing endpoint:
// three stored runs, default query parameters (runs=5, buffer=20, p95),
// and a well-formed SizingResponse body.
func TestHandler_GetSizing(t *testing.T) {
	const readToken = "test-token"
	h, cleanup := newTestHandlerWithToken(t, readToken)
	defer cleanup()

	// Save metrics with container data
	for i := 0; i < 3; i++ {
		runSummary := summary.RunSummary{
			Containers: []summary.ContainerSummary{
				{
					Name:        "runner",
					CPUCores:    summary.StatSummary{Peak: 1.0, P99: 0.9, P95: 0.8, P75: 0.6, P50: 0.5, Avg: 0.4},
					MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024},
				},
			},
		}
		payload := &MetricsPayload{
			Execution: ExecutionContext{
				Organization: "org",
				Repository:   "repo",
				Workflow:     "ci.yml",
				Job:          "build",
				RunID:        "run-" + string(rune('1'+i)),
			},
			Summary: runSummary,
		}
		if _, err := h.store.SaveMetric(payload); err != nil {
			t.Fatalf("SaveMetric() error = %v", err)
		}
	}

	req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build", nil)
	req.Header.Set("Authorization", "Bearer "+readToken)
	rec := httptest.NewRecorder()

	mux := http.NewServeMux()
	h.RegisterRoutes(mux)
	mux.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("status = %d, want %d", rec.Code, http.StatusOK)
	}

	var resp SizingResponse
	if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}

	if len(resp.Containers) != 1 {
		t.Errorf("got %d containers, want 1", len(resp.Containers))
	}
	if resp.Meta.RunsAnalyzed != 3 {
		t.Errorf("runs_analyzed = %d, want 3", resp.Meta.RunsAnalyzed)
	}
	if resp.Meta.BufferPercent != 20 {
		t.Errorf("buffer_percent = %d, want 20", resp.Meta.BufferPercent)
	}
	if resp.Meta.CPUPercentile != "p95" {
		t.Errorf("cpu_percentile = %q, want %q", resp.Meta.CPUPercentile, "p95")
	}
}
|
||||
|
||||
// TestHandler_GetSizing_CustomParams verifies the runs/buffer/cpu_percentile
// query parameters are honored (p75 CPU with a 10% buffer).
func TestHandler_GetSizing_CustomParams(t *testing.T) {
	const readToken = "test-token"
	h, cleanup := newTestHandlerWithToken(t, readToken)
	defer cleanup()

	// Save one metric
	runSummary := summary.RunSummary{
		Containers: []summary.ContainerSummary{
			{
				Name:        "runner",
				CPUCores:    summary.StatSummary{Peak: 1.0, P99: 0.9, P95: 0.8, P75: 0.6, P50: 0.5, Avg: 0.4},
				MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024},
			},
		},
	}
	payload := &MetricsPayload{
		Execution: ExecutionContext{Organization: "org", Repository: "repo", Workflow: "ci.yml", Job: "build", RunID: "run-1"},
		Summary:   runSummary,
	}
	if _, err := h.store.SaveMetric(payload); err != nil {
		t.Fatalf("SaveMetric() error = %v", err)
	}

	req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build?runs=10&buffer=10&cpu_percentile=p75", nil)
	req.Header.Set("Authorization", "Bearer "+readToken)
	rec := httptest.NewRecorder()

	mux := http.NewServeMux()
	h.RegisterRoutes(mux)
	mux.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("status = %d, want %d", rec.Code, http.StatusOK)
	}

	var resp SizingResponse
	if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}

	if resp.Meta.BufferPercent != 10 {
		t.Errorf("buffer_percent = %d, want 10", resp.Meta.BufferPercent)
	}
	if resp.Meta.CPUPercentile != "p75" {
		t.Errorf("cpu_percentile = %q, want %q", resp.Meta.CPUPercentile, "p75")
	}

	// CPU: 0.6 * 1.1 = 0.66
	c := resp.Containers[0]
	if c.CPU.Request != "660m" {
		t.Errorf("CPU request = %q, want %q", c.CPU.Request, "660m")
	}
}
|
||||
|
||||
// TestHandler_GetSizing_NotFound verifies a 404 when no metrics exist for
// the requested workflow/job.
func TestHandler_GetSizing_NotFound(t *testing.T) {
	const readToken = "test-token"
	h, cleanup := newTestHandlerWithToken(t, readToken)
	defer cleanup()

	req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build", nil)
	req.Header.Set("Authorization", "Bearer "+readToken)
	rec := httptest.NewRecorder()

	mux := http.NewServeMux()
	h.RegisterRoutes(mux)
	mux.ServeHTTP(rec, req)

	if rec.Code != http.StatusNotFound {
		t.Errorf("status = %d, want %d", rec.Code, http.StatusNotFound)
	}
}
|
||||
|
||||
// TestHandler_GetSizing_InvalidPercentile verifies an unsupported
// cpu_percentile value is rejected with 400 before any store access.
func TestHandler_GetSizing_InvalidPercentile(t *testing.T) {
	const readToken = "test-token"
	h, cleanup := newTestHandlerWithToken(t, readToken)
	defer cleanup()

	req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build?cpu_percentile=p80", nil)
	req.Header.Set("Authorization", "Bearer "+readToken)
	rec := httptest.NewRecorder()

	mux := http.NewServeMux()
	h.RegisterRoutes(mux)
	mux.ServeHTTP(rec, req)

	if rec.Code != http.StatusBadRequest {
		t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest)
	}
}
|
||||
|
||||
// TestHandler_GetSizing_AuthRequired verifies bearer-token enforcement:
// missing/wrong tokens get 401; a valid token proceeds (404 here because
// the store is empty).
func TestHandler_GetSizing_AuthRequired(t *testing.T) {
	const readToken = "test-token"
	h, cleanup := newTestHandlerWithToken(t, readToken)
	defer cleanup()

	tests := []struct {
		name       string
		authHeader string
		wantCode   int
	}{
		{"no auth", "", http.StatusUnauthorized},
		{"wrong token", "Bearer wrong-token", http.StatusUnauthorized},
		{"valid token", "Bearer " + readToken, http.StatusNotFound}, // no metrics, but auth works
	}

	mux := http.NewServeMux()
	h.RegisterRoutes(mux)

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build", nil)
			if tt.authHeader != "" {
				req.Header.Set("Authorization", tt.authHeader)
			}
			rec := httptest.NewRecorder()
			mux.ServeHTTP(rec, req)

			if rec.Code != tt.wantCode {
				t.Errorf("status = %d, want %d", rec.Code, tt.wantCode)
			}
		})
	}
}
|
||||
|
|
@ -103,6 +103,16 @@ func (s *Store) GetMetricsByWorkflowJob(org, repo, workflow, job string) ([]Metr
|
|||
return metrics, result.Error
|
||||
}
|
||||
|
||||
// GetRecentMetricsByWorkflowJob retrieves up to limit metrics matching the
// given org/repo/workflow/job, newest first (ordered by received_at DESC).
// Returns an empty slice (no error) when nothing matches.
func (s *Store) GetRecentMetricsByWorkflowJob(org, repo, workflow, job string, limit int) ([]Metric, error) {
	var metrics []Metric
	result := s.db.Where(
		"organization = ? AND repository = ? AND workflow = ? AND job = ?",
		org, repo, workflow, job,
	).Order("received_at DESC").Limit(limit).Find(&metrics)
	return metrics, result.Error
}
|
||||
|
||||
// Close closes the database connection
|
||||
func (s *Store) Close() error {
|
||||
sqlDB, err := s.db.DB()
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue