From 8101e9b20e926c42157852a69d6722f41c3294bf Mon Sep 17 00:00:00 2001 From: Manuel Ganter Date: Fri, 13 Feb 2026 12:02:53 +0100 Subject: [PATCH 1/9] feat: added first iteration sizes not recomender --- internal/receiver/handler.go | 70 +++++ internal/receiver/sizing.go | 228 ++++++++++++++ internal/receiver/sizing_test.go | 494 +++++++++++++++++++++++++++++++ internal/receiver/store.go | 10 + 4 files changed, 802 insertions(+) create mode 100644 internal/receiver/sizing.go create mode 100644 internal/receiver/sizing_test.go diff --git a/internal/receiver/handler.go b/internal/receiver/handler.go index d847f62..eb1069d 100644 --- a/internal/receiver/handler.go +++ b/internal/receiver/handler.go @@ -5,6 +5,7 @@ package receiver import ( "crypto/subtle" "encoding/json" + "fmt" "log/slog" "net/http" "strings" @@ -30,6 +31,7 @@ func (h *Handler) RegisterRoutes(mux *http.ServeMux) { mux.HandleFunc("POST /api/v1/metrics", h.handleReceiveMetrics) mux.HandleFunc("POST /api/v1/token", h.handleGenerateToken) mux.HandleFunc("GET /api/v1/metrics/repo/{org}/{repo}/{workflow}/{job}", h.handleGetByWorkflowJob) + mux.HandleFunc("GET /api/v1/sizing/repo/{org}/{repo}/{workflow}/{job}", h.handleGetSizing) mux.HandleFunc("GET /health", h.handleHealth) } @@ -194,3 +196,71 @@ func (h *Handler) handleHealth(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") _ = json.NewEncoder(w).Encode(map[string]string{"status": "ok"}) } + +func (h *Handler) handleGetSizing(w http.ResponseWriter, r *http.Request) { + if !h.validateReadToken(w, r) { + return + } + + org := r.PathValue("org") + repo := r.PathValue("repo") + workflow := r.PathValue("workflow") + job := r.PathValue("job") + if org == "" || repo == "" || workflow == "" || job == "" { + http.Error(w, "org, repo, workflow and job are required", http.StatusBadRequest) + return + } + + // Parse query parameters with defaults + runs := parseIntQueryParam(r, "runs", 5, 1, 100) + buffer := parseIntQueryParam(r, "buffer", 20, 0, 100) + cpuPercentile := r.URL.Query().Get("cpu_percentile") + if cpuPercentile == "" { + cpuPercentile = "p95" + } + if !IsValidPercentile(cpuPercentile) { + http.Error(w, "invalid cpu_percentile: must be one of peak, p99, p95, p75, p50, avg", http.StatusBadRequest) + return + } + + metrics, err := h.store.GetRecentMetricsByWorkflowJob(org, repo, workflow, job, runs) + if err != nil { + h.logger.Error("failed to get metrics", slog.String("error", err.Error())) + http.Error(w, "failed to get metrics", http.StatusInternalServerError) + return + } + + if len(metrics) == 0 { + http.Error(w, "no metrics found for this workflow/job", http.StatusNotFound) + return + } + + response, err := computeSizing(metrics, buffer, cpuPercentile) + if err != nil { + h.logger.Error("failed to compute sizing", slog.String("error", err.Error())) + http.Error(w, "failed to compute sizing", http.StatusInternalServerError) + return + } + + w.Header().Set("Content-Type", "application/json") + _ = json.NewEncoder(w).Encode(response) +} + +// parseIntQueryParam parses an integer query parameter with default, min, and max values +func parseIntQueryParam(r *http.Request, name string, defaultVal, minVal, maxVal int) int { + strVal := r.URL.Query().Get(name) + if strVal == "" { + return defaultVal + } + var val int + if _, err := fmt.Sscanf(strVal, "%d", &val); err != nil { + return defaultVal + } + if val < minVal { + return minVal + } + if val > maxVal { + return maxVal + } + return val +} diff --git a/internal/receiver/sizing.go 
b/internal/receiver/sizing.go new file mode 100644 index 0000000..f037585 --- /dev/null +++ b/internal/receiver/sizing.go @@ -0,0 +1,228 @@ +// ABOUTME: Computes ideal container sizes from historical run data. +// ABOUTME: Provides Kubernetes-style resource recommendations. +package receiver + +import ( + "encoding/json" + "fmt" + "math" + "sort" + + "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary" +) + +// ResourceSize holds Kubernetes-formatted resource values +type ResourceSize struct { + Request string `json:"request"` + Limit string `json:"limit"` +} + +// ContainerSizing holds computed sizing for a single container +type ContainerSizing struct { + Name string `json:"name"` + CPU ResourceSize `json:"cpu"` + Memory ResourceSize `json:"memory"` +} + +// SizingMeta provides context about the sizing calculation +type SizingMeta struct { + RunsAnalyzed int `json:"runs_analyzed"` + BufferPercent int `json:"buffer_percent"` + CPUPercentile string `json:"cpu_percentile"` +} + +// SizingResponse is the API response for the sizing endpoint +type SizingResponse struct { + Containers []ContainerSizing `json:"containers"` + Total struct { + CPU ResourceSize `json:"cpu"` + Memory ResourceSize `json:"memory"` + } `json:"total"` + Meta SizingMeta `json:"meta"` +} + +// validPercentiles lists the allowed percentile values +var validPercentiles = map[string]bool{ + "peak": true, + "p99": true, + "p95": true, + "p75": true, + "p50": true, + "avg": true, +} + +// IsValidPercentile checks if the given percentile string is valid +func IsValidPercentile(p string) bool { + return validPercentiles[p] +} + +// selectCPUValue extracts the appropriate value from StatSummary based on percentile +func selectCPUValue(stats summary.StatSummary, percentile string) float64 { + switch percentile { + case "peak": + return stats.Peak + case "p99": + return stats.P99 + case "p95": + return stats.P95 + case "p75": + return stats.P75 + case "p50": + return stats.P50 + case "avg": + return stats.Avg + default: + return stats.P95 // default to p95 + } +} + +// formatMemoryK8s converts bytes to Kubernetes memory format (Mi or Gi) +func formatMemoryK8s(bytes float64) string { + const ( + Mi = 1024 * 1024 + Gi = 1024 * 1024 * 1024 + ) + + if bytes >= Gi { + return fmt.Sprintf("%.0fGi", math.Ceil(bytes/Gi)) + } + return fmt.Sprintf("%.0fMi", math.Ceil(bytes/Mi)) +} + +// formatCPUK8s converts cores to Kubernetes CPU format (millicores or whole cores) +func formatCPUK8s(cores float64) string { + millicores := cores * 1000 + if millicores >= 1000 && math.Mod(millicores, 1000) == 0 { + return fmt.Sprintf("%.0f", cores) + } + return fmt.Sprintf("%.0fm", math.Ceil(millicores)) +} + +// roundUpMemoryLimit rounds bytes up to the next power of 2 in Mi +func roundUpMemoryLimit(bytes float64) float64 { + const Mi = 1024 * 1024 + if bytes <= 0 { + return Mi // minimum 1Mi + } + miValue := bytes / Mi + if miValue <= 1 { + return Mi // minimum 1Mi + } + // Find next power of 2 + power := math.Ceil(math.Log2(miValue)) + return math.Pow(2, power) * Mi +} + +// roundUpCPULimit rounds cores up to the next 0.5 increment +func roundUpCPULimit(cores float64) float64 { + if cores <= 0 { + return 0.5 // minimum 0.5 cores + } + return math.Ceil(cores*2) / 2 +} + +// containerAggregation holds accumulated stats for a single container across runs +type containerAggregation struct { + cpuValues []float64 + memoryPeaks []float64 +} + +// computeSizing calculates ideal container sizes from metrics +func computeSizing(metrics []Metric, 
bufferPercent int, cpuPercentile string) (*SizingResponse, error) { + if len(metrics) == 0 { + return nil, fmt.Errorf("no metrics provided") + } + + // Aggregate container stats across all runs + containerStats := make(map[string]*containerAggregation) + + for _, m := range metrics { + var runSummary summary.RunSummary + if err := json.Unmarshal([]byte(m.Payload), &runSummary); err != nil { + continue // skip invalid payloads + } + + for _, c := range runSummary.Containers { + if _, exists := containerStats[c.Name]; !exists { + containerStats[c.Name] = &containerAggregation{ + cpuValues: make([]float64, 0), + memoryPeaks: make([]float64, 0), + } + } + agg := containerStats[c.Name] + agg.cpuValues = append(agg.cpuValues, selectCPUValue(c.CPUCores, cpuPercentile)) + agg.memoryPeaks = append(agg.memoryPeaks, c.MemoryBytes.Peak) + } + } + + // Calculate sizing for each container + bufferMultiplier := 1.0 + float64(bufferPercent)/100.0 + var containers []ContainerSizing + var totalCPU, totalMemory float64 + + // Sort container names for consistent output + names := make([]string, 0, len(containerStats)) + for name := range containerStats { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + agg := containerStats[name] + + // CPU: max of selected percentile values across runs + maxCPU := 0.0 + for _, v := range agg.cpuValues { + if v > maxCPU { + maxCPU = v + } + } + + // Memory: peak of peaks + maxMemory := 0.0 + for _, v := range agg.memoryPeaks { + if v > maxMemory { + maxMemory = v + } + } + + // Apply buffer + cpuWithBuffer := maxCPU * bufferMultiplier + memoryWithBuffer := maxMemory * bufferMultiplier + + containers = append(containers, ContainerSizing{ + Name: name, + CPU: ResourceSize{ + Request: formatCPUK8s(cpuWithBuffer), + Limit: formatCPUK8s(roundUpCPULimit(cpuWithBuffer)), + }, + Memory: ResourceSize{ + Request: formatMemoryK8s(memoryWithBuffer), + Limit: formatMemoryK8s(roundUpMemoryLimit(memoryWithBuffer)), + }, + }) + + totalCPU += cpuWithBuffer + totalMemory += memoryWithBuffer + } + + response := &SizingResponse{ + Containers: containers, + Meta: SizingMeta{ + RunsAnalyzed: len(metrics), + BufferPercent: bufferPercent, + CPUPercentile: cpuPercentile, + }, + } + + response.Total.CPU = ResourceSize{ + Request: formatCPUK8s(totalCPU), + Limit: formatCPUK8s(roundUpCPULimit(totalCPU)), + } + response.Total.Memory = ResourceSize{ + Request: formatMemoryK8s(totalMemory), + Limit: formatMemoryK8s(roundUpMemoryLimit(totalMemory)), + } + + return response, nil +} diff --git a/internal/receiver/sizing_test.go b/internal/receiver/sizing_test.go new file mode 100644 index 0000000..fc16129 --- /dev/null +++ b/internal/receiver/sizing_test.go @@ -0,0 +1,494 @@ +package receiver + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary" +) + +func TestFormatMemoryK8s(t *testing.T) { + tests := []struct { + bytes float64 + want string + }{ + {0, "0Mi"}, + {1024 * 1024, "1Mi"}, + {256 * 1024 * 1024, "256Mi"}, + {512 * 1024 * 1024, "512Mi"}, + {1024 * 1024 * 1024, "1Gi"}, + {2 * 1024 * 1024 * 1024, "2Gi"}, + {1.5 * 1024 * 1024 * 1024, "2Gi"}, // rounds up + {100 * 1024 * 1024, "100Mi"}, + } + + for _, tt := range tests { + got := formatMemoryK8s(tt.bytes) + if got != tt.want { + t.Errorf("formatMemoryK8s(%v) = %q, want %q", tt.bytes, got, tt.want) + } + } +} + +func TestFormatCPUK8s(t *testing.T) { + tests := []struct { + cores float64 + want string + }{ + {0, 
"0m"}, + {0.1, "100m"}, + {0.5, "500m"}, + {1.0, "1"}, + {1.5, "1500m"}, + {2.0, "2"}, + {2.5, "2500m"}, + {0.123, "123m"}, + } + + for _, tt := range tests { + got := formatCPUK8s(tt.cores) + if got != tt.want { + t.Errorf("formatCPUK8s(%v) = %q, want %q", tt.cores, got, tt.want) + } + } +} + +func TestRoundUpMemoryLimit(t *testing.T) { + Mi := float64(1024 * 1024) + tests := []struct { + bytes float64 + want float64 + }{ + {0, Mi}, // minimum 1Mi + {100, Mi}, // rounds up to 1Mi + {Mi, Mi}, // exactly 1Mi stays 1Mi + {1.5 * Mi, 2 * Mi}, + {200 * Mi, 256 * Mi}, + {300 * Mi, 512 * Mi}, + {600 * Mi, 1024 * Mi}, + } + + for _, tt := range tests { + got := roundUpMemoryLimit(tt.bytes) + if got != tt.want { + t.Errorf("roundUpMemoryLimit(%v) = %v, want %v", tt.bytes, got, tt.want) + } + } +} + +func TestRoundUpCPULimit(t *testing.T) { + tests := []struct { + cores float64 + want float64 + }{ + {0, 0.5}, // minimum 0.5 + {0.1, 0.5}, + {0.5, 0.5}, + {0.6, 1.0}, + {1.0, 1.0}, + {1.1, 1.5}, + {1.5, 1.5}, + {2.0, 2.0}, + {2.3, 2.5}, + } + + for _, tt := range tests { + got := roundUpCPULimit(tt.cores) + if got != tt.want { + t.Errorf("roundUpCPULimit(%v) = %v, want %v", tt.cores, got, tt.want) + } + } +} + +func TestSelectCPUValue(t *testing.T) { + stats := summary.StatSummary{ + Peak: 10.0, + P99: 9.0, + P95: 8.0, + P75: 6.0, + P50: 5.0, + Avg: 4.0, + } + + tests := []struct { + percentile string + want float64 + }{ + {"peak", 10.0}, + {"p99", 9.0}, + {"p95", 8.0}, + {"p75", 6.0}, + {"p50", 5.0}, + {"avg", 4.0}, + {"invalid", 8.0}, // defaults to p95 + } + + for _, tt := range tests { + got := selectCPUValue(stats, tt.percentile) + if got != tt.want { + t.Errorf("selectCPUValue(stats, %q) = %v, want %v", tt.percentile, got, tt.want) + } + } +} + +func TestIsValidPercentile(t *testing.T) { + valid := []string{"peak", "p99", "p95", "p75", "p50", "avg"} + for _, p := range valid { + if !IsValidPercentile(p) { + t.Errorf("IsValidPercentile(%q) = false, want true", p) + } + } + + invalid := []string{"p80", "p90", "max", ""} + for _, p := range invalid { + if IsValidPercentile(p) { + t.Errorf("IsValidPercentile(%q) = true, want false", p) + } + } +} + +func TestComputeSizing_SingleRun(t *testing.T) { + runSummary := summary.RunSummary{ + Containers: []summary.ContainerSummary{ + { + Name: "runner", + CPUCores: summary.StatSummary{Peak: 1.0, P99: 0.9, P95: 0.8, P75: 0.6, P50: 0.5, Avg: 0.4}, + MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024}, // 512Mi + }, + }, + } + + payload, _ := json.Marshal(runSummary) + metrics := []Metric{{Payload: string(payload)}} + + resp, err := computeSizing(metrics, 20, "p95") + if err != nil { + t.Fatalf("computeSizing() error = %v", err) + } + + if len(resp.Containers) != 1 { + t.Fatalf("got %d containers, want 1", len(resp.Containers)) + } + + c := resp.Containers[0] + if c.Name != "runner" { + t.Errorf("container name = %q, want %q", c.Name, "runner") + } + + // CPU: 0.8 * 1.2 = 0.96 -> 960m request, 1 limit + if c.CPU.Request != "960m" { + t.Errorf("CPU request = %q, want %q", c.CPU.Request, "960m") + } + if c.CPU.Limit != "1" { + t.Errorf("CPU limit = %q, want %q", c.CPU.Limit, "1") + } + + // Memory: 512Mi * 1.2 = 614.4Mi -> 615Mi request, 1Gi limit (1024Mi = 1Gi) + if c.Memory.Request != "615Mi" { + t.Errorf("Memory request = %q, want %q", c.Memory.Request, "615Mi") + } + if c.Memory.Limit != "1Gi" { + t.Errorf("Memory limit = %q, want %q", c.Memory.Limit, "1Gi") + } + + if resp.Meta.RunsAnalyzed != 1 { + t.Errorf("runs_analyzed = %d, want 1", 
resp.Meta.RunsAnalyzed) + } + if resp.Meta.BufferPercent != 20 { + t.Errorf("buffer_percent = %d, want 20", resp.Meta.BufferPercent) + } + if resp.Meta.CPUPercentile != "p95" { + t.Errorf("cpu_percentile = %q, want %q", resp.Meta.CPUPercentile, "p95") + } +} + +func TestComputeSizing_MultipleRuns(t *testing.T) { + // Run 1: lower values + run1 := summary.RunSummary{ + Containers: []summary.ContainerSummary{ + { + Name: "runner", + CPUCores: summary.StatSummary{Peak: 0.5, P95: 0.4}, + MemoryBytes: summary.StatSummary{Peak: 256 * 1024 * 1024}, + }, + }, + } + // Run 2: higher values (should be used) + run2 := summary.RunSummary{ + Containers: []summary.ContainerSummary{ + { + Name: "runner", + CPUCores: summary.StatSummary{Peak: 1.0, P95: 0.8}, + MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024}, + }, + }, + } + + payload1, _ := json.Marshal(run1) + payload2, _ := json.Marshal(run2) + metrics := []Metric{ + {Payload: string(payload1)}, + {Payload: string(payload2)}, + } + + resp, err := computeSizing(metrics, 0, "p95") // no buffer for easier math + if err != nil { + t.Fatalf("computeSizing() error = %v", err) + } + + c := resp.Containers[0] + + // CPU: max(0.4, 0.8) = 0.8 + if c.CPU.Request != "800m" { + t.Errorf("CPU request = %q, want %q", c.CPU.Request, "800m") + } + + // Memory: max(256, 512) = 512Mi + if c.Memory.Request != "512Mi" { + t.Errorf("Memory request = %q, want %q", c.Memory.Request, "512Mi") + } + + if resp.Meta.RunsAnalyzed != 2 { + t.Errorf("runs_analyzed = %d, want 2", resp.Meta.RunsAnalyzed) + } +} + +func TestComputeSizing_MultipleContainers(t *testing.T) { + runSummary := summary.RunSummary{ + Containers: []summary.ContainerSummary{ + { + Name: "runner", + CPUCores: summary.StatSummary{P95: 1.0}, + MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024}, + }, + { + Name: "dind", + CPUCores: summary.StatSummary{P95: 0.5}, + MemoryBytes: summary.StatSummary{Peak: 256 * 1024 * 1024}, + }, + }, + } + + payload, _ := json.Marshal(runSummary) + metrics := []Metric{{Payload: string(payload)}} + + resp, err := computeSizing(metrics, 0, "p95") + if err != nil { + t.Fatalf("computeSizing() error = %v", err) + } + + if len(resp.Containers) != 2 { + t.Fatalf("got %d containers, want 2", len(resp.Containers)) + } + + // Containers should be sorted alphabetically + if resp.Containers[0].Name != "dind" { + t.Errorf("first container = %q, want %q", resp.Containers[0].Name, "dind") + } + if resp.Containers[1].Name != "runner" { + t.Errorf("second container = %q, want %q", resp.Containers[1].Name, "runner") + } + + // Total should be sum + if resp.Total.CPU.Request != "1500m" { + t.Errorf("total CPU request = %q, want %q", resp.Total.CPU.Request, "1500m") + } + if resp.Total.Memory.Request != "768Mi" { + t.Errorf("total memory request = %q, want %q", resp.Total.Memory.Request, "768Mi") + } +} + +func TestComputeSizing_NoMetrics(t *testing.T) { + _, err := computeSizing([]Metric{}, 20, "p95") + if err == nil { + t.Error("computeSizing() with no metrics should return error") + } +} + +func TestHandler_GetSizing(t *testing.T) { + const readToken = "test-token" + h, cleanup := newTestHandlerWithToken(t, readToken) + defer cleanup() + + // Save metrics with container data + for i := 0; i < 3; i++ { + runSummary := summary.RunSummary{ + Containers: []summary.ContainerSummary{ + { + Name: "runner", + CPUCores: summary.StatSummary{Peak: 1.0, P99: 0.9, P95: 0.8, P75: 0.6, P50: 0.5, Avg: 0.4}, + MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024}, + }, + }, + } + payload := 
&MetricsPayload{ + Execution: ExecutionContext{ + Organization: "org", + Repository: "repo", + Workflow: "ci.yml", + Job: "build", + RunID: "run-" + string(rune('1'+i)), + }, + Summary: runSummary, + } + if _, err := h.store.SaveMetric(payload); err != nil { + t.Fatalf("SaveMetric() error = %v", err) + } + } + + req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build", nil) + req.Header.Set("Authorization", "Bearer "+readToken) + rec := httptest.NewRecorder() + + mux := http.NewServeMux() + h.RegisterRoutes(mux) + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) + } + + var resp SizingResponse + if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if len(resp.Containers) != 1 { + t.Errorf("got %d containers, want 1", len(resp.Containers)) + } + if resp.Meta.RunsAnalyzed != 3 { + t.Errorf("runs_analyzed = %d, want 3", resp.Meta.RunsAnalyzed) + } + if resp.Meta.BufferPercent != 20 { + t.Errorf("buffer_percent = %d, want 20", resp.Meta.BufferPercent) + } + if resp.Meta.CPUPercentile != "p95" { + t.Errorf("cpu_percentile = %q, want %q", resp.Meta.CPUPercentile, "p95") + } +} + +func TestHandler_GetSizing_CustomParams(t *testing.T) { + const readToken = "test-token" + h, cleanup := newTestHandlerWithToken(t, readToken) + defer cleanup() + + // Save one metric + runSummary := summary.RunSummary{ + Containers: []summary.ContainerSummary{ + { + Name: "runner", + CPUCores: summary.StatSummary{Peak: 1.0, P99: 0.9, P95: 0.8, P75: 0.6, P50: 0.5, Avg: 0.4}, + MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024}, + }, + }, + } + payload := &MetricsPayload{ + Execution: ExecutionContext{Organization: "org", Repository: "repo", Workflow: "ci.yml", Job: "build", RunID: "run-1"}, + Summary: runSummary, + } + if _, err := h.store.SaveMetric(payload); err != nil { + t.Fatalf("SaveMetric() error = %v", err) + } + + req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build?runs=10&buffer=10&cpu_percentile=p75", nil) + req.Header.Set("Authorization", "Bearer "+readToken) + rec := httptest.NewRecorder() + + mux := http.NewServeMux() + h.RegisterRoutes(mux) + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusOK { + t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) + } + + var resp SizingResponse + if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil { + t.Fatalf("failed to decode response: %v", err) + } + + if resp.Meta.BufferPercent != 10 { + t.Errorf("buffer_percent = %d, want 10", resp.Meta.BufferPercent) + } + if resp.Meta.CPUPercentile != "p75" { + t.Errorf("cpu_percentile = %q, want %q", resp.Meta.CPUPercentile, "p75") + } + + // CPU: 0.6 * 1.1 = 0.66 + c := resp.Containers[0] + if c.CPU.Request != "660m" { + t.Errorf("CPU request = %q, want %q", c.CPU.Request, "660m") + } +} + +func TestHandler_GetSizing_NotFound(t *testing.T) { + const readToken = "test-token" + h, cleanup := newTestHandlerWithToken(t, readToken) + defer cleanup() + + req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build", nil) + req.Header.Set("Authorization", "Bearer "+readToken) + rec := httptest.NewRecorder() + + mux := http.NewServeMux() + h.RegisterRoutes(mux) + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusNotFound { + t.Errorf("status = %d, want %d", rec.Code, http.StatusNotFound) + } +} + +func TestHandler_GetSizing_InvalidPercentile(t *testing.T) { + const readToken = 
"test-token" + h, cleanup := newTestHandlerWithToken(t, readToken) + defer cleanup() + + req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build?cpu_percentile=p80", nil) + req.Header.Set("Authorization", "Bearer "+readToken) + rec := httptest.NewRecorder() + + mux := http.NewServeMux() + h.RegisterRoutes(mux) + mux.ServeHTTP(rec, req) + + if rec.Code != http.StatusBadRequest { + t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + } +} + +func TestHandler_GetSizing_AuthRequired(t *testing.T) { + const readToken = "test-token" + h, cleanup := newTestHandlerWithToken(t, readToken) + defer cleanup() + + tests := []struct { + name string + authHeader string + wantCode int + }{ + {"no auth", "", http.StatusUnauthorized}, + {"wrong token", "Bearer wrong-token", http.StatusUnauthorized}, + {"valid token", "Bearer " + readToken, http.StatusNotFound}, // no metrics, but auth works + } + + mux := http.NewServeMux() + h.RegisterRoutes(mux) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build", nil) + if tt.authHeader != "" { + req.Header.Set("Authorization", tt.authHeader) + } + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + + if rec.Code != tt.wantCode { + t.Errorf("status = %d, want %d", rec.Code, tt.wantCode) + } + }) + } +} diff --git a/internal/receiver/store.go b/internal/receiver/store.go index 1b934de..7d81959 100644 --- a/internal/receiver/store.go +++ b/internal/receiver/store.go @@ -103,6 +103,16 @@ func (s *Store) GetMetricsByWorkflowJob(org, repo, workflow, job string) ([]Metr return metrics, result.Error } +// GetRecentMetricsByWorkflowJob retrieves the last N metrics ordered by received_at DESC +func (s *Store) GetRecentMetricsByWorkflowJob(org, repo, workflow, job string, limit int) ([]Metric, error) { + var metrics []Metric + result := s.db.Where( + "organization = ? AND repository = ? AND workflow = ? 
AND job = ?", + org, repo, workflow, job, + ).Order("received_at DESC").Limit(limit).Find(&metrics) + return metrics, result.Error +} + // Close closes the database connection func (s *Store) Close() error { sqlDB, err := s.db.DB() From a96a1079eb98c1f236a0def49b8343ac1a7d965b Mon Sep 17 00:00:00 2001 From: Manuel Ganter Date: Fri, 13 Feb 2026 12:30:32 +0100 Subject: [PATCH 2/9] fix: now sizer does not round up to the next Gi when in between two --- internal/receiver/sizing.go | 11 ++--------- internal/receiver/sizing_test.go | 12 ++++++------ 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/internal/receiver/sizing.go b/internal/receiver/sizing.go index f037585..928a2f5 100644 --- a/internal/receiver/sizing.go +++ b/internal/receiver/sizing.go @@ -76,16 +76,9 @@ func selectCPUValue(stats summary.StatSummary, percentile string) float64 { } } -// formatMemoryK8s converts bytes to Kubernetes memory format (Mi or Gi) +// formatMemoryK8s converts bytes to Kubernetes memory format (Mi) func formatMemoryK8s(bytes float64) string { - const ( - Mi = 1024 * 1024 - Gi = 1024 * 1024 * 1024 - ) - - if bytes >= Gi { - return fmt.Sprintf("%.0fGi", math.Ceil(bytes/Gi)) - } + const Mi = 1024 * 1024 return fmt.Sprintf("%.0fMi", math.Ceil(bytes/Mi)) } diff --git a/internal/receiver/sizing_test.go b/internal/receiver/sizing_test.go index fc16129..a1ac3c5 100644 --- a/internal/receiver/sizing_test.go +++ b/internal/receiver/sizing_test.go @@ -18,9 +18,9 @@ func TestFormatMemoryK8s(t *testing.T) { {1024 * 1024, "1Mi"}, {256 * 1024 * 1024, "256Mi"}, {512 * 1024 * 1024, "512Mi"}, - {1024 * 1024 * 1024, "1Gi"}, - {2 * 1024 * 1024 * 1024, "2Gi"}, - {1.5 * 1024 * 1024 * 1024, "2Gi"}, // rounds up + {1024 * 1024 * 1024, "1024Mi"}, + {2 * 1024 * 1024 * 1024, "2048Mi"}, + {1.5 * 1024 * 1024 * 1024, "1536Mi"}, {100 * 1024 * 1024, "100Mi"}, } @@ -185,12 +185,12 @@ func TestComputeSizing_SingleRun(t *testing.T) { t.Errorf("CPU limit = %q, want %q", c.CPU.Limit, "1") } - // Memory: 512Mi * 1.2 = 614.4Mi -> 615Mi request, 1Gi limit (1024Mi = 1Gi) + // Memory: 512Mi * 1.2 = 614.4Mi -> 615Mi request, 1024Mi limit if c.Memory.Request != "615Mi" { t.Errorf("Memory request = %q, want %q", c.Memory.Request, "615Mi") } - if c.Memory.Limit != "1Gi" { - t.Errorf("Memory limit = %q, want %q", c.Memory.Limit, "1Gi") + if c.Memory.Limit != "1024Mi" { + t.Errorf("Memory limit = %q, want %q", c.Memory.Limit, "1024Mi") } if resp.Meta.RunsAnalyzed != 1 { From 7e3a4efb2dca1f6085ba2a4201404a19430212c1 Mon Sep 17 00:00:00 2001 From: Manuel Ganter Date: Fri, 13 Feb 2026 12:48:57 +0100 Subject: [PATCH 3/9] feat: added timestamp to HMAC to allow a TTL for the token --- cmd/receiver/main.go | 3 +- internal/integration/integration_test.go | 12 +- internal/receiver/handler.go | 14 +- internal/receiver/handler_test.go | 20 ++- internal/receiver/token.go | 76 +++++++++-- internal/receiver/token_test.go | 158 +++++++++++++++++++---- 6 files changed, 225 insertions(+), 58 deletions(-) diff --git a/cmd/receiver/main.go b/cmd/receiver/main.go index 42e688e..c540736 100644 --- a/cmd/receiver/main.go +++ b/cmd/receiver/main.go @@ -24,6 +24,7 @@ func main() { dbPath := flag.String("db", defaultDBPath, "SQLite database path") readToken := flag.String("read-token", os.Getenv("RECEIVER_READ_TOKEN"), "Pre-shared token for read endpoints (or set RECEIVER_READ_TOKEN)") hmacKey := flag.String("hmac-key", os.Getenv("RECEIVER_HMAC_KEY"), "Secret key for push token generation/validation (or set RECEIVER_HMAC_KEY)") + tokenTTL := 
flag.Duration("token-ttl", 2*time.Hour, "Time-to-live for push tokens (default 2h)") flag.Parse() logger := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{ @@ -37,7 +38,7 @@ func main() { } defer func() { _ = store.Close() }() - handler := receiver.NewHandler(store, logger, *readToken, *hmacKey) + handler := receiver.NewHandler(store, logger, *readToken, *hmacKey, *tokenTTL) mux := http.NewServeMux() handler.RegisterRoutes(mux) diff --git a/internal/integration/integration_test.go b/internal/integration/integration_test.go index 326e3d5..f21fa6f 100644 --- a/internal/integration/integration_test.go +++ b/internal/integration/integration_test.go @@ -32,7 +32,7 @@ func setupTestReceiver(t *testing.T) (*receiver.Store, *httptest.Server, func()) t.Fatalf("NewStore() error = %v", err) } - handler := receiver.NewHandler(store, slog.New(slog.NewTextHandler(io.Discard, nil)), testReadToken, testHMACKey) + handler := receiver.NewHandler(store, slog.New(slog.NewTextHandler(io.Discard, nil)), testReadToken, testHMACKey, 0) mux := http.NewServeMux() handler.RegisterRoutes(mux) @@ -46,9 +46,9 @@ func setupTestReceiver(t *testing.T) (*receiver.Store, *httptest.Server, func()) return store, server, cleanup } -// generatePushToken generates a scoped push token for an execution context +// generatePushToken generates a push token for an execution context func generatePushToken(exec summary.ExecutionContext) string { - return receiver.GenerateScopedToken(testHMACKey, exec.Organization, exec.Repository, exec.Workflow, exec.Job) + return receiver.GenerateToken(testHMACKey, exec.Organization, exec.Repository, exec.Workflow, exec.Job) } func TestPushClientToReceiver(t *testing.T) { @@ -166,8 +166,8 @@ func TestPushClientIntegration(t *testing.T) { t.Setenv("GITHUB_JOB", "push-job") t.Setenv("GITHUB_RUN_ID", "push-run-456") - // Generate scoped push token - pushToken := receiver.GenerateScopedToken(testHMACKey, "push-client-org", "push-client-repo", "push-test.yml", "push-job") + // Generate push token + pushToken := receiver.GenerateToken(testHMACKey, "push-client-org", "push-client-repo", "push-test.yml", "push-job") // Create push client with token - it reads execution context from env vars pushClient := summary.NewPushClient(server.URL+"/api/v1/metrics", pushToken) @@ -371,7 +371,7 @@ func setupTestReceiverWithToken(t *testing.T, readToken, hmacKey string) (*recei t.Fatalf("NewStore() error = %v", err) } - handler := receiver.NewHandler(store, slog.New(slog.NewTextHandler(io.Discard, nil)), readToken, hmacKey) + handler := receiver.NewHandler(store, slog.New(slog.NewTextHandler(io.Discard, nil)), readToken, hmacKey, 0) mux := http.NewServeMux() handler.RegisterRoutes(mux) diff --git a/internal/receiver/handler.go b/internal/receiver/handler.go index eb1069d..57c09b5 100644 --- a/internal/receiver/handler.go +++ b/internal/receiver/handler.go @@ -9,6 +9,7 @@ import ( "log/slog" "net/http" "strings" + "time" ) // Handler handles HTTP requests for the metrics receiver @@ -17,13 +18,18 @@ type Handler struct { logger *slog.Logger readToken string // Pre-shared token for read endpoint authentication hmacKey string // Separate key for HMAC-based push token generation/validation + tokenTTL time.Duration } // NewHandler creates a new HTTP handler with the given store. // readToken authenticates read endpoints and the token generation endpoint. // hmacKey is the secret used to derive scoped push tokens. 
-func NewHandler(store *Store, logger *slog.Logger, readToken, hmacKey string) *Handler { - return &Handler{store: store, logger: logger, readToken: readToken, hmacKey: hmacKey} +// tokenTTL specifies how long push tokens are valid (0 uses DefaultTokenTTL). +func NewHandler(store *Store, logger *slog.Logger, readToken, hmacKey string, tokenTTL time.Duration) *Handler { + if tokenTTL == 0 { + tokenTTL = DefaultTokenTTL + } + return &Handler{store: store, logger: logger, readToken: readToken, hmacKey: hmacKey, tokenTTL: tokenTTL} } // RegisterRoutes registers all HTTP routes on the given mux @@ -88,7 +94,7 @@ func (h *Handler) handleGenerateToken(w http.ResponseWriter, r *http.Request) { return } - token := GenerateScopedToken(h.hmacKey, req.Organization, req.Repository, req.Workflow, req.Job) + token := GenerateToken(h.hmacKey, req.Organization, req.Repository, req.Workflow, req.Job) w.Header().Set("Content-Type", "application/json") _ = json.NewEncoder(w).Encode(TokenResponse{Token: token}) @@ -117,7 +123,7 @@ func (h *Handler) validatePushToken(w http.ResponseWriter, r *http.Request, exec } token := strings.TrimPrefix(authHeader, bearerPrefix) - if !ValidateScopedToken(h.hmacKey, token, exec.Organization, exec.Repository, exec.Workflow, exec.Job) { + if !ValidateToken(h.hmacKey, token, exec.Organization, exec.Repository, exec.Workflow, exec.Job, h.tokenTTL) { h.logger.Warn("invalid push token", slog.String("path", r.URL.Path)) http.Error(w, "invalid token", http.StatusUnauthorized) return false diff --git a/internal/receiver/handler_test.go b/internal/receiver/handler_test.go index 70d12d9..12b327e 100644 --- a/internal/receiver/handler_test.go +++ b/internal/receiver/handler_test.go @@ -8,6 +8,7 @@ import ( "net/http" "net/http/httptest" "path/filepath" + "strings" "testing" "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary" @@ -25,7 +26,7 @@ func TestHandler_ReceiveMetrics(t *testing.T) { Job: "build", RunID: "run-123", } - pushToken := GenerateScopedToken(readToken, exec.Organization, exec.Repository, exec.Workflow, exec.Job) + pushToken := GenerateToken(readToken, exec.Organization, exec.Repository, exec.Workflow, exec.Job) payload := MetricsPayload{ Execution: exec, @@ -264,8 +265,13 @@ func TestHandler_GenerateToken(t *testing.T) { if resp.Token == "" { t.Error("expected non-empty token") } - if len(resp.Token) != 64 { - t.Errorf("token length = %d, want 64", len(resp.Token)) + // Token format is "timestamp:hmac" where hmac is 64 hex chars + parts := strings.SplitN(resp.Token, ":", 2) + if len(parts) != 2 { + t.Errorf("token should have format 'timestamp:hmac', got %q", resp.Token) + } + if len(parts[1]) != 64 { + t.Errorf("HMAC part length = %d, want 64", len(parts[1])) } } @@ -357,8 +363,8 @@ func TestHandler_ReceiveMetrics_WithPushToken(t *testing.T) { RunID: "run-1", } - validToken := GenerateScopedToken(readToken, exec.Organization, exec.Repository, exec.Workflow, exec.Job) - wrongScopeToken := GenerateScopedToken(readToken, "other-org", "repo", "ci.yml", "build") + validToken := GenerateToken(readToken, exec.Organization, exec.Repository, exec.Workflow, exec.Job) + wrongScopeToken := GenerateToken(readToken, "other-org", "repo", "ci.yml", "build") tests := []struct { name string @@ -448,7 +454,7 @@ func newTestHandler(t *testing.T) (*Handler, func()) { } logger := slog.New(slog.NewTextHandler(io.Discard, nil)) - handler := NewHandler(store, logger, "", "") // no auth — endpoints will reject + handler := NewHandler(store, logger, "", "", 0) // no auth — 
endpoints will reject return handler, func() { _ = store.Close() } } @@ -467,7 +473,7 @@ func newTestHandlerWithKeys(t *testing.T, readToken, hmacKey string) (*Handler, } logger := slog.New(slog.NewTextHandler(io.Discard, nil)) - handler := NewHandler(store, logger, readToken, hmacKey) + handler := NewHandler(store, logger, readToken, hmacKey, 0) // 0 uses DefaultTokenTTL return handler, func() { _ = store.Close() } } diff --git a/internal/receiver/token.go b/internal/receiver/token.go index 087546c..47721fa 100644 --- a/internal/receiver/token.go +++ b/internal/receiver/token.go @@ -1,5 +1,5 @@ // ABOUTME: HMAC-SHA256 token generation and validation for scoped push authentication. -// ABOUTME: Tokens are derived from a key + scope, enabling stateless validation without DB storage. +// ABOUTME: Tokens are derived from a key + scope + timestamp, enabling stateless validation with expiration. package receiver import ( @@ -7,19 +7,71 @@ import ( "crypto/sha256" "crypto/subtle" "encoding/hex" + "fmt" + "strconv" + "strings" + "time" ) -// GenerateScopedToken computes an HMAC-SHA256 token scoped to a specific org/repo/workflow/job. -// The canonical input is "v1\x00\x00\x00\x00". -func GenerateScopedToken(key, org, repo, workflow, job string) string { - mac := hmac.New(sha256.New, []byte(key)) - mac.Write([]byte("v1\x00" + org + "\x00" + repo + "\x00" + workflow + "\x00" + job)) - return hex.EncodeToString(mac.Sum(nil)) +// DefaultTokenTTL is the default time-to-live for push tokens. +const DefaultTokenTTL = 2 * time.Hour + +// GenerateToken creates a token with embedded timestamp for expiration support. +// Format: ":" +func GenerateToken(key, org, repo, workflow, job string) string { + return GenerateTokenAt(key, org, repo, workflow, job, time.Now()) } -// ValidateScopedToken checks whether a token matches the expected HMAC for the given scope. -// Uses constant-time comparison to prevent timing attacks. -func ValidateScopedToken(key, token, org, repo, workflow, job string) bool { - expected := GenerateScopedToken(key, org, repo, workflow, job) - return subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1 +// GenerateTokenAt creates a token with the specified timestamp. +// The HMAC input is "v1\x00\x00\x00\x00\x00". +func GenerateTokenAt(key, org, repo, workflow, job string, timestamp time.Time) string { + ts := strconv.FormatInt(timestamp.Unix(), 10) + mac := hmac.New(sha256.New, []byte(key)) + mac.Write([]byte("v1\x00" + org + "\x00" + repo + "\x00" + workflow + "\x00" + job + "\x00" + ts)) + return ts + ":" + hex.EncodeToString(mac.Sum(nil)) +} + +// ValidateToken validates a token and checks expiration. +// Returns true if the token is valid and not expired. +func ValidateToken(key, token, org, repo, workflow, job string, ttl time.Duration) bool { + return ValidateTokenAt(key, token, org, repo, workflow, job, ttl, time.Now()) +} + +// ValidateTokenAt validates a token against a specific reference time. 
+func ValidateTokenAt(key, token, org, repo, workflow, job string, ttl time.Duration, now time.Time) bool { + parts := strings.SplitN(token, ":", 2) + if len(parts) != 2 { + return false + } + + tsStr, hmacHex := parts[0], parts[1] + ts, err := strconv.ParseInt(tsStr, 10, 64) + if err != nil { + return false + } + + tokenTime := time.Unix(ts, 0) + if now.Sub(tokenTime) > ttl { + return false + } + + // Recompute expected HMAC + mac := hmac.New(sha256.New, []byte(key)) + mac.Write([]byte("v1\x00" + org + "\x00" + repo + "\x00" + workflow + "\x00" + job + "\x00" + tsStr)) + expected := hex.EncodeToString(mac.Sum(nil)) + + return subtle.ConstantTimeCompare([]byte(hmacHex), []byte(expected)) == 1 +} + +// ParseTokenTimestamp extracts the timestamp from a timestamped token without validating it. +func ParseTokenTimestamp(token string) (time.Time, error) { + parts := strings.SplitN(token, ":", 2) + if len(parts) != 2 { + return time.Time{}, fmt.Errorf("invalid token format") + } + ts, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + return time.Time{}, fmt.Errorf("invalid timestamp: %w", err) + } + return time.Unix(ts, 0), nil } diff --git a/internal/receiver/token_test.go b/internal/receiver/token_test.go index 2140ecd..897ab1a 100644 --- a/internal/receiver/token_test.go +++ b/internal/receiver/token_test.go @@ -1,20 +1,35 @@ package receiver import ( - "encoding/hex" + "strconv" + "strings" "testing" + "time" ) -func TestGenerateScopedToken_Deterministic(t *testing.T) { - token1 := GenerateScopedToken("key", "org", "repo", "wf", "job") - token2 := GenerateScopedToken("key", "org", "repo", "wf", "job") +func TestGenerateToken_Format(t *testing.T) { + token := GenerateToken("key", "org", "repo", "wf", "job") + parts := strings.SplitN(token, ":", 2) + if len(parts) != 2 { + t.Fatalf("token should have format 'timestamp:hmac', got %q", token) + } + if len(parts[1]) != 64 { + t.Errorf("HMAC part length = %d, want 64", len(parts[1])) + } +} + +func TestGenerateTokenAt_Deterministic(t *testing.T) { + ts := time.Unix(1700000000, 0) + token1 := GenerateTokenAt("key", "org", "repo", "wf", "job", ts) + token2 := GenerateTokenAt("key", "org", "repo", "wf", "job", ts) if token1 != token2 { t.Errorf("tokens differ: %q vs %q", token1, token2) } } -func TestGenerateScopedToken_ScopePinning(t *testing.T) { - base := GenerateScopedToken("key", "org", "repo", "wf", "job") +func TestGenerateTokenAt_ScopePinning(t *testing.T) { + ts := time.Unix(1700000000, 0) + base := GenerateTokenAt("key", "org", "repo", "wf", "job", ts) variants := []struct { name string @@ -31,7 +46,7 @@ func TestGenerateScopedToken_ScopePinning(t *testing.T) { for _, v := range variants { t.Run(v.name, func(t *testing.T) { - token := GenerateScopedToken("key", v.org, v.repo, v.wf, v.job) + token := GenerateTokenAt("key", v.org, v.repo, v.wf, v.job, ts) if token == base { t.Errorf("token for %s should differ from base", v.name) } @@ -39,40 +54,127 @@ func TestGenerateScopedToken_ScopePinning(t *testing.T) { } } -func TestGenerateScopedToken_DifferentKeys(t *testing.T) { - token1 := GenerateScopedToken("key-a", "org", "repo", "wf", "job") - token2 := GenerateScopedToken("key-b", "org", "repo", "wf", "job") +func TestGenerateTokenAt_DifferentKeys(t *testing.T) { + ts := time.Unix(1700000000, 0) + token1 := GenerateTokenAt("key-a", "org", "repo", "wf", "job", ts) + token2 := GenerateTokenAt("key-b", "org", "repo", "wf", "job", ts) if token1 == token2 { t.Error("different keys should produce different tokens") } } -func 
TestGenerateScopedToken_ValidHex(t *testing.T) { - token := GenerateScopedToken("key", "org", "repo", "wf", "job") - if len(token) != 64 { - t.Errorf("token length = %d, want 64", len(token)) - } - if _, err := hex.DecodeString(token); err != nil { - t.Errorf("token is not valid hex: %v", err) +func TestGenerateTokenAt_DifferentTimestamps(t *testing.T) { + ts1 := time.Unix(1700000000, 0) + ts2 := time.Unix(1700000001, 0) + token1 := GenerateTokenAt("key", "org", "repo", "wf", "job", ts1) + token2 := GenerateTokenAt("key", "org", "repo", "wf", "job", ts2) + if token1 == token2 { + t.Error("different timestamps should produce different tokens") } } -func TestValidateScopedToken_Correct(t *testing.T) { - token := GenerateScopedToken("key", "org", "repo", "wf", "job") - if !ValidateScopedToken("key", token, "org", "repo", "wf", "job") { - t.Error("ValidateScopedToken should accept correct token") +func TestValidateToken_Correct(t *testing.T) { + ts := time.Now() + token := GenerateTokenAt("key", "org", "repo", "wf", "job", ts) + if !ValidateToken("key", token, "org", "repo", "wf", "job", 5*time.Minute) { + t.Error("ValidateToken should accept correct token") } } -func TestValidateScopedToken_WrongToken(t *testing.T) { - if ValidateScopedToken("key", "deadbeef", "org", "repo", "wf", "job") { - t.Error("ValidateScopedToken should reject wrong token") +func TestValidateToken_WrongToken(t *testing.T) { + if ValidateToken("key", "12345:deadbeef", "org", "repo", "wf", "job", 5*time.Minute) { + t.Error("ValidateToken should reject wrong token") } } -func TestValidateScopedToken_WrongScope(t *testing.T) { - token := GenerateScopedToken("key", "org", "repo", "wf", "job") - if ValidateScopedToken("key", token, "org", "repo", "wf", "other-job") { - t.Error("ValidateScopedToken should reject token for different scope") +func TestValidateToken_WrongScope(t *testing.T) { + ts := time.Now() + token := GenerateTokenAt("key", "org", "repo", "wf", "job", ts) + if ValidateToken("key", token, "org", "repo", "wf", "other-job", 5*time.Minute) { + t.Error("ValidateToken should reject token for different scope") + } +} + +func TestValidateToken_Expired(t *testing.T) { + ts := time.Now().Add(-10 * time.Minute) + token := GenerateTokenAt("key", "org", "repo", "wf", "job", ts) + if ValidateToken("key", token, "org", "repo", "wf", "job", 5*time.Minute) { + t.Error("ValidateToken should reject expired token") + } +} + +func TestValidateTokenAt_NotExpired(t *testing.T) { + tokenTime := time.Unix(1700000000, 0) + token := GenerateTokenAt("key", "org", "repo", "wf", "job", tokenTime) + + // Validate at 4 minutes later (within 5 minute TTL) + now := tokenTime.Add(4 * time.Minute) + if !ValidateTokenAt("key", token, "org", "repo", "wf", "job", 5*time.Minute, now) { + t.Error("ValidateTokenAt should accept token within TTL") + } +} + +func TestValidateTokenAt_JustExpired(t *testing.T) { + tokenTime := time.Unix(1700000000, 0) + token := GenerateTokenAt("key", "org", "repo", "wf", "job", tokenTime) + + // Validate at 6 minutes later (beyond 5 minute TTL) + now := tokenTime.Add(6 * time.Minute) + if ValidateTokenAt("key", token, "org", "repo", "wf", "job", 5*time.Minute, now) { + t.Error("ValidateTokenAt should reject token beyond TTL") + } +} + +func TestValidateToken_InvalidFormat(t *testing.T) { + if ValidateToken("key", "no-colon-here", "org", "repo", "wf", "job", 5*time.Minute) { + t.Error("ValidateToken should reject token without colon") + } + if ValidateToken("key", "not-a-number:abc123", "org", "repo", "wf", "job", 
5*time.Minute) { + t.Error("ValidateToken should reject token with invalid timestamp") + } +} + +func TestParseTokenTimestamp(t *testing.T) { + ts := time.Unix(1700000000, 0) + token := GenerateTokenAt("key", "org", "repo", "wf", "job", ts) + + parsed, err := ParseTokenTimestamp(token) + if err != nil { + t.Fatalf("ParseTokenTimestamp failed: %v", err) + } + if !parsed.Equal(ts) { + t.Errorf("parsed timestamp = %v, want %v", parsed, ts) + } +} + +func TestParseTokenTimestamp_Invalid(t *testing.T) { + _, err := ParseTokenTimestamp("no-colon") + if err == nil { + t.Error("ParseTokenTimestamp should fail on missing colon") + } + + _, err = ParseTokenTimestamp("not-a-number:abc123") + if err == nil { + t.Error("ParseTokenTimestamp should fail on invalid timestamp") + } +} + +func TestValidateToken_TamperedTimestamp(t *testing.T) { + // Generate a valid token + ts := time.Now() + token := GenerateTokenAt("key", "org", "repo", "wf", "job", ts) + + parts := strings.SplitN(token, ":", 2) + if len(parts) != 2 { + t.Fatalf("unexpected token format: %q", token) + } + hmacPart := parts[1] + + // Tamper with timestamp (e.g., attacker tries to extend token lifetime) + tamperedTimestamp := strconv.FormatInt(time.Now().Add(1*time.Hour).Unix(), 10) + tamperedToken := tamperedTimestamp + ":" + hmacPart + + if ValidateToken("key", tamperedToken, "org", "repo", "wf", "job", 5*time.Minute) { + t.Error("ValidateToken should reject token with tampered timestamp") } } From 862fc073284cd3febec3877b28c102cffdf1df54 Mon Sep 17 00:00:00 2001 From: Martin McCaffery Date: Thu, 12 Feb 2026 11:47:51 +0100 Subject: [PATCH 4/9] ci: generate two separate binaries --- .goreleaser.yaml | 38 ++++++++++++++++++++++++++---- Dockerfile | 13 ++++------ Dockerfile.goreleaser | 5 ++-- Makefile | 4 ++-- go.mod | 13 ++++++++-- go.sum | 30 +++++++++++++++++++---- internal/receiver/store.go | 2 +- test/k8s/test-cgroup-grouping.yaml | 4 ++-- 8 files changed, 83 insertions(+), 26 deletions(-) diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 7e27b2c..3f5f26e 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -1,6 +1,6 @@ version: 2 -project_name: resource-collector +project_name: optimiser gitea_urls: api: "{{ .Env.GITHUB_SERVER_URL }}/api/v1" @@ -11,9 +11,21 @@ before: - go mod tidy builds: - - id: resource-collector + - id: collector main: ./cmd/collector - binary: resource-collector + binary: collector + env: + - CGO_ENABLED=0 + goos: + - linux + goarch: + - amd64 + - arm64 + ldflags: + - -s -w + - id: receiver + main: ./cmd/receiver + binary: receiver env: - CGO_ENABLED=0 goos: @@ -37,12 +49,28 @@ snapshot: version_template: "{{ incpatch .Version }}-next" dockers_v2: - - images: - - "{{ .Env.DOCKER_REGISTRY }}/{{ .Env.DOCKER_ORG }}/resource-collector" + - id: collector + ids: + - collector + images: + - "{{ .Env.DOCKER_REGISTRY }}/{{ .Env.DOCKER_ORG }}/forgejo-runner-optimiser-collector" tags: - "{{ .Version }}" - latest dockerfile: Dockerfile.goreleaser + build_args: + BINARY: collector + - id: receiver + ids: + - receiver + images: + - "{{ .Env.DOCKER_REGISTRY }}/{{ .Env.DOCKER_ORG }}/forgejo-runner-optimiser-receiver" + tags: + - "{{ .Version }}" + - latest + dockerfile: Dockerfile.goreleaser + build_args: + BINARY: receiver changelog: sort: asc diff --git a/Dockerfile b/Dockerfile index 75f7b7f..61ae4e0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,26 +10,23 @@ COPY . . 
# Collector build (no CGO needed) FROM builder-base AS builder-collector -RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /resource-collector ./cmd/collector +RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /optimiser ./cmd/collector -# Receiver build (CGO needed for SQLite) +# Receiver build FROM builder-base AS builder-receiver -RUN apk add --no-cache gcc musl-dev -RUN CGO_ENABLED=1 GOOS=linux go build -ldflags="-s -w" -o /metrics-receiver ./cmd/receiver +RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /metrics-receiver ./cmd/receiver # Collector image FROM alpine:3.19 AS collector -COPY --from=builder-collector /resource-collector /usr/local/bin/resource-collector +COPY --from=builder-collector /optimiser /usr/local/bin/optimiser -ENTRYPOINT ["/usr/local/bin/resource-collector"] +ENTRYPOINT ["/usr/local/bin/optimiser"] # Receiver image FROM alpine:3.19 AS receiver -RUN apk add --no-cache sqlite-libs - COPY --from=builder-receiver /metrics-receiver /usr/local/bin/metrics-receiver EXPOSE 8080 diff --git a/Dockerfile.goreleaser b/Dockerfile.goreleaser index 69c2616..dc792e1 100644 --- a/Dockerfile.goreleaser +++ b/Dockerfile.goreleaser @@ -1,4 +1,5 @@ FROM gcr.io/distroless/static:nonroot ARG TARGETPLATFORM -COPY ${TARGETPLATFORM}/resource-collector /resource-collector -ENTRYPOINT ["/resource-collector"] +ARG BINARY +COPY ${TARGETPLATFORM}/${BINARY} /app +ENTRYPOINT ["/app"] diff --git a/Makefile b/Makefile index cb32d30..8bb918a 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ -# ABOUTME: Makefile for forgejo-runner-resource-collector project. +# ABOUTME: Makefile for forgejo-runner-optimiser project. # ABOUTME: Provides targets for building, formatting, linting, and testing. -BINARY_NAME := resource-collector +BINARY_NAME := optimiser CMD_PATH := ./cmd/collector GO := go GOLANGCI_LINT := $(GO) run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.6.2 diff --git a/go.mod b/go.mod index 300d84c..898904b 100644 --- a/go.mod +++ b/go.mod @@ -3,13 +3,22 @@ module edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser go 1.25.6 require ( - gorm.io/driver/sqlite v1.6.0 + github.com/glebarez/sqlite v1.11.0 gorm.io/gorm v1.31.1 ) require ( + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/glebarez/go-sqlite v1.21.2 // indirect + github.com/google/uuid v1.3.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect - github.com/mattn/go-sqlite3 v1.14.22 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + golang.org/x/sys v0.7.0 // indirect golang.org/x/text v0.20.0 // indirect + modernc.org/libc v1.22.5 // indirect + modernc.org/mathutil v1.5.0 // indirect + modernc.org/memory v1.5.0 // indirect + modernc.org/sqlite v1.23.1 // indirect ) diff --git a/go.sum b/go.sum index 330dd09..95df11c 100644 --- a/go.sum +++ b/go.sum @@ -1,12 +1,34 @@ +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/glebarez/go-sqlite v1.21.2 h1:3a6LFC4sKahUunAmynQKLZceZCOzUthkRkEAl9gAXWo= +github.com/glebarez/go-sqlite v1.21.2/go.mod h1:sfxdZyhQjTM2Wry3gVYWaW072Ri1WMdWJi0k6+3382k= +github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw= +github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ= +github.com/google/pprof 
v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= +github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= -gorm.io/driver/sqlite v1.6.0 h1:WHRRrIiulaPiPFmDcod6prc4l2VGVWHz80KspNsxSfQ= -gorm.io/driver/sqlite v1.6.0/go.mod h1:AO9V1qIQddBESngQUKWL9yoH93HIeA1X6V633rBwyT8= gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg= gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs= +modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE= +modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY= +modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/sqlite v1.23.1 h1:nrSBg4aRQQwq59JpvGEQ15tNxoO5pX/kUjcRNwSAGQM= +modernc.org/sqlite v1.23.1/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk= diff --git a/internal/receiver/store.go b/internal/receiver/store.go index 7d81959..48f853f 100644 --- a/internal/receiver/store.go +++ b/internal/receiver/store.go @@ -7,7 +7,7 @@ import ( "fmt" "time" - "gorm.io/driver/sqlite" + "github.com/glebarez/sqlite" "gorm.io/gorm" "gorm.io/gorm/logger" ) diff --git a/test/k8s/test-cgroup-grouping.yaml b/test/k8s/test-cgroup-grouping.yaml index e46545b..4b2b1c1 100644 --- a/test/k8s/test-cgroup-grouping.yaml +++ b/test/k8s/test-cgroup-grouping.yaml @@ -55,7 +55,7 @@ spec: # Resource collector sidecar - name: collector - image: ghcr.io/your-org/forgejo-runner-resource-collector:latest # Replace with your image + image: ghcr.io/your-org/forgejo-runner-optimiser:latest # Replace with your image args: - --interval=5s - --top=3 @@ 
-121,7 +121,7 @@ spec: # Collector - name: collector - image: ghcr.io/your-org/forgejo-runner-resource-collector:latest # Replace with your image + image: ghcr.io/your-org/forgejo-runner-optimiser:latest # Replace with your image args: - --interval=2s - --top=5 From d0aea88a5b4345a72b893e50c7673d4f136ecec0 Mon Sep 17 00:00:00 2001 From: Martin McCaffery Date: Fri, 13 Feb 2026 16:42:37 +0100 Subject: [PATCH 5/9] refactor: Rename recommender to sizer --- CLAUDE.md | 7 ++++--- README.md | 17 ++++++++++------- internal/receiver/sizing.go | 2 +- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index f9d6972..6b6d7ef 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -28,7 +28,7 @@ make install-hooks # Install pre-commit and commit-msg hooks ## Architecture Overview -This is a Go metrics collector designed for CI/CD environments with shared PID namespaces. It consists of two binaries: +A resource optimiser for CI/CD environments with shared PID namespaces. It consists of two binaries — a **collector** and a **receiver** (which includes the **sizer**): ### Collector (`cmd/collector`) Runs alongside CI workloads, periodically reads `/proc` filesystem, and pushes a summary to the receiver on shutdown (SIGINT/SIGTERM). @@ -40,11 +40,12 @@ Runs alongside CI workloads, periodically reads `/proc` filesystem, and pushes a 4. On shutdown, `summary.PushClient` sends the summary to the receiver HTTP endpoint ### Receiver (`cmd/receiver`) -HTTP service that stores metric summaries in SQLite (via GORM) and provides a query API. +HTTP service that stores metric summaries in SQLite (via GORM), provides a query API, and includes the **sizer** — which computes right-sized Kubernetes resource requests and limits from historical data. **Key Endpoints:** - `POST /api/v1/metrics` - Receive metrics from collectors - `GET /api/v1/metrics/repo/{org}/{repo}/{workflow}/{job}` - Query stored metrics +- `GET /api/v1/sizing/repo/{org}/{repo}/{workflow}/{job}` - Compute container sizes from historical data ### Internal Packages @@ -55,7 +56,7 @@ HTTP service that stores metric summaries in SQLite (via GORM) and provides a qu | `internal/proc` | Low-level /proc parsing (stat, status, cgroup) | | `internal/cgroup` | Parses CGROUP_LIMITS and CGROUP_PROCESS_MAP env vars | | `internal/summary` | Accumulates samples, computes stats, pushes to receiver | -| `internal/receiver` | HTTP handlers and SQLite store | +| `internal/receiver` | HTTP handlers, SQLite store, and sizer logic | | `internal/output` | Metrics output formatting (JSON/text) | ### Container Metrics diff --git a/README.md b/README.md index abd58e9..fc7fb7b 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ -# Forgejo Runner Resource Collector +# Forgejo Runner Optimiser -A lightweight metrics collector for CI/CD workloads in shared PID namespace environments. Reads `/proc` to collect CPU and memory metrics, groups them by container/cgroup, and pushes run summaries to a receiver service for storage and querying. +A resource optimiser for CI/CD workloads in shared PID namespace environments. The **collector** reads `/proc` to gather CPU and memory metrics grouped by container/cgroup, and pushes run summaries to the **receiver**. The receiver stores metrics and exposes a **sizer** API that computes right-sized Kubernetes resource requests and limits from historical data. 
## Architecture -The system has two independent binaries: +The system has two binaries — a **collector** and a **receiver** (which includes the sizer): ``` ┌─────────────────────────────────────────────┐ ┌──────────────────────────┐ @@ -19,7 +19,9 @@ The system has two independent binaries: │ └───────────┘ └────────┘ └───────────┘ │ │ │ │ │ │ │ ▼ │ └─────────────────────────────────────────────┘ │ GET /api/v1/metrics/... │ - └──────────────────────────┘ +│ GET /api/v1/sizing/... │ +│ (sizer) │ +└──────────────────────────┘ ``` ### Collector @@ -56,9 +58,9 @@ Runs as a sidecar alongside CI workloads. On a configurable interval, it reads ` CPU supports Kubernetes notation (`"2"` = 2 cores, `"500m"` = 0.5 cores). Memory supports `Ki`, `Mi`, `Gi`, `Ti` (binary) or `K`, `M`, `G`, `T` (decimal). -### Receiver +### Receiver (with sizer) -HTTP service that stores metric summaries in SQLite (via GORM) and exposes a query API. +HTTP service that stores metric summaries in SQLite (via GORM), exposes a query API, and provides a **sizer** endpoint that computes right-sized Kubernetes resource requests and limits from historical run data. ```bash ./receiver --addr=:8080 --db=metrics.db --read-token=my-secret-token --hmac-key=my-hmac-key @@ -78,6 +80,7 @@ HTTP service that stores metric summaries in SQLite (via GORM) and exposes a que - `POST /api/v1/metrics` — receive and store a metric summary (requires scoped push token) - `POST /api/v1/token` — generate a scoped push token (requires read token auth) - `GET /api/v1/metrics/repo/{org}/{repo}/{workflow}/{job}` — query stored metrics (requires read token auth) +- `GET /api/v1/sizing/repo/{org}/{repo}/{workflow}/{job}` — compute container sizes from historical data (requires read token auth) **Authentication:** @@ -232,7 +235,7 @@ PUSH_TOKEN=$(curl -s -X POST http://localhost:8080/api/v1/token \ | `internal/cgroup` | Parses `CGROUP_PROCESS_MAP` and `CGROUP_LIMITS` env vars | | `internal/collector` | Orchestrates the collection loop and shutdown | | `internal/summary` | Accumulates samples, computes stats, pushes to receiver | -| `internal/receiver` | HTTP handlers and SQLite store | +| `internal/receiver` | HTTP handlers, SQLite store, and sizer logic | | `internal/output` | Metrics output formatting (JSON/text) | ## Background diff --git a/internal/receiver/sizing.go b/internal/receiver/sizing.go index 928a2f5..32951ab 100644 --- a/internal/receiver/sizing.go +++ b/internal/receiver/sizing.go @@ -1,5 +1,5 @@ // ABOUTME: Computes ideal container sizes from historical run data. -// ABOUTME: Provides Kubernetes-style resource recommendations. +// ABOUTME: Provides Kubernetes-style resource sizes. 
package receiver import ( From 937e5b814b58af2cfa1dc0989c4d4cdc01d22adc Mon Sep 17 00:00:00 2001 From: Patrick Sy Date: Tue, 17 Feb 2026 15:53:14 +0100 Subject: [PATCH 6/9] chore: Updated actions --- .github/workflows/ci.yaml | 8 ++++---- .github/workflows/release.yaml | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 8647689..1af11d6 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -7,16 +7,16 @@ on: pull_request: jobs: - build: + ci: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@v5 + uses: actions/setup-go@v6 with: go-version-file: go.mod @@ -27,7 +27,7 @@ jobs: run: make test - name: Install GoReleaser - uses: https://github.com/goreleaser/goreleaser-action@v5 + uses: https://github.com/goreleaser/goreleaser-action@v6 with: install-only: true diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 9436061..369a4d5 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -10,11 +10,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: fetch-depth: 0 - name: Set up Go - uses: actions/setup-go@v5 + uses: actions/setup-go@v6 with: go-version-file: go.mod - name: Test code From e38c99acd64c1df6ab5ba7ab637e45cfaeffdc25 Mon Sep 17 00:00:00 2001 From: Patrick Sy Date: Tue, 17 Feb 2026 16:08:25 +0100 Subject: [PATCH 7/9] fix: Added missing docker coordinates --- .github/workflows/ci.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 1af11d6..8650315 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -31,9 +31,19 @@ jobs: with: install-only: true + - name: Sanitize Docker credentials + run: | + REGISTRY="${{ forgejo.server_url }}" + echo "registry=${REGISTRY#https://}" >> "$GITHUB_OUTPUT" + ORG="${{ forgejo.repository_owner }}" + echo "org=$(echo "$ORG" | tr '[:upper:]' '[:lower:]')" >> "$GITHUB_OUTPUT" + id: sanitize_credentials + - name: GoReleaser Check run: | unset GITHUB_TOKEN goreleaser release --snapshot --skip=publish --clean env: GORELEASER_CURRENT_TAG: v0.0.0 + DOCKER_REGISTRY: ${{ steps.sanitize_credentials.outputs.registry }} + DOCKER_ORG: ${{ steps.sanitize_credentials.outputs.org }} From 479c13f596f466126790b4aab9c980dba74b3a00 Mon Sep 17 00:00:00 2001 From: Patrick Sy Date: Tue, 17 Feb 2026 17:25:08 +0100 Subject: [PATCH 8/9] refactor: Rename from optimiser to sizer --- .github/workflows/ci.yaml | 2 +- .goreleaser.yaml | 6 +++--- CLAUDE.md | 2 +- Dockerfile | 6 +++--- Makefile | 4 ++-- README.md | 4 ++-- cmd/collector/main.go | 6 +++--- cmd/receiver/main.go | 2 +- go.mod | 2 +- internal/collector/collector.go | 6 +++--- internal/collector/collector_test.go | 4 ++-- internal/integration/integration_test.go | 4 ++-- internal/metrics/aggregator.go | 4 ++-- internal/output/logger.go | 2 +- internal/output/types.go | 2 +- internal/receiver/handler_test.go | 2 +- internal/receiver/sizing.go | 2 +- internal/receiver/sizing_test.go | 2 +- internal/receiver/store_test.go | 2 +- internal/receiver/types.go | 2 +- internal/summary/accumulator.go | 2 +- internal/summary/accumulator_test.go | 2 +- test/docker/docker-compose-stress.yaml | 2 +- test/docker/docker-compose.yaml | 2 +- test/k8s/test-cgroup-grouping.yaml | 4 ++-- 25 files changed, 39 
insertions(+), 39 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 8650315..e1c629b 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -35,7 +35,7 @@ jobs: run: | REGISTRY="${{ forgejo.server_url }}" echo "registry=${REGISTRY#https://}" >> "$GITHUB_OUTPUT" - ORG="${{ forgejo.repository_owner }}" + ORG="${{ github.repository_owner }}" echo "org=$(echo "$ORG" | tr '[:upper:]' '[:lower:]')" >> "$GITHUB_OUTPUT" id: sanitize_credentials diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 3f5f26e..dd707f5 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -1,6 +1,6 @@ version: 2 -project_name: optimiser +project_name: sizer gitea_urls: api: "{{ .Env.GITHUB_SERVER_URL }}/api/v1" @@ -53,7 +53,7 @@ dockers_v2: ids: - collector images: - - "{{ .Env.DOCKER_REGISTRY }}/{{ .Env.DOCKER_ORG }}/forgejo-runner-optimiser-collector" + - "{{ .Env.DOCKER_REGISTRY }}/{{ .Env.DOCKER_ORG }}/forgejo-runner-sizer-collector" tags: - "{{ .Version }}" - latest @@ -64,7 +64,7 @@ dockers_v2: ids: - receiver images: - - "{{ .Env.DOCKER_REGISTRY }}/{{ .Env.DOCKER_ORG }}/forgejo-runner-optimiser-receiver" + - "{{ .Env.DOCKER_REGISTRY }}/{{ .Env.DOCKER_ORG }}/forgejo-runner-sizer-receiver" tags: - "{{ .Version }}" - latest diff --git a/CLAUDE.md b/CLAUDE.md index 6b6d7ef..819e2c1 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -28,7 +28,7 @@ make install-hooks # Install pre-commit and commit-msg hooks ## Architecture Overview -A resource optimiser for CI/CD environments with shared PID namespaces. It consists of two binaries — a **collector** and a **receiver** (which includes the **sizer**): +A resource sizer for CI/CD environments with shared PID namespaces. It consists of two binaries — a **collector** and a **receiver** (which includes the **sizer**): ### Collector (`cmd/collector`) Runs alongside CI workloads, periodically reads `/proc` filesystem, and pushes a summary to the receiver on shutdown (SIGINT/SIGTERM). diff --git a/Dockerfile b/Dockerfile index 61ae4e0..070beff 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,7 +10,7 @@ COPY . . # Collector build (no CGO needed) FROM builder-base AS builder-collector -RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /optimiser ./cmd/collector +RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /sizer ./cmd/collector # Receiver build FROM builder-base AS builder-receiver @@ -20,9 +20,9 @@ RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /metrics-receiver ./cm # Collector image FROM alpine:3.19 AS collector -COPY --from=builder-collector /optimiser /usr/local/bin/optimiser +COPY --from=builder-collector /sizer /usr/local/bin/sizer -ENTRYPOINT ["/usr/local/bin/optimiser"] +ENTRYPOINT ["/usr/local/bin/sizer"] # Receiver image FROM alpine:3.19 AS receiver diff --git a/Makefile b/Makefile index 8bb918a..d1e1543 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ -# ABOUTME: Makefile for forgejo-runner-optimiser project. +# ABOUTME: Makefile for forgejo-runner-sizer project. # ABOUTME: Provides targets for building, formatting, linting, and testing. -BINARY_NAME := optimiser +BINARY_NAME := sizer CMD_PATH := ./cmd/collector GO := go GOLANGCI_LINT := $(GO) run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.6.2 diff --git a/README.md b/README.md index fc7fb7b..f8a9cf4 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -# Forgejo Runner Optimiser +# Forgejo Runner Sizer -A resource optimiser for CI/CD workloads in shared PID namespace environments. 
The **collector** reads `/proc` to gather CPU and memory metrics grouped by container/cgroup, and pushes run summaries to the **receiver**. The receiver stores metrics and exposes a **sizer** API that computes right-sized Kubernetes resource requests and limits from historical data. +A resource sizer for CI/CD workloads in shared PID namespace environments. The **collector** reads `/proc` to gather CPU and memory metrics grouped by container/cgroup, and pushes run summaries to the **receiver**. The receiver stores metrics and exposes a **sizer** API that computes right-sized Kubernetes resource requests and limits from historical data. ## Architecture diff --git a/cmd/collector/main.go b/cmd/collector/main.go index 65b7ce4..4e7ce27 100644 --- a/cmd/collector/main.go +++ b/cmd/collector/main.go @@ -10,9 +10,9 @@ import ( "syscall" "time" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/collector" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/output" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/collector" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/output" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary" ) const ( diff --git a/cmd/receiver/main.go b/cmd/receiver/main.go index c540736..a7863c7 100644 --- a/cmd/receiver/main.go +++ b/cmd/receiver/main.go @@ -11,7 +11,7 @@ import ( "syscall" "time" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/receiver" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver" ) const ( diff --git a/go.mod b/go.mod index 898904b..a51ecd1 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser +module edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer go 1.25.6 diff --git a/internal/collector/collector.go b/internal/collector/collector.go index 8ed587a..4c800c7 100644 --- a/internal/collector/collector.go +++ b/internal/collector/collector.go @@ -6,9 +6,9 @@ import ( "log/slog" "time" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/metrics" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/output" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/metrics" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/output" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary" ) // Config holds the collector configuration diff --git a/internal/collector/collector_test.go b/internal/collector/collector_test.go index a9791ca..b0a9a38 100644 --- a/internal/collector/collector_test.go +++ b/internal/collector/collector_test.go @@ -10,8 +10,8 @@ import ( "testing" "time" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/output" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/output" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary" ) func TestCollector_EmitsSummaryOnShutdown(t *testing.T) { diff --git a/internal/integration/integration_test.go b/internal/integration/integration_test.go index f21fa6f..91f532c 100644 --- a/internal/integration/integration_test.go +++ b/internal/integration/integration_test.go @@ -14,8 +14,8 @@ import ( "testing" "time" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/receiver" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary" 
+ "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary" ) const ( diff --git a/internal/metrics/aggregator.go b/internal/metrics/aggregator.go index a4997d2..bd5bb8c 100644 --- a/internal/metrics/aggregator.go +++ b/internal/metrics/aggregator.go @@ -4,8 +4,8 @@ import ( "sort" "time" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/cgroup" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/proc" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/cgroup" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/proc" ) // Aggregator collects and aggregates metrics from processes diff --git a/internal/output/logger.go b/internal/output/logger.go index d8541bb..c557f89 100644 --- a/internal/output/logger.go +++ b/internal/output/logger.go @@ -6,7 +6,7 @@ import ( "log/slog" "os" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/metrics" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/metrics" ) // LogFormat specifies the log output format diff --git a/internal/output/types.go b/internal/output/types.go index 28576c8..ffd3754 100644 --- a/internal/output/types.go +++ b/internal/output/types.go @@ -1,6 +1,6 @@ package output -import "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/metrics" +import "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/metrics" // Writer defines the interface for outputting metrics // This allows for different implementations (logging, HTTP push, etc.) diff --git a/internal/receiver/handler_test.go b/internal/receiver/handler_test.go index 12b327e..dcf1791 100644 --- a/internal/receiver/handler_test.go +++ b/internal/receiver/handler_test.go @@ -11,7 +11,7 @@ import ( "strings" "testing" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary" ) func TestHandler_ReceiveMetrics(t *testing.T) { diff --git a/internal/receiver/sizing.go b/internal/receiver/sizing.go index 32951ab..256a48d 100644 --- a/internal/receiver/sizing.go +++ b/internal/receiver/sizing.go @@ -8,7 +8,7 @@ import ( "math" "sort" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary" ) // ResourceSize holds Kubernetes-formatted resource values diff --git a/internal/receiver/sizing_test.go b/internal/receiver/sizing_test.go index a1ac3c5..ce57210 100644 --- a/internal/receiver/sizing_test.go +++ b/internal/receiver/sizing_test.go @@ -6,7 +6,7 @@ import ( "net/http/httptest" "testing" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary" ) func TestFormatMemoryK8s(t *testing.T) { diff --git a/internal/receiver/store_test.go b/internal/receiver/store_test.go index d63169f..eebd06d 100644 --- a/internal/receiver/store_test.go +++ b/internal/receiver/store_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary" ) func TestNewStore(t *testing.T) { diff --git a/internal/receiver/types.go b/internal/receiver/types.go index 58f6a50..1afee44 100644 --- a/internal/receiver/types.go +++ b/internal/receiver/types.go @@ -2,7 +2,7 @@ // ABOUTME: Defines MetricsPayload combining execution metadata with run summary. 
package receiver -import "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary" +import "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary" // ExecutionContext holds GitHub Actions style identifiers for a workflow run type ExecutionContext struct { diff --git a/internal/summary/accumulator.go b/internal/summary/accumulator.go index 972ff28..65530ca 100644 --- a/internal/summary/accumulator.go +++ b/internal/summary/accumulator.go @@ -7,7 +7,7 @@ import ( "sort" "time" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/metrics" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/metrics" ) // containerAccumulator tracks metrics for a single container diff --git a/internal/summary/accumulator_test.go b/internal/summary/accumulator_test.go index 00f1b3c..1bbaab4 100644 --- a/internal/summary/accumulator_test.go +++ b/internal/summary/accumulator_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/metrics" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/metrics" ) func TestAccumulator_NoSamples(t *testing.T) { diff --git a/test/docker/docker-compose-stress.yaml b/test/docker/docker-compose-stress.yaml index d4a0be0..7c8578f 100644 --- a/test/docker/docker-compose-stress.yaml +++ b/test/docker/docker-compose-stress.yaml @@ -113,7 +113,7 @@ services: # Cgroup configuration # stress-ng-cpu is the worker process name for CPU stress # stress-ng-vm is the worker process name for memory stress - CGROUP_PROCESS_MAP: '{"stress-ng-cpu":"cpu-stress","stress-ng-vm":"mem-stress","dd":"io-stress","resource-collec":"collector"}' + CGROUP_PROCESS_MAP: '{"stress-ng-cpu":"cpu-stress","stress-ng-vm":"mem-stress","dd":"io-stress","sizer":"collector"}' CGROUP_LIMITS: '{"cpu-stress":{"cpu":"1","memory":"128Mi"},"mem-stress":{"cpu":"500m","memory":"256Mi"},"io-stress":{"cpu":"500m","memory":"128Mi"},"collector":{"cpu":"200m","memory":"64Mi"}}' deploy: resources: diff --git a/test/docker/docker-compose.yaml b/test/docker/docker-compose.yaml index 87a9a9a..af438af 100644 --- a/test/docker/docker-compose.yaml +++ b/test/docker/docker-compose.yaml @@ -68,7 +68,7 @@ services: environment: # Map unique process names to container names # 'cat' runs only in runner, 'sleep' runs only in sidecar - CGROUP_PROCESS_MAP: '{"cat":"runner","sleep":"sidecar","resource-collec":"collector"}' + CGROUP_PROCESS_MAP: '{"cat":"runner","sleep":"sidecar","sizer":"collector"}' CGROUP_LIMITS: '{"runner":{"cpu":"500m","memory":"256Mi"},"sidecar":{"cpu":"100m","memory":"128Mi"},"collector":{"cpu":"100m","memory":"64Mi"}}' deploy: resources: diff --git a/test/k8s/test-cgroup-grouping.yaml b/test/k8s/test-cgroup-grouping.yaml index 4b2b1c1..fc27d81 100644 --- a/test/k8s/test-cgroup-grouping.yaml +++ b/test/k8s/test-cgroup-grouping.yaml @@ -55,7 +55,7 @@ spec: # Resource collector sidecar - name: collector - image: ghcr.io/your-org/forgejo-runner-optimiser:latest # Replace with your image + image: ghcr.io/your-org/forgejo-runner-sizer:latest args: - --interval=5s - --top=3 @@ -121,7 +121,7 @@ spec: # Collector - name: collector - image: ghcr.io/your-org/forgejo-runner-optimiser:latest # Replace with your image + image: ghcr.io/your-org/forgejo-runner-sizer:latest args: - --interval=2s - --top=5 From bc9d0dd8ea071833e2fed802378813ff206d32c7 Mon Sep 17 00:00:00 2001 From: Manuel Ganter Date: Wed, 18 Feb 2026 11:12:14 +0100 Subject: [PATCH 9/9] feat: migrate receiver to Fuego framework with OpenAPI generation Replace net/http 
handlers with Fuego framework for automatic OpenAPI 3.0 spec generation. Add generated Go client package, OpenAPI extraction script, and update Makefile with separate build/run targets for both binaries. Co-Authored-By: Claude Opus 4.6 --- Makefile | 34 +- cmd/receiver/main.go | 65 +- docs/openapi.json | 665 +++++++++++++ go.mod | 33 +- go.sum | 100 +- internal/integration/integration_test.go | 28 +- internal/receiver/handler.go | 297 +++--- internal/receiver/handler_test.go | 90 +- internal/receiver/sizing_test.go | 25 +- pkg/client/client.gen.go | 1096 ++++++++++++++++++++++ scripts/extract-openapi/main.go | 64 ++ 11 files changed, 2245 insertions(+), 252 deletions(-) create mode 100644 docs/openapi.json create mode 100644 pkg/client/client.gen.go create mode 100644 scripts/extract-openapi/main.go diff --git a/Makefile b/Makefile index d1e1543..5adc181 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,10 @@ # ABOUTME: Makefile for forgejo-runner-sizer project. # ABOUTME: Provides targets for building, formatting, linting, and testing. -BINARY_NAME := sizer -CMD_PATH := ./cmd/collector GO := go GOLANGCI_LINT := $(GO) run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.6.2 GITLEAKS := $(GO) run github.com/zricethezav/gitleaks/v8@v8.30.0 +OAPI_CODEGEN := $(GO) run github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen@latest # Build flags LDFLAGS := -s -w @@ -13,18 +12,23 @@ BUILD_FLAGS := -ldflags "$(LDFLAGS)" default: run -.PHONY: all build clean fmt format lint gitleaks test run help vet tidy install-hooks +.PHONY: all build build-collector build-receiver clean fmt format lint gitleaks test run-collector run-receiver help vet tidy install-hooks openapi generate-client # Default target all: fmt vet lint build ## Build targets -build: ## Build the binary - $(GO) build $(BUILD_FLAGS) -o $(BINARY_NAME) $(CMD_PATH) +build: build-collector build-receiver ## Build both binaries + +build-collector: ## Build the collector binary + $(GO) build $(BUILD_FLAGS) -o collector ./cmd/collector + +build-receiver: ## Build the receiver binary + $(GO) build $(BUILD_FLAGS) -o receiver ./cmd/receiver clean: ## Remove build artifacts - rm -f $(BINARY_NAME) coverage.out coverage.html + rm -f collector receiver coverage.out coverage.html $(GO) clean ## Code quality targets @@ -46,6 +50,16 @@ gitleaks: ## Check for secrets in git history gitleaks-all: ## Check for secrets in git history $(GITLEAKS) git . +## OpenAPI / Client Generation + +openapi: ## Generate OpenAPI spec from Fuego routes + $(GO) run scripts/extract-openapi/main.go + +generate-client: openapi ## Generate Go client from OpenAPI spec + rm -rf pkg/client + mkdir -p pkg/client + $(OAPI_CODEGEN) -generate types,client -package client docs/openapi.json > pkg/client/client.gen.go + ## Dependency management tidy: ## Tidy go modules @@ -62,11 +76,11 @@ test-coverage: ## Run tests with coverage ## Run targets -run: build ## Build and run with default settings - ./$(BINARY_NAME) +run-collector: build-collector ## Build and run the collector + ./collector -run-text: build ## Build and run with text output format - ./$(BINARY_NAME) --log-format text --interval 2s +run-receiver: build-receiver ## Build and run the receiver + ./receiver --read-token=secure-read-token --hmac-key=secure-hmac-key ## Git hooks diff --git a/cmd/receiver/main.go b/cmd/receiver/main.go index a7863c7..9067eee 100644 --- a/cmd/receiver/main.go +++ b/cmd/receiver/main.go @@ -1,16 +1,17 @@ +// ABOUTME: Entry point for the metrics receiver service. 
+// ABOUTME: HTTP service using Fuego framework with automatic OpenAPI 3.0 generation. package main import ( - "context" "flag" "fmt" "log/slog" - "net/http" "os" - "os/signal" - "syscall" "time" + "github.com/getkin/kin-openapi/openapi3" + "github.com/go-fuego/fuego" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver" ) @@ -39,42 +40,44 @@ func main() { defer func() { _ = store.Close() }() handler := receiver.NewHandler(store, logger, *readToken, *hmacKey, *tokenTTL) - mux := http.NewServeMux() - handler.RegisterRoutes(mux) - server := &http.Server{ - Addr: *addr, - Handler: mux, - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - } + // Create Fuego server with OpenAPI configuration + s := fuego.NewServer( + fuego.WithAddr(*addr), + fuego.WithEngineOptions( + fuego.WithOpenAPIConfig(fuego.OpenAPIConfig{ + PrettyFormatJSON: true, + JSONFilePath: "docs/openapi.json", + SwaggerURL: "/swagger", + Info: &openapi3.Info{ + Title: "Forgejo Runner Resource Collector API", + Version: "1.0.0", + Description: "HTTP service that receives and stores CI/CD resource metrics from collectors, providing query and sizing recommendation APIs.", + Contact: &openapi3.Contact{ + Name: "API Support", + URL: "https://edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer", + }, + License: &openapi3.License{ + Name: "Apache 2.0", + URL: "http://www.apache.org/licenses/LICENSE-2.0.html", + }, + }, + }), + ), + ) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - sigChan := make(chan os.Signal, 1) - signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - - go func() { - sig := <-sigChan - logger.Info("received signal, shutting down", slog.String("signal", sig.String())) - cancel() - - shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second) - defer shutdownCancel() - _ = server.Shutdown(shutdownCtx) - }() + // Register routes + handler.RegisterRoutes(s) logger.Info("starting metrics receiver", slog.String("addr", *addr), slog.String("db", *dbPath), + slog.String("swagger", fmt.Sprintf("http://localhost%s/swagger", *addr)), ) - if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + // Run server (handles graceful shutdown) + if err := s.Run(); err != nil { fmt.Fprintf(os.Stderr, "error: %v\n", err) os.Exit(1) } - - <-ctx.Done() - logger.Info("receiver stopped gracefully") } diff --git a/docs/openapi.json b/docs/openapi.json new file mode 100644 index 0000000..35c1b9e --- /dev/null +++ b/docs/openapi.json @@ -0,0 +1,665 @@ +{ + "components": { + "schemas": { + "HTTPError": { + "description": "HTTPError schema", + "properties": { + "detail": { + "description": "Human readable error message", + "nullable": true, + "type": "string" + }, + "errors": { + "items": { + "nullable": true, + "properties": { + "more": { + "additionalProperties": { + "description": "Additional information about the error", + "nullable": true + }, + "description": "Additional information about the error", + "nullable": true, + "type": "object" + }, + "name": { + "description": "For example, name of the parameter that caused the error", + "type": "string" + }, + "reason": { + "description": "Human readable error message", + "type": "string" + } + }, + "type": "object" + }, + "nullable": true, + "type": "array" + }, + "instance": { + "nullable": true, + "type": "string" + }, + "status": { + "description": "HTTP status code", + "example": 403, + "nullable": true, + "type": "integer" + }, + "title": { + "description": "Short title of the 
error", + "nullable": true, + "type": "string" + }, + "type": { + "description": "URL of the error type. Can be used to lookup the error in a documentation", + "nullable": true, + "type": "string" + } + }, + "type": "object" + }, + "HealthResponse": { + "description": "HealthResponse schema", + "properties": { + "status": { + "type": "string" + } + }, + "type": "object" + }, + "MetricCreatedResponse": { + "description": "MetricCreatedResponse schema", + "properties": { + "id": { + "minimum": 0, + "type": "integer" + }, + "status": { + "type": "string" + } + }, + "type": "object" + }, + "MetricResponse": { + "description": "MetricResponse schema", + "properties": { + "id": { + "minimum": 0, + "type": "integer" + }, + "job": { + "type": "string" + }, + "organization": { + "type": "string" + }, + "payload": {}, + "received_at": { + "format": "date-time", + "type": "string" + }, + "repository": { + "type": "string" + }, + "run_id": { + "type": "string" + }, + "workflow": { + "type": "string" + } + }, + "type": "object" + }, + "SizingResponse": { + "description": "SizingResponse schema", + "properties": { + "containers": { + "items": { + "properties": { + "cpu": { + "properties": { + "limit": { + "type": "string" + }, + "request": { + "type": "string" + } + }, + "type": "object" + }, + "memory": { + "properties": { + "limit": { + "type": "string" + }, + "request": { + "type": "string" + } + }, + "type": "object" + }, + "name": { + "type": "string" + } + }, + "type": "object" + }, + "type": "array" + }, + "meta": { + "properties": { + "buffer_percent": { + "type": "integer" + }, + "cpu_percentile": { + "type": "string" + }, + "runs_analyzed": { + "type": "integer" + } + }, + "type": "object" + }, + "total": { + "properties": { + "cpu": { + "properties": { + "limit": { + "type": "string" + }, + "request": { + "type": "string" + } + }, + "type": "object" + }, + "memory": { + "properties": { + "limit": { + "type": "string" + }, + "request": { + "type": "string" + } + }, + "type": "object" + } + }, + "type": "object" + } + }, + "type": "object" + }, + "TokenRequest": { + "description": "TokenRequest schema", + "properties": { + "job": { + "type": "string" + }, + "organization": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "workflow": { + "type": "string" + } + }, + "type": "object" + }, + "TokenResponse": { + "description": "TokenResponse schema", + "properties": { + "token": { + "type": "string" + } + }, + "type": "object" + }, + "unknown-interface": { + "description": "unknown-interface schema" + } + } + }, + "info": { + "contact": { + "name": "API Support", + "url": "https://edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer" + }, + "description": "HTTP service that receives and stores CI/CD resource metrics from collectors, providing query and sizing recommendation APIs.", + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "title": "Forgejo Runner Resource Collector API", + "version": "1.0.0" + }, + "openapi": "3.1.0", + "paths": { + "/api/v1/metrics": { + "post": { + "description": "#### Controller: \n\n`edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).ReceiveMetrics`\n\n#### Middlewares:\n\n- `github.com/go-fuego/fuego.defaultLogger.middleware`\n\n---\n\n", + "operationId": "POST_/api/v1/metrics", + "parameters": [ + { + "in": "header", + "name": "Accept", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": 
"#/components/schemas/MetricCreatedResponse" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/MetricCreatedResponse" + } + } + }, + "description": "OK" + }, + "400": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + }, + "description": "Bad Request _(validation or deserialization error)_" + }, + "500": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + }, + "description": "Internal Server Error _(panics)_" + }, + "default": { + "description": "" + } + }, + "summary": "receive metrics", + "tags": [ + "api/v1" + ] + } + }, + "/api/v1/metrics/repo/{org}/{repo}/{workflow}/{job}": { + "get": { + "description": "#### Controller: \n\n`edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).GetMetricsByWorkflowJob`\n\n#### Middlewares:\n\n- `github.com/go-fuego/fuego.defaultLogger.middleware`\n- `edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).requireReadToken`\n\n---\n\n", + "operationId": "GET_/api/v1/metrics/repo/:org/:repo/:workflow/:job", + "parameters": [ + { + "in": "header", + "name": "Accept", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "org", + "required": true, + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "repo", + "required": true, + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "workflow", + "required": true, + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "job", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "items": { + "$ref": "#/components/schemas/MetricResponse" + }, + "type": "array" + } + }, + "application/xml": { + "schema": { + "items": { + "$ref": "#/components/schemas/MetricResponse" + }, + "type": "array" + } + } + }, + "description": "OK" + }, + "400": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + }, + "description": "Bad Request _(validation or deserialization error)_" + }, + "500": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + }, + "description": "Internal Server Error _(panics)_" + }, + "default": { + "description": "" + } + }, + "summary": "get metrics by workflow job", + "tags": [ + "api/v1" + ] + } + }, + "/api/v1/sizing/repo/{org}/{repo}/{workflow}/{job}": { + "get": { + "description": "#### Controller: \n\n`edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).GetSizing`\n\n#### Middlewares:\n\n- `github.com/go-fuego/fuego.defaultLogger.middleware`\n- `edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).requireReadToken`\n\n---\n\n", + "operationId": "GET_/api/v1/sizing/repo/:org/:repo/:workflow/:job", + "parameters": [ + { + "in": "header", + "name": "Accept", + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "org", + "required": true, + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "repo", + "required": 
true, + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "workflow", + "required": true, + "schema": { + "type": "string" + } + }, + { + "in": "path", + "name": "job", + "required": true, + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/SizingResponse" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/SizingResponse" + } + } + }, + "description": "OK" + }, + "400": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + }, + "description": "Bad Request _(validation or deserialization error)_" + }, + "500": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + }, + "description": "Internal Server Error _(panics)_" + }, + "default": { + "description": "" + } + }, + "summary": "get sizing", + "tags": [ + "api/v1" + ] + } + }, + "/api/v1/token": { + "post": { + "description": "#### Controller: \n\n`edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).GenerateToken`\n\n#### Middlewares:\n\n- `github.com/go-fuego/fuego.defaultLogger.middleware`\n- `edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).requireReadToken`\n\n---\n\n", + "operationId": "POST_/api/v1/token", + "parameters": [ + { + "in": "header", + "name": "Accept", + "schema": { + "type": "string" + } + } + ], + "requestBody": { + "content": { + "*/*": { + "schema": { + "$ref": "#/components/schemas/TokenRequest" + } + } + }, + "description": "Request body for receiver.TokenRequest", + "required": true + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/TokenResponse" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/TokenResponse" + } + } + }, + "description": "OK" + }, + "400": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + }, + "description": "Bad Request _(validation or deserialization error)_" + }, + "500": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + }, + "description": "Internal Server Error _(panics)_" + }, + "default": { + "description": "" + } + }, + "summary": "generate token", + "tags": [ + "api/v1" + ] + } + }, + "/health": { + "get": { + "description": "#### Controller: \n\n`edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).Health`\n\n#### Middlewares:\n\n- `github.com/go-fuego/fuego.defaultLogger.middleware`\n\n---\n\n", + "operationId": "GET_/health", + "parameters": [ + { + "in": "header", + "name": "Accept", + "schema": { + "type": "string" + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HealthResponse" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/HealthResponse" + } + } + }, + "description": "OK" + }, + "400": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + 
} + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + }, + "description": "Bad Request _(validation or deserialization error)_" + }, + "500": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + }, + "application/xml": { + "schema": { + "$ref": "#/components/schemas/HTTPError" + } + } + }, + "description": "Internal Server Error _(panics)_" + }, + "default": { + "description": "" + } + }, + "summary": "health" + } + } + } +} \ No newline at end of file diff --git a/go.mod b/go.mod index a51ecd1..13f846b 100644 --- a/go.mod +++ b/go.mod @@ -3,20 +3,45 @@ module edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer go 1.25.6 require ( + github.com/getkin/kin-openapi v0.133.0 github.com/glebarez/sqlite v1.11.0 + github.com/go-fuego/fuego v0.19.0 + github.com/oapi-codegen/runtime v1.1.2 gorm.io/gorm v1.31.1 ) require ( + github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect + github.com/gabriel-vasile/mimetype v1.4.11 // indirect github.com/glebarez/go-sqlite v1.21.2 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.22.3 // indirect + github.com/go-openapi/swag/jsonname v0.25.4 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.28.0 // indirect + github.com/golang-jwt/jwt/v5 v5.3.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/schema v1.4.1 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/jinzhu/now v1.1.5 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/mailru/easyjson v0.9.1 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect + github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect + github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - golang.org/x/sys v0.7.0 // indirect - golang.org/x/text v0.20.0 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/woodsbury/decimal128 v1.4.0 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/text v0.31.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect modernc.org/libc v1.22.5 // indirect modernc.org/mathutil v1.5.0 // indirect modernc.org/memory v1.5.0 // indirect diff --git a/go.sum b/go.sum index 95df11c..a80e80c 100644 --- a/go.sum +++ b/go.sum @@ -1,27 +1,109 @@ +github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= +github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= +github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= +github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/gabriel-vasile/mimetype v1.4.11 h1:AQvxbp830wPhHTqc1u7nzoLT+ZFxGY7emj5DR5DYFik= +github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/getkin/kin-openapi v0.133.0 h1:pJdmNohVIJ97r4AUFtEXRXwESr8b0bD721u/Tz6k8PQ= +github.com/getkin/kin-openapi v0.133.0/go.mod h1:boAciF6cXk5FhPqe/NQeBTeenbjqU4LhWBf09ILVvWE= github.com/glebarez/go-sqlite v1.21.2 h1:3a6LFC4sKahUunAmynQKLZceZCOzUthkRkEAl9gAXWo= github.com/glebarez/go-sqlite v1.21.2/go.mod h1:sfxdZyhQjTM2Wry3gVYWaW072Ri1WMdWJi0k6+3382k= github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw= github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ= +github.com/go-fuego/fuego v0.19.0 h1:kxkkBsrbGZP1YnPCAPIdUpMu53nreqN8N86lfi50CJw= +github.com/go-fuego/fuego v0.19.0/go.mod h1:O7CLZbvCCBA9ijhN/q8SnyFTzDdMsqYZjUbR82VDHhA= +github.com/go-openapi/jsonpointer v0.22.3 h1:dKMwfV4fmt6Ah90zloTbUKWMD+0he+12XYAsPotrkn8= +github.com/go-openapi/jsonpointer v0.22.3/go.mod h1:0lBbqeRsQ5lIanv3LHZBrmRGHLHcQoOXQnf88fHlGWo= +github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI= +github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag= +github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls= +github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688= +github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= +github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= +github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ= github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/schema v1.4.1 
h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E= +github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= -github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8= +github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/oapi-codegen/runtime v1.1.2 h1:P2+CubHq8fO4Q6fV1tqDBZHCwpVpvPg7oKiYzQgXIyI= +github.com/oapi-codegen/runtime v1.1.2/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= +github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY= +github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw= +github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c= +github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o= +github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= +github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec 
h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= -golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/thejerf/slogassert v0.3.4 h1:VoTsXixRbXMrRSSxDjYTiEDCM4VWbsYPW5rB/hX24kM= +github.com/thejerf/slogassert v0.3.4/go.mod h1:0zn9ISLVKo1aPMTqcGfG1o6dWwt+Rk574GlUxHD4rs8= +github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA= +github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4= +github.com/woodsbury/decimal128 v1.4.0 h1:xJATj7lLu4f2oObouMt2tgGiElE5gO6mSWUjQsBgUlc= +github.com/woodsbury/decimal128 v1.4.0/go.mod h1:BP46FUrVjVhdTbKT+XuQh2xfQaGki9LMIRJSFuh6THU= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg= gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs= modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE= diff --git a/internal/integration/integration_test.go b/internal/integration/integration_test.go index 91f532c..915998c 100644 --- a/internal/integration/integration_test.go +++ b/internal/integration/integration_test.go @@ -14,6 +14,8 @@ import ( "testing" "time" + "github.com/go-fuego/fuego" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver" "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary" ) @@ 
-33,10 +35,17 @@ func setupTestReceiver(t *testing.T) (*receiver.Store, *httptest.Server, func()) } handler := receiver.NewHandler(store, slog.New(slog.NewTextHandler(io.Discard, nil)), testReadToken, testHMACKey, 0) - mux := http.NewServeMux() - handler.RegisterRoutes(mux) + s := fuego.NewServer( + fuego.WithoutStartupMessages(), + fuego.WithEngineOptions( + fuego.WithOpenAPIConfig(fuego.OpenAPIConfig{ + Disabled: true, + }), + ), + ) + handler.RegisterRoutes(s) - server := httptest.NewServer(mux) + server := httptest.NewServer(s.Mux) cleanup := func() { server.Close() @@ -372,10 +381,17 @@ func setupTestReceiverWithToken(t *testing.T, readToken, hmacKey string) (*recei } handler := receiver.NewHandler(store, slog.New(slog.NewTextHandler(io.Discard, nil)), readToken, hmacKey, 0) - mux := http.NewServeMux() - handler.RegisterRoutes(mux) + s := fuego.NewServer( + fuego.WithoutStartupMessages(), + fuego.WithEngineOptions( + fuego.WithOpenAPIConfig(fuego.OpenAPIConfig{ + Disabled: true, + }), + ), + ) + handler.RegisterRoutes(s) - server := httptest.NewServer(mux) + server := httptest.NewServer(s.Mux) cleanup := func() { server.Close() diff --git a/internal/receiver/handler.go b/internal/receiver/handler.go index 57c09b5..e42365d 100644 --- a/internal/receiver/handler.go +++ b/internal/receiver/handler.go @@ -1,15 +1,18 @@ -// ABOUTME: HTTP handlers for the metrics receiver service. -// ABOUTME: Provides endpoints for receiving and querying metrics. +// ABOUTME: HTTP handlers for the metrics receiver service using Fuego framework. +// ABOUTME: Provides endpoints for receiving and querying metrics with automatic OpenAPI generation. package receiver import ( "crypto/subtle" "encoding/json" + "errors" "fmt" "log/slog" "net/http" "strings" "time" + + "github.com/go-fuego/fuego" ) // Handler handles HTTP requests for the metrics receiver @@ -32,128 +35,175 @@ func NewHandler(store *Store, logger *slog.Logger, readToken, hmacKey string, to return &Handler{store: store, logger: logger, readToken: readToken, hmacKey: hmacKey, tokenTTL: tokenTTL} } -// RegisterRoutes registers all HTTP routes on the given mux -func (h *Handler) RegisterRoutes(mux *http.ServeMux) { - mux.HandleFunc("POST /api/v1/metrics", h.handleReceiveMetrics) - mux.HandleFunc("POST /api/v1/token", h.handleGenerateToken) - mux.HandleFunc("GET /api/v1/metrics/repo/{org}/{repo}/{workflow}/{job}", h.handleGetByWorkflowJob) - mux.HandleFunc("GET /api/v1/sizing/repo/{org}/{repo}/{workflow}/{job}", h.handleGetSizing) - mux.HandleFunc("GET /health", h.handleHealth) +// Common errors +var ( + ErrUnauthorized = errors.New("authorization required") + ErrInvalidToken = errors.New("invalid token") + ErrInvalidFormat = errors.New("invalid authorization format") + ErrMissingHMACKey = errors.New("token generation requires a configured HMAC key") + ErrMissingFields = errors.New("organization, repository, workflow, and job are required") + ErrMissingRunID = errors.New("run_id is required") + ErrInvalidParams = errors.New("org, repo, workflow and job are required") + ErrNoMetrics = errors.New("no metrics found for this workflow/job") + ErrInvalidPercent = errors.New("invalid cpu_percentile: must be one of peak, p99, p95, p75, p50, avg") +) + +// HealthResponse is the response for the health endpoint +type HealthResponse struct { + Status string `json:"status"` } -// validateReadToken checks the Authorization header for a valid Bearer token. 
-func (h *Handler) validateReadToken(w http.ResponseWriter, r *http.Request) bool { - if h.readToken == "" { - h.logger.Warn("no read-token configured, rejecting request", slog.String("path", r.URL.Path)) - http.Error(w, "authorization required", http.StatusUnauthorized) - return false - } - - authHeader := r.Header.Get("Authorization") - if authHeader == "" { - h.logger.Warn("missing authorization header", slog.String("path", r.URL.Path)) - http.Error(w, "authorization required", http.StatusUnauthorized) - return false - } - - const bearerPrefix = "Bearer " - if !strings.HasPrefix(authHeader, bearerPrefix) { - h.logger.Warn("invalid authorization format", slog.String("path", r.URL.Path)) - http.Error(w, "invalid authorization format", http.StatusUnauthorized) - return false - } - - token := strings.TrimPrefix(authHeader, bearerPrefix) - if subtle.ConstantTimeCompare([]byte(token), []byte(h.readToken)) != 1 { - h.logger.Warn("invalid token", slog.String("path", r.URL.Path)) - http.Error(w, "invalid token", http.StatusUnauthorized) - return false - } - - return true +// MetricCreatedResponse is the response when a metric is successfully created +type MetricCreatedResponse struct { + ID uint `json:"id"` + Status string `json:"status"` } -func (h *Handler) handleGenerateToken(w http.ResponseWriter, r *http.Request) { - if h.hmacKey == "" { - http.Error(w, "token generation requires a configured HMAC key", http.StatusBadRequest) - return - } - - if !h.validateReadToken(w, r) { - return - } - - var req TokenRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "invalid JSON body", http.StatusBadRequest) - return - } - - if req.Organization == "" || req.Repository == "" || req.Workflow == "" || req.Job == "" { - http.Error(w, "organization, repository, workflow, and job are required", http.StatusBadRequest) - return - } - - token := GenerateToken(h.hmacKey, req.Organization, req.Repository, req.Workflow, req.Job) - - w.Header().Set("Content-Type", "application/json") - _ = json.NewEncoder(w).Encode(TokenResponse{Token: token}) +// GetMetricsRequest contains path parameters for getting metrics +type GetMetricsRequest struct { + Org string `path:"org"` + Repo string `path:"repo"` + Workflow string `path:"workflow"` + Job string `path:"job"` } -// validatePushToken checks push authentication via scoped HMAC token. 
-func (h *Handler) validatePushToken(w http.ResponseWriter, r *http.Request, exec ExecutionContext) bool { +// GetSizingRequest contains path and query parameters for sizing endpoint +type GetSizingRequest struct { + Org string `path:"org"` + Repo string `path:"repo"` + Workflow string `path:"workflow"` + Job string `path:"job"` + Runs int `query:"runs" default:"5" validate:"min=1,max=100" description:"Number of recent runs to analyze"` + Buffer int `query:"buffer" default:"20" validate:"min=0,max=100" description:"Buffer percentage to add"` + CPUPercentile string `query:"cpu_percentile" default:"p95" description:"CPU percentile to use (peak, p99, p95, p75, p50, avg)"` +} + +// RegisterRoutes registers all HTTP routes on the Fuego server +func (h *Handler) RegisterRoutes(s *fuego.Server) { + // Health endpoint (no auth) + fuego.Get(s, "/health", h.Health) + + // API group with authentication + api := fuego.Group(s, "/api/v1") + + // Token generation (requires read token) + fuego.Post(api, "/token", h.GenerateToken, fuego.OptionMiddleware(h.requireReadToken)) + + // Metrics endpoints + fuego.Post(api, "/metrics", h.ReceiveMetrics) // Uses push token validated in handler + fuego.Get(api, "/metrics/repo/{org}/{repo}/{workflow}/{job}", h.GetMetricsByWorkflowJob, fuego.OptionMiddleware(h.requireReadToken)) + + // Sizing endpoint + fuego.Get(api, "/sizing/repo/{org}/{repo}/{workflow}/{job}", h.GetSizing, fuego.OptionMiddleware(h.requireReadToken)) +} + +// requireReadToken is middleware that validates the read token +func (h *Handler) requireReadToken(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if h.readToken == "" { + h.logger.Warn("no read-token configured, rejecting request", slog.String("path", r.URL.Path)) + http.Error(w, "authorization required", http.StatusUnauthorized) + return + } + + authHeader := r.Header.Get("Authorization") + if authHeader == "" { + h.logger.Warn("missing authorization header", slog.String("path", r.URL.Path)) + http.Error(w, "authorization required", http.StatusUnauthorized) + return + } + + const bearerPrefix = "Bearer " + if !strings.HasPrefix(authHeader, bearerPrefix) { + h.logger.Warn("invalid authorization format", slog.String("path", r.URL.Path)) + http.Error(w, "invalid authorization format", http.StatusUnauthorized) + return + } + + token := strings.TrimPrefix(authHeader, bearerPrefix) + if subtle.ConstantTimeCompare([]byte(token), []byte(h.readToken)) != 1 { + h.logger.Warn("invalid token", slog.String("path", r.URL.Path)) + http.Error(w, "invalid token", http.StatusUnauthorized) + return + } + + next.ServeHTTP(w, r) + }) +} + +// validatePushToken checks push authentication via scoped HMAC token +func (h *Handler) validatePushToken(r *http.Request, exec ExecutionContext) error { if h.hmacKey == "" { h.logger.Warn("no HMAC key configured, rejecting push", slog.String("path", r.URL.Path)) - http.Error(w, "authorization required", http.StatusUnauthorized) - return false + return ErrUnauthorized } authHeader := r.Header.Get("Authorization") if authHeader == "" { h.logger.Warn("missing push authorization", slog.String("path", r.URL.Path)) - http.Error(w, "authorization required", http.StatusUnauthorized) - return false + return ErrUnauthorized } const bearerPrefix = "Bearer " if !strings.HasPrefix(authHeader, bearerPrefix) { h.logger.Warn("invalid push authorization format", slog.String("path", r.URL.Path)) - http.Error(w, "invalid authorization format", http.StatusUnauthorized) - return false + 
return ErrInvalidFormat } token := strings.TrimPrefix(authHeader, bearerPrefix) if !ValidateToken(h.hmacKey, token, exec.Organization, exec.Repository, exec.Workflow, exec.Job, h.tokenTTL) { h.logger.Warn("invalid push token", slog.String("path", r.URL.Path)) - http.Error(w, "invalid token", http.StatusUnauthorized) - return false + return ErrInvalidToken } - return true + return nil } -func (h *Handler) handleReceiveMetrics(w http.ResponseWriter, r *http.Request) { +// Health returns the service health status +func (h *Handler) Health(c fuego.ContextNoBody) (HealthResponse, error) { + return HealthResponse{Status: "ok"}, nil +} + +// GenerateToken generates a scoped HMAC push token for a workflow/job +func (h *Handler) GenerateToken(c fuego.ContextWithBody[TokenRequest]) (TokenResponse, error) { + if h.hmacKey == "" { + return TokenResponse{}, fuego.BadRequestError{Detail: ErrMissingHMACKey.Error()} + } + + req, err := c.Body() + if err != nil { + return TokenResponse{}, fuego.BadRequestError{Detail: "invalid JSON body"} + } + + if req.Organization == "" || req.Repository == "" || req.Workflow == "" || req.Job == "" { + return TokenResponse{}, fuego.BadRequestError{Detail: ErrMissingFields.Error()} + } + + token := GenerateToken(h.hmacKey, req.Organization, req.Repository, req.Workflow, req.Job) + return TokenResponse{Token: token}, nil +} + +// ReceiveMetrics receives and stores metrics from a collector +func (h *Handler) ReceiveMetrics(c fuego.ContextNoBody) (MetricCreatedResponse, error) { var payload MetricsPayload - if err := json.NewDecoder(r.Body).Decode(&payload); err != nil { + if err := json.NewDecoder(c.Request().Body).Decode(&payload); err != nil { h.logger.Error("failed to decode payload", slog.String("error", err.Error())) - http.Error(w, "invalid JSON payload", http.StatusBadRequest) - return + return MetricCreatedResponse{}, fuego.BadRequestError{Detail: "invalid JSON payload"} } if payload.Execution.RunID == "" { - http.Error(w, "run_id is required", http.StatusBadRequest) - return + return MetricCreatedResponse{}, fuego.BadRequestError{Detail: ErrMissingRunID.Error()} } - if !h.validatePushToken(w, r, payload.Execution) { - return + // Validate push token + if err := h.validatePushToken(c.Request(), payload.Execution); err != nil { + return MetricCreatedResponse{}, fuego.UnauthorizedError{Detail: err.Error()} } id, err := h.store.SaveMetric(&payload) if err != nil { h.logger.Error("failed to save metric", slog.String("error", err.Error())) - http.Error(w, "failed to save metric", http.StatusInternalServerError) - return + return MetricCreatedResponse{}, fuego.InternalServerError{Detail: "failed to save metric"} } h.logger.Info("metric saved", @@ -162,30 +212,25 @@ func (h *Handler) handleReceiveMetrics(w http.ResponseWriter, r *http.Request) { slog.String("repository", payload.Execution.Repository), ) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusCreated) - _ = json.NewEncoder(w).Encode(map[string]any{"id": id, "status": "created"}) + c.SetStatus(http.StatusCreated) + return MetricCreatedResponse{ID: id, Status: "created"}, nil } -func (h *Handler) handleGetByWorkflowJob(w http.ResponseWriter, r *http.Request) { - if !h.validateReadToken(w, r) { - return - } +// GetMetricsByWorkflowJob retrieves all metrics for a specific workflow/job +func (h *Handler) GetMetricsByWorkflowJob(c fuego.ContextNoBody) ([]MetricResponse, error) { + org := c.PathParam("org") + repo := c.PathParam("repo") + workflow := c.PathParam("workflow") + job := 
c.PathParam("job") - org := r.PathValue("org") - repo := r.PathValue("repo") - workflow := r.PathValue("workflow") - job := r.PathValue("job") if org == "" || repo == "" || workflow == "" || job == "" { - http.Error(w, "org, repo, workflow and job are required", http.StatusBadRequest) - return + return nil, fuego.BadRequestError{Detail: ErrInvalidParams.Error()} } metrics, err := h.store.GetMetricsByWorkflowJob(org, repo, workflow, job) if err != nil { h.logger.Error("failed to get metrics", slog.String("error", err.Error())) - http.Error(w, "failed to get metrics", http.StatusInternalServerError) - return + return nil, fuego.InternalServerError{Detail: "failed to get metrics"} } // Convert to response type with Payload as JSON object @@ -194,67 +239,53 @@ func (h *Handler) handleGetByWorkflowJob(w http.ResponseWriter, r *http.Request) response[i] = m.ToResponse() } - w.Header().Set("Content-Type", "application/json") - _ = json.NewEncoder(w).Encode(response) + return response, nil } -func (h *Handler) handleHealth(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - _ = json.NewEncoder(w).Encode(map[string]string{"status": "ok"}) -} +// GetSizing computes Kubernetes resource sizing recommendations +func (h *Handler) GetSizing(c fuego.ContextNoBody) (SizingResponse, error) { + org := c.PathParam("org") + repo := c.PathParam("repo") + workflow := c.PathParam("workflow") + job := c.PathParam("job") -func (h *Handler) handleGetSizing(w http.ResponseWriter, r *http.Request) { - if !h.validateReadToken(w, r) { - return - } - - org := r.PathValue("org") - repo := r.PathValue("repo") - workflow := r.PathValue("workflow") - job := r.PathValue("job") if org == "" || repo == "" || workflow == "" || job == "" { - http.Error(w, "org, repo, workflow and job are required", http.StatusBadRequest) - return + return SizingResponse{}, fuego.BadRequestError{Detail: ErrInvalidParams.Error()} } // Parse query parameters with defaults - runs := parseIntQueryParam(r, "runs", 5, 1, 100) - buffer := parseIntQueryParam(r, "buffer", 20, 0, 100) - cpuPercentile := r.URL.Query().Get("cpu_percentile") + runs := parseIntQueryParamFromContext(c, "runs", 5, 1, 100) + buffer := parseIntQueryParamFromContext(c, "buffer", 20, 0, 100) + cpuPercentile := c.QueryParam("cpu_percentile") if cpuPercentile == "" { cpuPercentile = "p95" } if !IsValidPercentile(cpuPercentile) { - http.Error(w, "invalid cpu_percentile: must be one of peak, p99, p95, p75, p50, avg", http.StatusBadRequest) - return + return SizingResponse{}, fuego.BadRequestError{Detail: ErrInvalidPercent.Error()} } metrics, err := h.store.GetRecentMetricsByWorkflowJob(org, repo, workflow, job, runs) if err != nil { h.logger.Error("failed to get metrics", slog.String("error", err.Error())) - http.Error(w, "failed to get metrics", http.StatusInternalServerError) - return + return SizingResponse{}, fuego.InternalServerError{Detail: "failed to get metrics"} } if len(metrics) == 0 { - http.Error(w, "no metrics found for this workflow/job", http.StatusNotFound) - return + return SizingResponse{}, fuego.NotFoundError{Detail: ErrNoMetrics.Error()} } response, err := computeSizing(metrics, buffer, cpuPercentile) if err != nil { h.logger.Error("failed to compute sizing", slog.String("error", err.Error())) - http.Error(w, "failed to compute sizing", http.StatusInternalServerError) - return + return SizingResponse{}, fuego.InternalServerError{Detail: "failed to compute sizing"} } - w.Header().Set("Content-Type", "application/json") - _ = 
json.NewEncoder(w).Encode(response) + return *response, nil } -// parseIntQueryParam parses an integer query parameter with default, min, and max values -func parseIntQueryParam(r *http.Request, name string, defaultVal, minVal, maxVal int) int { - strVal := r.URL.Query().Get(name) +// parseIntQueryParamFromContext parses an integer query parameter with default, min, and max values +func parseIntQueryParamFromContext(c fuego.ContextNoBody, name string, defaultVal, minVal, maxVal int) int { + strVal := c.QueryParam(name) if strVal == "" { return defaultVal } diff --git a/internal/receiver/handler_test.go b/internal/receiver/handler_test.go index dcf1791..3a2a2d3 100644 --- a/internal/receiver/handler_test.go +++ b/internal/receiver/handler_test.go @@ -11,6 +11,8 @@ import ( "strings" "testing" + "github.com/go-fuego/fuego" + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary" ) @@ -42,9 +44,8 @@ func TestHandler_ReceiveMetrics(t *testing.T) { req.Header.Set("Authorization", "Bearer "+pushToken) rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusCreated { t.Errorf("status = %d, want %d", rec.Code, http.StatusCreated) @@ -69,9 +70,8 @@ func TestHandler_ReceiveMetrics_InvalidJSON(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "/api/v1/metrics", bytes.NewReader([]byte("not json"))) rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusBadRequest { t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) @@ -95,9 +95,8 @@ func TestHandler_ReceiveMetrics_MissingRunID(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "/api/v1/metrics", bytes.NewReader(body)) rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusBadRequest { t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) @@ -125,9 +124,8 @@ func TestHandler_GetByWorkflowJob(t *testing.T) { req.Header.Set("Authorization", "Bearer "+readToken) rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusOK { t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) @@ -151,9 +149,8 @@ func TestHandler_GetByWorkflowJob_NotFound(t *testing.T) { req.Header.Set("Authorization", "Bearer "+readToken) rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusOK { t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) @@ -180,8 +177,7 @@ func TestHandler_GetByWorkflowJob_WithToken(t *testing.T) { t.Fatalf("SaveMetric() error = %v", err) } - mux := http.NewServeMux() - h.RegisterRoutes(mux) + s := newTestServer(h) tests := []struct { name string @@ -201,7 +197,7 @@ func TestHandler_GetByWorkflowJob_WithToken(t *testing.T) { req.Header.Set("Authorization", tt.authHeader) } rec := httptest.NewRecorder() - mux.ServeHTTP(rec, req) + s.Mux.ServeHTTP(rec, req) if rec.Code != tt.wantCode { t.Errorf("status = %d, want %d", rec.Code, tt.wantCode) @@ -217,9 +213,8 @@ func TestHandler_Health(t *testing.T) { req := httptest.NewRequest(http.MethodGet, "/health", 
nil) rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusOK { t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) @@ -250,9 +245,8 @@ func TestHandler_GenerateToken(t *testing.T) { req.Header.Set("Content-Type", "application/json") rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusOK { t.Fatalf("status = %d, want %d", rec.Code, http.StatusOK) @@ -289,9 +283,8 @@ func TestHandler_GenerateToken_NoAuth(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "/api/v1/token", bytes.NewReader(body)) rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusUnauthorized { t.Errorf("status = %d, want %d", rec.Code, http.StatusUnauthorized) @@ -314,9 +307,8 @@ func TestHandler_GenerateToken_MissingFields(t *testing.T) { req.Header.Set("Content-Type", "application/json") rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusBadRequest { t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) @@ -338,12 +330,12 @@ func TestHandler_GenerateToken_NoReadToken(t *testing.T) { req.Header.Set("Content-Type", "application/json") rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) - if rec.Code != http.StatusBadRequest { - t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) + // With no read token, the middleware rejects before we reach the handler + if rec.Code != http.StatusUnauthorized { + t.Errorf("status = %d, want %d", rec.Code, http.StatusUnauthorized) } } @@ -352,8 +344,7 @@ func TestHandler_ReceiveMetrics_WithPushToken(t *testing.T) { h, cleanup := newTestHandlerWithToken(t, readToken) defer cleanup() - mux := http.NewServeMux() - h.RegisterRoutes(mux) + s := newTestServer(h) exec := ExecutionContext{ Organization: "org", @@ -391,7 +382,7 @@ func TestHandler_ReceiveMetrics_WithPushToken(t *testing.T) { req.Header.Set("Authorization", tt.authHeader) } rec := httptest.NewRecorder() - mux.ServeHTTP(rec, req) + s.Mux.ServeHTTP(rec, req) if rec.Code != tt.wantCode { t.Errorf("status = %d, want %d", rec.Code, tt.wantCode) @@ -420,9 +411,8 @@ func TestHandler_ReceiveMetrics_RejectsWhenNoReadToken(t *testing.T) { req.Header.Set("Content-Type", "application/json") rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusUnauthorized { t.Errorf("status = %d, want %d", rec.Code, http.StatusUnauthorized) @@ -436,15 +426,27 @@ func TestHandler_GetByWorkflowJob_RejectsWhenNoReadToken(t *testing.T) { req := httptest.NewRequest(http.MethodGet, "/api/v1/metrics/repo/org/repo/ci.yml/build", nil) rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusUnauthorized { t.Errorf("status = %d, want %d", rec.Code, http.StatusUnauthorized) } } +func newTestServer(h *Handler) *fuego.Server { + s := 
fuego.NewServer( + fuego.WithoutStartupMessages(), + fuego.WithEngineOptions( + fuego.WithOpenAPIConfig(fuego.OpenAPIConfig{ + Disabled: true, + }), + ), + ) + h.RegisterRoutes(s) + return s +} + func newTestHandler(t *testing.T) (*Handler, func()) { t.Helper() dbPath := filepath.Join(t.TempDir(), "test.db") diff --git a/internal/receiver/sizing_test.go b/internal/receiver/sizing_test.go index ce57210..05c3297 100644 --- a/internal/receiver/sizing_test.go +++ b/internal/receiver/sizing_test.go @@ -342,9 +342,8 @@ func TestHandler_GetSizing(t *testing.T) { req.Header.Set("Authorization", "Bearer "+readToken) rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusOK { t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) @@ -396,9 +395,8 @@ func TestHandler_GetSizing_CustomParams(t *testing.T) { req.Header.Set("Authorization", "Bearer "+readToken) rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusOK { t.Errorf("status = %d, want %d", rec.Code, http.StatusOK) @@ -432,9 +430,8 @@ func TestHandler_GetSizing_NotFound(t *testing.T) { req.Header.Set("Authorization", "Bearer "+readToken) rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusNotFound { t.Errorf("status = %d, want %d", rec.Code, http.StatusNotFound) @@ -450,9 +447,8 @@ func TestHandler_GetSizing_InvalidPercentile(t *testing.T) { req.Header.Set("Authorization", "Bearer "+readToken) rec := httptest.NewRecorder() - mux := http.NewServeMux() - h.RegisterRoutes(mux) - mux.ServeHTTP(rec, req) + s := newTestServer(h) + s.Mux.ServeHTTP(rec, req) if rec.Code != http.StatusBadRequest { t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest) @@ -474,8 +470,7 @@ func TestHandler_GetSizing_AuthRequired(t *testing.T) { {"valid token", "Bearer " + readToken, http.StatusNotFound}, // no metrics, but auth works } - mux := http.NewServeMux() - h.RegisterRoutes(mux) + s := newTestServer(h) for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -484,7 +479,7 @@ func TestHandler_GetSizing_AuthRequired(t *testing.T) { req.Header.Set("Authorization", tt.authHeader) } rec := httptest.NewRecorder() - mux.ServeHTTP(rec, req) + s.Mux.ServeHTTP(rec, req) if rec.Code != tt.wantCode { t.Errorf("status = %d, want %d", rec.Code, tt.wantCode) diff --git a/pkg/client/client.gen.go b/pkg/client/client.gen.go new file mode 100644 index 0000000..1085246 --- /dev/null +++ b/pkg/client/client.gen.go @@ -0,0 +1,1096 @@ +// Package client provides primitives to interact with the openapi HTTP API. +// +// Code generated by github.com/oapi-codegen/oapi-codegen/v2 version v2.5.1 DO NOT EDIT. 
+package client + +import ( + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "github.com/oapi-codegen/runtime" +) + +// HTTPError HTTPError schema +type HTTPError struct { + // Detail Human readable error message + Detail *string `json:"detail"` + Errors *[]struct { + // More Additional information about the error + More *map[string]*interface{} `json:"more"` + + // Name For example, name of the parameter that caused the error + Name *string `json:"name,omitempty"` + + // Reason Human readable error message + Reason *string `json:"reason,omitempty"` + } `json:"errors"` + Instance *string `json:"instance"` + + // Status HTTP status code + Status *int `json:"status"` + + // Title Short title of the error + Title *string `json:"title"` + + // Type URL of the error type. Can be used to lookup the error in a documentation + Type *string `json:"type"` +} + +// HealthResponse HealthResponse schema +type HealthResponse struct { + Status *string `json:"status,omitempty"` +} + +// MetricCreatedResponse MetricCreatedResponse schema +type MetricCreatedResponse struct { + Id *int `json:"id,omitempty"` + Status *string `json:"status,omitempty"` +} + +// MetricResponse MetricResponse schema +type MetricResponse struct { + Id *int `json:"id,omitempty"` + Job *string `json:"job,omitempty"` + Organization *string `json:"organization,omitempty"` + Payload interface{} `json:"payload,omitempty"` + ReceivedAt *time.Time `json:"received_at,omitempty"` + Repository *string `json:"repository,omitempty"` + RunId *string `json:"run_id,omitempty"` + Workflow *string `json:"workflow,omitempty"` +} + +// SizingResponse SizingResponse schema +type SizingResponse struct { + Containers *[]struct { + Cpu *struct { + Limit *string `json:"limit,omitempty"` + Request *string `json:"request,omitempty"` + } `json:"cpu,omitempty"` + Memory *struct { + Limit *string `json:"limit,omitempty"` + Request *string `json:"request,omitempty"` + } `json:"memory,omitempty"` + Name *string `json:"name,omitempty"` + } `json:"containers,omitempty"` + Meta *struct { + BufferPercent *int `json:"buffer_percent,omitempty"` + CpuPercentile *string `json:"cpu_percentile,omitempty"` + RunsAnalyzed *int `json:"runs_analyzed,omitempty"` + } `json:"meta,omitempty"` + Total *struct { + Cpu *struct { + Limit *string `json:"limit,omitempty"` + Request *string `json:"request,omitempty"` + } `json:"cpu,omitempty"` + Memory *struct { + Limit *string `json:"limit,omitempty"` + Request *string `json:"request,omitempty"` + } `json:"memory,omitempty"` + } `json:"total,omitempty"` +} + +// TokenRequest TokenRequest schema +type TokenRequest struct { + Job *string `json:"job,omitempty"` + Organization *string `json:"organization,omitempty"` + Repository *string `json:"repository,omitempty"` + Workflow *string `json:"workflow,omitempty"` +} + +// TokenResponse TokenResponse schema +type TokenResponse struct { + Token *string `json:"token,omitempty"` +} + +// POSTapiv1metricsParams defines parameters for POSTapiv1metrics. +type POSTapiv1metricsParams struct { + Accept *string `json:"Accept,omitempty"` +} + +// GETapiv1metricsrepoOrgRepoWorkflowJobParams defines parameters for GETapiv1metricsrepoOrgRepoWorkflowJob. +type GETapiv1metricsrepoOrgRepoWorkflowJobParams struct { + Accept *string `json:"Accept,omitempty"` +} + +// GETapiv1sizingrepoOrgRepoWorkflowJobParams defines parameters for GETapiv1sizingrepoOrgRepoWorkflowJob. 
+type GETapiv1sizingrepoOrgRepoWorkflowJobParams struct { + Accept *string `json:"Accept,omitempty"` +} + +// POSTapiv1tokenParams defines parameters for POSTapiv1token. +type POSTapiv1tokenParams struct { + Accept *string `json:"Accept,omitempty"` +} + +// GEThealthParams defines parameters for GEThealth. +type GEThealthParams struct { + Accept *string `json:"Accept,omitempty"` +} + +// RequestEditorFn is the function signature for the RequestEditor callback function +type RequestEditorFn func(ctx context.Context, req *http.Request) error + +// Doer performs HTTP requests. +// +// The standard http.Client implements this interface. +type HttpRequestDoer interface { + Do(req *http.Request) (*http.Response, error) +} + +// Client which conforms to the OpenAPI3 specification for this service. +type Client struct { + // The endpoint of the server conforming to this interface, with scheme, + // https://api.deepmap.com for example. This can contain a path relative + // to the server, such as https://api.deepmap.com/dev-test, and all the + // paths in the swagger spec will be appended to the server. + Server string + + // Doer for performing requests, typically a *http.Client with any + // customized settings, such as certificate chains. + Client HttpRequestDoer + + // A list of callbacks for modifying requests which are generated before sending over + // the network. + RequestEditors []RequestEditorFn +} + +// ClientOption allows setting custom parameters during construction +type ClientOption func(*Client) error + +// Creates a new Client, with reasonable defaults +func NewClient(server string, opts ...ClientOption) (*Client, error) { + // create a client with sane default values + client := Client{ + Server: server, + } + // mutate client and add all optional params + for _, o := range opts { + if err := o(&client); err != nil { + return nil, err + } + } + // ensure the server URL always has a trailing slash + if !strings.HasSuffix(client.Server, "/") { + client.Server += "/" + } + // create httpClient, if not already present + if client.Client == nil { + client.Client = &http.Client{} + } + return &client, nil +} + +// WithHTTPClient allows overriding the default Doer, which is +// automatically created using http.Client. This is useful for tests. +func WithHTTPClient(doer HttpRequestDoer) ClientOption { + return func(c *Client) error { + c.Client = doer + return nil + } +} + +// WithRequestEditorFn allows setting up a callback function, which will be +// called right before sending the request. This can be used to mutate the request. +func WithRequestEditorFn(fn RequestEditorFn) ClientOption { + return func(c *Client) error { + c.RequestEditors = append(c.RequestEditors, fn) + return nil + } +} + +// The interface specification for the client above. 
+type ClientInterface interface { + // POSTapiv1metrics request + POSTapiv1metrics(ctx context.Context, params *POSTapiv1metricsParams, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GETapiv1metricsrepoOrgRepoWorkflowJob request + GETapiv1metricsrepoOrgRepoWorkflowJob(ctx context.Context, org string, repo string, workflow string, job string, params *GETapiv1metricsrepoOrgRepoWorkflowJobParams, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GETapiv1sizingrepoOrgRepoWorkflowJob request + GETapiv1sizingrepoOrgRepoWorkflowJob(ctx context.Context, org string, repo string, workflow string, job string, params *GETapiv1sizingrepoOrgRepoWorkflowJobParams, reqEditors ...RequestEditorFn) (*http.Response, error) + + // POSTapiv1tokenWithBody request with any body + POSTapiv1tokenWithBody(ctx context.Context, params *POSTapiv1tokenParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) + + // GEThealth request + GEThealth(ctx context.Context, params *GEThealthParams, reqEditors ...RequestEditorFn) (*http.Response, error) +} + +func (c *Client) POSTapiv1metrics(ctx context.Context, params *POSTapiv1metricsParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPOSTapiv1metricsRequest(c.Server, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GETapiv1metricsrepoOrgRepoWorkflowJob(ctx context.Context, org string, repo string, workflow string, job string, params *GETapiv1metricsrepoOrgRepoWorkflowJobParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGETapiv1metricsrepoOrgRepoWorkflowJobRequest(c.Server, org, repo, workflow, job, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GETapiv1sizingrepoOrgRepoWorkflowJob(ctx context.Context, org string, repo string, workflow string, job string, params *GETapiv1sizingrepoOrgRepoWorkflowJobParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGETapiv1sizingrepoOrgRepoWorkflowJobRequest(c.Server, org, repo, workflow, job, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) POSTapiv1tokenWithBody(ctx context.Context, params *POSTapiv1tokenParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewPOSTapiv1tokenRequestWithBody(c.Server, params, contentType, body) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +func (c *Client) GEThealth(ctx context.Context, params *GEThealthParams, reqEditors ...RequestEditorFn) (*http.Response, error) { + req, err := NewGEThealthRequest(c.Server, params) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if err := c.applyEditors(ctx, req, reqEditors); err != nil { + return nil, err + } + return c.Client.Do(req) +} + +// NewPOSTapiv1metricsRequest generates requests for POSTapiv1metrics +func NewPOSTapiv1metricsRequest(server string, params *POSTapiv1metricsParams) (*http.Request, error) { + 
var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/metrics") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), nil) + if err != nil { + return nil, err + } + + if params != nil { + + if params.Accept != nil { + var headerParam0 string + + headerParam0, err = runtime.StyleParamWithLocation("simple", false, "Accept", runtime.ParamLocationHeader, *params.Accept) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", headerParam0) + } + + } + + return req, nil +} + +// NewGETapiv1metricsrepoOrgRepoWorkflowJobRequest generates requests for GETapiv1metricsrepoOrgRepoWorkflowJob +func NewGETapiv1metricsrepoOrgRepoWorkflowJobRequest(server string, org string, repo string, workflow string, job string, params *GETapiv1metricsrepoOrgRepoWorkflowJobParams) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "org", runtime.ParamLocationPath, org) + if err != nil { + return nil, err + } + + var pathParam1 string + + pathParam1, err = runtime.StyleParamWithLocation("simple", false, "repo", runtime.ParamLocationPath, repo) + if err != nil { + return nil, err + } + + var pathParam2 string + + pathParam2, err = runtime.StyleParamWithLocation("simple", false, "workflow", runtime.ParamLocationPath, workflow) + if err != nil { + return nil, err + } + + var pathParam3 string + + pathParam3, err = runtime.StyleParamWithLocation("simple", false, "job", runtime.ParamLocationPath, job) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/metrics/repo/%s/%s/%s/%s", pathParam0, pathParam1, pathParam2, pathParam3) + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + if params != nil { + + if params.Accept != nil { + var headerParam0 string + + headerParam0, err = runtime.StyleParamWithLocation("simple", false, "Accept", runtime.ParamLocationHeader, *params.Accept) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", headerParam0) + } + + } + + return req, nil +} + +// NewGETapiv1sizingrepoOrgRepoWorkflowJobRequest generates requests for GETapiv1sizingrepoOrgRepoWorkflowJob +func NewGETapiv1sizingrepoOrgRepoWorkflowJobRequest(server string, org string, repo string, workflow string, job string, params *GETapiv1sizingrepoOrgRepoWorkflowJobParams) (*http.Request, error) { + var err error + + var pathParam0 string + + pathParam0, err = runtime.StyleParamWithLocation("simple", false, "org", runtime.ParamLocationPath, org) + if err != nil { + return nil, err + } + + var pathParam1 string + + pathParam1, err = runtime.StyleParamWithLocation("simple", false, "repo", runtime.ParamLocationPath, repo) + if err != nil { + return nil, err + } + + var pathParam2 string + + pathParam2, err = runtime.StyleParamWithLocation("simple", false, "workflow", runtime.ParamLocationPath, workflow) + if err != nil { + return nil, err + } + + var pathParam3 string + + pathParam3, err = runtime.StyleParamWithLocation("simple", false, "job", runtime.ParamLocationPath, job) + if err != nil { + return nil, err + } + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/sizing/repo/%s/%s/%s/%s", pathParam0, pathParam1, pathParam2, pathParam3) + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + if params != nil { + + if params.Accept != nil { + var headerParam0 string + + headerParam0, err = runtime.StyleParamWithLocation("simple", false, "Accept", runtime.ParamLocationHeader, *params.Accept) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", headerParam0) + } + + } + + return req, nil +} + +// NewPOSTapiv1tokenRequestWithBody generates requests for POSTapiv1token with any type of body +func NewPOSTapiv1tokenRequestWithBody(server string, params *POSTapiv1tokenParams, contentType string, body io.Reader) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/api/v1/token") + if operationPath[0] == '/' { + operationPath = "." 
+ operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", queryURL.String(), body) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", contentType) + + if params != nil { + + if params.Accept != nil { + var headerParam0 string + + headerParam0, err = runtime.StyleParamWithLocation("simple", false, "Accept", runtime.ParamLocationHeader, *params.Accept) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", headerParam0) + } + + } + + return req, nil +} + +// NewGEThealthRequest generates requests for GEThealth +func NewGEThealthRequest(server string, params *GEThealthParams) (*http.Request, error) { + var err error + + serverURL, err := url.Parse(server) + if err != nil { + return nil, err + } + + operationPath := fmt.Sprintf("/health") + if operationPath[0] == '/' { + operationPath = "." + operationPath + } + + queryURL, err := serverURL.Parse(operationPath) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", queryURL.String(), nil) + if err != nil { + return nil, err + } + + if params != nil { + + if params.Accept != nil { + var headerParam0 string + + headerParam0, err = runtime.StyleParamWithLocation("simple", false, "Accept", runtime.ParamLocationHeader, *params.Accept) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", headerParam0) + } + + } + + return req, nil +} + +func (c *Client) applyEditors(ctx context.Context, req *http.Request, additionalEditors []RequestEditorFn) error { + for _, r := range c.RequestEditors { + if err := r(ctx, req); err != nil { + return err + } + } + for _, r := range additionalEditors { + if err := r(ctx, req); err != nil { + return err + } + } + return nil +} + +// ClientWithResponses builds on ClientInterface to offer response payloads +type ClientWithResponses struct { + ClientInterface +} + +// NewClientWithResponses creates a new ClientWithResponses, which wraps +// Client with return type handling +func NewClientWithResponses(server string, opts ...ClientOption) (*ClientWithResponses, error) { + client, err := NewClient(server, opts...) + if err != nil { + return nil, err + } + return &ClientWithResponses{client}, nil +} + +// WithBaseURL overrides the baseURL. +func WithBaseURL(baseURL string) ClientOption { + return func(c *Client) error { + newBaseURL, err := url.Parse(baseURL) + if err != nil { + return err + } + c.Server = newBaseURL.String() + return nil + } +} + +// ClientWithResponsesInterface is the interface specification for the client with responses above. 
+type ClientWithResponsesInterface interface { + // POSTapiv1metricsWithResponse request + POSTapiv1metricsWithResponse(ctx context.Context, params *POSTapiv1metricsParams, reqEditors ...RequestEditorFn) (*POSTapiv1metricsResponse, error) + + // GETapiv1metricsrepoOrgRepoWorkflowJobWithResponse request + GETapiv1metricsrepoOrgRepoWorkflowJobWithResponse(ctx context.Context, org string, repo string, workflow string, job string, params *GETapiv1metricsrepoOrgRepoWorkflowJobParams, reqEditors ...RequestEditorFn) (*GETapiv1metricsrepoOrgRepoWorkflowJobResponse, error) + + // GETapiv1sizingrepoOrgRepoWorkflowJobWithResponse request + GETapiv1sizingrepoOrgRepoWorkflowJobWithResponse(ctx context.Context, org string, repo string, workflow string, job string, params *GETapiv1sizingrepoOrgRepoWorkflowJobParams, reqEditors ...RequestEditorFn) (*GETapiv1sizingrepoOrgRepoWorkflowJobResponse, error) + + // POSTapiv1tokenWithBodyWithResponse request with any body + POSTapiv1tokenWithBodyWithResponse(ctx context.Context, params *POSTapiv1tokenParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*POSTapiv1tokenResponse, error) + + // GEThealthWithResponse request + GEThealthWithResponse(ctx context.Context, params *GEThealthParams, reqEditors ...RequestEditorFn) (*GEThealthResponse, error) +} + +type POSTapiv1metricsResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *MetricCreatedResponse + XML200 *MetricCreatedResponse + JSON400 *HTTPError + XML400 *HTTPError + JSON500 *HTTPError + XML500 *HTTPError +} + +// Status returns HTTPResponse.Status +func (r POSTapiv1metricsResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r POSTapiv1metricsResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GETapiv1metricsrepoOrgRepoWorkflowJobResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *[]MetricResponse + XML200 *[]MetricResponse + JSON400 *HTTPError + XML400 *HTTPError + JSON500 *HTTPError + XML500 *HTTPError +} + +// Status returns HTTPResponse.Status +func (r GETapiv1metricsrepoOrgRepoWorkflowJobResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GETapiv1metricsrepoOrgRepoWorkflowJobResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GETapiv1sizingrepoOrgRepoWorkflowJobResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *SizingResponse + XML200 *SizingResponse + JSON400 *HTTPError + XML400 *HTTPError + JSON500 *HTTPError + XML500 *HTTPError +} + +// Status returns HTTPResponse.Status +func (r GETapiv1sizingrepoOrgRepoWorkflowJobResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GETapiv1sizingrepoOrgRepoWorkflowJobResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type POSTapiv1tokenResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *TokenResponse + XML200 *TokenResponse + JSON400 *HTTPError + XML400 *HTTPError + JSON500 *HTTPError + XML500 *HTTPError +} + +// Status returns HTTPResponse.Status +func (r POSTapiv1tokenResponse) Status() 
string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r POSTapiv1tokenResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +type GEThealthResponse struct { + Body []byte + HTTPResponse *http.Response + JSON200 *HealthResponse + XML200 *HealthResponse + JSON400 *HTTPError + XML400 *HTTPError + JSON500 *HTTPError + XML500 *HTTPError +} + +// Status returns HTTPResponse.Status +func (r GEThealthResponse) Status() string { + if r.HTTPResponse != nil { + return r.HTTPResponse.Status + } + return http.StatusText(0) +} + +// StatusCode returns HTTPResponse.StatusCode +func (r GEThealthResponse) StatusCode() int { + if r.HTTPResponse != nil { + return r.HTTPResponse.StatusCode + } + return 0 +} + +// POSTapiv1metricsWithResponse request returning *POSTapiv1metricsResponse +func (c *ClientWithResponses) POSTapiv1metricsWithResponse(ctx context.Context, params *POSTapiv1metricsParams, reqEditors ...RequestEditorFn) (*POSTapiv1metricsResponse, error) { + rsp, err := c.POSTapiv1metrics(ctx, params, reqEditors...) + if err != nil { + return nil, err + } + return ParsePOSTapiv1metricsResponse(rsp) +} + +// GETapiv1metricsrepoOrgRepoWorkflowJobWithResponse request returning *GETapiv1metricsrepoOrgRepoWorkflowJobResponse +func (c *ClientWithResponses) GETapiv1metricsrepoOrgRepoWorkflowJobWithResponse(ctx context.Context, org string, repo string, workflow string, job string, params *GETapiv1metricsrepoOrgRepoWorkflowJobParams, reqEditors ...RequestEditorFn) (*GETapiv1metricsrepoOrgRepoWorkflowJobResponse, error) { + rsp, err := c.GETapiv1metricsrepoOrgRepoWorkflowJob(ctx, org, repo, workflow, job, params, reqEditors...) + if err != nil { + return nil, err + } + return ParseGETapiv1metricsrepoOrgRepoWorkflowJobResponse(rsp) +} + +// GETapiv1sizingrepoOrgRepoWorkflowJobWithResponse request returning *GETapiv1sizingrepoOrgRepoWorkflowJobResponse +func (c *ClientWithResponses) GETapiv1sizingrepoOrgRepoWorkflowJobWithResponse(ctx context.Context, org string, repo string, workflow string, job string, params *GETapiv1sizingrepoOrgRepoWorkflowJobParams, reqEditors ...RequestEditorFn) (*GETapiv1sizingrepoOrgRepoWorkflowJobResponse, error) { + rsp, err := c.GETapiv1sizingrepoOrgRepoWorkflowJob(ctx, org, repo, workflow, job, params, reqEditors...) + if err != nil { + return nil, err + } + return ParseGETapiv1sizingrepoOrgRepoWorkflowJobResponse(rsp) +} + +// POSTapiv1tokenWithBodyWithResponse request with arbitrary body returning *POSTapiv1tokenResponse +func (c *ClientWithResponses) POSTapiv1tokenWithBodyWithResponse(ctx context.Context, params *POSTapiv1tokenParams, contentType string, body io.Reader, reqEditors ...RequestEditorFn) (*POSTapiv1tokenResponse, error) { + rsp, err := c.POSTapiv1tokenWithBody(ctx, params, contentType, body, reqEditors...) + if err != nil { + return nil, err + } + return ParsePOSTapiv1tokenResponse(rsp) +} + +// GEThealthWithResponse request returning *GEThealthResponse +func (c *ClientWithResponses) GEThealthWithResponse(ctx context.Context, params *GEThealthParams, reqEditors ...RequestEditorFn) (*GEThealthResponse, error) { + rsp, err := c.GEThealth(ctx, params, reqEditors...) 
+ if err != nil { + return nil, err + } + return ParseGEThealthResponse(rsp) +} + +// ParsePOSTapiv1metricsResponse parses an HTTP response from a POSTapiv1metricsWithResponse call +func ParsePOSTapiv1metricsResponse(rsp *http.Response) (*POSTapiv1metricsResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &POSTapiv1metricsResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest MetricCreatedResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 200: + var dest MetricCreatedResponse + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 400: + var dest HTTPError + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 500: + var dest HTTPError + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML500 = &dest + + } + + return response, nil +} + +// ParseGETapiv1metricsrepoOrgRepoWorkflowJobResponse parses an HTTP response from a GETapiv1metricsrepoOrgRepoWorkflowJobWithResponse call +func ParseGETapiv1metricsrepoOrgRepoWorkflowJobResponse(rsp *http.Response) (*GETapiv1metricsrepoOrgRepoWorkflowJobResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GETapiv1metricsrepoOrgRepoWorkflowJobResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest []MetricResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 200: + var dest []MetricResponse + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 400: + var dest HTTPError + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML400 = &dest + + case 
strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 500: + var dest HTTPError + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML500 = &dest + + } + + return response, nil +} + +// ParseGETapiv1sizingrepoOrgRepoWorkflowJobResponse parses an HTTP response from a GETapiv1sizingrepoOrgRepoWorkflowJobWithResponse call +func ParseGETapiv1sizingrepoOrgRepoWorkflowJobResponse(rsp *http.Response) (*GETapiv1sizingrepoOrgRepoWorkflowJobResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GETapiv1sizingrepoOrgRepoWorkflowJobResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest SizingResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 200: + var dest SizingResponse + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 400: + var dest HTTPError + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 500: + var dest HTTPError + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML500 = &dest + + } + + return response, nil +} + +// ParsePOSTapiv1tokenResponse parses an HTTP response from a POSTapiv1tokenWithResponse call +func ParsePOSTapiv1tokenResponse(rsp *http.Response) (*POSTapiv1tokenResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &POSTapiv1tokenResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest TokenResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 200: + var dest TokenResponse + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 400: + var dest HTTPError + if err := 
xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 500: + var dest HTTPError + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML500 = &dest + + } + + return response, nil +} + +// ParseGEThealthResponse parses an HTTP response from a GEThealthWithResponse call +func ParseGEThealthResponse(rsp *http.Response) (*GEThealthResponse, error) { + bodyBytes, err := io.ReadAll(rsp.Body) + defer func() { _ = rsp.Body.Close() }() + if err != nil { + return nil, err + } + + response := &GEThealthResponse{ + Body: bodyBytes, + HTTPResponse: rsp, + } + + switch { + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: + var dest HealthResponse + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 400: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 500: + var dest HTTPError + if err := json.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.JSON500 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 200: + var dest HealthResponse + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML200 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 400: + var dest HTTPError + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML400 = &dest + + case strings.Contains(rsp.Header.Get("Content-Type"), "xml") && rsp.StatusCode == 500: + var dest HTTPError + if err := xml.Unmarshal(bodyBytes, &dest); err != nil { + return nil, err + } + response.XML500 = &dest + + } + + return response, nil +} diff --git a/scripts/extract-openapi/main.go b/scripts/extract-openapi/main.go new file mode 100644 index 0000000..327d967 --- /dev/null +++ b/scripts/extract-openapi/main.go @@ -0,0 +1,64 @@ +//go:build ignore + +// ABOUTME: Extracts OpenAPI spec from Fuego server without running it. 
+// ABOUTME: Run with: go run scripts/extract-openapi/main.go +package main + +import ( + "encoding/json" + "fmt" + "io" + "log/slog" + "os" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/go-fuego/fuego" + + "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver" +) + +func main() { + // Create a minimal handler (store is nil, won't be used) + logger := slog.New(slog.NewTextHandler(io.Discard, nil)) + handler := receiver.NewHandler(nil, logger, "dummy", "dummy", 0) + + // Create Fuego server with OpenAPI config + s := fuego.NewServer( + fuego.WithoutStartupMessages(), + fuego.WithEngineOptions( + fuego.WithOpenAPIConfig(fuego.OpenAPIConfig{ + DisableLocalSave: true, + Info: &openapi3.Info{ + Title: "Forgejo Runner Resource Collector API", + Version: "1.0.0", + Description: "HTTP service that receives and stores CI/CD resource metrics from collectors, providing query and sizing recommendation APIs.", + Contact: &openapi3.Contact{ + Name: "API Support", + URL: "https://edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer", + }, + License: &openapi3.License{ + Name: "Apache 2.0", + URL: "http://www.apache.org/licenses/LICENSE-2.0.html", + }, + }, + }), + ), + ) + + // Register routes to populate OpenAPI spec + handler.RegisterRoutes(s) + + // Output OpenAPI spec as JSON + spec, err := json.MarshalIndent(s.OpenAPI.Description(), "", " ") + if err != nil { + fmt.Fprintf(os.Stderr, "Error marshaling OpenAPI spec: %v\n", err) + os.Exit(1) + } + + if err := os.WriteFile("docs/openapi.json", spec, 0644); err != nil { + fmt.Fprintf(os.Stderr, "Error writing docs/openapi.json: %v\n", err) + os.Exit(1) + } + + fmt.Println("Generated docs/openapi.json") +}
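
A minimal usage sketch (editorial note, not part of the patch): exercising the new sizing endpoint through the generated pkg/client. The module import path, server address, and read token below are assumptions for illustration. The generated GETapiv1sizingrepoOrgRepoWorkflowJobParams struct only carries an Accept header, so runs, buffer and cpu_percentile fall back to the server-side defaults (5, 20 and p95).

package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	// Assumed module path; mirrors the import style used elsewhere in this series.
	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/pkg/client"
)

func main() {
	readToken := "example-read-token" // assumption: in practice this comes from config or env

	c, err := client.NewClientWithResponses(
		"http://localhost:8080", // assumed receiver address
		client.WithRequestEditorFn(func(ctx context.Context, req *http.Request) error {
			// The read-token middleware expects a Bearer token on every /api/v1 read route.
			req.Header.Set("Authorization", "Bearer "+readToken)
			return nil
		}),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Query sizing recommendations for org/repo/ci.yml/build using the server-side defaults.
	resp, err := c.GETapiv1sizingrepoOrgRepoWorkflowJobWithResponse(
		context.Background(), "org", "repo", "ci.yml", "build", nil)
	if err != nil {
		log.Fatal(err)
	}
	if resp.StatusCode() != http.StatusOK || resp.JSON200 == nil {
		log.Fatalf("sizing request failed: %s: %s", resp.Status(), resp.Body)
	}
	fmt.Printf("sizing recommendation: %s\n", resp.Body)
}

The docs/openapi.json spec this client is generated from is produced by the extract script above (go run scripts/extract-openapi/main.go); the client file header indicates it was then generated with oapi-codegen v2, though the exact generation command is not shown in this series.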