Compare commits

9 commits:

- bc9d0dd8ea
- 479c13f596
- e38c99acd6
- 937e5b814b
- d0aea88a5b
- 862fc07328
- 7e3a4efb2d
- a96a1079eb
- 8101e9b20e
35 changed files with 3345 additions and 319 deletions
.github/workflows/ci.yaml (vendored, 18 changed lines)

@@ -7,16 +7,16 @@ on:
   pull_request:

 jobs:
-  build:
+  ci:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 0

       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
        with:
          go-version-file: go.mod

@@ -27,13 +27,23 @@ jobs:
        run: make test

       - name: Install GoReleaser
-        uses: https://github.com/goreleaser/goreleaser-action@v5
+        uses: https://github.com/goreleaser/goreleaser-action@v6
        with:
          install-only: true

+      - name: Sanitize Docker credentials
+        run: |
+          REGISTRY="${{ forgejo.server_url }}"
+          echo "registry=${REGISTRY#https://}" >> "$GITHUB_OUTPUT"
+          ORG="${{ github.repository_owner }}"
+          echo "org=$(echo "$ORG" | tr '[:upper:]' '[:lower:]')" >> "$GITHUB_OUTPUT"
+        id: sanitize_credentials
+
       - name: GoReleaser Check
        run: |
          unset GITHUB_TOKEN
          goreleaser release --snapshot --skip=publish --clean
        env:
          GORELEASER_CURRENT_TAG: v0.0.0
+          DOCKER_REGISTRY: ${{ steps.sanitize_credentials.outputs.registry }}
+          DOCKER_ORG: ${{ steps.sanitize_credentials.outputs.org }}
.github/workflows/release.yaml (vendored, 4 changed lines)

@@ -10,11 +10,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
        with:
          fetch-depth: 0
       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
        with:
          go-version-file: go.mod
       - name: Test code
(GoReleaser configuration)

@@ -1,6 +1,6 @@
 version: 2

-project_name: resource-collector
+project_name: sizer

 gitea_urls:
   api: "{{ .Env.GITHUB_SERVER_URL }}/api/v1"

@@ -11,9 +11,21 @@ before:
     - go mod tidy

 builds:
-  - id: resource-collector
+  - id: collector
     main: ./cmd/collector
-    binary: resource-collector
+    binary: collector
+    env:
+      - CGO_ENABLED=0
+    goos:
+      - linux
+    goarch:
+      - amd64
+      - arm64
+    ldflags:
+      - -s -w
+  - id: receiver
+    main: ./cmd/receiver
+    binary: receiver
     env:
       - CGO_ENABLED=0
     goos:

@@ -37,12 +49,28 @@ snapshot:
   version_template: "{{ incpatch .Version }}-next"

 dockers_v2:
-  - images:
-      - "{{ .Env.DOCKER_REGISTRY }}/{{ .Env.DOCKER_ORG }}/resource-collector"
+  - id: collector
+    ids:
+      - collector
+    images:
+      - "{{ .Env.DOCKER_REGISTRY }}/{{ .Env.DOCKER_ORG }}/forgejo-runner-sizer-collector"
     tags:
       - "{{ .Version }}"
       - latest
     dockerfile: Dockerfile.goreleaser
+    build_args:
+      BINARY: collector
+  - id: receiver
+    ids:
+      - receiver
+    images:
+      - "{{ .Env.DOCKER_REGISTRY }}/{{ .Env.DOCKER_ORG }}/forgejo-runner-sizer-receiver"
+    tags:
+      - "{{ .Version }}"
+      - latest
+    dockerfile: Dockerfile.goreleaser
+    build_args:
+      BINARY: receiver

 changelog:
   sort: asc
(project documentation)

@@ -28,7 +28,7 @@ make install-hooks # Install pre-commit and commit-msg hooks

 ## Architecture Overview

-This is a Go metrics collector designed for CI/CD environments with shared PID namespaces. It consists of two binaries:
+A resource sizer for CI/CD environments with shared PID namespaces. It consists of two binaries — a **collector** and a **receiver** (which includes the **sizer**):

 ### Collector (`cmd/collector`)
 Runs alongside CI workloads, periodically reads `/proc` filesystem, and pushes a summary to the receiver on shutdown (SIGINT/SIGTERM).
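The collector behavior described in the hunk above (sample on an interval, push a summary when SIGINT or SIGTERM arrives) maps onto a small standard-library pattern. The sketch below is illustrative only; `sample` and `push` are hypothetical stand-ins, not the project's `internal/collector` or `internal/summary` code.

```go
// Minimal sketch of an interval-sampling loop that pushes a summary on
// SIGINT/SIGTERM. Illustrative only: sample() and push() are hypothetical
// stand-ins for the project's sampling and push-client logic.
package main

import (
	"context"
	"os/signal"
	"syscall"
	"time"
)

func run(interval time.Duration, sample func(), push func(ctx context.Context) error) error {
	// ctx is cancelled automatically when SIGINT or SIGTERM is received.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			sample() // read /proc and accumulate into the run summary
		case <-ctx.Done():
			// Give the final push a bounded amount of time after shutdown.
			pushCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()
			return push(pushCtx)
		}
	}
}
```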
@@ -40,11 +40,12 @@ Runs alongside CI workloads, periodically reads `/proc` filesystem, and pushes a
 4. On shutdown, `summary.PushClient` sends the summary to the receiver HTTP endpoint

 ### Receiver (`cmd/receiver`)
-HTTP service that stores metric summaries in SQLite (via GORM) and provides a query API.
+HTTP service that stores metric summaries in SQLite (via GORM), provides a query API, and includes the **sizer** — which computes right-sized Kubernetes resource requests and limits from historical data.

 **Key Endpoints:**
 - `POST /api/v1/metrics` - Receive metrics from collectors
 - `GET /api/v1/metrics/repo/{org}/{repo}/{workflow}/{job}` - Query stored metrics
+- `GET /api/v1/sizing/repo/{org}/{repo}/{workflow}/{job}` - Compute container sizes from historical data

 ### Internal Packages

@@ -55,7 +56,7 @@ HTTP service that stores metric summaries in SQLite (via GORM) and provides a qu
 | `internal/proc` | Low-level /proc parsing (stat, status, cgroup) |
 | `internal/cgroup` | Parses CGROUP_LIMITS and CGROUP_PROCESS_MAP env vars |
 | `internal/summary` | Accumulates samples, computes stats, pushes to receiver |
-| `internal/receiver` | HTTP handlers and SQLite store |
+| `internal/receiver` | HTTP handlers, SQLite store, and sizer logic |
 | `internal/output` | Metrics output formatting (JSON/text) |

 ### Container Metrics
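The sizer documented in the hunk above computes requests and limits from historical runs. This diff does not include the algorithm itself; the sketch below only illustrates one plausible percentile-plus-buffer approach, inferred from the `cpu_percentile`, `buffer_percent`, and `runs_analyzed` fields that appear later in `docs/openapi.json`.

```go
// Illustrative sketch of a percentile-plus-buffer sizing calculation.
// The real sizer lives in internal/receiver and is not shown in this diff;
// the approach here is only inferred from the SizingResponse meta fields.
package sizer

import (
	"fmt"
	"sort"
)

// Recommend returns a request/limit pair from historical peak samples:
// request is the p-th percentile of observed peaks, limit adds a buffer.
func Recommend(samples []float64, percentile float64, bufferPercent int) (request, limit float64, err error) {
	if len(samples) == 0 {
		return 0, 0, fmt.Errorf("no runs to analyze")
	}
	sorted := append([]float64(nil), samples...)
	sort.Float64s(sorted)

	idx := int(percentile / 100 * float64(len(sorted)-1))
	request = sorted[idx]
	limit = request * (1 + float64(bufferPercent)/100)
	return request, limit, nil
}
```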
Dockerfile (13 changed lines)

@@ -10,26 +10,23 @@ COPY . .
 # Collector build (no CGO needed)
 FROM builder-base AS builder-collector

-RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /resource-collector ./cmd/collector
+RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /sizer ./cmd/collector

-# Receiver build (CGO needed for SQLite)
+# Receiver build
 FROM builder-base AS builder-receiver

-RUN apk add --no-cache gcc musl-dev
-RUN CGO_ENABLED=1 GOOS=linux go build -ldflags="-s -w" -o /metrics-receiver ./cmd/receiver
+RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /metrics-receiver ./cmd/receiver

 # Collector image
 FROM alpine:3.19 AS collector

-COPY --from=builder-collector /resource-collector /usr/local/bin/resource-collector
+COPY --from=builder-collector /sizer /usr/local/bin/sizer

-ENTRYPOINT ["/usr/local/bin/resource-collector"]
+ENTRYPOINT ["/usr/local/bin/sizer"]

 # Receiver image
 FROM alpine:3.19 AS receiver

-RUN apk add --no-cache sqlite-libs
-
 COPY --from=builder-receiver /metrics-receiver /usr/local/bin/metrics-receiver

 EXPOSE 8080
(Dockerfile.goreleaser)

@@ -1,4 +1,5 @@
 FROM gcr.io/distroless/static:nonroot
 ARG TARGETPLATFORM
-COPY ${TARGETPLATFORM}/resource-collector /resource-collector
-ENTRYPOINT ["/resource-collector"]
+ARG BINARY
+COPY ${TARGETPLATFORM}/${BINARY} /app
+ENTRYPOINT ["/app"]
Makefile (36 changed lines)

@@ -1,11 +1,10 @@
-# ABOUTME: Makefile for forgejo-runner-resource-collector project.
+# ABOUTME: Makefile for forgejo-runner-sizer project.
 # ABOUTME: Provides targets for building, formatting, linting, and testing.

-BINARY_NAME := resource-collector
-CMD_PATH := ./cmd/collector
 GO := go
 GOLANGCI_LINT := $(GO) run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.6.2
 GITLEAKS := $(GO) run github.com/zricethezav/gitleaks/v8@v8.30.0
+OAPI_CODEGEN := $(GO) run github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen@latest

 # Build flags
 LDFLAGS := -s -w

@@ -13,18 +12,23 @@ BUILD_FLAGS := -ldflags "$(LDFLAGS)"

 default: run

-.PHONY: all build clean fmt format lint gitleaks test run help vet tidy install-hooks
+.PHONY: all build build-collector build-receiver clean fmt format lint gitleaks test run-collector run-receiver help vet tidy install-hooks openapi generate-client

 # Default target
 all: fmt vet lint build

 ## Build targets

-build: ## Build the binary
-	$(GO) build $(BUILD_FLAGS) -o $(BINARY_NAME) $(CMD_PATH)
+build: build-collector build-receiver ## Build both binaries
+
+build-collector: ## Build the collector binary
+	$(GO) build $(BUILD_FLAGS) -o collector ./cmd/collector
+
+build-receiver: ## Build the receiver binary
+	$(GO) build $(BUILD_FLAGS) -o receiver ./cmd/receiver

 clean: ## Remove build artifacts
-	rm -f $(BINARY_NAME) coverage.out coverage.html
+	rm -f collector receiver coverage.out coverage.html
 	$(GO) clean

 ## Code quality targets

@@ -46,6 +50,16 @@ gitleaks: ## Check for secrets in git history
 gitleaks-all: ## Check for secrets in git history
 	$(GITLEAKS) git .

+## OpenAPI / Client Generation
+
+openapi: ## Generate OpenAPI spec from Fuego routes
+	$(GO) run scripts/extract-openapi/main.go
+
+generate-client: openapi ## Generate Go client from OpenAPI spec
+	rm -rf pkg/client
+	mkdir -p pkg/client
+	$(OAPI_CODEGEN) -generate types,client -package client docs/openapi.json > pkg/client/client.gen.go
+
 ## Dependency management

 tidy: ## Tidy go modules

@@ -62,11 +76,11 @@ test-coverage: ## Run tests with coverage

 ## Run targets

-run: build ## Build and run with default settings
-	./$(BINARY_NAME)
+run-collector: build-collector ## Build and run the collector
+	./collector

-run-text: build ## Build and run with text output format
-	./$(BINARY_NAME) --log-format text --interval 2s
+run-receiver: build-receiver ## Build and run the receiver
+	./receiver --read-token=secure-read-token --hmac-key=secure-hmac-key

 ## Git hooks
README.md (17 changed lines)

@@ -1,10 +1,10 @@
-# Forgejo Runner Resource Collector
+# Forgejo Runner Sizer

-A lightweight metrics collector for CI/CD workloads in shared PID namespace environments. Reads `/proc` to collect CPU and memory metrics, groups them by container/cgroup, and pushes run summaries to a receiver service for storage and querying.
+A resource sizer for CI/CD workloads in shared PID namespace environments. The **collector** reads `/proc` to gather CPU and memory metrics grouped by container/cgroup, and pushes run summaries to the **receiver**. The receiver stores metrics and exposes a **sizer** API that computes right-sized Kubernetes resource requests and limits from historical data.

 ## Architecture

-The system has two independent binaries:
+The system has two binaries — a **collector** and a **receiver** (which includes the sizer):

 ```
 ┌─────────────────────────────────────────────┐ ┌──────────────────────────┐

@@ -19,7 +19,9 @@ The system has two independent binaries:
 │ └───────────┘ └────────┘ └───────────┘ │ │ │ │
 │ │ │ ▼ │
 └─────────────────────────────────────────────┘ │ GET /api/v1/metrics/... │
-└──────────────────────────┘
+│ GET /api/v1/sizing/... │
+│ (sizer) │
+└──────────────────────────┘
 ```

 ### Collector
@@ -56,9 +58,9 @@ Runs as a sidecar alongside CI workloads. On a configurable interval, it reads `

 CPU supports Kubernetes notation (`"2"` = 2 cores, `"500m"` = 0.5 cores). Memory supports `Ki`, `Mi`, `Gi`, `Ti` (binary) or `K`, `M`, `G`, `T` (decimal).

-### Receiver
+### Receiver (with sizer)

-HTTP service that stores metric summaries in SQLite (via GORM) and exposes a query API.
+HTTP service that stores metric summaries in SQLite (via GORM), exposes a query API, and provides a **sizer** endpoint that computes right-sized Kubernetes resource requests and limits from historical run data.

 ```bash
 ./receiver --addr=:8080 --db=metrics.db --read-token=my-secret-token --hmac-key=my-hmac-key
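Parsing the CPU and memory notation quoted in the context above takes only the standard library. The helper below is an illustration of that notation, not the project's `internal/cgroup` parser.

```go
// Illustrative parser for Kubernetes-style quantities, matching the notation
// described in the README ("2" = 2 cores, "500m" = 0.5 cores; Ki/Mi/Gi/Ti
// binary, K/M/G/T decimal). Not the project's internal/cgroup implementation.
package quantity

import (
	"fmt"
	"strconv"
	"strings"
)

// ParseCPU returns the number of cores as a float (e.g. "500m" -> 0.5).
func ParseCPU(s string) (float64, error) {
	if strings.HasSuffix(s, "m") {
		milli, err := strconv.ParseFloat(strings.TrimSuffix(s, "m"), 64)
		if err != nil {
			return 0, fmt.Errorf("invalid CPU quantity %q: %w", s, err)
		}
		return milli / 1000, nil
	}
	return strconv.ParseFloat(s, 64)
}

// ParseMemory returns the size in bytes.
func ParseMemory(s string) (uint64, error) {
	multipliers := map[string]uint64{
		"Ki": 1 << 10, "Mi": 1 << 20, "Gi": 1 << 30, "Ti": 1 << 40, // binary
		"K": 1_000, "M": 1_000_000, "G": 1_000_000_000, "T": 1_000_000_000_000, // decimal
	}
	// Check two-character binary suffixes before the one-character decimal ones.
	for _, suffix := range []string{"Ki", "Mi", "Gi", "Ti", "K", "M", "G", "T"} {
		if strings.HasSuffix(s, suffix) {
			n, err := strconv.ParseUint(strings.TrimSuffix(s, suffix), 10, 64)
			if err != nil {
				return 0, fmt.Errorf("invalid memory quantity %q: %w", s, err)
			}
			return n * multipliers[suffix], nil
		}
	}
	return strconv.ParseUint(s, 10, 64)
}
```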
@@ -78,6 +80,7 @@ HTTP service that stores metric summaries in SQLite (via GORM) and exposes a que
 - `POST /api/v1/metrics` — receive and store a metric summary (requires scoped push token)
 - `POST /api/v1/token` — generate a scoped push token (requires read token auth)
 - `GET /api/v1/metrics/repo/{org}/{repo}/{workflow}/{job}` — query stored metrics (requires read token auth)
+- `GET /api/v1/sizing/repo/{org}/{repo}/{workflow}/{job}` — compute container sizes from historical data (requires read token auth)

 **Authentication:**
@@ -232,7 +235,7 @@ PUSH_TOKEN=$(curl -s -X POST http://localhost:8080/api/v1/token \
 | `internal/cgroup` | Parses `CGROUP_PROCESS_MAP` and `CGROUP_LIMITS` env vars |
 | `internal/collector` | Orchestrates the collection loop and shutdown |
 | `internal/summary` | Accumulates samples, computes stats, pushes to receiver |
-| `internal/receiver` | HTTP handlers and SQLite store |
+| `internal/receiver` | HTTP handlers, SQLite store, and sizer logic |
 | `internal/output` | Metrics output formatting (JSON/text) |

 ## Background
(Go source: collector entry point)

@@ -10,9 +10,9 @@ import (
 	"syscall"
 	"time"

-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/collector"
-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/output"
-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/collector"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/output"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary"
 )

 const (
(Go source: receiver entry point)

@@ -1,17 +1,18 @@
+// ABOUTME: Entry point for the metrics receiver service.
+// ABOUTME: HTTP service using Fuego framework with automatic OpenAPI 3.0 generation.
 package main

 import (
-	"context"
 	"flag"
 	"fmt"
 	"log/slog"
-	"net/http"
 	"os"
-	"os/signal"
-	"syscall"
 	"time"

-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/receiver"
+	"github.com/getkin/kin-openapi/openapi3"
+	"github.com/go-fuego/fuego"
+
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver"
 )

 const (

@@ -24,6 +25,7 @@ func main() {
 	dbPath := flag.String("db", defaultDBPath, "SQLite database path")
 	readToken := flag.String("read-token", os.Getenv("RECEIVER_READ_TOKEN"), "Pre-shared token for read endpoints (or set RECEIVER_READ_TOKEN)")
 	hmacKey := flag.String("hmac-key", os.Getenv("RECEIVER_HMAC_KEY"), "Secret key for push token generation/validation (or set RECEIVER_HMAC_KEY)")
+	tokenTTL := flag.Duration("token-ttl", 2*time.Hour, "Time-to-live for push tokens (default 2h)")
 	flag.Parse()

 	logger := slog.New(slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{

@@ -37,43 +39,45 @@ func main() {
 	}
 	defer func() { _ = store.Close() }()

-	handler := receiver.NewHandler(store, logger, *readToken, *hmacKey)
-	mux := http.NewServeMux()
-	handler.RegisterRoutes(mux)
-
-	server := &http.Server{
-		Addr:         *addr,
-		Handler:      mux,
-		ReadTimeout:  10 * time.Second,
-		WriteTimeout: 10 * time.Second,
-	}
+	handler := receiver.NewHandler(store, logger, *readToken, *hmacKey, *tokenTTL)
+
+	// Create Fuego server with OpenAPI configuration
+	s := fuego.NewServer(
+		fuego.WithAddr(*addr),
+		fuego.WithEngineOptions(
+			fuego.WithOpenAPIConfig(fuego.OpenAPIConfig{
+				PrettyFormatJSON: true,
+				JSONFilePath:     "docs/openapi.json",
+				SwaggerURL:       "/swagger",
+				Info: &openapi3.Info{
+					Title:       "Forgejo Runner Resource Collector API",
+					Version:     "1.0.0",
+					Description: "HTTP service that receives and stores CI/CD resource metrics from collectors, providing query and sizing recommendation APIs.",
+					Contact: &openapi3.Contact{
+						Name: "API Support",
+						URL:  "https://edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer",
+					},
+					License: &openapi3.License{
+						Name: "Apache 2.0",
+						URL:  "http://www.apache.org/licenses/LICENSE-2.0.html",
+					},
+				},
+			}),
+		),
+	)

-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	sigChan := make(chan os.Signal, 1)
-	signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
-
-	go func() {
-		sig := <-sigChan
-		logger.Info("received signal, shutting down", slog.String("signal", sig.String()))
-		cancel()
-
-		shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 5*time.Second)
-		defer shutdownCancel()
-		_ = server.Shutdown(shutdownCtx)
-	}()
+	// Register routes
+	handler.RegisterRoutes(s)

 	logger.Info("starting metrics receiver",
 		slog.String("addr", *addr),
 		slog.String("db", *dbPath),
+		slog.String("swagger", fmt.Sprintf("http://localhost%s/swagger", *addr)),
 	)

-	if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+	// Run server (handles graceful shutdown)
+	if err := s.Run(); err != nil {
 		fmt.Fprintf(os.Stderr, "error: %v\n", err)
 		os.Exit(1)
 	}
-
-	<-ctx.Done()
-	logger.Info("receiver stopped gracefully")
 }
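The receiver above now takes an `--hmac-key` and a `--token-ttl` flag for scoped push tokens. The token format itself is not part of this diff, so the sketch below only shows a generic HMAC-signed, expiring, scope-bound token as one way such a flag pair is typically used; the project's real implementation may differ.

```go
// Illustrative only: a generic HMAC-signed, expiring push token scoped to
// org/repo/workflow/job. The receiver's real token format is not shown in
// this diff.
package token

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"time"
)

// Sign returns "payload.signature"; the payload carries the scope and expiry.
func Sign(hmacKey, org, repo, workflow, job string, ttl time.Duration) string {
	payload := fmt.Sprintf("%s/%s/%s/%s/%d", org, repo, workflow, job, time.Now().Add(ttl).Unix())
	mac := hmac.New(sha256.New, []byte(hmacKey))
	mac.Write([]byte(payload))
	sig := base64.RawURLEncoding.EncodeToString(mac.Sum(nil))
	return base64.RawURLEncoding.EncodeToString([]byte(payload)) + "." + sig
}

// Verification would recompute the MAC, compare with hmac.Equal, and reject
// expired payloads; it is omitted here for brevity.
```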
docs/openapi.json (new file, 665 lines)

@@ -0,0 +1,665 @@
{
  "components": {
    "schemas": {
      "HTTPError": {
        "description": "HTTPError schema",
        "properties": {
          "detail": {"description": "Human readable error message", "nullable": true, "type": "string"},
          "errors": {
            "items": {
              "nullable": true,
              "properties": {
                "more": {
                  "additionalProperties": {"description": "Additional information about the error", "nullable": true},
                  "description": "Additional information about the error",
                  "nullable": true,
                  "type": "object"
                },
                "name": {"description": "For example, name of the parameter that caused the error", "type": "string"},
                "reason": {"description": "Human readable error message", "type": "string"}
              },
              "type": "object"
            },
            "nullable": true,
            "type": "array"
          },
          "instance": {"nullable": true, "type": "string"},
          "status": {"description": "HTTP status code", "example": 403, "nullable": true, "type": "integer"},
          "title": {"description": "Short title of the error", "nullable": true, "type": "string"},
          "type": {"description": "URL of the error type. Can be used to lookup the error in a documentation", "nullable": true, "type": "string"}
        },
        "type": "object"
      },
      "HealthResponse": {
        "description": "HealthResponse schema",
        "properties": {"status": {"type": "string"}},
        "type": "object"
      },
      "MetricCreatedResponse": {
        "description": "MetricCreatedResponse schema",
        "properties": {"id": {"minimum": 0, "type": "integer"}, "status": {"type": "string"}},
        "type": "object"
      },
      "MetricResponse": {
        "description": "MetricResponse schema",
        "properties": {
          "id": {"minimum": 0, "type": "integer"},
          "job": {"type": "string"},
          "organization": {"type": "string"},
          "payload": {},
          "received_at": {"format": "date-time", "type": "string"},
          "repository": {"type": "string"},
          "run_id": {"type": "string"},
          "workflow": {"type": "string"}
        },
        "type": "object"
      },
      "SizingResponse": {
        "description": "SizingResponse schema",
        "properties": {
          "containers": {
            "items": {
              "properties": {
                "cpu": {"properties": {"limit": {"type": "string"}, "request": {"type": "string"}}, "type": "object"},
                "memory": {"properties": {"limit": {"type": "string"}, "request": {"type": "string"}}, "type": "object"},
                "name": {"type": "string"}
              },
              "type": "object"
            },
            "type": "array"
          },
          "meta": {
            "properties": {
              "buffer_percent": {"type": "integer"},
              "cpu_percentile": {"type": "string"},
              "runs_analyzed": {"type": "integer"}
            },
            "type": "object"
          },
          "total": {
            "properties": {
              "cpu": {"properties": {"limit": {"type": "string"}, "request": {"type": "string"}}, "type": "object"},
              "memory": {"properties": {"limit": {"type": "string"}, "request": {"type": "string"}}, "type": "object"}
            },
            "type": "object"
          }
        },
        "type": "object"
      },
      "TokenRequest": {
        "description": "TokenRequest schema",
        "properties": {
          "job": {"type": "string"},
          "organization": {"type": "string"},
          "repository": {"type": "string"},
          "workflow": {"type": "string"}
        },
        "type": "object"
      },
      "TokenResponse": {
        "description": "TokenResponse schema",
        "properties": {"token": {"type": "string"}},
        "type": "object"
      },
      "unknown-interface": {"description": "unknown-interface schema"}
    }
  },
  "info": {
    "contact": {"name": "API Support", "url": "https://edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer"},
    "description": "HTTP service that receives and stores CI/CD resource metrics from collectors, providing query and sizing recommendation APIs.",
    "license": {"name": "Apache 2.0", "url": "http://www.apache.org/licenses/LICENSE-2.0.html"},
    "title": "Forgejo Runner Resource Collector API",
    "version": "1.0.0"
  },
  "openapi": "3.1.0",
  "paths": {
    "/api/v1/metrics": {
      "post": {
        "description": "#### Controller: \n\n`edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).ReceiveMetrics`\n\n#### Middlewares:\n\n- `github.com/go-fuego/fuego.defaultLogger.middleware`\n\n---\n\n",
        "operationId": "POST_/api/v1/metrics",
        "parameters": [{"in": "header", "name": "Accept", "schema": {"type": "string"}}],
        "responses": {
          "200": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/MetricCreatedResponse"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/MetricCreatedResponse"}}}, "description": "OK"},
          "400": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPError"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/HTTPError"}}}, "description": "Bad Request _(validation or deserialization error)_"},
          "500": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPError"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/HTTPError"}}}, "description": "Internal Server Error _(panics)_"},
          "default": {"description": ""}
        },
        "summary": "receive metrics",
        "tags": ["api/v1"]
      }
    },
    "/api/v1/metrics/repo/{org}/{repo}/{workflow}/{job}": {
      "get": {
        "description": "#### Controller: \n\n`edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).GetMetricsByWorkflowJob`\n\n#### Middlewares:\n\n- `github.com/go-fuego/fuego.defaultLogger.middleware`\n- `edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).requireReadToken`\n\n---\n\n",
        "operationId": "GET_/api/v1/metrics/repo/:org/:repo/:workflow/:job",
        "parameters": [
          {"in": "header", "name": "Accept", "schema": {"type": "string"}},
          {"in": "path", "name": "org", "required": true, "schema": {"type": "string"}},
          {"in": "path", "name": "repo", "required": true, "schema": {"type": "string"}},
          {"in": "path", "name": "workflow", "required": true, "schema": {"type": "string"}},
          {"in": "path", "name": "job", "required": true, "schema": {"type": "string"}}
        ],
        "responses": {
          "200": {"content": {"application/json": {"schema": {"items": {"$ref": "#/components/schemas/MetricResponse"}, "type": "array"}}, "application/xml": {"schema": {"items": {"$ref": "#/components/schemas/MetricResponse"}, "type": "array"}}}, "description": "OK"},
          "400": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPError"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/HTTPError"}}}, "description": "Bad Request _(validation or deserialization error)_"},
          "500": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPError"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/HTTPError"}}}, "description": "Internal Server Error _(panics)_"},
          "default": {"description": ""}
        },
        "summary": "get metrics by workflow job",
        "tags": ["api/v1"]
      }
    },
    "/api/v1/sizing/repo/{org}/{repo}/{workflow}/{job}": {
      "get": {
        "description": "#### Controller: \n\n`edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).GetSizing`\n\n#### Middlewares:\n\n- `github.com/go-fuego/fuego.defaultLogger.middleware`\n- `edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).requireReadToken`\n\n---\n\n",
        "operationId": "GET_/api/v1/sizing/repo/:org/:repo/:workflow/:job",
        "parameters": [
          {"in": "header", "name": "Accept", "schema": {"type": "string"}},
          {"in": "path", "name": "org", "required": true, "schema": {"type": "string"}},
          {"in": "path", "name": "repo", "required": true, "schema": {"type": "string"}},
          {"in": "path", "name": "workflow", "required": true, "schema": {"type": "string"}},
          {"in": "path", "name": "job", "required": true, "schema": {"type": "string"}}
        ],
        "responses": {
          "200": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/SizingResponse"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/SizingResponse"}}}, "description": "OK"},
          "400": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPError"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/HTTPError"}}}, "description": "Bad Request _(validation or deserialization error)_"},
          "500": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPError"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/HTTPError"}}}, "description": "Internal Server Error _(panics)_"},
          "default": {"description": ""}
        },
        "summary": "get sizing",
        "tags": ["api/v1"]
      }
    },
    "/api/v1/token": {
      "post": {
        "description": "#### Controller: \n\n`edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).GenerateToken`\n\n#### Middlewares:\n\n- `github.com/go-fuego/fuego.defaultLogger.middleware`\n- `edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).requireReadToken`\n\n---\n\n",
        "operationId": "POST_/api/v1/token",
        "parameters": [{"in": "header", "name": "Accept", "schema": {"type": "string"}}],
        "requestBody": {
          "content": {"*/*": {"schema": {"$ref": "#/components/schemas/TokenRequest"}}},
          "description": "Request body for receiver.TokenRequest",
          "required": true
        },
        "responses": {
          "200": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/TokenResponse"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/TokenResponse"}}}, "description": "OK"},
          "400": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPError"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/HTTPError"}}}, "description": "Bad Request _(validation or deserialization error)_"},
          "500": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPError"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/HTTPError"}}}, "description": "Internal Server Error _(panics)_"},
          "default": {"description": ""}
        },
        "summary": "generate token",
        "tags": ["api/v1"]
      }
    },
    "/health": {
      "get": {
        "description": "#### Controller: \n\n`edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver.(*Handler).Health`\n\n#### Middlewares:\n\n- `github.com/go-fuego/fuego.defaultLogger.middleware`\n\n---\n\n",
        "operationId": "GET_/health",
        "parameters": [{"in": "header", "name": "Accept", "schema": {"type": "string"}}],
        "responses": {
          "200": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/HealthResponse"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/HealthResponse"}}}, "description": "OK"},
          "400": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPError"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/HTTPError"}}}, "description": "Bad Request _(validation or deserialization error)_"},
          "500": {"content": {"application/json": {"schema": {"$ref": "#/components/schemas/HTTPError"}}, "application/xml": {"schema": {"$ref": "#/components/schemas/HTTPError"}}}, "description": "Internal Server Error _(panics)_"},
          "default": {"description": ""}
        },
        "summary": "health"
      }
    }
  }
}
go.mod (42 changed lines)

@@ -1,15 +1,49 @@
-module edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser
+module edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer

 go 1.25.6

 require (
-	gorm.io/driver/sqlite v1.6.0
+	github.com/getkin/kin-openapi v0.133.0
+	github.com/glebarez/sqlite v1.11.0
+	github.com/go-fuego/fuego v0.19.0
+	github.com/oapi-codegen/runtime v1.1.2
 	gorm.io/gorm v1.31.1
 )

 require (
+	github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
+	github.com/dustin/go-humanize v1.0.1 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.11 // indirect
+	github.com/glebarez/go-sqlite v1.21.2 // indirect
+	github.com/go-openapi/jsonpointer v0.22.3 // indirect
+	github.com/go-openapi/swag/jsonname v0.25.4 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/go-playground/validator/v10 v10.28.0 // indirect
+	github.com/golang-jwt/jwt/v5 v5.3.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
+	github.com/gorilla/schema v1.4.1 // indirect
 	github.com/jinzhu/inflection v1.0.0 // indirect
 	github.com/jinzhu/now v1.1.5 // indirect
-	github.com/mattn/go-sqlite3 v1.14.22 // indirect
-	golang.org/x/text v0.20.0 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/kr/pretty v0.3.1 // indirect
+	github.com/leodido/go-urn v1.4.0 // indirect
+	github.com/mailru/easyjson v0.9.1 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
+	github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 // indirect
+	github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 // indirect
+	github.com/perimeterx/marshmallow v1.1.5 // indirect
+	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
+	github.com/rogpeppe/go-internal v1.14.1 // indirect
+	github.com/woodsbury/decimal128 v1.4.0 // indirect
+	golang.org/x/crypto v0.45.0 // indirect
+	golang.org/x/sys v0.39.0 // indirect
+	golang.org/x/text v0.31.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	modernc.org/libc v1.22.5 // indirect
+	modernc.org/mathutil v1.5.0 // indirect
+	modernc.org/memory v1.5.0 // indirect
+	modernc.org/sqlite v1.23.1 // indirect
 )
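go.mod drops `gorm.io/driver/sqlite` and the CGO-based `mattn/go-sqlite3` in favour of `github.com/glebarez/sqlite`, a pure-Go driver backed by `modernc.org/sqlite`. The sketch below assumes the driver's documented drop-in `sqlite.Open` entry point for GORM; the project's actual store setup is not shown in this diff.

```go
// Minimal sketch of opening the store with the pure-Go SQLite driver listed
// in go.mod above. Assumes glebarez/sqlite exposes a drop-in Open(dsn) for
// GORM; not the project's internal/receiver store code.
package main

import (
	"log"

	"github.com/glebarez/sqlite"
	"gorm.io/gorm"
)

func main() {
	// No CGO required, which is why the Dockerfile now builds the receiver with CGO_ENABLED=0.
	db, err := gorm.Open(sqlite.Open("metrics.db"), &gorm.Config{})
	if err != nil {
		log.Fatalf("open sqlite: %v", err)
	}
	_ = db // hand off to the receiver store
}
```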
go.sum (116 changed lines)

@@ -1,12 +1,116 @@
|
||||||
|
github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk=
|
||||||
|
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
|
||||||
|
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
|
||||||
|
github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w=
|
||||||
|
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||||
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
|
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||||
|
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||||
|
github.com/gabriel-vasile/mimetype v1.4.11 h1:AQvxbp830wPhHTqc1u7nzoLT+ZFxGY7emj5DR5DYFik=
|
||||||
|
github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
|
||||||
|
github.com/getkin/kin-openapi v0.133.0 h1:pJdmNohVIJ97r4AUFtEXRXwESr8b0bD721u/Tz6k8PQ=
|
||||||
|
github.com/getkin/kin-openapi v0.133.0/go.mod h1:boAciF6cXk5FhPqe/NQeBTeenbjqU4LhWBf09ILVvWE=
|
||||||
|
github.com/glebarez/go-sqlite v1.21.2 h1:3a6LFC4sKahUunAmynQKLZceZCOzUthkRkEAl9gAXWo=
|
||||||
|
github.com/glebarez/go-sqlite v1.21.2/go.mod h1:sfxdZyhQjTM2Wry3gVYWaW072Ri1WMdWJi0k6+3382k=
|
||||||
|
github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GMw=
|
||||||
|
github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ=
|
||||||
|
github.com/go-fuego/fuego v0.19.0 h1:kxkkBsrbGZP1YnPCAPIdUpMu53nreqN8N86lfi50CJw=
|
||||||
|
github.com/go-fuego/fuego v0.19.0/go.mod h1:O7CLZbvCCBA9ijhN/q8SnyFTzDdMsqYZjUbR82VDHhA=
|
||||||
|
github.com/go-openapi/jsonpointer v0.22.3 h1:dKMwfV4fmt6Ah90zloTbUKWMD+0he+12XYAsPotrkn8=
|
||||||
|
github.com/go-openapi/jsonpointer v0.22.3/go.mod h1:0lBbqeRsQ5lIanv3LHZBrmRGHLHcQoOXQnf88fHlGWo=
|
||||||
|
github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
|
||||||
|
github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
|
||||||
|
github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
|
||||||
|
github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
|
||||||
|
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||||
|
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||||
|
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||||
|
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||||
|
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||||
|
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||||
|
github.com/go-playground/validator/v10 v10.28.0 h1:Q7ibns33JjyW48gHkuFT91qX48KG0ktULL6FgHdG688=
|
||||||
|
github.com/go-playground/validator/v10 v10.28.0/go.mod h1:GoI6I1SjPBh9p7ykNE/yj3fFYbyDOpwMn5KXd+m2hUU=
|
||||||
|
github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM=
|
||||||
|
github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=
|
||||||
|
github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
|
||||||
|
github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
|
||||||
|
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
|
||||||
|
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
|
||||||
|
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||||
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
|
github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E=
|
||||||
|
github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM=
|
||||||
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
||||||
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||||
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||||
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||||
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
|
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||||
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
|
github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE=
|
||||||
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
gorm.io/driver/sqlite v1.6.0 h1:WHRRrIiulaPiPFmDcod6prc4l2VGVWHz80KspNsxSfQ=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
gorm.io/driver/sqlite v1.6.0/go.mod h1:AO9V1qIQddBESngQUKWL9yoH93HIeA1X6V633rBwyT8=
|
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||||
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
|
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
|
||||||
|
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
|
||||||
|
github.com/mailru/easyjson v0.9.1 h1:LbtsOm5WAswyWbvTEOqhypdPeZzHavpZx96/n553mR8=
|
||||||
|
github.com/mailru/easyjson v0.9.1/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
|
||||||
|
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||||
|
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||||
|
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
|
||||||
|
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
|
||||||
|
github.com/oapi-codegen/runtime v1.1.2 h1:P2+CubHq8fO4Q6fV1tqDBZHCwpVpvPg7oKiYzQgXIyI=
|
||||||
|
github.com/oapi-codegen/runtime v1.1.2/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
|
||||||
|
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037 h1:G7ERwszslrBzRxj//JalHPu/3yz+De2J+4aLtSRlHiY=
|
||||||
|
github.com/oasdiff/yaml v0.0.0-20250309154309-f31be36b4037/go.mod h1:2bpvgLBZEtENV5scfDFEtB/5+1M4hkQhDQrccEJ/qGw=
|
||||||
|
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90 h1:bQx3WeLcUWy+RletIKwUIt4x3t8n2SxavmoclizMb8c=
|
||||||
|
github.com/oasdiff/yaml3 v0.0.0-20250309153720-d2182401db90/go.mod h1:y5+oSEHCPT/DGrS++Wc/479ERge0zTFxaF8PbGKcg2o=
|
||||||
|
github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s=
|
||||||
|
github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/thejerf/slogassert v0.3.4 h1:VoTsXixRbXMrRSSxDjYTiEDCM4VWbsYPW5rB/hX24kM=
github.com/thejerf/slogassert v0.3.4/go.mod h1:0zn9ISLVKo1aPMTqcGfG1o6dWwt+Rk574GlUxHD4rs8=
github.com/ugorji/go/codec v1.3.0 h1:Qd2W2sQawAfG8XSvzwhBeoGq71zXOC/Q1E9y/wUcsUA=
github.com/ugorji/go/codec v1.3.0/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
github.com/woodsbury/decimal128 v1.4.0 h1:xJATj7lLu4f2oObouMt2tgGiElE5gO6mSWUjQsBgUlc=
github.com/woodsbury/decimal128 v1.4.0/go.mod h1:BP46FUrVjVhdTbKT+XuQh2xfQaGki9LMIRJSFuh6THU=
golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=
gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE=
modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY=
modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds=
modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
modernc.org/sqlite v1.23.1 h1:nrSBg4aRQQwq59JpvGEQ15tNxoO5pX/kUjcRNwSAGQM=
modernc.org/sqlite v1.23.1/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk=
@@ -6,9 +6,9 @@ import (
 	"log/slog"
 	"time"

-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/metrics"
-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/output"
-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/metrics"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/output"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary"
 )

 // Config holds the collector configuration
@@ -10,8 +10,8 @@ import (
 	"testing"
 	"time"

-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/output"
-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/output"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary"
 )

 func TestCollector_EmitsSummaryOnShutdown(t *testing.T) {
@@ -14,8 +14,10 @@ import (
 	"testing"
 	"time"

-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/receiver"
-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary"
+	"github.com/go-fuego/fuego"
+
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary"
 )

 const (
@@ -32,11 +34,18 @@ func setupTestReceiver(t *testing.T) (*receiver.Store, *httptest.Server, func())
 		t.Fatalf("NewStore() error = %v", err)
 	}

-	handler := receiver.NewHandler(store, slog.New(slog.NewTextHandler(io.Discard, nil)), testReadToken, testHMACKey)
-	mux := http.NewServeMux()
-	handler.RegisterRoutes(mux)
+	handler := receiver.NewHandler(store, slog.New(slog.NewTextHandler(io.Discard, nil)), testReadToken, testHMACKey, 0)
+	s := fuego.NewServer(
+		fuego.WithoutStartupMessages(),
+		fuego.WithEngineOptions(
+			fuego.WithOpenAPIConfig(fuego.OpenAPIConfig{
+				Disabled: true,
+			}),
+		),
+	)
+	handler.RegisterRoutes(s)

-	server := httptest.NewServer(mux)
+	server := httptest.NewServer(s.Mux)

 	cleanup := func() {
 		server.Close()
@@ -46,9 +55,9 @@ func setupTestReceiver(t *testing.T) (*receiver.Store, *httptest.Server, func())
 	return store, server, cleanup
 }

-// generatePushToken generates a scoped push token for an execution context
+// generatePushToken generates a push token for an execution context
 func generatePushToken(exec summary.ExecutionContext) string {
-	return receiver.GenerateScopedToken(testHMACKey, exec.Organization, exec.Repository, exec.Workflow, exec.Job)
+	return receiver.GenerateToken(testHMACKey, exec.Organization, exec.Repository, exec.Workflow, exec.Job)
 }

 func TestPushClientToReceiver(t *testing.T) {
@@ -166,8 +175,8 @@ func TestPushClientIntegration(t *testing.T) {
 	t.Setenv("GITHUB_JOB", "push-job")
 	t.Setenv("GITHUB_RUN_ID", "push-run-456")

-	// Generate scoped push token
-	pushToken := receiver.GenerateScopedToken(testHMACKey, "push-client-org", "push-client-repo", "push-test.yml", "push-job")
+	// Generate push token
+	pushToken := receiver.GenerateToken(testHMACKey, "push-client-org", "push-client-repo", "push-test.yml", "push-job")

 	// Create push client with token - it reads execution context from env vars
 	pushClient := summary.NewPushClient(server.URL+"/api/v1/metrics", pushToken)
@@ -371,11 +380,18 @@ func setupTestReceiverWithToken(t *testing.T, readToken, hmacKey string) (*recei
 		t.Fatalf("NewStore() error = %v", err)
 	}

-	handler := receiver.NewHandler(store, slog.New(slog.NewTextHandler(io.Discard, nil)), readToken, hmacKey)
-	mux := http.NewServeMux()
-	handler.RegisterRoutes(mux)
+	handler := receiver.NewHandler(store, slog.New(slog.NewTextHandler(io.Discard, nil)), readToken, hmacKey, 0)
+	s := fuego.NewServer(
+		fuego.WithoutStartupMessages(),
+		fuego.WithEngineOptions(
+			fuego.WithOpenAPIConfig(fuego.OpenAPIConfig{
+				Disabled: true,
+			}),
+		),
+	)
+	handler.RegisterRoutes(s)

-	server := httptest.NewServer(mux)
+	server := httptest.NewServer(s.Mux)

 	cleanup := func() {
 		server.Close()
@@ -4,8 +4,8 @@ import (
 	"sort"
 	"time"

-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/cgroup"
-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/proc"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/cgroup"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/proc"
 )

 // Aggregator collects and aggregates metrics from processes
@@ -6,7 +6,7 @@ import (
 	"log/slog"
 	"os"

-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/metrics"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/metrics"
 )

 // LogFormat specifies the log output format
@@ -1,6 +1,6 @@
 package output

-import "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/metrics"
+import "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/metrics"

 // Writer defines the interface for outputting metrics
 // This allows for different implementations (logging, HTTP push, etc.)
@@ -1,13 +1,18 @@
-// ABOUTME: HTTP handlers for the metrics receiver service.
-// ABOUTME: Provides endpoints for receiving and querying metrics.
+// ABOUTME: HTTP handlers for the metrics receiver service using Fuego framework.
+// ABOUTME: Provides endpoints for receiving and querying metrics with automatic OpenAPI generation.
 package receiver

 import (
 	"crypto/subtle"
 	"encoding/json"
+	"errors"
+	"fmt"
 	"log/slog"
 	"net/http"
 	"strings"
+	"time"
+
+	"github.com/go-fuego/fuego"
 )

 // Handler handles HTTP requests for the metrics receiver
@@ -16,136 +21,189 @@ type Handler struct {
 	logger    *slog.Logger
 	readToken string // Pre-shared token for read endpoint authentication
 	hmacKey   string // Separate key for HMAC-based push token generation/validation
+	tokenTTL  time.Duration
 }

 // NewHandler creates a new HTTP handler with the given store.
 // readToken authenticates read endpoints and the token generation endpoint.
 // hmacKey is the secret used to derive scoped push tokens.
-func NewHandler(store *Store, logger *slog.Logger, readToken, hmacKey string) *Handler {
-	return &Handler{store: store, logger: logger, readToken: readToken, hmacKey: hmacKey}
+// tokenTTL specifies how long push tokens are valid (0 uses DefaultTokenTTL).
+func NewHandler(store *Store, logger *slog.Logger, readToken, hmacKey string, tokenTTL time.Duration) *Handler {
+	if tokenTTL == 0 {
+		tokenTTL = DefaultTokenTTL
+	}
+	return &Handler{store: store, logger: logger, readToken: readToken, hmacKey: hmacKey, tokenTTL: tokenTTL}
 }

-// RegisterRoutes registers all HTTP routes on the given mux
-func (h *Handler) RegisterRoutes(mux *http.ServeMux) {
-	mux.HandleFunc("POST /api/v1/metrics", h.handleReceiveMetrics)
-	mux.HandleFunc("POST /api/v1/token", h.handleGenerateToken)
-	mux.HandleFunc("GET /api/v1/metrics/repo/{org}/{repo}/{workflow}/{job}", h.handleGetByWorkflowJob)
-	mux.HandleFunc("GET /health", h.handleHealth)
+// Common errors
+var (
+	ErrUnauthorized   = errors.New("authorization required")
+	ErrInvalidToken   = errors.New("invalid token")
+	ErrInvalidFormat  = errors.New("invalid authorization format")
+	ErrMissingHMACKey = errors.New("token generation requires a configured HMAC key")
+	ErrMissingFields  = errors.New("organization, repository, workflow, and job are required")
+	ErrMissingRunID   = errors.New("run_id is required")
+	ErrInvalidParams  = errors.New("org, repo, workflow and job are required")
+	ErrNoMetrics      = errors.New("no metrics found for this workflow/job")
+	ErrInvalidPercent = errors.New("invalid cpu_percentile: must be one of peak, p99, p95, p75, p50, avg")
+)
+
+// HealthResponse is the response for the health endpoint
+type HealthResponse struct {
+	Status string `json:"status"`
 }

-// validateReadToken checks the Authorization header for a valid Bearer token.
-func (h *Handler) validateReadToken(w http.ResponseWriter, r *http.Request) bool {
-	if h.readToken == "" {
-		h.logger.Warn("no read-token configured, rejecting request", slog.String("path", r.URL.Path))
-		http.Error(w, "authorization required", http.StatusUnauthorized)
-		return false
-	}
-
-	authHeader := r.Header.Get("Authorization")
-	if authHeader == "" {
-		h.logger.Warn("missing authorization header", slog.String("path", r.URL.Path))
-		http.Error(w, "authorization required", http.StatusUnauthorized)
-		return false
-	}
-
-	const bearerPrefix = "Bearer "
-	if !strings.HasPrefix(authHeader, bearerPrefix) {
-		h.logger.Warn("invalid authorization format", slog.String("path", r.URL.Path))
-		http.Error(w, "invalid authorization format", http.StatusUnauthorized)
-		return false
-	}
-
-	token := strings.TrimPrefix(authHeader, bearerPrefix)
-	if subtle.ConstantTimeCompare([]byte(token), []byte(h.readToken)) != 1 {
-		h.logger.Warn("invalid token", slog.String("path", r.URL.Path))
-		http.Error(w, "invalid token", http.StatusUnauthorized)
-		return false
-	}
-
-	return true
+// MetricCreatedResponse is the response when a metric is successfully created
+type MetricCreatedResponse struct {
+	ID     uint   `json:"id"`
+	Status string `json:"status"`
 }

-func (h *Handler) handleGenerateToken(w http.ResponseWriter, r *http.Request) {
-	if h.hmacKey == "" {
-		http.Error(w, "token generation requires a configured HMAC key", http.StatusBadRequest)
-		return
-	}
-
-	if !h.validateReadToken(w, r) {
-		return
-	}
-
-	var req TokenRequest
-	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
-		http.Error(w, "invalid JSON body", http.StatusBadRequest)
-		return
-	}
-
-	if req.Organization == "" || req.Repository == "" || req.Workflow == "" || req.Job == "" {
-		http.Error(w, "organization, repository, workflow, and job are required", http.StatusBadRequest)
-		return
-	}
-
-	token := GenerateScopedToken(h.hmacKey, req.Organization, req.Repository, req.Workflow, req.Job)
-
-	w.Header().Set("Content-Type", "application/json")
-	_ = json.NewEncoder(w).Encode(TokenResponse{Token: token})
+// GetMetricsRequest contains path parameters for getting metrics
+type GetMetricsRequest struct {
+	Org      string `path:"org"`
+	Repo     string `path:"repo"`
+	Workflow string `path:"workflow"`
+	Job      string `path:"job"`
 }

-// validatePushToken checks push authentication via scoped HMAC token.
-func (h *Handler) validatePushToken(w http.ResponseWriter, r *http.Request, exec ExecutionContext) bool {
+// GetSizingRequest contains path and query parameters for sizing endpoint
+type GetSizingRequest struct {
+	Org           string `path:"org"`
+	Repo          string `path:"repo"`
+	Workflow      string `path:"workflow"`
+	Job           string `path:"job"`
+	Runs          int    `query:"runs" default:"5" validate:"min=1,max=100" description:"Number of recent runs to analyze"`
+	Buffer        int    `query:"buffer" default:"20" validate:"min=0,max=100" description:"Buffer percentage to add"`
+	CPUPercentile string `query:"cpu_percentile" default:"p95" description:"CPU percentile to use (peak, p99, p95, p75, p50, avg)"`
+}
+
+// RegisterRoutes registers all HTTP routes on the Fuego server
+func (h *Handler) RegisterRoutes(s *fuego.Server) {
+	// Health endpoint (no auth)
+	fuego.Get(s, "/health", h.Health)
+
+	// API group with authentication
+	api := fuego.Group(s, "/api/v1")
+
+	// Token generation (requires read token)
+	fuego.Post(api, "/token", h.GenerateToken, fuego.OptionMiddleware(h.requireReadToken))
+
+	// Metrics endpoints
+	fuego.Post(api, "/metrics", h.ReceiveMetrics) // Uses push token validated in handler
+	fuego.Get(api, "/metrics/repo/{org}/{repo}/{workflow}/{job}", h.GetMetricsByWorkflowJob, fuego.OptionMiddleware(h.requireReadToken))
+
+	// Sizing endpoint
+	fuego.Get(api, "/sizing/repo/{org}/{repo}/{workflow}/{job}", h.GetSizing, fuego.OptionMiddleware(h.requireReadToken))
+}
+
+// requireReadToken is middleware that validates the read token
+func (h *Handler) requireReadToken(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if h.readToken == "" {
+			h.logger.Warn("no read-token configured, rejecting request", slog.String("path", r.URL.Path))
+			http.Error(w, "authorization required", http.StatusUnauthorized)
+			return
+		}
+
+		authHeader := r.Header.Get("Authorization")
+		if authHeader == "" {
+			h.logger.Warn("missing authorization header", slog.String("path", r.URL.Path))
+			http.Error(w, "authorization required", http.StatusUnauthorized)
+			return
+		}
+
+		const bearerPrefix = "Bearer "
+		if !strings.HasPrefix(authHeader, bearerPrefix) {
+			h.logger.Warn("invalid authorization format", slog.String("path", r.URL.Path))
+			http.Error(w, "invalid authorization format", http.StatusUnauthorized)
+			return
+		}
+
+		token := strings.TrimPrefix(authHeader, bearerPrefix)
+		if subtle.ConstantTimeCompare([]byte(token), []byte(h.readToken)) != 1 {
+			h.logger.Warn("invalid token", slog.String("path", r.URL.Path))
+			http.Error(w, "invalid token", http.StatusUnauthorized)
+			return
+		}
+
+		next.ServeHTTP(w, r)
+	})
+}
+
+// validatePushToken checks push authentication via scoped HMAC token
+func (h *Handler) validatePushToken(r *http.Request, exec ExecutionContext) error {
 	if h.hmacKey == "" {
 		h.logger.Warn("no HMAC key configured, rejecting push", slog.String("path", r.URL.Path))
-		http.Error(w, "authorization required", http.StatusUnauthorized)
-		return false
+		return ErrUnauthorized
 	}

 	authHeader := r.Header.Get("Authorization")
 	if authHeader == "" {
 		h.logger.Warn("missing push authorization", slog.String("path", r.URL.Path))
-		http.Error(w, "authorization required", http.StatusUnauthorized)
-		return false
+		return ErrUnauthorized
 	}

 	const bearerPrefix = "Bearer "
 	if !strings.HasPrefix(authHeader, bearerPrefix) {
 		h.logger.Warn("invalid push authorization format", slog.String("path", r.URL.Path))
-		http.Error(w, "invalid authorization format", http.StatusUnauthorized)
-		return false
+		return ErrInvalidFormat
 	}

 	token := strings.TrimPrefix(authHeader, bearerPrefix)
-	if !ValidateScopedToken(h.hmacKey, token, exec.Organization, exec.Repository, exec.Workflow, exec.Job) {
+	if !ValidateToken(h.hmacKey, token, exec.Organization, exec.Repository, exec.Workflow, exec.Job, h.tokenTTL) {
 		h.logger.Warn("invalid push token", slog.String("path", r.URL.Path))
-		http.Error(w, "invalid token", http.StatusUnauthorized)
-		return false
+		return ErrInvalidToken
 	}

-	return true
+	return nil
 }

-func (h *Handler) handleReceiveMetrics(w http.ResponseWriter, r *http.Request) {
+// Health returns the service health status
+func (h *Handler) Health(c fuego.ContextNoBody) (HealthResponse, error) {
+	return HealthResponse{Status: "ok"}, nil
+}
+
+// GenerateToken generates a scoped HMAC push token for a workflow/job
+func (h *Handler) GenerateToken(c fuego.ContextWithBody[TokenRequest]) (TokenResponse, error) {
+	if h.hmacKey == "" {
+		return TokenResponse{}, fuego.BadRequestError{Detail: ErrMissingHMACKey.Error()}
+	}
+
+	req, err := c.Body()
+	if err != nil {
+		return TokenResponse{}, fuego.BadRequestError{Detail: "invalid JSON body"}
+	}
+
+	if req.Organization == "" || req.Repository == "" || req.Workflow == "" || req.Job == "" {
+		return TokenResponse{}, fuego.BadRequestError{Detail: ErrMissingFields.Error()}
+	}
+
+	token := GenerateToken(h.hmacKey, req.Organization, req.Repository, req.Workflow, req.Job)
+	return TokenResponse{Token: token}, nil
+}
+
+// ReceiveMetrics receives and stores metrics from a collector
+func (h *Handler) ReceiveMetrics(c fuego.ContextNoBody) (MetricCreatedResponse, error) {
 	var payload MetricsPayload
-	if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
+	if err := json.NewDecoder(c.Request().Body).Decode(&payload); err != nil {
 		h.logger.Error("failed to decode payload", slog.String("error", err.Error()))
-		http.Error(w, "invalid JSON payload", http.StatusBadRequest)
-		return
+		return MetricCreatedResponse{}, fuego.BadRequestError{Detail: "invalid JSON payload"}
 	}

 	if payload.Execution.RunID == "" {
-		http.Error(w, "run_id is required", http.StatusBadRequest)
-		return
+		return MetricCreatedResponse{}, fuego.BadRequestError{Detail: ErrMissingRunID.Error()}
 	}

-	if !h.validatePushToken(w, r, payload.Execution) {
-		return
+	// Validate push token
+	if err := h.validatePushToken(c.Request(), payload.Execution); err != nil {
+		return MetricCreatedResponse{}, fuego.UnauthorizedError{Detail: err.Error()}
 	}

 	id, err := h.store.SaveMetric(&payload)
 	if err != nil {
 		h.logger.Error("failed to save metric", slog.String("error", err.Error()))
-		http.Error(w, "failed to save metric", http.StatusInternalServerError)
-		return
+		return MetricCreatedResponse{}, fuego.InternalServerError{Detail: "failed to save metric"}
 	}

 	h.logger.Info("metric saved",
@@ -154,30 +212,25 @@ func (h *Handler) handleReceiveMetrics(w http.ResponseWriter, r *http.Request) {
 		slog.String("repository", payload.Execution.Repository),
 	)

-	w.Header().Set("Content-Type", "application/json")
-	w.WriteHeader(http.StatusCreated)
-	_ = json.NewEncoder(w).Encode(map[string]any{"id": id, "status": "created"})
+	c.SetStatus(http.StatusCreated)
+	return MetricCreatedResponse{ID: id, Status: "created"}, nil
 }

-func (h *Handler) handleGetByWorkflowJob(w http.ResponseWriter, r *http.Request) {
-	if !h.validateReadToken(w, r) {
-		return
-	}
-
-	org := r.PathValue("org")
-	repo := r.PathValue("repo")
-	workflow := r.PathValue("workflow")
-	job := r.PathValue("job")
+// GetMetricsByWorkflowJob retrieves all metrics for a specific workflow/job
+func (h *Handler) GetMetricsByWorkflowJob(c fuego.ContextNoBody) ([]MetricResponse, error) {
+	org := c.PathParam("org")
+	repo := c.PathParam("repo")
+	workflow := c.PathParam("workflow")
+	job := c.PathParam("job")
+
 	if org == "" || repo == "" || workflow == "" || job == "" {
-		http.Error(w, "org, repo, workflow and job are required", http.StatusBadRequest)
-		return
+		return nil, fuego.BadRequestError{Detail: ErrInvalidParams.Error()}
 	}

 	metrics, err := h.store.GetMetricsByWorkflowJob(org, repo, workflow, job)
 	if err != nil {
 		h.logger.Error("failed to get metrics", slog.String("error", err.Error()))
-		http.Error(w, "failed to get metrics", http.StatusInternalServerError)
-		return
+		return nil, fuego.InternalServerError{Detail: "failed to get metrics"}
 	}

 	// Convert to response type with Payload as JSON object
@@ -186,11 +239,65 @@ func (h *Handler) handleGetByWorkflowJob(w http.ResponseWriter, r *http.Request)
 		response[i] = m.ToResponse()
 	}

-	w.Header().Set("Content-Type", "application/json")
-	_ = json.NewEncoder(w).Encode(response)
+	return response, nil
 }

-func (h *Handler) handleHealth(w http.ResponseWriter, r *http.Request) {
-	w.Header().Set("Content-Type", "application/json")
-	_ = json.NewEncoder(w).Encode(map[string]string{"status": "ok"})
+// GetSizing computes Kubernetes resource sizing recommendations
+func (h *Handler) GetSizing(c fuego.ContextNoBody) (SizingResponse, error) {
+	org := c.PathParam("org")
+	repo := c.PathParam("repo")
+	workflow := c.PathParam("workflow")
+	job := c.PathParam("job")
+
+	if org == "" || repo == "" || workflow == "" || job == "" {
+		return SizingResponse{}, fuego.BadRequestError{Detail: ErrInvalidParams.Error()}
+	}
+
+	// Parse query parameters with defaults
+	runs := parseIntQueryParamFromContext(c, "runs", 5, 1, 100)
+	buffer := parseIntQueryParamFromContext(c, "buffer", 20, 0, 100)
+	cpuPercentile := c.QueryParam("cpu_percentile")
+	if cpuPercentile == "" {
+		cpuPercentile = "p95"
+	}
+	if !IsValidPercentile(cpuPercentile) {
+		return SizingResponse{}, fuego.BadRequestError{Detail: ErrInvalidPercent.Error()}
+	}
+
+	metrics, err := h.store.GetRecentMetricsByWorkflowJob(org, repo, workflow, job, runs)
+	if err != nil {
+		h.logger.Error("failed to get metrics", slog.String("error", err.Error()))
+		return SizingResponse{}, fuego.InternalServerError{Detail: "failed to get metrics"}
+	}
+
+	if len(metrics) == 0 {
+		return SizingResponse{}, fuego.NotFoundError{Detail: ErrNoMetrics.Error()}
+	}
+
+	response, err := computeSizing(metrics, buffer, cpuPercentile)
+	if err != nil {
+		h.logger.Error("failed to compute sizing", slog.String("error", err.Error()))
+		return SizingResponse{}, fuego.InternalServerError{Detail: "failed to compute sizing"}
+	}
+
+	return *response, nil
+}
+
+// parseIntQueryParamFromContext parses an integer query parameter with default, min, and max values
+func parseIntQueryParamFromContext(c fuego.ContextNoBody, name string, defaultVal, minVal, maxVal int) int {
+	strVal := c.QueryParam(name)
+	if strVal == "" {
+		return defaultVal
+	}
+	var val int
+	if _, err := fmt.Sscanf(strVal, "%d", &val); err != nil {
+		return defaultVal
+	}
+	if val < minVal {
+		return minVal
+	}
+	if val > maxVal {
+		return maxVal
+	}
+	return val
 }
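The sizing route above takes its tuning knobs as query parameters (runs, buffer, cpu_percentile) and sits behind the read-token middleware. A minimal client sketch follows; the receiver address, read token, and the org/repo/workflow/job values are placeholders for illustration, while the path shape and query parameter names come from GetSizing and GetSizingRequest above.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical receiver address and read token.
	base := "http://localhost:8080"
	readToken := "REPLACE_ME"

	q := url.Values{}
	q.Set("runs", "10")            // analyze the 10 most recent runs
	q.Set("buffer", "20")          // add 20% headroom
	q.Set("cpu_percentile", "p95") // size CPU on the p95 value

	reqURL := fmt.Sprintf("%s/api/v1/sizing/repo/%s/%s/%s/%s?%s",
		base, "my-org", "my-repo", "ci.yml", "build", q.Encode())

	req, err := http.NewRequest(http.MethodGet, reqURL, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+readToken)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body)) // SizingResponse JSON (containers, total, meta)
}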
@@ -8,9 +8,12 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"path/filepath"
+	"strings"
 	"testing"

-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary"
+	"github.com/go-fuego/fuego"
+
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary"
 )

 func TestHandler_ReceiveMetrics(t *testing.T) {
@@ -25,7 +28,7 @@ func TestHandler_ReceiveMetrics(t *testing.T) {
 		Job:   "build",
 		RunID: "run-123",
 	}
-	pushToken := GenerateScopedToken(readToken, exec.Organization, exec.Repository, exec.Workflow, exec.Job)
+	pushToken := GenerateToken(readToken, exec.Organization, exec.Repository, exec.Workflow, exec.Job)

 	payload := MetricsPayload{
 		Execution: exec,
@@ -41,9 +44,8 @@ func TestHandler_ReceiveMetrics(t *testing.T) {
 	req.Header.Set("Authorization", "Bearer "+pushToken)
 	rec := httptest.NewRecorder()

-	mux := http.NewServeMux()
-	h.RegisterRoutes(mux)
-	mux.ServeHTTP(rec, req)
+	s := newTestServer(h)
+	s.Mux.ServeHTTP(rec, req)

 	if rec.Code != http.StatusCreated {
 		t.Errorf("status = %d, want %d", rec.Code, http.StatusCreated)
@@ -68,9 +70,8 @@ func TestHandler_ReceiveMetrics_InvalidJSON(t *testing.T) {
 	req := httptest.NewRequest(http.MethodPost, "/api/v1/metrics", bytes.NewReader([]byte("not json")))
 	rec := httptest.NewRecorder()

-	mux := http.NewServeMux()
-	h.RegisterRoutes(mux)
-	mux.ServeHTTP(rec, req)
+	s := newTestServer(h)
+	s.Mux.ServeHTTP(rec, req)

 	if rec.Code != http.StatusBadRequest {
 		t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest)
@@ -94,9 +95,8 @@ func TestHandler_ReceiveMetrics_MissingRunID(t *testing.T) {
 	req := httptest.NewRequest(http.MethodPost, "/api/v1/metrics", bytes.NewReader(body))
 	rec := httptest.NewRecorder()

-	mux := http.NewServeMux()
-	h.RegisterRoutes(mux)
-	mux.ServeHTTP(rec, req)
+	s := newTestServer(h)
+	s.Mux.ServeHTTP(rec, req)

 	if rec.Code != http.StatusBadRequest {
 		t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest)
@@ -124,9 +124,8 @@ func TestHandler_GetByWorkflowJob(t *testing.T) {
 	req.Header.Set("Authorization", "Bearer "+readToken)
 	rec := httptest.NewRecorder()

-	mux := http.NewServeMux()
-	h.RegisterRoutes(mux)
-	mux.ServeHTTP(rec, req)
+	s := newTestServer(h)
+	s.Mux.ServeHTTP(rec, req)

 	if rec.Code != http.StatusOK {
 		t.Errorf("status = %d, want %d", rec.Code, http.StatusOK)
@@ -150,9 +149,8 @@ func TestHandler_GetByWorkflowJob_NotFound(t *testing.T) {
 	req.Header.Set("Authorization", "Bearer "+readToken)
 	rec := httptest.NewRecorder()

-	mux := http.NewServeMux()
-	h.RegisterRoutes(mux)
-	mux.ServeHTTP(rec, req)
+	s := newTestServer(h)
+	s.Mux.ServeHTTP(rec, req)

 	if rec.Code != http.StatusOK {
 		t.Errorf("status = %d, want %d", rec.Code, http.StatusOK)
@@ -179,8 +177,7 @@ func TestHandler_GetByWorkflowJob_WithToken(t *testing.T) {
 		t.Fatalf("SaveMetric() error = %v", err)
 	}

-	mux := http.NewServeMux()
-	h.RegisterRoutes(mux)
+	s := newTestServer(h)

 	tests := []struct {
 		name string
@@ -200,7 +197,7 @@ func TestHandler_GetByWorkflowJob_WithToken(t *testing.T) {
 				req.Header.Set("Authorization", tt.authHeader)
 			}
 			rec := httptest.NewRecorder()
-			mux.ServeHTTP(rec, req)
+			s.Mux.ServeHTTP(rec, req)

 			if rec.Code != tt.wantCode {
 				t.Errorf("status = %d, want %d", rec.Code, tt.wantCode)
@@ -216,9 +213,8 @@ func TestHandler_Health(t *testing.T) {
 	req := httptest.NewRequest(http.MethodGet, "/health", nil)
 	rec := httptest.NewRecorder()

-	mux := http.NewServeMux()
-	h.RegisterRoutes(mux)
-	mux.ServeHTTP(rec, req)
+	s := newTestServer(h)
+	s.Mux.ServeHTTP(rec, req)

 	if rec.Code != http.StatusOK {
 		t.Errorf("status = %d, want %d", rec.Code, http.StatusOK)
@@ -249,9 +245,8 @@ func TestHandler_GenerateToken(t *testing.T) {
 	req.Header.Set("Content-Type", "application/json")
 	rec := httptest.NewRecorder()

-	mux := http.NewServeMux()
-	h.RegisterRoutes(mux)
-	mux.ServeHTTP(rec, req)
+	s := newTestServer(h)
+	s.Mux.ServeHTTP(rec, req)

 	if rec.Code != http.StatusOK {
 		t.Fatalf("status = %d, want %d", rec.Code, http.StatusOK)
@@ -264,8 +259,13 @@ func TestHandler_GenerateToken(t *testing.T) {
 	if resp.Token == "" {
 		t.Error("expected non-empty token")
 	}
-	if len(resp.Token) != 64 {
-		t.Errorf("token length = %d, want 64", len(resp.Token))
+	// Token format is "timestamp:hmac" where hmac is 64 hex chars
+	parts := strings.SplitN(resp.Token, ":", 2)
+	if len(parts) != 2 {
+		t.Errorf("token should have format 'timestamp:hmac', got %q", resp.Token)
+	}
+	if len(parts[1]) != 64 {
+		t.Errorf("HMAC part length = %d, want 64", len(parts[1]))
 	}
 }
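The updated assertion above only pins the token's outward shape ("timestamp:hmac" with a 64-character hex HMAC); the actual GenerateToken/ValidateToken implementation lives in a file not shown in this hunk. The sketch below is only a rough illustration of a token with that shape; the message layout and timestamp encoding are assumptions, not the project's code.

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strconv"
	"strings"
	"time"
)

// makeToken builds a "timestamp:hmac" token over the execution scope.
// The message layout (scope fields joined with '|' plus the timestamp) is an
// assumption for illustration; only the outer format matches the test above.
func makeToken(key, org, repo, workflow, job string) string {
	ts := strconv.FormatInt(time.Now().Unix(), 10)
	mac := hmac.New(sha256.New, []byte(key))
	mac.Write([]byte(strings.Join([]string{org, repo, workflow, job, ts}, "|")))
	return ts + ":" + hex.EncodeToString(mac.Sum(nil)) // SHA-256 -> 64 hex chars after the colon
}

func main() {
	tok := makeToken("secret", "org", "repo", "ci.yml", "build")
	parts := strings.SplitN(tok, ":", 2)
	fmt.Println(len(parts) == 2, len(parts[1]) == 64) // true true
}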
@@ -283,9 +283,8 @@ func TestHandler_GenerateToken_NoAuth(t *testing.T) {
 	req := httptest.NewRequest(http.MethodPost, "/api/v1/token", bytes.NewReader(body))
 	rec := httptest.NewRecorder()

-	mux := http.NewServeMux()
-	h.RegisterRoutes(mux)
-	mux.ServeHTTP(rec, req)
+	s := newTestServer(h)
+	s.Mux.ServeHTTP(rec, req)

 	if rec.Code != http.StatusUnauthorized {
 		t.Errorf("status = %d, want %d", rec.Code, http.StatusUnauthorized)
@@ -308,9 +307,8 @@ func TestHandler_GenerateToken_MissingFields(t *testing.T) {
 	req.Header.Set("Content-Type", "application/json")
 	rec := httptest.NewRecorder()

-	mux := http.NewServeMux()
-	h.RegisterRoutes(mux)
-	mux.ServeHTTP(rec, req)
+	s := newTestServer(h)
+	s.Mux.ServeHTTP(rec, req)

 	if rec.Code != http.StatusBadRequest {
 		t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest)
@@ -332,12 +330,12 @@ func TestHandler_GenerateToken_NoReadToken(t *testing.T) {
 	req.Header.Set("Content-Type", "application/json")
 	rec := httptest.NewRecorder()

-	mux := http.NewServeMux()
-	h.RegisterRoutes(mux)
-	mux.ServeHTTP(rec, req)
+	s := newTestServer(h)
+	s.Mux.ServeHTTP(rec, req)

-	if rec.Code != http.StatusBadRequest {
-		t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest)
+	// With no read token, the middleware rejects before we reach the handler
+	if rec.Code != http.StatusUnauthorized {
+		t.Errorf("status = %d, want %d", rec.Code, http.StatusUnauthorized)
 	}
 }
@@ -346,8 +344,7 @@ func TestHandler_ReceiveMetrics_WithPushToken(t *testing.T) {
 	h, cleanup := newTestHandlerWithToken(t, readToken)
 	defer cleanup()

-	mux := http.NewServeMux()
-	h.RegisterRoutes(mux)
+	s := newTestServer(h)

 	exec := ExecutionContext{
 		Organization: "org",
@@ -357,8 +354,8 @@ func TestHandler_ReceiveMetrics_WithPushToken(t *testing.T) {
 		RunID: "run-1",
 	}

-	validToken := GenerateScopedToken(readToken, exec.Organization, exec.Repository, exec.Workflow, exec.Job)
-	wrongScopeToken := GenerateScopedToken(readToken, "other-org", "repo", "ci.yml", "build")
+	validToken := GenerateToken(readToken, exec.Organization, exec.Repository, exec.Workflow, exec.Job)
+	wrongScopeToken := GenerateToken(readToken, "other-org", "repo", "ci.yml", "build")

 	tests := []struct {
 		name string
@@ -385,7 +382,7 @@ func TestHandler_ReceiveMetrics_WithPushToken(t *testing.T) {
 				req.Header.Set("Authorization", tt.authHeader)
 			}
 			rec := httptest.NewRecorder()
-			mux.ServeHTTP(rec, req)
+			s.Mux.ServeHTTP(rec, req)

 			if rec.Code != tt.wantCode {
 				t.Errorf("status = %d, want %d", rec.Code, tt.wantCode)
|
@ -414,9 +411,8 @@ func TestHandler_ReceiveMetrics_RejectsWhenNoReadToken(t *testing.T) {
|
||||||
req.Header.Set("Content-Type", "application/json")
|
req.Header.Set("Content-Type", "application/json")
|
||||||
rec := httptest.NewRecorder()
|
rec := httptest.NewRecorder()
|
||||||
|
|
||||||
mux := http.NewServeMux()
|
s := newTestServer(h)
|
||||||
h.RegisterRoutes(mux)
|
s.Mux.ServeHTTP(rec, req)
|
||||||
mux.ServeHTTP(rec, req)
|
|
||||||
|
|
||||||
if rec.Code != http.StatusUnauthorized {
|
if rec.Code != http.StatusUnauthorized {
|
||||||
t.Errorf("status = %d, want %d", rec.Code, http.StatusUnauthorized)
|
t.Errorf("status = %d, want %d", rec.Code, http.StatusUnauthorized)
|
||||||
|
|
@@ -430,15 +426,27 @@ func TestHandler_GetByWorkflowJob_RejectsWhenNoReadToken(t *testing.T) {
 	req := httptest.NewRequest(http.MethodGet, "/api/v1/metrics/repo/org/repo/ci.yml/build", nil)
 	rec := httptest.NewRecorder()

-	mux := http.NewServeMux()
-	h.RegisterRoutes(mux)
-	mux.ServeHTTP(rec, req)
+	s := newTestServer(h)
+	s.Mux.ServeHTTP(rec, req)

 	if rec.Code != http.StatusUnauthorized {
 		t.Errorf("status = %d, want %d", rec.Code, http.StatusUnauthorized)
 	}
 }

+func newTestServer(h *Handler) *fuego.Server {
+	s := fuego.NewServer(
+		fuego.WithoutStartupMessages(),
+		fuego.WithEngineOptions(
+			fuego.WithOpenAPIConfig(fuego.OpenAPIConfig{
+				Disabled: true,
+			}),
+		),
+	)
+	h.RegisterRoutes(s)
+	return s
+}
+
 func newTestHandler(t *testing.T) (*Handler, func()) {
 	t.Helper()
 	dbPath := filepath.Join(t.TempDir(), "test.db")
@@ -448,7 +456,7 @@ func newTestHandler(t *testing.T) (*Handler, func()) {
 	}

 	logger := slog.New(slog.NewTextHandler(io.Discard, nil))
-	handler := NewHandler(store, logger, "", "") // no auth — endpoints will reject
+	handler := NewHandler(store, logger, "", "", 0) // no auth — endpoints will reject

 	return handler, func() { _ = store.Close() }
 }
@@ -467,7 +475,7 @@ func newTestHandlerWithKeys(t *testing.T, readToken, hmacKey string) (*Handler,
 	}

 	logger := slog.New(slog.NewTextHandler(io.Discard, nil))
-	handler := NewHandler(store, logger, readToken, hmacKey)
+	handler := NewHandler(store, logger, readToken, hmacKey, 0) // 0 uses DefaultTokenTTL

 	return handler, func() { _ = store.Close() }
 }
221 internal/receiver/sizing.go Normal file

@@ -0,0 +1,221 @@
// ABOUTME: Computes ideal container sizes from historical run data.
// ABOUTME: Provides Kubernetes-style resource sizes.
package receiver

import (
	"encoding/json"
	"fmt"
	"math"
	"sort"

	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary"
)

// ResourceSize holds Kubernetes-formatted resource values
type ResourceSize struct {
	Request string `json:"request"`
	Limit   string `json:"limit"`
}

// ContainerSizing holds computed sizing for a single container
type ContainerSizing struct {
	Name   string       `json:"name"`
	CPU    ResourceSize `json:"cpu"`
	Memory ResourceSize `json:"memory"`
}

// SizingMeta provides context about the sizing calculation
type SizingMeta struct {
	RunsAnalyzed  int    `json:"runs_analyzed"`
	BufferPercent int    `json:"buffer_percent"`
	CPUPercentile string `json:"cpu_percentile"`
}

// SizingResponse is the API response for the sizing endpoint
type SizingResponse struct {
	Containers []ContainerSizing `json:"containers"`
	Total      struct {
		CPU    ResourceSize `json:"cpu"`
		Memory ResourceSize `json:"memory"`
	} `json:"total"`
	Meta SizingMeta `json:"meta"`
}

// validPercentiles lists the allowed percentile values
var validPercentiles = map[string]bool{
	"peak": true,
	"p99":  true,
	"p95":  true,
	"p75":  true,
	"p50":  true,
	"avg":  true,
}

// IsValidPercentile checks if the given percentile string is valid
func IsValidPercentile(p string) bool {
	return validPercentiles[p]
}

// selectCPUValue extracts the appropriate value from StatSummary based on percentile
func selectCPUValue(stats summary.StatSummary, percentile string) float64 {
	switch percentile {
	case "peak":
		return stats.Peak
	case "p99":
		return stats.P99
	case "p95":
		return stats.P95
	case "p75":
		return stats.P75
	case "p50":
		return stats.P50
	case "avg":
		return stats.Avg
	default:
		return stats.P95 // default to p95
	}
}

// formatMemoryK8s converts bytes to Kubernetes memory format (Mi)
func formatMemoryK8s(bytes float64) string {
	const Mi = 1024 * 1024
	return fmt.Sprintf("%.0fMi", math.Ceil(bytes/Mi))
}

// formatCPUK8s converts cores to Kubernetes CPU format (millicores or whole cores)
func formatCPUK8s(cores float64) string {
	millicores := cores * 1000
	if millicores >= 1000 && math.Mod(millicores, 1000) == 0 {
		return fmt.Sprintf("%.0f", cores)
	}
	return fmt.Sprintf("%.0fm", math.Ceil(millicores))
}

// roundUpMemoryLimit rounds bytes up to the next power of 2 in Mi
func roundUpMemoryLimit(bytes float64) float64 {
	const Mi = 1024 * 1024
	if bytes <= 0 {
		return Mi // minimum 1Mi
	}
	miValue := bytes / Mi
	if miValue <= 1 {
		return Mi // minimum 1Mi
	}
	// Find next power of 2
	power := math.Ceil(math.Log2(miValue))
	return math.Pow(2, power) * Mi
}

// roundUpCPULimit rounds cores up to the next 0.5 increment
func roundUpCPULimit(cores float64) float64 {
	if cores <= 0 {
		return 0.5 // minimum 0.5 cores
	}
	return math.Ceil(cores*2) / 2
}

// containerAggregation holds accumulated stats for a single container across runs
type containerAggregation struct {
	cpuValues   []float64
	memoryPeaks []float64
}

// computeSizing calculates ideal container sizes from metrics
func computeSizing(metrics []Metric, bufferPercent int, cpuPercentile string) (*SizingResponse, error) {
	if len(metrics) == 0 {
		return nil, fmt.Errorf("no metrics provided")
	}

	// Aggregate container stats across all runs
	containerStats := make(map[string]*containerAggregation)

	for _, m := range metrics {
		var runSummary summary.RunSummary
		if err := json.Unmarshal([]byte(m.Payload), &runSummary); err != nil {
			continue // skip invalid payloads
		}

		for _, c := range runSummary.Containers {
			if _, exists := containerStats[c.Name]; !exists {
				containerStats[c.Name] = &containerAggregation{
					cpuValues:   make([]float64, 0),
					memoryPeaks: make([]float64, 0),
				}
			}
			agg := containerStats[c.Name]
			agg.cpuValues = append(agg.cpuValues, selectCPUValue(c.CPUCores, cpuPercentile))
			agg.memoryPeaks = append(agg.memoryPeaks, c.MemoryBytes.Peak)
		}
	}

	// Calculate sizing for each container
	bufferMultiplier := 1.0 + float64(bufferPercent)/100.0
	var containers []ContainerSizing
	var totalCPU, totalMemory float64

	// Sort container names for consistent output
	names := make([]string, 0, len(containerStats))
	for name := range containerStats {
		names = append(names, name)
	}
	sort.Strings(names)

	for _, name := range names {
		agg := containerStats[name]

		// CPU: max of selected percentile values across runs
		maxCPU := 0.0
		for _, v := range agg.cpuValues {
			if v > maxCPU {
				maxCPU = v
			}
		}

		// Memory: peak of peaks
		maxMemory := 0.0
		for _, v := range agg.memoryPeaks {
			if v > maxMemory {
				maxMemory = v
			}
		}

		// Apply buffer
		cpuWithBuffer := maxCPU * bufferMultiplier
		memoryWithBuffer := maxMemory * bufferMultiplier

		containers = append(containers, ContainerSizing{
			Name: name,
			CPU: ResourceSize{
				Request: formatCPUK8s(cpuWithBuffer),
				Limit:   formatCPUK8s(roundUpCPULimit(cpuWithBuffer)),
			},
			Memory: ResourceSize{
				Request: formatMemoryK8s(memoryWithBuffer),
				Limit:   formatMemoryK8s(roundUpMemoryLimit(memoryWithBuffer)),
			},
		})

		totalCPU += cpuWithBuffer
		totalMemory += memoryWithBuffer
	}

	response := &SizingResponse{
		Containers: containers,
		Meta: SizingMeta{
			RunsAnalyzed:  len(metrics),
			BufferPercent: bufferPercent,
			CPUPercentile: cpuPercentile,
		},
	}

	response.Total.CPU = ResourceSize{
		Request: formatCPUK8s(totalCPU),
		Limit:   formatCPUK8s(roundUpCPULimit(totalCPU)),
	}
	response.Total.Memory = ResourceSize{
		Request: formatMemoryK8s(totalMemory),
		Limit:   formatMemoryK8s(roundUpMemoryLimit(totalMemory)),
	}

	return response, nil
}
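For reference, this is how a SizingResponse built from the types above serializes; the concrete values match the single-container case asserted in TestComputeSizing_SingleRun below (p95 of 0.8 cores and a 512Mi peak with a 20% buffer). A sketch only, placed in the same package purely for illustration; it assumes "encoding/json" and "fmt" are imported.

package receiver

import (
	"encoding/json"
	"fmt"
)

// sketchSizingResponse prints the JSON shape of a SizingResponse; the numbers
// mirror the single-run test case in sizing_test.go below.
func sketchSizingResponse() {
	resp := SizingResponse{
		Containers: []ContainerSizing{{
			Name:   "runner",
			CPU:    ResourceSize{Request: "960m", Limit: "1"},
			Memory: ResourceSize{Request: "615Mi", Limit: "1024Mi"},
		}},
		Meta: SizingMeta{RunsAnalyzed: 1, BufferPercent: 20, CPUPercentile: "p95"},
	}
	// With a single container the totals equal the container values.
	resp.Total.CPU = ResourceSize{Request: "960m", Limit: "1"}
	resp.Total.Memory = ResourceSize{Request: "615Mi", Limit: "1024Mi"}

	out, _ := json.MarshalIndent(resp, "", "  ")
	fmt.Println(string(out))
	// Output uses the "containers", "total" and "meta" keys declared by the
	// struct tags above, e.g. "meta":{"runs_analyzed":1,"buffer_percent":20,"cpu_percentile":"p95"}.
}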
489
internal/receiver/sizing_test.go
Normal file
489
internal/receiver/sizing_test.go
Normal file
|
|
@ -0,0 +1,489 @@
|
||||||
|
package receiver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestFormatMemoryK8s(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
bytes float64
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{0, "0Mi"},
|
||||||
|
{1024 * 1024, "1Mi"},
|
||||||
|
{256 * 1024 * 1024, "256Mi"},
|
||||||
|
{512 * 1024 * 1024, "512Mi"},
|
||||||
|
{1024 * 1024 * 1024, "1024Mi"},
|
||||||
|
{2 * 1024 * 1024 * 1024, "2048Mi"},
|
||||||
|
{1.5 * 1024 * 1024 * 1024, "1536Mi"},
|
||||||
|
{100 * 1024 * 1024, "100Mi"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
got := formatMemoryK8s(tt.bytes)
|
||||||
|
if got != tt.want {
|
||||||
|
t.Errorf("formatMemoryK8s(%v) = %q, want %q", tt.bytes, got, tt.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestFormatCPUK8s(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
cores float64
|
||||||
|
want string
|
||||||
|
}{
|
||||||
|
{0, "0m"},
|
||||||
|
{0.1, "100m"},
|
||||||
|
{0.5, "500m"},
|
||||||
|
{1.0, "1"},
|
||||||
|
{1.5, "1500m"},
|
||||||
|
{2.0, "2"},
|
||||||
|
{2.5, "2500m"},
|
||||||
|
{0.123, "123m"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
got := formatCPUK8s(tt.cores)
|
||||||
|
if got != tt.want {
|
||||||
|
t.Errorf("formatCPUK8s(%v) = %q, want %q", tt.cores, got, tt.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRoundUpMemoryLimit(t *testing.T) {
|
||||||
|
Mi := float64(1024 * 1024)
|
||||||
|
tests := []struct {
|
||||||
|
bytes float64
|
||||||
|
want float64
|
||||||
|
}{
|
||||||
|
{0, Mi}, // minimum 1Mi
|
||||||
|
{100, Mi}, // rounds up to 1Mi
|
||||||
|
{Mi, Mi}, // exactly 1Mi stays 1Mi
|
||||||
|
{1.5 * Mi, 2 * Mi},
|
||||||
|
{200 * Mi, 256 * Mi},
|
||||||
|
{300 * Mi, 512 * Mi},
|
||||||
|
{600 * Mi, 1024 * Mi},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
got := roundUpMemoryLimit(tt.bytes)
|
||||||
|
if got != tt.want {
|
||||||
|
t.Errorf("roundUpMemoryLimit(%v) = %v, want %v", tt.bytes, got, tt.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRoundUpCPULimit(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
cores float64
|
||||||
|
want float64
|
||||||
|
}{
|
||||||
|
{0, 0.5}, // minimum 0.5
|
||||||
|
{0.1, 0.5},
|
||||||
|
{0.5, 0.5},
|
||||||
|
{0.6, 1.0},
|
||||||
|
{1.0, 1.0},
|
||||||
|
{1.1, 1.5},
|
||||||
|
{1.5, 1.5},
|
||||||
|
{2.0, 2.0},
|
||||||
|
{2.3, 2.5},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
got := roundUpCPULimit(tt.cores)
|
||||||
|
if got != tt.want {
|
||||||
|
t.Errorf("roundUpCPULimit(%v) = %v, want %v", tt.cores, got, tt.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSelectCPUValue(t *testing.T) {
|
||||||
|
stats := summary.StatSummary{
|
||||||
|
Peak: 10.0,
|
||||||
|
P99: 9.0,
|
||||||
|
P95: 8.0,
|
||||||
|
P75: 6.0,
|
||||||
|
P50: 5.0,
|
||||||
|
Avg: 4.0,
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
percentile string
|
||||||
|
want float64
|
||||||
|
}{
|
||||||
|
{"peak", 10.0},
|
||||||
|
{"p99", 9.0},
|
||||||
|
{"p95", 8.0},
|
||||||
|
{"p75", 6.0},
|
||||||
|
{"p50", 5.0},
|
||||||
|
{"avg", 4.0},
|
||||||
|
{"invalid", 8.0}, // defaults to p95
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
got := selectCPUValue(stats, tt.percentile)
|
||||||
|
if got != tt.want {
|
||||||
|
t.Errorf("selectCPUValue(stats, %q) = %v, want %v", tt.percentile, got, tt.want)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}

func TestIsValidPercentile(t *testing.T) {
	valid := []string{"peak", "p99", "p95", "p75", "p50", "avg"}
	for _, p := range valid {
		if !IsValidPercentile(p) {
			t.Errorf("IsValidPercentile(%q) = false, want true", p)
		}
	}

	invalid := []string{"p80", "p90", "max", ""}
	for _, p := range invalid {
		if IsValidPercentile(p) {
			t.Errorf("IsValidPercentile(%q) = true, want false", p)
		}
	}
}

func TestComputeSizing_SingleRun(t *testing.T) {
	runSummary := summary.RunSummary{
		Containers: []summary.ContainerSummary{
			{
				Name:        "runner",
				CPUCores:    summary.StatSummary{Peak: 1.0, P99: 0.9, P95: 0.8, P75: 0.6, P50: 0.5, Avg: 0.4},
				MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024}, // 512Mi
			},
		},
	}

	payload, _ := json.Marshal(runSummary)
	metrics := []Metric{{Payload: string(payload)}}

	resp, err := computeSizing(metrics, 20, "p95")
	if err != nil {
		t.Fatalf("computeSizing() error = %v", err)
	}

	if len(resp.Containers) != 1 {
		t.Fatalf("got %d containers, want 1", len(resp.Containers))
	}

	c := resp.Containers[0]
	if c.Name != "runner" {
		t.Errorf("container name = %q, want %q", c.Name, "runner")
	}

	// CPU: 0.8 * 1.2 = 0.96 -> 960m request, 1 limit
	if c.CPU.Request != "960m" {
		t.Errorf("CPU request = %q, want %q", c.CPU.Request, "960m")
	}
	if c.CPU.Limit != "1" {
		t.Errorf("CPU limit = %q, want %q", c.CPU.Limit, "1")
	}

	// Memory: 512Mi * 1.2 = 614.4Mi -> 615Mi request, 1024Mi limit
	if c.Memory.Request != "615Mi" {
		t.Errorf("Memory request = %q, want %q", c.Memory.Request, "615Mi")
	}
	if c.Memory.Limit != "1024Mi" {
		t.Errorf("Memory limit = %q, want %q", c.Memory.Limit, "1024Mi")
	}

	if resp.Meta.RunsAnalyzed != 1 {
		t.Errorf("runs_analyzed = %d, want 1", resp.Meta.RunsAnalyzed)
	}
	if resp.Meta.BufferPercent != 20 {
		t.Errorf("buffer_percent = %d, want 20", resp.Meta.BufferPercent)
	}
	if resp.Meta.CPUPercentile != "p95" {
		t.Errorf("cpu_percentile = %q, want %q", resp.Meta.CPUPercentile, "p95")
	}
}
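
The arithmetic behind those expectations, written out as an illustrative sketch (formulas inferred from the expected values, not copied from the implementation; assumes "math" is imported):

func sizingMathSketch() {
	const bufferPercent = 20
	factor := 1 + float64(bufferPercent)/100 // 1.2

	// CPU: the buffered p95 value becomes the request; the limit rounds up to a half core.
	cpuRequestCores := 0.8 * factor                   // 0.96 -> rendered as "960m"
	cpuLimitCores := math.Ceil(cpuRequestCores*2) / 2 // 1.0  -> rendered as "1"

	// Memory: the buffered peak, rounded up to whole Mi, becomes the request;
	// the limit is the next power-of-two Mi bucket.
	memRequestMi := math.Ceil(512 * factor) // 614.4 -> 615 -> "615Mi"
	memLimitMi := 1024.0                    // 615Mi rounds up to 1024Mi

	_, _, _, _ = cpuRequestCores, cpuLimitCores, memRequestMi, memLimitMi
}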

func TestComputeSizing_MultipleRuns(t *testing.T) {
	// Run 1: lower values
	run1 := summary.RunSummary{
		Containers: []summary.ContainerSummary{
			{
				Name:        "runner",
				CPUCores:    summary.StatSummary{Peak: 0.5, P95: 0.4},
				MemoryBytes: summary.StatSummary{Peak: 256 * 1024 * 1024},
			},
		},
	}
	// Run 2: higher values (should be used)
	run2 := summary.RunSummary{
		Containers: []summary.ContainerSummary{
			{
				Name:        "runner",
				CPUCores:    summary.StatSummary{Peak: 1.0, P95: 0.8},
				MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024},
			},
		},
	}

	payload1, _ := json.Marshal(run1)
	payload2, _ := json.Marshal(run2)
	metrics := []Metric{
		{Payload: string(payload1)},
		{Payload: string(payload2)},
	}

	resp, err := computeSizing(metrics, 0, "p95") // no buffer for easier math
	if err != nil {
		t.Fatalf("computeSizing() error = %v", err)
	}

	c := resp.Containers[0]

	// CPU: max(0.4, 0.8) = 0.8
	if c.CPU.Request != "800m" {
		t.Errorf("CPU request = %q, want %q", c.CPU.Request, "800m")
	}

	// Memory: max(256, 512) = 512Mi
	if c.Memory.Request != "512Mi" {
		t.Errorf("Memory request = %q, want %q", c.Memory.Request, "512Mi")
	}

	if resp.Meta.RunsAnalyzed != 2 {
		t.Errorf("runs_analyzed = %d, want 2", resp.Meta.RunsAnalyzed)
	}
}

func TestComputeSizing_MultipleContainers(t *testing.T) {
	runSummary := summary.RunSummary{
		Containers: []summary.ContainerSummary{
			{
				Name:        "runner",
				CPUCores:    summary.StatSummary{P95: 1.0},
				MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024},
			},
			{
				Name:        "dind",
				CPUCores:    summary.StatSummary{P95: 0.5},
				MemoryBytes: summary.StatSummary{Peak: 256 * 1024 * 1024},
			},
		},
	}

	payload, _ := json.Marshal(runSummary)
	metrics := []Metric{{Payload: string(payload)}}

	resp, err := computeSizing(metrics, 0, "p95")
	if err != nil {
		t.Fatalf("computeSizing() error = %v", err)
	}

	if len(resp.Containers) != 2 {
		t.Fatalf("got %d containers, want 2", len(resp.Containers))
	}

	// Containers should be sorted alphabetically
	if resp.Containers[0].Name != "dind" {
		t.Errorf("first container = %q, want %q", resp.Containers[0].Name, "dind")
	}
	if resp.Containers[1].Name != "runner" {
		t.Errorf("second container = %q, want %q", resp.Containers[1].Name, "runner")
	}

	// Total should be sum
	if resp.Total.CPU.Request != "1500m" {
		t.Errorf("total CPU request = %q, want %q", resp.Total.CPU.Request, "1500m")
	}
	if resp.Total.Memory.Request != "768Mi" {
		t.Errorf("total memory request = %q, want %q", resp.Total.Memory.Request, "768Mi")
	}
}

func TestComputeSizing_NoMetrics(t *testing.T) {
	_, err := computeSizing([]Metric{}, 20, "p95")
	if err == nil {
		t.Error("computeSizing() with no metrics should return error")
	}
}

func TestHandler_GetSizing(t *testing.T) {
	const readToken = "test-token"
	h, cleanup := newTestHandlerWithToken(t, readToken)
	defer cleanup()

	// Save metrics with container data
	for i := 0; i < 3; i++ {
		runSummary := summary.RunSummary{
			Containers: []summary.ContainerSummary{
				{
					Name:        "runner",
					CPUCores:    summary.StatSummary{Peak: 1.0, P99: 0.9, P95: 0.8, P75: 0.6, P50: 0.5, Avg: 0.4},
					MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024},
				},
			},
		}
		payload := &MetricsPayload{
			Execution: ExecutionContext{
				Organization: "org",
				Repository:   "repo",
				Workflow:     "ci.yml",
				Job:          "build",
				RunID:        "run-" + string(rune('1'+i)),
			},
			Summary: runSummary,
		}
		if _, err := h.store.SaveMetric(payload); err != nil {
			t.Fatalf("SaveMetric() error = %v", err)
		}
	}

	req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build", nil)
	req.Header.Set("Authorization", "Bearer "+readToken)
	rec := httptest.NewRecorder()

	s := newTestServer(h)
	s.Mux.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("status = %d, want %d", rec.Code, http.StatusOK)
	}

	var resp SizingResponse
	if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}

	if len(resp.Containers) != 1 {
		t.Errorf("got %d containers, want 1", len(resp.Containers))
	}
	if resp.Meta.RunsAnalyzed != 3 {
		t.Errorf("runs_analyzed = %d, want 3", resp.Meta.RunsAnalyzed)
	}
	if resp.Meta.BufferPercent != 20 {
		t.Errorf("buffer_percent = %d, want 20", resp.Meta.BufferPercent)
	}
	if resp.Meta.CPUPercentile != "p95" {
		t.Errorf("cpu_percentile = %q, want %q", resp.Meta.CPUPercentile, "p95")
	}
}

func TestHandler_GetSizing_CustomParams(t *testing.T) {
	const readToken = "test-token"
	h, cleanup := newTestHandlerWithToken(t, readToken)
	defer cleanup()

	// Save one metric
	runSummary := summary.RunSummary{
		Containers: []summary.ContainerSummary{
			{
				Name:        "runner",
				CPUCores:    summary.StatSummary{Peak: 1.0, P99: 0.9, P95: 0.8, P75: 0.6, P50: 0.5, Avg: 0.4},
				MemoryBytes: summary.StatSummary{Peak: 512 * 1024 * 1024},
			},
		},
	}
	payload := &MetricsPayload{
		Execution: ExecutionContext{Organization: "org", Repository: "repo", Workflow: "ci.yml", Job: "build", RunID: "run-1"},
		Summary:   runSummary,
	}
	if _, err := h.store.SaveMetric(payload); err != nil {
		t.Fatalf("SaveMetric() error = %v", err)
	}

	req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build?runs=10&buffer=10&cpu_percentile=p75", nil)
	req.Header.Set("Authorization", "Bearer "+readToken)
	rec := httptest.NewRecorder()

	s := newTestServer(h)
	s.Mux.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Errorf("status = %d, want %d", rec.Code, http.StatusOK)
	}

	var resp SizingResponse
	if err := json.NewDecoder(rec.Body).Decode(&resp); err != nil {
		t.Fatalf("failed to decode response: %v", err)
	}

	if resp.Meta.BufferPercent != 10 {
		t.Errorf("buffer_percent = %d, want 10", resp.Meta.BufferPercent)
	}
	if resp.Meta.CPUPercentile != "p75" {
		t.Errorf("cpu_percentile = %q, want %q", resp.Meta.CPUPercentile, "p75")
	}

	// CPU: 0.6 * 1.1 = 0.66
	c := resp.Containers[0]
	if c.CPU.Request != "660m" {
		t.Errorf("CPU request = %q, want %q", c.CPU.Request, "660m")
	}
}

func TestHandler_GetSizing_NotFound(t *testing.T) {
	const readToken = "test-token"
	h, cleanup := newTestHandlerWithToken(t, readToken)
	defer cleanup()

	req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build", nil)
	req.Header.Set("Authorization", "Bearer "+readToken)
	rec := httptest.NewRecorder()

	s := newTestServer(h)
	s.Mux.ServeHTTP(rec, req)

	if rec.Code != http.StatusNotFound {
		t.Errorf("status = %d, want %d", rec.Code, http.StatusNotFound)
	}
}

func TestHandler_GetSizing_InvalidPercentile(t *testing.T) {
	const readToken = "test-token"
	h, cleanup := newTestHandlerWithToken(t, readToken)
	defer cleanup()

	req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build?cpu_percentile=p80", nil)
	req.Header.Set("Authorization", "Bearer "+readToken)
	rec := httptest.NewRecorder()

	s := newTestServer(h)
	s.Mux.ServeHTTP(rec, req)

	if rec.Code != http.StatusBadRequest {
		t.Errorf("status = %d, want %d", rec.Code, http.StatusBadRequest)
	}
}

func TestHandler_GetSizing_AuthRequired(t *testing.T) {
	const readToken = "test-token"
	h, cleanup := newTestHandlerWithToken(t, readToken)
	defer cleanup()

	tests := []struct {
		name       string
		authHeader string
		wantCode   int
	}{
		{"no auth", "", http.StatusUnauthorized},
		{"wrong token", "Bearer wrong-token", http.StatusUnauthorized},
		{"valid token", "Bearer " + readToken, http.StatusNotFound}, // no metrics, but auth works
	}

	s := newTestServer(h)

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			req := httptest.NewRequest(http.MethodGet, "/api/v1/sizing/repo/org/repo/ci.yml/build", nil)
			if tt.authHeader != "" {
				req.Header.Set("Authorization", tt.authHeader)
			}
			rec := httptest.NewRecorder()
			s.Mux.ServeHTTP(rec, req)

			if rec.Code != tt.wantCode {
				t.Errorf("status = %d, want %d", rec.Code, tt.wantCode)
			}
		})
	}
}
@ -7,7 +7,7 @@ import (
 	"fmt"
 	"time"

-	"gorm.io/driver/sqlite"
+	"github.com/glebarez/sqlite"
 	"gorm.io/gorm"
 	"gorm.io/gorm/logger"
 )
@ -103,6 +103,16 @@ func (s *Store) GetMetricsByWorkflowJob(org, repo, workflow, job string) ([]Metr
 	return metrics, result.Error
 }

+// GetRecentMetricsByWorkflowJob retrieves the last N metrics ordered by received_at DESC
+func (s *Store) GetRecentMetricsByWorkflowJob(org, repo, workflow, job string, limit int) ([]Metric, error) {
+	var metrics []Metric
+	result := s.db.Where(
+		"organization = ? AND repository = ? AND workflow = ? AND job = ?",
+		org, repo, workflow, job,
+	).Order("received_at DESC").Limit(limit).Find(&metrics)
+	return metrics, result.Error
+}
+
 // Close closes the database connection
 func (s *Store) Close() error {
 	sqlDB, err := s.db.DB()
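
For orientation, roughly how this query feeds the sizing path; only GetRecentMetricsByWorkflowJob and computeSizing come from this change, the surrounding glue and variable names are placeholders:

// Placeholder handler-side flow, not actual code from this change.
metrics, err := h.store.GetRecentMetricsByWorkflowJob(org, repo, workflow, job, runs)
if err != nil {
	return nil, fmt.Errorf("loading metrics: %w", err)
}
// computeSizing errors on an empty slice, which the handler surfaces as the
// 404 seen in TestHandler_GetSizing_NotFound.
return computeSizing(metrics, bufferPercent, cpuPercentile)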
@ -6,7 +6,7 @@ import (
 	"testing"
 	"time"

-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary"
 )

 func TestNewStore(t *testing.T) {
@ -1,5 +1,5 @@
 // ABOUTME: HMAC-SHA256 token generation and validation for scoped push authentication.
-// ABOUTME: Tokens are derived from a key + scope, enabling stateless validation without DB storage.
+// ABOUTME: Tokens are derived from a key + scope + timestamp, enabling stateless validation with expiration.
 package receiver

 import (
@ -7,19 +7,71 @@ import (
 	"crypto/sha256"
 	"crypto/subtle"
 	"encoding/hex"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
 )

-// GenerateScopedToken computes an HMAC-SHA256 token scoped to a specific org/repo/workflow/job.
-// The canonical input is "v1\x00<org>\x00<repo>\x00<workflow>\x00<job>".
-func GenerateScopedToken(key, org, repo, workflow, job string) string {
-	mac := hmac.New(sha256.New, []byte(key))
-	mac.Write([]byte("v1\x00" + org + "\x00" + repo + "\x00" + workflow + "\x00" + job))
-	return hex.EncodeToString(mac.Sum(nil))
-}
-
-// ValidateScopedToken checks whether a token matches the expected HMAC for the given scope.
-// Uses constant-time comparison to prevent timing attacks.
-func ValidateScopedToken(key, token, org, repo, workflow, job string) bool {
-	expected := GenerateScopedToken(key, org, repo, workflow, job)
-	return subtle.ConstantTimeCompare([]byte(token), []byte(expected)) == 1
-}
+// DefaultTokenTTL is the default time-to-live for push tokens.
+const DefaultTokenTTL = 2 * time.Hour
+
+// GenerateToken creates a token with embedded timestamp for expiration support.
+// Format: "<unix_timestamp>:<hmac_hex>"
+func GenerateToken(key, org, repo, workflow, job string) string {
+	return GenerateTokenAt(key, org, repo, workflow, job, time.Now())
+}
+
+// GenerateTokenAt creates a token with the specified timestamp.
+// The HMAC input is "v1\x00<org>\x00<repo>\x00<workflow>\x00<job>\x00<timestamp>".
+func GenerateTokenAt(key, org, repo, workflow, job string, timestamp time.Time) string {
+	ts := strconv.FormatInt(timestamp.Unix(), 10)
+	mac := hmac.New(sha256.New, []byte(key))
+	mac.Write([]byte("v1\x00" + org + "\x00" + repo + "\x00" + workflow + "\x00" + job + "\x00" + ts))
+	return ts + ":" + hex.EncodeToString(mac.Sum(nil))
+}
+
+// ValidateToken validates a token and checks expiration.
+// Returns true if the token is valid and not expired.
+func ValidateToken(key, token, org, repo, workflow, job string, ttl time.Duration) bool {
+	return ValidateTokenAt(key, token, org, repo, workflow, job, ttl, time.Now())
+}
+
+// ValidateTokenAt validates a token against a specific reference time.
+func ValidateTokenAt(key, token, org, repo, workflow, job string, ttl time.Duration, now time.Time) bool {
+	parts := strings.SplitN(token, ":", 2)
+	if len(parts) != 2 {
+		return false
+	}
+
+	tsStr, hmacHex := parts[0], parts[1]
+	ts, err := strconv.ParseInt(tsStr, 10, 64)
+	if err != nil {
+		return false
+	}
+
+	tokenTime := time.Unix(ts, 0)
+	if now.Sub(tokenTime) > ttl {
+		return false
+	}
+
+	// Recompute expected HMAC
+	mac := hmac.New(sha256.New, []byte(key))
+	mac.Write([]byte("v1\x00" + org + "\x00" + repo + "\x00" + workflow + "\x00" + job + "\x00" + tsStr))
+	expected := hex.EncodeToString(mac.Sum(nil))
+
+	return subtle.ConstantTimeCompare([]byte(hmacHex), []byte(expected)) == 1
+}
+
+// ParseTokenTimestamp extracts the timestamp from a timestamped token without validating it.
+func ParseTokenTimestamp(token string) (time.Time, error) {
+	parts := strings.SplitN(token, ":", 2)
+	if len(parts) != 2 {
+		return time.Time{}, fmt.Errorf("invalid token format")
+	}
+	ts, err := strconv.ParseInt(parts[0], 10, 64)
+	if err != nil {
+		return time.Time{}, fmt.Errorf("invalid timestamp: %w", err)
+	}
+	return time.Unix(ts, 0), nil
+}
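
A small end-to-end sketch of the new timestamped tokens; the PUSH_TOKEN_KEY name and the standalone main are assumptions, only the receiver package API is taken from this change:

package main

import (
	"fmt"
	"os"

	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver"
)

func main() {
	// Shared secret; the environment variable name here is made up for the example.
	key := os.Getenv("PUSH_TOKEN_KEY")

	// Issuer side: the token embeds time.Now() and is pinned to one job scope.
	token := receiver.GenerateToken(key, "org", "repo", "ci.yml", "build")

	// Receiver side: same key, same scope, and inside the TTL, or it is rejected.
	ok := receiver.ValidateToken(key, token, "org", "repo", "ci.yml", "build", receiver.DefaultTokenTTL)
	fmt.Println("token:", token, "valid:", ok)
}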
@ -1,20 +1,35 @@
 package receiver

 import (
-	"encoding/hex"
+	"strconv"
+	"strings"
 	"testing"
+	"time"
 )

-func TestGenerateScopedToken_Deterministic(t *testing.T) {
-	token1 := GenerateScopedToken("key", "org", "repo", "wf", "job")
-	token2 := GenerateScopedToken("key", "org", "repo", "wf", "job")
+func TestGenerateToken_Format(t *testing.T) {
+	token := GenerateToken("key", "org", "repo", "wf", "job")
+	parts := strings.SplitN(token, ":", 2)
+	if len(parts) != 2 {
+		t.Fatalf("token should have format 'timestamp:hmac', got %q", token)
+	}
+	if len(parts[1]) != 64 {
+		t.Errorf("HMAC part length = %d, want 64", len(parts[1]))
+	}
+}
+
+func TestGenerateTokenAt_Deterministic(t *testing.T) {
+	ts := time.Unix(1700000000, 0)
+	token1 := GenerateTokenAt("key", "org", "repo", "wf", "job", ts)
+	token2 := GenerateTokenAt("key", "org", "repo", "wf", "job", ts)
 	if token1 != token2 {
 		t.Errorf("tokens differ: %q vs %q", token1, token2)
 	}
 }

-func TestGenerateScopedToken_ScopePinning(t *testing.T) {
-	base := GenerateScopedToken("key", "org", "repo", "wf", "job")
+func TestGenerateTokenAt_ScopePinning(t *testing.T) {
+	ts := time.Unix(1700000000, 0)
+	base := GenerateTokenAt("key", "org", "repo", "wf", "job", ts)

 	variants := []struct {
 		name string
@ -31,7 +46,7 @@ func TestGenerateScopedToken_ScopePinning(t *testing.T) {

 	for _, v := range variants {
 		t.Run(v.name, func(t *testing.T) {
-			token := GenerateScopedToken("key", v.org, v.repo, v.wf, v.job)
+			token := GenerateTokenAt("key", v.org, v.repo, v.wf, v.job, ts)
 			if token == base {
 				t.Errorf("token for %s should differ from base", v.name)
 			}
@ -39,40 +54,127 @@ func TestGenerateScopedToken_ScopePinning(t *testing.T) {
 	}
 }

-func TestGenerateScopedToken_DifferentKeys(t *testing.T) {
-	token1 := GenerateScopedToken("key-a", "org", "repo", "wf", "job")
-	token2 := GenerateScopedToken("key-b", "org", "repo", "wf", "job")
+func TestGenerateTokenAt_DifferentKeys(t *testing.T) {
+	ts := time.Unix(1700000000, 0)
+	token1 := GenerateTokenAt("key-a", "org", "repo", "wf", "job", ts)
+	token2 := GenerateTokenAt("key-b", "org", "repo", "wf", "job", ts)
 	if token1 == token2 {
 		t.Error("different keys should produce different tokens")
 	}
 }

-func TestGenerateScopedToken_ValidHex(t *testing.T) {
-	token := GenerateScopedToken("key", "org", "repo", "wf", "job")
-	if len(token) != 64 {
-		t.Errorf("token length = %d, want 64", len(token))
-	}
-	if _, err := hex.DecodeString(token); err != nil {
-		t.Errorf("token is not valid hex: %v", err)
+func TestGenerateTokenAt_DifferentTimestamps(t *testing.T) {
+	ts1 := time.Unix(1700000000, 0)
+	ts2 := time.Unix(1700000001, 0)
+	token1 := GenerateTokenAt("key", "org", "repo", "wf", "job", ts1)
+	token2 := GenerateTokenAt("key", "org", "repo", "wf", "job", ts2)
+	if token1 == token2 {
+		t.Error("different timestamps should produce different tokens")
 	}
 }

-func TestValidateScopedToken_Correct(t *testing.T) {
-	token := GenerateScopedToken("key", "org", "repo", "wf", "job")
-	if !ValidateScopedToken("key", token, "org", "repo", "wf", "job") {
-		t.Error("ValidateScopedToken should accept correct token")
+func TestValidateToken_Correct(t *testing.T) {
+	ts := time.Now()
+	token := GenerateTokenAt("key", "org", "repo", "wf", "job", ts)
+	if !ValidateToken("key", token, "org", "repo", "wf", "job", 5*time.Minute) {
+		t.Error("ValidateToken should accept correct token")
 	}
 }

-func TestValidateScopedToken_WrongToken(t *testing.T) {
-	if ValidateScopedToken("key", "deadbeef", "org", "repo", "wf", "job") {
-		t.Error("ValidateScopedToken should reject wrong token")
+func TestValidateToken_WrongToken(t *testing.T) {
+	if ValidateToken("key", "12345:deadbeef", "org", "repo", "wf", "job", 5*time.Minute) {
+		t.Error("ValidateToken should reject wrong token")
 	}
 }

-func TestValidateScopedToken_WrongScope(t *testing.T) {
-	token := GenerateScopedToken("key", "org", "repo", "wf", "job")
-	if ValidateScopedToken("key", token, "org", "repo", "wf", "other-job") {
-		t.Error("ValidateScopedToken should reject token for different scope")
+func TestValidateToken_WrongScope(t *testing.T) {
+	ts := time.Now()
+	token := GenerateTokenAt("key", "org", "repo", "wf", "job", ts)
+	if ValidateToken("key", token, "org", "repo", "wf", "other-job", 5*time.Minute) {
+		t.Error("ValidateToken should reject token for different scope")
+	}
+}
+
+func TestValidateToken_Expired(t *testing.T) {
+	ts := time.Now().Add(-10 * time.Minute)
+	token := GenerateTokenAt("key", "org", "repo", "wf", "job", ts)
+	if ValidateToken("key", token, "org", "repo", "wf", "job", 5*time.Minute) {
+		t.Error("ValidateToken should reject expired token")
+	}
+}
+
+func TestValidateTokenAt_NotExpired(t *testing.T) {
+	tokenTime := time.Unix(1700000000, 0)
+	token := GenerateTokenAt("key", "org", "repo", "wf", "job", tokenTime)
+
+	// Validate at 4 minutes later (within 5 minute TTL)
+	now := tokenTime.Add(4 * time.Minute)
+	if !ValidateTokenAt("key", token, "org", "repo", "wf", "job", 5*time.Minute, now) {
+		t.Error("ValidateTokenAt should accept token within TTL")
+	}
+}
+
+func TestValidateTokenAt_JustExpired(t *testing.T) {
+	tokenTime := time.Unix(1700000000, 0)
+	token := GenerateTokenAt("key", "org", "repo", "wf", "job", tokenTime)
+
+	// Validate at 6 minutes later (beyond 5 minute TTL)
+	now := tokenTime.Add(6 * time.Minute)
+	if ValidateTokenAt("key", token, "org", "repo", "wf", "job", 5*time.Minute, now) {
+		t.Error("ValidateTokenAt should reject token beyond TTL")
+	}
+}
+
+func TestValidateToken_InvalidFormat(t *testing.T) {
+	if ValidateToken("key", "no-colon-here", "org", "repo", "wf", "job", 5*time.Minute) {
+		t.Error("ValidateToken should reject token without colon")
+	}
+	if ValidateToken("key", "not-a-number:abc123", "org", "repo", "wf", "job", 5*time.Minute) {
+		t.Error("ValidateToken should reject token with invalid timestamp")
+	}
+}
+
+func TestParseTokenTimestamp(t *testing.T) {
+	ts := time.Unix(1700000000, 0)
+	token := GenerateTokenAt("key", "org", "repo", "wf", "job", ts)
+
+	parsed, err := ParseTokenTimestamp(token)
+	if err != nil {
+		t.Fatalf("ParseTokenTimestamp failed: %v", err)
+	}
+	if !parsed.Equal(ts) {
+		t.Errorf("parsed timestamp = %v, want %v", parsed, ts)
+	}
+}
+
+func TestParseTokenTimestamp_Invalid(t *testing.T) {
+	_, err := ParseTokenTimestamp("no-colon")
+	if err == nil {
+		t.Error("ParseTokenTimestamp should fail on missing colon")
+	}

+	_, err = ParseTokenTimestamp("not-a-number:abc123")
+	if err == nil {
+		t.Error("ParseTokenTimestamp should fail on invalid timestamp")
+	}
+}
+
+func TestValidateToken_TamperedTimestamp(t *testing.T) {
+	// Generate a valid token
+	ts := time.Now()
+	token := GenerateTokenAt("key", "org", "repo", "wf", "job", ts)
+
+	parts := strings.SplitN(token, ":", 2)
+	if len(parts) != 2 {
+		t.Fatalf("unexpected token format: %q", token)
+	}
+	hmacPart := parts[1]
+
+	// Tamper with timestamp (e.g., attacker tries to extend token lifetime)
+	tamperedTimestamp := strconv.FormatInt(time.Now().Add(1*time.Hour).Unix(), 10)
+	tamperedToken := tamperedTimestamp + ":" + hmacPart
+
+	if ValidateToken("key", tamperedToken, "org", "repo", "wf", "job", 5*time.Minute) {
+		t.Error("ValidateToken should reject token with tampered timestamp")
 	}
 }
@ -2,7 +2,7 @@
 // ABOUTME: Defines MetricsPayload combining execution metadata with run summary.
 package receiver

-import "edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/summary"
+import "edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/summary"

 // ExecutionContext holds GitHub Actions style identifiers for a workflow run
 type ExecutionContext struct {
@ -7,7 +7,7 @@ import (
 	"sort"
 	"time"

-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/metrics"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/metrics"
 )

 // containerAccumulator tracks metrics for a single container
@ -6,7 +6,7 @@ import (
 	"testing"
 	"time"

-	"edp.buildth.ing/DevFW-CICD/forgejo-runner-optimiser/internal/metrics"
+	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/metrics"
 )

 func TestAccumulator_NoSamples(t *testing.T) {
1096  pkg/client/client.gen.go  Normal file
File diff suppressed because it is too large

64  scripts/extract-openapi/main.go  Normal file
@ -0,0 +1,64 @@
//go:build ignore

// ABOUTME: Extracts OpenAPI spec from Fuego server without running it.
// ABOUTME: Run with: go run scripts/extract-openapi/main.go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"log/slog"
	"os"

	"github.com/getkin/kin-openapi/openapi3"
	"github.com/go-fuego/fuego"

	"edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer/internal/receiver"
)

func main() {
	// Create a minimal handler (store is nil, won't be used)
	logger := slog.New(slog.NewTextHandler(io.Discard, nil))
	handler := receiver.NewHandler(nil, logger, "dummy", "dummy", 0)

	// Create Fuego server with OpenAPI config
	s := fuego.NewServer(
		fuego.WithoutStartupMessages(),
		fuego.WithEngineOptions(
			fuego.WithOpenAPIConfig(fuego.OpenAPIConfig{
				DisableLocalSave: true,
				Info: &openapi3.Info{
					Title:       "Forgejo Runner Resource Collector API",
					Version:     "1.0.0",
					Description: "HTTP service that receives and stores CI/CD resource metrics from collectors, providing query and sizing recommendation APIs.",
					Contact: &openapi3.Contact{
						Name: "API Support",
						URL:  "https://edp.buildth.ing/DevFW-CICD/forgejo-runner-sizer",
					},
					License: &openapi3.License{
						Name: "Apache 2.0",
						URL:  "http://www.apache.org/licenses/LICENSE-2.0.html",
					},
				},
			}),
		),
	)

	// Register routes to populate OpenAPI spec
	handler.RegisterRoutes(s)

	// Output OpenAPI spec as JSON
	spec, err := json.MarshalIndent(s.OpenAPI.Description(), "", " ")
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error marshaling OpenAPI spec: %v\n", err)
		os.Exit(1)
	}

	if err := os.WriteFile("docs/openapi.json", spec, 0644); err != nil {
		fmt.Fprintf(os.Stderr, "Error writing docs/openapi.json: %v\n", err)
		os.Exit(1)
	}

	fmt.Println("Generated docs/openapi.json")
}
@ -113,7 +113,7 @@ services:
       # Cgroup configuration
       # stress-ng-cpu is the worker process name for CPU stress
       # stress-ng-vm is the worker process name for memory stress
-      CGROUP_PROCESS_MAP: '{"stress-ng-cpu":"cpu-stress","stress-ng-vm":"mem-stress","dd":"io-stress","resource-collec":"collector"}'
+      CGROUP_PROCESS_MAP: '{"stress-ng-cpu":"cpu-stress","stress-ng-vm":"mem-stress","dd":"io-stress","sizer":"collector"}'
       CGROUP_LIMITS: '{"cpu-stress":{"cpu":"1","memory":"128Mi"},"mem-stress":{"cpu":"500m","memory":"256Mi"},"io-stress":{"cpu":"500m","memory":"128Mi"},"collector":{"cpu":"200m","memory":"64Mi"}}'
     deploy:
       resources:
@ -68,7 +68,7 @@ services:
     environment:
       # Map unique process names to container names
       # 'cat' runs only in runner, 'sleep' runs only in sidecar
-      CGROUP_PROCESS_MAP: '{"cat":"runner","sleep":"sidecar","resource-collec":"collector"}'
+      CGROUP_PROCESS_MAP: '{"cat":"runner","sleep":"sidecar","sizer":"collector"}'
       CGROUP_LIMITS: '{"runner":{"cpu":"500m","memory":"256Mi"},"sidecar":{"cpu":"100m","memory":"128Mi"},"collector":{"cpu":"100m","memory":"64Mi"}}'
     deploy:
       resources:
@ -55,7 +55,7 @@ spec:

       # Resource collector sidecar
       - name: collector
-        image: ghcr.io/your-org/forgejo-runner-resource-collector:latest # Replace with your image
+        image: ghcr.io/your-org/forgejo-runner-sizer:latest
         args:
           - --interval=5s
           - --top=3
@ -121,7 +121,7 @@ spec:

       # Collector
       - name: collector
-        image: ghcr.io/your-org/forgejo-runner-resource-collector:latest # Replace with your image
+        image: ghcr.io/your-org/forgejo-runner-sizer:latest
         args:
           - --interval=2s
           - --top=5