feat: add resource collector for Forgejo runners

Add a Go application that collects CPU and RAM metrics from the /proc filesystem:
- Parse /proc/[pid]/stat for CPU usage (user/system time)
- Parse /proc/[pid]/status for memory usage (RSS, VmSize, etc.)
- Aggregate metrics across all processes
- Output via structured logging (JSON/text)
- Continuous collection with configurable interval

Designed for monitoring pipeline runner resource utilization to enable
dynamic runner sizing.
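
An illustrative invocation and one abridged output record (field values are hypothetical; the record shape follows internal/output/logger.go):

./resource-collector --interval 10s --log-format json --top 3
{"time":"2026-02-04T14:13:24+01:00","level":"INFO","msg":"metrics_collected","collection_time":"2026-02-04T14:13:24+01:00","total_processes":182,"cpu":{"total_percent":41.2,"user_percent":30.4,"system_percent":10.8,"idle_percent":55.1,"iowait_percent":3.7},"memory":{"total_bytes":16622301184,"used_percent":37.9,"total_rss_bytes":5906628608},"top_cpu":[{"pid":4711,"name":"go","cpu_percent":23.5}, ...],"top_memory":[{"pid":4711,"name":"go","rss_bytes":812345344}, ...]}
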
Manuel Ganter 2026-02-04 14:13:24 +01:00
commit 219d26959f
14 changed files with 1252 additions and 0 deletions

16
.gitignore vendored Normal file

@@ -0,0 +1,16 @@
# Binaries (root level only)
/resource-collector
/collector
# Test coverage
coverage.out
coverage.html
# IDE
.idea/
.vscode/
*.swp
*.swo
# OS
.DS_Store

83
Makefile Normal file

@@ -0,0 +1,83 @@
# ABOUTME: Makefile for forgejo-runner-resource-collector project.
# ABOUTME: Provides targets for building, formatting, linting, and testing.
BINARY_NAME := resource-collector
CMD_PATH := ./cmd/collector
GO := go
GOLANGCI_LINT := $(GO) run github.com/golangci/golangci-lint/v2/cmd/golangci-lint@v2.6.2
GITLEAKS := $(GO) run github.com/zricethezav/gitleaks/v8@v8.30.0
# Build flags
LDFLAGS := -s -w
BUILD_FLAGS := -ldflags "$(LDFLAGS)"
default: run
.PHONY: default all build clean fmt format lint gitleaks gitleaks-all test test-coverage run run-text help vet tidy install-hooks
# Aggregate quality-and-build target
all: fmt vet lint build
## Build targets
build: ## Build the binary
$(GO) build $(BUILD_FLAGS) -o $(BINARY_NAME) $(CMD_PATH)
clean: ## Remove build artifacts
rm -f $(BINARY_NAME) coverage.out coverage.html
$(GO) clean
## Code quality targets
fmt: ## Format code using go fmt
$(GO) fmt ./...
format: fmt ## Alias for fmt
vet: ## Run go vet
$(GO) vet ./...
lint: ## Run golangci-lint
$(GOLANGCI_LINT) run ./...
gitleaks: ## Check for secrets in staged changes
$(GITLEAKS) git --staged
gitleaks-all: ## Check for secrets in full git history
$(GITLEAKS) git .
## Dependency management
tidy: ## Tidy go modules
$(GO) mod tidy
## Testing targets
test: ## Run tests
$(GO) test -v ./...
test-coverage: ## Run tests with coverage
$(GO) test -v -coverprofile=coverage.out ./...
$(GO) tool cover -html=coverage.out -o coverage.html
## Run targets
run: build ## Build and run with default settings
./$(BINARY_NAME)
run-text: build ## Build and run with text output format
./$(BINARY_NAME) --log-format text --interval 2s
## Git hooks
install-hooks: ## Install git hooks
cp scripts/hooks/pre-commit .git/hooks/pre-commit
cp scripts/hooks/commit-msg .git/hooks/commit-msg
chmod +x .git/hooks/pre-commit
chmod +x .git/hooks/commit-msg
@echo "Git hooks installed successfully"
## Help
help: ## Show this help
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}'
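
For reference, a typical local workflow with these targets (illustrative):

make install-hooks    # one-time: copy pre-commit and commit-msg hooks into .git/hooks
make fmt vet lint     # code-quality pass
make test-coverage    # writes coverage.out and coverage.html
make run-text         # build, then run with text output and a 2s interval
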

84
cmd/collector/main.go Normal file

@@ -0,0 +1,84 @@
package main
import (
"context"
"flag"
"fmt"
"log/slog"
"os"
"os/signal"
"syscall"
"time"
"edp.buildth.ing/DevFW/forgejo-runner-resource-collector/internal/collector"
"edp.buildth.ing/DevFW/forgejo-runner-resource-collector/internal/output"
)
const (
defaultInterval = 5 * time.Second
defaultProcPath = "/proc"
defaultLogLevel = "info"
defaultLogFormat = "json"
defaultTopN = 5
)
func main() {
// Parse command line flags
interval := flag.Duration("interval", defaultInterval, "Collection interval (e.g., 5s, 1m)")
procPath := flag.String("proc-path", defaultProcPath, "Path to proc filesystem")
logLevel := flag.String("log-level", defaultLogLevel, "Log level: debug, info, warn, error")
logFormat := flag.String("log-format", defaultLogFormat, "Output format: json, text")
topN := flag.Int("top", defaultTopN, "Number of top processes to include")
flag.Parse()
// Setup structured logging for application logs
appLogLevel := output.ParseLogLevel(*logLevel)
appHandler := slog.NewJSONHandler(os.Stderr, &slog.HandlerOptions{
Level: appLogLevel,
})
appLogger := slog.New(appHandler)
// Setup metrics output writer
metricsWriter := output.NewLoggerWriter(output.LoggerConfig{
Output: os.Stdout,
Format: output.ParseLogFormat(*logFormat),
Level: slog.LevelInfo,
})
defer func() { _ = metricsWriter.Close() }()
// Create collector
c := collector.New(collector.Config{
ProcPath: *procPath,
Interval: *interval,
TopN: *topN,
}, metricsWriter, appLogger)
// Setup signal handling for graceful shutdown
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
go func() {
sig := <-sigChan
appLogger.Info("received signal", slog.String("signal", sig.String()))
cancel()
}()
// Run collector
appLogger.Info("starting resource collector",
slog.Duration("interval", *interval),
slog.String("proc_path", *procPath),
slog.String("log_level", *logLevel),
slog.String("log_format", *logFormat),
slog.Int("top_n", *topN),
)
if err := c.Run(ctx); err != nil && err != context.Canceled {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
appLogger.Info("collector stopped gracefully")
}

3
go.mod Normal file

@@ -0,0 +1,3 @@
module edp.buildth.ing/DevFW/forgejo-runner-resource-collector
go 1.25.6


@@ -0,0 +1,84 @@
package collector
import (
"context"
"fmt"
"log/slog"
"time"
"edp.buildth.ing/DevFW/forgejo-runner-resource-collector/internal/metrics"
"edp.buildth.ing/DevFW/forgejo-runner-resource-collector/internal/output"
)
// Config holds the collector configuration
type Config struct {
ProcPath string
Interval time.Duration
TopN int
}
// Collector orchestrates metric collection
type Collector struct {
config Config
aggregator *metrics.Aggregator
writer output.Writer
logger *slog.Logger
}
// New creates a new collector
func New(cfg Config, writer output.Writer, logger *slog.Logger) *Collector {
return &Collector{
config: cfg,
aggregator: metrics.NewAggregator(cfg.ProcPath, cfg.TopN),
writer: writer,
logger: logger,
}
}
// Run starts the collector loop and blocks until context is cancelled
func (c *Collector) Run(ctx context.Context) error {
c.logger.Info("collector starting",
slog.String("proc_path", c.config.ProcPath),
slog.Duration("interval", c.config.Interval),
slog.Int("top_n", c.config.TopN),
)
// Collect immediately on start
if err := c.collect(); err != nil {
c.logger.Warn("initial collection failed", slog.String("error", err.Error()))
}
ticker := time.NewTicker(c.config.Interval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
c.logger.Info("collector stopping")
return ctx.Err()
case <-ticker.C:
if err := c.collect(); err != nil {
c.logger.Warn("collection failed", slog.String("error", err.Error()))
}
}
}
}
// collect performs a single collection cycle
func (c *Collector) collect() error {
m, err := c.aggregator.Collect()
if err != nil {
return fmt.Errorf("aggregating metrics: %w", err)
}
if err := c.writer.Write(m); err != nil {
return fmt.Errorf("writing metrics: %w", err)
}
return nil
}
// CollectOnce performs a single collection and returns the metrics
func (c *Collector) CollectOnce() (*metrics.SystemMetrics, error) {
return c.aggregator.Collect()
}
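
For context, a minimal sketch of using the package programmatically for a one-shot snapshot (hypothetical code, assumed to live in another cmd/ package inside this module, since internal/ packages are not importable from outside it):

package main

import (
	"fmt"
	"log"
	"log/slog"
	"os"
	"time"

	"edp.buildth.ing/DevFW/forgejo-runner-resource-collector/internal/collector"
	"edp.buildth.ing/DevFW/forgejo-runner-resource-collector/internal/output"
)

func main() {
	w := output.NewLoggerWriter(output.LoggerConfig{Output: os.Stdout, Format: output.LogFormatText, Level: slog.LevelInfo})
	defer func() { _ = w.Close() }()

	c := collector.New(collector.Config{ProcPath: "/proc", Interval: 5 * time.Second, TopN: 3}, w, slog.Default())

	// Note: CPU percentages need two samples, so a single snapshot reports 0 for them.
	m, err := c.CollectOnce()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("processes: %d, RSS total: %d bytes\n", m.TotalProcesses, m.Memory.TotalRSSBytes)
}
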


@@ -0,0 +1,225 @@
package metrics
import (
"sort"
"time"
"edp.buildth.ing/DevFW/forgejo-runner-resource-collector/internal/proc"
)
// Aggregator collects and aggregates metrics from processes
type Aggregator struct {
procPath string
topN int
prevCPU *CPUSnapshot
prevProcCPU map[int]*ProcessCPUSnapshot
}
// NewAggregator creates a new metrics aggregator
func NewAggregator(procPath string, topN int) *Aggregator {
return &Aggregator{
procPath: procPath,
topN: topN,
prevProcCPU: make(map[int]*ProcessCPUSnapshot),
}
}
// Collect gathers all system metrics
func (a *Aggregator) Collect() (*SystemMetrics, error) {
now := time.Now()
// Read system info
sysInfo, err := proc.ReadSystemInfo(a.procPath)
if err != nil {
return nil, err
}
// Read system CPU
user, nice, system, idle, iowait, irq, softirq, err := proc.ReadSystemCPU(a.procPath)
if err != nil {
return nil, err
}
currentCPU := &CPUSnapshot{
Timestamp: now,
User: user,
Nice: nice,
System: system,
Idle: idle,
IOWait: iowait,
IRQ: irq,
SoftIRQ: softirq,
}
// Calculate CPU percentages
cpuMetrics := a.calculateCPUMetrics(currentCPU)
a.prevCPU = currentCPU
// Read all processes
processes, err := proc.ReadAllProcesses(a.procPath)
if err != nil {
return nil, err
}
// Calculate per-process metrics
processMetrics := a.calculateProcessMetrics(processes, now)
// Calculate memory metrics
memMetrics := a.calculateMemoryMetrics(sysInfo, processMetrics)
// Get top CPU consumers
topCPU := a.getTopByMetric(processMetrics, func(p ProcessMetrics) float64 {
return p.CPUPercent
})
// Get top memory consumers
topMemory := a.getTopByMetric(processMetrics, func(p ProcessMetrics) float64 {
return float64(p.MemRSS)
})
return &SystemMetrics{
Timestamp: now,
TotalProcesses: len(processes),
CPU: cpuMetrics,
Memory: memMetrics,
TopCPU: topCPU,
TopMemory: topMemory,
}, nil
}
// calculateCPUMetrics calculates CPU percentages between snapshots
func (a *Aggregator) calculateCPUMetrics(current *CPUSnapshot) CPUMetrics {
if a.prevCPU == nil {
return CPUMetrics{}
}
totalDelta := float64(current.Total() - a.prevCPU.Total())
if totalDelta <= 0 {
return CPUMetrics{}
}
userDelta := float64(current.User+current.Nice) - float64(a.prevCPU.User+a.prevCPU.Nice)
systemDelta := float64(current.System+current.IRQ+current.SoftIRQ) - float64(a.prevCPU.System+a.prevCPU.IRQ+a.prevCPU.SoftIRQ)
idleDelta := float64(current.Idle) - float64(a.prevCPU.Idle)
iowaitDelta := float64(current.IOWait) - float64(a.prevCPU.IOWait)
return CPUMetrics{
TotalPercent: (totalDelta - idleDelta - iowaitDelta) / totalDelta * 100,
UserPercent: userDelta / totalDelta * 100,
SystemPercent: systemDelta / totalDelta * 100,
IdlePercent: idleDelta / totalDelta * 100,
IOWaitPercent: iowaitDelta / totalDelta * 100,
}
}
// calculateProcessMetrics calculates metrics for each process
func (a *Aggregator) calculateProcessMetrics(processes []*proc.ProcessInfo, now time.Time) []ProcessMetrics {
newProcCPU := make(map[int]*ProcessCPUSnapshot)
metrics := make([]ProcessMetrics, 0, len(processes))
for _, p := range processes {
pid := p.Stat.PID
// Calculate CPU percentage for this process
cpuPercent := 0.0
if prev, ok := a.prevProcCPU[pid]; ok {
procDelta := float64(p.Stat.TotalTime()) - float64(prev.Total())
elapsed := now.Sub(prev.Timestamp).Seconds()
if procDelta > 0 && elapsed > 0 {
// CPU percent = (process ticks / clock_ticks_per_sec) / elapsed_time * 100
// e.g. 250 ticks over 5 s at 100 ticks/s = 2.5 CPU-seconds / 5 s = 50%
cpuPercent = (procDelta / float64(proc.DefaultClockTicks)) / elapsed * 100
if cpuPercent > 100 {
cpuPercent = 100 // Cap at 100% per CPU
}
}
}
// Store current snapshot for next iteration
newProcCPU[pid] = &ProcessCPUSnapshot{
PID: pid,
Timestamp: now,
UTime: p.Stat.UTime,
STime: p.Stat.STime,
}
state := string(p.Stat.State)
if state == "" {
state = "?"
}
metrics = append(metrics, ProcessMetrics{
PID: pid,
Name: p.Status.Name,
CPUPercent: cpuPercent,
MemRSS: p.Status.VmRSS,
MemVirtual: p.Status.VmSize,
Threads: p.Status.Threads,
State: state,
})
}
// Update process CPU snapshots for next iteration
a.prevProcCPU = newProcCPU
return metrics
}
// calculateMemoryMetrics calculates aggregated memory metrics
func (a *Aggregator) calculateMemoryMetrics(sysInfo *proc.SystemInfo, processes []ProcessMetrics) MemoryMetrics {
var totalRSS uint64
for _, p := range processes {
totalRSS += p.MemRSS
}
usedBytes := sysInfo.MemTotal - sysInfo.MemAvailable
usedPercent := 0.0
rssPercent := 0.0
if sysInfo.MemTotal > 0 {
usedPercent = float64(usedBytes) / float64(sysInfo.MemTotal) * 100
rssPercent = float64(totalRSS) / float64(sysInfo.MemTotal) * 100
}
return MemoryMetrics{
TotalBytes: sysInfo.MemTotal,
UsedBytes: usedBytes,
FreeBytes: sysInfo.MemFree,
AvailableBytes: sysInfo.MemAvailable,
UsedPercent: usedPercent,
TotalRSSBytes: totalRSS,
RSSPercent: rssPercent,
}
}
// getTopByMetric returns the top N processes by a given metric
func (a *Aggregator) getTopByMetric(metrics []ProcessMetrics, getValue func(ProcessMetrics) float64) []ProcessMetrics {
if len(metrics) == 0 {
return nil
}
// Sort by the metric value (descending)
sorted := make([]ProcessMetrics, len(metrics))
copy(sorted, metrics)
sort.Slice(sorted, func(i, j int) bool {
return getValue(sorted[i]) > getValue(sorted[j])
})
// Return top N
n := a.topN
if n > len(sorted) {
n = len(sorted)
}
return sorted[:n]
}
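
A worked example of the interval math in calculateCPUMetrics, with assumed sample values. Two /proc/stat samples taken 5 s apart (values in clock ticks):

prev: user=1000 nice=0 system=500 idle=8000 iowait=100 irq=0 softirq=0  (Total = 9600)
curr: user=1300 nice=0 system=600 idle=8550 iowait=150 irq=0 softirq=0  (Total = 10600)
totalDelta = 1000, userDelta = 300, systemDelta = 100, idleDelta = 550, iowaitDelta = 50
TotalPercent = (1000 - 550 - 50) / 1000 * 100 = 40%
UserPercent = 30%, SystemPercent = 10%, IdlePercent = 55%, IOWaitPercent = 5%
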

79
internal/metrics/types.go Normal file

@@ -0,0 +1,79 @@
package metrics
import "time"
// ProcessMetrics holds metrics for a single process
type ProcessMetrics struct {
PID int `json:"pid"`
Name string `json:"name"`
CPUPercent float64 `json:"cpu_percent"`
MemRSS uint64 `json:"mem_rss_bytes"`
MemVirtual uint64 `json:"mem_virtual_bytes"`
Threads int `json:"threads"`
State string `json:"state"`
}
// CPUMetrics holds aggregated CPU metrics
type CPUMetrics struct {
TotalPercent float64 `json:"total_percent"`
UserPercent float64 `json:"user_percent"`
SystemPercent float64 `json:"system_percent"`
IdlePercent float64 `json:"idle_percent"`
IOWaitPercent float64 `json:"iowait_percent"`
}
// MemoryMetrics holds aggregated memory metrics
type MemoryMetrics struct {
TotalBytes uint64 `json:"total_bytes"`
UsedBytes uint64 `json:"used_bytes"`
FreeBytes uint64 `json:"free_bytes"`
AvailableBytes uint64 `json:"available_bytes"`
UsedPercent float64 `json:"used_percent"`
TotalRSSBytes uint64 `json:"total_rss_bytes"`
RSSPercent float64 `json:"rss_percent"`
}
// SystemMetrics holds a complete snapshot of system metrics
type SystemMetrics struct {
Timestamp time.Time `json:"timestamp"`
TotalProcesses int `json:"total_processes"`
CPU CPUMetrics `json:"cpu"`
Memory MemoryMetrics `json:"memory"`
TopCPU []ProcessMetrics `json:"top_cpu,omitempty"`
TopMemory []ProcessMetrics `json:"top_memory,omitempty"`
}
// CPUSnapshot holds CPU timing data for calculating percentages between intervals
type CPUSnapshot struct {
Timestamp time.Time
User uint64
Nice uint64
System uint64
Idle uint64
IOWait uint64
IRQ uint64
SoftIRQ uint64
}
// Total returns the total CPU time across all states
func (s *CPUSnapshot) Total() uint64 {
return s.User + s.Nice + s.System + s.Idle + s.IOWait + s.IRQ + s.SoftIRQ
}
// Active returns the active (non-idle) CPU time
func (s *CPUSnapshot) Active() uint64 {
return s.User + s.Nice + s.System + s.IRQ + s.SoftIRQ
}
// ProcessCPUSnapshot holds CPU timing data for a single process
type ProcessCPUSnapshot struct {
PID int
Timestamp time.Time
UTime uint64
STime uint64
}
// Total returns total CPU time for the process
func (s *ProcessCPUSnapshot) Total() uint64 {
return s.UTime + s.STime
}

131
internal/output/logger.go Normal file

@@ -0,0 +1,131 @@
package output
import (
"io"
"log/slog"
"os"
"edp.buildth.ing/DevFW/forgejo-runner-resource-collector/internal/metrics"
)
// LogFormat specifies the log output format
type LogFormat string
const (
LogFormatJSON LogFormat = "json"
LogFormatText LogFormat = "text"
)
// LoggerWriter outputs metrics using structured logging
type LoggerWriter struct {
logger *slog.Logger
level slog.Level
}
// LoggerConfig holds configuration for the logger
type LoggerConfig struct {
Output io.Writer
Format LogFormat
Level slog.Level
}
// NewLoggerWriter creates a new logger-based writer
func NewLoggerWriter(cfg LoggerConfig) *LoggerWriter {
if cfg.Output == nil {
cfg.Output = os.Stdout
}
var handler slog.Handler
opts := &slog.HandlerOptions{
Level: cfg.Level,
}
switch cfg.Format {
case LogFormatText:
handler = slog.NewTextHandler(cfg.Output, opts)
default:
handler = slog.NewJSONHandler(cfg.Output, opts)
}
return &LoggerWriter{
logger: slog.New(handler),
level: cfg.Level,
}
}
// Write outputs the metrics using structured logging
func (w *LoggerWriter) Write(m *metrics.SystemMetrics) error {
// Build top CPU process attrs
topCPUAttrs := make([]any, 0, len(m.TopCPU))
for _, p := range m.TopCPU {
// Use plain maps so both the JSON and text handlers render the entries cleanly
topCPUAttrs = append(topCPUAttrs, map[string]any{
"pid": p.PID,
"name": p.Name,
"cpu_percent": p.CPUPercent,
})
}
// Build top memory process attrs
topMemAttrs := make([]any, 0, len(m.TopMemory))
for _, p := range m.TopMemory {
topMemAttrs = append(topMemAttrs, map[string]any{
"pid": p.PID,
"name": p.Name,
"rss_bytes": p.MemRSS,
})
}
w.logger.Info("metrics_collected",
slog.Time("collection_time", m.Timestamp),
slog.Int("total_processes", m.TotalProcesses),
slog.Group("cpu",
slog.Float64("total_percent", m.CPU.TotalPercent),
slog.Float64("user_percent", m.CPU.UserPercent),
slog.Float64("system_percent", m.CPU.SystemPercent),
slog.Float64("idle_percent", m.CPU.IdlePercent),
slog.Float64("iowait_percent", m.CPU.IOWaitPercent),
),
slog.Group("memory",
slog.Uint64("total_bytes", m.Memory.TotalBytes),
slog.Uint64("used_bytes", m.Memory.UsedBytes),
slog.Uint64("free_bytes", m.Memory.FreeBytes),
slog.Uint64("available_bytes", m.Memory.AvailableBytes),
slog.Float64("used_percent", m.Memory.UsedPercent),
slog.Uint64("total_rss_bytes", m.Memory.TotalRSSBytes),
slog.Float64("rss_percent", m.Memory.RSSPercent),
),
slog.Any("top_cpu", topCPUAttrs),
slog.Any("top_memory", topMemAttrs),
)
return nil
}
// Close is a no-op for the logger writer
func (w *LoggerWriter) Close() error {
return nil
}
// ParseLogLevel parses a log level string
func ParseLogLevel(level string) slog.Level {
switch level {
case "debug":
return slog.LevelDebug
case "warn", "warning":
return slog.LevelWarn
case "error":
return slog.LevelError
default:
return slog.LevelInfo
}
}
// ParseLogFormat parses a log format string
func ParseLogFormat(format string) LogFormat {
switch format {
case "text":
return LogFormatText
default:
return LogFormatJSON
}
}

13
internal/output/types.go Normal file

@@ -0,0 +1,13 @@
package output
import "edp.buildth.ing/DevFW/forgejo-runner-resource-collector/internal/metrics"
// Writer defines the interface for outputting metrics
// This allows for different implementations (logging, HTTP push, etc.)
type Writer interface {
// Write outputs the collected metrics
Write(m *metrics.SystemMetrics) error
// Close cleanly shuts down the writer
Close() error
}
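
As an illustration of the interface, a hypothetical HTTP-push implementation (not part of this commit; the endpoint and payload shape are assumptions):

package output

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"

	"edp.buildth.ing/DevFW/forgejo-runner-resource-collector/internal/metrics"
)

// HTTPWriter pushes each snapshot as JSON to a collector endpoint.
type HTTPWriter struct {
	URL    string       // target ingestion endpoint (assumed)
	Client *http.Client // nil means http.DefaultClient
}

func (w *HTTPWriter) Write(m *metrics.SystemMetrics) error {
	body, err := json.Marshal(m) // uses the json tags on SystemMetrics
	if err != nil {
		return fmt.Errorf("encoding metrics: %w", err)
	}
	client := w.Client
	if client == nil {
		client = http.DefaultClient
	}
	resp, err := client.Post(w.URL, "application/json", bytes.NewReader(body))
	if err != nil {
		return fmt.Errorf("pushing metrics: %w", err)
	}
	defer func() { _ = resp.Body.Close() }()
	if resp.StatusCode >= 300 {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}

func (w *HTTPWriter) Close() error { return nil }
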

174
internal/proc/process.go Normal file

@@ -0,0 +1,174 @@
package proc
import (
"fmt"
"os"
"strconv"
"strings"
)
// DefaultClockTicks is the assumed number of clock ticks per second (SC_CLK_TCK).
// On most Linux systems this is 100; the value is fixed here rather than queried via sysconf.
const DefaultClockTicks = 100
// SystemInfo holds system-wide information from /proc
type SystemInfo struct {
MemTotal uint64 // Total physical memory in bytes
MemFree uint64 // Free memory in bytes
MemAvailable uint64 // Available memory in bytes
Buffers uint64 // Buffer memory in bytes
Cached uint64 // Cached memory in bytes
CPUCount int // Number of CPUs
}
// DiscoverPIDs scans /proc and returns a list of all process PIDs
func DiscoverPIDs(procPath string) ([]int, error) {
entries, err := os.ReadDir(procPath)
if err != nil {
return nil, fmt.Errorf("reading proc directory: %w", err)
}
var pids []int
for _, entry := range entries {
if !entry.IsDir() {
continue
}
pid, err := strconv.Atoi(entry.Name())
if err != nil {
// Not a PID directory
continue
}
pids = append(pids, pid)
}
return pids, nil
}
// ReadSystemInfo reads system-wide memory information from /proc/meminfo
func ReadSystemInfo(procPath string) (*SystemInfo, error) {
info := &SystemInfo{}
// Read /proc/meminfo
data, err := os.ReadFile(fmt.Sprintf("%s/meminfo", procPath))
if err != nil {
return nil, fmt.Errorf("reading meminfo: %w", err)
}
lines := strings.Split(string(data), "\n")
for _, line := range lines {
parts := strings.SplitN(line, ":", 2)
if len(parts) != 2 {
continue
}
key := strings.TrimSpace(parts[0])
value := parseMemoryValue(strings.TrimSpace(parts[1]))
switch key {
case "MemTotal":
info.MemTotal = value
case "MemFree":
info.MemFree = value
case "MemAvailable":
info.MemAvailable = value
case "Buffers":
info.Buffers = value
case "Cached":
info.Cached = value
}
}
// Count CPUs from /proc/cpuinfo
cpuData, err := os.ReadFile(fmt.Sprintf("%s/cpuinfo", procPath))
if err == nil {
for _, line := range strings.Split(string(cpuData), "\n") {
if strings.HasPrefix(line, "processor") {
info.CPUCount++
}
}
}
if info.CPUCount == 0 {
info.CPUCount = 1 // Default to 1 CPU
}
return info, nil
}
// ReadSystemCPU reads the total CPU time from /proc/stat
// Returns: user, nice, system, idle, iowait, irq, softirq times (in clock ticks)
func ReadSystemCPU(procPath string) (user, nice, system, idle, iowait, irq, softirq uint64, err error) {
data, err := os.ReadFile(fmt.Sprintf("%s/stat", procPath))
if err != nil {
return 0, 0, 0, 0, 0, 0, 0, fmt.Errorf("reading /proc/stat: %w", err)
}
lines := strings.Split(string(data), "\n")
for _, line := range lines {
if strings.HasPrefix(line, "cpu ") {
fields := strings.Fields(line)
if len(fields) < 8 {
return 0, 0, 0, 0, 0, 0, 0, fmt.Errorf("invalid cpu line format")
}
user, _ = strconv.ParseUint(fields[1], 10, 64)
nice, _ = strconv.ParseUint(fields[2], 10, 64)
system, _ = strconv.ParseUint(fields[3], 10, 64)
idle, _ = strconv.ParseUint(fields[4], 10, 64)
iowait, _ = strconv.ParseUint(fields[5], 10, 64)
irq, _ = strconv.ParseUint(fields[6], 10, 64)
softirq, _ = strconv.ParseUint(fields[7], 10, 64)
return user, nice, system, idle, iowait, irq, softirq, nil
}
}
return 0, 0, 0, 0, 0, 0, 0, fmt.Errorf("cpu line not found in /proc/stat")
}
// ProcessInfo combines stat and status information for a process
type ProcessInfo struct {
Stat *ProcStat
Status *ProcStatus
}
// ReadProcess reads both stat and status for a single process
func ReadProcess(procPath string, pid int) (*ProcessInfo, error) {
stat, err := ReadStat(procPath, pid)
if err != nil {
return nil, err
}
status, err := ReadStatus(procPath, pid)
if err != nil {
return nil, err
}
return &ProcessInfo{
Stat: stat,
Status: status,
}, nil
}
// ReadAllProcesses reads information for all processes
// It skips processes that disappear during collection (race-safe)
func ReadAllProcesses(procPath string) ([]*ProcessInfo, error) {
pids, err := DiscoverPIDs(procPath)
if err != nil {
return nil, err
}
var processes []*ProcessInfo
for _, pid := range pids {
info, err := ReadProcess(procPath, pid)
if err != nil {
// Process may have exited, skip it
continue
}
processes = append(processes, info)
}
return processes, nil
}
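
For reference, the aggregate "cpu" line of /proc/stat that ReadSystemCPU parses looks like this (illustrative values, in clock ticks):

cpu  4705 150 1120 16250 520 30 45 0 0 0

fields[1..7] map to user=4705, nice=150, system=1120, idle=16250, iowait=520, irq=30, softirq=45; the trailing columns (steal, guest, guest_nice) are ignored.
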

131
internal/proc/stat.go Normal file

@@ -0,0 +1,131 @@
package proc
import (
"fmt"
"os"
"strconv"
"strings"
)
// ProcStat holds CPU-related information from /proc/[pid]/stat
type ProcStat struct {
PID int
Comm string // Process name (executable filename)
State byte // Process state (R, S, D, Z, T, etc.)
PPID int // Parent PID
UTime uint64 // User mode CPU time (in clock ticks)
STime uint64 // Kernel mode CPU time (in clock ticks)
CUTime int64 // Children user mode CPU time
CSTime int64 // Children system mode CPU time
NumThreads int64 // Number of threads
StartTime uint64 // Time process started after boot (in clock ticks)
}
// ReadStat reads and parses /proc/[pid]/stat for the given PID
func ReadStat(procPath string, pid int) (*ProcStat, error) {
path := fmt.Sprintf("%s/%d/stat", procPath, pid)
data, err := os.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("reading stat file: %w", err)
}
return parseStat(pid, string(data))
}
// parseStat parses the content of /proc/[pid]/stat
// The format is complex because comm (field 2) can contain spaces and parentheses
func parseStat(pid int, data string) (*ProcStat, error) {
// Find the comm field which is enclosed in parentheses
// This handles cases where comm contains spaces or other special characters
start := strings.Index(data, "(")
end := strings.LastIndex(data, ")")
if start == -1 || end == -1 || end <= start {
return nil, fmt.Errorf("invalid stat format: cannot find comm field")
}
comm := data[start+1 : end]
// Fields after the comm field
remainder := strings.TrimSpace(data[end+1:])
fields := strings.Fields(remainder)
// We need at least 20 fields after comm (fields 3-22)
if len(fields) < 20 {
return nil, fmt.Errorf("invalid stat format: expected at least 20 fields after comm, got %d", len(fields))
}
stat := &ProcStat{
PID: pid,
Comm: comm,
}
// Field 3: state (index 0 after comm)
if len(fields[0]) > 0 {
stat.State = fields[0][0]
}
// Field 4: ppid (index 1)
ppid, err := strconv.Atoi(fields[1])
if err != nil {
return nil, fmt.Errorf("parsing ppid: %w", err)
}
stat.PPID = ppid
// Field 14: utime (index 11) - user mode CPU time
utime, err := strconv.ParseUint(fields[11], 10, 64)
if err != nil {
return nil, fmt.Errorf("parsing utime: %w", err)
}
stat.UTime = utime
// Field 15: stime (index 12) - kernel mode CPU time
stime, err := strconv.ParseUint(fields[12], 10, 64)
if err != nil {
return nil, fmt.Errorf("parsing stime: %w", err)
}
stat.STime = stime
// Field 16: cutime (index 13) - children user mode CPU time
cutime, err := strconv.ParseInt(fields[13], 10, 64)
if err != nil {
return nil, fmt.Errorf("parsing cutime: %w", err)
}
stat.CUTime = cutime
// Field 17: cstime (index 14) - children system mode CPU time
cstime, err := strconv.ParseInt(fields[14], 10, 64)
if err != nil {
return nil, fmt.Errorf("parsing cstime: %w", err)
}
stat.CSTime = cstime
// Field 20: num_threads (index 17)
numThreads, err := strconv.ParseInt(fields[17], 10, 64)
if err != nil {
return nil, fmt.Errorf("parsing num_threads: %w", err)
}
stat.NumThreads = numThreads
// Field 22: starttime (index 19) - time process started after boot
startTime, err := strconv.ParseUint(fields[19], 10, 64)
if err != nil {
return nil, fmt.Errorf("parsing starttime: %w", err)
}
stat.StartTime = startTime
return stat, nil
}
// TotalTime returns the total CPU time (user + system) in clock ticks
func (s *ProcStat) TotalTime() uint64 {
return s.UTime + s.STime
}
// TotalTimeWithChildren returns total CPU time including children
func (s *ProcStat) TotalTimeWithChildren() uint64 {
total := int64(s.UTime) + int64(s.STime) + s.CUTime + s.CSTime
if total < 0 {
return 0
}
return uint64(total)
}
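
For reference, an illustrative /proc/[pid]/stat line and how parseStat maps it (the parenthesis scan is what lets comm contain spaces):

42 (my worker) S 1 42 42 0 -1 4194304 120 0 0 0 350 150 0 0 20 0 3 0 12345

Parsed: Comm="my worker", State='S', PPID=1, UTime=350, STime=150, CUTime=0, CSTime=0, NumThreads=3, StartTime=12345.
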

147
internal/proc/status.go Normal file

@@ -0,0 +1,147 @@
package proc
import (
"bufio"
"fmt"
"os"
"strconv"
"strings"
)
// ProcStatus holds memory-related information from /proc/[pid]/status
type ProcStatus struct {
PID int
Name string // Process name
VmSize uint64 // Virtual memory size in bytes
VmRSS uint64 // Resident Set Size in bytes (actual RAM used)
VmPeak uint64 // Peak virtual memory size in bytes
VmData uint64 // Data segment size in bytes
VmStk uint64 // Stack size in bytes
VmExe uint64 // Text (code) size in bytes
VmLib uint64 // Shared library code size in bytes
RssAnon uint64 // Anonymous RSS in bytes
RssFile uint64 // File-backed RSS in bytes
RssShmem uint64 // Shared memory RSS in bytes
Threads int // Number of threads
UID int // Real user ID
GID int // Real group ID
}
// ReadStatus reads and parses /proc/[pid]/status for the given PID
func ReadStatus(procPath string, pid int) (*ProcStatus, error) {
path := fmt.Sprintf("%s/%d/status", procPath, pid)
file, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("opening status file: %w", err)
}
defer func() { _ = file.Close() }()
return parseStatus(pid, file)
}
// parseStatus parses the content of /proc/[pid]/status
func parseStatus(pid int, file *os.File) (*ProcStatus, error) {
status := &ProcStatus{PID: pid}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
line := scanner.Text()
parts := strings.SplitN(line, ":", 2)
if len(parts) != 2 {
continue
}
key := strings.TrimSpace(parts[0])
value := strings.TrimSpace(parts[1])
switch key {
case "Name":
status.Name = value
case "VmSize":
status.VmSize = parseMemoryValue(value)
case "VmRSS":
status.VmRSS = parseMemoryValue(value)
case "VmPeak":
status.VmPeak = parseMemoryValue(value)
case "VmData":
status.VmData = parseMemoryValue(value)
case "VmStk":
status.VmStk = parseMemoryValue(value)
case "VmExe":
status.VmExe = parseMemoryValue(value)
case "VmLib":
status.VmLib = parseMemoryValue(value)
case "RssAnon":
status.RssAnon = parseMemoryValue(value)
case "RssFile":
status.RssFile = parseMemoryValue(value)
case "RssShmem":
status.RssShmem = parseMemoryValue(value)
case "Threads":
if n, err := strconv.Atoi(value); err == nil {
status.Threads = n
}
case "Uid":
// Format: "Uid: real effective saved filesystem"
fields := strings.Fields(value)
if len(fields) > 0 {
if uid, err := strconv.Atoi(fields[0]); err == nil {
status.UID = uid
}
}
case "Gid":
// Format: "Gid: real effective saved filesystem"
fields := strings.Fields(value)
if len(fields) > 0 {
if gid, err := strconv.Atoi(fields[0]); err == nil {
status.GID = gid
}
}
}
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("reading status file: %w", err)
}
return status, nil
}
// parseMemoryValue parses memory values from /proc/[pid]/status
// Format is typically "1234 kB"
func parseMemoryValue(value string) uint64 {
fields := strings.Fields(value)
if len(fields) == 0 {
return 0
}
num, err := strconv.ParseUint(fields[0], 10, 64)
if err != nil {
return 0
}
// Convert to bytes if unit is specified
if len(fields) > 1 {
unit := strings.ToLower(fields[1])
switch unit {
case "kb":
num *= 1024
case "mb":
num *= 1024 * 1024
case "gb":
num *= 1024 * 1024 * 1024
}
}
return num
}
// TotalRSS returns the total RSS (RssAnon + RssFile + RssShmem)
// Falls back to VmRSS if the detailed fields are not available
func (s *ProcStatus) TotalRSS() uint64 {
total := s.RssAnon + s.RssFile + s.RssShmem
if total == 0 {
return s.VmRSS
}
return total
}
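
For reference, an illustrative /proc/[pid]/status excerpt and how it parses:

Name:   resource-collector
VmSize:    712340 kB
VmRSS:       5312 kB
Threads:        7
Uid:    1000    1000    1000    1000

parseMemoryValue converts "5312 kB" to 5312 * 1024 = 5439488 bytes; for Uid only the first (real) value, 1000, is kept.
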

54
scripts/hooks/commit-msg Normal file

@@ -0,0 +1,54 @@
#!/bin/bash
# ABOUTME: Commit-msg hook that validates commit messages follow conventional commit format.
# ABOUTME: Install with `make install-hooks`.
set -e
COMMIT_MSG_FILE="$1"
COMMIT_MSG=$(cat "$COMMIT_MSG_FILE")
# Pattern for conventional commits: type(scope)?: message
# Types: feat, fix, chore, docs, style, refactor, perf, test, build, ci
PATTERN='^(feat|fix|chore|docs|style|refactor|perf|test|build|ci)(\([[:alnum:]_-]+\))?!?:.+$'
# Get the first line (subject) of the commit message
FIRST_LINE=$(echo "$COMMIT_MSG" | head -n 1)
# Skip validation for merge commits
if echo "$FIRST_LINE" | grep -qE '^Merge '; then
exit 0
fi
# Skip validation for revert commits
if echo "$FIRST_LINE" | grep -qE '^Revert '; then
exit 0
fi
if ! echo "$FIRST_LINE" | grep -qE "$PATTERN"; then
echo "Error: Invalid commit message format."
echo ""
echo "Commit message: '$FIRST_LINE'"
echo ""
echo "Expected format: <type>(<scope>)?: <description>"
echo ""
echo "Valid types:"
echo " feat - A new feature"
echo " fix - A bug fix"
echo " chore - Maintenance tasks"
echo " docs - Documentation changes"
echo " style - Code style changes (formatting, etc.)"
echo " refactor - Code refactoring"
echo " perf - Performance improvements"
echo " test - Adding or updating tests"
echo " build - Build system changes"
echo " ci - CI configuration changes"
echo ""
echo "Examples:"
echo " feat: add user authentication"
echo " fix(auth): resolve token expiration issue"
echo " chore(deps): update dependencies"
echo " feat!: breaking change in API"
exit 1
fi
echo "Commit message format valid."

28
scripts/hooks/pre-commit Executable file

@@ -0,0 +1,28 @@
#!/bin/bash
# ABOUTME: Pre-commit hook that runs formatting and linting checks.
# ABOUTME: Install with `make install-hooks`.
set -e
echo "Running pre-commit checks..."
# Run go fmt and check if there are any changes
echo "Checking formatting..."
UNFORMATTED=$(gofmt -l .)
if [ -n "$UNFORMATTED" ]; then
echo "Error: The following files are not formatted:"
echo "$UNFORMATTED"
echo ""
echo "Run 'make fmt' to format them."
exit 1
fi
# Run linter
echo "Running linter..."
make lint
# Check for secrets with gitleaks
echo "Checking for secrets..."
make gitleaks
echo "Pre-commit checks passed!"