added helm chart for garm

Manuel Ganter 2025-12-01 15:43:13 +01:00
commit 8f2330528f
14 changed files with 697 additions and 0 deletions

charts/garm/Chart.yaml

@@ -0,0 +1,13 @@
apiVersion: v2
name: garm
description: A Helm chart for Garm - GitHub Actions Runner Manager
type: application
version: 0.1.0
appVersion: "1.0.0"
keywords:
  - garm
  - github-actions
  - runner
maintainers:
  - name: Your Name
    email: your.email@example.com

charts/garm/README.md

@@ -0,0 +1,122 @@
# Garm Helm Chart
This Helm chart deploys Garm (GitHub Actions Runner Manager) on a Kubernetes cluster.
## Prerequisites
- Kubernetes 1.19+
- Helm 3.0+
- Ingress controller (nginx recommended)
- Cert-manager (optional, for TLS)
## Installing the Chart
1. Add the Helm repository:
```bash
helm repo add garm https://your-repo-url
helm repo update
```
2. Create a values file (e.g., `values.yaml`) and configure the required parameters:
```yaml
# Only the Edge Connect credentials are required; all others will be auto-generated
credentials:
  edgeConnect:
    username: "your-ec-username"
    password: "your-ec-password"
  # Optional: override the auto-generated credentials
  admin:
    generateCredentials: false # Set to false to use custom credentials
    username: "custom-admin"
    password: "custom-password"
    email: "admin@example.com"
  gitea:
    generateToken: false # Set to false to use a custom token
    token: "your-custom-token"

ingress:
  hosts:
    - host: your-garm-domain.example.com
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: garm-tls
      hosts:
        - your-garm-domain.example.com
```
Note: If you don't provide custom credentials, the chart will automatically generate secure random values for:
- Admin password
- Gitea token
- JWT secret
- Database passphrase
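The generated values are written to the release's credentials secret, so you can read them back after installation. A minimal sketch, assuming the release is named `garm` (making the secret name `garm-credentials`); add `-n <namespace>` if you installed into a dedicated namespace:
```bash
# Read the auto-generated admin password and Gitea token back from the credentials secret
kubectl get secret garm-credentials -o jsonpath='{.data.GARM_ADMIN_PASSWORD}' | base64 -d; echo
kubectl get secret garm-credentials -o jsonpath='{.data.GITEA_TOKEN}' | base64 -d; echo
```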
3. Install the chart:
```bash
helm install garm garm/garm -f values.yaml
```
## Configuration
The following table lists the configurable parameters of the Garm chart and their default values.
| Parameter | Description | Default |
|-----------|-------------|---------|
| `image.repository` | Garm image repository | `edp.buildth.ing/devfw-cicd/garm` |
| `image.tag` | Garm image tag | `provider-ec-40` |
| `image.pullPolicy` | Image pull policy | `Always` |
| `replicaCount` | Number of Garm replicas | `1` |
| `persistence.enabled` | Enable persistence using PVC | `true` |
| `persistence.size` | PVC size | `100Gi` |
| `persistence.storageClass` | PVC storage class name | `"csi-disk"` |
| `ingress.enabled` | Enable ingress | `true` |
| `ingress.className` | Ingress class name | `nginx` |
| `credentials.gitea.generateToken` | Auto-generate Gitea token | `true` |
| `credentials.gitea.token` | Custom Gitea token (if generateToken=false) | `""` |
| `credentials.admin.generateCredentials` | Auto-generate admin credentials | `true` |
| `credentials.admin.username` | Admin username | `admin` |
| `credentials.admin.password` | Custom admin password (if generateCredentials=false) | `""` |
| `credentials.admin.email` | Admin email | `"admin@example.com"` |
| `credentials.edgeConnect.username` | Edge Connect username (required) | `""` |
| `credentials.edgeConnect.password` | Edge Connect password (required) | `""` |
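Any of these parameters can also be overridden on the command line with `--set`. For example (the hostname and size below are placeholders, not defaults):
```bash
helm upgrade --install garm garm/garm -f values.yaml \
  --set replicaCount=1 \
  --set "ingress.hosts[0].host=garm.example.com" \
  --set persistence.size=50Gi
```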
## Security Considerations
1. Always change default passwords in production
2. Use secrets management solutions for sensitive data
3. Enable TLS via ingress configuration
4. Configure proper RBAC if needed
## Troubleshooting
1. Check the Garm logs:
```bash
kubectl logs -l app.kubernetes.io/name=garm
```
2. Verify the init job status:
```bash
kubectl get jobs -l app.kubernetes.io/name=garm
```
3. Check the ingress configuration:
```bash
kubectl get ingress -l app.kubernetes.io/name=garm
```
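4. If the ingress is not reachable, port-forward the service and talk to Garm directly (a sketch, assuming the release is named `garm`; the service listens on port 80 by default):
```bash
kubectl port-forward svc/garm 8080:80
```
The API and web UI should then be available at `http://localhost:8080`.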
## Uninstalling the Chart
To uninstall/delete the `garm` deployment:
```bash
helm delete garm
```
Note: This will not delete the PVC. To delete the PVC as well:
```bash
kubectl delete pvc -l app.kubernetes.io/name=garm
```


@@ -0,0 +1,11 @@
Garm has been installed. Here's how to get started:

1. First, wait for the init job to complete:

   kubectl wait --for=condition=complete job/{{ include "garm.fullname" . }}-init -n {{ .Release.Namespace }}

2. The Garm service is available at:
{{- range .Values.ingress.hosts }}
   https://{{ .host }}
{{- end }}

Lookup: {{ (lookup "v1" "Secret" .Release.Namespace (print (include "garm.fullname" .) "-credentials")) }}


@@ -0,0 +1,56 @@
{{/*
Generate a random string for use as a secret
*/}}
{{- define "garm.randomString" -}}
{{- randAlphaNum 32 -}}
{{- end -}}
{{/*
Get admin password - either user-provided or generated
*/}}
{{- define "garm.adminPassword" -}}
{{ $credentials := (lookup "v1" "Secret" .Release.Namespace "garm-credentials") }}
{{- if hasKey $credentials "data" -}}
{{- if hasKey $credentials.data "GARM_ADMIN_PASSWORD" }}
{{- index $credentials.data "GARM_ADMIN_PASSWORD" | b64dec -}}
{{- else -}}
{{- "NO ADMIN PASSWORD" -}}
{{- end -}}
{{- else -}}
{{- include "garm.randomString" . -}}
{{- end -}}
{{- end -}}
{{/*
Get Gitea token - either user-provided or generated
*/}}
{{- define "garm.giteaToken" -}}
{{- if not .Values.credentials.giteaToken -}}
{{- include "garm.randomString" . -}}
{{- else -}}
{{- .Values.credentials.giteaToken -}}
{{- end -}}
{{- end -}}
{{/*
Get JWT secret - either user-provided or generated
*/}}
{{- define "garm.jwtSecret" -}}
{{- if .Values.garm.jwtAuth.secret -}}
{{- .Values.garm.jwtAuth.secret -}}
{{- else -}}
{{- include "garm.randomString" . -}}
{{- end -}}
{{- end -}}
{{/*
Get database passphrase - either user-provided or generated
*/}}
{{- define "garm.dbPassphrase" -}}
{{- if .Values.garm.database.passphrase -}}
{{- .Values.garm.database.passphrase -}}
{{- else -}}
{{- include "garm.randomString" . -}}
{{- end -}}
{{- end -}}


@@ -0,0 +1,46 @@
{{/*
Create a default fully qualified app name.
*/}}
{{- define "garm.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "garm.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "garm.labels" -}}
helm.sh/chart: {{ include "garm.chart" . }}
{{ include "garm.selectorLabels" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "garm.selectorLabels" -}}
app.kubernetes.io/name: {{ include "garm.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Expand the name of the chart.
*/}}
{{- define "garm.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}


@@ -0,0 +1,16 @@
# SPDX-License-Identifier: MIT
apiVersion: rbac.authorization.k8s.io/v1
# Grants the default ServiceAccount in the garm namespace the permissions of the
# garm-provider-k8s ClusterRole, scoped to this namespace via the RoleBinding.
kind: RoleBinding
metadata:
  name: garm-provider-k8s
  namespace: garm
subjects:
  - kind: ServiceAccount
    namespace: garm
    name: default
roleRef:
  kind: ClusterRole
  name: garm-provider-k8s
  apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,11 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: garm-provider-k8s
rules:
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "watch", "list", "create", "update", "patch", "delete"]


@@ -0,0 +1,61 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "garm.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "garm.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "garm.selectorLabels" . | nindent 6 }}
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        {{- include "garm.selectorLabels" . | nindent 8 }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: {{ .Values.garm.apiserver.port }}
              protocol: TCP
          volumeMounts:
            - name: garm-config
              mountPath: /etc/garm
              readOnly: true
            - name: edge-connect-creds
              mountPath: /etc/garm-creds
              readOnly: true
            - name: garm-data
              mountPath: /garm
              readOnly: false
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          livenessProbe:
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 1
            periodSeconds: 5
          readinessProbe:
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 1
            periodSeconds: 5
      volumes:
        - name: garm-config
          secret:
            secretName: {{ include "garm.fullname" . }}-config
        - name: edge-connect-creds
          secret:
            secretName: {{ include "garm.fullname" . }}-edge-connect-creds
        - name: garm-data
          persistentVolumeClaim:
            claimName: {{ include "garm.fullname" . }}


@@ -0,0 +1,32 @@
{{- if .Values.ingress.enabled -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: {{ include "garm.fullname" . }}
  namespace: {{ .Release.Namespace }}
  {{- with .Values.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  ingressClassName: {{ .Values.ingress.className }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          {{- range .paths }}
          - path: {{ .path }}
            pathType: {{ .pathType }}
            backend:
              service:
                name: {{ include "garm.fullname" $ }}
                port:
                  number: {{ $.Values.service.port }}
          {{- end }}
    {{- end }}
  {{- with .Values.ingress.tls }}
  tls:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}


@@ -0,0 +1,60 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: {{ include "garm.fullname" . }}-init
  namespace: {{ .Release.Namespace }}
spec:
  ttlSecondsAfterFinished: 100
  template:
    spec:
      initContainers:
        - name: wait-for-garm
          image: {{ .Values.initJob.waitForGarm.image }}
          command:
            - "/bin/sh"
            - "-c"
            - |
              echo "Waiting for Garm to be available at ${GARM_URL}"
              for i in $(seq 1 {{ .Values.initJob.waitForGarm.maxRetries }}); do
                status=$(curl -s -o /dev/null -w "%{http_code}" "${GARM_URL}")
                if [ "$status" -eq 301 ]; then
                  echo "Garm is up (HTTP 301)"
                  exit 0
                fi
                echo "Got HTTP $status, retrying in {{ .Values.initJob.waitForGarm.retryInterval }}s..."
                sleep {{ .Values.initJob.waitForGarm.retryInterval }}
              done
              echo "Garm did not return HTTP 301 after {{ .Values.initJob.waitForGarm.maxRetries }} tries"
              exit 1
          envFrom:
            - secretRef:
                name: {{ include "garm.fullname" . }}-credentials
      containers:
        - image: {{ .Values.initJob.image }}
          name: credentials-setup
          command:
            - "/bin/sh"
            - "-c"
            - |
              garm-cli init --name gitea --password ${GARM_ADMIN_PASSWORD} --username ${GARM_ADMIN_USERNAME} --email ${GARM_ADMIN_EMAIL} --url ${GARM_URL}
              if [ $? -ne 0 ]; then
                echo "garm may already be initialized"
                exit 0
              fi
              garm-cli gitea endpoint create \
                --api-base-url ${GIT_URL} \
                --base-url ${GIT_URL} \
                --description "My first Gitea endpoint" \
                --name local-gitea
              garm-cli gitea credentials add \
                --endpoint local-gitea \
                --auth-type pat \
                --pat-oauth-token ${GITEA_TOKEN} \
                --name autotoken \
                --description "Gitea token"
          envFrom:
            - secretRef:
                name: {{ include "garm.fullname" . }}-credentials
      restartPolicy: Never


@@ -0,0 +1,20 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "garm.fullname" . }}
  namespace: {{ .Release.Namespace }}
  {{- with .Values.persistence.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.persistence.storageClass }}
  storageClassName: {{ .Values.persistence.storageClass }}
  {{- end }}
  accessModes:
    {{- range .Values.persistence.accessModes }}
    - {{ . }}
    {{- end }}
  resources:
    requests:
      storage: {{ .Values.persistence.size }}


@@ -0,0 +1,101 @@
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "garm.fullname" . }}-credentials
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-weight": "-5"
stringData:
  GITEA_TOKEN: {{ include "garm.giteaToken" . | quote }}
  GARM_ADMIN_USERNAME: {{ .Values.credentials.admin.username | quote }}
  GARM_ADMIN_PASSWORD: {{ include "garm.adminPassword" . | quote }}
  GARM_ADMIN_EMAIL: {{ .Values.credentials.admin.email | quote }}
  GARM_URL: {{ printf "https://%s" (index .Values.ingress.hosts 0).host | quote }}
  GIT_URL: {{ .Values.credentials.gitea.url | quote }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "garm.fullname" . }}-config
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-weight": "-5"
stringData:
  config.toml: |
    [default]
    enable_webhook_management = true
    [logging]
    enable_log_streamer = {{ .Values.garm.logging.enableLogStreamer }}
    log_format = "{{ .Values.garm.logging.logFormat }}"
    log_level = "{{ .Values.garm.logging.logLevel }}"
    log_source = {{ .Values.garm.logging.logSource }}
    [metrics]
    enable = {{ .Values.garm.metrics.enable }}
    disable_auth = {{ .Values.garm.metrics.disableAuth }}
    [jwt_auth]
    secret = "{{ include "garm.jwtSecret" . }}"
    time_to_live = "{{ .Values.garm.jwtAuth.timeToLive }}"
    [apiserver]
    bind = "{{ .Values.garm.apiserver.bind }}"
    port = {{ .Values.garm.apiserver.port }}
    use_tls = {{ .Values.garm.apiserver.useTls }}
    [apiserver.webui]
    enable = {{ .Values.garm.apiserver.webui.enable }}
    [database]
    backend = "{{ .Values.garm.database.backend }}"
    passphrase = "{{ include "garm.dbPassphrase" . }}"
    [database.sqlite3]
    db_file = "{{ .Values.garm.database.sqlite3.dbFile }}"
    {{- range .Values.garm.provider }}
    [[provider]]
    name = "{{ .name }}"
    description = "{{ .description }}"
    provider_type = "{{ .providerType }}"
    [provider.external]
    config_file = "{{ .external.configFile }}"
    provider_executable = "{{ .external.providerExecutable }}"
    environment_variables = {{ .external.environmentVariables | toJson }}
    {{- end }}
  k8s-provider-config.toml: |
    kubeConfigPath: "" # path to a kubernetes config file - if empty the in cluster config will be used
    runnerNamespace: {{ .Values.providerConfig.k8s.runnerNamespace | quote }}
    podTemplate: # pod template to use for the runner pods / helpful to add sidecar containers
      spec:
        volumes:
          - name: my-additional-volume
            emptyDir: {}
    flavors:
    {{- toYaml .Values.providerConfig.k8s.flavors | nindent 6 }}
  edge-connect-provider-config.toml: |
    organization = {{ .Values.providerConfig.edgeConnect.organization | quote }}
    region = {{ .Values.providerConfig.edgeConnect.region | quote }}
    edge_connect_url = {{ .Values.providerConfig.edgeConnect.edgeConnectUrl | quote }}
    log_file = "/garm/provider.log"
    credentials_file = "/etc/garm-creds/creds.toml"
    [cloudlet]
    name = {{ .Values.providerConfig.edgeConnect.cloudlet.name | quote }}
    organization = {{ .Values.providerConfig.edgeConnect.cloudlet.organization | quote }}
---
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "garm.fullname" . }}-edge-connect-creds
  namespace: {{ .Release.Namespace }}
  annotations:
    "helm.sh/hook": pre-install,pre-upgrade
    "helm.sh/hook-weight": "-5"
stringData:
  creds.toml: |
    username = "{{ required "Edge Connect username is required" .Values.credentials.edgeConnect.username }}"
    password = "{{ required "Edge Connect password is required" .Values.credentials.edgeConnect.password }}"


@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ include "garm.fullname" . }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "garm.labels" . | nindent 4 }}
spec:
  type: {{ .Values.service.type }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: {{ .Values.service.port }}
      protocol: TCP
      name: http
  selector:
    {{- include "garm.selectorLabels" . | nindent 4 }}

charts/garm/values.yaml

@@ -0,0 +1,132 @@
# Default values for garm chart
nameOverride: ""
fullnameOverride: ""

image:
  repository: edp.buildth.ing/devfw-cicd/garm
  tag: provider-ec-40
  pullPolicy: Always

replicaCount: 1

persistence:
  enabled: true
  size: 100Gi
  storageClass: "csi-disk"
  annotations:
    everest.io/disk-volume-type: GPSSD
  accessModes:
    - ReadWriteOnce

ingress:
  enabled: true
  className: nginx
  annotations:
    cert-manager.io/cluster-issuer: main
    nginx.ingress.kubernetes.io/backend-protocol: HTTP
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
  hosts:
    - host: garm.garm-provider-test.t09.de
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: garm-net-tls
      hosts:
        - garm.garm-provider-test.t09.de

service:
  type: ClusterIP
  port: 80

# Garm Configuration
garm:
  logging:
    enableLogStreamer: true
    logFormat: text # can be "text" or "json"
    logLevel: info
    logSource: false
  metrics:
    enable: true
    disableAuth: false
  jwtAuth:
    # You should change this in production
    # secret: "changeme-use-a-secure-random-string"
    timeToLive: "8760h"
  apiserver:
    bind: "0.0.0.0"
    port: 80
    useTls: false
    webui:
      enable: true
  database:
    backend: sqlite3
    # passphrase: "changeme-use-a-secure-random-string"
    sqlite3:
      dbFile: "/garm/garm.db"
  provider:
    - name: "k8s"
      description: "kubernetes provider"
      providerType: "external"
      external:
        configFile: "/etc/garm/k8s-provider-config.toml"
        providerExecutable: "/opt/garm/providers.d/garm-provider-k8s"
        environmentVariables: ["KUBERNETES_"]
    - name: "edge-connect"
      description: "edge connect provider"
      providerType: "external"
      external:
        configFile: "/etc/garm/edge-connect-provider-config.toml"
        providerExecutable: "/opt/garm/providers.d/garm-provider-edge-connect"
        environmentVariables: ["EDP_EDGE_CONNECT_"]

# Provider Configuration
providerConfig:
  k8s:
    runnerNamespace: "garm"
    flavors:
      micro:
        requests:
          cpu: 50m
          memory: 50Mi
        limits:
          memory: 200Mi
      ultra:
        requests:
          cpu: 500m
          memory: 500Mi
        limits:
          memory: 1Gi
  edgeConnect:
    organization: "edp-developer-framework"
    region: "EU"
    edgeConnectUrl: "https://hub.apps.edge.platform.mg3.mdb.osc.live"
    cloudlet:
      name: "Munich"
      organization: "TelekomOP"

# Credentials and Secrets
credentials:
  # giteaToken: "" # Required: Your Gitea access token
  admin:
    username: admin
    # password: "changeme-generate-strong-password"
    email: "admin@example.com"
  edgeConnect:
    username: "<insert username>" # Required
    password: "<insert password>" # Required
  gitea:
    url: "https://garm-provider-test.t09.de" # Required

initJob:
  image: edp.buildth.ing/devfw-cicd/garm-test
  waitForGarm:
    image: appropriate/curl
    timeoutSeconds: 60
    retryInterval: 6
    maxRetries: 10