Automated upload for edp.buildth.ing

This commit is contained in:
Automated pipeline 2026-01-29 09:07:02 +00:00 committed by Martin McCaffery
parent 4d1eb663be
commit 27c03406bf
Signed by: martin.mccaffery
GPG key ID: 7C4D0F375BCEE533
19 changed files with 461 additions and 66 deletions

View file

@ -0,0 +1,24 @@
---
# Argo CD registration Application: points at the coder stack directory so
# the Applications defined there are created in the argocd namespace.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: coder-reg
  namespace: argocd
  labels:
    env: dev
  # Ensure child resources are deleted when this Application is removed.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    name: in-cluster
    namespace: argocd
  source:
    path: "otc/edp.buildth.ing/stacks/coder"
    repoURL: "https://observability.buildth.ing/DevFW-CICD/stacks-instances"
    targetRevision: HEAD
  project: default
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View file

@ -0,0 +1,24 @@
---
# Argo CD registration Application: points at the garm stack directory so
# the Applications defined there are created in the argocd namespace.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: garm-reg
  namespace: argocd
  labels:
    env: dev
  # Ensure child resources are deleted when this Application is removed.
  finalizers:
    - resources-finalizer.argocd.argoproj.io
spec:
  destination:
    name: in-cluster
    namespace: argocd
  source:
    path: "otc/edp.buildth.ing/stacks/garm"
    repoURL: "https://observability.buildth.ing/DevFW-CICD/stacks-instances"
    targetRevision: HEAD
  project: default
  syncPolicy:
    automated:
      prune: true
      selfHeal: true
    syncOptions:
      - CreateNamespace=true

View file

@ -0,0 +1,32 @@
---
# Deploys the Coder Helm chart, with values and extra manifests pulled from
# the stacks-instances repo via the multi-source "$values" ref pattern.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: coder
  namespace: argocd
  labels:
    env: dev
spec:
  project: default
  syncPolicy:
    automated:
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
    retry:
      # -1 retries forever; acceptable here because selfHeal reconverges.
      limit: -1
  destination:
    name: in-cluster
    namespace: coder
  sources:
    - repoURL: https://helm.coder.com/v2
      chart: coder
      targetRevision: 2.28.3
      helm:
        valueFiles:
          - $values/otc/edp.buildth.ing/stacks/coder/coder/values.yaml
    # Values-only source referenced as $values above.
    - repoURL: https://observability.buildth.ing/DevFW-CICD/stacks-instances
      targetRevision: HEAD
      ref: values
    # Plain-manifest source deployed alongside the chart.
    - repoURL: https://observability.buildth.ing/DevFW-CICD/stacks-instances
      targetRevision: HEAD
      path: "otc/edp.buildth.ing/stacks/coder/coder/manifests"

View file

@ -0,0 +1,38 @@
---
# CloudNativePG cluster backing Coder. Single instance (dev sizing).
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: coder-db
  namespace: coder
spec:
  instances: 1
  primaryUpdateStrategy: unsupervised
  resources:
    requests:
      memory: "1Gi"
      cpu: "1"
    limits:
      memory: "1Gi"
      cpu: "1"
  managed:
    roles:
      # Application role; its password comes from the coder-db-user secret.
      - name: coder
        createdb: true
        login: true
        passwordSecret:
          name: coder-db-user
  storage:
    size: 10Gi
    storageClass: csi-disk
---
# Declarative database owned by the coder role on the cluster above.
apiVersion: postgresql.cnpg.io/v1
kind: Database
metadata:
  name: coder
  namespace: coder
spec:
  cluster:
    name: coder-db
  name: coder
  owner: coder

View file

@ -0,0 +1,61 @@
coder:
  # You can specify any environment variables you'd like to pass to Coder
  # here. Coder consumes environment variables listed in
  # `coder server --help`, and these environment variables are also passed
  # to the workspace provisioner (so you can consume them in your Terraform
  # templates for auth keys etc.).
  #
  # Please keep in mind that you should not set `CODER_HTTP_ADDRESS`,
  # `CODER_TLS_ENABLE`, `CODER_TLS_CERT_FILE` or `CODER_TLS_KEY_FILE` as
  # they are already set by the Helm chart and will cause conflicts.
  env:
    - name: CODER_ACCESS_URL
      value: https://coder.edp.buildth.ing
    - name: CODER_PG_CONNECTION_URL
      valueFrom:
        secretKeyRef:
          # You'll need to create a secret called coder-db-url with your
          # Postgres connection URL like:
          # postgres://coder:password@postgres:5432/coder?sslmode=disable
          name: coder-db-user
          key: url
    # For production deployments, we recommend configuring your own GitHub
    # OAuth2 provider and disabling the default one.
    - name: CODER_OAUTH2_GITHUB_DEFAULT_PROVIDER_ENABLE
      value: "false"
    - name: EDGE_CONNECT_ENDPOINT
      valueFrom:
        secretKeyRef:
          name: edge-credential
          key: endpoint
    - name: EDGE_CONNECT_USERNAME
      valueFrom:
        secretKeyRef:
          name: edge-credential
          key: username
    - name: EDGE_CONNECT_PASSWORD
      valueFrom:
        secretKeyRef:
          name: edge-credential
          key: password
    # (Optional) For production deployments the access URL should be set.
    # If you're just trying Coder, access the dashboard via the service IP.
    # - name: CODER_ACCESS_URL
    #   value: "https://coder.example.com"
  # tls:
  #   secretNames:
  #     - my-tls-secret-name
  service:
    type: ClusterIP
  ingress:
    enable: true
    className: nginx
    host: coder.edp.buildth.ing
    annotations:
      cert-manager.io/cluster-issuer: main
    tls:
      enable: true
      secretName: coder-tls-secret

View file

@ -23,7 +23,7 @@ spec:
# TODO: RIRE Can be updated when https://github.com/argoproj/argo-cd/issues/20790 is fixed and merged
# As logout make problems, it is suggested to switch from path based routing to an own argocd domain,
# similar to the CNOE amazon reference implementation and in our case, Forgejo
targetRevision: argo-cd-7.8.28
targetRevision: argo-cd-9.1.5
helm:
valueFiles:
- $values/otc/edp.buildth.ing/stacks/core/argocd/values.yaml
@ -32,4 +32,4 @@ spec:
ref: values
- repoURL: https://observability.buildth.ing/DevFW-CICD/stacks-instances
targetRevision: HEAD
path: "otc/edp.buildth.ing/stacks/core/argocd/manifests"
path: "otc/edp.buildth.ing/stacks/core/argocd/manifests"

View file

@ -5,6 +5,18 @@ configs:
params:
server.insecure: true
cm:
# This code never quite worked, always led to 503 errors
# In theory it allows access to ArgoCD via OIDC through Forgejo
# oidc.config: |
# name: FORGEJO
# issuer: https://dex.edp.buildth.ing
# clientID: controller-argocd-dex
# clientSecret: $dex-argo-client:clientSecret
# requestedScopes:
# - openid
# - profile
# - email
# - groups
application.resourceTrackingMethod: annotation
timeout.reconciliation: 60s
resource.exclusions: |
@ -18,10 +30,9 @@ configs:
- CiliumIdentity
clusters:
- "*"
accounts.provider-argocd: apiKey
url: https://argocd.edp.buildth.ing
rbac:
policy.csv: 'g, provider-argocd, role:admin'
policy.csv: 'g, DevFW, role:admin'
tls:
certificates:

View file

@ -0,0 +1,29 @@
---
# Deploys the CloudNativePG operator chart; values come from the
# stacks-instances repo via the multi-source "$values" ref pattern.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: cloudnative-pg
  namespace: argocd
  labels:
    env: dev
spec:
  project: default
  syncPolicy:
    automated:
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
    retry:
      # -1 retries forever; acceptable here because selfHeal reconverges.
      limit: -1
  destination:
    name: in-cluster
    namespace: cloudnative-pg
  sources:
    - repoURL: https://cloudnative-pg.github.io/charts
      chart: cloudnative-pg
      targetRevision: 0.26.1
      helm:
        valueFiles:
          - $values/otc/edp.buildth.ing/stacks/core/cloudnative-pg/values.yaml
    # Values-only source referenced as $values above.
    - repoURL: https://observability.buildth.ing/DevFW-CICD/stacks-instances
      targetRevision: HEAD
      ref: values

View file

@ -1,29 +1,31 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: dex
namespace: argocd
labels:
env: dev
spec:
project: default
syncPolicy:
automated:
selfHeal: true
syncOptions:
- CreateNamespace=true
retry:
limit: -1
destination:
name: in-cluster
namespace: dex
sources:
- repoURL: https://charts.dexidp.io
chart: dex
targetRevision: 0.23.0
helm:
valueFiles:
- $values/otc/edp.buildth.ing/stacks/core/dex/values.yaml
- repoURL: https://observability.buildth.ing/DevFW-CICD/stacks-instances
targetRevision: HEAD
ref: values
# NOTE Dex is disabled as it never quite worked, and was taking up pods which caused us to hit node capacity.
# apiVersion: argoproj.io/v1alpha1
# kind: Application
# metadata:
# name: dex
# namespace: argocd
# labels:
# env: dev
# spec:
# project: default
# syncPolicy:
# automated:
# selfHeal: true
# syncOptions:
# - CreateNamespace=true
# retry:
# limit: -1
# destination:
# name: in-cluster
# namespace: dex
# sources:
# - repoURL: https://charts.dexidp.io
# chart: dex
# targetRevision: 0.23.0
# helm:
# valueFiles:
# - $values/otc/edp.buildth.ing/stacks/core/dex/values.yaml
# - repoURL: https://observability.buildth.ing/DevFW-CICD/stacks-instances
# targetRevision: HEAD
# ref: values

View file

@ -67,7 +67,7 @@ config:
- id: controller-argocd-dex
name: ArgoCD Client
redirectURIs:
- "http://argocd.edp.buildth.ing/auth/callback"
- "https://argocd.edp.buildth.ing/auth/callback"
secretEnv: "OIDC_DEX_ARGO_CLIENT_SECRET"
- id: grafana
redirectURIs:

View file

@ -1,4 +1,4 @@
# This is only used for deploying older versions of infra-catalogue where the bucket name is not an output of the terragrunt modules# We use recreate to make sure only one instance with one version is running, because Forgejo might break or data gets inconsistant.
# This is only used for deploying older versions of infra-catalogue where the bucket name is not an output of the terragrunt modules# We use recreate to make sure only one instance with one version is running, because Forgejo might break or data gets inconsistant.
strategy:
type: Recreate
@ -166,7 +166,7 @@ service:
nodePort: 32222
externalTrafficPolicy: Cluster
annotations:
kubernetes.io/elb.id: 4a8b3649-08a8-4da4-8d3d-5aed3781cf94
kubernetes.io/elb.id: 4a8b3649-08a8-4da4-8d3d-5aed3781cf94
image:
pullPolicy: "IfNotPresent"

View file

@ -0,0 +1,29 @@
---
# Deploys the GARM chart from the in-house garm-helm repo; values come from
# the stacks-instances repo via the multi-source "$values" ref pattern.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: garm
  namespace: argocd
  labels:
    env: dev
spec:
  project: default
  syncPolicy:
    automated:
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
    retry:
      # -1 retries forever; acceptable here because selfHeal reconverges.
      limit: -1
  destination:
    name: in-cluster
    namespace: garm
  sources:
    - repoURL: https://edp.buildth.ing/DevFW-CICD/garm-helm
      path: charts/garm
      targetRevision: v0.0.4
      helm:
        valueFiles:
          - $values/otc/edp.buildth.ing/stacks/garm/garm/values.yaml
    # Values-only source referenced as $values above.
    - repoURL: https://observability.buildth.ing/DevFW-CICD/stacks-instances
      targetRevision: HEAD
      ref: values

View file

@ -0,0 +1,23 @@
ingress:
  enabled: true
  className: nginx
  annotations:
    cert-manager.io/cluster-issuer: main
    nginx.ingress.kubernetes.io/backend-protocol: HTTP
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
  hosts:
    - host: garm.edp.buildth.ing
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: garm-net-tls
      hosts:
        - garm.edp.buildth.ing
# Credentials and Secrets
credentials:
  edgeConnect:
    existingSecretName: "edge-credential"
gitea:
  url: "https://edp.buildth.ing"  # Required

View file

@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: simple-user-secret
namespace: observability
type: Opaque
stringData:
username: simple-user
password: simple-password

View file

@ -296,7 +296,8 @@ vmsingle:
# -- Enable deployment of ingress for server component
enabled: false
# -- Ingress annotations
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
# -- Ingress extra labels
@ -346,7 +347,8 @@ vmcluster:
resources:
requests:
storage: 10Gi
resources: {}
resources:
{}
# limits:
# cpu: "1"
# memory: 1500Mi
@ -363,7 +365,8 @@ vmcluster:
resources:
requests:
storage: 2Gi
resources: {}
resources:
{}
# limits:
# cpu: "1"
# memory: "1000Mi"
@ -376,7 +379,8 @@ vmcluster:
port: "8480"
replicaCount: 2
extraArgs: {}
resources: {}
resources:
{}
# limits:
# cpu: "1"
# memory: 1000Mi
@ -469,7 +473,8 @@ vmcluster:
enabled: false
# -- Ingress annotations
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
@ -635,7 +640,8 @@ alertmanager:
enabled: true
# -- (object) Extra alert templates
templateFiles: {}
templateFiles:
{}
# template_1.tmpl: |-
# {{ define "hello" -}}
# hello, Victoria!
@ -649,7 +655,8 @@ alertmanager:
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
@ -692,7 +699,8 @@ vmalert:
externalLabels: {}
# -- (object) Extra VMAlert annotation templates
templateFiles: {}
templateFiles:
{}
# template_1.tmpl: |-
# {{ define "hello" -}}
# hello, Victoria!
@ -715,7 +723,8 @@ vmalert:
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
@ -782,7 +791,7 @@ vmagent:
port: "8429"
selectAllByDefault: true
scrapeInterval: 20s
externalLabels:
externalLabels:
cluster_environment: "edp"
# For multi-cluster setups it is useful to use "cluster" label to identify the metrics source.
# For example:
@ -799,7 +808,8 @@ vmagent:
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
@ -858,7 +868,7 @@ defaultDatasources:
implementation: prometheus
# -- Configure additional grafana datasources (passed through tpl).
# Check [here](http://docs.grafana.org/administration/provisioning/#datasources) for details
extra:
extra:
- name: victoria-logs
access: proxy
type: VictoriaLogs
@ -902,7 +912,8 @@ grafana:
# See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
# ingressClassName: nginx
# Values can be templated
annotations: {}
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
labels: {}
@ -936,7 +947,7 @@ grafana:
matchLabels:
app.kubernetes.io/name: '{{ include "grafana.name" .Subcharts.grafana }}'
endpoints:
- port: "{{ .Values.grafana.service.portName }}"
- port: '{{ .Values.grafana.service.portName }}'
# -- prometheus-node-exporter dependency chart configuration. For possible values check [here](https://github.com/prometheus-community/helm-charts/blob/main/charts/prometheus-node-exporter/values.yaml)
prometheus-node-exporter:
@ -1067,7 +1078,7 @@ kubeApiServer:
# Component scraping the kube controller manager
kubeControllerManager:
# -- Enable kube controller manager metrics scraping
enabled: false
enabled: true
# -- If your kube controller manager is not deployed as a pod, specify IPs it can be found on
endpoints: []
@ -1200,7 +1211,7 @@ kubeEtcd:
# Component scraping kube scheduler
kubeScheduler:
# -- Enable KubeScheduler metrics scraping
enabled: false
enabled: true
# -- If your kube scheduler is not deployed as a pod, specify IPs it can be found on
endpoints: []
@ -1274,3 +1285,4 @@ kubeProxy:
# -- Add extra objects dynamically to this chart
extraObjects: []

View file

@ -5,11 +5,13 @@ metadata:
namespace: observability
spec:
username: simple-user
password: simple-password
passwordRef:
key: password
name: simple-user-secret
targetRefs:
- static:
url: http://vmsingle-o12y:8429
paths: ["/api/v1/write"]
- static:
url: http://vlogs-victorialogs:9428
paths: ["/insert/elasticsearch/.*"]
paths: ["/insert/elasticsearch/.*"]

View file

@ -201,13 +201,13 @@ defaultRules:
create: true
rules: {}
kubernetesSystemControllerManager:
create: true
create: false
rules: {}
kubeScheduler:
create: true
create: false
rules: {}
kubernetesSystemScheduler:
create: true
create: false
rules: {}
kubeStateMetrics:
create: true

View file

@ -0,0 +1,30 @@
---
# Equivalent manual install:
# helm upgrade --install --create-namespace --namespace terralist terralist oci://ghcr.io/terralist/helm-charts/terralist -f terralist-values.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: terralist
  namespace: argocd
  labels:
    env: dev
spec:
  project: default
  syncPolicy:
    automated:
      selfHeal: true
    syncOptions:
      - CreateNamespace=true
    retry:
      # -1 retries forever; acceptable here because selfHeal reconverges.
      limit: -1
  destination:
    name: in-cluster
    namespace: terralist
  sources:
    - repoURL: https://github.com/terralist/helm-charts
      path: charts/terralist
      targetRevision: terralist-0.8.1
      helm:
        valueFiles:
          - $values/otc/edp.buildth.ing/stacks/terralist/terralist/values.yaml
    # Values-only source referenced as $values above.
    - repoURL: https://observability.buildth.ing/DevFW-CICD/stacks-instances
      targetRevision: HEAD
      ref: values

View file

@ -0,0 +1,87 @@
controllers:
main:
strategy: Recreate
containers:
app:
env:
- name: TERRALIST_OAUTH_PROVIDER
value: oidc
- name: TERRALIST_OI_CLIENT_ID
valueFrom:
secretKeyRef:
name: oidc-credentials
key: client-id
- name: TERRALIST_OI_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: oidc-credentials
key: client-secret
- name: TERRALIST_OI_AUTHORIZE_URL
valueFrom:
secretKeyRef:
name: oidc-credentials
key: authorize-url
- name: TERRALIST_OI_TOKEN_URL
valueFrom:
secretKeyRef:
name: oidc-credentials
key: token-url
- name: TERRALIST_OI_USERINFO_URL
valueFrom:
secretKeyRef:
name: oidc-credentials
key: userinfo-url
- name: TERRALIST_OI_SCOPE
valueFrom:
secretKeyRef:
name: oidc-credentials
key: scope
- name: TERRALIST_TOKEN_SIGNING_SECRET
valueFrom:
secretKeyRef:
name: terralist-secret
key: token-signing-secret
- name: TERRALIST_COOKIE_SECRET
valueFrom:
secretKeyRef:
name: terralist-secret
key: cookie-secret
- name: TERRALIST_URL
value: https://terralist.edp.buildth.ing
- name: TERRALIST_SQLITE_PATH
value: /data/db.sqlite
- name: TERRALIST_LOCAL_STORE
value: /data/modules
- name: TERRALIST_PROVIDERS_ANONYMOUS_READ
value: "true"
ingress:
main:
enabled: true
className: nginx
annotations:
cert-manager.io/cluster-issuer: main
hosts:
- host: terralist.edp.buildth.ing
paths:
- path: /
pathType: Prefix
service:
identifier: main
port: http
tls:
- hosts:
- terralist.edp.buildth.ing
secretName: terralist-tls-secret
persistence:
data:
enabled: true
accessMode: ReadWriteOnce
size: 10Gi
retain: false
storageClass: "csi-disk"
annotations:
everest.io/disk-volume-type: GPSSD
globalMounts:
- path: /data