Automated upload for edp.buildth.ing

Authored by Automated pipeline on 2025-08-13 07:58:03 +00:00; committed by Actions pipeline
parent 00a382cc7f
commit 89437b3b6d
6 changed files with 94 additions and 138 deletions


@@ -5,57 +5,69 @@ metadata:
   namespace: gitea
 spec:
   schedule: "0 1 * * *"
   concurrencyPolicy: "Forbid"
   successfulJobsHistoryLimit: 5
   failedJobsHistoryLimit: 5
   startingDeadlineSeconds: 600 # 10 minutes
   jobTemplate:
     spec:
       # 60 min until backup - 10 min start - (backoffLimit * activeDeadlineSeconds) - some time sync buffer
       activeDeadlineSeconds: 1350
       backoffLimit: 2
       ttlSecondsAfterFinished: 259200 # 3 days
       template:
         spec:
           containers:
-            - name: rclone
-              image: rclone/rclone:1.70
-              imagePullPolicy: IfNotPresent
-              env:
-                - name: SOURCE_BUCKET
-                  valueFrom:
-                    secretKeyRef:
-                      name: forgejo-cloud-credentials
-                      key: bucket-name
-                - name: AWS_ACCESS_KEY_ID
-                  valueFrom:
-                    secretKeyRef:
-                      name: forgejo-cloud-credentials
-                      key: access-key
-                - name: AWS_SECRET_ACCESS_KEY
-                  valueFrom:
-                    secretKeyRef:
-                      name: forgejo-cloud-credentials
-                      key: secret-key
-              volumeMounts:
-                - name: rclone-config
-                  mountPath: /config/rclone
-                  readOnly: true
-                - name: backup-dir
-                  mountPath: /backup
-                  readOnly: false
-              command:
-                - /bin/sh
-                - -c
-                - |
-                  rclone sync source:/${SOURCE_BUCKET}/packages /backup -v --ignore-checksum
+            - name: rclone
+              image: rclone/rclone:1.70
+              imagePullPolicy: IfNotPresent
+              env:
+                - name: SOURCE_BUCKET
+                  valueFrom:
+                    secretKeyRef:
+                      name: forgejo-cloud-credentials
+                      key: bucket-name
+                - name: AWS_ACCESS_KEY_ID
+                  valueFrom:
+                    secretKeyRef:
+                      name: forgejo-cloud-credentials
+                      key: access-key
+                - name: AWS_SECRET_ACCESS_KEY
+                  valueFrom:
+                    secretKeyRef:
+                      name: forgejo-cloud-credentials
+                      key: secret-key
+              volumeMounts:
+                - name: rclone-config
+                  mountPath: /config/rclone
+                  readOnly: true
+                - name: backup-dir
+                  mountPath: /backup
+                  readOnly: false
+              command:
+                - /bin/sh
+                - -c
+                - |
+                  rclone sync source:/${SOURCE_BUCKET} /backup -v --ignore-checksum
           restartPolicy: OnFailure
           volumes:
-            - name: rclone-config
-              secret:
-                secretName: forgejo-s3-backup
-            - name: backup-dir
-              persistentVolumeClaim:
-                claimName: s3-backup
+            - name: rclone-config
+              secret:
+                secretName: forgejo-s3-backup
+            - name: backup-dir
+              persistentVolumeClaim:
+                claimName: s3-backup
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: s3-backup
+  namespace: gitea
+  annotations:
+    everest.io/disk-volume-type: SATA
+    everest.io/crypt-key-id: {{{ .Env.PVC_KMS_KEY_ID }}}
+spec:
+  storageClassName: csi-disk
+  accessModes:
+    - ReadWriteOnce
+  resources:
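Note on the job above: the sync command resolves the `source:` remote from the rclone config mounted at /config/rclone out of the `forgejo-s3-backup` secret. A minimal sketch of what that secret could carry, assuming an S3 remote named `source` with `env_auth = true` so rclone picks up the injected AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY; the `provider` and `endpoint` values are placeholders, not taken from this commit:

apiVersion: v1
kind: Secret
metadata:
  name: forgejo-s3-backup
  namespace: gitea
stringData:
  rclone.conf: |
    # hypothetical remote; the name must match "rclone sync source:..."
    [source]
    type = s3
    provider = Other                    # assumption: set to the real provider
    env_auth = true                     # read AWS_* credentials from the env
    endpoint = https://s3.example.com   # assumption: placeholder endpoint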


@@ -17,8 +17,10 @@ postgresql-ha:
   persistence:
     enabled: true
     size: 200Gi
     storageClass: csi-disk
+    annotations:
+      everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225
+      everest.io/disk-volume-type: GPSSD
   test:
     enabled: false


@@ -6,7 +6,12 @@ metadata:
     dashboards: "grafana"
 spec:
   persistentVolumeClaim:
+    metadata:
+      annotations:
+        everest.io/disk-volume-type: SATA
+        everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225
     spec:
+      storageClassName: csi-disk
       accessModes:
         - ReadWriteOnce
       resources:


@@ -11,8 +11,19 @@ spec:
           expr: sum by(cluster_environment) (up{pod=~"forgejo-server-.*"}) < 1
           for: 30s
           labels:
-            severity: major
+            severity: critical
             job: "{{ $labels.job }}"
           annotations:
             value: "{{ $value }}"
             description: 'forgejo is down in cluster environment {{ $labels.cluster_environment }}'
+    - name: forgejo-backup
+      rules:
+        - alert: forgejo s3 backup job failed
+          expr: max by(cluster_environment) (kube_job_status_failed{job_name=~"forgejo-s3-backup-.*"}) != 0
+          for: 30s
+          labels:
+            severity: critical
+            job: "{{ $labels.job }}"
+          annotations:
+            value: "{{ $value }}"
+            description: 'forgejo s3 backup job failed in cluster environment {{ $labels.cluster_environment }}'
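Note: kube_job_status_failed is exported by kube-state-metrics, so the new rule only fires in environments where that exporter is scraped. A hypothetical promtool-style unit test for the rule (vmalert-tool unittest accepts the same format); the file name and series labels are assumptions, and the templated job label is omitted from the expectation because it expands to empty after the max by(cluster_environment) aggregation:

rule_files:
  - forgejo-alerts.yaml   # assumption: the rendered rule file
evaluation_interval: 15s
tests:
  - interval: 15s
    input_series:
      # one backup job that starts failing after a minute
      - series: 'kube_job_status_failed{job_name="forgejo-s3-backup-123", cluster_environment="prod"}'
        values: '0 0 0 0 1 1 1 1'
    alert_rule_test:
      - eval_time: 2m
        alertname: forgejo s3 backup job failed
        exp_alerts:
          - exp_labels:
              severity: critical
              cluster_environment: prod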


@@ -9,7 +9,9 @@ spec:
   storageMetadata:
     annotations:
       everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225
+      everest.io/disk-volume-type: SATA
   storage:
+    storageClassName: csi-disk
     accessModes:
       - ReadWriteOnce
     resources:
@@ -21,4 +23,4 @@ spec:
       cpu: 500m
     limits:
       memory: 10Gi
-      cpu: 2
+      cpu: 2


@@ -289,7 +289,9 @@ vmsingle:
     storageMetadata:
       annotations:
         everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225
+        everest.io/disk-volume-type: SATA
     storage:
+      storageClassName: csi-disk
       accessModes:
         - ReadWriteOnce
       resources:
@@ -536,108 +538,30 @@ alertmanager:
   # If you're migrating existing config, please make sure that `.Values.alertmanager.config`:
   #   - with `useManagedConfig: false` has structure described [here](https://prometheus.io/docs/alerting/latest/configuration/).
  #   - with `useManagedConfig: true` has structure described [here](https://docs.victoriametrics.com/operator/api/#vmalertmanagerconfig).
-  useManagedConfig: false
+  useManagedConfig: true
   # -- (object) Alertmanager configuration
   config:
     route:
       receiver: "blackhole"
-      # group_by: ["alertgroup", "job"]
-      # group_wait: 30s
-      # group_interval: 5m
-      # repeat_interval: 12h
-      # routes:
-      #
-      #   # Duplicate code_owner routes to teams
-      #   # These will send alerts to team channels but continue
-      #   # processing through the rest of the tree to handled by on-call
-      #   - matchers:
-      #       - code_owner_channel!=""
-      #       - severity=~"info|warning|critical"
-      #     group_by: ["code_owner_channel", "alertgroup", "job"]
-      #     receiver: slack-code-owners
-      #
-      #   # Standard on-call routes
-      #   - matchers:
-      #       - severity=~"info|warning|critical"
-      #     receiver: slack-monitoring
-      #     continue: true
-      #
-    # inhibit_rules:
-    #   - target_matchers:
-    #       - severity=~"warning|info"
-    #     source_matchers:
-    #       - severity=critical
-    #     equal:
-    #       - cluster
-    #       - namespace
-    #       - alertname
-    #   - target_matchers:
-    #       - severity=info
-    #     source_matchers:
-    #       - severity=warning
-    #     equal:
-    #       - cluster
-    #       - namespace
-    #       - alertname
-    #   - target_matchers:
-    #       - severity=info
-    #     source_matchers:
-    #       - alertname=InfoInhibitor
-    #     equal:
-    #       - cluster
-    #       - namespace
+      routes:
+        - matchers:
+            - severity=~"critical|major"
+          receiver: outlook
     receivers:
       - name: blackhole
-      # - name: "slack-monitoring"
-      #   slack_configs:
-      #     - channel: "#channel"
-      #       send_resolved: true
-      #       title: '{{ template "slack.monzo.title" . }}'
-      #       icon_emoji: '{{ template "slack.monzo.icon_emoji" . }}'
-      #       color: '{{ template "slack.monzo.color" . }}'
-      #       text: '{{ template "slack.monzo.text" . }}'
-      #       actions:
-      #         - type: button
-      #           text: "Runbook :green_book:"
-      #           url: "{{ (index .Alerts 0).Annotations.runbook_url }}"
-      #         - type: button
-      #           text: "Query :mag:"
-      #           url: "{{ (index .Alerts 0).GeneratorURL }}"
-      #         - type: button
-      #           text: "Dashboard :grafana:"
-      #           url: "{{ (index .Alerts 0).Annotations.dashboard }}"
-      #         - type: button
-      #           text: "Silence :no_bell:"
-      #           url: '{{ template "__alert_silence_link" . }}'
-      #         - type: button
-      #           text: '{{ template "slack.monzo.link_button_text" . }}'
-      #           url: "{{ .CommonAnnotations.link_url }}"
-      # - name: slack-code-owners
-      #   slack_configs:
-      #     - channel: "#{{ .CommonLabels.code_owner_channel }}"
-      #       send_resolved: true
-      #       title: '{{ template "slack.monzo.title" . }}'
-      #       icon_emoji: '{{ template "slack.monzo.icon_emoji" . }}'
-      #       color: '{{ template "slack.monzo.color" . }}'
-      #       text: '{{ template "slack.monzo.text" . }}'
-      #       actions:
-      #         - type: button
-      #           text: "Runbook :green_book:"
-      #           url: "{{ (index .Alerts 0).Annotations.runbook }}"
-      #         - type: button
-      #           text: "Query :mag:"
-      #           url: "{{ (index .Alerts 0).GeneratorURL }}"
-      #         - type: button
-      #           text: "Dashboard :grafana:"
-      #           url: "{{ (index .Alerts 0).Annotations.dashboard }}"
-      #         - type: button
-      #           text: "Silence :no_bell:"
-      #           url: '{{ template "__alert_silence_link" . }}'
-      #         - type: button
-      #           text: '{{ template "slack.monzo.link_button_text" . }}'
-      #           url: "{{ .CommonAnnotations.link_url }}"
-      #
+      - name: outlook
+        email_configs:
+          - smarthost: 'mail.mms-support.de:465'
+            auth_username: 'ipcei-cis-devfw@mms-support.de'
+            auth_password:
+              name: email-user-credentials
+              key: connection-string
+            from: '"IPCEI CIS DevFW" <ipcei-cis-devfw@mms-support.de>'
+            to: 'f9f9953a.mg.telekom.de@de.teams.ms'
+            headers:
+              subject: 'Grafana Mail Alerts'
+            require_tls: false
   # -- Better alert templates for [slack source](https://gist.github.com/milesbxf/e2744fc90e9c41b47aa47925f8ff6512)
   monzoTemplate:
     enabled: true
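Note: with useManagedConfig: true the chart hands this config to the operator as a VMAlertmanagerConfig, which is why auth_password can be a secret-key selector (name/key) rather than a literal string as in plain Alertmanager configuration. A sketch of the referenced secret, assuming connection-string simply holds the SMTP password:

apiVersion: v1
kind: Secret
metadata:
  name: email-user-credentials   # must live in the Alertmanager's namespace
stringData:
  connection-string: "<smtp password>"   # assumption: plain auth password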
@@ -880,7 +804,7 @@ grafana:
   enabled: false
   # all values for grafana helm chart can be specified here
   persistence:
-    enabled: true
+    enabled: false
     type: pvc
     storageClassName: "default"
   grafana.ini:
@@ -1096,7 +1020,7 @@ kubeApiServer:
 # Component scraping the kube controller manager
 kubeControllerManager:
   # -- Enable kube controller manager metrics scraping
-  enabled: true
+  enabled: false
   # -- If your kube controller manager is not deployed as a pod, specify IPs it can be found on
   endpoints: []
@@ -1229,7 +1153,7 @@ kubeEtcd:
 # Component scraping kube scheduler
 kubeScheduler:
   # -- Enable KubeScheduler metrics scraping
-  enabled: true
+  enabled: false
   # -- If your kube scheduler is not deployed as a pod, specify IPs it can be found on
   endpoints: []
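Note: disabling controller-manager and scheduler scraping matches a managed control plane, where these components do not run as scrapable pods. On a self-managed cluster the scrape could instead be pointed at the control-plane addresses, a sketch with hypothetical IPs:

kubeScheduler:
  enabled: true
  # hypothetical control-plane addresses; the scheduler's metrics
  # port must be reachable from inside the cluster
  endpoints:
    - 10.0.0.10
    - 10.0.0.11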