Automated upload for forgejo-test.t09.de

This commit is contained in:
Automated pipeline 2025-08-13 08:26:49 +00:00 committed by Actions pipeline
parent ec5a7b43c9
commit 3bb4d1cb60
5 changed files with 77 additions and 44 deletions

View file

@@ -5,8 +5,16 @@ metadata:
namespace: gitea
spec:
schedule: "0 1 * * *"
concurrencyPolicy: "Forbid"
successfulJobsHistoryLimit: 5
failedJobsHistoryLimit: 5
startingDeadlineSeconds: 600 # 10 minutes
jobTemplate:
spec:
# 60 min until backup - 10 min start - (backoffLimit * activeDeadlineSeconds) - some time sync buffer
activeDeadlineSeconds: 1350
backoffLimit: 2
ttlSecondsAfterFinished: 259200 # 3 days
template:
spec:
containers:
@@ -40,7 +48,7 @@ spec:
- /bin/sh
- -c
- |
rclone sync source:/${SOURCE_BUCKET}/packages /backup -v --ignore-checksum
rclone sync source:/${SOURCE_BUCKET} /backup -v --ignore-checksum
restartPolicy: OnFailure
volumes:
- name: rclone-config
@@ -55,7 +63,11 @@ kind: PersistentVolumeClaim
metadata:
name: s3-backup
namespace: gitea
annotations:
everest.io/disk-volume-type: SATA
everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d
spec:
storageClassName: csi-disk
accessModes:
- ReadWriteOnce
resources:

View file

@@ -6,7 +6,12 @@ metadata:
dashboards: "grafana"
spec:
persistentVolumeClaim:
metadata:
annotations:
everest.io/disk-volume-type: SATA
everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d
spec:
storageClassName: csi-disk
accessModes:
- ReadWriteOnce
resources:

View file

@@ -11,8 +11,19 @@ spec:
expr: sum by(cluster_environment) (up{pod=~"forgejo-server-.*"}) < 1
for: 30s
labels:
severity: major
severity: critical
job: "{{ $labels.job }}"
annotations:
value: "{{ $value }}"
description: 'forgejo is down in cluster environment {{ $labels.cluster_environment }}'
- name: forgejo-backup
rules:
- alert: ForgejoS3BackupJobFailed
expr: max by(cluster_environment) (kube_job_status_failed{job_name=~"forgejo-s3-backup-.*"}) != 0
for: 30s
labels:
severity: critical
job: "{{ $labels.job }}"
annotations:
value: "{{ $value }}"
description: 'forgejo s3 backup job failed in cluster environment {{ $labels.cluster_environment }}'

View file

@@ -9,7 +9,9 @@ spec:
storageMetadata:
annotations:
everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d
everest.io/disk-volume-type: SATA
storage:
storageClassName: csi-disk
accessModes:
- ReadWriteOnce
resources:

View file

@@ -289,7 +289,9 @@ vmsingle:
storageMetadata:
annotations:
everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d
everest.io/disk-volume-type: SATA
storage:
storageClassName: csi-disk
accessModes:
- ReadWriteOnce
resources:
@@ -540,12 +542,13 @@ alertmanager:
# -- (object) Alertmanager configuration
config:
route:
receiver: "outlook"
receiver: "blackhole"
routes:
- matchers:
- alertname=~".*"
- severity=~"critical|major"
receiver: outlook
receivers:
- name: blackhole
- name: outlook
email_configs:
- smarthost: 'mail.mms-support.de:465'
@@ -801,7 +804,7 @@ grafana:
enabled: false
# all values for grafana helm chart can be specified here
persistence:
enabled: true
enabled: false
type: pvc
storageClassName: "default"
grafana.ini:
@@ -1017,7 +1020,7 @@ kubeApiServer:
# Component scraping the kube controller manager
kubeControllerManager:
# -- Enable kube controller manager metrics scraping
enabled: true
enabled: false
# -- If your kube controller manager is not deployed as a pod, specify IPs it can be found on
endpoints: []
@@ -1150,7 +1153,7 @@ kubeEtcd:
# Component scraping kube scheduler
kubeScheduler:
# -- Enable KubeScheduler metrics scraping
enabled: true
enabled: false
# -- If your kube scheduler is not deployed as a pod, specify IPs it can be found on
endpoints: []