Automated upload for forgejo-test.t09.de

This commit is contained in:
Automated pipeline 2025-08-13 08:26:49 +00:00 committed by Actions pipeline
parent ec5a7b43c9
commit 3bb4d1cb60
5 changed files with 77 additions and 44 deletions

View file

@@ -5,57 +5,69 @@ metadata:
namespace: gitea namespace: gitea
spec: spec:
schedule: "0 1 * * *" schedule: "0 1 * * *"
concurrencyPolicy: "Forbid"
successfulJobsHistoryLimit: 5
failedJobsHistoryLimit: 5
startingDeadlineSeconds: 600 # 10 minutes
jobTemplate: jobTemplate:
spec: spec:
# 60 min until backup - 10 min start - (backoffLimit * activeDeadlineSeconds) - some time sync buffer
activeDeadlineSeconds: 1350
backoffLimit: 2
ttlSecondsAfterFinished: 259200 # 3 days
template: template:
spec: spec:
containers: containers:
- name: rclone - name: rclone
image: rclone/rclone:1.70 image: rclone/rclone:1.70
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
env: env:
- name: SOURCE_BUCKET - name: SOURCE_BUCKET
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
name: forgejo-cloud-credentials name: forgejo-cloud-credentials
key: bucket-name key: bucket-name
- name: AWS_ACCESS_KEY_ID - name: AWS_ACCESS_KEY_ID
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
name: forgejo-cloud-credentials name: forgejo-cloud-credentials
key: access-key key: access-key
- name: AWS_SECRET_ACCESS_KEY - name: AWS_SECRET_ACCESS_KEY
valueFrom: valueFrom:
secretKeyRef: secretKeyRef:
name: forgejo-cloud-credentials name: forgejo-cloud-credentials
key: secret-key key: secret-key
volumeMounts: volumeMounts:
- name: rclone-config - name: rclone-config
mountPath: /config/rclone mountPath: /config/rclone
readOnly: true readOnly: true
- name: backup-dir - name: backup-dir
mountPath: /backup mountPath: /backup
readOnly: false readOnly: false
command: command:
- /bin/sh - /bin/sh
- -c - -c
- | - |
rclone sync source:/${SOURCE_BUCKET}/packages /backup -v --ignore-checksum rclone sync source:/${SOURCE_BUCKET} /backup -v --ignore-checksum
restartPolicy: OnFailure restartPolicy: OnFailure
volumes: volumes:
- name: rclone-config - name: rclone-config
secret: secret:
secretName: forgejo-s3-backup secretName: forgejo-s3-backup
- name: backup-dir - name: backup-dir
persistentVolumeClaim: persistentVolumeClaim:
claimName: s3-backup claimName: s3-backup
--- ---
apiVersion: v1 apiVersion: v1
kind: PersistentVolumeClaim kind: PersistentVolumeClaim
metadata: metadata:
name: s3-backup name: s3-backup
namespace: gitea namespace: gitea
annotations:
everest.io/disk-volume-type: SATA
everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d
spec: spec:
storageClassName: csi-disk
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:

View file

@@ -6,7 +6,12 @@ metadata:
dashboards: "grafana" dashboards: "grafana"
spec: spec:
persistentVolumeClaim: persistentVolumeClaim:
metadata:
annotations:
everest.io/disk-volume-type: SATA
everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d
spec: spec:
storageClassName: csi-disk
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:

View file

@@ -11,8 +11,19 @@ spec:
expr: sum by(cluster_environment) (up{pod=~"forgejo-server-.*"}) < 1 expr: sum by(cluster_environment) (up{pod=~"forgejo-server-.*"}) < 1
for: 30s for: 30s
labels: labels:
severity: major severity: critical
job: "{{ $labels.job }}" job: "{{ $labels.job }}"
annotations: annotations:
value: "{{ $value }}" value: "{{ $value }}"
description: 'forgejo is down in cluster environment {{ $labels.cluster_environment }}' description: 'forgejo is down in cluster environment {{ $labels.cluster_environment }}'
- name: forgejo-backup
rules:
- alert: ForgejoS3BackupJobFailed
expr: max by(cluster_environment) (kube_job_status_failed{job_name=~"forgejo-s3-backup-.*"}) != 0
for: 30s
labels:
severity: critical
job: "{{ $labels.job }}"
annotations:
value: "{{ $value }}"
description: 'forgejo s3 backup job failed in cluster environment {{ $labels.cluster_environment }}'

View file

@@ -9,7 +9,9 @@ spec:
storageMetadata: storageMetadata:
annotations: annotations:
everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d
everest.io/disk-volume-type: SATA
storage: storage:
storageClassName: csi-disk
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
@@ -21,4 +23,4 @@ spec:
cpu: 500m cpu: 500m
limits: limits:
memory: 10Gi memory: 10Gi
cpu: 2 cpu: 2

View file

@@ -289,7 +289,9 @@ vmsingle:
storageMetadata: storageMetadata:
annotations: annotations:
everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d
everest.io/disk-volume-type: SATA
storage: storage:
storageClassName: csi-disk
accessModes: accessModes:
- ReadWriteOnce - ReadWriteOnce
resources: resources:
@@ -540,12 +542,13 @@ alertmanager:
# -- (object) Alertmanager configuration # -- (object) Alertmanager configuration
config: config:
route: route:
receiver: "outlook" receiver: "blackhole"
routes: routes:
- matchers: - matchers:
- alertname=~".*" - severity=~"critical|major"
receiver: outlook receiver: outlook
receivers: receivers:
- name: blackhole
- name: outlook - name: outlook
email_configs: email_configs:
- smarthost: 'mail.mms-support.de:465' - smarthost: 'mail.mms-support.de:465'
@@ -801,7 +804,7 @@ grafana:
enabled: false enabled: false
# all values for grafana helm chart can be specified here # all values for grafana helm chart can be specified here
persistence: persistence:
enabled: true enabled: false
type: pvc type: pvc
storageClassName: "default" storageClassName: "default"
grafana.ini: grafana.ini:
@@ -1017,7 +1020,7 @@ kubeApiServer:
# Component scraping the kube controller manager # Component scraping the kube controller manager
kubeControllerManager: kubeControllerManager:
# -- Enable kube controller manager metrics scraping # -- Enable kube controller manager metrics scraping
enabled: true enabled: false
# -- If your kube controller manager is not deployed as a pod, specify IPs it can be found on # -- If your kube controller manager is not deployed as a pod, specify IPs it can be found on
endpoints: [] endpoints: []
@@ -1150,7 +1153,7 @@ kubeEtcd:
# Component scraping kube scheduler # Component scraping kube scheduler
kubeScheduler: kubeScheduler:
# -- Enable KubeScheduler metrics scraping # -- Enable KubeScheduler metrics scraping
enabled: true enabled: false
# -- If your kube scheduler is not deployed as a pod, specify IPs it can be found on # -- If your kube scheduler is not deployed as a pod, specify IPs it can be found on
endpoints: [] endpoints: []