Merge branch 'main' of ssh://edp.buildth.ing/DevFW-CICD/stacks-instances
Commit 2b0c062a2a

15 changed files with 175 additions and 186 deletions
@@ -5,57 +5,69 @@ metadata:
  namespace: gitea
spec:
  schedule: "0 1 * * *"
  concurrencyPolicy: "Forbid"
  successfulJobsHistoryLimit: 5
  failedJobsHistoryLimit: 5
  startingDeadlineSeconds: 600 # 10 minutes
  jobTemplate:
    spec:
      # 60 min until backup - 10 min start - (backoffLimit * activeDeadlineSeconds) - some time sync buffer
      activeDeadlineSeconds: 1350
      backoffLimit: 2
      ttlSecondsAfterFinished: 259200 #
      template:
        spec:
          containers:
            - name: rclone
              image: rclone/rclone:1.70
              imagePullPolicy: IfNotPresent
              env:
                - name: SOURCE_BUCKET
                  valueFrom:
                    secretKeyRef:
                      name: forgejo-cloud-credentials
                      key: bucket-name
                - name: AWS_ACCESS_KEY_ID
                  valueFrom:
                    secretKeyRef:
                      name: forgejo-cloud-credentials
                      key: access-key
                - name: AWS_SECRET_ACCESS_KEY
                  valueFrom:
                    secretKeyRef:
                      name: forgejo-cloud-credentials
                      key: secret-key
              volumeMounts:
                - name: rclone-config
                  mountPath: /config/rclone
                  readOnly: true
                - name: backup-dir
                  mountPath: /backup
                  readOnly: false
              command:
                - /bin/sh
                - -c
                - |
                  rclone sync source:/${SOURCE_BUCKET}/packages /backup -v --ignore-checksum
            - name: rclone
              image: rclone/rclone:1.70
              imagePullPolicy: IfNotPresent
              env:
                - name: SOURCE_BUCKET
                  valueFrom:
                    secretKeyRef:
                      name: forgejo-cloud-credentials
                      key: bucket-name
                - name: AWS_ACCESS_KEY_ID
                  valueFrom:
                    secretKeyRef:
                      name: forgejo-cloud-credentials
                      key: access-key
                - name: AWS_SECRET_ACCESS_KEY
                  valueFrom:
                    secretKeyRef:
                      name: forgejo-cloud-credentials
                      key: secret-key
              volumeMounts:
                - name: rclone-config
                  mountPath: /config/rclone
                  readOnly: true
                - name: backup-dir
                  mountPath: /backup
                  readOnly: false
              command:
                - /bin/sh
                - -c
                - |
                  rclone sync source:/${SOURCE_BUCKET} /backup -v --ignore-checksum
          restartPolicy: OnFailure
          volumes:
            - name: rclone-config
              secret:
                secretName: forgejo-s3-backup
            - name: backup-dir
              persistentVolumeClaim:
                claimName: s3-backup
            - name: rclone-config
              secret:
                secretName: forgejo-s3-backup
            - name: backup-dir
              persistentVolumeClaim:
                claimName: s3-backup
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: s3-backup
  namespace: gitea
  annotations:
    everest.io/disk-volume-type: SATA
    everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225
spec:
  storageClassName: csi-disk
  accessModes:
    - ReadWriteOnce
  resources:
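Note: the numbers in the deadline comment add up to the 60-minute window it mentions: 600 s starting deadline plus 2 x 1350 s of job runtime is about 3300 s, leaving roughly 300 s of buffer. The sync command also assumes an rclone remote named `source`, defined in the rclone.conf that the `forgejo-s3-backup` secret mounts at /config/rclone. That secret is not part of this diff; a minimal sketch of the shape it is assumed to have (the endpoint is a placeholder):

apiVersion: v1
kind: Secret
metadata:
  name: forgejo-s3-backup
  namespace: gitea
stringData:
  rclone.conf: |
    # remote "source" used by `rclone sync source:/${SOURCE_BUCKET} ...`
    [source]
    type = s3
    provider = Other
    env_auth = true                       # picks up AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY from the env block above
    endpoint = https://obs.example.com    # placeholder: S3-compatible endpoint of the target cloud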
@@ -17,8 +17,10 @@ postgresql-ha:
persistence:
  enabled: true
  size: 200Gi
  storageClass: csi-disk
  annotations:
    everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225
    everest.io/disk-volume-type: GPSSD

test:
  enabled: false
@@ -6,7 +6,12 @@ metadata:
    dashboards: "grafana"
spec:
  persistentVolumeClaim:
    metadata:
      annotations:
        everest.io/disk-volume-type: SATA
        everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225
    spec:
      storageClassName: csi-disk
      accessModes:
        - ReadWriteOnce
      resources:
@@ -11,8 +11,19 @@ spec:
          expr: sum by(cluster_environment) (up{pod=~"forgejo-server-.*"}) < 1
          for: 30s
          labels:
            severity: major
            severity: critical
            job: "{{ $labels.job }}"
          annotations:
            value: "{{ $value }}"
            description: 'forgejo is down in cluster environment {{ $labels.cluster_environment }}'
    - name: forgejo-backup
      rules:
        - alert: forgejo s3 backup job failed
          expr: max by(cluster_environment) (kube_job_status_failed{job_name=~"forgejo-s3-backup-.*"}) != 0
          for: 30s
          labels:
            severity: critical
            job: "{{ $labels.job }}"
          annotations:
            value: "{{ $value }}"
            description: 'forgejo s3 backup job failed in cluster environment {{ $labels.cluster_environment }}'
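For context, these alerting rules presumably sit in a VictoriaMetrics VMRule object; the wrapper lines are outside this hunk, so the sketch below is an assumption (object, group, and alert names marked as illustrative are not from this commit; the expressions are taken from the hunk):

apiVersion: operator.victoriametrics.com/v1beta1
kind: VMRule
metadata:
  name: forgejo-alerts              # illustrative name
spec:
  groups:
    - name: forgejo                 # assumed name of the group holding the rule above
      rules:
        - alert: forgejo down       # illustrative alert name
          expr: sum by(cluster_environment) (up{pod=~"forgejo-server-.*"}) < 1
          for: 30s
          labels:
            severity: critical
    - name: forgejo-backup          # added by this hunk
      rules:
        - alert: forgejo s3 backup job failed
          expr: max by(cluster_environment) (kube_job_status_failed{job_name=~"forgejo-s3-backup-.*"}) != 0
          for: 30s
          labels:
            severity: critical

The second expression relies on kube-state-metrics job metrics being scraped.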
@@ -9,7 +9,9 @@ spec:
  storageMetadata:
    annotations:
      everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225
      everest.io/disk-volume-type: SATA
  storage:
    storageClassName: csi-disk
    accessModes:
      - ReadWriteOnce
    resources:
@@ -21,4 +23,4 @@ spec:
      cpu: 500m
    limits:
      memory: 10Gi
      cpu: 2
      cpu: 2
@@ -289,7 +289,9 @@ vmsingle:
    storageMetadata:
      annotations:
        everest.io/crypt-key-id: 7032bf53-33aa-4bfa-bca2-052df19f6225
        everest.io/disk-volume-type: SATA
    storage:
      storageClassName: csi-disk
      accessModes:
        - ReadWriteOnce
      resources:
@@ -536,108 +538,30 @@ alertmanager:
  # If you're migrating existing config, please make sure that `.Values.alertmanager.config`:
  # - with `useManagedConfig: false` has structure described [here](https://prometheus.io/docs/alerting/latest/configuration/).
  # - with `useManagedConfig: true` has structure described [here](https://docs.victoriametrics.com/operator/api/#vmalertmanagerconfig).
  useManagedConfig: false
  useManagedConfig: true
  # -- (object) Alertmanager configuration
  config:
    route:
      receiver: "blackhole"
      # group_by: ["alertgroup", "job"]
      # group_wait: 30s
      # group_interval: 5m
      # repeat_interval: 12h
      # routes:
      #
      #   # Duplicate code_owner routes to teams
      #   # These will send alerts to team channels but continue
      #   # processing through the rest of the tree to handled by on-call
      #   - matchers:
      #       - code_owner_channel!=""
      #       - severity=~"info|warning|critical"
      #     group_by: ["code_owner_channel", "alertgroup", "job"]
      #     receiver: slack-code-owners
      #
      #   # Standard on-call routes
      #   - matchers:
      #       - severity=~"info|warning|critical"
      #     receiver: slack-monitoring
      #     continue: true
      #
      # inhibit_rules:
      #   - target_matchers:
      #       - severity=~"warning|info"
      #     source_matchers:
      #       - severity=critical
      #     equal:
      #       - cluster
      #       - namespace
      #       - alertname
      #   - target_matchers:
      #       - severity=info
      #     source_matchers:
      #       - severity=warning
      #     equal:
      #       - cluster
      #       - namespace
      #       - alertname
      #   - target_matchers:
      #       - severity=info
      #     source_matchers:
      #       - alertname=InfoInhibitor
      #     equal:
      #       - cluster
      #       - namespace

      routes:
        - matchers:
            - severity=~"critical|major"
          receiver: outlook
    receivers:
      - name: blackhole
      # - name: "slack-monitoring"
      #   slack_configs:
      #     - channel: "#channel"
      #       send_resolved: true
      #       title: '{{ template "slack.monzo.title" . }}'
      #       icon_emoji: '{{ template "slack.monzo.icon_emoji" . }}'
      #       color: '{{ template "slack.monzo.color" . }}'
      #       text: '{{ template "slack.monzo.text" . }}'
      #       actions:
      #         - type: button
      #           text: "Runbook :green_book:"
      #           url: "{{ (index .Alerts 0).Annotations.runbook_url }}"
      #         - type: button
      #           text: "Query :mag:"
      #           url: "{{ (index .Alerts 0).GeneratorURL }}"
      #         - type: button
      #           text: "Dashboard :grafana:"
      #           url: "{{ (index .Alerts 0).Annotations.dashboard }}"
      #         - type: button
      #           text: "Silence :no_bell:"
      #           url: '{{ template "__alert_silence_link" . }}'
      #         - type: button
      #           text: '{{ template "slack.monzo.link_button_text" . }}'
      #           url: "{{ .CommonAnnotations.link_url }}"
      # - name: slack-code-owners
      #   slack_configs:
      #     - channel: "#{{ .CommonLabels.code_owner_channel }}"
      #       send_resolved: true
      #       title: '{{ template "slack.monzo.title" . }}'
      #       icon_emoji: '{{ template "slack.monzo.icon_emoji" . }}'
      #       color: '{{ template "slack.monzo.color" . }}'
      #       text: '{{ template "slack.monzo.text" . }}'
      #       actions:
      #         - type: button
      #           text: "Runbook :green_book:"
      #           url: "{{ (index .Alerts 0).Annotations.runbook }}"
      #         - type: button
      #           text: "Query :mag:"
      #           url: "{{ (index .Alerts 0).GeneratorURL }}"
      #         - type: button
      #           text: "Dashboard :grafana:"
      #           url: "{{ (index .Alerts 0).Annotations.dashboard }}"
      #         - type: button
      #           text: "Silence :no_bell:"
      #           url: '{{ template "__alert_silence_link" . }}'
      #         - type: button
      #           text: '{{ template "slack.monzo.link_button_text" . }}'
      #           url: "{{ .CommonAnnotations.link_url }}"
      #
      - name: outlook
        email_configs:
          - smarthost: 'mail.mms-support.de:465'
            auth_username: 'ipcei-cis-devfw@mms-support.de'
            auth_password:
              name: email-user-credentials
              key: connection-string
            from: '"IPCEI CIS DevFW" <ipcei-cis-devfw@mms-support.de>'
            to: 'f9f9953a.mg.telekom.de@de.teams.ms'
            headers:
              subject: 'Grafana Mail Alerts'
            require_tls: false

  # -- Better alert templates for [slack source](https://gist.github.com/milesbxf/e2744fc90e9c41b47aa47925f8ff6512)
  monzoTemplate:
    enabled: true
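With useManagedConfig switched to true, the config block is rendered into a VMAlertmanagerConfig object, which is presumably why auth_password is given as a secret key selector rather than a literal password. The referenced secret is not part of this commit; an assumed minimal shape:

apiVersion: v1
kind: Secret
metadata:
  name: email-user-credentials     # namespace must match the managed Alertmanager; omitted here
stringData:
  connection-string: <smtp-password>   # placeholder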
@@ -880,7 +804,7 @@ grafana:
  enabled: false
  # all values for grafana helm chart can be specified here
  persistence:
    enabled: true
    enabled: false
    type: pvc
    storageClassName: "default"
  grafana.ini:
@@ -1096,7 +1020,7 @@ kubeApiServer:
# Component scraping the kube controller manager
kubeControllerManager:
  # -- Enable kube controller manager metrics scraping
  enabled: true
  enabled: false

  # -- If your kube controller manager is not deployed as a pod, specify IPs it can be found on
  endpoints: []
@@ -1229,7 +1153,7 @@ kubeEtcd:
# Component scraping kube scheduler
kubeScheduler:
  # -- Enable KubeScheduler metrics scraping
  enabled: true
  enabled: false

  # -- If your kube scheduler is not deployed as a pod, specify IPs it can be found on
  endpoints: []
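Both control-plane scrapers are switched off here. If the controller manager and scheduler were reachable (for example on a self-managed cluster), they could be re-enabled by listing their IPs, as the chart comments suggest; the values below are illustrative only:

kubeScheduler:
  enabled: true
  endpoints:          # illustrative IPs, not part of this commit
    - 192.168.0.10
    - 192.168.0.11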
@@ -5,57 +5,69 @@ metadata:
  namespace: gitea
spec:
  schedule: "0 1 * * *"
  concurrencyPolicy: "Forbid"
  successfulJobsHistoryLimit: 5
  failedJobsHistoryLimit: 5
  startingDeadlineSeconds: 600 # 10 minutes
  jobTemplate:
    spec:
      # 60 min until backup - 10 min start - (backoffLimit * activeDeadlineSeconds) - some time sync buffer
      activeDeadlineSeconds: 1350
      backoffLimit: 2
      ttlSecondsAfterFinished: 259200 #
      template:
        spec:
          containers:
            - name: rclone
              image: rclone/rclone:1.70
              imagePullPolicy: IfNotPresent
              env:
                - name: SOURCE_BUCKET
                  valueFrom:
                    secretKeyRef:
                      name: forgejo-cloud-credentials
                      key: bucket-name
                - name: AWS_ACCESS_KEY_ID
                  valueFrom:
                    secretKeyRef:
                      name: forgejo-cloud-credentials
                      key: access-key
                - name: AWS_SECRET_ACCESS_KEY
                  valueFrom:
                    secretKeyRef:
                      name: forgejo-cloud-credentials
                      key: secret-key
              volumeMounts:
                - name: rclone-config
                  mountPath: /config/rclone
                  readOnly: true
                - name: backup-dir
                  mountPath: /backup
                  readOnly: false
              command:
                - /bin/sh
                - -c
                - |
                  rclone sync source:/${SOURCE_BUCKET}/packages /backup -v --ignore-checksum
            - name: rclone
              image: rclone/rclone:1.70
              imagePullPolicy: IfNotPresent
              env:
                - name: SOURCE_BUCKET
                  valueFrom:
                    secretKeyRef:
                      name: forgejo-cloud-credentials
                      key: bucket-name
                - name: AWS_ACCESS_KEY_ID
                  valueFrom:
                    secretKeyRef:
                      name: forgejo-cloud-credentials
                      key: access-key
                - name: AWS_SECRET_ACCESS_KEY
                  valueFrom:
                    secretKeyRef:
                      name: forgejo-cloud-credentials
                      key: secret-key
              volumeMounts:
                - name: rclone-config
                  mountPath: /config/rclone
                  readOnly: true
                - name: backup-dir
                  mountPath: /backup
                  readOnly: false
              command:
                - /bin/sh
                - -c
                - |
                  rclone sync source:/${SOURCE_BUCKET} /backup -v --ignore-checksum
          restartPolicy: OnFailure
          volumes:
            - name: rclone-config
              secret:
                secretName: forgejo-s3-backup
            - name: backup-dir
              persistentVolumeClaim:
                claimName: s3-backup
            - name: rclone-config
              secret:
                secretName: forgejo-s3-backup
            - name: backup-dir
              persistentVolumeClaim:
                claimName: s3-backup
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: s3-backup
  namespace: gitea
  annotations:
    everest.io/disk-volume-type: SATA
    everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d
spec:
  storageClassName: csi-disk
  accessModes:
    - ReadWriteOnce
  resources:
@@ -6,7 +6,12 @@ metadata:
    dashboards: "grafana"
spec:
  persistentVolumeClaim:
    metadata:
      annotations:
        everest.io/disk-volume-type: SATA
        everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d
    spec:
      storageClassName: csi-disk
      accessModes:
        - ReadWriteOnce
      resources:
@@ -11,8 +11,19 @@ spec:
          expr: sum by(cluster_environment) (up{pod=~"forgejo-server-.*"}) < 1
          for: 30s
          labels:
            severity: major
            severity: critical
            job: "{{ $labels.job }}"
          annotations:
            value: "{{ $value }}"
            description: 'forgejo is down in cluster environment {{ $labels.cluster_environment }}'
    - name: forgejo-backup
      rules:
        - alert: forgejo s3 backup job failed
          expr: max by(cluster_environment) (kube_job_status_failed{job_name=~"forgejo-s3-backup-.*"}) != 0
          for: 30s
          labels:
            severity: critical
            job: "{{ $labels.job }}"
          annotations:
            value: "{{ $value }}"
            description: 'forgejo s3 backup job failed in cluster environment {{ $labels.cluster_environment }}'
@@ -9,7 +9,9 @@ spec:
  storageMetadata:
    annotations:
      everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d
      everest.io/disk-volume-type: SATA
  storage:
    storageClassName: csi-disk
    accessModes:
      - ReadWriteOnce
    resources:
@@ -21,4 +23,4 @@ spec:
      cpu: 500m
    limits:
      memory: 10Gi
      cpu: 2
      cpu: 2
@@ -289,7 +289,9 @@ vmsingle:
    storageMetadata:
      annotations:
        everest.io/crypt-key-id: b0e0a24d-d5a6-4a16-b745-8af2ed8bf46d
        everest.io/disk-volume-type: SATA
    storage:
      storageClassName: csi-disk
      accessModes:
        - ReadWriteOnce
      resources:
@@ -540,12 +542,13 @@ alertmanager:
  # -- (object) Alertmanager configuration
  config:
    route:
      receiver: "outlook"
      receiver: "blackhole"
      routes:
        - matchers:
            - alertname=~".*"
            - severity=~"critical|major"
          receiver: outlook
    receivers:
      - name: blackhole
      - name: outlook
        email_configs:
          - smarthost: 'mail.mms-support.de:465'
@@ -801,7 +804,7 @@ grafana:
  enabled: false
  # all values for grafana helm chart can be specified here
  persistence:
    enabled: true
    enabled: false
    type: pvc
    storageClassName: "default"
  grafana.ini:
@@ -1017,7 +1020,7 @@ kubeApiServer:
# Component scraping the kube controller manager
kubeControllerManager:
  # -- Enable kube controller manager metrics scraping
  enabled: true
  enabled: false

  # -- If your kube controller manager is not deployed as a pod, specify IPs it can be found on
  endpoints: []
@@ -1150,7 +1153,7 @@ kubeEtcd:
# Component scraping kube scheduler
kubeScheduler:
  # -- Enable KubeScheduler metrics scraping
  enabled: true
  enabled: false

  # -- If your kube scheduler is not deployed as a pod, specify IPs it can be found on
  endpoints: []
@@ -64,7 +64,7 @@ metadata:
  name: s3-backup
  namespace: gitea
  annotations:
    everest.io/disk-volume-type: SATA
    everest.io/disk-volume-type: GPSSD
    everest.io/crypt-key-id: b6a1d001-da76-48d7-a9ea-079512888d33
spec:
  storageClassName: csi-disk
@@ -8,7 +8,7 @@ spec:
  persistentVolumeClaim:
    metadata:
      annotations:
        everest.io/disk-volume-type: SATA
        everest.io/disk-volume-type: GPSSD
        everest.io/crypt-key-id: b6a1d001-da76-48d7-a9ea-079512888d33
    spec:
      storageClassName: csi-disk
@@ -9,7 +9,7 @@ spec:
  storageMetadata:
    annotations:
      everest.io/crypt-key-id: b6a1d001-da76-48d7-a9ea-079512888d33
      everest.io/disk-volume-type: SATA
      everest.io/disk-volume-type: GPSSD
  storage:
    storageClassName: csi-disk
    accessModes:
@@ -289,7 +289,7 @@ vmsingle:
    storageMetadata:
      annotations:
        everest.io/crypt-key-id: b6a1d001-da76-48d7-a9ea-079512888d33
        everest.io/disk-volume-type: SATA
        everest.io/disk-volume-type: GPSSD
    storage:
      storageClassName: csi-disk
      accessModes: