added test_03

This commit is contained in:
Manuel Ganter 2025-07-22 15:20:15 +02:00
parent 2edc6c0c39
commit 26f85f8a58
No known key found for this signature in database
10 changed files with 315 additions and 3 deletions

View file

@ -2,10 +2,16 @@
State: **SUCCESSFUL** State: **SUCCESSFUL**
Question Answered: Does Snapshotting/Restore with CSI volumes work? Question: Does Snapshotting/Restore with CSI volumes work?
# test_02 # test_02
State: **FAILED** State: **FAILED**
Question Answered: Can i clone a non-csi volume to a csi-volume? Question: Can i clone a non-csi volume to a csi-volume?
# test_03
State: **SUCCESSFUL**
Question: Can i migrate the storage driver from flexvolume to csi w/o destroying the volume on the cloud provider?

View file

@ -2,7 +2,8 @@
"$schema": "https://raw.githubusercontent.com/jetify-com/devbox/0.15.0/.schema/devbox.schema.json", "$schema": "https://raw.githubusercontent.com/jetify-com/devbox/0.15.0/.schema/devbox.schema.json",
"packages": [ "packages": [
"k9s@latest", "k9s@latest",
"kubectl@latest" "kubectl@latest",
"jq@latest"
], ],
"shell": { "shell": {
"init_hook": [ "init_hook": [

View file

@ -5,6 +5,122 @@
"last_modified": "2025-07-08T04:39:49Z", "last_modified": "2025-07-08T04:39:49Z",
"resolved": "github:NixOS/nixpkgs/9b008d60392981ad674e04016d25619281550a9d?lastModified=1751949589&narHash=sha256-mgFxAPLWw0Kq%2BC8P3dRrZrOYEQXOtKuYVlo9xvPntt8%3D" "resolved": "github:NixOS/nixpkgs/9b008d60392981ad674e04016d25619281550a9d?lastModified=1751949589&narHash=sha256-mgFxAPLWw0Kq%2BC8P3dRrZrOYEQXOtKuYVlo9xvPntt8%3D"
}, },
"jq@latest": {
"last_modified": "2025-07-20T07:42:04Z",
"resolved": "github:NixOS/nixpkgs/7c688a0875df5a8c28a53fb55ae45e94eae0dddb#jq",
"source": "devbox-search",
"version": "1.8.1",
"systems": {
"aarch64-darwin": {
"outputs": [
{
"name": "bin",
"path": "/nix/store/9g64fqz8v74v5ixgalm8a074jp754423-jq-1.8.1-bin",
"default": true
},
{
"name": "man",
"path": "/nix/store/d6h860ssnqqm5pb18k7cha953j8v41rp-jq-1.8.1-man",
"default": true
},
{
"name": "doc",
"path": "/nix/store/2hc8qfk7rp04hafsr260dri1bd6ywvgc-jq-1.8.1-doc"
},
{
"name": "out",
"path": "/nix/store/5g1fsdjybk63bvnradhy861cv1j0zvbj-jq-1.8.1"
},
{
"name": "dev",
"path": "/nix/store/97sbzpy66rngcyzdx2xnwc58fd4xanz2-jq-1.8.1-dev"
}
],
"store_path": "/nix/store/9g64fqz8v74v5ixgalm8a074jp754423-jq-1.8.1-bin"
},
"aarch64-linux": {
"outputs": [
{
"name": "bin",
"path": "/nix/store/l5rpi6yjzh77xr06zw3npj5b4wh8nd2r-jq-1.8.1-bin",
"default": true
},
{
"name": "man",
"path": "/nix/store/df5cp877l2zda5hzs5aaj57nsb630mqj-jq-1.8.1-man",
"default": true
},
{
"name": "out",
"path": "/nix/store/ywm87lf11xxj4qbc26sm466cy2krazhf-jq-1.8.1"
},
{
"name": "dev",
"path": "/nix/store/lxkdipbc6lnyjfaxc8993p0f3awzaxv7-jq-1.8.1-dev"
},
{
"name": "doc",
"path": "/nix/store/1lln8d94np1gvpa4xd3a7s70hpxcrjja-jq-1.8.1-doc"
}
],
"store_path": "/nix/store/l5rpi6yjzh77xr06zw3npj5b4wh8nd2r-jq-1.8.1-bin"
},
"x86_64-darwin": {
"outputs": [
{
"name": "bin",
"path": "/nix/store/97l9ssj90sn8aaggnqn2ds8jbwxbjkmv-jq-1.8.1-bin",
"default": true
},
{
"name": "man",
"path": "/nix/store/mm9hiakwqhmqrc0ajdwy9pld482a5xh6-jq-1.8.1-man",
"default": true
},
{
"name": "dev",
"path": "/nix/store/cbmrkzdp2rv163jj3m5415609jcg07qa-jq-1.8.1-dev"
},
{
"name": "doc",
"path": "/nix/store/z2cnrn59may5ngxgydjrhnsam4cl2chg-jq-1.8.1-doc"
},
{
"name": "out",
"path": "/nix/store/nvj03nflpfrpmza7cnnlyshvsnpa8hxc-jq-1.8.1"
}
],
"store_path": "/nix/store/97l9ssj90sn8aaggnqn2ds8jbwxbjkmv-jq-1.8.1-bin"
},
"x86_64-linux": {
"outputs": [
{
"name": "bin",
"path": "/nix/store/ddvkwrb70cx04g7a42rlg6ka1j819kv4-jq-1.8.1-bin",
"default": true
},
{
"name": "man",
"path": "/nix/store/mkpcj600czkqcr542ysi8pzssadl4yrc-jq-1.8.1-man",
"default": true
},
{
"name": "out",
"path": "/nix/store/zrv91dsf75ixqjym4syy0f6g87wrziw7-jq-1.8.1"
},
{
"name": "dev",
"path": "/nix/store/x0kva02y0iyh7l0qvnx3l8ci7ll1r5si-jq-1.8.1-dev"
},
{
"name": "doc",
"path": "/nix/store/fc6m4lbv9g4j1hfrx2yip2w83b4pjf42-jq-1.8.1-doc"
}
],
"store_path": "/nix/store/ddvkwrb70cx04g7a42rlg6ka1j819kv4-jq-1.8.1-bin"
}
}
},
"k9s@latest": { "k9s@latest": {
"last_modified": "2025-07-20T07:42:04Z", "last_modified": "2025-07-20T07:42:04Z",
"resolved": "github:NixOS/nixpkgs/7c688a0875df5a8c28a53fb55ae45e94eae0dddb#k9s", "resolved": "github:NixOS/nixpkgs/7c688a0875df5a8c28a53fb55ae45e94eae0dddb#k9s",

54
test_03/01_deploy.yaml Normal file
View file

@ -0,0 +1,54 @@
---
# Claim an 8Gi RWO volume from the legacy (non-CSI) "default" storage class.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: non-csi
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
  storageClassName: default
---
# nginx Deployment that mounts the non-CSI claim at /non-csi.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx
  name: test-03
  namespace: default
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: nginx
  strategy:
    # Recreate: the single RWO volume must never be attached by two pods at once.
    type: Recreate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
    spec:
      containers:
        - image: nginx
          imagePullPolicy: Always
          name: nginx
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: "/non-csi"
              name: non-csi
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
        - name: non-csi
          persistentVolumeClaim:
            claimName: non-csi

View file

@ -0,0 +1,3 @@
Open a shell into the test-03 pod (namespace default) and `cd /non-csi` (the volume's mount path).
Then create a file there so the migration can be verified later.

View file

@ -0,0 +1,13 @@
#!/bin/bash
# Prepare the flexVolume PV for migration to CSI:
#   1. set its reclaim policy to Retain so the cloud disk survives PV deletion,
#   2. print the volume handle and nodeAffinity needed to author the new CSI PV
#      (see 04_pv.yaml).
set -euo pipefail

# Resolve the PV bound to the non-csi PVC once, instead of once per command.
PV_NAME=$(kubectl get pvc non-csi -o json | jq -r .spec.volumeName)

echo "Patching PV reclaim policy to Retain: "
kubectl patch pv "$PV_NAME" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'

echo "Volume Handle: "
kubectl get pv "$PV_NAME" -o json | jq -r .spec.flexVolume.options.volumeID

echo ""
echo "nodeAffinity: "
kubectl get pv "$PV_NAME" -o json | jq -r .spec.nodeAffinity

49
test_03/04_pv.yaml Normal file
View file

@ -0,0 +1,49 @@
---
# Statically provisioned CSI PersistentVolume pointing at the SAME cloud disk
# that previously backed the flexVolume PV (identified by volumeHandle).
apiVersion: v1
kind: PersistentVolume
metadata:
  annotations:
    pv.kubernetes.io/provisioned-by: everest-csi-provisioner
  finalizers:
    - external-provisioner.volume.kubernetes.io/finalizer
    - kubernetes.io/pv-protection
    - everest.io/disk-metadata-protection
    - everest-csi-attacher/disk-csi-everest-io
  labels:
    failure-domain.beta.kubernetes.io/zone: eu-de-03
  name: rebound-pv
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 8Gi
  csi:
    driver: disk.csi.everest.io
    fsType: ext4
    volumeAttributes:
      everest.io/disk-mode: SCSI
      everest.io/disk-volume-type: SAS
      everest.io/enterprise-project-id: "0"
      storage.kubernetes.io/csiProvisionerIdentity: everest-csi-provisioner
    # Insert the volume handle printed by the 03 script here.
    volumeHandle: 9cd53e0a-333f-4f9b-83f9-de81d2a9364b
  # Rewritten from embedded JSON flow style (with trailing commas) to block YAML.
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: failure-domain.beta.kubernetes.io/zone
              operator: In
              values:
                - eu-de-03
  # NOTE(review): Delete means the cloud disk is destroyed when this PV is
  # released — confirm that is intended once the CSI driver owns the volume.
  persistentVolumeReclaimPolicy: Delete
  storageClassName: csi-disk
  volumeMode: Filesystem

22
test_03/05_pvc.yaml Normal file
View file

@ -0,0 +1,22 @@
---
# PVC pre-bound to the statically created CSI PV (spec.volumeName: rebound-pv).
# The bind-completed / bound-by-controller annotations mimic a controller-bound
# claim so the existing PV is adopted directly.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    everest.io/set-disk-tags: "true"
    pv.kubernetes.io/bind-completed: "yes"
    pv.kubernetes.io/bound-by-controller: "yes"
    volume.beta.kubernetes.io/storage-provisioner: everest-csi-provisioner
    volume.kubernetes.io/storage-provisioner: everest-csi-provisioner
  finalizers:
    - kubernetes.io/pvc-protection
  name: rebound-pvc
  namespace: default
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
  storageClassName: csi-disk
  volumeMode: Filesystem
  volumeName: rebound-pv

42
test_03/06_deploy.yaml Normal file
View file

@ -0,0 +1,42 @@
---
# nginx Deployment that mounts the migrated (now CSI-backed) claim at
# /rebound-pvc to verify the data survived the driver migration.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx
  name: test-03
  namespace: default
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: nginx
  strategy:
    # Recreate: the single RWO volume must never be attached by two pods at once.
    type: Recreate
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: nginx
    spec:
      containers:
        - image: nginx
          imagePullPolicy: Always
          name: nginx
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: "/rebound-pvc"
              name: rebound-pvc
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
      volumes:
        - name: rebound-pvc
          persistentVolumeClaim:
            claimName: rebound-pvc

View file

@ -0,0 +1,6 @@
#!/bin/bash
# Tear down the original non-CSI claim and its PersistentVolume.
set -euo pipefail

# Resolve the bound PV name BEFORE deleting the PVC, or the reference is lost.
PV_NAME=$(kubectl get pvc non-csi -o json | jq -r .spec.volumeName)
kubectl delete pvc non-csi
# Bug fix: the original ran `kubectl delete pv -f $PV_NAME`, which treats the
# PV name as a manifest FILE path (-f) and fails; delete the PV by name.
kubectl delete pv "$PV_NAME"