Open saurabhwani5 opened 4 months ago
RHEL worker node:
[root@worker3 ~]# sestatus
SELinux status: enabled
SELinuxfs mount: /sys/fs/selinux
SELinux root directory: /etc/selinux
Loaded policy name: targeted
Current mode: permissive
Mode from config file: permissive
Policy MLS status: enabled
Policy deny_unknown status: allowed
Memory protection checking: actual (secure)
Max kernel policy version: 33
This is the expected behaviour when SELinux is enabled on the cluster, as the snapshot path is read-only. This is mentioned in the design doc and is working as expected. This will be documented in the Knowledge Center (KC) as well.
Describe the bug
When trying to create a shallow-copy pod from a shallow-copy volume, the pod fails with CreateContainerError on an OCP cluster with RHEL worker nodes, reporting the following error:
How to Reproduce?
apiVersion: v1 kind: PersistentVolumeClaim metadata: name: scale-advance-pvc-1 spec: accessModes:
apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: ibm-spectrum-scale-csi-advance provisioner: spectrumscale.csi.ibm.com parameters: volBackendFs: "fs1" version: "2" reclaimPolicy: Delete [root@ocp1-helper Upgradetesting]# oc get pvc -w NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE scale-advance-pvc-1 Pending ibm-spectrum-scale-csi-advance 46s scale-advance-pvc-1 Pending pvc-ec50c949-9c14-405b-b92d-fe70cdb13210 0 ibm-spectrum-scale-csi-advance 65s scale-advance-pvc-1 Bound pvc-ec50c949-9c14-405b-b92d-fe70cdb13210 1Gi RWX ibm-spectrum-scale-csi-advance 65s [root@ocp1-helper Upgradetesting]# oc get pods -w NAME READY STATUS RESTARTS AGE csi-scale-fsetdemo-pod-2 1/1 Running 0 88s
[root@ocp1-helper Upgradetesting]# cat snapshot.yaml apiVersion: snapshot.storage.k8s.io/v1 kind: VolumeSnapshot metadata: name: ibm-spectrum-scale-snapshot spec: volumeSnapshotClassName: ibm-spectrum-scale-snapshotclass-advance source: persistentVolumeClaimName: scale-advance-pvc-1
apiVersion: snapshot.storage.k8s.io/v1 kind: VolumeSnapshotClass metadata: name: ibm-spectrum-scale-snapshotclass-advance driver: spectrumscale.csi.ibm.com parameters: snapWindow: "30" #Optional : Time in minutes (default=30) deletionPolicy: Delete [root@ocp1-helper Upgradetesting]# oc apply -f snapshot.yaml volumesnapshot.snapshot.storage.k8s.io/ibm-spectrum-scale-snapshot created volumesnapshotclass.snapshot.storage.k8s.io/ibm-spectrum-scale-snapshotclass-advance unchanged [root@ocp1-helper Upgradetesting]# oc get vs NAME READYTOUSE SOURCEPVC SOURCESNAPSHOTCONTENT RESTORESIZE SNAPSHOTCLASS SNAPSHOTCONTENT CREATIONTIME AGE ibm-spectrum-scale-snapshot true scale-advance-pvc-1 1Gi ibm-spectrum-scale-snapshotclass-advance snapcontent-e6474bc9-24b9-4060-856c-0624cd13ef86 26s 55s
[root@ocp1-helper Upgradetesting]# cat shallowcopy.yaml apiVersion: v1 kind: Pod metadata: name: csi-scale-fsetdemo-pod-snapshot labels: app: nginx spec: containers:
name: mypvc persistentVolumeClaim: claimName: ibm-spectrum-scale-pvc-from-snapshot readOnly: false
apiVersion: v1 kind: PersistentVolumeClaim metadata: name: ibm-spectrum-scale-pvc-from-snapshot spec: accessModes:
[root@ocp1-helper Upgradetesting]# oc describe pod csi-scale-fsetdemo-pod-snapshot Name: csi-scale-fsetdemo-pod-snapshot Namespace: ibm-spectrum-scale-csi-driver Priority: 0 Service Account: default Node: worker3.ocp1.vmlocal/172.16.1.23 Start Time: Wed, 03 Apr 2024 08:23:58 +0200 Labels: app=nginx Annotations: k8s.ovn.org/pod-networks: {"default":{"ip_addresses":["10.130.2.156/23"],"mac_address":"0a:58:0a:82:02:9c","gateway_ips":["10.130.2.1"],"ip_address":"10.130.2.156/2... k8s.v1.cni.cncf.io/network-status: [{ "name": "ovn-kubernetes", "interface": "eth0", "ips": [ "10.130.2.156" ], "mac": "0a:58:0a:82:02:9c", "default": true, "dns": {} }] openshift.io/scc: privileged Status: Pending IP: 10.130.2.156 IPs: IP: 10.130.2.156 Containers: web-server: Container ID: Image: docker-na-public.artifactory.swg-devops.com/sys-spectrum-scale-team-test-environment-docker-local/nginx:1.22.0 Image ID: Port: 80/TCP Host Port: 0/TCP State: Waiting Reason: CreateContainerError Ready: False Restart Count: 0 Environment:
Mounts:
/usr/share/nginx/html/scale from mypvc (rw)
/var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-nh2tc (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
mypvc:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: ibm-spectrum-scale-pvc-from-snapshot
ReadOnly: false
kube-api-access-nh2tc:
Type: Projected (a volume that contains injected data from multiple sources)
TokenExpirationSeconds: 3607
ConfigMapName: kube-root-ca.crt
ConfigMapOptional:
DownwardAPI: true
ConfigMapName: openshift-service-ca.crt
ConfigMapOptional:
QoS Class: BestEffort
Node-Selectors:
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
Normal SuccessfulAttachVolume 59s attachdetach-controller AttachVolume.Attach succeeded for volume "pvc-0400590b-c4f7-4896-ab5f-c9cda7db7589" Normal AddedInterface 58s multus Add eth0 [10.130.2.156/23] from ovn-kubernetes Normal Pulled 14s (x6 over 58s) kubelet Container image "docker-na-public.artifactory.swg-devops.com/sys-spectrum-scale-team-test-environment-docker-local/nginx:1.22.0" already present on machine Warning Failed 14s (x6 over 58s) kubelet Error: relabel failed /var/lib/kubelet/pods/245585b6-8d3f-42e5-a13f-5fd1a005d3a0/volumes/kubernetes.io~csi/pvc-0400590b-c4f7-4896-ab5f-c9cda7db7589/mount: lsetxattr /var/lib/kubelet/pods/245585b6-8d3f-42e5-a13f-5fd1a005d3a0/volumes/kubernetes.io~csi/pvc-0400590b-c4f7-4896-ab5f-c9cda7db7589/mount: operation not permitted [root@ocp1-helper Upgradetesting]#
Expected behavior
The shallow-copy pod should reach the Running state on an OCP cluster with RHEL worker nodes.
Data Collection and Debugging
CSI Snap