
NFS Dynamic Provisioning

$ sudo apt install nfs-kernel-server nfs-common
$ MOUNT_POINT=/nfs
$ DISK_DEVICE=/dev/sdb
$ echo -e "n\np\n1\n\n\nw" | sudo fdisk ${DISK_DEVICE}
 
Welcome to fdisk (util-linux 2.36.1).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.

Device does not contain a recognized partition table.
Created a new DOS disklabel with disk identifier 0x9249d39a.

Command (m for help): Partition type
   p   primary (0 primary, 0 extended, 4 free)
   e   extended (container for logical partitions)
Select (default p): Partition number (1-4, default 1): First sector (2048-268435455, default 2048): Last sector, +/-sectors or +/-size{K,M,G,T,P} (2048-268435455, default 268435455):
Created a new partition 1 of type 'Linux' and of size 128 GiB.

Command (m for help): The partition table has been altered.
Calling ioctl() to re-read partition table.
Syncing disks.
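The piped string answers fdisk's prompts in order: n (new partition), p (primary), 1 (partition number), two empty answers accept the default first and last sectors, and w writes the table. For unattended runs, sfdisk is the purpose-built alternative; a minimal sketch that should produce the same single full-disk Linux partition:

<code bash>
# sfdisk reads a declarative script instead of interactive keystrokes;
# "type=83" with no start/size creates one Linux partition spanning the disk.
echo 'type=83' | sudo sfdisk ${DISK_DEVICE}
</code>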
$ sudo mkfs.ext4 ${DISK_DEVICE}1
mke2fs 1.46.2 (28-Feb-2021)
Creating filesystem with 33554176 4k blocks and 8388608 inodes
Filesystem UUID: b0497037-8889-48d8-b96f-9cad139aa3ce
Superblock backups stored on blocks:
        32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
        4096000, 7962624, 11239424, 20480000, 23887872
 
Allocating group tables: done
Writing inode tables: done
Creating journal (131072 blocks): done
Writing superblocks and filesystem accounting information: done
$ UUID=`sudo blkid -o export ${DISK_DEVICE}1 | grep UUID | grep -v PARTUUID`
$ sudo mkdir ${MOUNT_POINT}
$ sudo cp -p /etc/fstab{,.dist}
$ echo "${UUID}  ${MOUNT_POINT}    ext4    defaults 1 2" | sudo tee -a /etc/fstab
UUID=b0497037-8889-48d8-b96f-9cad139aa3ce  /nfs    ext4    defaults 1 2
$ sudo mount ${MOUNT_POINT}
$ df -hT | grep nfs
/dev/sdb1      ext4      126G   24K  120G   1% /nfs
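Because mount was given only the mount point, it already resolved the device through the new fstab line; findmnt gives a more explicit confirmation of both the fstab entry and the live mount:

<code bash>
# --fstab prints the matching /etc/fstab entry; the second call shows the live mount
findmnt --fstab ${MOUNT_POINT}
findmnt ${MOUNT_POINT}
</code>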
$ sudo mkdir /nfs/kubedata
$ sudo chown nobody:nogroup /nfs/
$ sudo chown nobody:nogroup /nfs/kubedata
$ sudo chmod 0777 /nfs/kubedata
$ echo '/nfs/kubedata 172.28.128.96/27(rw,sync,no_subtree_check)' | sudo tee /etc/exports
/nfs/kubedata 172.28.128.96/27(rw,sync,no_subtree_check)
$ sudo systemctl restart nfs-kernel-server
$ sudo exportfs -s
/nfs/kubedata  172.28.128.96/27(rw,wdelay,root_squash,no_subtree_check,sec=sys,rw,secure,root_squash,no_all_squash)
$ sudo exportfs -arv
exporting 172.28.128.96/27:/nfs/kubedata

Clients - Kube nodes

$ sudo apt install nfs-common
$ sudo showmount -e 172.28.128.126
Export list for 172.28.128.126:
/nfs/kubedata 172.28.128.96/27
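Before involving Kubernetes, a manual mount from any host in the allowed 172.28.128.96/27 range proves the export works end to end (/mnt/nfstest is just a scratch path for the test):

<code bash>
sudo mkdir -p /mnt/nfstest
sudo mount -t nfs 172.28.128.126:/nfs/kubedata /mnt/nfstest
touch /mnt/nfstest/hello && rm /mnt/nfstest/hello   # confirms the rw export
sudo umount /mnt/nfstest
</code>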

Control Plane

$ git clone https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner.git
$ cd nfs-subdir-external-provisioner/deploy/
$ ls -l
total 24
-rw-r--r-- 1 suporte suporte  246 Apr 28 11:48 class.yaml
-rw-r--r-- 1 suporte suporte 1064 Apr 28 11:48 deployment.yaml
drwxr-xr-x 2 suporte suporte 4096 Apr 28 11:48 objects
-rw-r--r-- 1 suporte suporte 1900 Apr 28 11:48 rbac.yaml
-rw-r--r-- 1 suporte suporte  190 Apr 28 11:48 test-claim.yaml
-rw-r--r-- 1 suporte suporte  401 Apr 28 11:48 test-pod.yaml
$ kubectl create ns nfs-system
namespace/nfs-system created
$ vim rbac.yaml
<ESC>:%s/default/nfs-system/g
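The same edit can be scripted; a sed one-liner equivalent to the vim substitution above:

<code bash>
# Rewrites every "default" to "nfs-system" in place, like :%s/default/nfs-system/g
sed -i 's/default/nfs-system/g' rbac.yaml
</code>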
$ kubectl apply -f rbac.yaml
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner created
$ vim class.yaml
<ESC>:%s/false/true/g
$ kubectl apply -f class.yaml
storageclass.storage.k8s.io/nfs-client created
$ kubectl get sc nfs-client
NAME         PROVISIONER                                   RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
nfs-client   k8s-sigs.io/nfs-subdir-external-provisioner   Delete          Immediate           false                  47s
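The class.yaml substitution flips the provisioner's archiveOnDelete parameter from "false" to "true", so data from deleted claims is renamed with an archived- prefix instead of being removed. The live value can be read back from the StorageClass:

<code bash>
kubectl get sc nfs-client -o jsonpath='{.parameters.archiveOnDelete}{"\n"}'
# expected output: true
</code>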
deployment.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs-system
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 172.28.128.126
            - name: NFS_PATH
              value: /nfs/kubedata
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.28.128.126
            path: /nfs/kubedata
$ kubectl apply -f deployment.yaml
deployment.apps/nfs-client-provisioner created
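Optionally, nfs-client can be promoted to the cluster's default StorageClass, so PVCs without an explicit storageClassName also land on NFS:

<code bash>
kubectl patch storageclass nfs-client \
  -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
</code>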
$ kubectl get all -n nfs-system
NAME                                          READY   STATUS    RESTARTS   AGE
pod/nfs-client-provisioner-7d976fc48b-qdh2l   1/1     Running   0          78s
 
NAME                                     READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/nfs-client-provisioner   1/1     1            1           78s
 
NAME                                                DESIRED   CURRENT   READY   AGE
replicaset.apps/nfs-client-provisioner-7d976fc48b   1         1         1       78s
$ kubectl describe pod nfs-client-provisioner-7d976fc48b-qdh2l -n nfs-system
Name:         nfs-client-provisioner-7d976fc48b-qdh2l
Namespace:    nfs-system
Priority:     0
Node:         kube-worker-02.juntotelecom.com.br/172.28.128.100
Start Time:   Thu, 28 Apr 2022 12:12:39 -0300
Labels:       app=nfs-client-provisioner
              pod-template-hash=7d976fc48b
Annotations:  cni.projectcalico.org/containerID: 2b9bec73f90a3b61b9b4ee2b4e387d8af1804008f9c5a9effa6d808f58b34532
              cni.projectcalico.org/podIP: 10.244.213.129/32
              cni.projectcalico.org/podIPs: 10.244.213.129/32,fd00::e:1334:c75d:e2cb:7ec1/128
Status:       Running
IP:           10.244.213.129
IPs:
  IP:           10.244.213.129
  IP:           fd00::e:1334:c75d:e2cb:7ec1
Controlled By:  ReplicaSet/nfs-client-provisioner-7d976fc48b
Containers:
  nfs-client-provisioner:
    Container ID:   cri-o://e11a1ece9b09790c8eb6ecd80577d84f171b8530cb6f9254de853bea70ddeada
    Image:          k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2
    Image ID:       k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner@sha256:374f80dde8bbd498b1083348dd076b8d8d9f9b35386a793f102d5deebe593626
    Port:           <none>
    Host Port:      <none>
    State:          Running
      Started:      Thu, 28 Apr 2022 12:12:55 -0300
    Ready:          True
    Restart Count:  0
    Environment:
      PROVISIONER_NAME:  k8s-sigs.io/nfs-subdir-external-provisioner
      NFS_SERVER:        172.28.128.126
      NFS_PATH:          /nfs/kubedata
    Mounts:
      /persistentvolumes from nfs-client-root (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-82tcc (ro)
Conditions:
  Type              Status
  Initialized       True
  Ready             True
  ContainersReady   True
  PodScheduled      True
Volumes:
  nfs-client-root:
    Type:      NFS (an NFS mount that lasts the lifetime of a pod)
    Server:    172.28.128.126
    Path:      /nfs/kubedata
    ReadOnly:  false
  kube-api-access-82tcc:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type    Reason     Age   From               Message
  ----    ------     ----  ----               -------
  Normal  Scheduled  110s  default-scheduler  Successfully assigned nfs-system/nfs-client-provisioner-7d976fc48b-qdh2l to kube-worker-02.juntotelecom.com.br
  Normal  Pulling    108s  kubelet            Pulling image "k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2"
  Normal  Pulled     94s   kubelet            Successfully pulled image "k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2" in 14.553152139s
  Normal  Created    94s   kubelet            Created container nfs-client-provisioner
  Normal  Started    94s   kubelet            Started container nfs-client-provisioner
test-claim.yaml
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
spec:
  storageClassName: nfs-client
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
$ kubectl apply -f test-claim.yaml
persistentvolumeclaim/test-claim created
$ kubectl get pv,pvc
NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                    STORAGECLASS    REASON   AGE
persistentvolume/pv-wiki                                    4Gi        RWO            Retain           Bound    wiki/volume-claim-wiki   local-storage            18h
persistentvolume/pvc-a2a73c3c-6c30-4011-b10e-0a4929635786   1Mi        RWX            Delete           Bound    default/test-claim       nfs-client               17s
 
NAME                               STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
persistentvolumeclaim/test-claim   Bound    pvc-a2a73c3c-6c30-4011-b10e-0a4929635786   1Mi        RWX            nfs-client     17s

On the NFS server

The provisioner creates one directory per claim, named ${namespace}-${pvcName}-${pvName}:

$ ls /nfs/kubedata/
default-test-claim-pvc-a2a73c3c-6c30-4011-b10e-0a4929635786
$ cat test-pod.yaml
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
spec:
  containers:
  - name: test-pod
    image: busybox:stable
    command:
      - "/bin/sh"
    args:
      - "-c"
      - "touch /mnt/SUCCESS && exit 0 || exit 1"
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim
$ kubectl apply -f test-pod.yaml
pod/test-pod created
$ kubectl get pods -o wide
NAME       READY   STATUS      RESTARTS   AGE    IP               NODE                                 NOMINATED NODE   READINESS GATES
test-pod   0/1     Completed   0          118s   10.244.213.130   kube-worker-02.juntotelecom.com.br   <none>           <none>
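test-pod only touches /mnt/SUCCESS and exits, hence the Completed status. Back on the NFS server, the file should now exist inside the claim's directory:

<code bash>
# Run on the NFS server; the glob avoids hard-coding the generated PV name
ls /nfs/kubedata/default-test-claim-*/
# expected output: SUCCESS
</code>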
$ kubectl delete -f test-pod.yaml -f test-claim.yaml
pod "test-pod" deleted
persistentvolumeclaim "test-claim" deleted
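Because archiveOnDelete was set to "true", deleting the claim does not discard the data; the provisioner renames the directory with an archived- prefix. On the NFS server:

<code bash>
ls /nfs/kubedata/
# expected output: archived-default-test-claim-pvc-a2a73c3c-6c30-4011-b10e-0a4929635786
</code>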