emptyDir
Files in an emptyDir volume survive container restarts (the container is restarted in place after the redis process below is killed), but the volume is deleted for good once the Pod itself is removed from the node. The walkthrough below demonstrates this; a tmpfs-backed variant is sketched after the manifest.
Create the Pod
# kubectl apply -f redis.yaml
Check that the Pod is running, and watch it for changes
# kubectl get pod redis --watch
In another terminal, exec into the container
# kubectl exec -it redis -- /bin/bash
In the shell, change to /data/redis and create a file
# cd /data/redis/
# echo Hello > test-file
Run the following commands to find the redis process
# apt-get update
# apt-get install procps
# ps aux
Kill the redis process and watch the redis Pod change
# kill <pid>
Exec into the redis container again and check whether the file still exists
# kubectl exec -it redis -- /bin/bash
[root@k8s-01 chapter07]# cat redis.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: redis
spec:
  containers:
  - name: redis
    image: redis
    volumeMounts:
    - name: redis-storage
      mountPath: /data/redis
  volumes:
  - name: redis-storage
    emptyDir: {}
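If the scratch data should live in RAM rather than on the node's disk, emptyDir can be backed by tmpfs. A minimal sketch (medium and sizeLimit are standard emptyDir fields; the Pod and volume names here are illustrative):
apiVersion: v1
kind: Pod
metadata:
  name: redis-mem
spec:
  containers:
  - name: redis
    image: redis
    volumeMounts:
    - name: redis-cache
      mountPath: /data/redis
  volumes:
  - name: redis-cache
    emptyDir:
      medium: Memory    # back the volume with tmpfs; contents count against container memory
      sizeLimit: 64Mi   # usage beyond this can get the Pod evicted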
hostPath
A hostPath volume maps a directory on the node's filesystem into the Pod, so the data survives Pod restarts and is visible to any Pod scheduled onto that same node; a Pod landing on a different node will not find the files. A variant that validates the host path is sketched after the manifest.
[root@k8s-01 chapter07]# cat hostpath.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: test-pod
spec:
  containers:
  - image: nginx
    name: test-container
    volumeMounts:
    - name: test-volume
      mountPath: /usr/share/nginx
  volumes:
  - name: test-volume
    hostPath:
      path: /data
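hostPath also takes an optional type field that checks, or creates, the host path before mounting. A sketch of the same Pod hardened this way (DirectoryOrCreate is a standard hostPath type; the Pod name is illustrative):
apiVersion: v1
kind: Pod
metadata:
  name: test-pod-typed
spec:
  containers:
  - image: nginx
    name: test-container
    volumeMounts:
    - name: test-volume
      mountPath: /usr/share/nginx
  volumes:
  - name: test-volume
    hostPath:
      path: /data
      type: DirectoryOrCreate   # create /data on the node if it is missing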
Introduction to PV and PVC
PV (PersistentVolume): a piece of storage in the cluster, provisioned either statically by an administrator or dynamically through a StorageClass.
PVC (PersistentVolumeClaim): a user's request for storage; Kubernetes binds the claim to a PV whose capacity and access modes satisfy it.
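The access-mode abbreviations used below (RWO = ReadWriteOnce, ROX = ReadOnlyMany, RWX = ReadWriteMany) can be looked up straight from the API schema; a quick check, assuming a working kubectl context:
# kubectl explain persistentvolume.spec.accessModes
# kubectl explain persistentvolumeclaim.spec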
NFS as a static PV example
Add the NFS exports on the server
[root@k8s-01 data]# vim /etc/exports
[root@k8s-01 data]# exportfs -rv
exporting 192.168.100.0/24:/data/nfs5
exporting 192.168.100.0/24:/data/nfs4
exporting 192.168.100.0/24:/data/nfs3
exporting 192.168.100.0/24:/data/nfs2
exporting 192.168.100.0/24:/data/nfs1
[root@k8s-01 data]# cat /etc/exports
/data/nfs1 192.168.100.0/24(rw,async,insecure,no_root_squash)
/data/nfs2 192.168.100.0/24(rw,async,insecure,no_root_squash)
/data/nfs3 192.168.100.0/24(rw,async,insecure,no_root_squash)
/data/nfs4 192.168.100.0/24(rw,async,insecure,no_root_squash)
/data/nfs5 192.168.100.0/24(rw,async,insecure,no_root_squash)
[root@k8s-01 data]# yum install -y nfs-utils rpcbind
Install the NFS client utilities on every node
yum install -y nfs-utils
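The transcript above assumes the exported directories already exist and the NFS services are running. Starting from scratch, something like the following would be needed first (a sketch for CentOS 7; the directory names match the exports above):
# mkdir -p /data/nfs{1..5}
# systemctl enable --now rpcbind nfs-server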
Create the PVs and list them
# showmount -e 192.168.20.88
# kubectl create -f nfs-pv.yaml
# kubectl get pv
Create the PVC
# kubectl create -f nfs-pvc.yml
Use the following command to check whether the PV and PVC are bound
# kubectl get pvc
Create a Pod that uses the PVC created above
# kubectl create -f nginx-pvc.yml
# kubectl get pod nginx-vol-pvc -o yaml
[root@k8s-01 chapter07]# cat nfs-pv.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv001
  labels:
    name: pv001
spec:
  nfs:
    path: /data/nfs1
    server: 192.168.20.111
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv002
  labels:
    name: pv002
spec:
  nfs:
    path: /data/nfs2
    server: 192.168.20.111
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv003
  labels:
    name: pv003
spec:
  nfs:
    path: /data/nfs3
    server: 192.168.20.111
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 1Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv004
  labels:
    name: pv004
spec:
  nfs:
    path: /data/nfs4
    server: 192.168.20.111
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv005
  labels:
    name: pv005
spec:
  nfs:
    path: /data/nfs5
    server: 192.168.20.111
  accessModes: ["ReadWriteMany","ReadWriteOnce"]
  capacity:
    storage: 2Gi
[root@k8s-01 chapter07]# cat nfs-pvc.yml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc
  namespace: default
spec:
  accessModes: ["ReadWriteMany"]
  resources:
    requests:
      storage: 2Gi
[root@k8s-01 chapter07]# cat nginx-pvc.yml 
apiVersion: v1
kind: Pod
metadata:
  name: nginx-vol-pvc
  namespace: default
spec:
  containers:
  - name: mywww
    image: nginx
    volumeMounts:
    - name: www
      mountPath: /usr/share/nginx/html
  volumes:
  - name: www
    persistentVolumeClaim:
      claimName: mypvc
[root@k8s-01 chapter07]# kubectl get pv
NAME    CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM           STORAGECLASS   REASON   AGE
pv001   1Gi        RWO,RWX        Retain           Available                                           2m46s
pv002   1Gi        RWO,RWX        Retain           Available                                           2m46s
pv003   1Gi        RWO,RWX        Retain           Available                                           2m46s
pv004   2Gi        RWO,RWX        Retain           Bound       default/mypvc                           2m46s
pv005   2Gi        RWO,RWX        Retain           Available                                           2m46s
[root@k8s-01 chapter07]# kubectl get pvc
NAME    STATUS   VOLUME   CAPACITY   ACCESS MODES   STORAGECLASS   AGE
mypvc   Bound    pv004    2Gi        RWO,RWX                       2m8s
Exec into the Pod and look around
[root@k8s-01 chapter07]# kubectl exec -it nginx-vol-pvc -- bash
root@nginx-vol-pvc:/# df -h   
Filesystem                 Size  Used Avail Use% Mounted on
overlay                     50G   11G   40G  21% /
tmpfs                       64M     0   64M   0% /dev
tmpfs                      1.9G     0  1.9G   0% /sys/fs/cgroup
/dev/sda2                   50G   11G   40G  21% /etc/hosts
shm                         64M     0   64M   0% /dev/shm
192.168.100.30:/data/nfs4   50G  8.9G   41G  18% /usr/share/nginx/html
tmpfs                      1.9G   12K  1.9G   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                      1.9G     0  1.9G   0% /proc/acpi
tmpfs                      1.9G     0  1.9G   0% /proc/scsi
tmpfs                      1.9G     0  1.9G   0% /sys/firmware
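To confirm that writes through the mount really land on the NFS server, write a file from inside the Pod and read it back on the server side; a sketch (the file name and contents are illustrative; /data/nfs4 matches the PV that mypvc bound to):
root@nginx-vol-pvc:/# echo 'hello from the pod' > /usr/share/nginx/html/index.html
On the NFS server:
[root@k8s-01 ~]# cat /data/nfs4/index.html
hello from the pod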
NFS as a dynamic PV example
Install and deploy the provisioner
Create the service account
# kubectl create -f serviceaccount.yaml
Create the cluster role and bind it to the service account
# kubectl create -f clusterrole.yaml
# kubectl create -f clusterrolebinding.yaml
Create the role and bind it to the service account
# kubectl create -f role.yaml
# kubectl create -f rolebinding.yaml
Create the dynamic StorageClass
# kubectl create -f class.yaml
Deploy the provisioner
# kubectl create -f deployment.yaml
Note: you can also apply all of the above at once with kubectl apply -f ./nfs-de
Create the PVC and Pod, then check whether the StorageClass provisioned a PV and bound it
# kubectl create -f test-claim.yaml
# kubectl create -f test-pod.yaml
# kubectl get pvc -n aishangwei
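After deploying, it is worth confirming that the provisioner Pod is actually running and the class exists before checking the claim; a quick sanity check, assuming the Deployment landed in the default namespace as above:
# kubectl get pods -l app=nfs-client-provisioner
# kubectl get storageclass managed-nfs-storage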
File contents
[root@k8s-01 nfs-de]# ll
total 28
-rw-r--r--. 1 root root 247 Aug 22 10:32 class.yaml
-rw-r--r--. 1 root root 306 Aug 22 10:32 clusterrolebinding.yaml
-rw-r--r--. 1 root root 525 Aug 22 10:32 clusterrole.yaml
-rw-r--r--. 1 root root 901 Aug 24 13:58 deployment.yaml
-rw-r--r--. 1 root root 311 Aug 22 10:32 rolebinding.yaml
-rw-r--r--. 1 root root 228 Aug 22 10:32 role.yaml
-rw-r--r--. 1 root root  76 Aug 22 10:32 serviceaccount.yaml
[root@k8s-01 nfs-de]# cat class.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name; it must match the deployment's env PROVISIONER_NAME
reclaimPolicy: Retain
parameters:
  archiveOnDelete: "false"
[root@k8s-01 nfs-de]# cat clusterrolebinding.yaml 
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
[root@k8s-01 nfs-de]# cat clusterrole.yaml 
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"][root@k8s-01 nfs-de]# cat deployment.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 192.168.100.30
            - name: NFS_PATH
              value: /data/nfs1
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.100.30
            path: /data/nfs1
[root@k8s-01 nfs-de]# cat rolebinding.yaml 
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
[root@k8s-01 nfs-de]# cat role.yaml 
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"][root@k8s-01 nfs-de]# cat serviceaccount.yaml 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
[root@k8s-01 chapter07]# cat test-claim.yaml 
apiVersion: v1
kind: Namespace
metadata:
  name: aishangwei
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  namespace: aishangwei
  annotations:
    volume.beta.kubernetes.io/storage-class: "managed-nfs-storage"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Mi
[root@k8s-01 chapter07]# cat test-pod.yaml 
kind: Pod
apiVersion: v1
metadata:
  name: test-pod
  namespace: aishangwei
spec:
  containers:
  - name: test-pod
    image: busybox
    command:
      - "/bin/sh"
    args:
      - "-c"
      - "touch /mnt/aishangwei-SUCCESS && exit 0 || exit 1"
    volumeMounts:
      - name: nfs-pvc
        mountPath: "/mnt"
  restartPolicy: "Never"
  volumes:
    - name: nfs-pvc
      persistentVolumeClaim:
        claimName: test-claim
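If provisioning succeeded, the nfs-client provisioner creates one subdirectory per claim under the export, and test-pod leaves its marker file inside it; a sketch of what to look for (the generated PV name, and therefore the directory name, differs per cluster):
# kubectl get pvc -n aishangwei
# kubectl get pv          # a pvc-<uid> volume should appear with STATUS Bound
On the NFS server:
# ls /data/nfs1/aishangwei-test-claim-pvc-<uid>/
aishangwei-SUCCESS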
Using a local folder as a PV
Create the directory, and create a file in it
# mkdir /mnt/data
# echo 'Hello from Kubernetes storage' > /mnt/data/index.html
Run the following commands to create the PV and inspect it
# kubectl create -f pv-volume.yaml
# kubectl get pv task-pv-volume
Create the PVC and verify the PV and PVC
# kubectl create -f pv-claim.yaml
# kubectl get pv task-pv-volume
# kubectl get pvc task-pv-claim
Create a Pod that references the PVC
# kubectl create -f pv-pod.yaml
# kubectl get pod task-pv-pod
[root@k8s-01 chapter07]# cat pv-volume.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: task-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/data"[root@k8s-01 chapter07]# cat pv-claim.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: task-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
[root@k8s-01 chapter07]# cat pv-pod.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: task-pv-pod
spec:
  volumes:
    - name: task-pv-storage
      persistentVolumeClaim:
        claimName: task-pv-claim
  containers:
    - name: task-pv-container
      image: nginx
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: task-pv-storage
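To verify that the Pod serves the file created on the host, exec in and fetch the page; a sketch (the stock nginx image ships without curl, hence the install step):
# kubectl exec -it task-pv-pod -- /bin/bash
root@task-pv-pod:/# apt update && apt install -y curl
root@task-pv-pod:/# curl http://localhost/
Hello from Kubernetes storage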
Changing the default StorageClass and the reclaim policy
List the storage classes
# kubectl get storageclass
Mark a StorageClass as non-default
# kubectl patch storageclass <your-class-name> -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"false"}}}'
Mark a StorageClass as the default
# kubectl patch storageclass <your-class-name> -p '{"metadata":{"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
List the persistent volumes
# kubectl get pv
Pick a persistent volume and change its reclaim policy
# kubectl patch pv <your-pv-name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
Check that the setting took effect
# kubectl get pv
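Going back to the default-class change: after patching, the default class is flagged in the kubectl get storageclass listing; illustrative output (class name and provisioner depend on the cluster, here matching the managed-nfs-storage class created earlier):
# kubectl get storageclass
NAME                            PROVISIONER      AGE
managed-nfs-storage (default)   fuseim.pri/ifs   5m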