I. PV and PVC
1. PersistentVolume (PV)
A PersistentVolume (PV) is a piece of storage in an external storage system, created and maintained by the administrator. Like a Volume, a PV is persistent: its lifecycle is independent of any Pod.
2. PersistentVolumeClaim (PVC)
A PersistentVolumeClaim (PVC) is a claim on a PV, usually created and maintained by ordinary users. When a Pod needs storage, the user creates a PVC specifying the capacity, access mode (e.g. read-only), and other requirements, and Kubernetes finds and binds a PV that satisfies them.
II. Persistent storage with NFS
1. Configure NFS
| nfs-server | nfs-client |
| --- | --- |
| k8s-master | k8s-node1, k8s-node2 |
1) Install the NFS service ------ on all nodes
[root@k8s-master ~]# yum install -y nfs-utils rpcbind   # nfs-common is a Debian/Ubuntu package name; on CentOS/RHEL, nfs-utils (plus rpcbind) provides NFS
2) Create the shared directory and set permissions ------ on the nfs-server
[root@k8s-master ~]# mkdir /nfsdata
[root@k8s-master ~]# chmod 777 /nfsdata   # 777 rather than 666: a directory needs the execute bit to be entered
3) Edit the exports file ------ on the nfs-server
[root@k8s-master ~]# vim /etc/exports
[root@k8s-master ~]# cat /etc/exports
/nfsdata *(rw,no_root_squash,no_all_squash,sync)
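PS: meaning of the export options (standard exports(5) semantics):
rw -- export the directory read-write.
sync -- commit writes to disk before replying to the client.
no_root_squash -- do not map the client's root to an anonymous user; client root keeps root privileges on the share.
no_all_squash -- do not map ordinary client users to the anonymous user either.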
4) Start rpcbind and nfs ------ on the nfs-server
[root@k8s-master ~]# systemctl start rpcbind
[root@k8s-master ~]# systemctl start nfs
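Optionally, enable both services at boot and confirm the export is active (standard systemctl/exportfs/showmount usage; run showmount from a client):
systemctl enable rpcbind nfs
exportfs -v                      # list the directories currently exported by the server
showmount -e 192.168.22.139      # from a client: list the exports the server offers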
5) Test that the NFS mount works
On the nfs-client:
[root@k8s-node2 ~]# mkdir /test
[root@k8s-node2 ~]# mount -t nfs 192.168.22.139:/nfsdata /test/   # IP of the nfs-server
[root@k8s-node2 ~]# df -Th | grep "/test"
192.168.22.139:/nfsdata nfs4 19G 9.9G 9.0G 53% /test
[root@k8s-node2 ~]# touch /test/ip.txt
[root@k8s-node2 ~]# ls /test/
ip.txt
On the nfs-server:
[root@k8s-master ~]# ls /nfsdata/
ip.txt
[root@k8s-node2 ~]# umount /test   # once the test succeeds, unmount
2. Create a PV
1) Write the YAML manifest
[root@k8s-master ~]# vim nfs-pv1.yaml
[root@k8s-master ~]# cat nfs-pv1.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mypv1
spec:
  capacity:                                # capacity of the PV
    storage: 1Gi
  accessModes:                             # access modes
  - ReadWriteOnce                          # the PV can be mounted read-write by a single node
  persistentVolumeReclaimPolicy: Recycle   # reclaim policy: Recycle wipes the data in the PV
  storageClassName: nfs                    # the PV's class; effectively a way of grouping PVs
  nfs:
    path: /nfsdata
    server: 192.168.22.139                 # address of the machine exporting the NFS directory
PS:
1) accessModes specifies the access mode, ReadWriteOnce here. The supported modes are:
ReadWriteOnce -- the PV can be mounted read-write by a single node.
ReadOnlyMany -- the PV can be mounted read-only by many nodes.
ReadWriteMany -- the PV can be mounted read-write by many nodes.
2) persistentVolumeReclaimPolicy specifies the reclaim policy (it can also be changed on a live PV, as sketched after this list). The supported policies are:
Retain -- the administrator reclaims the volume manually.
Recycle -- wipes the data in the PV, equivalent to running rm -rf /nfsdata/*. (Recycle is deprecated in recent Kubernetes releases; dynamic provisioning, covered in part V, is the recommended replacement.)
Delete -- deletes the corresponding storage asset on the Storage Provider, e.g. AWS EBS, GCE PD, Azure Disk, or an OpenStack Cinder volume.
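A sketch of changing the reclaim policy on a live PV, using standard kubectl patch syntax (mypv1 as created below):
kubectl patch pv mypv1 -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
kubectl get pv mypv1    # the RECLAIM POLICY column now shows Retain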
2) Apply the manifest to create mypv1
[root@k8s-master ~]# kubectl apply -f nfs-pv1.yaml
persistentvolume/mypv1 created
[root@k8s-master ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
mypv1 1Gi RWO Recycle Available nfs 8s
# STATUS is Available: mypv1 is ready and can be claimed by a PVC
3. Create a PVC
1) Write the YAML manifest
[root@k8s-master ~]# vim nfs-pvc1.yaml
[root@k8s-master ~]# cat nfs-pvc1.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mypvc1
spec:
  accessModes:            # requested access mode
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi        # requested capacity
  storageClassName: nfs   # class of PV to claim
2) Apply and check
[root@k8s-master ~]# kubectl apply -f nfs-pvc1.yaml
persistentvolumeclaim/mypvc1 created
[root@k8s-master ~]# kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
mypvc1 Bound mypv1 1Gi RWO nfs 6s
[root@k8s-master ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
mypv1 1Gi RWO Recycle Bound default/mypvc1 nfs 12m
# the pv and pvc listings show that mypvc1 is now Bound to mypv1; the claim succeeded
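For the bind to succeed, the PVC's storageClassName and accessModes must match the PV's, and the PV's capacity must be at least the requested size. The details of the match can be inspected with:
kubectl describe pvc mypvc1
kubectl describe pv mypv1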
4. Create a Pod
1) Write the Pod manifest and apply it
[root@k8s-master ~]# vim pod1.yaml
[root@k8s-master ~]# cat pod1.yaml
apiVersion: v1
kind: Pod
metadata:
  name: nfs-pod-nginx
  labels:
    app: nginx
spec:
  containers:
  - name: mypod1
    image: daocloud.io/library/nginx
    ports:
    - containerPort: 80
    volumeMounts:
    - mountPath: "/usr/share/nginx/html"
      name: mydata
  volumes:
  - name: mydata
    persistentVolumeClaim:
      claimName: mypvc1
[root@k8s-master ~]# kubectl apply -f pod1.yaml
pod/nfs-pod-nginx created
5. Verify
[root@k8s-master ~]# kubectl exec -it nfs-pod-nginx /bin/bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@nfs-pod-nginx:/# ls /usr/share/nginx/html/
ip.txt    # the file created earlier in /nfsdata
root@nfs-pod-nginx:/# echo "hello!" > /usr/share/nginx/html/index.html
root@nfs-pod-nginx:/# exit
exit
command terminated with exit code 130
[root@k8s-master ~]# ls /nfsdata/   # the file also shows up in the NFS shared directory, so the volume is shared successfully
index.html ip.txt
[root@k8s-master ~]# cat /nfsdata/index.html
hello!
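As a further check, nginx should serve the file we just wrote. A sketch (the pod IP placeholder is illustrative; use the one kubectl prints):
kubectl get pod nfs-pod-nginx -o wide    # note the pod IP
curl http://<pod-ip>/                    # expected output: hello!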
III. Reclaiming a PV
1. The Retain reclaim policy: first switch mypv1's reclaim policy from Recycle to Retain (with the kubectl patch shown in part II, or by setting persistentVolumeReclaimPolicy: Retain in nfs-pv1.yaml and re-applying), so the data survives reclamation
2. Delete the pod, PVC, and PV
[root@k8s-master ~]# kubectl delete pod nfs-pod-nginx
pod "nfs-pod-nginx" deleted
[root@k8s-master ~]# kubectl delete pvc mypvc1
persistentvolumeclaim "mypvc1" deleted
[root@k8s-master ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
mypv1 1Gi RWO Retain Released default/mypvc1 nfs 98m
# Although the data in mypv1 is preserved, the PV stays in the Released state and cannot be claimed by another PVC;
# to reuse the storage resource, delete and recreate mypv1; deleting only removes the PV object, the data in the underlying storage is not deleted
[root@k8s-master ~]# kubectl delete pv mypv1
persistentvolume "mypv1" deleted
[root@k8s-master ~]# ls /nfsdata/index.html
/nfsdata/index.html
[root@k8s-master ~]# cat /nfsdata/index.html
hello!
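Alternatively (a sketch using standard kubectl patch syntax, not part of the original walkthrough), a Released PV can be returned to Available without deleting it, by clearing its claimRef:
kubectl patch pv mypv1 --type json -p '[{"op":"remove","path":"/spec/claimRef"}]'
kubectl get pv mypv1    # STATUS goes back to Available and the PV can be claimed again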
IV. Applying PV & PVC to MySQL persistent storage
1. Create the PV and PVC
1) Create the PV with a mysql-pv.yaml file
[root@k8s-master mysqlpv]# vim mysql-pv.yaml
[root@k8s-master mysqlpv]# cat mysql-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mysql-pv
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs
  nfs:
    path: /nfsdata/mysql-pv    # remember to create this directory on the NFS server
    server: 192.168.22.139
[root@k8s-master mysqlpv]# kubectl apply -f mysql-pv.yaml
persistentvolume/mysql-pv created
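As the comment in the manifest notes, the backing directory must exist on the NFS server before a pod mounts the volume:
[root@k8s-master mysqlpv]# mkdir -p /nfsdata/mysql-pv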
2) Create the PVC with a mysql-pvc.yaml file
[root@k8s-master mysqlpv]# vim mysql-pvc.yaml
[root@k8s-master mysqlpv]# cat mysql-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pvc
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: nfs
[root@k8s-master mysqlpv]# kubectl apply -f mysql-pvc.yaml
persistentvolumeclaim/mysql-pvc created
2. Deploy MySQL
Write the mysql-pod.yaml file
[root@k8s-master mysqlpv]# vim mysql-pod.yaml
[root@k8s-master mysqlpv]# cat mysql-pod.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  ports:
  - port: 3306
    targetPort: 3306
  selector:
    app: mysql
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: daocloud.io/library/mysql:5.7.5-m15
        name: mysql
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: qinxue@123
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-pvc
[root@k8s-master mysqlpv]# kubectl apply -f mysql-pod.yaml
service/mysql created
deployment.apps/mysql created
3. Add data to MySQL
1) Confirm that the MySQL pod is scheduled on k8s-node2 (the bare mysql pod in the listing is left over from an earlier exercise)
[root@k8s-master mysqlpv]# kubectl get pod -o wide | grep mysql
mysql 1/1 Running 6 (3h27m ago) 13d 10.244.1.45 k8s-node1 <none> <none>
mysql-55c4f546d-4nkt9 1/1 Running 0 43s 10.244.2.52 k8s-node2 <none> <none>
2) Enter the container and log in to mysql
[root@k8s-master mysqlpv]# kubectl exec -it mysql-55c4f546d-4nkt9 /bin/bash
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.
root@mysql-55c4f546d-4nkt9:/# mysql -uroot -p'qinxue@123'
3) Add data
mysql> create database db1;
Query OK, 1 row affected (0.00 sec)
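To make the later consistency check more convincing, a table can be written as well (illustrative SQL, not from the original walkthrough; db1 as created above):
mysql> use db1;
mysql> create table t1 (id int, name varchar(20));
mysql> insert into t1 values (1, 'k8s');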
4. Verify data consistency
1) Delete the deployment, PVC, and PV, then recreate the PV, PVC, and deployment; because the data lives on the NFS volume, it is still there when MySQL comes back up, as sketched below;
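A sketch of that check, reusing the manifests above (the recreated pod gets a new name; substitute whatever kubectl get pod prints):
kubectl delete -f mysql-pod.yaml
kubectl delete -f mysql-pvc.yaml
kubectl delete -f mysql-pv.yaml
kubectl apply -f mysql-pv.yaml
kubectl apply -f mysql-pvc.yaml
kubectl apply -f mysql-pod.yaml
# once the new pod is Running:
kubectl exec -it <new-mysql-pod> -- mysql -uroot -p'qinxue@123' -e 'show databases;'   # db1 is still listed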
V. Dynamic PV/PVC provisioning in practice
The Dynamic Provisioning mechanism is built around the StorageClass API object.
A StorageClass declares a storage plugin (provisioner) that creates PVs automatically.
The storage plugins with built-in dynamic provisioning support are listed at:
https://kubernetes.io/docs/concepts/storage/storage-classes/
Because NFS has no built-in provisioner, we borrow an external storage plugin.
The NFS dynamic provisioning deployment is documented at:
https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client/deploy
(Note: the external-storage repository has since been archived; its successor is kubernetes-sigs/nfs-subdir-external-provisioner.)
1. Define a StorageClass (its provisioner field must match the PROVISIONER_NAME environment variable of the nfs-client-provisioner deployed in step 3 below)
[root@k8s-master pv-pvc]# vim storageclass-nfs.yaml
[root@k8s-master pv-pvc]# cat storageclass-nfs.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs
[root@k8s-master pv-pvc]# kubectl apply -f storageclass-nfs.yaml
storageclass.storage.k8s.io/managed-nfs-storage created
[root@k8s-master pv-pvc]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
managed-nfs-storage fuseim.pri/ifs Delete Immediate false 20s
2. Set up authorization
1) Because the StorageClass's automatic PV creation goes through kube-apiserver, the provisioner must be authorized.
2) Create a ServiceAccount; create a ClusterRole granting the permissions it needs (e.g. get/list/watch/create/delete on the relevant API resources); and create a ClusterRoleBinding tying the ServiceAccount to the ClusterRole. The ServiceAccount then carries those permissions, so a pod that runs under it is allowed to create PVs automatically.
[root@k8s-master pv-pvc]# vim rbac.yaml
[root@k8s-master pv-pvc]# cat rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
  resources: ["persistentvolumes"]
  verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
  resources: ["storageclasses"]
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources: ["events"]
  verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
  name: nfs-client-provisioner
  namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
[root@k8s-master pv-pvc]# kubectl apply -f rbac.yaml
serviceaccount/nfs-client-provisioner created
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner created
[root@k8s-master pv-pvc]# kubectl get sa
NAME SECRETS AGE
default 0 14d
nfs-client-provisioner 0 14s
[root@k8s-master pv-pvc]# kubectl get cr |grep nfs
error: the server doesn't have a resource type "cr"
[root@k8s-master pv-pvc]# kubectl get clusterrole |grep nfs
nfs-client-provisioner-runner 2024-08-06T04:52:23Z
[root@k8s-master pv-pvc]# kubectl get clusterrolebinding |grep nfs
run-nfs-client-provisioner ClusterRole/nfs-client-provisioner-runner 3m58s
3. Deploy the pod service that auto-creates PVs
Automatic PV creation is handled here by nfs-client-provisioner. Note that the NFS_PATH directory /opt/container_data used below must exist on the NFS server and be exported (earlier only /nfsdata was added to /etc/exports).
[root@k8s-master pv-pvc]# vim deployment-nfs.yaml
[root@k8s-master pv-pvc]# cat deployment-nfs.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
spec:
  selector:
    matchLabels:
      app: nfs-client-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      nodeName: k8s-node2
      serviceAccountName: nfs-client-provisioner   # the ServiceAccount created above
      containers:
      - name: nfs-client-provisioner
        image: quay.io/external_storage/nfs-client-provisioner:latest
        volumeMounts:
        - name: nfs-client-root
          mountPath: /persistentvolumes
        env:
        - name: PROVISIONER_NAME
          value: fuseim.pri/ifs        # must match the StorageClass provisioner
        - name: NFS_SERVER
          value: 192.168.22.139
        - name: NFS_PATH
          value: /opt/container_data
      volumes:
      - name: nfs-client-root
        nfs:
          server: 192.168.22.139
          path: /opt/container_data
[root@k8s-master pv-pvc]# kubectl apply -f deployment-nfs.yaml
deployment.apps/nfs-client-provisioner created
# nfs-client-provisioner runs as a pod inside the k8s cluster
[root@k8s-master pv-pvc]# kubectl get pod |grep nfs
nfs-client-provisioner-6c745f9d9-msrtp 1/1 Running 0 6s
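If provisioning fails later, the provisioner's log is the first place to look (pod name taken from the listing above):
kubectl logs nfs-client-provisioner-6c745f9d9-msrtp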
4. Deploy a stateful service to test automatic PV creation
The manifest follows the example at: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/
We deploy an nginx service here
[root@k8s-master pv-pvc]# cat nginx.yaml
apiVersion: v1
kind: Service
metadata:
  name: nginx
  labels:
    app: nginx
spec:
  ports:
  - port: 80
    name: web
  clusterIP: None
  selector:
    app: nginx
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: web
spec:
  serviceName: "nginx"
  replicas: 2
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: daocloud.io/library/nginx:1.13.0-alpine
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "managed-nfs-storage"
      resources:
        requests:
          storage: 1Gi
[root@k8s-master pv-pvc]# kubectl apply -f nginx.yaml
service/nginx created
statefulset.apps/web created
[root@k8s-master pv-pvc]# kubectl get pod
NAME READY STATUS RESTARTS AGE
configmap-pod 1/1 Running 5 (52m ago) 12d
configmap-test-pod 1/1 Running 5 (52m ago) 12d
mypod 1/1 Running 7 (52m ago) 13d
mysql 1/1 Running 7 (52m ago) 13d
nfs-client-provisioner-6c745f9d9-msrtp 1/1 Running 0 19m
tomcat 1/1 Running 7 (52m ago) 14d
web-0 1/1 Running 0 42s
web-1 1/1 Running 0 16s
# web-1 is created only after web-0 is up and Running
Finally, enter a container and create a file under /usr/share/nginx/html to verify persistence: after a pod is deleted, the data is still there when the pod is recreated (sketched below).
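A minimal sketch of that verification (by convention the PVCs created from the volumeClaimTemplate are named www-web-0 and www-web-1):
kubectl get pvc        # www-web-0 and www-web-1, both Bound to automatically created PVs
kubectl get pv         # PVs provisioned by fuseim.pri/ifs, reclaim policy Delete
kubectl exec -it web-0 -- sh -c 'echo web-0 > /usr/share/nginx/html/index.html'
kubectl delete pod web-0
# after the StatefulSet recreates web-0:
kubectl exec -it web-0 -- cat /usr/share/nginx/html/index.html    # still prints web-0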