架构图
在ceph上创建存储池
sh
[root@node1 ~]# ceph osd pool create kubernetes
pool 'kubernetes' created
新创建的存储池在使用之前需要初始化,使用rbd工具进行初始化
sh
rbd pool init kubernetes
配置ceph-csi
设置客户端认证
为kubernetes和ceph-csi创建一个新用户
sh
[root@node1 ~]# ceph auth get-or-create client.kubernetes mon 'profile rbd' osd 'profile rbd pool=kubernetes' mgr 'profile rbd pool=kubernetes'
[client.kubernetes]
key = AQCslENlL+ohNhAACL/fr6vv7XX4CkkKjq7Mug==
创建ceph-csi使用的configMap
ceph-csi需要一个存储在k8s中的ConfigMap,这个ConfigMap中保存了ceph集群的fsid和monitors的地址
yaml
# csi-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "9e7b59a6-c3ee-43d4-9baf-60d5bb05484a",
        "monitors": [
          "192.168.0.100:6789",
          "192.168.0.164:6789",
          "192.168.0.184:6789",
          "192.168.0.101:6789",
          "192.168.0.110:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-config
应用yaml文件
sh
[root@node1 ceph-csi]# kubectl apply -f csi-config-map.yaml
configmap/ceph-csi-config created
创建KMS provider所使用的ConfigMap文件
最新版本的ceph-csi还需要一个额外的ConfigMap对象来定义密钥管理服务(KMS)提供程序的详细信息。如果没有设置KMS,在csi-kms-config-map中放置一个空配置即可
yaml
# csi-kms-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    {}
metadata:
  name: ceph-csi-encryption-kms-config
应用yaml文件
sh
[root@node1 ceph-csi]# kubectl apply -f csi-kms-config-map.yaml
configmap/ceph-csi-encryption-kms-config created
创建一个保存Ceph配置的ConfigMap
最新版本的Ceph-CSI还需要另一个ConfigMap对象来定义Ceph配置,以便添加到CSI容器内的ceph.conf文件中
yaml
# ceph-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
data:
  ceph.conf: |
    [global]
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
  # keyring is a required key and its value should be empty
  keyring: |
metadata:
  name: ceph-config
应用yaml文件
sh
[root@node1 ceph-csi]# kubectl apply -f ceph-config-map.yaml
configmap/ceph-config created
创建ceph-csi cephx secret
Ceph-csi需要cephx凭据才能与Ceph集群通信
创建yaml文件
yaml
# csi-rbd-secret.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: default
stringData:
  userID: kubernetes
  userKey: AQCslENlL+ohNhAACL/fr6vv7XX4CkkKjq7Mug==
应用yaml文件
sh
kubectl apply -f csi-rbd-secret.yaml
配置ceph-csi plugins
创建需要的ServiceAccount和RBAC ClusterRole/ClusterRoleBinding
sh
kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml
kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-nodeplugin-rbac.yaml
创建ceph-csi provisioner 和node plugins
sh
wget https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
wget https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-rbdplugin.yaml
由于yaml文件里面的镜像是国外的地址源,因此这里改成自己的镜像地址
查看yaml文件里面所使用的镜像
sh
[root@node1 ceph-csi]# grep 'image:' csi-rbdplugin-provisioner.yaml
image: registry.k8s.io/sig-storage/csi-provisioner:v3.6.0
image: registry.k8s.io/sig-storage/csi-snapshotter:v6.3.0
image: registry.k8s.io/sig-storage/csi-attacher:v4.4.0
image: gcr.io/k8s-staging-sig-storage/csi-resizer:v1.9.0
image: quay.io/cephcsi/cephcsi:canary
image: quay.io/cephcsi/cephcsi:canary
image: quay.io/cephcsi/cephcsi:canary
[root@node1 ceph-csi]# grep 'image:' csi-rbdplugin.yaml
image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.0
image: quay.io/cephcsi/cephcsi:canary
image: quay.io/cephcsi/cephcsi:canary
将上面文件中的镜像替换成aliyun中保存的镜像
sh
sed -i 's#registry.k8s.io/sig-storage/csi-provisioner:v3.6.0#registry.cn-hangzhou.aliyuncs.com/postkarte/csi-provisioner:v3.6.0#g' csi-rbdplugin-provisioner.yaml
sed -i 's#registry.k8s.io/sig-storage/csi-snapshotter:v6.3.0#registry.cn-hangzhou.aliyuncs.com/postkarte/csi-snapshotter:v6.3.0#g' csi-rbdplugin-provisioner.yaml
sed -i 's#registry.k8s.io/sig-storage/csi-attacher:v4.4.0#registry.cn-hangzhou.aliyuncs.com/postkarte/csi-attacher:v4.4.0#g' csi-rbdplugin-provisioner.yaml
sed -i 's#gcr.io/k8s-staging-sig-storage/csi-resizer:v1.9.0#registry.cn-hangzhou.aliyuncs.com/postkarte/csi-resizer:v1.9.0#g' csi-rbdplugin-provisioner.yaml
sed -i 's#quay.io/cephcsi/cephcsi:canary#registry.cn-hangzhou.aliyuncs.com/postkarte/cephcsi:canary#g' csi-rbdplugin-provisioner.yaml
sed -i 's#registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.9.0#registry.cn-hangzhou.aliyuncs.com/postkarte/csi-node-driver-registrar:v2.9.0#g' csi-rbdplugin.yaml
sed -i 's#quay.io/cephcsi/cephcsi:canary#registry.cn-hangzhou.aliyuncs.com/postkarte/cephcsi:canary#g' csi-rbdplugin.yaml
应用yaml文件
sh
kubectl apply -f csi-rbdplugin-provisioner.yaml
kubectl apply -f csi-rbdplugin.yaml
创建storageclass
yaml
# csi-rbd-sc.yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: 9e7b59a6-c3ee-43d4-9baf-60d5bb05484a
  pool: kubernetes
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard
应用yaml文件
sh
[root@node1 ceph-csi]# kubectl apply -f csi-rbd-sc.yaml
storageclass.storage.k8s.io/csi-rbd-sc created
部署应用测试
这里部署一个gitlab进行测试