Integrating OpenStack with Ceph Backend Storage
OpenStack Train is integrated with Ceph 14 (Nautilus) as the backend storage for glance, nova, and cinder. Both the OpenStack cluster and the Ceph cluster are already deployed.
Node | IP |
---|---|
controller | 192.168.200.10 |
compute | 192.168.200.20 |
storage01 | 192.168.200.30 |
storage02 | 192.168.200.31 |
storage03 | 192.168.200.32 |
Create the cinder, glance, and nova storage pools
bash
[root@storage01 ceph-cluster]# ceph osd pool create volumes 8
pool 'volumes' created
[root@storage01 ceph-cluster]# ceph osd pool create images 8
pool 'images' created
[root@storage01 ceph-cluster]# ceph osd pool create vms 8
pool 'vms' created
[root@storage01 ceph-cluster]# ceph osd lspools
4 volumes
5 images
6 vms
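On Nautilus, newly created RBD pools should also be tagged with the rbd application, otherwise `ceph -s` reports a "pool(s) do not have an application enabled" warning. A minimal sketch, run on the same admin node (the transcript above skips this step):
bash
# tag each pool with the rbd application (Nautilus warns about untagged pools)
ceph osd pool application enable volumes rbd
ceph osd pool application enable images rbd
ceph osd pool application enable vms rbd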
Create the users and keys
bash
[root@storage01 ceph-cluster]# ceph auth get-or-create client.cinder mon "allow r" osd "allow class-read object_prefix rbd_children,allow rwx pool=volumes,allow rwx pool=vms,allow rx pool=images"
[client.cinder]
key = AQDRE3tkDVGeFxAAPkGPKGlqh74pRl2dzIJXAw==
[root@storage01 ceph-cluster]# ceph auth get-or-create client.glance mon "allow r" osd "allow class-read object_prefix rbd_children,allow rwx pool=images"
[client.glance]
key = AQDaE3tkKTQCIRAAwRQ/VeIjL4G5GjmsJIbTwg==
[root@storage01 ceph-cluster]#
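Since Luminous, the same permissions can be expressed with the built-in rbd capability profiles; a hedged, equivalent alternative to the explicit caps above:
bash
# equivalent caps using rbd profiles (Luminous and later)
ceph auth get-or-create client.cinder mon 'profile rbd' osd 'profile rbd pool=volumes, profile rbd pool=vms, profile rbd-read-only pool=images'
ceph auth get-or-create client.glance mon 'profile rbd' osd 'profile rbd pool=images'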
Create the /etc/ceph directory on the OpenStack nodes (on Train, the compute node already has it by default)
bash
[root@controller ~]# mkdir /etc/ceph/
Export the glance and cinder keyrings from Ceph and import them into OpenStack
bash
[root@storage01 ceph-cluster]# ceph auth get client.glance -o ceph.client.glance.keyring
exported keyring for client.glance
[root@storage01 ceph-cluster]# ceph auth get client.cinder -o ceph.client.cinder.keyring
exported keyring for client.cinder
[root@storage01 ceph-cluster]#
bash
[root@storage01 ceph-cluster]# scp ceph.client.glance.keyring root@192.168.200.10:/etc/ceph/
[root@storage01 ceph-cluster]# scp ceph.client.glance.keyring root@192.168.200.20:/etc/ceph/
[root@storage01 ceph-cluster]# scp ceph.client.cinder.keyring root@192.168.200.10:/etc/ceph/
[root@storage01 ceph-cluster]# scp ceph.client.cinder.keyring root@192.168.200.20:/etc/ceph/
Copy the Ceph configuration file to the OpenStack nodes
bash
[root@storage01 ceph-cluster]# scp ceph.conf root@192.168.200.20:/etc/ceph/
[root@storage01 ceph-cluster]# scp ceph.conf root@192.168.200.10:/etc/ceph/
Add the libvirt secret on the compute node
bash
[root@compute ~]# cd /etc/ceph/
[root@compute ceph]# UUID=$(uuidgen)
bash
cat >> secret.xml << EOF
<secret ephemeral='no' private='no'>
<uuid>$UUID</uuid>
<usage type='ceph'>
<name>client.cinder secret</name>
</usage>
</secret>
EOF
bash
[root@compute ceph]# virsh secret-define --file secret.xml
Secret 8ea0cbae-86a9-4c1c-9f03-fd4b144b8839 created
bash
[root@compute ceph]# cat ceph.client.cinder.keyring
[client.cinder]
key = AQDRE3tkDVGeFxAAPkGPKGlqh74pRl2dzIJXAw==
caps mon = "allow r"
caps osd = "allow class-read object_prefix rbd_children,allow rwx pool=volumes,allow rwx pool=vms,allow rx pool=images"
# set the secret value from the cinder key
[root@compute ceph]# virsh secret-set-value --secret ${UUID} --base64 $(cat ceph.client.cinder.keyring | grep key | awk -F ' ' '{print $3}')
Secret value set
[root@compute ceph]# virsh secret-list
UUID Usage
--------------------------------------------------------------------------------
8ea0cbae-86a9-4c1c-9f03-fd4b144b8839 ceph client.cinder secret
[root@compute ceph]#
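Every compute node must carry this same secret under the same UUID. To double-check the stored value, it can be read back and compared against the key Ceph reports; a hedged verification step not in the original transcript:
bash
# print the secret libvirt stored and compare it with the cinder key
[root@compute ceph]# virsh secret-get-value --secret ${UUID}
# reference value, run on a ceph admin node:
# ceph auth get-key client.cinder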
Install the ceph-common client tools; the compute node needs the Ceph yum repository added first
bash
[root@controller ~]# yum install -y ceph-common
bash
[root@compute ~]# yum install -y ceph-common
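With ceph-common installed and the keyrings in place, each node can already reach the cluster; a quick hedged sanity check (pool and user names as created above):
bash
# the glance key should be able to list the images pool from the controller
[root@controller ~]# rbd ls images --id glance
# the cinder key should be able to list the volumes pool from the compute node
[root@compute ~]# rbd ls volumes --id cinder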
Integrate the glance backend storage
Change the ownership of the keyring file
bash
[root@controller ~]# chown glance:glance /etc/ceph/ceph.client.glance.keyring
bash
[root@controller ~]# vi /etc/glance/glance-api.conf
ini
[glance_store]
#stores = file,http
#default_store = file
#filesystem_store_datadir = /var/lib/glance/images/
stores = rbd
default_store = rbd
show_image_direct_url = True
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8
Restart the service
bash
[root@controller ~]# systemctl restart openstack-glance*
Test
bash
[root@controller ~]# openstack image create --disk-format qcow2 cirros-ceph --file /opt/iaas/images/cirros-0.3.4-x86_64-disk.img
+------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| checksum | ee1eca47dc88f4879d8a229cc70a07c6 |
| container_format | bare |
| created_at | 2023-06-03T14:36:37Z |
| disk_format | qcow2 |
| file | /v2/images/c015136d-f3e0-4150-abd2-f7fd3fd6dbd7/file |
| id | c015136d-f3e0-4150-abd2-f7fd3fd6dbd7 |
| min_disk | 0 |
| min_ram | 0 |
| name | cirros-ceph |
| owner            | fb7a2a10b81f43fcbf4ccef895c56937                                                                                                                                                           |
| properties       | os_hash_algo='sha512', os_hash_value='1b03ca1bc3fafe448b90583c12f367949f8b0e665685979d95b004e48574b953316799e23240f4f739d1b5eb4c4ca24d38fdc6f4f9d8247a2bc64db25d6bbdb2', os_hidden='False' |
| protected        | False                                                                                                                                                                                      |
| schema           | /v2/schemas/image                                                                                                                                                                          |
| size             | 13287936                                                                                                                                                                                   |
| status           | active                                                                                                                                                                                     |
| tags             |                                                                                                                                                                                            |
| updated_at       | 2023-06-03T14:36:42Z                                                                                                                                                                       |
| virtual_size     | None                                                                                                                                                                                       |
| visibility       | shared                                                                                                                                                                                     |
+------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
[root@controller ~]#
Check on the Ceph node
bash
[root@storage01 ceph]# rbd ls images
c015136d-f3e0-4150-abd2-f7fd3fd6dbd7
The RBD image name matches the ID of the image created in OpenStack.
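For a closer look at how the image is stored, `rbd info` prints the object layout (a hedged extra check; output omitted):
bash
# inspect the uploaded image, using the ID from the transcript above
[root@storage01 ceph]# rbd info images/c015136d-f3e0-4150-abd2-f7fd3fd6dbd7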
Integrate the nova backend storage
Modify the compute node configuration file
bash
[root@compute ceph]# vi /etc/nova/nova.conf
ini
[DEFAULT]
.....
[libvirt]
live_migration_flag = "VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE"
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = 8ea0cbae-86a9-4c1c-9f03-fd4b144b8839
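The upstream Ceph/OpenStack integration guide also recommends writeback caching for RBD-backed disks; a hedged, optional addition to the same [libvirt] section (not in the original configuration):
ini
[libvirt]
disk_cachemodes = "network=writeback"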
Restart the service
bash
[root@compute ceph]# systemctl restart openstack-nova-compute.service
Test
bash
[root@controller ~]# openstack server list
+--------------------------------------+------+--------+--------------------+-------------+---------+
| ID | Name | Status | Networks | Image | Flavor |
+--------------------------------------+------+--------+--------------------+-------------+---------+
| d999b371-9e98-4cdb-98b8-407e900c631c | test | ACTIVE | int-net=10.0.0.206 | cirros-ceph | m1.tiny |
+--------------------------------------+------+--------+--------------------+-------------+---------+
bash
[root@storage01 ceph]# rbd ls vms
d999b371-9e98-4cdb-98b8-407e900c631c_disk
Integrate the cinder backend storage
Change the keyring ownership on the controller and compute nodes
bash
[root@controller ~]# chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
[root@compute ~]# chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring
Modify the configuration file on the controller node
bash
[root@controller ~]# vi /etc/cinder/cinder.conf
ini
[DEFAULT]
.....
default_volume_type = ceph
Restart the services
bash
[root@controller ~]# systemctl restart openstack-cinder-api.service openstack-cinder-scheduler.service httpd
Modify the configuration file on the compute node
bash
[root@compute ceph]# vi /etc/cinder/cinder.conf
ini
[DEFAULT]
.....
enabled_backends = ceph,lvm
[ceph]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
glance_api_version = 2
rbd_user = cinder
rbd_secret_uuid = 8ea0cbae-86a9-4c1c-9f03-fd4b144b8839
volume_backend_name = ceph
Restart the service
bash
[root@compute ceph]# systemctl restart openstack-cinder-volume.service
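Before creating volumes, it is worth confirming that the new backend registered with the scheduler; a hedged check from the controller (host names differ per deployment):
bash
# cinder-volume should now list a host like compute@ceph with state "up"
[root@controller ~]# openstack volume service list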
Test
bash
[root@controller ~]# openstack volume type create ceph
+-------------+--------------------------------------+
| Field | Value |
+-------------+--------------------------------------+
| description | None |
| id | c357956b-2521-439d-af59-ae2e9cc5c5aa |
| is_public | True |
| name | ceph |
+-------------+--------------------------------------+
[root@controller ~]# openstack volume create ceph-test --type ceph --size 1
+---------------------+--------------------------------------+
| Field | Value |
+---------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| consistencygroup_id | None |
| created_at | 2023-06-03T15:36:09.000000 |
| description | None |
| encrypted | False |
| id | fdf6c10a-31dc-4245-ae7b-b7bc0e24477d |
| migration_status | None |
| multiattach | False |
| name | ceph-test |
| properties | |
| replication_status | None |
| size | 1 |
| snapshot_id | None |
| source_volid | None |
| status | creating |
| type | ceph |
| updated_at | None |
| user_id | 9ff19a0bedd5461aa3fff4a80f10054c |
+---------------------+--------------------------------------+
[root@controller ~]#
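Because two backends are enabled (ceph and lvm), it is safer to pin the ceph volume type to the backend name declared in cinder.conf; a hedged extra step the transcript skips:
bash
# route the "ceph" type to the backend whose volume_backend_name is "ceph"
[root@controller ~]# openstack volume type set ceph --property volume_backend_name=ceph
On the Ceph side, the new volume appears in the volumes pool: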
bash
[root@storage01 ceph]# rbd ls volumes
volume-fdf6c10a-31dc-4245-ae7b-b7bc0e24477d
The RBD image name carries the same volume ID; the integration is verified.
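As a final end-to-end check, the Ceph-backed volume can be attached to the earlier test instance (instance and volume names taken from the transcripts above):
bash
# attach the new volume to the running instance and confirm the attachment
[root@controller ~]# openstack server add volume test ceph-test
[root@controller ~]# openstack volume show ceph-test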