## Create the pools
\[root@ceph-0 \~\]# **ceph osd pool create cephfs_data 64**
pool 'cephfs_data' created
\[root@ceph-0 \~\]# **ceph osd pool create cephfs_metadata 32**
pool 'cephfs_metadata' created
Creating cephfs_metadata with 64 PGs reported an error, so 32 was used instead.

Official documentation note: the metadata pool will typically hold at most a few gigabytes of data, so a smaller PG count is generally recommended; 64 or 128 is commonly used in practice for large clusters.
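To double-check the PG counts that actually took effect, something like the following can be run on any mon node (a verification step added here, not part of the original session):

ceph osd pool get cephfs_data pg_num        # expect: pg_num: 64
ceph osd pool get cephfs_metadata pg_num    # expect: pg_num: 32
ceph osd pool ls detail                     # shows pg_num, pgp_num and size for every pool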
### Pool list
\[root@ceph-0 \~\]# **ceph osd pool ls**
vdbench
cephfs_data
cephfs_metadata
## Create the filesystem
\[root@ceph-0 \~\]# **ceph fs new cephfs cephfs_metadata cephfs_data**
new fs with metadata pool 4 and data pool 3
\[root@ceph-0 \~\]# **ceph fs ls**
name: cephfs, metadata pool: cephfs_metadata, data pools: \[cephfs_data \]
### Filesystem status
\[root@ceph-0 \~\]# **ceph fs status cephfs**
cephfs - 0 clients
======
+------+-------+-----+----------+-----+------+
\| Rank \| State \| MDS \| Activity \| dns \| inos \|
+------+-------+-----+----------+-----+------+
+------+-------+-----+----------+-----+------+
+-----------------+----------+-------+-------+
\| Pool \| type \| used \| avail \|
+-----------------+----------+-------+-------+
\| cephfs_metadata \| metadata \| 0 \| 103G \|
\| cephfs_data \| data \| 0 \| 103G \|
+-----------------+----------+-------+-------+
+-------------+
\| Standby MDS \|
+-------------+
+-------------+
+---------+---------+
\| version \| daemons \|
+---------+---------+
+---------+---------+
### Cluster status
\[root@ceph-0 \~\]# **ceph -s**
cluster:
id: ff72b496-d036-4f1b-b2ad-55358f3c16cb
health: HEALTH_ERR
1 filesystem is offline
1 filesystem is online with fewer MDS than max_mds
mon ceph-0 is low on available space
services:
mon: 4 daemons, quorum ceph-3,ceph-1,ceph-0,ceph-2 (age 4h)
mgr: ceph-0(active, since 45h), standbys: ceph-1, ceph-3, ceph-2
mds: cephfs:0
osd: 4 osds: 3 up (since 45h), 3 in (since 44h)
data:
pools: 3 pools, 224 pgs
objects: 4.30k objects, 17 GiB
usage: 53 GiB used, 247 GiB / 300 GiB avail
pgs: 224 active+clean
## Check the MDS status (not OK)
\[root@ceph-0 \~\]# **ceph mds stat**
cephfs:0
### No MDS is ready

The MDS service had not been deployed here via ceph-deploy.
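A quick way to confirm that on a node (my own check, not from the original log):

systemctl status ceph-mds@ceph-0    # unit missing or inactive => no MDS was ever set up on this host
ceph mds stat                       # "cephfs:0" => no MDS daemon has joined the filesystem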

## Deploy the MDS with ceph-deploy
ceph-deploy mds create ceph-0 ceph-1 ceph-2 ceph-3
Deployment log (success):
[root@ceph-0 ~]# cd /etc/ceph/
[root@ceph-0 ceph]# ceph-deploy mds create ceph-0 ceph-1 ceph-2 ceph-3
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy mds create ceph-0 ceph-1 ceph-2 ceph-3
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] subcommand : create
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf :
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] func :
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] mds : [('ceph-0', 'ceph-0'), ('ceph-1', 'ceph-1'), ('ceph-2', 'ceph-2'), ('ceph-3', 'ceph-3')]
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.mds][DEBUG ] Deploying mds, cluster ceph hosts ceph-0:ceph-0 ceph-1:ceph-1 ceph-2:ceph-2 ceph-3:ceph-3
[ceph-0][DEBUG ] connected to host: ceph-0
[ceph-0][DEBUG ] detect platform information from remote host
21.10U3 LTS
bclinux
[ceph-0][DEBUG ] detect machine type
[ceph_deploy.mds][INFO ] Distro info: bclinux 21.10U3 21.10U3 LTS
[ceph_deploy.mds][DEBUG ] remote host will use systemd
[ceph_deploy.mds][DEBUG ] deploying mds bootstrap to ceph-0
[ceph-0][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph-0][WARNIN] mds keyring does not exist yet, creating one
[ceph-0][DEBUG ] create a keyring file
[ceph-0][DEBUG ] create path if it doesn't exist
[ceph-0][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.ceph-0 osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-ceph-0/keyring
[ceph-0][INFO ] Running command: systemctl enable ceph-mds@ceph-0
[ceph-0][WARNIN] Created symlink /etc/systemd/system/ceph-mds.target.wants/[email protected] → /usr/lib/systemd/system/[email protected].
[ceph-0][INFO ] Running command: systemctl start ceph-mds@ceph-0
[ceph-0][INFO ] Running command: systemctl enable ceph.target
[ceph-1][DEBUG ] connected to host: ceph-1
[ceph-1][DEBUG ] detect platform information from remote host
21.10U3 LTS
bclinux
[ceph-1][DEBUG ] detect machine type
[ceph_deploy.mds][INFO ] Distro info: bclinux 21.10U3 21.10U3 LTS
[ceph_deploy.mds][DEBUG ] remote host will use systemd
[ceph_deploy.mds][DEBUG ] deploying mds bootstrap to ceph-1
[ceph-1][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph-1][WARNIN] mds keyring does not exist yet, creating one
[ceph-1][DEBUG ] create a keyring file
[ceph-1][DEBUG ] create path if it doesn't exist
[ceph-1][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.ceph-1 osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-ceph-1/keyring
[ceph-1][INFO ] Running command: systemctl enable ceph-mds@ceph-1
[ceph-1][WARNIN] Created symlink /etc/systemd/system/ceph-mds.target.wants/[email protected] → /usr/lib/systemd/system/[email protected].
[ceph-1][INFO ] Running command: systemctl start ceph-mds@ceph-1
[ceph-1][INFO ] Running command: systemctl enable ceph.target
[ceph-2][DEBUG ] connected to host: ceph-2
[ceph-2][DEBUG ] detect platform information from remote host
21.10U3 LTS
bclinux
[ceph-2][DEBUG ] detect machine type
[ceph_deploy.mds][INFO ] Distro info: bclinux 21.10U3 21.10U3 LTS
[ceph_deploy.mds][DEBUG ] remote host will use systemd
[ceph_deploy.mds][DEBUG ] deploying mds bootstrap to ceph-2
[ceph-2][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph-2][WARNIN] mds keyring does not exist yet, creating one
[ceph-2][DEBUG ] create a keyring file
[ceph-2][DEBUG ] create path if it doesn't exist
[ceph-2][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.ceph-2 osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-ceph-2/keyring
[ceph-2][INFO ] Running command: systemctl enable ceph-mds@ceph-2
[ceph-2][WARNIN] Created symlink /etc/systemd/system/ceph-mds.target.wants/[email protected] → /usr/lib/systemd/system/[email protected].
[ceph-2][INFO ] Running command: systemctl start ceph-mds@ceph-2
[ceph-2][INFO ] Running command: systemctl enable ceph.target
[ceph-3][DEBUG ] connected to host: ceph-3
[ceph-3][DEBUG ] detect platform information from remote host
21.10U3 LTS
bclinux
[ceph-3][DEBUG ] detect machine type
[ceph_deploy.mds][INFO ] Distro info: bclinux 21.10U3 21.10U3 LTS
[ceph_deploy.mds][DEBUG ] remote host will use systemd
[ceph_deploy.mds][DEBUG ] deploying mds bootstrap to ceph-3
[ceph-3][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph-3][WARNIN] mds keyring does not exist yet, creating one
[ceph-3][DEBUG ] create a keyring file
[ceph-3][DEBUG ] create path if it doesn't exist
[ceph-3][INFO ] Running command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.ceph-3 osd allow rwx mds allow mon allow profile mds -o /var/lib/ceph/mds/ceph-ceph-3/keyring
[ceph-3][INFO ] Running command: systemctl enable ceph-mds@ceph-3
[ceph-3][WARNIN] Created symlink /etc/systemd/system/ceph-mds.target.wants/[email protected] → /usr/lib/systemd/system/[email protected].
[ceph-3][INFO ] Running command: systemctl start ceph-mds@ceph-3
[ceph-3][INFO ] Running command: systemctl enable ceph.target
[root@ceph-0 ceph]#
### MDS status is now normal
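After the four MDS daemons come up, the status should look roughly like the following (the exact active/standby assignment varies per cluster):

ceph mds stat
# cephfs:1 {0=ceph-0=up:active} 3 up:standby
ceph fs status cephfs    # the Rank table now lists one active MDS, with the standby daemons below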

## Prepare a client VM
[root@ceph-0 ~]# rsync -avr ceph-14.2.10-rpms [email protected]:~/

### Install only the Ceph client
Install liboath as well. An external yum repository needs to be configured first. Installation log (success):
\[root@ceph-client aarch64\]# **yum install -y ceph-common-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> librados2-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> libcephfs2-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> librbd1-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> python-ceph-argparse-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> python3-ceph-argparse-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> python3-cephfs-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> python3-rados-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> python3-rbd-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> librgw2-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> python3-rgw-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> python-rgw-14.2.10-0.oe1.bclinux.aarch64.rpm**
Unable to connect to Registration Management Service
Last metadata expiration check: 0:08:18 ago on Mon 13 Nov 2023 03:08:56 PM CST.
Dependencies resolved.
===========================================================================================================================================================================================================================================================================
Package Architecture Version Repository Size
===========================================================================================================================================================================================================================================================================
Installing:
ceph-common aarch64 14.2.10-0.oe1.bclinux @commandline 17 M
libcephfs2 aarch64 14.2.10-0.oe1.bclinux @commandline 436 k
librgw2 aarch64 14.2.10-0.oe1.bclinux @commandline 4.5 M
python-ceph-argparse aarch64 14.2.10-0.oe1.bclinux @commandline 39 k
python-rgw aarch64 14.2.10-0.oe1.bclinux @commandline 99 k
python3-ceph-argparse aarch64 14.2.10-0.oe1.bclinux @commandline 39 k
python3-cephfs aarch64 14.2.10-0.oe1.bclinux @commandline 113 k
python3-rados aarch64 14.2.10-0.oe1.bclinux @commandline 197 k
python3-rbd aarch64 14.2.10-0.oe1.bclinux @commandline 181 k
python3-rgw aarch64 14.2.10-0.oe1.bclinux @commandline 81 k
Installing dependencies:
jemalloc aarch64 5.1.0-4.oe1 everything 169 k
jemalloc-help aarch64 5.1.0-4.oe1 everything 54 k
leveldb aarch64 1.20-4.oe1 everything 150 k
Downgrading:
librados2 aarch64 14.2.10-0.oe1.bclinux @commandline 3.0 M
librbd1 aarch64 14.2.10-0.oe1.bclinux @commandline 1.4 M
Transaction Summary
===========================================================================================================================================================================================================================================================================
Install 13 Packages
Downgrade 2 Packages
Total size: 28 M
Total download size: 373 k
Downloading Packages:
(1/3): jemalloc-help-5.1.0-4.oe1.aarch64.rpm 3.5 MB/s \| 54 kB 00:00
(2/3): leveldb-1.20-4.oe1.aarch64.rpm 4.1 MB/s \| 150 kB 00:00
(3/3): jemalloc-5.1.0-4.oe1.aarch64.rpm 3.5 MB/s \| 169 kB 00:00
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Total 7.3 MB/s \| 373 kB 00:00
Running transaction check
Transaction check succeeded.
Running transaction test
Transaction test succeeded.
Running transaction
Preparing : 1/1
Downgrading : librados2-14.2.10-0.oe1.bclinux.aarch64 1/17
Running scriptlet: librados2-14.2.10-0.oe1.bclinux.aarch64 1/17
Installing : python3-rados-14.2.10-0.oe1.bclinux.aarch64 2/17
Downgrading : librbd1-14.2.10-0.oe1.bclinux.aarch64 3/17
Running scriptlet: librbd1-14.2.10-0.oe1.bclinux.aarch64 3/17
Installing : libcephfs2-14.2.10-0.oe1.bclinux.aarch64 4/17
Running scriptlet: libcephfs2-14.2.10-0.oe1.bclinux.aarch64 4/17
Installing : librgw2-14.2.10-0.oe1.bclinux.aarch64 5/17
Running scriptlet: librgw2-14.2.10-0.oe1.bclinux.aarch64 5/17
Installing : python3-rgw-14.2.10-0.oe1.bclinux.aarch64 6/17
Installing : python3-rbd-14.2.10-0.oe1.bclinux.aarch64 7/17
Installing : python3-ceph-argparse-14.2.10-0.oe1.bclinux.aarch64 8/17
Installing : python3-cephfs-14.2.10-0.oe1.bclinux.aarch64 9/17
Installing : python-ceph-argparse-14.2.10-0.oe1.bclinux.aarch64 10/17
Installing : leveldb-1.20-4.oe1.aarch64 11/17
Running scriptlet: leveldb-1.20-4.oe1.aarch64 11/17
Installing : jemalloc-help-5.1.0-4.oe1.aarch64 12/17
Installing : jemalloc-5.1.0-4.oe1.aarch64 13/17
Running scriptlet: jemalloc-5.1.0-4.oe1.aarch64 13/17
Running scriptlet: ceph-common-14.2.10-0.oe1.bclinux.aarch64 14/17
Installing : ceph-common-14.2.10-0.oe1.bclinux.aarch64 14/17
warning: user ceph does not exist - using root
warning: group ceph does not exist - using root
warning: user ceph does not exist - using root
warning: group ceph does not exist - using root
Running scriptlet: ceph-common-14.2.10-0.oe1.bclinux.aarch64 14/17
Installing : python-rgw-14.2.10-0.oe1.bclinux.aarch64 15/17
Cleanup : librbd1-2:12.2.8-16.oe1.aarch64 16/17
Running scriptlet: librbd1-2:12.2.8-16.oe1.aarch64 16/17
Cleanup : librados2-2:12.2.8-16.oe1.aarch64 17/17
Running scriptlet: librados2-2:12.2.8-16.oe1.aarch64 17/17
Verifying : librados2-14.2.10-0.oe1.bclinux.aarch64 1/17
Verifying : librados2-2:12.2.8-16.oe1.aarch64 2/17
Verifying : librbd1-14.2.10-0.oe1.bclinux.aarch64 3/17
Verifying : librbd1-2:12.2.8-16.oe1.aarch64 4/17
Verifying : jemalloc-5.1.0-4.oe1.aarch64 5/17
Verifying : jemalloc-help-5.1.0-4.oe1.aarch64 6/17
Verifying : leveldb-1.20-4.oe1.aarch64 7/17
Verifying : ceph-common-14.2.10-0.oe1.bclinux.aarch64 8/17
Verifying : libcephfs2-14.2.10-0.oe1.bclinux.aarch64 9/17
Verifying : python-ceph-argparse-14.2.10-0.oe1.bclinux.aarch64 10/17
Verifying : python3-ceph-argparse-14.2.10-0.oe1.bclinux.aarch64 11/17
Verifying : python3-cephfs-14.2.10-0.oe1.bclinux.aarch64 12/17
Verifying : python3-rados-14.2.10-0.oe1.bclinux.aarch64 13/17
Verifying : python3-rbd-14.2.10-0.oe1.bclinux.aarch64 14/17
Verifying : librgw2-14.2.10-0.oe1.bclinux.aarch64 15/17
Verifying : python3-rgw-14.2.10-0.oe1.bclinux.aarch64 16/17
Verifying : python-rgw-14.2.10-0.oe1.bclinux.aarch64 17/17
Downgraded:
librados2-14.2.10-0.oe1.bclinux.aarch64 librbd1-14.2.10-0.oe1.bclinux.aarch64
Installed:
ceph-common-14.2.10-0.oe1.bclinux.aarch64 jemalloc-5.1.0-4.oe1.aarch64 jemalloc-help-5.1.0-4.oe1.aarch64 leveldb-1.20-4.oe1.aarch64 libcephfs2-14.2.10-0.oe1.bclinux.aarch64
librgw2-14.2.10-0.oe1.bclinux.aarch64 python-ceph-argparse-14.2.10-0.oe1.bclinux.aarch64 python-rgw-14.2.10-0.oe1.bclinux.aarch64 python3-ceph-argparse-14.2.10-0.oe1.bclinux.aarch64 python3-cephfs-14.2.10-0.oe1.bclinux.aarch64
python3-rados-14.2.10-0.oe1.bclinux.aarch64 python3-rbd-14.2.10-0.oe1.bclinux.aarch64 python3-rgw-14.2.10-0.oe1.bclinux.aarch64
Complete!
#### Fault: ImportError: No module named rados
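The ceph CLI in this build appears to run under python2 (the later python2-prettytable fix points the same way), and the first install only pulled in python3-rados. The error can be reproduced directly (my check, not from the original session):

python2 -c "import rados"
# ImportError: No module named rados   -> fixed by installing python-rados below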

#### Install again
\[root@ceph-client aarch64\]# **yum install -y ceph-common-14.2.10-0.oe1.bclinux.aarch64.rpm ceph-base-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> librados2-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> librados-devel-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> libradospp-devel-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> librbd1-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> librbd-devel-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> librgw2-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> librgw-devel-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> python3-rados-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> python3-rbd-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> python3-rgw-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> python-rados-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> python-rbd-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> python-rgw-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> rados-objclass-devel-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> rbd-fuse-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> rbd-mirror-14.2.10-0.oe1.bclinux.aarch64.rpm \\
\> rbd-nbd-14.2.10-0.oe1.bclinux.aarch64.rpm**
Unable to connect to Registration Management Service
Last metadata expiration check: 0:14:26 ago on Mon 13 Nov 2023 03:08:56 PM CST.
Package ceph-common-14.2.10-0.oe1.bclinux.aarch64 is already installed.
Package librados2-14.2.10-0.oe1.bclinux.aarch64 is already installed.
Package librbd1-14.2.10-0.oe1.bclinux.aarch64 is already installed.
Package librgw2-14.2.10-0.oe1.bclinux.aarch64 is already installed.
Package python3-rados-14.2.10-0.oe1.bclinux.aarch64 is already installed.
Package python3-rbd-14.2.10-0.oe1.bclinux.aarch64 is already installed.
Package python3-rgw-14.2.10-0.oe1.bclinux.aarch64 is already installed.
Package python-rgw-14.2.10-0.oe1.bclinux.aarch64 is already installed.
Dependencies resolved.
===========================================================================================================================================================================================================================================================================
Package Architecture Version Repository Size
===========================================================================================================================================================================================================================================================================
Installing:
ceph-base aarch64 14.2.10-0.oe1.bclinux @commandline 4.5 M
librados-devel aarch64 14.2.10-0.oe1.bclinux @commandline 83 k
libradospp-devel aarch64 14.2.10-0.oe1.bclinux @commandline 31 k
librbd-devel aarch64 14.2.10-0.oe1.bclinux @commandline 21 k
librgw-devel aarch64 14.2.10-0.oe1.bclinux @commandline 10 k
python-rados aarch64 14.2.10-0.oe1.bclinux @commandline 227 k
python-rbd aarch64 14.2.10-0.oe1.bclinux @commandline 212 k
rados-objclass-devel aarch64 14.2.10-0.oe1.bclinux @commandline 7.8 k
rbd-fuse aarch64 14.2.10-0.oe1.bclinux @commandline 66 k
rbd-mirror aarch64 14.2.10-0.oe1.bclinux @commandline 2.0 M
rbd-nbd aarch64 14.2.10-0.oe1.bclinux @commandline 130 k
Transaction Summary
===========================================================================================================================================================================================================================================================================
Install 11 Packages
Total size: 7.3 M
Installed size: 31 M
Downloading Packages:
Running transaction check
Transaction check succeeded.
Running transaction test
Transaction test succeeded.
Running transaction
Preparing : 1/1
Installing : librados-devel-14.2.10-0.oe1.bclinux.aarch64 1/11
Installing : libradospp-devel-14.2.10-0.oe1.bclinux.aarch64 2/11
Installing : python-rados-14.2.10-0.oe1.bclinux.aarch64 3/11
Installing : ceph-base-14.2.10-0.oe1.bclinux.aarch64 4/11
warning: user ceph does not exist - using root
warning: group ceph does not exist - using root
warning: user ceph does not exist - using root
warning: group ceph does not exist - using root
warning: user ceph does not exist - using root
warning: group ceph does not exist - using root
warning: user ceph does not exist - using root
warning: group ceph does not exist - using root
warning: user ceph does not exist - using root
warning: group ceph does not exist - using root
warning: user ceph does not exist - using root
warning: group ceph does not exist - using root
warning: user ceph does not exist - using root
warning: group ceph does not exist - using root
warning: user ceph does not exist - using root
warning: group ceph does not exist - using root
warning: user ceph does not exist - using root
warning: group ceph does not exist - using root
Running scriptlet: ceph-base-14.2.10-0.oe1.bclinux.aarch64 4/11
Installing : rbd-mirror-14.2.10-0.oe1.bclinux.aarch64 5/11
Running scriptlet: rbd-mirror-14.2.10-0.oe1.bclinux.aarch64 5/11
Installing : python-rbd-14.2.10-0.oe1.bclinux.aarch64 6/11
Installing : librbd-devel-14.2.10-0.oe1.bclinux.aarch64 7/11
Installing : rados-objclass-devel-14.2.10-0.oe1.bclinux.aarch64 8/11
Installing : librgw-devel-14.2.10-0.oe1.bclinux.aarch64 9/11
Installing : rbd-nbd-14.2.10-0.oe1.bclinux.aarch64 10/11
Installing : rbd-fuse-14.2.10-0.oe1.bclinux.aarch64 11/11
Running scriptlet: rbd-fuse-14.2.10-0.oe1.bclinux.aarch64 11/11
Verifying : ceph-base-14.2.10-0.oe1.bclinux.aarch64 1/11
Verifying : librados-devel-14.2.10-0.oe1.bclinux.aarch64 2/11
Verifying : libradospp-devel-14.2.10-0.oe1.bclinux.aarch64 3/11
Verifying : librbd-devel-14.2.10-0.oe1.bclinux.aarch64 4/11
Verifying : librgw-devel-14.2.10-0.oe1.bclinux.aarch64 5/11
Verifying : python-rados-14.2.10-0.oe1.bclinux.aarch64 6/11
Verifying : python-rbd-14.2.10-0.oe1.bclinux.aarch64 7/11
Verifying : rados-objclass-devel-14.2.10-0.oe1.bclinux.aarch64 8/11
Verifying : rbd-fuse-14.2.10-0.oe1.bclinux.aarch64 9/11
Verifying : rbd-mirror-14.2.10-0.oe1.bclinux.aarch64 10/11
Verifying : rbd-nbd-14.2.10-0.oe1.bclinux.aarch64 11/11
Installed:
ceph-base-14.2.10-0.oe1.bclinux.aarch64 librados-devel-14.2.10-0.oe1.bclinux.aarch64 libradospp-devel-14.2.10-0.oe1.bclinux.aarch64 librbd-devel-14.2.10-0.oe1.bclinux.aarch64 librgw-devel-14.2.10-0.oe1.bclinux.aarch64
python-rados-14.2.10-0.oe1.bclinux.aarch64 python-rbd-14.2.10-0.oe1.bclinux.aarch64 rados-objclass-devel-14.2.10-0.oe1.bclinux.aarch64 rbd-fuse-14.2.10-0.oe1.bclinux.aarch64 rbd-mirror-14.2.10-0.oe1.bclinux.aarch64
rbd-nbd-14.2.10-0.oe1.bclinux.aarch64
Complete!

#### Fault: ImportError: No module named prettytable
Fix:
yum install python2-prettytable
### Client commands verified OK
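A minimal check of what "verified OK" means here (my sketch):

ceph --version                                 # should report ceph version 14.2.10 ... nautilus (stable)
python2 -c "import rados, rbd, prettytable"    # no ImportError any more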

## ceph.conf configuration
#### Generate a minimal config on ceph-0
\[root@ceph-0 \~\]# **ceph config generate-minimal-conf**
# minimal ceph.conf for ff72b496-d036-4f1b-b2ad-55358f3c16cb
\[global\]
fsid = ff72b496-d036-4f1b-b2ad-55358f3c16cb
mon_host = [v2:172.17.163.105:3300/0,v1:172.17.163.105:6789/0] [v2:172.17.112.206:3300/0,v1:172.17.112.206:6789/0] [v2:172.17.227.100:3300/0,v1:172.17.227.100:6789/0] [v2:172.17.67.157:3300/0,v1:172.17.67.157:6789/0]
#### ceph.conf on ceph-client
Edit /etc/ceph/ceph.conf on the client (an scp alternative is sketched after the file contents):
# minimal ceph.conf for ff72b496-d036-4f1b-b2ad-55358f3c16cb
[global]
fsid = ff72b496-d036-4f1b-b2ad-55358f3c16cb
mon_host = [v2:172.17.163.105:3300/0,v1:172.17.163.105:6789/0] [v2:172.17.112.206:3300/0,v1:172.17.112.206:6789/0] [v2:172.17.227.100:3300/0,v1:172.17.227.100:6789/0] [v2:172.17.67.157:3300/0,v1:172.17.67.157:6789/0]
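Instead of editing the file by hand, it can also be pushed from ceph-0 (assuming the client is still reachable at 172.27.0.100 as in the rsync step above):

ceph config generate-minimal-conf > /tmp/ceph.conf         # on ceph-0
scp /tmp/ceph.conf [email protected]:/etc/ceph/ceph.conf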
## Client keyring
#### Generate the key on ceph-0
\[root@ceph-0 \~\]# **ceph fs authorize cephfs client.foo / rw**
\[client.foo\]
key = AQDI1FFlhnz6KhAAe3TA0YhZy3I8oW0Fus3WfQ==
#### Configure the keyring file on ceph-client
Create /etc/ceph/ceph.client.foo.keyring with the following content (an export/scp alternative follows):
[client.foo]
key = AQDI1FFlhnz6KhAAe3TA0YhZy3I8oW0Fus3WfQ==
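Equivalently, the keyring can be exported on ceph-0 and copied over instead of pasting the key by hand (same client address as above; my sketch):

ceph auth get client.foo -o /etc/ceph/ceph.client.foo.keyring      # on ceph-0
scp /etc/ceph/ceph.client.foo.keyring [email protected]:/etc/ceph/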
#### Set permissions
\[root@ceph-client ceph\]# **chmod 600 ceph.client.foo.keyring**
\[root@ceph-client ceph\]# **chmod 644 ceph.conf**
## Mount failures
The procedure from the latest upstream docs fails here, presumably because the version gap is too large (the [email protected]=/ device syntax is newer than this Nautilus-era client):
\[root@ceph-client ceph\]# **mount.ceph [email protected]=/ /mnt/cephfs -o mon_addr=ceph-0:6789**
source mount path was not specified
unable to parse mount source: -22
\[root@ceph-client \~\]# **mount -t ceph ceph-0:/ /mnt/cephfs/**
unable to get monitor info from DNS SRV with service name: ceph-mon
2023-11-13 16:33:07.761 ffff9a776010 -1 failed for service _ceph-mon._tcp
2023-11-13 16:33:07.761 ffff9a776010 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.guest.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
mount error 22 = Invalid argument
\[root@ceph-client ceph\]# **mv ceph.client.foo.keyring ceph.keyring**
\[root@ceph-client ceph\]# **mount -t ceph ceph-0:/ /mnt/cephfs/**
unable to get monitor info from DNS SRV with service name: ceph-mon
2023-11-13 16:35:04.049 ffffa93fe010 -1 failed for service _ceph-mon._tcp
mount error 22 = Invalid argument
\[root@ceph-client ceph\]# **mount -t ceph ceph-0:/ /mnt/cephfs/ -o name=client.foo,secret=AQDI1FFlhnz6KhAAe3TA0YhZy3I8oW0Fus3WfQ==**
mount error 1 = Operation not permitted
## Mount succeeded

The kernel client's name= option takes the client ID without the client. prefix, so name=foo works where name=client.foo was rejected with "Operation not permitted":
mount -t ceph ceph-0:/ /mnt/cephfs/ -o name=foo,secret=AQDI1FFlhnz6KhAAe3TA0YhZy3I8oW0Fus3WfQ==
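A quick sanity check after mounting (my addition, not in the original run):

mount | grep /mnt/cephfs                  # shows ceph-0:/ on /mnt/cephfs type ceph (name=foo,...)
df -h /mnt/cephfs                         # available space should match the cephfs_data pool
echo hello > /mnt/cephfs/hello.txt && cat /mnt/cephfs/hello.txt    # read/write works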

## High-availability mount
mount -t ceph ceph-0,ceph-1,ceph-2,ceph-3:/ /mnt/cephfs/ -o name=foo,secret=AQDI1FFlhnz6KhAAe3TA0YhZy3I8oW0Fus3WfQ==
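To make the high-availability mount persistent across reboots, an /etc/fstab entry along these lines should work (a sketch; /etc/ceph/foo.secret is my choice of path and must contain only the bare key string):

# /etc/fstab
ceph-0,ceph-1,ceph-2,ceph-3:/  /mnt/cephfs  ceph  name=foo,secretfile=/etc/ceph/foo.secret,noatime,_netdev  0  0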
## dd test
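A simple sequential write/read test against the mount (parameters are illustrative, not the original run):

dd if=/dev/zero of=/mnt/cephfs/ddtest.img bs=1M count=1024 oflag=direct    # 1 GiB sequential write
dd if=/mnt/cephfs/ddtest.img of=/dev/null bs=1M iflag=direct               # sequential read back
rm -f /mnt/cephfs/ddtest.img                                               # clean up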

References:
[Ceph Filesystem --- Ceph Documentation](https://docs.ceph.com/en/nautilus/cephfs/ "Ceph Filesystem — Ceph Documentation") (Nautilus 14.2.10 documentation)
[Ceph文件系统_mount -t ceph-CSDN博客](https://blog.csdn.net/qq_28903377/article/details/128053382 "Ceph文件系统_mount -t ceph-CSDN博客")