ceph osd tree                      # first find the ID of the down OSD; assume it is osd.1
ceph osd out osd.1                 # run on the deploy node: mark osd.1 out of the cluster first
systemctl stop ceph-osd@1.service  # stop the OSD service
ceph-osd -i 1                      # run on the node that hosts the OSD to start the daemon again
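After the daemon comes back up, it is usually worth confirming that the OSD rejoins the cluster. A minimal check, assuming osd.1 was only marked out and not purged from the CRUSH map:

ceph osd in osd.1   # mark the OSD back in so data can rebalance onto it
ceph osd tree       # osd.1 should now show as up
ceph -s             # overall cluster status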
3. Resource temporarily unavailable and is another process using it?
[ceph1][WARNIN] E: Could not get lock /var/lib/dpkg/lock-frontend - open (11: Resource temporarily unavailable)
[ceph1][WARNIN] E: Unable to acquire the dpkg frontend lock (/var/lib/dpkg/lock-frontend), is another process using it?
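This means another apt/dpkg process on ceph1 (often unattended-upgrades or a manual apt run) is holding the frontend lock while ceph-deploy tries to install packages. A rough way to find and clear it before retrying, assuming it is safe to interrupt the other process:

lsof /var/lib/dpkg/lock-frontend        # show the PID holding the lock
ps aux | grep -E 'apt|dpkg|unattended'  # identify what that process is
# either wait for it to finish, or stop it and retry ceph-deploy:
systemctl stop unattended-upgrades      # only if the lock holder is unattended-upgrades
dpkg --configure -a                     # repair dpkg state if an install was interrupted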
root@ceph0:~/ceph-deploy# ceph -s
  cluster:
    id:     e34e62c3-d8a7-484e-8d46-4707b03b8f71
    health: HEALTH_WARN
            application not enabled on 1 pool(s)
            clock skew detected on mon.ceph2

  services:
    mon: 3 daemons, quorum ceph0,ceph1,ceph2
    mgr: ceph2(active), standbys: ceph1, ceph0
    osd: 3 osds: 3 up, 3 in
    rgw: 1 daemon active

  data:
    pools:   5 pools, 160 pgs
    objects: 188 objects, 1.2 KiB
    usage:   3.0 GiB used, 27 GiB / 30 GiB avail
    pgs:     160 active+clean
root@ceph0:~/ceph-deploy# ceph health detail
HEALTH_WARN application not enabled on 1 pool(s)
POOL_APP_NOT_ENABLED application not enabled on 1 pool(s)
    application not enabled on pool 'testPool'
    use 'ceph osd pool application enable <pool-name> <app-name>', where <app-name> is 'cephfs', 'rbd', 'rgw', or freeform for custom applications.
Running ceph health detail shows that the newly created pool testPool has not been tagged with an application. Since the instance added earlier was RGW, follow the hint and tag testPool with rgw:
root@ceph0:~/ceph-deploy# ceph osd pool application enable testPool rgw
enabled application 'rgw' on pool 'testPool'
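To confirm the warning is cleared, the pool's application tags and cluster health can be checked again; a quick follow-up (the clock-skew warning, if still present, is a separate issue):

ceph osd pool application get testPool   # should now list "rgw"
ceph health detail                       # POOL_APP_NOT_ENABLED should be gone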