...
Then move this host (ceph-osd-06 in our example) into the relevant rack:

```
ceph osd crush move ceph-osd-06 rack=Rack12-PianoAlto
```
Verify again with ceph osd df and ceph osd tree.
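For example, a minimal check (assuming the rack bucket Rack12-PianoAlto already exists in the CRUSH map, as in this example):

```
# the host ceph-osd-06 should now appear under the rack bucket
ceph osd tree | grep -B1 -A12 Rack12-PianoAlto
# per-OSD utilisation, grouped by the CRUSH hierarchy
ceph osd df tree
```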
Verify that the OSD is using the right vgs:
```
[root@ceph-osd-06 ~]# ceph-bluestore-tool show-label --path /var/lib/ceph/osd/ceph-50
infering bluefs devices from bluestore path
{
    "/var/lib/ceph/osd/ceph-50/block": {
        "osd_uuid": "dc72b996-d035-4dcd-ba42-1a6433eb78f7",
        "size": 10000827154432,
        "btime": "2019-02-19 11:55:47.553215",
        "description": "main",
        "bluefs": "1",
        "ceph_fsid": "8162f291-00b6-4b40-a8b4-1981a8c09b64",
        "kv_backend": "rocksdb",
        "magic": "ceph osd volume v026",
        "mkfs_done": "yes",
        "osd_key": "AQCu4Gtc+jKSJhAAKzaAAYuTKWZs9rjJlBXWww==",
        "ready": "ready",
        "whoami": "50"
    },
    "/var/lib/ceph/osd/ceph-50/block.db": {
        "osd_uuid": "dc72b996-d035-4dcd-ba42-1a6433eb78f7",
        "size": 95563022336,
        "btime": "2019-02-19 11:55:47.573213",
        "description": "bluefs db"
    }
}
[root@ceph-osd-06 ~]# ls -l /var/lib/ceph/osd/ceph-50/block
lrwxrwxrwx 1 ceph ceph 27 Feb 19 12:23 /var/lib/ceph/osd/ceph-50/block -> /dev/ceph-block-50/block-50
[root@ceph-osd-06 ~]# ls -l /var/lib/ceph/osd/ceph-50/block.db
lrwxrwxrwx 1 ceph ceph 24 Feb 19 12:23 /var/lib/ceph/osd/ceph-50/block.db -> /dev/ceph-db-50-54/db-50
[root@ceph-osd-06 ~]#
```
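The same mapping can also be cross-checked with the LVM tools; a minimal sketch, using the volume group names from this example:

```
# show the block and db logical volumes together with their backing devices
lvs -o lv_name,vg_name,lv_size,devices ceph-block-50 ceph-db-50-54
```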
...
Create the other OSDs (also use --osd-id if needed, e.g. when migrating OSDs from filestore to bluestore; see the sketch after this block):

```
ceph-volume lvm create --bluestore --data ceph-block-51/block-51 --block.db ceph-db-50-54/db-51
ceph-volume lvm create --bluestore --data ceph-block-52/block-52 --block.db ceph-db-50-54/db-52
ceph-volume lvm create --bluestore --data ceph-block-53/block-53 --block.db ceph-db-50-54/db-53
ceph-volume lvm create --bluestore --data ceph-block-54/block-54 --block.db ceph-db-50-54/db-54
ceph-volume lvm create --bluestore --data ceph-block-55/block-55 --block.db ceph-db-55-59/db-55
ceph-volume lvm create --bluestore --data ceph-block-56/block-56 --block.db ceph-db-55-59/db-56
ceph-volume lvm create --bluestore --data ceph-block-57/block-57 --block.db ceph-db-55-59/db-57
ceph-volume lvm create --bluestore --data ceph-block-58/block-58 --block.db ceph-db-55-59/db-58
ceph-volume lvm create --bluestore --data ceph-block-59/block-59 --block.db ceph-db-55-59/db-59
```
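A sketch of the --osd-id variant mentioned above, assuming the id being reused (51 here) belongs to an OSD that was previously removed or destroyed:

```
# reuse an existing OSD id, e.g. when re-creating OSD 51 after "ceph osd destroy 51 --yes-i-really-mean-it"
ceph-volume lvm create --bluestore --osd-id 51 --data ceph-block-51/block-51 --block.db ceph-db-50-54/db-51
```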
Reboot the new OSD node:

```
shutdown -r now
```
Verify that the new OSDs are up.
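For example (a minimal check; OSD ids 50-59 and host ceph-osd-06 are the ones used in this example):

```
ceph -s                                 # overall cluster health
ceph osd stat                           # all OSDs should be reported "up" and "in"
ceph osd tree | grep -A12 ceph-osd-06   # OSDs 50-59 should show "up" under the new host
systemctl status ceph-osd@50            # on ceph-osd-06 itself, check a single OSD daemon
```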
...