RBD Block Device
[root@node1 my-cluster]# ceph osd pool create ceph-demo 64 64
pool 'ceph-demo' created
[root@node1 my-cluster]# ceph osd lspools
1 ceph-demo
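The two 64s set pg_num and pgp_num for the new pool. They can be verified afterwards, along with the replica count; a minimal sketch, assuming the pool name above:

ceph osd pool get ceph-demo pg_num   # placement-group count
ceph osd pool get ceph-demo size     # replica count (default 3)
ceph osd pool ls detail              # full pool settings in one view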
[root@node1 ~]# rbd create -p ceph-demo --image reb-demo.img --size 10G
[root@node1 ~]# rbd info ceph-demo/reb-demo.img
rbd image 'reb-demo.img':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 11d3ddd26151
block_name_prefix: rbd_data.11d3ddd26151
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Fri Dec 18 11:14:37 2020
access_timestamp: Fri Dec 18 11:14:37 2020
modify_timestamp: Fri Dec 18 11:14:37 2020
# The image name above was mistyped (reb-demo instead of rbd-demo); remove it and recreate it with the intended name
[root@node1 ~]# rbd rm -p ceph-demo --image reb-demo.img
Removing image: 100% complete...done.
[root@node1 ~]# rbd create -p ceph-demo --image rbd-demo.img --size 10G
[root@node1 ~]# rbd info ceph-demo/rbd-demo.img
rbd image 'rbd-demo.img':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 12091f7d9fec
block_name_prefix: rbd_data.12091f7d9fec
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Fri Dec 18 11:18:54 2020
access_timestamp: Fri Dec 18 11:18:54 2020
modify_timestamp: Fri Dec 18 11:18:54 2020
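The default feature set shown above (exclusive-lock, object-map, fast-diff, deep-flatten) is not supported by the kernel RBD client on older kernels such as CentOS 7's 3.10, so rbd map would typically fail with an "image uses unsupported features" error. The extra features are disabled below one at a time; alternatively, the image could have been created with only layering from the start. A sketch, assuming the same pool and size:

rbd create -p ceph-demo --image rbd-demo.img --size 10G --image-feature layering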
# Keep only the layering feature
[root@node1 ~]# rbd feature disable ceph-demo/rbd-demo.img deep-flatten
[root@node1 ~]# rbd feature disable ceph-demo/rbd-demo.img fast-diff
[root@node1 ~]# rbd feature disable ceph-demo/rbd-demo.img object-map
[root@node1 ~]# rbd feature disable ceph-demo/rbd-demo.img exclusive-lock
[root@node1 ~]# rbd info ceph-demo/rbd-demo.img
rbd image 'rbd-demo.img':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 12091f7d9fec
block_name_prefix: rbd_data.12091f7d9fec
format: 2
features: layering
op_features:
flags:
create_timestamp: Fri Dec 18 11:18:54 2020
access_timestamp: Fri Dec 18 11:18:54 2020
modify_timestamp: Fri Dec 18 11:18:54 2020
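Features can also be switched back on later; object-map additionally needs a rebuild after being re-enabled. A sketch (exclusive-lock must come first, since object-map depends on it):

rbd feature enable ceph-demo/rbd-demo.img exclusive-lock
rbd feature enable ceph-demo/rbd-demo.img object-map
rbd object-map rebuild ceph-demo/rbd-demo.img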
[root@node1 ~]# rbd map ceph-demo/rbd-demo.img
/dev/rbd0
[root@node1 ~]# rbd device list
id pool namespace image snap device
0 ceph-demo rbd-demo.img - /dev/rbd0
[root@node1 ~]# lsblk /dev/rbd0
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
rbd0 252:0 0 10G 0 disk
[root@node1 ~]# mkfs.ext4 /dev/rbd0
[root@node1 ~]# mkdir -pv /mnt/rbd
mkdir: created directory '/mnt/rbd'
[root@node1 ~]# mount /dev/rbd0 /mnt/rbd
[root@node1 ~]# df -h
Filesystem              Size  Used Avail Use% Mounted on
devtmpfs 475M 0 475M 0% /dev
tmpfs 487M 0 487M 0% /dev/shm
tmpfs 487M 7.8M 479M 2% /run
tmpfs 487M 0 487M 0% /sys/fs/cgroup
/dev/mapper/centos-root 47G 2.1G 45G 5% /
/dev/sda1 1014M 168M 847M 17% /boot
tmpfs 98M 0 98M 0% /run/user/0
tmpfs 487M 52K 487M 1% /var/lib/ceph/osd/ceph-0
/dev/rbd0 9.8G 37M 9.2G 1% /mnt/rbd
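The mapping and mount above do not survive a reboot. A common way to make them persistent is the rbdmap service plus an fstab entry; a sketch, assuming the default client.admin keyring (distro details may vary):

# /etc/ceph/rbdmap — images to map at boot
ceph-demo/rbd-demo.img id=admin,keyring=/etc/ceph/ceph.client.admin.keyring

# /etc/fstab — _netdev delays the mount until the network (and rbdmap) is up
/dev/rbd/ceph-demo/rbd-demo.img /mnt/rbd ext4 defaults,noatime,_netdev 0 0

systemctl enable rbdmap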
[root@node1 ~]# echo 123 > /mnt/rbd/test.txt
[root@node1 ~]# rbd info -p ceph-demo --image rbd-demo.img
rbd image 'rbd-demo.img':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: 12091f7d9fec
block_name_prefix: rbd_data.12091f7d9fec
format: 2
features: layering
op_features:
flags:
create_timestamp: Fri Dec 18 11:18:54 2020
access_timestamp: Fri Dec 18 11:18:54 2020
modify_timestamp: Fri Dec 18 11:18:54 2020
[root@node1 ~]# rados -p ceph-demo ls | grep rbd_data.12091f7d9fec
rbd_data.12091f7d9fec.0000000000000001
rbd_data.12091f7d9fec.0000000000000400
rbd_data.12091f7d9fec.0000000000000435
······
[root@node1 ~]# ceph osd map ceph-demo rbd_data.12091f7d9fec.0000000000000001
osdmap e21 pool 'ceph-demo' (1) object 'rbd_data.12091f7d9fec.0000000000000001' -> pg 1.cc4b0d00 (1.0) -> up ([1,2,0], p1) acting ([1,2,0], p1)
[root@node1 ~]# for i in `rados -p ceph-demo ls | grep rbd_data.12091f7d9fec`;do ceph osd map ceph-demo $i; done
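Only a fraction of the 2560 possible rbd_data objects actually exist in the pool: RBD images are thin-provisioned, so a backing object is created only when its 4 MiB extent is first written. Provisioned versus actually used space can be compared with:

rbd du ceph-demo/rbd-demo.img   # slow without the fast-diff feature, but works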
# Expand the image to 15G
[root@node1 ~]# rbd resize -p ceph-demo --image rbd-demo.img --size 15G
[root@node1 ~]# lsblk /dev/rbd0
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
rbd0 252:0 0 15G 0 disk /mnt/rbd
# The block device on the host already shows 15G; grow the ext4 filesystem so the expansion takes effect
[root@node1 ~]# resize2fs /dev/rbd0
[root@node1 ~]# df -h /dev/rbd0
Filesystem      Size  Used Avail Use% Mounted on
/dev/rbd0 15G 41M 14G 1% /mnt/rbd
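Note that resize2fs is ext-specific; an XFS filesystem would use xfs_growfs on the mount point instead. Shrinking runs in the opposite order (filesystem first, then the image), and rbd demands an explicit flag because it discards all data beyond the new size. A sketch, valid only after the filesystem has been shrunk to fit:

rbd resize -p ceph-demo --image rbd-demo.img --size 10G --allow-shrink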
# Pool application warning
[root@node1 ~]# ceph health detail
HEALTH_WARN application not enabled on 1 pool(s)
POOL_APP_NOT_ENABLED application not enabled on 1 pool(s)
application not enabled on pool 'ceph-demo'
use 'ceph osd pool application enable <pool-name> <app-name>', where <app-name> is 'cephfs', 'rbd', 'rgw', or freeform for custom applications.
[root@node1 ~]# ceph osd pool application enable ceph-demo rbd
enabled application 'rbd' on pool 'ceph-demo'
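For a pool dedicated to RBD, the same tagging can be done right after pool creation (it also initializes the pool's rbd metadata), so the warning never appears; a one-line alternative:

rbd pool init ceph-demo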
[root@node1 ~]# ceph -s
cluster:
id: 3f5560c6-3af3-4983-89ec-924e8eaa9e06
health: HEALTH_OK
services:
mon: 3 daemons, quorum node1,node2,node3 (age 3h)
mgr: node1(active, since 21h), standbys: node2, node3
mds: 1 up:standby
osd: 3 osds: 3 up (since 3h), 3 in (since 3h)
data:
pools: 1 pools, 64 pgs
objects: 79 objects, 221 MiB
usage: 3.7 GiB used, 56 GiB / 60 GiB avail
pgs: 64 active+clean
[root@node1 ~]# ceph osd pool application get ceph-demo
{
"rbd": {}
}
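To tear the setup down again, the reverse order applies; a sketch:

umount /mnt/rbd
rbd unmap /dev/rbd0
# and, to delete the image entirely afterwards:
# rbd rm ceph-demo/rbd-demo.img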