Using commands to see which OSDs a specific pool is distributed across

Approach: get the pool's crush_rule --> ceph osd crush rule dump --> ceph osd tree (a scripted version of these steps is sketched after the walkthrough below)
 
[root@localhost store]# rados lspools
zone1.rgw.buckets.data
.rgw.root
zone1.rgw.buckets.index
[root@localhost store]# ceph osd pool get zone1.rgw.buckets.index crush_rule
crush_rule: replicated_rule (replicated_rule is only a name and could be called anything; whether the pool is actually replicated or EC, and how many copies it keeps, has to be read from the pool detail)
[root@localhost store]# ceph osd crush rule dump replicated_rule
{
    "rule_id": 0,
    "rule_name": "replicated_rule",
    "ruleset": 0,
    "type": 1,
    "min_size": 1,
    "max_size": 10,
    "steps": [
        {
            "op": "take",
            "item": -1,
            "item_name": "default" (得到crush rule dump)
        },
[root@localhost store]# ceph osd tree | grep -v osd
ID  CLASS WEIGHT    TYPE NAME       STATUS REWEIGHT PRI-AFF
-76       540.00000 root hdd-root-1
 -9        30.00000     host DN-001
-11        30.00000     host DN-002
-10        30.00000     host DN-003

 -1        18.00000 root default      (this is the root the rule takes, so the pool's data lands on the OSDs under these hosts)
 -7         6.00000     host MN-001
 -2         6.00000     host MN-002
 -6         6.00000     host MN-003
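The three steps above can be chained together. Below is a minimal sketch, not the exact commands from this walkthrough: it assumes bash with jq installed and a Ceph release that has ceph osd ls-tree; the default pool name is just the one used in this example.

#!/usr/bin/env bash
# Sketch: list the OSDs that a pool's CRUSH rule maps to.
# Assumptions: jq is available, and this Ceph release supports "ceph osd ls-tree".
set -euo pipefail

POOL=${1:-zone1.rgw.buckets.index}

# Step 1: which CRUSH rule does the pool use? Output looks like "crush_rule: replicated_rule".
RULE=$(ceph osd pool get "$POOL" crush_rule | awk '{print $2}')

# The rule name alone does not say replicated vs EC or how many copies;
# that comes from the pool detail.
ceph osd pool ls detail | grep "'$POOL'"

# Step 2: dump the rule and pull the CRUSH bucket named in its "take" step(s).
ROOTS=$(ceph osd crush rule dump "$RULE" | jq -r '.steps[] | select(.op=="take") | .item_name')

# Step 3: list the OSD ids under each of those buckets.
for ROOT in $ROOTS; do
    echo "OSDs under CRUSH bucket $ROOT (rule $RULE, pool $POOL):"
    ceph osd ls-tree "$ROOT"
done

If ceph osd ls-tree is not available on the release in use, reading ceph osd tree by hand as above does the same job. A rule restricted to a device class may show a take item like default~hdd (the shadow hierarchy), which this sketch does not handle. ceph pg ls-by-pool <pool> can be used to confirm which OSDs actually hold the pool's PGs.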
 