Ceph Pool Quota Configuration

2024-06-17

Create Test Pools

[root@ceph01 ~]# ceph osd pool create demo-test1 8 8
pool 'demo-test1' created
[root@ceph01 ~]# ceph osd pool create demo-test2 8 8
pool 'demo-test2' created

[root@ceph01 ~]# ceph osd pool ls | grep demo-test
demo-test1
demo-test2

[root@ceph01 ~]# ceph osd pool ls detail | grep demo-test
pool 26 'demo-test1' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 243 flags hashpspool stripe_width 0
pool 27 'demo-test2' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 246 flags hashpspool stripe_width 0

The Quota Command

[root@ceph01 ~]# ceph osd pool --help | grep set-quota
osd pool set-quota <poolname> max_objects|max_bytes <val>              set object or byte limit on pool
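
Both limits can be set on the same pool, and whichever is reached first blocks further writes. A quick sketch with arbitrary example values (not part of the test run below; unit suffixes such as M and G are accepted for max_bytes, as the 100M example later shows):

[root@ceph01 ~]# ceph osd pool set-quota demo-test1 max_objects 1000   # cap the object count
[root@ceph01 ~]# ceph osd pool set-quota demo-test1 max_bytes 1G       # cap the byte usage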

Limit the Number of Objects

For example, limit the demo-test1 pool to at most 3 objects:

[root@ceph01 ~]# ceph osd pool set-quota demo-test1 max_objects 3
set-quota max_objects = 3 for pool demo-test1

[root@ceph01 ~]# ceph osd pool get-quota demo-test1
quotas for pool 'demo-test1':
  max objects: 3 objects
  max bytes  : N/A
[root@ceph01 ~]# echo 11 > test.txt
[root@ceph01 ~]# rados put 1.txt ./test.txt --pool=demo-test1
[root@ceph01 ~]# rados put 2.txt ./test.txt --pool=demo-test1
[root@ceph01 ~]# rados put 3.txt ./test.txt --pool=demo-test1
[root@ceph01 ~]# rados put 4.txt ./test.txt --pool=demo-test1
[root@ceph01 ~]# rados put 5.txt ./test.txt --pool=demo-test1
2024-06-17 02:03:52.783 7fded95f99c0  0 client.225570.objecter  FULL, paused modify 0x559c72216ff0 tid 0

# Enforcement lags slightly: 4 objects were actually written, and only the 5th put was blocked
[root@ceph01 ~]# rados ls --pool=demo-test1
3.txt
2.txt
4.txt
1.txt
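
The lag is expected: the monitors only flag the pool as full after the OSDs report updated usage stats, and the client-side objecter then pauses further writes (the "FULL, paused modify" line above), so an extra object can slip past the limit. As an optional check, health detail should name the pool(s) that hit their quota; output omitted here:

[root@ceph01 ~]# ceph health detail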

[root@ceph01 ~]# ceph df
...
POOLS:
    POOL                          ID     PGS     STORED      OBJECTS     USED        %USED     MAX AVAIL 
...
        demo-test1                    26       8        12 B           4     768 KiB         0        94 GiB 

[root@ceph01 ~]# ceph -s
  cluster:
    id:     ed040fb0-fa20-456a-a9f0-c9a96cdf089e
    health: HEALTH_WARN
            1 pool(s) full # 可以看到这里报警了
            application not enabled on 1 pool(s)
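
The second warning is unrelated to quotas: pools created this way carry no application tag. If it bothers you, it can be cleared by tagging the pool; a minimal sketch (rbd is just an arbitrary choice for a throwaway test pool):

[root@ceph01 ~]# ceph osd pool application enable demo-test1 rbd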

Limit the Maximum Capacity

For example, limit the demo-test2 pool to a maximum capacity of 100 MB:

[root@ceph01 ~]# ceph osd pool set-quota demo-test2 max_bytes 100M
set-quota max_bytes = 104857600 for pool demo-test2
[root@ceph01 ~]# ceph osd pool get-quota demo-test2
quotas for pool 'demo-test2':
  max objects: N/A
  max bytes  : 100 MiB

# create a 30 MB test file
[root@ceph01 ~]# dd if=/dev/zero of=./test.bin bs=1M count=30
30+0 records in
30+0 records out
31457280 bytes (31 MB) copied, 0.024166 s, 1.3 GB/s

[root@ceph01 ~]# rados put test1.bin ./test.bin --pool=demo-test2
[root@ceph01 ~]# rados put test2.bin ./test.bin --pool=demo-test2
[root@ceph01 ~]# rados put test3.bin ./test.bin --pool=demo-test2
[root@ceph01 ~]# rados put test4.bin ./test.bin --pool=demo-test2
[root@ceph01 ~]# rados put test5.bin ./test.bin --pool=demo-test2
[root@ceph01 ~]# rados put test6.bin ./test.bin --pool=demo-test2
2024-06-17 02:11:43.196 7fb3129019c0  0 client.207577.objecter  FULL, paused modify 0x555d78cfcac0 tid 0
# Again there is a lag: all 5 objects (150 MB stored, well over the 100 MB quota) made it in before the 6th put was blocked
[root@ceph01 ~]# rados ls --pool=demo-test2
test3.bin
test5.bin
test1.bin
test2.bin
test4.bin

[root@ceph01 ~]# ceph df 
...
POOLS:
    POOL                          ID     PGS     STORED      OBJECTS     USED        %USED     MAX AVAIL 
   ...
    demo-test2                    27       8     150 MiB           5     450 MiB      0.16        93 GiB 
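
Note that the quota tripped on the 150 MiB of STORED (logical) data, not the replicated 450 MiB USED. ceph df detail (rather than plain ceph df) should also print per-pool quota columns, which is a quick way to review every configured quota at once; the exact columns vary by release, output omitted here:

[root@ceph01 ~]# ceph df detail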

[root@ceph01 ~]# ceph -s
  cluster:
    id:     ed040fb0-fa20-456a-a9f0-c9a96cdf089e
    health: HEALTH_WARN
 
            2 pool(s) full # now both pools report full
            application not enabled on 2 pool(s)
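
By default the only signal is the hard stop above. If the running release exposes them, the pool-quota threshold options can raise an earlier health warning before writes are blocked; a hedged sketch, assuming mon_pool_quota_warn_threshold / mon_pool_quota_crit_threshold are available and the cluster is new enough for the ceph config subcommand:

[root@ceph01 ~]# ceph config set mon mon_pool_quota_warn_threshold 80   # warn at 80% of a pool's quota
[root@ceph01 ~]# ceph config set mon mon_pool_quota_crit_threshold 95   # escalate near the limit (exact behavior depends on the release)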

Remove the Quotas

[root@ceph01 ~]# ceph osd pool set-quota demo-test1 max_objects 0
set-quota max_objects = 0 for pool demo-test1
[root@ceph01 ~]# ceph osd pool set-quota demo-test2 max_bytes 0
set-quota max_bytes = 0 for pool demo-test2
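
A value of 0 means "no limit", so this effectively removes the quotas; get-quota should show both values back at N/A, output omitted here:

[root@ceph01 ~]# ceph osd pool get-quota demo-test1
[root@ceph01 ~]# ceph osd pool get-quota demo-test2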

Delete the Test Pools

[root@ceph01 ~]# ceph osd pool rm demo-test1 demo-test1 --yes-i-really-really-mean-it
pool 'demo-test1' removed
[root@ceph01 ~]# ceph osd pool rm demo-test2 demo-test2 --yes-i-really-really-mean-it
pool 'demo-test2' removed
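
If the removal is instead refused with an error about pool deletion being disabled, it has to be allowed on the monitors first; a minimal sketch, assuming a release with the ceph config subcommand (the setting can be flipped back to false afterwards):

[root@ceph01 ~]# ceph config set mon mon_allow_pool_delete true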
