New ceph cluster keeps pgs undersized+peered and rbd ls hangs

I have set up a new ceph cluster with 1 mon, 1 mds, 1 mgr, and 15 osds. After setup everything looks fine, except that the pg state stays undersized+peered. Each OSD uses a whole disk with a fresh standalone XFS filesystem on it, 3 TB to 4 TB in size, with no partition table. The OSD logs show nothing useful.
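
For reference, the stuck PGs and the CRUSH layout can be inspected with the standard ceph CLI (a diagnostic sketch, not output from my cluster):

    ceph health detail            # lists the inactive/undersized pg ids
    ceph pg dump_stuck inactive   # stuck pgs with their acting/up sets
    ceph osd tree                 # host/osd layout as CRUSH sees it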

Here is my ceph -s output:

  cluster:
    id:     19e50b60-31b0-467a-8ea9-6c37742a1f77
    health: HEALTH_WARN
            Reduced data availability: 8 pgs inactive
            Degraded data redundancy: 8 pgs undersized
            1 monitors have not enabled msgr2

  services:
    mon: 1 daemons, quorum wuminghan-K600-1G (age 25m)
    mgr: wuminghan-K600-1G(active, since 24m)
    osd: 15 osds: 15 up (since 23m), 15 in (since 8h)

  data:
    pools:   1 pools, 8 pgs
    objects: 0 objects, 0 B
    usage:   15 GiB used, 135 GiB / 150 GiB avail
    pgs:     100.000% pgs not active
             8 undersized+peered
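
Since undersized means a PG's acting set has fewer OSDs than the pool's size, and a peered PG refuses I/O until it reaches min_size, comparing the two values is a reasonable next step (pool name rbd taken from the rbd pool init command later in this post; <pgid> is a placeholder):

    ceph osd pool get rbd size      # effective replica count of the pool
    ceph osd pool get rbd min_size  # replicas needed before serving I/O
    ceph pg <pgid> query            # why a specific pg cannot activate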

Here is my ceph.conf:

[global]
fsid = 19e50b60-31b0-467a-8ea9-6c37742a1f77
mon initial members = wuminghan-K600-1G
mon host = 192.168.0.237
public network = 192.168.0.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
osd pool default size = 1
osd pool default min size = 1
osd pool default pg num = 333
osd pool default pgp num = 333
osd crush chooseleaf type = 1
mon allow pool delete = true

[mgr.wuminghan-K600-1G]
host = wuminghan-K600-1G

[mds.wuminghan-K600-1G]
host = wuminghan-K600-1G

[osd.0]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee2112b4c78
debug osd = 20
debug filestore = 20
[osd.1]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee26601e571
debug osd = 20
debug filestore = 20
[osd.2]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee266717eb3
debug osd = 20
debug filestore = 20
[osd.3]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee266806f32
debug osd = 20
debug filestore = 20
[osd.4]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee266808610
debug osd = 20
debug filestore = 20
[osd.5]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee266808651
debug osd = 20
debug filestore = 20
[osd.6]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee266808b36
debug osd = 20
debug filestore = 20
[osd.7]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee2bb578f3e
debug osd = 20
debug filestore = 20
[osd.8]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee2bb57915c
debug osd = 20
debug filestore = 20
[osd.9]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee2bbc75bb4
debug osd = 20
debug filestore = 20
[osd.10]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee2bbd63771
debug osd = 20
debug filestore = 20
[osd.11]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee2bbd63795
debug osd = 20
debug filestore = 20
[osd.12]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee2bbd64ee9
debug osd = 20
debug filestore = 20
[osd.13]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee2bbd64fe8
debug osd = 20
debug filestore = 20
[osd.14]
host = wuminghan-K600-1G
devs = /dev/disk/by-id/wwn-0x50014ee2bbd65340
debug osd = 20
debug filestore = 20
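
One thing worth double-checking against this config (my assumption, since everything runs on a single host): osd crush chooseleaf type = 1 places replicas across distinct hosts, so if the pool was created with a size greater than 1 before osd pool default size = 1 took effect, CRUSH can never fill the acting set. The effective rule and pool size can be inspected, and for a throwaway single-node test the size can be forced down (newer releases may additionally require --yes-i-really-mean-it):

    ceph osd crush rule dump        # failure domain used by the default rule
    ceph osd pool get rbd size      # size the pool was actually created with
    ceph osd pool set rbd size 1    # single-node test clusters only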

Every step was done by hand, following the official ceph documentation. I also tried installing with ceph-deploy earlier, without success.

The pool can be created successfully, but running rbd ls or rbd pool init rbd hangs forever with no output.
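
The hang itself is consistent with the status above: with 100% of PGs inactive, every client operation (including the metadata writes that rbd pool init issues) blocks until the PGs activate. Where the client stalls can be made visible with the generic debug options the rbd tool accepts (a sketch; the output is verbose):

    rbd ls --debug-ms 1 --debug-rbd 20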

...