
After deploying a Ceph cluster and creating a pool, why does the status report 100.000% pgs unknown?


[root@node1 ~]# ceph -w
  cluster:
    id:     2386c327-8eb1-4dd7-9fed-fedff947c383
    health: HEALTH_WARN
            Reduced data availability: 128 pgs inactive

  services:
    mon: 2 daemons, quorum node0,node1
    mgr: openstack(active)
    osd: 3 osds: 3 up, 3 in

  data:
    pools:   1 pools, 128 pgs
    objects: 0  objects, 0 B
    usage:   6.0 GiB used, 118 GiB / 124 GiB avail
    pgs:     100.000% pgs unknown
             128 unknown

[root@node1 ~]# ceph health detail
HEALTH_WARN Reduced data availability: 128 pgs inactive
PG_AVAILABILITY Reduced data availability: 128 pgs inactive
    pg 3.1a is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.1b is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.1c is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.1d is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.1e is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.1f is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.20 is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.21 is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.22 is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.23 is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.24 is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.25 is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.26 is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.27 is stuck inactive for 5054.311414, current state unknown, last acting []
    pg 3.28 is stuck inactive for 5054.311414, current state unknown, last acting []

[root@node1 ~]# cat /etc/ceph/ceph.conf
[global]
fsid = 2386c327-8eb1-4dd7-9fed-fedff947c383
auth cluster required = none
auth service required = none
auth client required = none

[mds]
[mon]
[mon.node0]
host = node0
mon addr = 192.168.199.130:6789

[mon.node1]
host = node1
mon addr = 192.168.199.131:6789

[mgr]
mgr modules = dashboard

[osd]
osd crush update on start = True

[osd.0]
host = node0
osd data = /var/lib/ceph/osd/ceph-0/
bluestore block path = /dev/disk/by-partlabel/bluestore_block_0
bluestore block db path = /dev/disk/by-partlabel/bluestore_block_db_0
bluestore block wal path = /dev/disk/by-partlabel/bluestore_block_wal

[osd.1]
host = node1
osd data = /var/lib/ceph/osd/ceph-1/
bluestore block path = /dev/disk/by-partlabel/bluestore_block_0
bluestore block db path = /dev/disk/by-partlabel/bluestore_block_db_0
bluestore block wal path = /dev/disk/by-partlabel/bluestore_block_wal

[osd.2]
host = node2
osd data = /var/lib/ceph/osd/ceph-2/
bluestore block path = /dev/disk/by-partlabel/bluestore_block_0
bluestore block db path = /dev/disk/by-partlabel/bluestore_block_db_0
bluestore block wal path = /dev/disk/by-partlabel/bluestore_block_wal

In addition, the firewall and SELinux have been disabled on all nodes.
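
A quick way to double-check this on each node (assuming firewalld and the standard SELinux tools are installed, which may differ from your environment) is:

systemctl is-active firewalld    # should report "inactive" (or fail if firewalld is not installed)
getenforce                       # should report "Disabled" or "Permissive"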

吕作令's answer:


Run ceph osd crush tree and check whether the CRUSH map is set up correctly.
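
For example, something along these lines (a rough sketch only; the host/OSD names are taken from the ceph.conf above, and the weight is just an example value, not a recommendation):

# Inspect the CRUSH hierarchy and where the OSDs sit
ceph osd crush tree
ceph osd tree

# If the OSDs or their host buckets are missing from the default root
# (one possible reason PGs stay inactive/unknown), add and move them, e.g.:
ceph osd crush add-bucket node0 host
ceph osd crush move node0 root=default
ceph osd crush add osd.0 1.0 host=node0

# Then watch whether the PGs peer and go active+clean
ceph -s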
