After deploying a Ceph cluster and creating a pool, why does ceph report 100.000% pgs unknown?

[root@node1 ~]# ceph -w
  cluster:
    id:     2386c327-8eb1-4dd7-9fed-fedff947c383
    health: HEALTH_WARN
            Reduced data availability: 128 pgs inactive

  services:
    mon: 2 daemons, quorum node0,node1
    mgr: openstack(active)
    osd: 3 osds: 3 up, 3 in

  data:
    pools:   1 pools, 128 pgs
    objects: 0 objects, 0 B
    usage:   6.0 GiB used, 118 GiB / 124 GiB avail
    pgs:     100.000% pgs unknown
             128 unknown

[root@node1 ~]# ceph health detail
HEALTH_WARN Reduced data availability: 128 pgs inactive
PG_AVAILABILITY Reduced data availability: 128 pgs inactive
pg 3.1a is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.1b is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.1c is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.1d is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.1e is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.1f is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.20 is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.21 is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.22 is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.23 is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.24 is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.25 is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.26 is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.27 is stuck inactive for 5054.311414, current state unknown, last acting []
pg 3.28 is stuck inactive for 5054.311414, current state unknown, last acting []

[root@node1 ~]# cat /etc/ceph/ceph.conf
[global]
fsid = 2386c327-8eb1-4dd7-9fed-fedff947c383
auth cluster required = none
auth service required = none
auth client required = none
[mds]
[mon]
[mon.node0]
host = node0
mon addr = 192.168.199.130:6789
[mon.node1]
host = node1
mon addr = 192.168.199.131:6789
[mgr]
mgr modules = dashboard

[osd]
osd crush update on start = True
[osd.0]
host = node0
osd data = /var/lib/ceph/osd/ceph-0/
bluestore block path = /dev/disk/by-partlabel/bluestore_block_0
bluestore block db path = /dev/disk/by-partlabel/bluestore_block_db_0
bluestore block wal path = /dev/disk/by-partlabel/bluestore_block_wal
[osd.1]
host = node1
osd data = /var/lib/ceph/osd/ceph-1/
bluestore block path = /dev/disk/by-partlabel/bluestore_block_0
bluestore block db path = /dev/disk/by-partlabel/bluestore_block_db_0
bluestore block wal path = /dev/disk/by-partlabel/bluestore_block_wal
[osd.2]
host = node2
osd data = /var/lib/ceph/osd/ceph-2/
bluestore block path = /dev/disk/by-partlabel/bluestore_block_0
bluestore block db path = /dev/disk/by-partlabel/bluestore_block_db_0
bluestore block wal path = /dev/disk/by-partlabel/bluestore_block_wal

In addition, the firewall and SELinux are disabled on all nodes.
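
A few checks that should help narrow this down (a sketch based on the output above; the exact output from this cluster is not captured here, and the mgr systemd unit name is assumed from the mgr id "openstack"):

[root@node1 ~]# ceph osd tree                        # are all three OSDs up, weighted, and placed under a host bucket?
[root@node1 ~]# ceph osd crush tree                  # does the CRUSH hierarchy contain the hosts and OSDs at all?
[root@node1 ~]# ceph pg dump_stuck inactive | head   # which PGs are stuck and what their last acting set was
[root@node1 ~]# systemctl status ceph-mgr@openstack  # since Luminous, PG states are reported through the mgr, so a broken mgr shows every PG as unknown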

3 Answers

zhangyou, student, Huazhong University of Science and Technology:

It is probably a problem with the Ceph CRUSH map.

2018-11-03
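
If it is a CRUSH problem, the typical symptom is OSDs that are missing from the hierarchy or carry a weight of 0, in which case no PG can be mapped to an acting set and everything stays unknown. A minimal check (host and OSD names taken from the ceph.conf above):

[root@node1 ~]# ceph osd tree
# Expected: osd.0, osd.1 and osd.2 each listed under its host (node0/node1/node2)
# beneath the default root, with a non-zero CRUSH weight. Note "last acting []"
# in the ceph health detail output above: no OSD has ever been chosen for these PGs.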
aditowh, systems architect:

It feels like synchronization is stuck somewhere. Is network/port communication between the OSDs working normally?

2018-11-07
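
One way to verify the OSD network paths (assuming default ports: mons on 6789, OSD daemons somewhere in the 6800-7300 range; the IP is the node0 mon address from the ceph.conf above):

[root@node1 ~]# ss -tlnp | grep ceph-osd         # which ports the local OSD daemon is listening on
[root@node1 ~]# nc -zv 192.168.199.130 6789      # mon on node0 reachable from node1?
[root@node1 ~]# nc -zv node2 6800                # repeat for each OSD port reported by ss on the other hosts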
吕作令, XSKY:

Run ceph osd crush tree to check whether there is anything wrong with the CRUSH map settings.

1 day ago
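
For this layout, a healthy ceph osd crush tree should show one root containing the three hosts, each with a single OSD. If an OSD or host is missing from the tree, it can be placed by hand; the commands below are a sketch, and the weight of 0.04 is an assumption (CRUSH weight is roughly capacity in TiB, and each OSD here is about 41 GiB):

[root@node1 ~]# ceph osd crush add-bucket node2 host       # only if the host bucket is missing
[root@node1 ~]# ceph osd crush move node2 root=default
[root@node1 ~]# ceph osd crush add osd.2 0.04 host=node2   # or, if it exists with weight 0: ceph osd crush reweight osd.2 0.04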
