ceph / ceph-ansible

Ansible playbooks to deploy Ceph, the distributed filesystem.

mons are allowing insecure global_id reclaim #6530

Closed · karasjoh000 closed this issue 3 years ago

karasjoh000 commented 3 years ago

Bug Report

What happened:
After installation, cluster health is in a WARN state. ceph health detail shows:

HEALTH_WARN mons are allowing insecure global_id reclaim
[WRN] AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED: mons are allowing insecure global_id reclaim
    mon.ceph-node-1 has auth_allow_insecure_global_id_reclaim set to true
    mon.ceph-node-0 has auth_allow_insecure_global_id_reclaim set to true
    mon.ceph-node-2 has auth_allow_insecure_global_id_reclaim set to true

ceph -s

  cluster:
    id:    <FSID>
    health: HEALTH_WARN
            mons are allowing insecure global_id reclaim

  services:
    mon: 3 daemons, quorum ceph-node-1,ceph-node-0,ceph-node-2 (age 28h)
    mgr: ceph-node-0(active, since 28h), standbys: ceph-node-1, ceph-node-2
    mds: 1/1 daemons up, 2 standby
    osd: 66 osds: 66 up (since 28h), 66 in (since 28h)
    rgw: 3 daemons active (3 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   13 pools, 2688 pgs
    objects: 248 objects, 10 KiB
    usage:   20 GiB used, 6.4 TiB / 6.4 TiB avail
    pgs:     2688 active+clean
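
The per-mon value from the warning can also be read back from the monitors' configuration database; given the warning above it reports true:

# ceph config get mon auth_allow_insecure_global_id_reclaim
true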

What you expected to happen:
Health status to be HEALTH_OK after installation.

How to reproduce it (minimal and precise):
Install the cluster with ceph-ansible; install the client tools with sudo apt-get install ceph-common.
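
Since containerized_deployment is true in the group_vars below, the deployment run is presumably along these lines (the inventory path is assumed; site-container.yml is the playbook ceph-ansible ships for containerized clusters):

cd ceph-ansible
cp site-container.yml.sample site-container.yml
ansible-playbook -i inventory/hosts site-container.yml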

Share your group_vars files, inventory and full ceph-ansible log

# grep -rEv '^ *#|^\s*$|^\s*dummy:' . --include="*.yml"
./clients.yml:---
./mdss.yml:---
./mgrs.yml:---
./mons.yml:---
./osds.yml:---
./osds.yml:devices:
./osds.yml:  - /dev/vdd
./osds.yml:  - /dev/vde
./osds.yml:  - /dev/vdf
./osds.yml:  - /dev/vdg
./osds.yml:  - /dev/vdh
./osds.yml:  - /dev/vdi
./osds.yml:  - /dev/vdj
./osds.yml:  - /dev/vdk
./osds.yml:  - /dev/vdl
./osds.yml:  - /dev/vdm
./osds.yml:  - /dev/vdn
./osds.yml:  - /dev/vdo
./osds.yml:  - /dev/vdp
./osds.yml:  - /dev/vdq
./osds.yml:  - /dev/vdr
./osds.yml:  - /dev/vds
./osds.yml:  - /dev/vdt
./osds.yml:  - /dev/vdu
./osds.yml:  - /dev/vdv
./osds.yml:  - /dev/vdw
./osds.yml:  - /dev/vdx
./osds.yml:  - /dev/vdy
./osds.yml:bluestore_wal_devices:
./osds.yml: - /dev/vdb
./osds.yml: - /dev/vdc
./osds.yml:osd_auto_discovery: false
./rgws.yml:---
./rgws.yml:rgw_create_pools:
./rgws.yml:  "{{ rgw_zone }}.rgw.buckets.data":
./rgws.yml:    pg_num: 64
./rgws.yml:    type: replicated
./rgws.yml:    size: 3
./rgws.yml:  "{{ rgw_zone }}.rgw.buckets.index":
./rgws.yml:    pg_num: 16
./rgws.yml:    size: 3
./rgws.yml:    type: replicated
./rgws.yml:  "{{ rgw_zone }}.rgw.meta":
./rgws.yml:    pg_num: 8
./rgws.yml:    size: 3
./rgws.yml:    type: replicated
./rgws.yml:  "{{ rgw_zone }}.rgw.log":
./rgws.yml:    pg_num: 8
./rgws.yml:    size: 3
./rgws.yml:    type: replicated
./rgws.yml:  "{{ rgw_zone }}.rgw.control":
./rgws.yml:    pg_num: 8
./rgws.yml:    size: 3
./rgws.yml:    type: replicated
./zone.yml:rgw_zone: us-west-1
./zone.yml:rgw_zonemaster: true
./zone.yml:rgw_zonesecondary: false
./zone.yml:rgw_zonegroup: us-west
./zone.yml:rgw_zonegroupmaster: true
./all/all.yml:---
./all/all.yml:ceph_release_num:
./all/all.yml:  octopus: 15
./all/all.yml:ntp_service_enabled: false
./all/all.yml:ceph_origin: repository
./all/all.yml:ceph_repository: community
./all/all.yml:ceph_stable_release: octopus
./all/all.yml:monitor_interface: ens3
./all/all.yml:journal_size: 5120 # OSD journal size in MB
./all/all.yml:public_network: 10.250.1.0/24
./all/all.yml:radosgw_interface: ens3
./all/all.yml:containerized_deployment: true
./all/all.yml:openstack_config: true
./all/all.yml:openstack_glance_pool:
./all/all.yml: name: "images"
./all/all.yml: application: "rbd"
./all/all.yml: pg_autoscale_mode: true
./all/all.yml:openstack_cinder_pool:
./all/all.yml: name: "volumes"
./all/all.yml: application: "rbd"
./all/all.yml: pg_autoscale_mode: true
./all/all.yml:openstack_nova_pool:
./all/all.yml: name: "vms"
./all/all.yml: application: "rbd"
./all/all.yml: pg_autoscale_mode: true
./all/all.yml:openstack_cinder_backup_pool:
./all/all.yml: name: "backups"
./all/all.yml: application: "rbd"
./all/all.yml: pg_autoscale_mode: true
./all/all.yml:openstack_pools:
./all/all.yml: - "{{ openstack_glance_pool }}"
./all/all.yml: - "{{ openstack_cinder_pool }}"
./all/all.yml: - "{{ openstack_nova_pool }}"
./all/all.yml: - "{{ openstack_cinder_backup_pool }}"
./all/all.yml:openstack_keys:
./all/all.yml: - { name: client.glance, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
./all/all.yml: - { name: client.cinder, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_glance_pool.name }}"}, mode: "0600" }
./all/all.yml: - { name: client.cinder-backup, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
./all/all.yml: - { name: client.openstack, caps: { mon: "profile rbd", osd: "profile rbd pool={{ openstack_glance_pool.name }}, profile rbd pool={{ openstack_nova_pool.name }}, profile rbd pool={{ openstack_cinder_pool.name }}, profile rbd pool={{ openstack_cinder_backup_pool.name }}"}, mode: "0600" }
./all/all.yml:dashboard_enabled: True
./all/all.yml:dashboard_protocol: http
./all/all.yml:dashboard_admin_user: admin
./all/all.yml:dashboard_admin_password: password
./all/all.yml:grafana_admin_user: admin
./all/all.yml:grafana_admin_password: password

Environment:

karasjoh000 commented 3 years ago

Running ceph config set mon auth_allow_insecure_global_id_reclaim false fixed it:

# ceph -s
  cluster:
    id:     <fsid>
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph-node-1,ceph-node-0,ceph-node-2 (age 29h)
    mgr: ceph-node-0(active, since 29h), standbys: ceph-node-1, ceph-node-2
    mds: 1/1 daemons up, 2 standby
    osd: 66 osds: 66 up (since 29h), 66 in (since 29h)
    rgw: 3 daemons active (3 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   13 pools, 2688 pgs
    objects: 248 objects, 10 KiB
    usage:   20 GiB used, 6.4 TiB / 6.4 TiB avail
    pgs:     2688 active+clean
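
Note that once insecure reclaim is disallowed, clients without the CVE-2021-20288 fix can no longer reconnect. If such clients still need access, the warning can instead be muted temporarily while they are updated, for example for one week:

# ceph health mute AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED 1w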

karasjoh000 commented 3 years ago

Reopening this issue in case adding ceph config set mon auth_allow_insecure_global_id_reclaim false to the playbook would be beneficial.
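
A minimal sketch of what such a task could look like (hypothetical, not an existing ceph-ansible task; it assumes the ceph CLI is usable on the first monitor host, which in a containerized deployment would need the usual container wrapper):

- name: disallow insecure global_id reclaim
  # runs once against the first monitor; mon_group_name defaults to "mons" in ceph-ansible
  command: ceph config set mon auth_allow_insecure_global_id_reclaim false
  changed_when: false
  run_once: true
  delegate_to: "{{ groups[mon_group_name][0] }}"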

dsavineau commented 3 years ago

There's no need to modify the playbooks or roles for that, since you can set it on your side via ceph_conf_overrides:

ceph_conf_overrides:
  mon:
    auth_allow_insecure_global_id_reclaim: false

https://github.com/ceph/ceph-ansible/blob/master/group_vars/all.yml.sample#L513-L535
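
For reference, that override is rendered into the generated ceph.conf on the mon hosts, so it ends up roughly as:

[mon]
auth_allow_insecure_global_id_reclaim = false

On a cluster that is already deployed, the mons still need a restart (or the runtime ceph config set shown above) for the new ceph.conf value to take effect.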