Commit 7ddbe747 authored by Alex Schultz, committed by Guillaume Abrioux
Browse files

Use ansible_facts

It has come to our attention that using ansible_* vars that are
populated with INJECT_FACTS_AS_VARS=True is not very performant.  In
order to be able to support setting that to off, we need to update the
references to use ansible_facts[<thing>] instead of ansible_<thing>.

Related: ansible#73654
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1935406

Signed-off-by: Alex Schultz <aschultz@redhat.com>
(cherry picked from commit a7f2fa73)
parent 697e5823
......@@ -80,7 +80,7 @@ dummy:
#centos_package_dependencies:
# - epel-release
# - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
# - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
#redhat_package_dependencies: []
......@@ -150,7 +150,7 @@ dummy:
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 4.0)
......@@ -178,7 +178,7 @@ dummy:
#
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
#ceph_stable_openstack_release_uca: queens
#ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
# REPOSITORY: openSUSE OBS
#
......@@ -188,7 +188,7 @@ dummy:
# usually has newer Ceph releases than the normal distro repository.
#
#
#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
# REPOSITORY: DEV
#
......@@ -251,7 +251,7 @@ dummy:
#ceph_conf_key_directory: /etc/ceph
#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
# Permissions for keyring files in /etc/ceph
#ceph_keyring_permissions: '0600'
......@@ -549,7 +549,7 @@ dummy:
# global:
# foo: 1234
# bar: 5678
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
# rgw_zone: zone1
#
#ceph_conf_overrides: {}
......
......@@ -43,14 +43,14 @@ dummy:
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
# TCMU_RUNNER resource limitation
#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_tcmu_runner_docker_cpu_limit: 1
# RBD_TARGET_GW resource limitation
#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_target_gw_docker_cpu_limit: 1
# RBD_TARGET_API resource limitation
#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_target_api_docker_cpu_limit: 1
......@@ -27,13 +27,13 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
#ceph_mds_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_mds_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_mds_docker_cpu_limit: 4
# we currently force MDS_NAME to hostname because of a bug in ceph-docker
# fix here: https://github.com/ceph/ceph-docker/pull/770
# this will go away soon.
#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_hostname }}
#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_facts['hostname'] }}
#ceph_config_keys: [] # DON'T TOUCH ME
......
......@@ -43,7 +43,7 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mgr_docker_extra_env' variable.
#ceph_mgr_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_mgr_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_mgr_docker_cpu_limit: 1
#ceph_mgr_docker_extra_env:
......
......@@ -45,7 +45,7 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mon_docker_extra_env' variable.
#ceph_mon_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_mon_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_mon_docker_cpu_limit: 1
#ceph_mon_container_listen_port: 3300
......
......@@ -25,7 +25,7 @@ dummy:
#ceph_nfs_enable_service: true
# ceph-nfs systemd service uses ansible's hostname as an instance id,
# so service name is ceph-nfs@{{ ansible_hostname }}, this is not
# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
# such case it's better to have constant instance id instead which
# can be set by 'ceph_nfs_service_suffix'
......@@ -95,7 +95,7 @@ dummy:
# they must be configured.
#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
#rgw_client_name: client.rgw.{{ ansible_hostname }}
#rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
###################
# CONFIG OVERRIDE #
......
......@@ -169,7 +169,7 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_osd_docker_extra_env' variable.
#ceph_osd_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_osd_docker_cpu_limit: 4
# The next two variables are undefined, and thus, unused by default.
......
......@@ -50,7 +50,7 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_rbd_mirror_docker_extra_env' variable.
#ceph_rbd_mirror_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_rbd_mirror_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_mirror_docker_cpu_limit: 1
#ceph_rbd_mirror_docker_extra_env:
......
......@@ -80,7 +80,7 @@ dummy:
#centos_package_dependencies:
# - epel-release
# - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
# - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
#redhat_package_dependencies: []
......@@ -150,7 +150,7 @@ ceph_repository: rhcs
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 4.0)
......@@ -178,7 +178,7 @@ ceph_rhcs_version: 4
#
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
#ceph_stable_openstack_release_uca: queens
#ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
# REPOSITORY: openSUSE OBS
#
......@@ -188,7 +188,7 @@ ceph_rhcs_version: 4
# usually has newer Ceph releases than the normal distro repository.
#
#
#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
# REPOSITORY: DEV
#
......@@ -251,7 +251,7 @@ ceph_iscsi_config_dev: false
#ceph_conf_key_directory: /etc/ceph
#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
# Permissions for keyring files in /etc/ceph
#ceph_keyring_permissions: '0600'
......@@ -549,7 +549,7 @@ ceph_iscsi_config_dev: false
# global:
# foo: 1234
# bar: 5678
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
# rgw_zone: zone1
#
#ceph_conf_overrides: {}
......
......@@ -36,7 +36,7 @@
- name: set_fact container_run_cmd, container_exec_cmd
set_fact:
container_run_cmd: "{{ container_binary + ' run --rm --privileged=true --net=host --pid=host --ipc=host -v /dev:/dev -v /etc/ceph:/etc/ceph -v /var/lib/ceph:/var/lib/ceph -v /var/run:/var/run --entrypoint=' if containerized_deployment | bool else '' }}ceph-volume {{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else '' }}"
container_exec_cmd: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_hostname'] if containerized_deployment | bool else '' }}"
container_exec_cmd: "{{ container_binary + ' exec ceph-mon-' + hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] if containerized_deployment | bool else '' }}"
- name: get ceph osd tree data
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree -f json"
......
......@@ -71,7 +71,7 @@
run_once: true
- name: get all nfs-ganesha mount points
command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
register: nfs_ganesha_mount_points
failed_when: false
with_items: "{{ groups[nfs_group_name] }}"
......@@ -127,7 +127,7 @@
name: nfs-ganesha
state: stopped
failed_when: false
when: ansible_service_mgr == 'systemd'
when: ansible_facts['service_mgr'] == 'systemd'
- name: purge node-exporter
hosts:
......@@ -249,7 +249,7 @@
- name: stop ceph mdss with systemd
service:
name: ceph-mds@{{ ansible_hostname }}
name: ceph-mds@{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
failed_when: false
......@@ -270,11 +270,11 @@
- name: stop ceph mgrs with systemd
service:
name: ceph-mgr@{{ ansible_hostname }}
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
failed_when: false
when: ansible_service_mgr == 'systemd'
when: ansible_facts['service_mgr'] == 'systemd'
- name: purge rgwloadbalancer cluster
......@@ -318,7 +318,7 @@
- name: stop ceph rgws with systemd
service:
name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: no
failed_when: false
......@@ -340,7 +340,7 @@
- name: stop ceph rbd mirror with systemd
service:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
failed_when: false
......@@ -368,7 +368,7 @@
become: false
wait_for:
port: 22
host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
host: "{{ hostvars[inventory_hostname]['ansible_facts']['default_ipv4']['address'] }}"
state: started
delay: 10
timeout: 500
......@@ -398,7 +398,7 @@
state: stopped
enabled: no
with_items: "{{ osd_ids.stdout_lines }}"
when: ansible_service_mgr == 'systemd'
when: ansible_facts['service_mgr'] == 'systemd'
- name: remove ceph udev rules
file:
......@@ -613,7 +613,7 @@
- name: stop ceph mons with systemd
service:
name: "ceph-{{ item }}@{{ ansible_hostname }}"
name: "ceph-{{ item }}@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
failed_when: false
......@@ -732,27 +732,27 @@
yum:
name: "{{ ceph_packages }}"
state: absent
when: ansible_pkg_mgr == 'yum'
when: ansible_facts['pkg_mgr'] == 'yum'
- name: purge ceph packages with dnf
dnf:
name: "{{ ceph_packages }}"
state: absent
when: ansible_pkg_mgr == 'dnf'
when: ansible_facts['pkg_mgr'] == 'dnf'
- name: purge ceph packages with apt
apt:
name: "{{ ceph_packages }}"
state: absent
purge: true
when: ansible_pkg_mgr == 'apt'
when: ansible_facts['pkg_mgr'] == 'apt'
- name: purge remaining ceph packages with yum
yum:
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'yum'
- ansible_facts['pkg_mgr'] == 'yum'
- purge_all_packages | bool
- name: purge remaining ceph packages with dnf
......@@ -760,7 +760,7 @@
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'dnf'
- ansible_facts['pkg_mgr'] == 'dnf'
- purge_all_packages | bool
- name: purge remaining ceph packages with apt
......@@ -768,7 +768,7 @@
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'apt'
- ansible_facts['pkg_mgr'] == 'apt'
- purge_all_packages | bool
- name: purge extra packages with yum
......@@ -776,7 +776,7 @@
name: "{{ extra_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'yum'
- ansible_facts['pkg_mgr'] == 'yum'
- purge_all_packages | bool
- name: purge extra packages with dnf
......@@ -784,7 +784,7 @@
name: "{{ extra_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'dnf'
- ansible_facts['pkg_mgr'] == 'dnf'
- purge_all_packages | bool
- name: purge extra packages with apt
......@@ -792,7 +792,7 @@
name: "{{ extra_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'apt'
- ansible_facts['pkg_mgr'] == 'apt'
- purge_all_packages | bool
- name: remove config and any ceph socket left
......@@ -818,7 +818,7 @@
- name: purge dnf cache
command: dnf clean all
when: ansible_pkg_mgr == 'dnf'
when: ansible_facts['pkg_mgr'] == 'dnf'
- name: purge rpm cache in /tmp
file:
......@@ -827,7 +827,7 @@
- name: clean apt
command: apt-get clean # noqa 303
when: ansible_pkg_mgr == 'apt'
when: ansible_facts['pkg_mgr'] == 'apt'
- name: purge ceph repo file in /etc/yum.repos.d
file:
......@@ -837,7 +837,7 @@
- ceph-dev
- ceph_stable
- rh_storage
when: ansible_os_family == 'RedHat'
when: ansible_facts['os_family'] == 'RedHat'
- name: check for anything running ceph
command: "ps -u ceph -U ceph"
......@@ -858,7 +858,7 @@
path: "{{ item.path }}"
state: absent
with_items: "{{ systemd_files.files }}"
when: ansible_service_mgr == 'systemd'
when: ansible_facts['service_mgr'] == 'systemd'
- name: purge fetch directory
......
......@@ -55,7 +55,7 @@
run_once: true
- name: get all nfs-ganesha mount points
command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
register: nfs_ganesha_mount_points
failed_when: false
with_items: "{{ groups[nfs_group_name] }}"
......@@ -104,7 +104,7 @@
- name: disable ceph nfs service
service:
name: "ceph-nfs@{{ ansible_hostname }}"
name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
......@@ -114,7 +114,7 @@
path: /etc/systemd/system/ceph-nfs@.service
state: absent
- name: remove ceph nfs directories for "{{ ansible_hostname }}"
- name: remove ceph nfs directories for "{{ ansible_facts['hostname'] }}"
file:
path: "{{ item }}"
state: absent
......@@ -134,7 +134,7 @@
- name: disable ceph mds service
service:
name: "ceph-mds@{{ ansible_hostname }}"
name: "ceph-mds@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
......@@ -181,7 +181,7 @@
- name: disable ceph mgr service
service:
name: "ceph-mgr@{{ ansible_hostname }}"
name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
......@@ -208,7 +208,7 @@
- name: disable ceph rgw service
service:
name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: no
failed_when: false
......@@ -230,7 +230,7 @@
- name: disable ceph rbd-mirror service
service:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
......@@ -352,8 +352,8 @@
enabled: no
ignore_errors: true
with_items:
- "ceph-mgr@{{ ansible_hostname }}"
- "ceph-mon@{{ ansible_hostname }}"
- "ceph-mgr@{{ ansible_facts['hostname'] }}"
- "ceph-mon@{{ ansible_facts['hostname'] }}"
- name: remove ceph mon and mgr service
file:
......@@ -486,7 +486,7 @@
tasks:
- name: stop ceph-crash container
service:
name: "ceph-crash@{{ ansible_hostname }}"
name: "ceph-crash@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
failed_when: false
......@@ -595,7 +595,7 @@
state: absent
update_cache: yes
autoremove: yes
when: ansible_os_family == 'Debian'
when: ansible_facts['os_family'] == 'Debian'
- name: red hat based systems tasks
block:
......@@ -616,7 +616,7 @@
args:
warn: no
when:
ansible_pkg_mgr == "yum"
ansible_facts['pkg_mgr'] == "yum"
- name: dnf related tasks on red hat
block:
......@@ -635,9 +635,9 @@
args:
warn: no
when:
ansible_pkg_mgr == "dnf"
ansible_facts['pkg_mgr'] == "dnf"
when:
ansible_os_family == 'RedHat' and
ansible_facts['os_family'] == 'RedHat' and
not is_atomic
- name: find any service-cid file left
......@@ -677,7 +677,7 @@
become: true
tasks:
- name: purge ceph directories for "{{ ansible_hostname }}" and ceph socket
- name: purge ceph directories for "{{ ansible_facts['hostname'] }}" and ceph socket
file:
path: "{{ item }}"
state: absent
......
......@@ -83,7 +83,7 @@
- name: set_fact container_exec_cmd
set_fact:
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
container_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: get iscsi gateway list
......
......@@ -194,7 +194,7 @@
# after the package gets upgraded
- name: stop ceph mon - shortname
systemd:
name: ceph-mon@{{ ansible_hostname }}
name: ceph-mon@{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
masked: yes
......@@ -204,7 +204,7 @@
# after the package gets upgraded
- name: stop ceph mon - fqdn
systemd:
name: ceph-mon@{{ ansible_fqdn }}
name: ceph-mon@{{ ansible_facts['fqdn'] }}
state: stopped
enabled: no
masked: yes
......@@ -214,7 +214,7 @@
# after ALL monitors, even when collocated
- name: mask the mgr service
systemd:
name: ceph-mgr@{{ ansible_hostname }}
name: ceph-mgr@{{ ansible_facts['hostname'] }}
masked: yes
when: inventory_hostname in groups[mgr_group_name] | default([])
or groups[mgr_group_name] | default([]) | length == 0
......@@ -234,7 +234,7 @@
- name: start ceph mgr
systemd:
name: ceph-mgr@{{ ansible_hostname }}
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: started
enabled: yes
ignore_errors: True # if no mgr collocated with mons
......@@ -244,20 +244,20 @@
register: ceph_health_raw
until:
- ceph_health_raw.rc == 0
- (hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
- (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
when: not containerized_deployment | bool
- name: container | waiting for the containerized monitor to join the quorum...
command: >
{{ container_binary }} exec ceph-mon-{{ ansible_hostname }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }} ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" quorum_status --format json
register: ceph_health_raw
until:
- ceph_health_raw.rc == 0
- (hostvars[inventory_hostname]['ansible_hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
hostvars[inventory_hostname]['ansible_fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
- (hostvars[inventory_hostname]['ansible_facts']['hostname'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"] or
hostvars[inventory_hostname]['ansible_facts']['fqdn'] in (ceph_health_raw.stdout | default('{}') | from_json)["quorum_names"])
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
when: containerized_deployment | bool
......@@ -289,7 +289,7 @@
block:
- name: stop ceph mgr
systemd:
name: ceph-mgr@{{ ansible_hostname }}
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: stopped
masked: yes
......@@ -323,7 +323,7 @@
# or if we run a Ceph cluster before Luminous
- name: stop ceph mgr
systemd:
name: ceph-mgr@{{ ansible_hostname }}
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
masked: yes
......@@ -389,7 +389,7 @@
- name: set_fact container_exec_cmd_osd
set_fact:
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_facts']['hostname'] }}"
when: containerized_deployment | bool
- name: stop ceph osd
......@@ -477,7 +477,7 @@
- name: set_fact container_exec_cmd_osd
set_fact:
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ ansible_hostname }}"
container_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ ansible_facts['hostname'] }}"
when: containerized_deployment | bool
- name: get osd versions
......@@ -539,7 +539,7 @@
set_fact:
mds_active_host: "{{ [hostvars[item]['inventory_hostname']] }}"
with_items: "{{ groups[mds_group_name] }}"
when: hostvars[item]['ansible_hostname'] == mds_active_name
when: hostvars[item]['ansible_facts']['hostname'] == mds_active_name
- name: create standby_mdss group