Commit 56aac327 authored by Alex Schultz, committed by Guillaume Abrioux
Browse files

Use ansible_facts

It has come to our attention that using ansible_* vars that are
populated with INJECT_FACTS_AS_VARS=True is not very performant.  In
order to be able to support setting that to off, we need to update the
references to use ansible_facts[<thing>] instead of ansible_<thing>.

Related: ansible#73654
Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1935406

Signed-off-by: Alex Schultz <aschultz@redhat.com>
(cherry picked from commit a7f2fa73)
parent ab857d8b
......@@ -82,7 +82,7 @@ dummy:
#centos_package_dependencies:
# - epel-release
# - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
# - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
#redhat_package_dependencies: []
......@@ -152,7 +152,7 @@ dummy:
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
......@@ -180,7 +180,7 @@ dummy:
#
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
#ceph_stable_openstack_release_uca: queens
#ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
# REPOSITORY: openSUSE OBS
#
......@@ -190,7 +190,7 @@ dummy:
# usually has newer Ceph releases than the normal distro repository.
#
#
#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
# REPOSITORY: DEV
#
......@@ -253,7 +253,7 @@ dummy:
#ceph_conf_key_directory: /etc/ceph
#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
# Permissions for keyring files in /etc/ceph
#ceph_keyring_permissions: '0600'
......@@ -528,7 +528,7 @@ dummy:
# global:
# foo: 1234
# bar: 5678
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
# rgw_zone: zone1
#
#ceph_conf_overrides: {}
......
......@@ -43,14 +43,14 @@ dummy:
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
# TCMU_RUNNER resource limitation
#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_tcmu_runner_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_tcmu_runner_docker_cpu_limit: 1
# RBD_TARGET_GW resource limitation
#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_rbd_target_gw_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_target_gw_docker_cpu_limit: 1
# RBD_TARGET_API resource limitation
#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_rbd_target_api_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_target_api_docker_cpu_limit: 1
......@@ -27,13 +27,13 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mds_docker_extra_env' variable.
#ceph_mds_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_mds_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_mds_docker_cpu_limit: 4
# we currently for MDS_NAME to hostname because of a bug in ceph-docker
# fix here: https://github.com/ceph/ceph-docker/pull/770
# this will go away soon.
#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_hostname }}
#ceph_mds_docker_extra_env: -e MDS_NAME={{ ansible_facts['hostname'] }}
#ceph_config_keys: [] # DON'T TOUCH ME
......
......@@ -41,7 +41,7 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mgr_docker_extra_env' variable.
#ceph_mgr_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_mgr_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_mgr_docker_cpu_limit: 1
#ceph_mgr_docker_extra_env:
......
......@@ -45,7 +45,7 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_mon_docker_extra_env' variable.
#ceph_mon_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_mon_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_mon_docker_cpu_limit: 1
#ceph_mon_container_listen_port: 3300
......
......@@ -25,7 +25,7 @@ dummy:
#ceph_nfs_enable_service: true
# ceph-nfs systemd service uses ansible's hostname as an instance id,
# so service name is ceph-nfs@{{ ansible_hostname }}, this is not
# so service name is ceph-nfs@{{ ansible_facts['hostname'] }}, this is not
# ideal when ceph-nfs is managed by pacemaker across multiple hosts - in
# such case it's better to have constant instance id instead which
# can be set by 'ceph_nfs_service_suffix'
......@@ -82,7 +82,7 @@ dummy:
# they must be configered.
#ceph_nfs_rgw_access_key: "QFAMEDSJP5DEKJO0DDXY"
#ceph_nfs_rgw_secret_key: "iaSFLDVvDdQt6lkNzHyW4fPLZugBAI1g17LO0+87[MAC[M#C"
#rgw_client_name: client.rgw.{{ ansible_hostname }}
#rgw_client_name: client.rgw.{{ ansible_facts['hostname'] }}
###################
# CONFIG OVERRIDE #
......
......@@ -169,7 +169,7 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_osd_docker_extra_env' variable.
#ceph_osd_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_osd_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_osd_docker_cpu_limit: 4
# The next two variables are undefined, and thus, unused by default.
......
......@@ -50,7 +50,7 @@ dummy:
# For the whole list of limits you can apply see: docs.docker.com/engine/admin/resource_constraints
# Default values are based from: https://access.redhat.com/documentation/en-us/red_hat_ceph_storage/2/html/red_hat_ceph_storage_hardware_guide/minimum_recommendations
# These options can be passed using the 'ceph_rbd_mirror_docker_extra_env' variable.
#ceph_rbd_mirror_docker_memory_limit: "{{ ansible_memtotal_mb }}m"
#ceph_rbd_mirror_docker_memory_limit: "{{ ansible_facts['memtotal_mb'] }}m"
#ceph_rbd_mirror_docker_cpu_limit: 1
#ceph_rbd_mirror_docker_extra_env:
......
......@@ -82,7 +82,7 @@ dummy:
#centos_package_dependencies:
# - epel-release
# - "{{ 'python3-libselinux' if ansible_distribution_major_version | int >= 8 else 'libselinux-python' }}"
# - "{{ (ansible_facts['distribution_major_version'] is version('8', '>=')) | ternary('python3-libselinux', 'libselinux-python') }}"
#redhat_package_dependencies: []
......@@ -152,7 +152,7 @@ ceph_repository: rhcs
# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions
# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/
# for more info read: https://github.com/ceph/ceph-ansible/issues/305
#ceph_stable_distro_source: "{{ ansible_distribution_release }}"
#ceph_stable_distro_source: "{{ ansible_facts['distribution_release'] }}"
# REPOSITORY: RHCS VERSION RED HAT STORAGE (from 5.0)
......@@ -180,7 +180,7 @@ ceph_rhcs_version: 5
#
#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu"
#ceph_stable_openstack_release_uca: queens
#ceph_stable_release_uca: "{{ ansible_distribution_release }}-updates/{{ ceph_stable_openstack_release_uca }}"
#ceph_stable_release_uca: "{{ ansible_facts['distribution_release'] }}-updates/{{ ceph_stable_openstack_release_uca }}"
# REPOSITORY: openSUSE OBS
#
......@@ -190,7 +190,7 @@ ceph_rhcs_version: 5
# usually has newer Ceph releases than the normal distro repository.
#
#
#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_distribution_version }}/"
#ceph_obs_repo: "https://download.opensuse.org/repositories/filesystems:/ceph:/{{ ceph_stable_release }}/openSUSE_Leap_{{ ansible_facts['distribution_version'] }}/"
# REPOSITORY: DEV
#
......@@ -253,7 +253,7 @@ ceph_iscsi_config_dev: false
#ceph_conf_key_directory: /etc/ceph
#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_os_family == 'Debian' else '167' }}"
#ceph_uid: "{{ '64045' if not containerized_deployment | bool and ansible_facts['os_family'] == 'Debian' else '167' }}"
# Permissions for keyring files in /etc/ceph
#ceph_keyring_permissions: '0600'
......@@ -528,7 +528,7 @@ ceph_iscsi_config_dev: false
# global:
# foo: 1234
# bar: 5678
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_hostname'] }}":
# "client.rgw.{{ hostvars[groups.get(rgw_group_name)[0]]['ansible_facts']['hostname'] }}":
# rgw_zone: zone1
#
#ceph_conf_overrides: {}
......
......@@ -283,12 +283,12 @@
delegate_to: '{{ groups[mon_group_name][0] }}'
- name: manage nodes with cephadm
command: "{{ ceph_cmd }} orch host add {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ group_names | join(' ') }}"
command: "{{ ceph_cmd }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['default_ipv4']['address'] }} {{ group_names | join(' ') }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
- name: add ceph label for core component
command: "{{ ceph_cmd }} orch host label add {{ ansible_hostname }} ceph"
command: "{{ ceph_cmd }} orch host label add {{ ansible_facts['hostname'] }} ceph"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
when: inventory_hostname in groups.get(mon_group_name, []) or
......@@ -352,7 +352,7 @@
- name: adopt mon daemon
cephadm_adopt:
name: "mon.{{ ansible_hostname }}"
name: "mon.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
docker: "{{ true if container_binary == 'docker' else false }}"
......@@ -360,7 +360,7 @@
firewalld: "{{ true if configure_firewall | bool else false }}"
- name: reset failed ceph-mon systemd unit
command: 'systemctl reset-failed ceph-mon@{{ ansible_hostname }}' # noqa 303
command: "systemctl reset-failed ceph-mon@{{ ansible_facts['hostname'] }}" # noqa 303
changed_when: false
failed_when: false
when: containerized_deployment | bool
......@@ -382,7 +382,7 @@
changed_when: false
register: ceph_health_raw
until: >
ansible_hostname in (ceph_health_raw.stdout | from_json)["quorum_names"]
ansible_facts['hostname'] in (ceph_health_raw.stdout | from_json)["quorum_names"]
retries: "{{ health_mon_check_retries }}"
delay: "{{ health_mon_check_delay }}"
environment:
......@@ -399,7 +399,7 @@
- name: adopt mgr daemon
cephadm_adopt:
name: "mgr.{{ ansible_hostname }}"
name: "mgr.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
docker: "{{ true if container_binary == 'docker' else false }}"
......@@ -407,7 +407,7 @@
firewalld: "{{ true if configure_firewall | bool else false }}"
- name: reset failed ceph-mgr systemd unit
command: 'systemctl reset-failed ceph-mgr@{{ ansible_hostname }}' # noqa 303
command: "systemctl reset-failed ceph-mgr@{{ ansible_facts['hostname'] }}" # noqa 303
changed_when: false
failed_when: false
when: containerized_deployment | bool
......@@ -583,7 +583,7 @@
- name: stop and disable ceph-mds systemd service
service:
name: 'ceph-mds@{{ ansible_hostname }}'
name: "ceph-mds@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: false
failed_when: false
......@@ -596,7 +596,7 @@
when: not containerized_deployment | bool
- name: reset failed ceph-mds systemd unit
command: 'systemctl reset-failed ceph-mds@{{ ansible_hostname }}' # noqa 303
command: "systemctl reset-failed ceph-mds@{{ ansible_facts['hostname'] }}" # noqa 303
changed_when: false
failed_when: false
when: containerized_deployment | bool
......@@ -615,7 +615,7 @@
- name: remove legacy ceph mds data
file:
path: '/var/lib/ceph/mds/{{ cluster }}-{{ ansible_hostname }}'
path: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}"
state: absent
- name: rgw realm/zonegroup/zone requirements
......@@ -692,7 +692,7 @@
- name: stop and disable ceph-radosgw systemd service
service:
name: 'ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}'
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: false
failed_when: false
......@@ -706,7 +706,7 @@
when: not containerized_deployment | bool
- name: reset failed ceph-radosgw systemd unit
command: 'systemctl reset-failed ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}' # noqa 303
command: "systemctl reset-failed ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}" # noqa 303
changed_when: false
failed_when: false
loop: '{{ rgw_instances }}'
......@@ -726,13 +726,13 @@
- name: remove legacy ceph radosgw data
file:
path: '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}.{{ item.instance_name }}'
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: absent
loop: '{{ rgw_instances }}'
- name: remove legacy ceph radosgw directory
file:
path: '/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_hostname }}'
path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
state: absent
- name: redeploy rbd-mirror daemons
......@@ -762,7 +762,7 @@
- name: stop and disable rbd-mirror systemd service
service:
name: 'ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}'
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
enabled: false
failed_when: false
......@@ -775,7 +775,7 @@
when: not containerized_deployment | bool
- name: reset failed rbd-mirror systemd unit
command: 'systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}' # noqa 303
command: "systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}" # noqa 303
changed_when: false
failed_when: false
when: containerized_deployment | bool
......@@ -938,7 +938,7 @@
- name: adopt alertmanager daemon
cephadm_adopt:
name: "alertmanager.{{ ansible_hostname }}"
name: "alertmanager.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ alertmanager_container_image }}"
docker: "{{ true if container_binary == 'docker' else false }}"
......@@ -992,7 +992,7 @@
- name: adopt prometheus daemon
cephadm_adopt:
name: "prometheus.{{ ansible_hostname }}"
name: "prometheus.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ prometheus_container_image }}"
docker: "{{ true if container_binary == 'docker' else false }}"
......@@ -1019,7 +1019,7 @@
- name: adopt grafana daemon
cephadm_adopt:
name: "grafana.{{ ansible_hostname }}"
name: "grafana.{{ ansible_facts['hostname'] }}"
cluster: "{{ cluster }}"
image: "{{ grafana_container_image }}"
docker: "{{ true if container_binary == 'docker' else false }}"
......
......@@ -231,14 +231,14 @@
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: manage nodes with cephadm
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_hostname }} {{ ansible_default_ipv4.address }} {{ group_names | join(' ') }}"
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['default_ipv4']['address'] }} {{ group_names | join(' ') }}"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
environment:
CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'
- name: add ceph label for core component
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_hostname }} ceph"
command: "{{ cephadm_cmd }} shell -- ceph --cluster {{ cluster }} orch host label add {{ ansible_facts['hostname'] }} ceph"
changed_when: false
delegate_to: '{{ groups[mon_group_name][0] }}'
when: inventory_hostname in groups.get(mon_group_name, []) or
......
......@@ -71,7 +71,7 @@
run_once: true
- name: get all nfs-ganesha mount points
command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
register: nfs_ganesha_mount_points
failed_when: false
with_items: "{{ groups[nfs_group_name] }}"
......@@ -127,7 +127,7 @@
name: nfs-ganesha
state: stopped
failed_when: false
when: ansible_service_mgr == 'systemd'
when: ansible_facts['service_mgr'] == 'systemd'
- name: purge node-exporter
hosts:
......@@ -249,7 +249,7 @@
- name: stop ceph mdss with systemd
service:
name: ceph-mds@{{ ansible_hostname }}
name: ceph-mds@{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
failed_when: false
......@@ -270,11 +270,11 @@
- name: stop ceph mgrs with systemd
service:
name: ceph-mgr@{{ ansible_hostname }}
name: ceph-mgr@{{ ansible_facts['hostname'] }}
state: stopped
enabled: no
failed_when: false
when: ansible_service_mgr == 'systemd'
when: ansible_facts['service_mgr'] == 'systemd'
- name: purge rgwloadbalancer cluster
......@@ -318,7 +318,7 @@
- name: stop ceph rgws with systemd
service:
name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: no
failed_when: false
......@@ -340,7 +340,7 @@
- name: stop ceph rbd mirror with systemd
service:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
state: stopped
failed_when: false
......@@ -368,7 +368,7 @@
become: false
wait_for:
port: 22
host: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
host: "{{ hostvars[inventory_hostname]['ansible_facts']['default_ipv4']['address'] }}"
state: started
delay: 10
timeout: 500
......@@ -398,7 +398,7 @@
state: stopped
enabled: no
with_items: "{{ osd_ids.stdout_lines }}"
when: ansible_service_mgr == 'systemd'
when: ansible_facts['service_mgr'] == 'systemd'
- name: remove ceph udev rules
file:
......@@ -618,7 +618,7 @@
- name: stop ceph mons with systemd
service:
name: "ceph-{{ item }}@{{ ansible_hostname }}"
name: "ceph-{{ item }}@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
failed_when: false
......@@ -740,27 +740,27 @@
yum:
name: "{{ ceph_packages }}"
state: absent
when: ansible_pkg_mgr == 'yum'
when: ansible_facts['pkg_mgr'] == 'yum'
- name: purge ceph packages with dnf
dnf:
name: "{{ ceph_packages }}"
state: absent
when: ansible_pkg_mgr == 'dnf'
when: ansible_facts['pkg_mgr'] == 'dnf'
- name: purge ceph packages with apt
apt:
name: "{{ ceph_packages }}"
state: absent
purge: true
when: ansible_pkg_mgr == 'apt'
when: ansible_facts['pkg_mgr'] == 'apt'
- name: purge remaining ceph packages with yum
yum:
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'yum'
- ansible_facts['pkg_mgr'] == 'yum'
- purge_all_packages | bool
- name: purge remaining ceph packages with dnf
......@@ -768,7 +768,7 @@
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'dnf'
- ansible_facts['pkg_mgr'] == 'dnf'
- purge_all_packages | bool
- name: purge remaining ceph packages with apt
......@@ -776,7 +776,7 @@
name: "{{ ceph_remaining_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'apt'
- ansible_facts['pkg_mgr'] == 'apt'
- purge_all_packages | bool
- name: purge extra packages with yum
......@@ -784,7 +784,7 @@
name: "{{ extra_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'yum'
- ansible_facts['pkg_mgr'] == 'yum'
- purge_all_packages | bool
- name: purge extra packages with dnf
......@@ -792,7 +792,7 @@
name: "{{ extra_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'dnf'
- ansible_facts['pkg_mgr'] == 'dnf'
- purge_all_packages | bool
- name: purge extra packages with apt
......@@ -800,7 +800,7 @@
name: "{{ extra_packages }}"
state: absent
when:
- ansible_pkg_mgr == 'apt'
- ansible_facts['pkg_mgr'] == 'apt'
- purge_all_packages | bool
- name: remove config and any ceph socket left
......@@ -826,7 +826,7 @@
- name: purge dnf cache
command: dnf clean all
when: ansible_pkg_mgr == 'dnf'
when: ansible_facts['pkg_mgr'] == 'dnf'
- name: purge rpm cache in /tmp
file:
......@@ -835,7 +835,7 @@
- name: clean apt
command: apt-get clean # noqa 303
when: ansible_pkg_mgr == 'apt'
when: ansible_facts['pkg_mgr'] == 'apt'
- name: purge ceph repo file in /etc/yum.repos.d
file:
......@@ -845,7 +845,7 @@
- ceph-dev
- ceph_stable
- rh_storage
when: ansible_os_family == 'RedHat'
when: ansible_facts['os_family'] == 'RedHat'
- name: check for anything running ceph
command: "ps -u ceph -U ceph"
......@@ -866,7 +866,7 @@
path: "{{ item.path }}"
state: absent
with_items: "{{ systemd_files.files }}"
when: ansible_service_mgr == 'systemd'
when: ansible_facts['service_mgr'] == 'systemd'
- name: purge fetch directory
......
......@@ -55,7 +55,7 @@
run_once: true
- name: get all nfs-ganesha mount points
command: grep "{{ hostvars[item]['ansible_all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
command: grep "{{ hostvars[item]['ansible_facts']['all_ipv4_addresses'] | ips_in_ranges(public_network.split(',')) | first }}" /proc/mounts
register: nfs_ganesha_mount_points
failed_when: false
with_items: "{{ groups[nfs_group_name] }}"
......@@ -104,7 +104,7 @@
- name: disable ceph nfs service
service:
name: "ceph-nfs@{{ ansible_hostname }}"
name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
......@@ -114,7 +114,7 @@
path: /etc/systemd/system/ceph-nfs@.service
state: absent
- name: remove ceph nfs directories for "{{ ansible_hostname }}"
- name: remove ceph nfs directories for "{{ ansible_facts['hostname'] }}"
file:
path: "{{ item }}"
state: absent
......@@ -134,7 +134,7 @@
- name: disable ceph mds service
service:
name: "ceph-mds@{{ ansible_hostname }}"
name: "ceph-mds@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
......@@ -181,7 +181,7 @@
- name: disable ceph mgr service
service:
name: "ceph-mgr@{{ ansible_hostname }}"
name: "ceph-mgr@{{ ansible_facts['hostname'] }}"
state: stopped
enabled: no
ignore_errors: true
......@@ -208,7 +208,7 @@
- name: disable ceph rgw service
service:
name: "ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}"
name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
state: stopped
enabled: no
failed_when: false
......@@ -230,7 +230,7 @@
- name: disable ceph rbd-mirror service