Commit 0784b8c5 authored by Leseb

Merge pull request #662 from ceph/follow-up-cluster-name

ceph: implement cluster name support
parents dde0c204 450feaac
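
This change threads a `cluster` variable (defaulting to "ceph") through the playbooks, so conf files, keyrings, data directories, and init jobs are all derived from the cluster name instead of a hardcoded "ceph". As a rough illustration of the resulting conventions, with a hypothetical cluster named "backup" (the name is an example, not part of this commit):

    # the CLI reads /etc/ceph/backup.conf instead of /etc/ceph/ceph.conf
    ceph --cluster backup -s
    # the admin keyring follows the same naming scheme
    ls -l /etc/ceph/backup.client.admin.keyring
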
......@@ -14,6 +14,7 @@ dummy:
###########
#fetch_directory: fetch/
+#cluster: ceph # cluster name
###########
# INSTALL #
......
......@@ -21,7 +21,7 @@
private: no
tasks:
-- name: exit playbook, if user didn't mean to purge cluster
+- name: exit playbook, if user did not mean to purge cluster
fail:
msg: >
"Exiting purge-cluster playbook, cluster was NOT purged.
......@@ -70,6 +70,10 @@
- python-rados
- python-rbd
+cluster: ceph # name of the cluster
+monitor_name: "{{ ansible_hostname }}"
+mds_name: "{{ ansible_hostname }}"
handlers:
- name: restart machine
......@@ -183,7 +187,10 @@
# Ubuntu 14.04
- name: stop ceph osds on ubuntu
-command: stop ceph-osd id={{ item }}
+shell: |
+for id in $(ls /var/lib/ceph/osd/ |grep -oh '[0-9]*'); do
+initctl stop ceph-osd cluster={{ cluster }} id=$id
+done
failed_when: false
when:
ansible_distribution == 'Ubuntu' and
......@@ -191,21 +198,21 @@
with_items: "{{ osd_ids.stdout_lines }}"
- name: stop ceph mons on ubuntu
-command: stop ceph-mon id={{ ansible_hostname }}
+command: initctl stop ceph-mon cluster={{ cluster }} id={{ monitor_name }}
failed_when: false
when:
ansible_distribution == 'Ubuntu' and
mon_group_name in group_names
- name: stop ceph mdss on ubuntu
-command: stop ceph-mds-all
+command: initctl stop ceph-mds cluster={{ cluster }} id={{ mds_name }}
failed_when: false
when:
ansible_distribution == 'Ubuntu' and
mds_group_name in group_names
- name: stop ceph rgws on ubuntu
-command: stop ceph-radosgw id=rgw.{{ ansible_hostname }}
+command: initctl stop radosgw cluster={{ cluster }} id={{ ansible_hostname }}
failed_when: false
when:
ansible_distribution == 'Ubuntu' and
......
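
A note on the purge changes above: the catch-all upstart jobs (ceph-mon-all, ceph-mds-all, and friends) only know about the default "ceph" cluster, so they are replaced with per-daemon initctl calls that pass the cluster name explicitly. A hypothetical manual equivalent on an Ubuntu 14.04 node, for a cluster named "backup" and OSD id 3:

    # stop a single OSD of a named cluster via upstart
    initctl stop ceph-osd cluster=backup id=3
    # check which ceph upstart jobs are still running
    initctl list | grep ceph
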
......@@ -6,6 +6,7 @@
###########
fetch_directory: fetch/
+cluster: ceph # cluster name
###########
# INSTALL #
......
......@@ -22,7 +22,7 @@
is_ceph_infernalis
- name: restart ceph mons on ubuntu
-command: restart ceph-mon-all
+command: initctl restart ceph-mon cluster={{ cluster }} id={{ monitor_name }}
when:
socket.rc == 0 and
ansible_distribution == 'Ubuntu' and
......@@ -50,14 +50,17 @@
is_ceph_infernalis
- name: restart ceph osds on ubuntu
-command: restart ceph-osd-all
+shell: |
+for id in $(ls /var/lib/ceph/osd/ |grep -oh '[0-9]*'); do
+initctl restart ceph-osd cluster={{ cluster }} id=$id
+done
when:
socket.rc == 0 and
ansible_distribution == 'Ubuntu' and
osd_group_name in group_names
- name: restart ceph mdss on ubuntu
-command: restart ceph-mds-all
+command: initctl restart ceph-mds cluster={{ cluster }} id={{ ansible_hostname }}
when:
socket.rc == 0 and
ansible_distribution == 'Ubuntu' and
......@@ -84,7 +87,7 @@
ceph_stable_release not in ceph_stable_releases
- name: restart ceph rgws on ubuntu
-command: restart ceph-all
+command: initctl restart radosgw cluster={{ cluster }} id=rgw.{{ ansible_hostname }}
when:
socketrgw.rc == 0 and
ansible_distribution == 'Ubuntu' and
......
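
The restart handlers follow the same pattern: per-daemon initctl restarts with explicit cluster= and id= parameters replace the ceph-*-all wrapper jobs. For example, with a hypothetical cluster "backup" on a monitor whose id is "mon0":

    initctl restart ceph-mon cluster=backup id=mon0
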
......@@ -179,7 +179,7 @@
action: config_template
args:
src: ceph.conf.j2
-dest: /etc/ceph/ceph.conf
+dest: /etc/ceph/{{ cluster }}.conf
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ activate_file_mode }}"
......@@ -207,3 +207,19 @@
owner: "{{ rbd_client_dir_owner }}"
group: "{{ rbd_client_dir_group }}"
mode: "{{ rbd_client_dir_mode }}"
+- name: configure cluster name
+lineinfile:
+dest: /etc/sysconfig/ceph
+insertafter: EOF
+line: "CLUSTER={{ cluster }}"
+when:
+ansible_os_family == "RedHat"
+- name: configure cluster name
+lineinfile:
+dest: /etc/default/ceph/ceph
+insertafter: EOF
+line: "CLUSTER={{ cluster }}"
+when:
+ansible_os_family == "Debian"
......@@ -183,10 +183,10 @@ debug mds migrator = {{ debug_mds_level }}
rgw dns name = {{ radosgw_dns_name }}
{% endif %}
host = {{ hostvars[host]['ansible_hostname'] }}
-keyring = /var/lib/ceph/radosgw/ceph-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
+keyring = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}/keyring
rgw socket path = /tmp/radosgw-{{ hostvars[host]['ansible_hostname'] }}.sock
-log file = /var/log/ceph/radosgw-{{ hostvars[host]['ansible_hostname'] }}.log
-rgw data = /var/lib/ceph/radosgw/ceph-rgw.{{ hostvars[host]['ansible_hostname'] }}
+log file = /var/log/ceph/{{ cluster }}-rgw-{{ hostvars[host]['ansible_hostname'] }}.log
+rgw data = /var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ hostvars[host]['ansible_hostname'] }}
{% if radosgw_frontend == 'civetweb' %}
rgw frontends = civetweb port={{ radosgw_civetweb_port }}
{% endif %}
......
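
In the rgw template above, the keyring, log file, and rgw data paths now embed the cluster name. Assuming a hypothetical cluster "backup" and an rgw host named "rgw0", a quick way to inspect the rendered section (the grep pattern is illustrative):

    grep -A 4 'rgw.rgw0' /etc/ceph/backup.conf
    # expect paths under /var/lib/ceph/radosgw/backup-rgw.rgw0
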
......@@ -23,30 +23,30 @@
group: "{{ key_group }}"
mode: "{{ key_mode }}"
with_items:
-- { name: /var/lib/ceph/bootstrap-mds/ceph.keyring, copy_key: true }
-- { name: /etc/ceph/ceph.client.admin.keyring, copy_key: "{{ copy_admin_key }}" }
+- { name: "/var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring", copy_key: true }
+- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
when:
cephx and
item.copy_key|bool
- name: create mds directory
file:
-path: /var/lib/ceph/mds/ceph-{{ mds_name }}
+path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}
state: directory
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ dir_mode }}"
- name: create mds keyring
-command: ceph --cluster ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring auth get-or-create mds.{{ mds_name }} osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/ceph-{{ mds_name }}/keyring
+command: ceph --cluster {{ cluster }} --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring auth get-or-create mds.{{ mds_name }} osd 'allow rwx' mds 'allow' mon 'allow profile mds' -o /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/keyring
args:
-creates: /var/lib/ceph/mds/ceph-{{ mds_name }}/keyring
+creates: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/keyring
changed_when: false
when: cephx
- name: set mds key permissions
file:
-path: /var/lib/ceph/mds/ceph-{{ mds_name }}/keyring
+path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/keyring
mode: "{{ key_mode }}"
owner: "{{ key_owner }}"
group: "{{ key_group }}"
......@@ -54,7 +54,7 @@
- name: activate metadata server with upstart
file:
-path: /var/lib/ceph/mds/ceph-{{ mds_name }}/{{ item }}
+path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/{{ item }}
state: touch
owner: "{{ activate_file_owner }}"
group: "{{ activate_file_group }}"
......@@ -67,7 +67,7 @@
- name: activate metadata server with sysvinit
file:
-path: /var/lib/ceph/mds/ceph-{{ mds_name }}/{{ item }}
+path: /var/lib/ceph/mds/{{ cluster }}-{{ mds_name }}/{{ item }}
state: touch
owner: "{{ activate_file_owner }}"
group: "{{ activate_file_group }}"
......@@ -90,12 +90,9 @@
is_ceph_infernalis
- name: start and add that the metadata service to the init sequence (ubuntu)
-service:
-name: ceph-mds
-state: started
-enabled: yes
-args: "id={{ mds_name }}"
+command: initctl emit ceph-mds cluster={{ cluster }} id={{ mds_name }}
+changed_when: false
+failed_when: false
when: ansible_distribution == "Ubuntu"
- name: start and add that the metadata service to the init sequence (before infernalis)
......
......@@ -3,12 +3,12 @@
# the admin key is not instantaneously created so we have to wait a bit
- name: wait for client.admin key exists
wait_for:
-path: /etc/ceph/ceph.client.admin.keyring
+path: /etc/ceph/{{ cluster }}.client.admin.keyring
- name: create ceph rest api keyring when mon is not containerized
-command: ceph auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.restapi.keyring
+command: ceph --cluster {{ cluster }} auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/{{ cluster }}.client.restapi.keyring
args:
-creates: /etc/ceph/ceph.client.restapi.keyring
+creates: /etc/ceph/{{ cluster }}.client.restapi.keyring
changed_when: false
when:
cephx and
......@@ -41,9 +41,9 @@
run_once: true
with_items:
- "{{ ceph_keys.stdout_lines }}"
-- /var/lib/ceph/bootstrap-osd/ceph.keyring
-- /var/lib/ceph/bootstrap-rgw/ceph.keyring
-- /var/lib/ceph/bootstrap-mds/ceph.keyring
+- /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring
+- /var/lib/ceph/bootstrap-rgw/{{ cluster }}.keyring
+- /var/lib/ceph/bootstrap-mds/{{ cluster }}.keyring
when: cephx
- name: drop in a motd script to report status when logging in
......
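
The bootstrap keyrings fetched from the monitors are renamed from ceph.keyring to the cluster-specific {{ cluster }}.keyring. A hypothetical sanity check on a monitor node after deploying a cluster named "backup":

    ls -l /var/lib/ceph/bootstrap-{osd,rgw,mds}/backup.keyring
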
......@@ -4,7 +4,7 @@
# the role 'ceph-common' doesn't get inherited so the condition cannot be evaluated
# since those checks are performed by the ceph-common role
- name: create filesystem pools
-command: ceph osd pool create {{ item }} {{ pool_default_pg_num }}
+command: ceph --cluster {{ cluster }} osd pool create {{ item }} {{ pool_default_pg_num }}
with_items:
- cephfs_data
- cephfs_metadata
......@@ -12,6 +12,6 @@
when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}
- name: create ceph filesystem
-command: ceph fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
+command: ceph --cluster {{ cluster }} fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
changed_when: false
when: not {{ ceph_version.stdout | version_compare('0.84', '<') }}
......@@ -38,16 +38,16 @@
- name: create monitor directory
file:
-path: /var/lib/ceph/mon/ceph-{{ monitor_name }}
+path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}
state: directory
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ dir_mode }}"
- name: ceph monitor mkfs with keyring (for or after infernalis release)
-command: ceph-mon --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
+command: ceph-mon --cluster {{ cluster }} --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
args:
-creates: /var/lib/ceph/mon/ceph-{{ monitor_name }}/keyring
+creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
when:
cephx and
is_ceph_infernalis
......@@ -55,7 +55,7 @@
- name: ceph monitor mkfs without keyring (for or after infernalis release)
command: ceph-mon --setuser ceph --setgroup ceph --mkfs -i {{ monitor_name }} --fsid {{ fsid }}
args:
-creates: /var/lib/ceph/mon/ceph-{{ monitor_name }}/store.db
+creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
when:
not cephx and
is_ceph_infernalis
......@@ -63,7 +63,7 @@
- name: ceph monitor mkfs with keyring (before infernalis release)
command: ceph-mon --mkfs -i {{ monitor_name }} --fsid {{ fsid }} --keyring /var/lib/ceph/tmp/keyring.mon.{{ monitor_name }}
args:
-creates: /var/lib/ceph/mon/ceph-{{ monitor_name }}/keyring
+creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/keyring
when:
cephx and
not is_ceph_infernalis
......@@ -71,7 +71,7 @@
- name: ceph monitor mkfs without keyring (before infernalis release)
command: ceph-mon --mkfs -i {{ monitor_name }} --fsid {{ fsid }}
args:
-creates: /var/lib/ceph/mon/ceph-{{ monitor_name }}/store.db
+creates: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/store.db
when:
not cephx and
not is_ceph_infernalis
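
Monitor data directories move from /var/lib/ceph/mon/ceph-<name> to /var/lib/ceph/mon/<cluster>-<name>, and ceph-mon is told the cluster explicitly so mkfs creates its store in the matching directory. A hypothetical manual equivalent for a cluster "backup" on monitor "mon0" (the fsid here is generated only for illustration):

    ceph-mon --cluster backup --mkfs -i mon0 --fsid $(uuidgen)
    ls /var/lib/ceph/mon/backup-mon0
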
---
- name: create openstack pool
-command: ceph osd pool create {{ item.name }} {{ item.pg_num }}
+command: ceph --cluster {{ cluster }} osd pool create {{ item.name }} {{ item.pg_num }}
with_items:
- "{{ openstack_glance_pool }}"
- "{{ openstack_cinder_pool }}"
......@@ -10,9 +10,9 @@
failed_when: false
- name: create openstack keys
-command: ceph auth get-or-create {{ item.name }} {{ item.value }} -o /etc/ceph/ceph.{{ item.name }}.keyring
+command: ceph --cluster {{ cluster }} auth get-or-create {{ item.name }} {{ item.value }} -o /etc/ceph/{{ cluster }}.{{ item.name }}.keyring
args:
-creates: /etc/ceph/ceph.{{ item.name }}.keyring
+creates: /etc/ceph/{{ cluster }}.{{ item.name }}.keyring
with_items: openstack_keys
changed_when: false
when: cephx
---
- name: collect all the pools
-command: rados lspools
+command: rados --cluster {{ cluster }} lspools
register: ceph_pools
when: "{{ ceph_version.stdout | version_compare('0.94', '>=') }}"
- name: secure the cluster
-command: ceph osd pool set {{ item[0] }} {{ item[1] }} true
+command: ceph --cluster {{ cluster }} osd pool set {{ item[0] }} {{ item[1] }} true
with_nested:
- ceph_pools.stdout_lines
- secure_cluster_flags
......
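
Pool operations go through the same --cluster switch on the rados and ceph CLIs. Hypothetical examples for a cluster named "backup", mirroring the tasks above:

    rados --cluster backup lspools
    # set one of the secure_cluster_flags (e.g. nodelete) on a pool
    ceph --cluster backup osd pool set rbd nodelete true
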
---
- name: activate monitor with upstart
file:
-path: /var/lib/ceph/mon/ceph-{{ monitor_name }}/{{ item }}
+path: /var/lib/ceph/mon/{{ cluster }}-{{ monitor_name }}/{{ item }}
state: touch
owner: "{{ activate_file_owner }}"
group: "{{ activate_file_group }}"
......@@ -13,11 +13,9 @@
when: ansible_distribution == "Ubuntu"
- name: start and add that the monitor service to the init sequence (ubuntu)
-service:
-name: ceph-mon
-state: started
-enabled: yes
-args: "id={{ monitor_name }}"
+command: initctl emit ceph-mon cluster={{ cluster }} id={{ monitor_name }}
+changed_when: false
+failed_when: false
when: ansible_distribution == "Ubuntu"
# NOTE (leseb): somehow the service ansible module is messing things up
......@@ -51,13 +49,13 @@
is_ceph_infernalis
- name: collect admin and bootstrap keys
-command: ceph-create-keys --id {{ monitor_name }}
+command: ceph-create-keys --cluster {{ cluster }} --id {{ monitor_name }}
changed_when: false
failed_when: false
when: cephx
- name: get ceph monitor version
-shell: ceph daemon mon."{{ monitor_name }}" version | cut -d '"' -f 4 | cut -f 1,2 -d '.'
+shell: ceph --cluster {{ cluster }} daemon mon."{{ monitor_name }}" version | cut -d '"' -f 4 | cut -f 1,2 -d '.'
changed_when: false
failed_when: "'No such file or directory' in ceph_version.stderr"
register: ceph_version
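
Key collection and the daemon-socket version probe are also cluster-aware now. Manual equivalents of the two tasks above, assuming a hypothetical cluster "backup" on monitor "mon0":

    ceph-create-keys --cluster backup --id mon0
    ceph --cluster backup daemon mon.mon0 version
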
......@@ -49,8 +49,8 @@
mode: "{{ activate_file_mode }}"
with_items: combined_osd_id.results
-- name: copy ceph.conf for assembling
-command: cp /etc/ceph/ceph.conf /etc/ceph/ceph.d/
+- name: copy {{ cluster }}.conf for assembling
+command: cp /etc/ceph/{{ cluster }}.conf /etc/ceph/ceph.d/
changed_when: false
- name: assemble osd sections
......@@ -61,10 +61,10 @@
group: "{{ dir_group }}"
mode: "{{ activate_file_mode }}"
-- name: assemble ceph conf and osd fragments
+- name: assemble {{ cluster }}.conf and osd fragments
assemble:
src: /etc/ceph/ceph.d/
-dest: /etc/ceph/ceph.conf
+dest: /etc/ceph/{{ cluster }}.conf
owner: "{{ dir_owner }}"
group: "{{ dir_group }}"
mode: "{{ activate_file_mode }}"
......@@ -29,8 +29,8 @@
group: "{{ key_group }}"
mode: "{{ key_mode }}"
with_items:
-- { name: /var/lib/ceph/bootstrap-osd/ceph.keyring, copy_key: true }
-- { name: /etc/ceph/ceph.client.admin.keyring, copy_key: "{{ copy_admin_key }}" }
+- { name: "/var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring", copy_key: true }
+- { name: "/etc/ceph/{{ cluster }}.client.admin.keyring", copy_key: "{{ copy_admin_key }}" }
when:
cephx and
item.copy_key|bool
......@@ -11,7 +11,7 @@
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed, this is why we check if the device is a partition too.
- name: automatic prepare osd disk(s) without partitions
command: ceph-disk prepare --bluestore "/dev/{{ item.key }}"
command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "/dev/{{ item.key }}"
ignore_errors: true
register: prepared_osds
with_dict: ansible_devices
......@@ -23,7 +23,7 @@
osd_auto_discovery
- name: manually prepare osd disk(s)
command: ceph-disk prepare --bluestore "{{ item.2 }}"
command: ceph-disk prepare --bluestore --cluster "{{ cluster }}" "{{ item.2 }}"
ignore_errors: true
with_together:
- combined_parted_results.results
......
......@@ -10,7 +10,7 @@
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed, this is why we check if the device is a partition too.
- name: automatic prepare osd disk(s) without partitions
command: ceph-disk prepare "/dev/{{ item.key }}"
command: ceph-disk prepare --cluster "{{ cluster }}" "/dev/{{ item.key }}"
ignore_errors: true
register: prepared_osds
with_dict: ansible_devices
......@@ -22,7 +22,7 @@
osd_auto_discovery
- name: manually prepare osd disk(s)
command: "ceph-disk prepare {{ item.2 }}"
command: "ceph-disk prepare --cluster {{ cluster }} {{ item.2 }}"
ignore_errors: true
with_together:
- combined_parted_results.results
......
......@@ -16,7 +16,7 @@
# if you have 64 disks with 4TB each, this will take a while
# since Ansible will process the loop sequentially
- name: prepare OSD disk(s)
command: "ceph-disk prepare {{ item }}"
command: "ceph-disk prepare --cluster {{ cluster }} {{ item }}"
with_items: osd_directories
changed_when: false
when: osd_directory
......
......@@ -10,7 +10,7 @@
# NOTE (alahouze): if the device is a partition, the parted command below has
# failed, this is why we check if the device is a partition too.
- name: prepare osd disk(s)
command: "ceph-disk prepare {{ item.2 }} {{ item.3 }}"
command: "ceph-disk prepare --cluster {{ cluster }} {{ item.2 }} {{ item.3 }}"
with_together:
- combined_parted_results.results
- combined_ispartition_results.results
......
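
Every ceph-disk prepare invocation now passes --cluster so the new OSD is registered against the right conf file and bootstrap keyring. A hypothetical manual run for a cluster named "backup" on a spare disk:

    ceph-disk prepare --cluster backup /dev/sdb
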
......@@ -9,7 +9,7 @@
- name: copy ceph rest api keyring
copy:
src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/ceph.client.restapi.keyring"
src: "{{ fetch_directory }}/{{ fsid }}/etc/ceph/{{ cluster }}.client.restapi.keyring"
dest: "/var/lib/ceph/restapi/ceph-restapi/keyring"
owner: "{{ key_owner }}"
group: "{{ key_group }}"
......