---
#
# This playbook adopts all the Ceph services of an existing cluster with cephadm
#
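#
# A typical invocation looks like this (the inventory path is an assumption;
# adjust it to your environment):
#
#   ansible-playbook -i hosts infrastructure-playbooks/cephadm-adopt.yml -e ireallymeanit=yes
#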

- name: confirm whether user really meant to adopt the cluster with cephadm
  hosts: localhost
  connection: local
  become: false
  gather_facts: false
  vars_prompt:
    - name: ireallymeanit
      prompt: Are you sure you want to adopt the cluster with cephadm?
      default: 'no'
      private: false
  tasks:
    - name: exit playbook, if user did not mean to adopt the cluster with cephadm
      fail:
        msg: >
          Exiting cephadm-adopt playbook, cluster was NOT adopted.
          To adopt the cluster, either say 'yes' at the prompt or
          use `-e ireallymeanit=yes` on the command line when
          invoking the playbook.
      when: ireallymeanit != 'yes'

    - name: import_role ceph-defaults
      import_role:
        name: ceph-defaults

    - name: check if a legacy grafana-server group exists
      import_role:
        name: ceph-facts
        tasks_from: convert_grafana_server_group_name.yml
      when: groups.get((grafana_server_group_name | default('grafana-server')), []) | length > 0

- name: gather facts and prepare system for cephadm
  hosts:
    - "{{ mon_group_name|default('mons') }}"
    - "{{ osd_group_name|default('osds') }}"
    - "{{ mds_group_name|default('mdss') }}"
    - "{{ rgw_group_name|default('rgws') }}"
    - "{{ mgr_group_name|default('mgrs') }}"
    - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
    - "{{ nfs_group_name|default('nfss') }}"
    - "{{ iscsi_gw_group_name|default('iscsigws') }}"
    - "{{ monitoring_group_name|default('monitoring') }}"
  become: true
  gather_facts: false
  vars:
    delegate_facts_host: true
  tasks:
    - import_role:
        name: ceph-defaults

    - name: gather facts
      setup:
        gather_subset:
          - 'all'
          - '!facter'
          - '!ohai'
      when: not delegate_facts_host | bool or inventory_hostname in groups.get(client_group_name, [])

    - name: gather and delegate facts
      setup:
        gather_subset:
          - 'all'
          - '!facter'
          - '!ohai'
      delegate_to: "{{ item }}"
      delegate_facts: true
      with_items: "{{ groups['all'] | difference(groups.get(client_group_name, [])) }}"
      run_once: true
      when: delegate_facts_host | bool

    - name: fail if one osd node is using filestore
      fail:
        msg: >
          filestore OSDs are not supported with cephadm.
          Please convert them with the filestore-to-bluestore.yml playbook first.
      when:
        - osd_group_name in group_names
        - osd_objectstore == 'filestore'

    - import_role:
        name: ceph-facts
        tasks_from: container_binary.yml

    - import_role:
        name: ceph-facts
        tasks_from: convert_grafana_server_group_name.yml
      when: groups.get((grafana_server_group_name|default('grafana-server')), []) | length > 0

    - name: get the ceph version
      command: "{{ container_binary + ' run --rm --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --version"
      changed_when: false
      register: ceph_version_out

    - name: set_fact ceph_version
      set_fact:
        ceph_version: "{{ ceph_version_out.stdout.split(' ')[2] }}"

    - name: fail on pre octopus ceph releases
      fail:
        msg: >
          Your Ceph version {{ ceph_version }} is not supported for this operation.
          Please upgrade your cluster with the rolling_update.yml playbook first.
      when: ceph_version is version('15.2', '<')

    - name: check if it is atomic host
      stat:
        path: /run/ostree-booted
      register: stat_ostree

    - name: set_fact is_atomic
      set_fact:
        is_atomic: "{{ stat_ostree.stat.exists }}"

    - import_role:
        name: ceph-container-engine
      when: not containerized_deployment | bool

    - import_role:
        name: ceph-container-common
        tasks_from: registry.yml
      when:
        - not containerized_deployment | bool
        - ceph_docker_registry_auth | bool

    - name: "pulling {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} image"
      command: "{{ timeout_command }} {{ container_binary }} pull {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
      changed_when: false
      register: docker_image
      until: docker_image.rc == 0
      retries: "{{ docker_pull_retry }}"
      delay: 10
      when:
        - not containerized_deployment | bool
        - inventory_hostname in groups.get(mon_group_name, []) or
          inventory_hostname in groups.get(osd_group_name, []) or
          inventory_hostname in groups.get(mds_group_name, []) or
          inventory_hostname in groups.get(rgw_group_name, []) or
          inventory_hostname in groups.get(mgr_group_name, []) or
          inventory_hostname in groups.get(rbdmirror_group_name, []) or
          inventory_hostname in groups.get(iscsi_gw_group_name, []) or
          inventory_hostname in groups.get(nfs_group_name, [])

    - name: install cephadm requirements
      package:
        name: ['python3', 'lvm2']
      register: result
      until: result is succeeded

    - name: install cephadm
      package:
        name: cephadm
      register: result
      until: result is succeeded
      when: not containerized_deployment | bool

    - name: install cephadm mgr module
      package:
        name: ceph-mgr-cephadm
      register: result
      until: result is succeeded
      when:
        - not containerized_deployment | bool
        - mgr_group_name in group_names

    - name: get cephadm from the container image
      when: containerized_deployment | bool
      block:
        - name: create a cephadm container
          command: "{{ container_binary }} create --name cephadm {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
          changed_when: false

        - name: cp the cephadm cli file
          command: "{{ container_binary }} cp cephadm:/usr/sbin/cephadm /usr/sbin/cephadm"
          args:
            creates: /usr/sbin/cephadm

        - name: remove the cephadm container
          command: "{{ container_binary }} rm cephadm"
          changed_when: false

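    # ceph_cmd resolves to either a one-shot containerized ceph client or the
    # bare `ceph` binary, so the same tasks work for both deployment types.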
    - name: set_fact ceph_cmd
      set_fact:
        ceph_cmd: "{{ container_binary + ' run --rm --net=host -v /etc/ceph:/etc/ceph:z -v /var/lib/ceph:/var/lib/ceph:z -v /var/run/ceph:/var/run/ceph:z --entrypoint=ceph ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'ceph' }} --cluster {{ cluster }}"

    - name: get current fsid
      command: "{{ ceph_cmd }} fsid"
      register: current_fsid
      run_once: true
      changed_when: false
      delegate_to: "{{ groups[mon_group_name][0] }}"

    - name: get a minimal ceph configuration
      command: "{{ ceph_cmd }} config generate-minimal-conf"
      register: minimal_config
      run_once: true
      changed_when: false
      delegate_to: "{{ groups[mon_group_name][0] }}"

    - name: set_fact fsid
      set_fact:
        fsid: "{{ current_fsid.stdout }}"
      run_once: true

    - name: enable cephadm mgr module
      ceph_mgr_module:
        name: cephadm
        cluster: "{{ cluster }}"
        state: enable
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      run_once: true
      delegate_to: '{{ groups[mon_group_name][0] }}'

    - name: set cephadm as orchestrator backend
      command: "{{ ceph_cmd }} orch set backend cephadm"
      changed_when: false
      run_once: true
      delegate_to: '{{ groups[mon_group_name][0] }}'

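    # cephadm manages the adopted hosts over SSH as root, so the key generated
    # here must be authorized on every node (see the authorized_key task below).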
    - name: generate cephadm ssh key
      command: "{{ ceph_cmd }} cephadm generate-key"
      changed_when: false
      run_once: true
      delegate_to: '{{ groups[mon_group_name][0] }}'

    - name: get the cephadm ssh pub key
      command: "{{ ceph_cmd }} cephadm get-pub-key"
      changed_when: false
      run_once: true
      register: cephadm_pubpkey
      delegate_to: '{{ groups[mon_group_name][0] }}'

    - name: allow cephadm key for root account
      authorized_key:
        user: root
        key: '{{ cephadm_pubpkey.stdout }}'

    - name: run cephadm prepare-host
      command: cephadm prepare-host
      changed_when: false
      environment:
        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

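    # Record the container images cephadm should use for any future
    # (re)deployments; they mirror the images ceph-ansible has been using.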
    - name: set default container image in ceph configuration
      command: "{{ ceph_cmd }} config set global container_image {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
      changed_when: false
      run_once: true
      delegate_to: '{{ groups[mon_group_name][0] }}'

    - name: set container image base in ceph configuration
      command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_base {{ ceph_docker_registry }}/{{ ceph_docker_image }}"
      changed_when: false
      run_once: true
      delegate_to: '{{ groups[mon_group_name][0] }}'

    - name: set dashboard container image in ceph mgr configuration
      when: dashboard_enabled | bool
      run_once: true
      block:
        - name: set alertmanager container image in ceph configuration
          command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_alertmanager {{ alertmanager_container_image }}"
          changed_when: false
          delegate_to: '{{ groups[mon_group_name][0] }}'

        - name: set grafana container image in ceph configuration
          command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_grafana {{ grafana_container_image }}"
          changed_when: false
          delegate_to: '{{ groups[mon_group_name][0] }}'

        - name: set node-exporter container image in ceph configuration
          command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_node_exporter {{ node_exporter_container_image }}"
          changed_when: false
          delegate_to: '{{ groups[mon_group_name][0] }}'

        - name: set prometheus container image in ceph configuration
          command: "{{ ceph_cmd }} config set mgr mgr/cephadm/container_image_prometheus {{ prometheus_container_image }}"
          changed_when: false
          delegate_to: '{{ groups[mon_group_name][0] }}'

    - name: manage nodes with cephadm
      command: "{{ ceph_cmd }} orch host add {{ ansible_facts['hostname'] }} {{ ansible_facts['default_ipv4']['address'] }} {{ group_names | join(' ') }}"
      changed_when: false
      delegate_to: '{{ groups[mon_group_name][0] }}'

    - name: add ceph label for core component
      command: "{{ ceph_cmd }} orch host label add {{ ansible_facts['hostname'] }} ceph"
      changed_when: false
      delegate_to: '{{ groups[mon_group_name][0] }}'
      when: inventory_hostname in groups.get(mon_group_name, []) or
            inventory_hostname in groups.get(osd_group_name, []) or
            inventory_hostname in groups.get(mds_group_name, []) or
            inventory_hostname in groups.get(rgw_group_name, []) or
            inventory_hostname in groups.get(mgr_group_name, []) or
            inventory_hostname in groups.get(rbdmirror_group_name, [])

    - name: get the client.admin keyring
      ceph_key:
        name: client.admin
        cluster: "{{ cluster }}"
        output_format: plain
        state: info
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      run_once: true
      delegate_to: '{{ groups[mon_group_name][0] }}'
      register: client_admin_keyring

    - name: copy the client.admin keyring
      copy:
        dest: "/etc/ceph/{{ cluster }}.client.admin.keyring"
        content: "{{ client_admin_keyring.stdout + '\n' }}"
        owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
        group: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
        mode: "{{ ceph_keyring_permissions }}"
      run_once: true
      delegate_to: "{{ item }}"
      with_items:
        - "{{ groups.get(osd_group_name, []) }}"
        - "{{ groups.get(mds_group_name, []) }}"
        - "{{ groups.get(rgw_group_name, []) }}"
        - "{{ groups.get(mgr_group_name, []) }}"
        - "{{ groups.get(rbdmirror_group_name, []) }}"

    - name: assimilate ceph configuration
      command: "{{ ceph_cmd }} config assimilate-conf -i /etc/ceph/{{ cluster }}.conf"
      changed_when: false
      when: inventory_hostname in groups.get(mon_group_name, []) or
            inventory_hostname in groups.get(osd_group_name, []) or
            inventory_hostname in groups.get(mds_group_name, []) or
            inventory_hostname in groups.get(rgw_group_name, []) or
            inventory_hostname in groups.get(mgr_group_name, []) or
            inventory_hostname in groups.get(rbdmirror_group_name, [])

    - name: set_fact cephadm_cmd
      set_fact:
        cephadm_cmd: "cephadm {{ '--docker' if container_binary == 'docker' else '' }}"

- name: adopt ceph mon daemons
  hosts: "{{ mon_group_name|default('mons') }}"
  serial: 1
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

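    # cephadm_adopt converts the legacy ceph-ansible daemon on this host into
    # a cephadm-managed container; mons are handled one host at a time
    # (serial: 1) so quorum is never lost.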
    - name: adopt mon daemon
      cephadm_adopt:
        name: "mon.{{ ansible_facts['hostname'] }}"
        cluster: "{{ cluster }}"
        image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
        docker: "{{ true if container_binary == 'docker' else false }}"
        pull: false
        firewalld: "{{ true if configure_firewall | bool else false }}"

    - name: reset failed ceph-mon systemd unit
      command: "systemctl reset-failed ceph-mon@{{ ansible_facts['hostname'] }}"  # noqa 303
      changed_when: false
      failed_when: false
      when: containerized_deployment | bool

    - name: remove ceph-mon systemd unit file
      file:
        path: /etc/systemd/system/ceph-mon@.service
        state: absent
      when: containerized_deployment | bool

    - name: remove ceph-mon systemd override directory
      file:
        path: /etc/systemd/system/ceph-mon@.service.d
        state: absent
      when: not containerized_deployment | bool

    - name: waiting for the monitor to join the quorum...
      command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} quorum_status --format json"
      changed_when: false
      register: ceph_health_raw
      until: >
        ansible_facts['hostname'] in (ceph_health_raw.stdout | from_json)["quorum_names"]
      retries: "{{ health_mon_check_retries }}"
      delay: "{{ health_mon_check_delay }}"
      environment:
        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: adopt ceph mgr daemons
  hosts: "{{ mgr_group_name|default('mgrs') }}"
  serial: 1
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: adopt mgr daemon
      cephadm_adopt:
        name: "mgr.{{ ansible_facts['hostname'] }}"
        cluster: "{{ cluster }}"
        image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
        docker: "{{ true if container_binary == 'docker' else false }}"
        pull: false
        firewalld: "{{ true if configure_firewall | bool else false }}"

    - name: reset failed ceph-mgr systemd unit
      command: "systemctl reset-failed ceph-mgr@{{ ansible_facts['hostname'] }}"  # noqa 303
      changed_when: false
      failed_when: false
      when: containerized_deployment | bool

    - name: remove ceph-mgr systemd unit file
      file:
        path: /etc/systemd/system/ceph-mgr@.service
        state: absent
      when: containerized_deployment | bool

    - name: remove ceph-mgr systemd override directory
      file:
        path: /etc/systemd/system/ceph-mgr@.service.d
        state: absent
      when: not containerized_deployment | bool

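# Flag the OSDs noout/nodeep-scrub for the duration of the adoption so that
# restarting each OSD does not trigger rebalancing or deep scrubs.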
- name: set osd flags
  hosts: "{{ osd_group_name|default('osds') }}"
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: set osd flags
      ceph_osd_flag:
        cluster: "{{ cluster }}"
        name: "{{ item }}"
        state: present
      with_items:
        - noout
        - nodeep-scrub
      delegate_to: "{{ groups[mon_group_name][0] }}"
      run_once: true
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"

- name: adopt ceph osd daemons
  hosts: "{{ osd_group_name|default('osds') }}"
  serial: 1
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - import_role:
        name: ceph-facts
        tasks_from: container_binary.yml
      when: containerized_deployment | bool

    - name: get osd list
      ceph_volume:
        cluster: "{{ cluster }}"
        action: list
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"
      register: osd_list

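    # The adoption expects each OSD data directory to contain fsid and type
    # files; legacy containerized OSDs may lack them, so recreate them from
    # the ceph-volume listing gathered above.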
    - name: set osd fsid for containerized deployment
      lineinfile:
        path: '/var/lib/ceph/osd/{{ cluster }}-{{ item.key }}/fsid'
        line: "{{ (item.value | selectattr('type', 'equalto', 'block') | map(attribute='tags') | first)['ceph.osd_fsid'] }}"
        owner: '{{ ceph_uid }}'
        group: '{{ ceph_uid }}'
        create: true
      with_dict: '{{ osd_list.stdout | from_json }}'
      when: containerized_deployment | bool

    - name: set osd type for containerized deployment
      lineinfile:
        path: '/var/lib/ceph/osd/{{ cluster }}-{{ item }}/type'
        line: 'bluestore'
        owner: '{{ ceph_uid }}'
        group: '{{ ceph_uid }}'
        create: true
      loop: '{{ (osd_list.stdout | from_json).keys() | list }}'
      when: containerized_deployment | bool

    - name: adopt osd daemon
      cephadm_adopt:
        name: "osd.{{ item }}"
        cluster: "{{ cluster }}"
        image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
        docker: "{{ true if container_binary == 'docker' else false }}"
        pull: false
        firewalld: "{{ true if configure_firewall | bool else false }}"
      loop: '{{ (osd_list.stdout | from_json).keys() | list }}'

    - name: remove ceph-osd systemd unit and ceph-osd-run.sh files
      file:
        path: '{{ item }}'
        state: absent
      loop:
        - /etc/systemd/system/ceph-osd@.service
        - "{{ ceph_osd_docker_run_script_path | default('/usr/share') }}/ceph-osd-run.sh"
      when: containerized_deployment | bool

    - name: remove ceph-osd systemd override directory
      file:
        path: /etc/systemd/system/ceph-osd@.service.d
        state: absent
      when: not containerized_deployment | bool

    - name: remove osd directory
      file:
        path: "/var/lib/ceph/osd/{{ cluster }}-{{ item }}"
        state: absent
      loop: '{{ (osd_list.stdout | from_json).keys() | list }}'

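    # Wait until every PG is active+clean again before the next play unsets
    # the noout/nodeep-scrub flags.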
    - name: waiting for clean pgs...
      command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} pg stat --format json"
      changed_when: false
      register: ceph_health_post
      until: >
        (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | length) > 0)
        and
        (((ceph_health_post.stdout | from_json).pg_summary.num_pg_by_state | selectattr('name', 'search', '^active\\+clean') | map(attribute='num') | list | sum) == (ceph_health_post.stdout | from_json).pg_summary.num_pgs)
      delegate_to: "{{ groups[mon_group_name][0] }}"
      retries: "{{ health_osd_check_retries }}"
      delay: "{{ health_osd_check_delay }}"
      environment:
        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: unset osd flags
  hosts: "{{ osd_group_name|default('osds') }}"
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: unset osd flags
      ceph_osd_flag:
        cluster: "{{ cluster }}"
        name: "{{ item }}"
        state: absent
      with_items:
        - noout
        - nodeep-scrub
      delegate_to: "{{ groups[mon_group_name][0] }}"
      run_once: true
      environment:
        CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
        CEPH_CONTAINER_BINARY: "{{ container_binary }}"

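# From here on, the stateless services (mds, rgw, nfs, rbd-mirror, iscsi,
# monitoring) are not adopted in place: cephadm deploys fresh daemons via
# `ceph orch apply`, after which the legacy daemons are stopped and removed.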
- name: redeploy mds daemons
  hosts: "{{ mds_group_name|default('mdss') }}"
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: update the placement of metadata hosts
      command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply mds {{ cephfs }} --placement='{{ groups.get(mds_group_name, []) | length }} label:{{ mds_group_name }}'"
      run_once: true
      changed_when: false
      delegate_to: "{{ groups[mon_group_name][0] }}"
      environment:
        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: stop and remove legacy ceph mds daemons
  hosts: "{{ mds_group_name|default('mdss') }}"
  serial: 1
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: stop and disable ceph-mds systemd service
      service:
        name: "ceph-mds@{{ ansible_facts['hostname'] }}"
        state: stopped
        enabled: false
      failed_when: false

    - name: stop and disable ceph-mds systemd target
      service:
        name: ceph-mds.target
        state: stopped
        enabled: false
      when: not containerized_deployment | bool

    - name: reset failed ceph-mds systemd unit
      command: "systemctl reset-failed ceph-mds@{{ ansible_facts['hostname'] }}"  # noqa 303
      changed_when: false
      failed_when: false
      when: containerized_deployment | bool

    - name: remove ceph-mds systemd unit file
      file:
        path: /etc/systemd/system/ceph-mds@.service
        state: absent
      when: containerized_deployment | bool

    - name: remove ceph-mds systemd override directory
      file:
        path: /etc/systemd/system/ceph-mds@.service.d
        state: absent
      when: not containerized_deployment | bool

    - name: remove legacy ceph mds data
      file:
        path: "/var/lib/ceph/mds/{{ cluster }}-{{ ansible_facts['hostname'] }}"
        state: absent

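# The `orch apply rgw` task below references a realm and zone, so when
# multisite is not configured make sure a committed default
# realm/zonegroup/zone exists first.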
- name: rgw realm/zonegroup/zone requirements
  hosts: "{{ rgw_group_name|default('rgws') }}"
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: for non multisite setup
      when: not rgw_multisite | bool
      run_once: true
      delegate_to: "{{ groups[mon_group_name][0] }}"
      block:
        - name: create a default realm
          radosgw_realm:
            cluster: "{{ cluster }}"
            name: default
            default: true
          environment:
            CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
            CEPH_CONTAINER_BINARY: "{{ container_binary }}"

        - name: modify the default zonegroup
          radosgw_zonegroup:
            cluster: "{{ cluster }}"
            name: default
            realm: default
            master: true
            default: true
          environment:
            CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
            CEPH_CONTAINER_BINARY: "{{ container_binary }}"

        - name: modify the default zone
          radosgw_zone:
            cluster: "{{ cluster }}"
            name: default
            realm: default
            zonegroup: default
            master: true
            default: true
          environment:
            CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else None }}"
            CEPH_CONTAINER_BINARY: "{{ container_binary }}"

        - name: commit the period
          command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- radosgw-admin --cluster {{ cluster }} period update --commit"
          changed_when: false
          environment:
            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

    - name: update the placement of radosgw hosts
      command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply rgw {{ rgw_realm | default('default') }} {{ rgw_zone | default('default') }} --placement='count-per-host:{{ radosgw_num_instances }} label:{{ rgw_group_name }}' --port={{ radosgw_frontend_port }} {{ '--ssl' if radosgw_frontend_ssl_certificate else '' }}"
      run_once: true
      changed_when: false
      delegate_to: "{{ groups[mon_group_name][0] }}"
      environment:
        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: stop and remove legacy ceph rgw daemons
  hosts: "{{ rgw_group_name|default('rgws') }}"
  serial: 1
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - import_role:
        name: ceph-facts
        tasks_from: set_radosgw_address.yml

    - name: stop and disable ceph-radosgw systemd service
      service:
        name: "ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
        state: stopped
        enabled: false
      failed_when: false
      loop: '{{ rgw_instances }}'

    - name: stop and disable ceph-radosgw systemd target
      service:
        name: ceph-rgw.target
        state: stopped
        enabled: false
      when: not containerized_deployment | bool

    - name: reset failed ceph-radosgw systemd unit
      command: "systemctl reset-failed ceph-radosgw@rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"  # noqa 303
      changed_when: false
      failed_when: false
      loop: '{{ rgw_instances }}'
      when: containerized_deployment | bool

    - name: remove ceph-radosgw systemd unit file
      file:
        path: /etc/systemd/system/ceph-radosgw@.service
        state: absent
      when: containerized_deployment | bool

    - name: remove ceph-radosgw systemd override directory
      file:
        path: /etc/systemd/system/ceph-radosgw@.service.d
        state: absent
      when: not containerized_deployment | bool

    - name: remove legacy ceph radosgw data
      file:
        path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}.{{ item.instance_name }}"
        state: absent
      loop: '{{ rgw_instances }}'

    - name: remove legacy ceph radosgw directory
      file:
        path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
        state: absent

- name: stop and remove legacy ceph nfs daemons
  hosts: "{{ nfs_group_name|default('nfss') }}"
  serial: 1
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - import_role:
        name: ceph-nfs
        tasks_from: create_rgw_nfs_user.yml

    - name: stop and disable ceph-nfs systemd service
      service:
        name: "ceph-nfs@{{ ansible_facts['hostname'] }}"
        state: stopped
        enabled: false
      failed_when: false

    - name: stop and disable ceph-nfs systemd target
      service:
        name: ceph-nfs.target
        state: stopped
        enabled: false
      when: not containerized_deployment | bool

    - name: reset failed ceph-nfs systemd unit
      command: "systemctl reset-failed ceph-nfs@{{ ansible_facts['hostname'] }}"  # noqa 303
      changed_when: false
      failed_when: false
      when: containerized_deployment | bool

    - name: remove ceph-nfs systemd unit file
      file:
        path: /etc/systemd/system/ceph-nfs@.service
        state: absent
      when: containerized_deployment | bool

    - name: remove ceph-nfs systemd override directory
      file:
        path: /etc/systemd/system/ceph-nfs@.service.d
        state: absent
      when: not containerized_deployment | bool

    - name: remove legacy ceph radosgw directory
      file:
        path: "/var/lib/ceph/radosgw/{{ cluster }}-rgw.{{ ansible_facts['hostname'] }}"
        state: absent

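    # Merge the legacy ganesha export (read back from its rados object) with
    # the file/object gateway exports defined in group_vars, then store the
    # result in the rados object that cephadm's nfs service reads.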
    - name: set_fact rados_cmd
      set_fact:
        rados_cmd: "{{ hostvars[groups[mon_group_name][0]]['container_binary'] + ' run --interactive --rm --net=host -v /etc/ceph:/etc/ceph:z --entrypoint=rados ' + ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment | bool else 'rados' }}"

    - name: get legacy nfs export from rados object
      command: "{{ rados_cmd }} -p {{ cephfs_data_pool.name }} get {{ ceph_nfs_rados_export_index }} /dev/stdout"
      register: legacy_export
      changed_when: false
      delegate_to: "{{ groups[mon_group_name][0] }}"
      when: ceph_nfs_rados_backend | bool

    - name: set_fact nfs_file_gw_export
      set_fact:
        nfs_file_gw_export: |
          EXPORT
          {
            Export_id={{ ceph_nfs_ceph_export_id }};
            Path = "/";
            Pseudo = {{ ceph_nfs_ceph_pseudo_path }};
            Access_Type = {{ ceph_nfs_ceph_access_type }};
            Protocols = {{ ceph_nfs_ceph_protocols }};
            Transports = TCP;
            SecType = {{ ceph_nfs_ceph_sectype }};
            Squash = {{ ceph_nfs_ceph_squash }};
            Attr_Expiration_Time = 0;
            FSAL {
              Name = CEPH;
              User_Id = "{{ ceph_nfs_ceph_user }}";
            }
                  {{ ganesha_ceph_export_overrides | default(None) }}
          }
      when: nfs_file_gw | bool

    - name: set_fact nfs_obj_gw_export
      set_fact:
        nfs_obj_gw_export: |
          EXPORT
          {
            Export_id={{ ceph_nfs_rgw_export_id }};
            Path = "/";
            Pseudo = {{ ceph_nfs_rgw_pseudo_path }};
            Access_Type = {{ ceph_nfs_rgw_access_type }};
            Protocols = {{ ceph_nfs_rgw_protocols }};
            Transports = TCP;
            SecType = {{ ceph_nfs_rgw_sectype }};
            Squash = {{ ceph_nfs_rgw_squash }};
            FSAL {
              Name = RGW;
              User_Id = "{{ ceph_nfs_rgw_user }}";
              Access_Key_Id ="{{ ceph_nfs_rgw_access_key }}";
              Secret_Access_Key = "{{ ceph_nfs_rgw_secret_key }}";
            }
                  {{ ganesha_rgw_export_overrides | default(None) }}
          }
      when: nfs_obj_gw | bool

    - name: set_fact new_export
      set_fact:
        new_export: |
          {{ legacy_export.stdout | default('') }}
          {{ nfs_file_gw_export | default('') }}
          {{ nfs_obj_gw_export | default('') }}

    - name: push the new exports in a rados object
      command: "{{ rados_cmd }} -p {{ cephfs_data_pool.name }} -N {{ cephfs_data_pool.name }} put conf-nfs.{{ nfs_group_name | default('nfss') }} -"
      args:
        stdin: "{{ new_export }}"
        stdin_add_newline: false
      changed_when: false
      delegate_to: "{{ groups[mon_group_name][0] }}"

    - name: update the placement of nfs hosts
      command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply nfs {{ nfs_group_name | default('nfss') }} {{ cephfs_data_pool.name }} {{ cephfs_data_pool.name }} --placement='{{ groups.get(nfs_group_name, []) | length }} label:{{ nfs_group_name }}'"
      run_once: true
      changed_when: false
      delegate_to: "{{ groups[mon_group_name][0] }}"
      environment:
        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: redeploy rbd-mirror daemons
  hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: update the placement of rbd-mirror hosts
      command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply rbd-mirror --placement='{{ groups.get(rbdmirror_group_name, []) | length }} label:{{ rbdmirror_group_name }}'"
      run_once: true
      changed_when: false
      delegate_to: "{{ groups[mon_group_name][0] }}"
      environment:
        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: stop and remove legacy rbd-mirror daemons
  hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
  serial: 1
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: stop and disable rbd-mirror systemd service
      service:
        name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"
        state: stopped
        enabled: false
      failed_when: false

    - name: stop and disable rbd-mirror systemd target
      service:
        name: ceph-rbd-mirror.target
        state: stopped
        enabled: false
      when: not containerized_deployment | bool

    - name: reset failed rbd-mirror systemd unit
      command: "systemctl reset-failed ceph-rbd-mirror@rbd-mirror.{{ ansible_facts['hostname'] }}"  # noqa 303
      changed_when: false
      failed_when: false
      when: containerized_deployment | bool

    - name: remove rbd-mirror systemd unit file
      file:
        path: /etc/systemd/system/ceph-rbd-mirror@.service
        state: absent
      when: containerized_deployment | bool

    - name: remove rbd-mirror systemd override directory
      file:
        path: /etc/systemd/system/ceph-rbd-mirror@.service.d
        state: absent
      when: not containerized_deployment | bool

- name: redeploy iscsigw daemons
  hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: update the placement of iscsigw hosts
      command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply iscsi {{ iscsi_pool_name | default('rbd') }} {{ api_user | default('admin') }} {{ api_password | default('admin') }} {{ trusted_ip_list | default('192.168.122.1') }} --placement='{{ groups.get(iscsi_gw_group_name, []) | length }} label:{{ iscsi_gw_group_name }}'"
      run_once: true
      changed_when: false
      delegate_to: '{{ groups[mon_group_name][0] }}'
      environment:
        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: stop and remove legacy iscsigw daemons
  hosts: "{{ iscsi_gw_group_name|default('iscsigws') }}"
  serial: 1
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: stop and disable iscsigw systemd services
      service:
        name: '{{ item }}'
        state: stopped
        enabled: false
      failed_when: false
      with_items:
        - rbd-target-api
        - rbd-target-gw
        - tcmu-runner

    - name: reset failed iscsigw systemd units
      command: 'systemctl reset-failed {{ item }}'  # noqa 303
      changed_when: false
      failed_when: false
      with_items:
        - rbd-target-api
        - rbd-target-gw
        - tcmu-runner
      when: containerized_deployment | bool

    - name: remove iscsigw systemd unit files
      file:
        path: '/etc/systemd/system/{{ item }}.service'
        state: absent
      with_items:
        - rbd-target-api
        - rbd-target-gw
        - tcmu-runner
      when: containerized_deployment | bool

- name: redeploy ceph-crash daemons
  hosts:
    - "{{ mon_group_name|default('mons') }}"
    - "{{ osd_group_name|default('osds') }}"
    - "{{ mds_group_name|default('mdss') }}"
    - "{{ rgw_group_name|default('rgws') }}"
    - "{{ mgr_group_name|default('mgrs') }}"
    - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: stop and disable ceph-crash systemd service
      service:
        name: ceph-crash
        state: stopped
        enabled: false
      failed_when: false
      when: not containerized_deployment | bool

    - name: update the placement of ceph-crash hosts
      command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply crash --placement='label:ceph'"
      run_once: true
      changed_when: false
      delegate_to: '{{ groups[mon_group_name][0] }}'
      environment:
        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: redeploy alertmanager/grafana/prometheus daemons
  hosts: "{{ monitoring_group_name|default('monitoring') }}"
  serial: 1
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

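    # Monitoring-only hosts may not have a ceph.conf yet, so drop in the
    # minimal config generated earlier when none is present.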
    - name: check whether a ceph config file is present
      stat:
        path: "/etc/ceph/{{ cluster }}.conf"
      register: ceph_config

    - name: write a ceph.conf with minimal config
      copy:
        dest: "/etc/ceph/{{ cluster }}.conf"
        content: "{{ minimal_config.stdout }}"
        owner: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
        group: "{{ ceph_uid | int if containerized_deployment | bool else 'ceph' }}"
        mode: "{{ ceph_keyring_permissions }}"
      when: not ceph_config.stat.exists | bool

    - name: with dashboard enabled
      when: dashboard_enabled | bool
      block:
        - name: ensure alertmanager/prometheus data directories are present
          file:
            path: "{{ item }}"
            state: directory
            owner: "{{ prometheus_user_id }}"
            group: "{{ prometheus_user_id }}"
          with_items:
            - "{{ alertmanager_data_dir }}"
            - "{{ prometheus_data_dir }}"

        # (workaround) cephadm adopt alertmanager only stops prometheus-alertmanager systemd service
        - name: stop and disable alertmanager systemd unit
          service:
            name: alertmanager
            state: stopped
            enabled: false
          failed_when: false

        # (workaround) cephadm adopt alertmanager only uses /etc/prometheus/alertmanager.yml
        - name: create alertmanager config symlink
          file:
            path: /etc/prometheus/alertmanager.yml
            src: '{{ alertmanager_conf_dir }}/alertmanager.yml'
            state: link

        # (workaround) cephadm adopt alertmanager only uses /var/lib/prometheus/alertmanager/
        - name: create alertmanager data symlink
          file:
            path: '{{ prometheus_data_dir }}/alertmanager'
            src: '{{ alertmanager_data_dir }}'
            state: link

        - name: adopt alertmanager daemon
          cephadm_adopt:
            name: "alertmanager.{{ ansible_facts['hostname'] }}"
            cluster: "{{ cluster }}"
            image: "{{ alertmanager_container_image }}"
            docker: "{{ true if container_binary == 'docker' else false }}"
            pull: false
            firewalld: "{{ true if configure_firewall | bool else false }}"

        - name: remove alertmanager systemd unit file
          file:
            path: /etc/systemd/system/alertmanager.service
            state: absent

        - name: remove the legacy alertmanager data
          file:
            path: '{{ alertmanager_data_dir }}'
            state: absent

        - name: stop and disable prometheus systemd unit
          service:
            name: prometheus
            state: stopped
            enabled: false
          failed_when: false

        - name: remove alertmanager data symlink
          file:
            path: '{{ prometheus_data_dir }}/alertmanager'
            state: absent

        # (workaround) cephadm adopt prometheus only uses /var/lib/prometheus/metrics/
        - name: tmp copy the prometheus data
          copy:
            src: '{{ prometheus_data_dir }}/'
            dest: /var/lib/prom_metrics
            owner: 65534
            group: 65534
            remote_src: true

        # (workaround) cephadm adopt prometheus only uses /var/lib/prometheus/metrics/
        - name: restore the prometheus data
          copy:
            src: /var/lib/prom_metrics/
            dest: /var/lib/prometheus/metrics
            owner: 65534
            group: 65534
            remote_src: true

        - name: remove the tmp prometheus data copy
          file:
            path: /var/lib/prom_metrics
            state: absent

        - name: adopt prometheus daemon
          cephadm_adopt:
            name: "prometheus.{{ ansible_facts['hostname'] }}"
            cluster: "{{ cluster }}"
            image: "{{ prometheus_container_image }}"
            docker: "{{ true if container_binary == 'docker' else false }}"
            pull: false
            firewalld: "{{ true if configure_firewall | bool else false }}"

        - name: remove prometheus systemd unit file
          file:
            path: /etc/systemd/system/prometheus.service
            state: absent

        - name: remove the legacy prometheus data
          file:
            path: '{{ prometheus_data_dir }}'
            state: absent

        # (workaround) cephadm adopt grafana only stops grafana systemd service
        - name: stop and disable grafana systemd unit
          service:
            name: grafana-server
            state: stopped
            enabled: false
          failed_when: false

        - name: adopt grafana daemon
          cephadm_adopt:
            name: "grafana.{{ ansible_facts['hostname'] }}"
            cluster: "{{ cluster }}"
            image: "{{ grafana_container_image }}"
            docker: "{{ true if container_binary == 'docker' else false }}"
            pull: false
            firewalld: "{{ true if configure_firewall | bool else false }}"

        - name: remove grafana systemd unit file
          file:
            path: /etc/systemd/system/grafana-server.service
            state: absent

        - name: remove the legacy grafana data
          file:
            path: /var/lib/grafana
            state: absent

- name: redeploy node-exporter daemons
  hosts:
    - "{{ mon_group_name|default('mons') }}"
    - "{{ osd_group_name|default('osds') }}"
    - "{{ mds_group_name|default('mdss') }}"
    - "{{ rgw_group_name|default('rgws') }}"
    - "{{ mgr_group_name|default('mgrs') }}"
    - "{{ rbdmirror_group_name|default('rbdmirrors') }}"
    - "{{ nfs_group_name|default('nfss') }}"
    - "{{ iscsi_gw_group_name|default('iscsigws') }}"
    - "{{ monitoring_group_name|default('monitoring') }}"
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: with dashboard enabled
      when: dashboard_enabled | bool
      block:
        - name: stop and disable node-exporter systemd service
          service:
            name: node_exporter
            state: stopped
            enabled: false
          failed_when: false

        - name: remove node_exporter systemd unit file
          file:
            path: /etc/systemd/system/node_exporter.service
            state: absent

        - name: update the placement of node-exporter hosts
          command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply node-exporter --placement='*'"
          run_once: true
          changed_when: false
          delegate_to: '{{ groups[mon_group_name][0] }}'
          environment:
            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

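# Finally, hand the placement of the core daemons over to the orchestrator,
# pinned by count and label to the same hosts ceph-ansible used.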
- name: adjust placement daemons
  hosts: "{{ mon_group_name|default('mons') }}[0]"
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: update the placement of monitor hosts
      command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply mon --placement='{{ groups.get(mon_group_name, []) | length }} label:{{ mon_group_name }}'"
      changed_when: false
      environment:
        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

    - name: update the placement of manager hosts
      command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply mgr --placement='{{ groups.get(mgr_group_name, []) | length }} label:{{ mgr_group_name }}'"
      changed_when: false
      environment:
        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

    - name: with dashboard enabled
      when: dashboard_enabled | bool
      block:
        - name: update the placement of alertmanager hosts
          command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply alertmanager --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'"
          changed_when: false
          environment:
            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

        - name: update the placement of grafana hosts
          command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply grafana --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'"
          changed_when: false
          environment:
            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

        - name: update the placement of prometheus hosts
          command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch apply prometheus --placement='{{ groups.get(monitoring_group_name, []) | length }} label:{{ monitoring_group_name }}'"
          changed_when: false
          environment:
            CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

- name: show ceph orchestrator status
  hosts: "{{ mon_group_name|default('mons') }}[0]"
  become: true
  gather_facts: false
  tasks:
    - import_role:
        name: ceph-defaults

    - name: show ceph orchestrator services
      command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch ls --refresh"
      changed_when: false
      environment:
        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

    - name: show ceph orchestrator daemons
      command: "{{ cephadm_cmd }} shell --fsid {{ fsid }} -- ceph --cluster {{ cluster }} orch ps --refresh"
      changed_when: false
      environment:
        CEPHADM_IMAGE: '{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}'

    - name: inform users about cephadm
      debug:
        msg: |
          This Ceph cluster is now managed by cephadm. Any further changes to
          the cluster should be made with the cephadm CLI; the ceph-ansible
          playbooks are no longer needed.