Commit 90f3f615 authored by Guillaume Abrioux, committed by Dimitri Savineau

infra: introduce docker to podman playbook

This isn't backported from master because there are too many changes
between stable-3.2 and other newer branches.

NOTE:
This playbook *doesn't* add podman support in stable-3.2 at all.
This is a TripleO-dedicated playbook intended to be run early in the
FFU (Fast Forward Upgrade) workflow in order to prepare the OS upgrade.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1853457

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
parent 6daa2c9d
- hosts: all
  gather_facts: true
  become: true
  tasks:
    - name: install podman
      package:
        name: podman
        state: present
\ No newline at end of file
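
Note: the play above only guarantees that the podman package is present; as the commit message says, it does not switch any stable-3.2 services over to podman. The following is a purely illustrative sketch, not part of this commit (the task names and the podman --version probe are assumptions), showing how a scenario could verify the freshly installed binary before the FFU OS upgrade proceeds:

# Hypothetical follow-up check, not part of this commit: confirm the podman
# binary installed by the play above is actually runnable.
- hosts: all
  gather_facts: false
  become: true
  tasks:
    - name: check that podman is runnable
      command: podman --version
      register: podman_version
      changed_when: false

    - name: report the installed podman version
      debug:
        msg: "{{ podman_version.stdout }}"

A check like this would be run with ansible-playbook against the same inventory used for the deployment.
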
../../../Vagrantfile
\ No newline at end of file
{
  "ceph_conf_overrides": {
    "global": {
      "osd_pool_default_pg_num": 12,
      "osd_pool_default_size": 1,
      "mon_allow_pool_size_one": true,
      "mon_warn_on_pool_no_redundancy": false
    }
  },
  "cephfs_pools": [
    {
      "name": "cephfs_data",
      "pg_num": 8,
      "pgp_num": 8,
      "rule_name": "replicated_rule",
      "type": 1,
      "erasure_profile": "",
      "expected_num_objects": "",
      "application": "cephfs",
      "size": 3,
      "min_size": 0
    },
    {
      "name": "cephfs_metadata",
      "pg_num": 8,
      "pgp_num": 8,
      "rule_name": "replicated_rule",
      "type": 1,
      "erasure_profile": "",
      "expected_num_objects": "",
      "application": "cephfs",
      "size": 3,
      "min_size": 0
    }
  ],
  "ceph_mon_docker_memory_limit": "2g"
}
---
# this is only here to let the CI tests know
# that this scenario is using docker
docker: True
container_binary: docker
containerized_deployment: True
monitor_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
radosgw_interface: "{{ 'eth1' if ansible_distribution == 'CentOS' else 'ens6' }}"
ceph_mon_docker_subnet: "{{ public_network }}"
ceph_docker_on_openstack: False
public_network: "192.168.58.0/24"
cluster_network: "192.168.59.0/24"
rgw_override_bucket_index_max_shards: 16
rgw_bucket_default_quota_max_objects: 1638400
ceph_conf_overrides:
  global:
    mon_allow_pool_size_one: true
    mon_warn_on_pool_no_redundancy: false
    osd_pool_default_size: 1
openstack_config: False
openstack_glance_pool:
  name: "images"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "HDD"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
  size: 1
openstack_cinder_pool:
  name: "volumes"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "HDD"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
  size: 1
openstack_pools:
  - "{{ openstack_glance_pool }}"
  - "{{ openstack_cinder_pool }}"
handler_health_mon_check_delay: 10
handler_health_osd_check_delay: 10
---
user_config: True
copy_admin_key: True
test:
  name: "test"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "HDD"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
test2:
  name: "test2"
  pg_num: "{{ osd_pool_default_pg_num }}"
  pgp_num: "{{ osd_pool_default_pg_num }}"
  rule_name: "HDD"
  type: 1
  erasure_profile: ""
  expected_num_objects: ""
pools:
  - "{{ test }}"
  - "{{ test2 }}"
---
gateway_ip_list: "{{ ansible_all_ipv4_addresses | ipaddr(public_network) | first }}"
generate_crt: True
---
create_crush_tree: False
crush_rule_config: False
crush_rule_hdd:
  name: HDD
  root: default
  type: host
  class: hdd
  default: true
crush_rules:
  - "{{ crush_rule_hdd }}"
---
osd_objectstore: "bluestore"
osd_scenario: lvm
devices:
  - /dev/sda
  - /dev/sdb
  - /dev/sdc
\ No newline at end of file
---
copy_admin_key: True
rgw_create_pools:
  foo:
    pg_num: 16
  bar:
    pg_num: 16
[mons]
mon0
[osds]
osd0
[mgrs]
mon0
[mdss]
osd0
[rgws]
osd0
[nfss]
nfs0
[rbdmirrors]
rbd-mirror0
[iscsigws]
iscsi-gw0
[all:vars]
nfs_ganesha_stable=True
nfs_ganesha_dev=False
nfs_ganesha_stable_branch="V2.7-stable"
nfs_ganesha_flavor="ceph_master"
---
# DEPLOY CONTAINERIZED DAEMONS
docker: True
# DEFINE THE NUMBER OF VMS TO RUN
mon_vms: 1
osd_vms: 1
mds_vms: 0
rgw_vms: 1
nfs_vms: 1
rbd_mirror_vms: 1
client_vms: 0
iscsi_gw_vms: 1
mgr_vms: 0
# SUBNETS TO USE FOR THE VMS
public_subnet: 192.168.58
cluster_subnet: 192.168.59
# MEMORY
# set 1024 for CentOS
memory: 1024
vagrant_box: centos/7
# The sync directory changes based on the vagrant box.
# Set to /home/vagrant/sync for CentOS/7, /home/{ user }/vagrant for OpenStack; defaults to /vagrant.
#vagrant_sync_dir: /home/vagrant/sync
vagrant_sync_dir: /vagrant
# Disables synced folder creation. Not needed for testing, will skip mounting
# the vagrant directory on the remote box regardless of the provider.
vagrant_disable_synced_folder: true
@@ -21,13 +21,15 @@ class TestMDSs(object):
     def test_mds_is_up(self, node, host):
         hostname = node["vars"]["inventory_hostname"]
+        container_binary = node['container_binary']
         if node['docker']:
-            docker_exec_cmd = 'docker exec ceph-mds-{hostname}'.format(hostname=hostname)
+            container_exec_cmd = '{container_binary} exec ceph-mds-{hostname}'.format(  # noqa E501
+                hostname=hostname, container_binary=container_binary)
         else:
-            docker_exec_cmd = ''
+            container_exec_cmd = ''
-        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
-            docker_exec_cmd=docker_exec_cmd,
+        cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-mds --keyring /var/lib/ceph/bootstrap-mds/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+            container_exec_cmd=container_exec_cmd,
             cluster=node['cluster_name']
         )
         cluster_status = json.loads(host.check_output(cmd))
@@ -22,12 +22,13 @@ class TestMGRs(object):
     def test_mgr_is_up(self, node, host):
         hostname=node["vars"]["inventory_hostname"]
         cluster=node["cluster_name"]
+        container_binary=node['container_binary']
         if node['docker']:
-            docker_exec_cmd = 'docker exec ceph-mgr-{hostname}'.format(hostname=hostname)
+            container_exec_cmd = '{container_binary} exec ceph-mgr-{hostname}'.format(container_binary=container_binary, hostname=hostname)
         else:
-            docker_exec_cmd = ''
-        cmd = "sudo {docker_exec_cmd} ceph --name mgr.{hostname} --keyring /var/lib/ceph/mgr/{cluster}-{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
-            docker_exec_cmd=docker_exec_cmd,
+            container_exec_cmd = ''
+        cmd = "sudo {container_exec_cmd} ceph --name mgr.{hostname} --keyring /var/lib/ceph/mgr/{cluster}-{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+            container_exec_cmd=container_exec_cmd,
             hostname=node["vars"]["inventory_hostname"],
             cluster=cluster
         )
@@ -28,10 +28,10 @@ class TestMons(object):
         output = host.check_output(cmd)
         assert output.strip().startswith("cluster")

-    def test_ceph_config_has_inital_members_line(self, node, File):
-        assert File(node["conf_path"]).contains("^mon initial members = .*$")
+    def test_ceph_config_has_inital_members_line(self, node, host):
+        assert host.file(node["conf_path"]).contains("^mon initial members = .*$")

-    def test_initial_members_line_has_correct_value(self, node, host, File):
+    def test_initial_members_line_has_correct_value(self, node, host):
         mon_initial_members_line = host.check_output("grep 'mon initial members = ' /etc/ceph/{cluster}.conf".format(cluster=node['cluster_name']))
         result = True
         for host in node["vars"]["groups"]["mons"]:
@@ -26,12 +26,13 @@ class TestNFSs(object):
     def test_nfs_is_up(self, node, host):
         hostname = node["vars"]["inventory_hostname"]
         cluster = node['cluster_name']
+        container_binary = node['container_binary']
         if node['docker']:
-            docker_exec_cmd = 'docker exec ceph-nfs-{hostname}'.format(hostname=hostname)
+            container_exec_cmd = '{container_binary} exec ceph-nfs-{hostname}'.format(container_binary=container_binary, hostname=hostname)
         else:
-            docker_exec_cmd = ''
-        cmd = "sudo {docker_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
-            docker_exec_cmd=docker_exec_cmd,
+            container_exec_cmd = ''
+        cmd = "sudo {container_exec_cmd} ceph --name client.rgw.{hostname} --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+            container_exec_cmd=container_exec_cmd,
             hostname=hostname,
             cluster=cluster
         )
@@ -12,9 +12,9 @@ class TestOSDs(object):
         osds = cmd.stdout.rstrip("\n").split("\n")
         return osds

-    def _get_docker_exec_cmd(self, host):
+    def _get_docker_exec_cmd(self, node, host):
         osd_id = host.check_output(
-            "docker ps -q --filter='name=ceph-osd' | head -1")
+            "{container_binary} ps -q --filter='name=ceph-osd' | head -1".format(container_binary=node['container_binary']))
         return osd_id
@@ -86,8 +86,10 @@ class TestOSDs(object):
     @pytest.mark.docker
     def test_all_docker_osds_are_up_and_in(self, node, host):
-        cmd = "sudo docker exec {osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(
-            osd_id=self._get_docker_exec_cmd(host),
+        container_binary = node['container_binary']
+        cmd = "sudo {container_binary} exec {osd_id} ceph --cluster={cluster} --connect-timeout 5 --keyring /var/lib/ceph/bootstrap-osd/{cluster}.keyring -n client.bootstrap-osd osd tree -f json".format(
+            container_binary=container_binary,
+            osd_id=self._get_docker_exec_cmd(node, host),
             cluster=node["cluster_name"]
         )
         output = json.loads(host.check_output(cmd))
@@ -30,15 +30,16 @@ class TestRbdMirrors(object):
     def test_rbd_mirror_is_up(self, node, host):
         hostname=node["vars"]["inventory_hostname"]
         cluster=node["cluster_name"]
+        container_binary = node["container_binary"]
         daemons = []
         if node['docker']:
-            docker_exec_cmd = 'docker exec ceph-rbd-mirror-{hostname}'.format(hostname=hostname)
+            container_exec_cmd = '{container_binary} exec ceph-rbd-mirror-{hostname}'.format(container_binary=container_binary, hostname=hostname)
         else:
-            docker_exec_cmd = ''
+            container_exec_cmd = ''
         hostname = node["vars"]["inventory_hostname"]
         cluster = node['cluster_name']
-        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-rbd --keyring /var/lib/ceph/bootstrap-rbd/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
-            docker_exec_cmd=docker_exec_cmd,
+        cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rbd --keyring /var/lib/ceph/bootstrap-rbd/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+            container_exec_cmd=container_exec_cmd,
             hostname=hostname,
             cluster=cluster
         )
@@ -25,12 +25,13 @@ class TestRGWs(object):
     def test_rgw_is_up(self, node, host):
         hostname=node["vars"]["inventory_hostname"]
         cluster=node["cluster_name"]
+        container_binary=node['container_binary']
         if node['docker']:
-            docker_exec_cmd = 'docker exec ceph-rgw-{hostname}'.format(hostname=hostname)
+            container_exec_cmd = '{container_binary} exec ceph-rgw-{hostname}'.format(container_binary=container_binary, hostname=hostname)
         else:
-            docker_exec_cmd = ''
-        cmd = "sudo {docker_exec_cmd} ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
-            docker_exec_cmd=docker_exec_cmd,
+            container_exec_cmd = ''
+        cmd = "sudo {container_exec_cmd} ceph --name client.bootstrap-rgw --keyring /var/lib/ceph/bootstrap-rgw/{cluster}.keyring --cluster={cluster} --connect-timeout 5 -f json -s".format(
+            container_exec_cmd=container_exec_cmd,
             hostname=hostname,
             cluster=cluster
         )
@@ -37,7 +37,9 @@ class TestRGWs(object):
     def test_docker_rgw_tuning_pools_are_set(self, node, host):
         hostname = node["vars"]["inventory_hostname"]
         cluster = node['cluster_name']
-        cmd = "sudo docker exec ceph-rgw-{hostname} ceph --cluster={cluster} -n client.rgw.{hostname} --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring osd dump".format(
+        container_binary = node['container_binary']
+        cmd = "sudo {container_binary} exec ceph-rgw-{hostname} ceph --cluster={cluster} -n client.rgw.{hostname} --connect-timeout 5 --keyring /var/lib/ceph/radosgw/{cluster}-rgw.{hostname}/keyring osd dump".format(
+            container_binary=container_binary,
             hostname=hostname,
             cluster=cluster
         )
@@ -22,8 +22,8 @@ class TestInstall(object):

 class TestCephConf(object):
-    def test_ceph_config_has_mon_host_line(self, node, File):
-        assert File(node["conf_path"]).contains("^mon host = .*$")
+    def test_ceph_config_has_mon_host_line(self, node, host):
+        assert host.file(node["conf_path"]).contains("^mon host = .*$")

     def test_mon_host_line_has_correct_value(self, node, host):
         mon_host_line = host.check_output("grep 'mon host = ' /etc/ceph/{cluster}.conf".format(cluster=node['cluster_name']))