Commit 470c1d82 authored by Guillaume Abrioux

tests: migrate to quay.ceph.io registry



Migrate the test scenarios to the quay.ceph.io registry in order to avoid docker.io rate limiting.
Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 2001039c)
parent 84edd510
@@ -30,9 +30,9 @@ The following environent variables are available for use:
 * ``UPDATE_CEPH_STABLE_RELEASE``: (default: ``kraken``) This would configure the ``ceph-ansible`` variable ``ceph_stable_relese`` during an ``update``
   scenario. This is set automatically when using the ``jewel-*`` or ``kraken-*`` testing scenarios.
-* ``CEPH_DOCKER_REGISTRY``: (default: ``docker.io``) This would configure the ``ceph-ansible`` variable ``ceph_docker_registry``.
+* ``CEPH_DOCKER_REGISTRY``: (default: ``quay.ceph.io``) This would configure the ``ceph-ansible`` variable ``ceph_docker_registry``.
-* ``CEPH_DOCKER_IMAGE``: (default: ``ceph/daemon``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image``.
+* ``CEPH_DOCKER_IMAGE``: (default: ``ceph-ci/daemon``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image``.
 * ``CEPH_DOCKER_IMAGE_TAG``: (default: ``latest``) This would configure the ``ceph-ansible`` variable ``ceph_docker_image_name``.
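As a quick illustration (not part of this commit), the three settings above are assumed to combine into the full container image reference that the test scenarios pull; with the new defaults this resolves to the quay.ceph.io CI image rather than the rate-limited docker.io one:

```python
# Illustration only: how the registry, image and tag settings combine into the
# container image reference used throughout the updated tests.
registry = "quay.ceph.io"    # ceph_docker_registry / CEPH_DOCKER_REGISTRY
image = "ceph-ci/daemon"     # ceph_docker_image / CEPH_DOCKER_IMAGE
tag = "latest-luminous"      # ceph_docker_image_tag / CEPH_DOCKER_IMAGE_TAG

container_image = f"{registry}/{image}:{tag}"
print(container_image)  # quay.ceph.io/ceph-ci/daemon:latest-luminous
```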
@@ -36,9 +36,9 @@ class TestCephVolumeModule(object):
         result = ceph_volume.get_wal("wal-lv", "wal-vg")
         assert result == "wal-vg/wal-lv"

-    def test_container_exec(sefl):
+    def test_container_exec(self):
         fake_binary = "ceph-volume"
-        fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+        fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
         expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host',  # noqa E501
                                  '--ulimit', 'nofile=1024:4096',
                                  '-v', '/run/lock/lvm:/run/lock/lvm:z',
@@ -48,14 +48,14 @@ class TestCephVolumeModule(object):
                                  '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
                                  '-v', '/var/log/ceph/:/var/log/ceph/:z',
                                  '--entrypoint=ceph-volume',
-                                 'docker.io/ceph/daemon:latest-luminous']
+                                 'quay.ceph.io/ceph-ci/daemon:latest-luminous']
         result = ceph_volume.container_exec(fake_binary, fake_container_image)
         assert result == expected_command_list

     def test_zap_osd_container(self):
         fake_module = MagicMock()
         fake_module.params = {'data': '/dev/sda'}
-        fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+        fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
         expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host',  # noqa E501
                                  '--ulimit', 'nofile=1024:4096',
                                  '-v', '/run/lock/lvm:/run/lock/lvm:z',
@@ -65,7 +65,7 @@ class TestCephVolumeModule(object):
                                  '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
                                  '-v', '/var/log/ceph/:/var/log/ceph/:z',
                                  '--entrypoint=ceph-volume',
-                                 'docker.io/ceph/daemon:latest-luminous',
+                                 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
                                  'lvm',
                                  'zap',
                                  '--destroy',
@@ -124,7 +124,7 @@ class TestCephVolumeModule(object):
     def test_list_osd_container(self):
         fake_module = MagicMock()
         fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
-        fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+        fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
         expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host',  # noqa E501
                                  '--ulimit', 'nofile=1024:4096',
                                  '-v', '/run/lock/lvm:/run/lock/lvm:z',
@@ -134,7 +134,7 @@ class TestCephVolumeModule(object):
                                  '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
                                  '-v', '/var/log/ceph/:/var/log/ceph/:z',
                                  '--entrypoint=ceph-volume',
-                                 'docker.io/ceph/daemon:latest-luminous',
+                                 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
                                  '--cluster',
                                  'ceph',
                                  'lvm',
@@ -152,7 +152,7 @@ class TestCephVolumeModule(object):
                               'cluster': 'ceph', }
         fake_action = "create"
-        fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+        fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
         expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host',  # noqa E501
                                  '--ulimit', 'nofile=1024:4096',
                                  '-v', '/run/lock/lvm:/run/lock/lvm:z',
@@ -162,7 +162,7 @@ class TestCephVolumeModule(object):
                                  '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
                                  '-v', '/var/log/ceph/:/var/log/ceph/:z',
                                  '--entrypoint=ceph-volume',
-                                 'docker.io/ceph/daemon:latest-luminous',
+                                 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
                                  '--cluster',
                                  'ceph',
                                  'lvm',
@@ -201,7 +201,7 @@ class TestCephVolumeModule(object):
                               'cluster': 'ceph', }
         fake_action = "prepare"
-        fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+        fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
         expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host',  # noqa E501
                                  '--ulimit', 'nofile=1024:4096',
                                  '-v', '/run/lock/lvm:/run/lock/lvm:z',
@@ -211,7 +211,7 @@ class TestCephVolumeModule(object):
                                  '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
                                  '-v', '/var/log/ceph/:/var/log/ceph/:z',
                                  '--entrypoint=ceph-volume',
-                                 'docker.io/ceph/daemon:latest-luminous',
+                                 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
                                  '--cluster',
                                  'ceph',
                                  'lvm',
@@ -251,7 +251,7 @@ class TestCephVolumeModule(object):
                               'cluster': 'ceph',
                               'batch_devices': ["/dev/sda", "/dev/sdb"]}
-        fake_container_image = "docker.io/ceph/daemon:latest-luminous"
+        fake_container_image = "quay.ceph.io/ceph-ci/daemon:latest-luminous"
         expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host',  # noqa E501
                                  '--ulimit', 'nofile=1024:4096',
                                  '-v', '/run/lock/lvm:/run/lock/lvm:z',
@@ -261,7 +261,7 @@ class TestCephVolumeModule(object):
                                  '-v', '/var/lib/ceph/:/var/lib/ceph/:z',
                                  '-v', '/var/log/ceph/:/var/log/ceph/:z',
                                  '--entrypoint=ceph-volume',
-                                 'docker.io/ceph/daemon:latest-luminous',
+                                 'quay.ceph.io/ceph-ci/daemon:latest-luminous',
                                  '--cluster',
                                  'ceph',
                                  'lvm',
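For context, the expected_command_list values in these tests imply that ceph_volume.container_exec() returns a `docker run` prefix that bind-mounts the LVM and Ceph directories and overrides the entrypoint with the requested binary. The sketch below is a hedged reconstruction from the visible test expectations only, not the actual module source; the bind mounts elided between the diff hunks are left as a placeholder.

```python
# Hedged sketch reconstructed from the test expectations above (not the real
# ceph_volume module implementation). ELIDED_MOUNTS stands in for the '-v'
# options hidden between the diff hunks.
ELIDED_MOUNTS = []  # placeholder for bind mounts not shown in this diff


def container_exec(binary, container_image):
    # Build the 'docker run' prefix the unit tests assert on.
    return (['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host',
             '--ulimit', 'nofile=1024:4096',
             '-v', '/run/lock/lvm:/run/lock/lvm:z']
            + ELIDED_MOUNTS
            + ['-v', '/var/lib/ceph/:/var/lib/ceph/:z',
               '-v', '/var/log/ceph/:/var/log/ceph/:z',
               '--entrypoint=' + binary,
               container_image])


# Example call matching the updated tests:
# container_exec('ceph-volume', 'quay.ceph.io/ceph-ci/daemon:latest-luminous')
```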
@@ -25,4 +25,7 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
\ No newline at end of file
+ceph_osd_docker_run_script_path: /var/tmp
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
@@ -36,4 +36,7 @@ lvm_volumes:
 - data: data-lv2
   data_vg: test_group
   db: journal1
-  db_vg: journals
\ No newline at end of file
+  db_vg: journals
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
\ No newline at end of file
@@ -38,3 +38,6 @@ openstack_pools:
   - "{{ openstack_glance_pool }}"
   - "{{ openstack_cinder_pool }}"
 mds_max_mds: 2
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
@@ -32,3 +32,6 @@ openstack_pools:
   - "{{ openstack_glance_pool }}"
   - "{{ openstack_cinder_pool }}"
 mds_max_mds: 2
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
@@ -26,4 +26,7 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
\ No newline at end of file
+ceph_osd_docker_run_script_path: /var/tmp
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
@@ -15,4 +15,7 @@ rgw_bucket_default_quota_max_objects: 1638400
 ceph_conf_overrides:
   global:
     osd_pool_default_pg_num: 8
-    osd_pool_default_size: 1
\ No newline at end of file
+    osd_pool_default_size: 1
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
@@ -13,4 +13,4 @@ rgw_bucket_default_quota_max_objects: 1638400
 ceph_conf_overrides:
   global:
     osd_pool_default_pg_num: 8
-    osd_pool_default_size: 1
\ No newline at end of file
+    osd_pool_default_size: 1
@@ -42,3 +42,6 @@ openstack_pools:
   - "{{ openstack_cinder_pool }}"
 handler_health_mon_check_delay: 10
 handler_health_osd_check_delay: 10
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
@@ -35,4 +35,7 @@ lvm_volumes:
   db: journal1
   db_vg: journals
 fsid: 40358a87-ab6e-4bdc-83db-1d909147861c
-generate_fsid: false
\ No newline at end of file
+generate_fsid: false
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
\ No newline at end of file
@@ -24,3 +24,6 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
 ceph_osd_docker_run_script_path: /var/tmp
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
@@ -25,4 +25,7 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
\ No newline at end of file
+ceph_osd_docker_run_script_path: /var/tmp
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
@@ -29,3 +29,6 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
 ceph_osd_docker_run_script_path: /var/tmp
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
@@ -9,9 +9,9 @@ all:
                rgw_keystone_url: 'http://192.168.95.10:5000', rgw_s3_auth_use_keystone: 'true', rgw_keystone_revocation_interval: 0}
     ceph_mgr_docker_extra_env: '-e MGR_DASHBOARD=0'
     cluster: mycluster
-    ceph_docker_image: ceph/daemon
-    ceph_docker_image_tag: v3.2.5-stable-3.2-luminous-centos-7
-    ceph_docker_registry: docker.io
+    ceph_docker_image: ceph-ci/daemon
+    ceph_docker_image_tag: v3.2.14-stable-3.2-luminous-centos-7-x86_64
+    ceph_docker_registry: quay.ceph.io
     ceph_origin: repository
     ceph_repository: community
     ceph_release: luminous
@@ -26,3 +26,6 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
 ceph_osd_docker_run_script_path: /var/tmp
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
@@ -25,4 +25,7 @@ os_tuning_params:
 ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
-ceph_osd_docker_run_script_path: /var/tmp
\ No newline at end of file
+ceph_osd_docker_run_script_path: /var/tmp
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
@@ -13,3 +13,6 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
 openstack_config: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
@@ -13,3 +13,6 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
 openstack_config: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous
@@ -13,3 +13,6 @@ ceph_conf_overrides:
   global:
     osd_pool_default_size: 1
 openstack_config: False
+ceph_docker_registry: quay.ceph.io
+ceph_docker_image: ceph-ci/daemon
+ceph_docker_image_tag: latest-luminous