-rw-r--r--  apex/builders/common_builder.py                          43
-rw-r--r--  apex/builders/undercloud_builder.py                       2
-rw-r--r--  apex/common/constants.py                                  2
-rw-r--r--  apex/common/utils.py                                     12
-rw-r--r--  apex/deploy.py                                           11
-rw-r--r--  apex/network/network_data.py                              2
-rw-r--r--  apex/network/network_environment.py                       9
-rw-r--r--  apex/overcloud/deploy.py                                 61
-rw-r--r--  apex/tests/config/98faaca.diff                            2
-rw-r--r--  apex/tests/test_apex_common_utils.py                      5
-rw-r--r--  apex/tests/test_apex_network_environment.py               7
-rw-r--r--  apex/tests/test_apex_overcloud_deploy.py                 16
-rw-r--r--  apex/undercloud/undercloud.py                             5
-rw-r--r--  build/csit-environment.yaml                              26
-rw-r--r--  build/csit-queens-environment.yaml                       26
-rw-r--r--  build/csit-rocky-environment.yaml                        26
-rw-r--r--  build/network-environment.yaml                           22
-rw-r--r--  build/patches/neutron-patch-NSDriver.patch                2
-rw-r--r--  lib/ansible/playbooks/configure_undercloud.yml           24
-rw-r--r--  lib/ansible/playbooks/deploy_overcloud.yml               31
-rw-r--r--  lib/ansible/playbooks/patch_containers.yml               13
-rw-r--r--  lib/ansible/playbooks/post_deploy_overcloud.yml          12
-rw-r--r--  lib/ansible/playbooks/prepare_overcloud_containers.yml   13
23 files changed, 277 insertions(+), 95 deletions(-)
diff --git a/apex/builders/common_builder.py b/apex/builders/common_builder.py
index 7627ae3c..59af94cd 100644
--- a/apex/builders/common_builder.py
+++ b/apex/builders/common_builder.py
@@ -62,12 +62,13 @@ def project_to_path(project, patch=None):
def project_to_docker_image(project, docker_url):
"""
Translates OpenStack project to OOO services that are containerized
- :param project: name of OpenStack project
+ :param project: short name of OpenStack project
:return: List of OOO docker service names
"""
# Fetch all docker containers in docker hub with tripleo and filter
# based on project
-
+ logging.info("Checking for docker images matching project: {}".format(
+ project))
hub_output = utils.open_webpage(
urllib.parse.urljoin(docker_url,
'?page_size=1024'), timeout=10)
@@ -85,6 +86,8 @@ def project_to_docker_image(project, docker_url):
for result in results:
if result['name'].startswith("centos-binary-{}".format(project)):
# add as docker image shortname (just service name)
+ logging.debug("Adding docker image {} for project {} for "
+ "patching".format(result['name'], project))
docker_images.append(result['name'].replace('centos-binary-', ''))
return docker_images
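
The matching above keys off the centos-binary-<project> prefix in the Docker Hub repository listing and strips that prefix to produce OOO service short names. A minimal standalone sketch of the same filter, with a made-up results payload standing in for the Hub response:

    # Sketch of the name-prefix filter used above; the 'results' payload is
    # illustrative and only mirrors the shape of the Docker Hub listing.
    results = [
        {'name': 'centos-binary-nova-api'},
        {'name': 'centos-binary-nova-compute'},
        {'name': 'centos-binary-neutron-server'},
    ]
    project = 'nova'
    docker_images = [
        r['name'].replace('centos-binary-', '')
        for r in results
        if r['name'].startswith('centos-binary-{}'.format(project))
    ]
    # docker_images -> ['nova-api', 'nova-compute']
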
@@ -184,8 +187,16 @@ def add_upstream_patches(patches, image, tmp_dir,
if docker_tag and 'python' in project_path:
# Projects map to multiple THT services, need to check which
# are supported
- ooo_docker_services = project_to_docker_image(patch['project'],
+ project_short_name = os.path.basename(patch['project'])
+ ooo_docker_services = project_to_docker_image(project_short_name,
docker_url)
+ if not ooo_docker_services:
+ logging.error("Did not find any matching docker containers "
+ "for project: {}".format(project_short_name))
+ raise exc.ApexCommonBuilderException(
+ 'Unable to find docker services for python project in '
+ 'patch')
+ # Just use the first image to see if patch was promoted into it
docker_img = ooo_docker_services[0]
else:
ooo_docker_services = []
@@ -200,24 +211,38 @@ def add_upstream_patches(patches, image, tmp_dir,
if patch_diff and not patch_promoted:
patch_file = "{}.patch".format(patch['change-id'])
+ patch_file_paths = []
# If we found services, then we treat the patch like it applies to
# docker only
if ooo_docker_services:
os_version = default_branch.replace('stable/', '')
for service in ooo_docker_services:
docker_services = docker_services.union({service})
+ # We need to go root to be able to install patch and then
+ # switch back to previous user. Some containers that
+ # have the same name as the project do not necessarily
+ # contain the project code. For example
+ # novajoin-notifier does not contain nova package code.
+ # Therefore we must try to patch and unfortunately
+ # ignore failures until we have a better way of checking
+ # this
docker_cmds = [
"WORKDIR {}".format(project_path),
+ "USER root",
+ "ARG REAL_USER",
+ "RUN yum -y install patch",
"ADD {} {}".format(patch_file, project_path),
- "RUN patch -p1 < {}".format(patch_file)
+ "RUN patch -p1 < {} || echo "
+ "'Patching failed'".format(patch_file),
+ "USER $REAL_USER"
]
src_img_uri = "{}:8787/tripleo{}/centos-binary-{}:" \
"{}".format(uc_ip, os_version, service,
docker_tag)
oc_builder.build_dockerfile(service, tmp_dir, docker_cmds,
src_img_uri)
- patch_file_path = os.path.join(tmp_dir, 'containers',
- patch_file)
+ patch_file_paths.append(os.path.join(
+ tmp_dir, "containers/{}".format(service), patch_file))
else:
patch_file_path = os.path.join(tmp_dir, patch_file)
virt_ops.extend([
@@ -227,8 +252,10 @@ def add_upstream_patches(patches, image, tmp_dir,
project_path, patch_file)}])
logging.info("Adding patch {} to {}".format(patch_file,
image))
- with open(patch_file_path, 'w') as fh:
- fh.write(patch_diff)
+ patch_file_paths.append(patch_file_path)
+ for patch_fp in patch_file_paths:
+ with open(patch_fp, 'w') as fh:
+ fh.write(patch_diff)
else:
logging.info("Ignoring patch:\n{}".format(patch))
if len(virt_ops) > 1:
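
For each matched service, the docker_cmds list above becomes a small Dockerfile layered on top of the local registry image. Assuming build_dockerfile simply prepends a FROM line for src_img_uri and writes the commands verbatim (an assumption about that helper, not its confirmed behavior), the generated file is roughly:

    # Rough shape of the per-service Dockerfile implied by docker_cmds above;
    # the service, patch file and paths are hypothetical examples.
    src_img_uri = '192.0.2.1:8787/tripleorocky/centos-binary-nova-api:current-tripleo'
    project_path = '/usr/lib/python2.7/site-packages'
    patch_file = 'I0123456789abcdef.patch'
    dockerfile = '\n'.join([
        'FROM {}'.format(src_img_uri),
        'WORKDIR {}'.format(project_path),
        'USER root',
        'ARG REAL_USER',
        'RUN yum -y install patch',
        'ADD {} {}'.format(patch_file, project_path),
        "RUN patch -p1 < {} || echo 'Patching failed'".format(patch_file),
        'USER $REAL_USER',
    ])
    print(dockerfile)
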
diff --git a/apex/builders/undercloud_builder.py b/apex/builders/undercloud_builder.py
index 943c2525..47d2568d 100644
--- a/apex/builders/undercloud_builder.py
+++ b/apex/builders/undercloud_builder.py
@@ -28,14 +28,12 @@ def add_upstream_packages(image):
pkgs = [
'epel-release',
'openstack-utils',
- 'ceph-common',
'python2-networking-sfc',
'openstack-ironic-inspector',
'subunit-filters',
'docker-distribution',
'openstack-tripleo-validations',
'libguestfs-tools',
- 'ceph-ansible',
'python-tripleoclient',
'openstack-tripleo-heat-templates'
]
diff --git a/apex/common/constants.py b/apex/common/constants.py
index 4e48920e..59988f74 100644
--- a/apex/common/constants.py
+++ b/apex/common/constants.py
@@ -53,7 +53,7 @@ DEPLOY_TIMEOUT = 120
RDO_TAG = 'current-tripleo'
UPSTREAM_RDO = "https://images.rdoproject.org/master/rdo_trunk/{}/".format(
RDO_TAG)
-OPENSTACK_GERRIT = 'https://review.openstack.org'
+OPENSTACK_GERRIT = 'https://review.opendev.org'
DOCKER_TAG = RDO_TAG
# Maps regular service files to docker versions
diff --git a/apex/common/utils.py b/apex/common/utils.py
index aae821ef..72a66d10 100644
--- a/apex/common/utils.py
+++ b/apex/common/utils.py
@@ -310,3 +310,15 @@ def fetch_properties(url):
logging.warning('Unable to fetch properties for: {}'.format(url))
raise exc.FetchException('Unable determine properties location: '
'{}'.format(url))
+
+
+def find_container_client(os_version):
+ """
+ Determines whether to use docker or podman client
+ :param os_version: openstack version
+ :return: client name as string
+ """
+ if os_version == 'rocky' or os_version == 'queens':
+ return 'docker'
+ else:
+ return 'podman'
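
A quick usage check for the helper above; it mirrors the unit test added later in this change and assumes the apex package is importable:

    # Usage sketch for find_container_client(); assumes apex is installed.
    from apex.common import utils

    for version in ('queens', 'rocky'):
        assert utils.find_container_client(version) == 'docker'
    for version in ('stein', 'master'):
        assert utils.find_container_client(version) == 'podman'
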
diff --git a/apex/deploy.py b/apex/deploy.py
index bb011f92..d0c2b208 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -338,13 +338,14 @@ def main():
utils.run_ansible(ansible_args,
os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
'deploy_dependencies.yml'))
+ all_in_one = not bool(args.virt_compute_nodes)
if args.snapshot:
# Start snapshot Deployment
logging.info('Executing Snapshot Deployment...')
SnapshotDeployment(deploy_settings=deploy_settings,
snap_cache_dir=args.snap_cache,
fetch=not args.no_fetch,
- all_in_one=not bool(args.virt_compute_nodes))
+ all_in_one=all_in_one)
else:
# Start Standard TripleO Deployment
deployment = ApexDeployment(deploy_settings, args.patches_file,
@@ -526,6 +527,8 @@ def main():
container_vars['os_version'] = os_version
container_vars['aarch64'] = platform.machine() == 'aarch64'
container_vars['sdn_env_file'] = sdn_env_files
+ container_vars['container_client'] = utils.find_container_client(
+ os_version)
try:
utils.run_ansible(container_vars, docker_playbook,
host=undercloud.ip, user='stack',
@@ -568,6 +571,8 @@ def main():
deploy_vars['http_proxy'] = net_settings.get('http_proxy', '')
deploy_vars['https_proxy'] = net_settings.get('https_proxy', '')
deploy_vars['vim'] = ds_opts['vim']
+ deploy_vars['container_client'] = utils.find_container_client(
+ os_version)
for dns_server in net_settings['dns_servers']:
deploy_vars['dns_server_args'] += " --dns-nameserver {}".format(
dns_server)
@@ -731,6 +736,10 @@ def main():
deploy_vars['l2gw'] = ds_opts.get('l2gw')
deploy_vars['sriov'] = ds_opts.get('sriov')
deploy_vars['tacker'] = ds_opts.get('tacker')
+ deploy_vars['all_in_one'] = all_in_one
+ # TODO(trozet): need to set container client to docker until OOO
+ # migrates OC to podman. Remove this later.
+ deploy_vars['container_client'] = 'docker'
# TODO(trozet): pull all logs and store in tmp dir in overcloud
# playbook
post_overcloud = os.path.join(args.lib_dir, constants.ANSIBLE_PATH,
diff --git a/apex/network/network_data.py b/apex/network/network_data.py
index 1177af09..6f330c50 100644
--- a/apex/network/network_data.py
+++ b/apex/network/network_data.py
@@ -83,7 +83,7 @@ def create_network_data(ns, target=None):
"{}".format(net))
raise NetworkDataException("cidr is null for network {}".format(
net))
-
+ tmp_net['mtu'] = network.get('mtu', 1500)
network_data.append(copy.deepcopy(tmp_net))
# have to do this due to the aforementioned bug
diff --git a/apex/network/network_environment.py b/apex/network/network_environment.py
index 0a4d1036..52b4452a 100644
--- a/apex/network/network_environment.py
+++ b/apex/network/network_environment.py
@@ -186,6 +186,8 @@ class NetworkEnvironment(dict):
for flag in IPV6_FLAGS:
self[param_def][flag] = True
+ self._update_service_netmap(net_settings.enabled_network_list)
+
def _get_vlan(self, network):
if isinstance(network['nic_mapping'][CONTROLLER]['vlan'], int):
return network['nic_mapping'][CONTROLLER]['vlan']
@@ -218,6 +220,13 @@ class NetworkEnvironment(dict):
prefix = ''
self[reg][key] = self.tht_dir + prefix + postfix
+ def _update_service_netmap(self, network_list):
+ if 'ServiceNetMap' not in self[param_def]:
+ return
+ for service, network in self[param_def]['ServiceNetMap'].items():
+ if network not in network_list:
+ self[param_def]['ServiceNetMap'][service] = 'ctlplane'
+
class NetworkEnvException(Exception):
def __init__(self, value):
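
The new _update_service_netmap hook above rewrites any ServiceNetMap entry whose network is not in the enabled network list to ctlplane, which keeps reduced-network (e.g. admin-only) deployments functional. A small before/after illustration with a hypothetical map:

    # Hypothetical ServiceNetMap before the remap, with only 'admin' enabled.
    service_net_map = {
        'NeutronApiNetwork': 'internal_api',
        'KeystoneAdminApiNetwork': 'ctlplane',
        'SwiftProxyNetwork': 'storage',
    }
    enabled_network_list = ['admin']
    for service, network in service_net_map.items():
        if network not in enabled_network_list:
            service_net_map[service] = 'ctlplane'
    # Every service now maps to 'ctlplane', matching test_service_netmap below.
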
diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index f40c8bd4..538f50a4 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -99,6 +99,12 @@ DUPLICATE_COMPUTE_SERVICES = [
'OS::TripleO::Services::ComputeNeutronL3Agent'
]
+NFS_VARS = [
+ 'NovaNfsEnabled',
+ 'GlanceNfsEnabled',
+ 'CinderNfsEnabledBackend'
+]
+
def build_sdn_env_list(ds, sdn_map, env_list=None):
"""
@@ -361,22 +367,12 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
pw_op = "password:{}".format(root_pw)
virt_cmds.append({con.VIRT_PW: pw_op})
- if dataplane == 'ovs':
- if ds_opts['sfc']:
- oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
- elif sdn == 'opendaylight':
- # FIXME(trozet) remove this after RDO is updated with fix for
- # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
- ovs_file = os.path.basename(con.CUSTOM_OVS)
- ovs_url = con.CUSTOM_OVS.replace(ovs_file, '')
- utils.fetch_upstream_and_unpack(dest=tmp_dir, url=ovs_url,
- targets=[ovs_file])
- virt_cmds.extend([
- {con.VIRT_UPLOAD: "{}:/root/".format(os.path.join(tmp_dir,
- ovs_file))},
- {con.VIRT_RUN_CMD: "yum downgrade -y /root/{}".format(
- ovs_file)}
- ])
+ # FIXME(trozet) ovs build is failing in CentOS 7.6
+ # if dataplane == 'ovs':
+ # FIXME(trozet) remove this after RDO is updated with fix for
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1544892
+ # https://review.rdoproject.org/r/#/c/13839/
+ # oc_builder.inject_ovs_nsh(tmp_oc_image, tmp_dir)
if dataplane == 'fdio':
# Patch neutron with using OVS external interface for router
@@ -442,17 +438,20 @@ def prep_image(ds, ns, img, tmp_dir, root_pw=None, docker_tag=None,
{con.VIRT_RUN_CMD: "ln -s /usr/lib/systemd/system/nfs-server.service "
"/etc/systemd/system/multi-user.target.wants/"
"nfs-server.service"},
- {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/glance"},
- {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/cinder"},
- {con.VIRT_RUN_CMD: "mkdir -p /root/nfs/nova"},
- {con.VIRT_RUN_CMD: "chmod 777 /root/nfs/glance"},
- {con.VIRT_RUN_CMD: "chmod 777 /root/nfs/cinder"},
- {con.VIRT_RUN_CMD: "chmod 777 /root/nfs/nova"},
- {con.VIRT_RUN_CMD: "echo '/root/nfs/glance *(rw,sync,"
+ {con.VIRT_RUN_CMD: "mkdir -p /glance"},
+ {con.VIRT_RUN_CMD: "mkdir -p /cinder"},
+ {con.VIRT_RUN_CMD: "mkdir -p /nova"},
+ {con.VIRT_RUN_CMD: "chmod 777 /glance"},
+ {con.VIRT_RUN_CMD: "chmod 777 /cinder"},
+ {con.VIRT_RUN_CMD: "chmod 777 /nova"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /glance"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /cinder"},
+ {con.VIRT_RUN_CMD: "chown nfsnobody:nfsnobody /nova"},
+ {con.VIRT_RUN_CMD: "echo '/glance *(rw,sync,"
"no_root_squash,no_acl)' > /etc/exports"},
- {con.VIRT_RUN_CMD: "echo '/root/nfs/cinder *(rw,sync,"
+ {con.VIRT_RUN_CMD: "echo '/cinder *(rw,sync,"
"no_root_squash,no_acl)' >> /etc/exports"},
- {con.VIRT_RUN_CMD: "echo '/root/nfs/nova *(rw,sync,"
+ {con.VIRT_RUN_CMD: "echo '/nova *(rw,sync,"
"no_root_squash,no_acl)' >> /etc/exports"},
{con.VIRT_RUN_CMD: "exportfs -avr"},
])
@@ -701,11 +700,11 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
# Merge compute services into control services if only a single
# node deployment
if num_compute == 0:
- logging.info("All in one deployment. Checking if service merging "
- "required into control services")
with open(tmp_opnfv_env, 'r') as fh:
data = yaml.safe_load(fh)
param_data = data['parameter_defaults']
+ logging.info("All in one deployment detected")
+ logging.info("Disabling NFS in env file")
# Check to see if any parameters are set for Compute
for param in param_data.keys():
if param != 'ComputeServices' and param.startswith('Compute'):
@@ -713,6 +712,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
"in deployment: {}. Please use Controller "
"based parameters when using All-in-one "
"deployments".format(param))
+ if param in NFS_VARS:
+ param_data[param] = False
+ logging.info("Checking if service merging required into "
+ "control services")
if ('ControllerServices' in param_data and 'ComputeServices' in
param_data):
logging.info("Services detected in environment file. Merging...")
@@ -727,11 +730,11 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
logging.debug("Merged controller services: {}".format(
pprint.pformat(param_data['ControllerServices'])
))
- with open(tmp_opnfv_env, 'w') as fh:
- yaml.safe_dump(data, fh, default_flow_style=False)
else:
logging.info("No services detected in env file, not merging "
"services")
+ with open(tmp_opnfv_env, 'w') as fh:
+ yaml.safe_dump(data, fh, default_flow_style=False)
logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
with open(tmp_opnfv_env, 'r') as fh:
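
In the all-in-one path above, the NFS toggles from NFS_VARS are forced off and the environment file is now written back once, whether or not compute services were merged into the controller list. A condensed sketch of that parameter pass (sample data is made up; the real code also warns about other Compute* parameters):

    # Condensed sketch of the all-in-one parameter handling.
    NFS_VARS = ['NovaNfsEnabled', 'GlanceNfsEnabled', 'CinderNfsEnabledBackend']
    param_data = {
        'NovaNfsEnabled': True,
        'GlanceNfsEnabled': True,
        'ControllerServices': ['OS::TripleO::Services::NovaApi'],
        'ComputeServices': ['OS::TripleO::Services::NovaCompute'],
    }
    for param in param_data:
        if param in NFS_VARS:
            param_data[param] = False
    if 'ControllerServices' in param_data and 'ComputeServices' in param_data:
        param_data['ControllerServices'] += param_data.pop('ComputeServices')
    # param_data is then dumped back to the temporary opnfv environment file.
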
diff --git a/apex/tests/config/98faaca.diff b/apex/tests/config/98faaca.diff
index 68a66fbc..96462d5f 100644
--- a/apex/tests/config/98faaca.diff
+++ b/apex/tests/config/98faaca.diff
@@ -17,7 +17,7 @@ specified in environments/services-docker/update-odl.yaml.
Upgrading ODL to the next major release (1.1->2) requires
only the L2 steps. These are implemented as upgrade_tasks and
-post_upgrade_tasks in https://review.openstack.org/489201.
+post_upgrade_tasks in https://review.opendev.org/489201.
Steps involved in level 2 update are
1. Block OVS instances to connect to ODL
diff --git a/apex/tests/test_apex_common_utils.py b/apex/tests/test_apex_common_utils.py
index f307990d..1ecb7df6 100644
--- a/apex/tests/test_apex_common_utils.py
+++ b/apex/tests/test_apex_common_utils.py
@@ -155,3 +155,8 @@ class TestCommonUtils:
def test_unique(self):
dummy_list = [1, 2, 1, 3, 4, 5, 5]
assert_equal(utils.unique(dummy_list), [1, 2, 3, 4, 5])
+
+ def test_find_container_client(self):
+ for version in 'rocky', 'queens':
+ assert_equal(utils.find_container_client(version), 'docker')
+ assert_equal(utils.find_container_client('master'), 'podman')
diff --git a/apex/tests/test_apex_network_environment.py b/apex/tests/test_apex_network_environment.py
index 79a72a55..7aa6ef15 100644
--- a/apex/tests/test_apex_network_environment.py
+++ b/apex/tests/test_apex_network_environment.py
@@ -165,3 +165,10 @@ class TestNetworkEnvironment:
e = NetworkEnvException("test")
print(e)
assert_is_instance(e, NetworkEnvException)
+
+ def test_service_netmap(self):
+ ns = copy(self.ns)
+ ns.enabled_network_list = ['admin']
+ ne = NetworkEnvironment(ns, os.path.join(TEST_BUILD_DIR, NET_ENV_FILE))
+ for network in ne['parameter_defaults']['ServiceNetMap'].values():
+ assert_equal(network, 'ctlplane')
diff --git a/apex/tests/test_apex_overcloud_deploy.py b/apex/tests/test_apex_overcloud_deploy.py
index 402ecebf..79dbf54b 100644
--- a/apex/tests/test_apex_overcloud_deploy.py
+++ b/apex/tests/test_apex_overcloud_deploy.py
@@ -233,6 +233,7 @@ class TestOvercloudDeploy(unittest.TestCase):
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ @patch('apex.builders.overcloud_builder.inject_ovs_nsh')
@patch('apex.overcloud.deploy.utils.fetch_upstream_and_unpack')
@patch('apex.builders.overcloud_builder.inject_opendaylight')
@patch('apex.overcloud.deploy.virt_utils')
@@ -240,7 +241,8 @@ class TestOvercloudDeploy(unittest.TestCase):
@patch('apex.overcloud.deploy.os.path.isfile')
@patch('builtins.open', mock_open())
def test_prep_image_sdn_odl(self, mock_is_file, mock_shutil,
- mock_virt_utils, mock_inject_odl, mock_fetch):
+ mock_virt_utils, mock_inject_odl,
+ mock_fetch, mock_ovs_nsh):
mock_is_file.return_value = True
ds_opts = {'dataplane': 'ovs',
'sdn_controller': 'opendaylight',
@@ -258,6 +260,7 @@ class TestOvercloudDeploy(unittest.TestCase):
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
mock_inject_odl.assert_called()
+ # mock_ovs_nsh.assert_called()
@patch('apex.overcloud.deploy.c_builder')
@patch('apex.overcloud.deploy.oc_builder')
@@ -339,12 +342,13 @@ class TestOvercloudDeploy(unittest.TestCase):
mock_virt_utils.virt_customize.assert_called()
mock_oc_builder.inject_opendaylight.assert_called()
+ @patch('apex.builders.overcloud_builder.inject_ovs_nsh')
@patch('apex.overcloud.deploy.virt_utils')
@patch('apex.overcloud.deploy.shutil')
@patch('apex.overcloud.deploy.os.path.isfile')
@patch('builtins.open', mock_open())
def test_prep_image_sdn_ovn(self, mock_is_file, mock_shutil,
- mock_virt_utils):
+ mock_virt_utils, mock_ovs_nsh):
mock_is_file.return_value = True
ds_opts = {'dataplane': 'ovs',
'vpn': False,
@@ -357,7 +361,9 @@ class TestOvercloudDeploy(unittest.TestCase):
ns = MagicMock()
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
+ # mock_ovs_nsh.assert_called()
+ @patch('apex.builders.overcloud_builder.inject_ovs_nsh')
@patch('apex.overcloud.deploy.utils.fetch_upstream_and_unpack')
@patch('apex.builders.overcloud_builder.inject_quagga')
@patch('apex.builders.overcloud_builder.inject_opendaylight')
@@ -367,7 +373,8 @@ class TestOvercloudDeploy(unittest.TestCase):
@patch('builtins.open', mock_open())
def test_prep_image_sdn_odl_vpn(self, mock_is_file, mock_shutil,
mock_virt_utils, mock_inject_odl,
- mock_inject_quagga, mock_fetch):
+ mock_inject_quagga, mock_fetch,
+ mock_ovs_nsh):
mock_is_file.return_value = True
ds_opts = {'dataplane': 'ovs',
'sdn_controller': 'opendaylight',
@@ -386,6 +393,7 @@ class TestOvercloudDeploy(unittest.TestCase):
mock_virt_utils.virt_customize.assert_called()
mock_inject_odl.assert_called()
mock_inject_quagga.assert_called()
+ # mock_ovs_nsh.assert_called()
@patch('apex.builders.overcloud_builder.inject_ovs_nsh')
@patch('apex.builders.overcloud_builder.inject_opendaylight')
@@ -413,7 +421,7 @@ class TestOvercloudDeploy(unittest.TestCase):
prep_image(ds, ns, 'undercloud.qcow2', '/tmp', root_pw='test')
mock_virt_utils.virt_customize.assert_called()
mock_inject_odl.assert_called()
- mock_inject_ovs_nsh.assert_called()
+ # mock_inject_ovs_nsh.assert_called()
@patch('apex.overcloud.deploy.os.path.isfile')
def test_prep_image_no_image(self, mock_isfile):
diff --git a/apex/undercloud/undercloud.py b/apex/undercloud/undercloud.py
index ccdcd168..5ee487c2 100644
--- a/apex/undercloud/undercloud.py
+++ b/apex/undercloud/undercloud.py
@@ -155,6 +155,8 @@ class Undercloud:
ansible_vars['apex_temp_dir'] = apex_temp_dir
ansible_vars['nat'] = self.detect_nat(net_settings)
+ ansible_vars['container_client'] = utils.find_container_client(
+ self.os_version)
try:
utils.run_ansible(ansible_vars, playbook, host=self.ip,
user='stack')
@@ -252,7 +254,8 @@ class Undercloud:
"generate_service_certificate false",
"undercloud_ntp_servers {}".format(str(ns['ntp'][0])),
"container_images_file "
- "/home/stack/containers-prepare-parameter.yaml"
+ "/home/stack/containers-prepare-parameter.yaml",
+ "undercloud_enable_selinux false"
]
config['undercloud_network_config'] = [
diff --git a/build/csit-environment.yaml b/build/csit-environment.yaml
index 9572504a..39486d32 100644
--- a/build/csit-environment.yaml
+++ b/build/csit-environment.yaml
@@ -15,16 +15,28 @@ parameter_defaults:
tripleo::ringbuilder::build_ring: false
nova::api::default_floating_pool: 'external'
ControllerExtraConfig:
- tripleo::firewall:firewall_rules:
- '139 allow NFS':
- dport: 2049
+ tripleo::firewall::firewall_rules:
+ '139 allow NFS TCP':
+ dport:
+ - 2049
+ - 111
+ - 32765
+ proto: tcp
+ action: accept
+ '140 allow NFS UDP':
+ dport:
+ - 2049
+ - 111
+ - 32765
+ proto: udp
+ action: accept
GlanceNfsEnabled: true
- GlanceNfsShare: overcloud-controller-0.opnfvlf.org:/root/nfs/glance
+ GlanceNfsShare: overcloud-controller-0.opnfvlf.org:/glance
GlanceNfsOptions:
- 'rw,sync,nosharecache,context=system_u:object_r:glance_var_lib_t:s0'
+ 'rw,sync,context=system_u:object_r:glance_var_lib_t:s0'
NovaNfsEnabled: true
- NovaNfsShare: overcloud-controller-0.opnfvlf.org:/root/nfs/nova
- NovaNfsOptions: 'rw,sync,nosharecache,context=system_u:object_r:nfs_t:s0'
+ NovaNfsShare: overcloud-controller-0.opnfvlf.org:/nova
+ NovaNfsOptions: 'rw,sync,context=system_u:object_r:nfs_t:s0'
DockerPuppetProcessCount: 10
NeutronNetworkVLANRanges: 'datacentre:500:525'
SshServerOptions:
diff --git a/build/csit-queens-environment.yaml b/build/csit-queens-environment.yaml
index 2cf3f02b..12c994d1 100644
--- a/build/csit-queens-environment.yaml
+++ b/build/csit-queens-environment.yaml
@@ -15,16 +15,28 @@ parameter_defaults:
tripleo::ringbuilder::build_ring: false
nova::api::default_floating_pool: 'external'
ControllerExtraConfig:
- tripleo::firewall:firewall_rules:
- '139 allow NFS':
- dport: 2049
+ tripleo::firewall::firewall_rules:
+ '139 allow NFS TCP':
+ dport:
+ - 2049
+ - 111
+ - 32765
+ proto: tcp
+ action: accept
+ '140 allow NFS UDP':
+ dport:
+ - 2049
+ - 111
+ - 32765
+ proto: udp
+ action: accept
GlanceNfsEnabled: true
- GlanceNfsShare: overcloud-controller-0.opnfvlf.org:/root/nfs/glance
+ GlanceNfsShare: overcloud-controller-0.opnfvlf.org:/glance
GlanceNfsOptions:
- 'rw,sync,nosharecache,context=system_u:object_r:glance_var_lib_t:s0'
+ 'rw,sync,context=system_u:object_r:glance_var_lib_t:s0'
NovaNfsEnabled: true
- NovaNfsShare: overcloud-controller-0.opnfvlf.org:/root/nfs/nova
- NovaNfsOptions: 'rw,sync,nosharecache,context=system_u:object_r:nfs_t:s0'
+ NovaNfsShare: overcloud-controller-0.opnfvlf.org:/nova
+ NovaNfsOptions: 'rw,sync,context=system_u:object_r:nfs_t:s0'
DockerPuppetProcessCount: 10
NeutronNetworkVLANRanges: 'datacentre:500:525'
SshServerOptions:
diff --git a/build/csit-rocky-environment.yaml b/build/csit-rocky-environment.yaml
index 9572504a..39486d32 100644
--- a/build/csit-rocky-environment.yaml
+++ b/build/csit-rocky-environment.yaml
@@ -15,16 +15,28 @@ parameter_defaults:
tripleo::ringbuilder::build_ring: false
nova::api::default_floating_pool: 'external'
ControllerExtraConfig:
- tripleo::firewall:firewall_rules:
- '139 allow NFS':
- dport: 2049
+ tripleo::firewall::firewall_rules:
+ '139 allow NFS TCP':
+ dport:
+ - 2049
+ - 111
+ - 32765
+ proto: tcp
+ action: accept
+ '140 allow NFS UDP':
+ dport:
+ - 2049
+ - 111
+ - 32765
+ proto: udp
+ action: accept
GlanceNfsEnabled: true
- GlanceNfsShare: overcloud-controller-0.opnfvlf.org:/root/nfs/glance
+ GlanceNfsShare: overcloud-controller-0.opnfvlf.org:/glance
GlanceNfsOptions:
- 'rw,sync,nosharecache,context=system_u:object_r:glance_var_lib_t:s0'
+ 'rw,sync,context=system_u:object_r:glance_var_lib_t:s0'
NovaNfsEnabled: true
- NovaNfsShare: overcloud-controller-0.opnfvlf.org:/root/nfs/nova
- NovaNfsOptions: 'rw,sync,nosharecache,context=system_u:object_r:nfs_t:s0'
+ NovaNfsShare: overcloud-controller-0.opnfvlf.org:/nova
+ NovaNfsOptions: 'rw,sync,context=system_u:object_r:nfs_t:s0'
DockerPuppetProcessCount: 10
NeutronNetworkVLANRanges: 'datacentre:500:525'
SshServerOptions:
diff --git a/build/network-environment.yaml b/build/network-environment.yaml
index 3fd22e3d..1397a0c8 100644
--- a/build/network-environment.yaml
+++ b/build/network-environment.yaml
@@ -63,33 +63,53 @@ parameter_defaults:
NeutronExternalNetworkBridge: 'br-ex'
ServiceNetMap:
+ ApacheNetwork: internal_api
NeutronTenantNetwork: tenant
CeilometerApiNetwork: internal_api
AodhApiNetwork: internal_api
+ PankoApiNetwork: internal_api
+ BarbicanApiNetwork: internal_api
+ GnocchiApiNetwork: internal_api
OpendaylightApiNetwork: internal_api
MongoDbNetwork: internal_api
CinderApiNetwork: internal_api
CinderIscsiNetwork: storage
GlanceApiNetwork: internal_api
GlanceRegistryNetwork: internal_api
+ IronicApiNetwork: ctlplane
+ IronicNetwork: ctlplane
+ IronicInspectorNetwork: ctlplane
KeystoneAdminApiNetwork: ctlplane
KeystonePublicApiNetwork: internal_api
NeutronApiNetwork: internal_api
HeatApiNetwork: internal_api
+ HeatApiCfnNetwork: internal_api
+ HeatApiCloudwatchNetwork: internal_api
+ ManilaApiNetwork: internal_api
+ MetricsQdrNetwork: internal_api
NovaApiNetwork: internal_api
NovaMetadataNetwork: internal_api
+ NovaPlacementNetwork: internal_api
NovaVncProxyNetwork: internal_api
+ NovaLibvirtNetwork: internal_api
+ NovajoinNetwork: internal_api
+ OctaviaApiNetwork: internal_api
SwiftMgmtNetwork: storage
SwiftProxyNetwork: storage
TackerApiNetwork: internal_api
CongressApiNetwork: internal_api
HorizonNetwork: internal_api
+ OsloMessagingRpcNetwork: internal_api
+ OsloMessagingNotifyNetwork: internal_api
MemcachedNetwork: internal_api
RabbitMqNetwork: internal_api
RedisNetwork: internal_api
MysqlNetwork: internal_api
CephClusterNetwork: storage
- CephPublicNetwork: storage
+ CephMonNetwork: storage
+ PublicNetwork: external
+ OvnDbsNetwork: internal_api
+ DockerRegistryNetwork: ctlplane
# Define which network will be used for hostname resolution
ControllerHostnameResolveNetwork: internal_api
ComputeHostnameResolveNetwork: internal_api
diff --git a/build/patches/neutron-patch-NSDriver.patch b/build/patches/neutron-patch-NSDriver.patch
index 84b4fb02..95ad58f9 100644
--- a/build/patches/neutron-patch-NSDriver.patch
+++ b/build/patches/neutron-patch-NSDriver.patch
@@ -139,7 +139,7 @@ index 88d6e67f31..c0fab604d1 100644
+
+ def _configure_mtu(self, ns_dev, mtu=None):
+ # Need to set MTU, after added to namespace. See review
-+ # https://review.openstack.org/327651
++ # https://review.opendev.org/327651
+ try:
+ # Note: network_device_mtu will be deprecated in future
+ mtu_override = self.conf.network_device_mtu
diff --git a/lib/ansible/playbooks/configure_undercloud.yml b/lib/ansible/playbooks/configure_undercloud.yml
index 80f3e67e..07b82c8e 100644
--- a/lib/ansible/playbooks/configure_undercloud.yml
+++ b/lib/ansible/playbooks/configure_undercloud.yml
@@ -73,12 +73,16 @@
src: /home/stack/apex-undercloud-install.log
dest: "{{ apex_temp_dir }}/"
flat: yes
+ - name: Install ceph-ansible
+ yum:
+ name: ceph-ansible
+ become: yes
- name: openstack-configs nova
shell: openstack-config --set /var/lib/config-data/nova/etc/nova/nova.conf DEFAULT {{ item }}
become: yes
with_items: "{{ nova_config }}"
- name: restart nova services
- shell: "docker restart {{ item }}"
+ shell: "{{ container_client }} restart {{ item }}"
with_items:
- nova_conductor
- nova_compute
@@ -90,7 +94,7 @@
become: yes
with_items: "{{ neutron_config }}"
- name: restart neutron services
- shell: "docker restart {{ item }}"
+ shell: "{{ container_client }} restart {{ item }}"
with_items:
- neutron_api
- neutron_dhcp
@@ -100,7 +104,7 @@
become: yes
with_items: "{{ ironic_config }}"
- name: restart ironic services
- shell: "docker restart {{ item }}"
+ shell: "{{ container_client }} restart {{ item }}"
with_items:
- ironic_api
- ironic_conductor
@@ -168,12 +172,22 @@
jump: ACCEPT
source: "{{ nat_cidr }}"
ctstate: ESTABLISHED,RELATED
- - name: Undercloud NAT - Save iptables
- shell: service iptables save
become: yes
when:
- not nat_network_ipv6
- nat
+ - name: Allow SSH in iptables
+ iptables:
+ action: insert
+ chain: INPUT
+ rule_num: 1
+ protocol: tcp
+ destination_port: 22
+ jump: ACCEPT
+ become: yes
+ - name: Undercloud NAT - Save iptables
+ shell: service iptables save
+ become: yes
- name: fetch storage environment file
fetch:
src: /usr/share/openstack-tripleo-heat-templates/environments/storage-environment.yaml
diff --git a/lib/ansible/playbooks/deploy_overcloud.yml b/lib/ansible/playbooks/deploy_overcloud.yml
index e2e84d18..9a405814 100644
--- a/lib/ansible/playbooks/deploy_overcloud.yml
+++ b/lib/ansible/playbooks/deploy_overcloud.yml
@@ -73,6 +73,22 @@
owner: root
group: root
become: yes
+ - name: Insert External network into Compute role
+ shell: |
+ ruby -e '
+ require "yaml"
+ data = YAML.load(File.read("/usr/share/openstack-tripleo-heat-templates/roles_data.yaml"))
+ if data[1]["networks"].is_a?(Array)
+ data[1]["networks"].push("External")
+ elsif data[1]["networks"].is_a?(Hash)
+ data[1]["networks"].merge!("External"=> { "subnet" => "external_subnet" })
+ else
+ raise "Unable to determine data to modify in roles_data.yaml"
+ end
+ data[1]["default_route_networks"] = Array.new(["External"])
+ File.open("/usr/share/openstack-tripleo-heat-templates/roles_data.yaml", "w") { |f| f.write(data.to_yaml) }
+ '
+ become: yes
- name: Upload glance images
shell: "{{ stackrc }} && openstack overcloud image upload"
become: yes
@@ -92,11 +108,6 @@
- baremetal
- control
- compute
- - name: Downgrade ceph
- yum:
- allow_downgrade: yes
- name: ceph-ansible-3.1.6
- become: yes
- name: Re-enable ceph config for aarch64
replace:
path: "/usr/share/ceph-ansible/roles/ceph-client/tasks/create_users_keys.yml"
@@ -106,6 +117,16 @@
when: aarch64
- name: Configure DNS server for ctlplane network
shell: "{{ stackrc }} && openstack subnet set ctlplane-subnet {{ dns_server_args }}"
+ - name: Update NIC templates before deployment
+ shell: >
+ /usr/share/openstack-tripleo-heat-templates/tools/merge-new-params-nic-config-script.py
+ -n /home/stack/network_data.yaml -t /home/stack/nics/{{ item }}.yaml --discard-comments True
+ --role-name Controller
+ become: yes
+ become_user: stack
+ with_items:
+ - controller
+ - compute
- block:
- name: Execute Overcloud Deployment
shell: "{{ stackrc }} && bash deploy_command"
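
The inline Ruby task above adds the External network, plus a default route over it, to the second role in roles_data.yaml, handling both the older list form and the newer hash form of the networks field. A hedged Python equivalent of the same edit, with simplified error handling:

    # Python rendering of the inline Ruby edit above; path and role index
    # follow the playbook.
    import yaml

    path = '/usr/share/openstack-tripleo-heat-templates/roles_data.yaml'
    with open(path) as fh:
        data = yaml.safe_load(fh)

    networks = data[1]['networks']
    if isinstance(networks, list):
        networks.append('External')
    elif isinstance(networks, dict):
        networks['External'] = {'subnet': 'external_subnet'}
    else:
        raise RuntimeError('Unable to determine data to modify in roles_data.yaml')
    data[1]['default_route_networks'] = ['External']

    with open(path, 'w') as fh:
        yaml.safe_dump(data, fh, default_flow_style=False)
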
diff --git a/lib/ansible/playbooks/patch_containers.yml b/lib/ansible/playbooks/patch_containers.yml
new file mode 100644
index 00000000..1ef05810
--- /dev/null
+++ b/lib/ansible/playbooks/patch_containers.yml
@@ -0,0 +1,13 @@
+---
+ - name: "Pull docker image to ensure it exists locally: {{ item }}"
+ shell: "{{ container_client }} pull {{ undercloud_ip }}:8787/tripleo{{ os_version }}/centos-binary-{{ item }}:current-tripleo"
+ - name: "Find docker image user {{ item }}"
+ shell: >
+ {{ container_client }} inspect --format='{{ '{{' }}.ContainerConfig.User{{ '}}' }}'
+ {{ undercloud_ip }}:8787/tripleo{{ os_version }}/centos-binary-{{ item }}:current-tripleo
+ register: user_result
+ - name: "Patch docker image {{ item }}"
+ shell: >
+ cd /home/stack/containers/{{ item }} && {{ container_client }} build
+ --build-arg REAL_USER={{ user_result.stdout }}
+ -t {{ undercloud_ip }}:8787/tripleo{{ os_version }}/centos-binary-{{ item }}:apex .
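
Per patched service, the new include runs three container-client commands: pull the source image from the local registry, read its configured user, then rebuild it with the patch layer, passing the original user back in as the REAL_USER build argument consumed by the USER $REAL_USER step added in common_builder.py. A rough Python rendering of that sequence (values are illustrative):

    # Rough Python rendering of the three tasks above; the real playbook
    # substitutes container_client, undercloud_ip, os_version and item.
    import subprocess

    client = 'docker'                # or 'podman', per container_client
    undercloud_ip = '192.0.2.1'      # hypothetical
    os_version = 'rocky'
    service = 'nova-api'             # hypothetical patched service
    src = '{}:8787/tripleo{}/centos-binary-{}:current-tripleo'.format(
        undercloud_ip, os_version, service)

    subprocess.check_call([client, 'pull', src])
    real_user = subprocess.check_output(
        [client, 'inspect', '--format={{.ContainerConfig.User}}', src],
        universal_newlines=True).strip()
    subprocess.check_call(
        [client, 'build', '--build-arg', 'REAL_USER={}'.format(real_user),
         '-t', '{}:8787/tripleo{}/centos-binary-{}:apex'.format(
             undercloud_ip, os_version, service),
         '/home/stack/containers/{}'.format(service)])
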
diff --git a/lib/ansible/playbooks/post_deploy_overcloud.yml b/lib/ansible/playbooks/post_deploy_overcloud.yml
index 882b0126..2b90ab1f 100644
--- a/lib/ansible/playbooks/post_deploy_overcloud.yml
+++ b/lib/ansible/playbooks/post_deploy_overcloud.yml
@@ -54,14 +54,12 @@
- openstack-nova-api
- openstack-nova-scheduler
- openstack-nova-conductor
- - name: Restart Compute Nova Compute (Pike Workaround)
- shell: "systemctl restart openstack-nova-compute"
+ - name: Restart Compute Nova Compute (workaround for NFS)
+ shell: "{{ container_client }} restart nova_compute"
become: yes
- when:
- - "'compute' in ansible_hostname"
- - os_version == 'pike'
+ when: "'compute' in ansible_hostname or all_in_one"
- name: Update ODL container restart policy to always
- shell: "docker update --restart=always opendaylight_api"
+ shell: "{{ container_client }} update --restart=always opendaylight_api"
become: yes
when:
- sdn == 'opendaylight'
@@ -90,7 +88,7 @@
- "'controller' in ansible_hostname"
- sdn != 'ovn'
- name: Restart metadata service
- shell: "docker restart neutron_metadata_agent"
+ shell: "{{ container_client }} restart neutron_metadata_agent"
become: yes
when:
- "'controller' in ansible_hostname"
diff --git a/lib/ansible/playbooks/prepare_overcloud_containers.yml b/lib/ansible/playbooks/prepare_overcloud_containers.yml
index e2a4e134..ebf081dc 100644
--- a/lib/ansible/playbooks/prepare_overcloud_containers.yml
+++ b/lib/ansible/playbooks/prepare_overcloud_containers.yml
@@ -28,16 +28,15 @@
url: http://{{ undercloud_ip }}:8787/v2/_catalog
body_format: json
register: response
- - name: Patch Docker images
- shell: >
- cd /home/stack/containers/{{ item }} && docker build
- -t {{ undercloud_ip }}:8787/tripleo{{ os_version }}/centos-binary-{{ item }}:apex .
+ - include_tasks: patch_containers.yml
+ with_items: "{{ patched_docker_services }}"
+ loop_control:
+ loop_var: item
when:
- patched_docker_services|length > 0
- item in (response.json)['repositories']|join(" ")
- with_items: "{{ patched_docker_services }}"
- name: Push patched docker images to local registry
- shell: docker push {{ undercloud_ip }}:8787/tripleo{{ os_version }}/centos-binary-{{ item }}:apex
+ shell: "{{ container_client }} push {{ undercloud_ip }}:8787/tripleo{{ os_version }}/centos-binary-{{ item }}:apex"
when:
- patched_docker_services|length > 0
- item in (response.json)['repositories']|join(" ")
@@ -45,7 +44,7 @@
- name: Modify Images with Apex tag
replace:
path: "/home/stack/docker-images.yaml"
- regexp: "(\\s*Docker.*?:.*?centos-binary-{{ item[1] }}):.*"
+ regexp: "(\\s*Docker.*?:.*?centos-binary-{{ item }}):.*"
replace: '\1:apex'
with_items: "{{ patched_docker_services }}"
become: yes