-rw-r--r--  apex/deploy.py                                         |  2
-rw-r--r--  apex/inventory/inventory.py                            |  7
-rw-r--r--  apex/overcloud/deploy.py                               | 61
-rw-r--r--  apex/tests/config/inventory-virt-1-compute-node.yaml   | 14
-rw-r--r--  apex/tests/test_apex_inventory.py                      |  7
-rw-r--r--  apex/tests/test_apex_overcloud_deploy.py               | 54
-rw-r--r--  docs/release/installation/baremetal.rst                |  8
-rw-r--r--  docs/release/installation/introduction.rst             |  7
-rw-r--r--  docs/release/installation/virtual.rst                  | 12

9 files changed, 127 insertions(+), 45 deletions(-)
diff --git a/apex/deploy.py b/apex/deploy.py
index 70bc3a5a..ca4101b4 100644
--- a/apex/deploy.py
+++ b/apex/deploy.py
@@ -289,6 +289,8 @@ def main():
                                vcpus=args.virt_cpus
                                )
     inventory = Inventory(args.inventory_file, ha_enabled, args.virtual)
+    logging.info("Inventory is:\n {}".format(pprint.pformat(
+        inventory)))
     validate_cross_settings(deploy_settings, net_settings, inventory)
 
     ds_opts = deploy_settings['deploy_options']
diff --git a/apex/inventory/inventory.py b/apex/inventory/inventory.py
index b5ffd2f8..0546fe9f 100644
--- a/apex/inventory/inventory.py
+++ b/apex/inventory/inventory.py
@@ -67,9 +67,12 @@ class Inventory(dict):
         if ha and len(self['nodes']) < 5:
             raise ApexInventoryException('You must provide at least 5 '
                                          'nodes for HA deployment')
-        elif len(self['nodes']) < 2:
-            raise ApexInventoryException('You must provide at least 2 nodes '
+        elif len(self['nodes']) < 1:
+            raise ApexInventoryException('You must provide at least 1 node '
                                          'for non-HA deployment')
+        elif list(self.get_node_counts())[0] < 1:
+            raise ApexInventoryException('You must provide at least 1 '
+                                         'control node for deployment')
 
         if virtual:
             self['host-ip'] = '192.168.122.1'
diff --git a/apex/overcloud/deploy.py b/apex/overcloud/deploy.py
index 790e794a..dd476b6f 100644
--- a/apex/overcloud/deploy.py
+++ b/apex/overcloud/deploy.py
@@ -12,10 +12,12 @@ import fileinput
 import logging
 import os
 import platform
+import pprint
 import shutil
 import uuid
 import struct
 import time
+import yaml
 
 import apex.builders.overcloud_builder as oc_builder
 import apex.builders.common_builder as c_builder
@@ -92,6 +94,13 @@ RemainAfterExit=yes
 WantedBy=multi-user.target
 """
 
+DUPLICATE_COMPUTE_SERVICES = [
+    'OS::TripleO::Services::ComputeNeutronCorePlugin',
+    'OS::TripleO::Services::ComputeNeutronMetadataAgent',
+    'OS::TripleO::Services::ComputeNeutronOvsAgent',
+    'OS::TripleO::Services::ComputeNeutronL3Agent'
+]
+
 
 def build_sdn_env_list(ds, sdn_map, env_list=None):
     """
@@ -218,11 +227,7 @@ def create_deploy_cmd(ds, ns, inv, tmp_dir,
         deploy_options.append('baremetal-environment.yaml')
 
     num_control, num_compute = inv.get_node_counts()
-    if num_control == 0 or num_compute == 0:
-        logging.error("Detected 0 control or compute nodes. Control nodes: "
-                      "{}, compute nodes{}".format(num_control, num_compute))
-        raise ApexDeployException("Invalid number of control or computes")
-    elif num_control > 1 and not ds['global_params']['ha_enabled']:
+    if num_control > 1 and not ds['global_params']['ha_enabled']:
         num_control = 1
     if platform.machine() == 'aarch64':
         # aarch64 deploys were not completing in the default 90 mins.
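
The guard removed from create_deploy_cmd above is not lost: the node-count
rules now live in Inventory, per the inventory.py hunk. A condensed,
stand-alone sketch of those rules, assuming get_node_counts() returns a
(num_control, num_compute) tuple as the tests below suggest;
validate_node_counts is an illustrative name, not the real method:

    class ApexInventoryException(Exception):
        pass

    def validate_node_counts(nodes, num_control, ha):
        # HA still requires five nodes. Non-HA now accepts a single node,
        # but every topology must include at least one control node.
        if ha and len(nodes) < 5:
            raise ApexInventoryException('You must provide at least 5 '
                                         'nodes for HA deployment')
        elif len(nodes) < 1:
            raise ApexInventoryException('You must provide at least 1 node '
                                         'for non-HA deployment')
        elif num_control < 1:
            raise ApexInventoryException('You must provide at least 1 '
                                         'control node for deployment')

    validate_node_counts(['node0'], num_control=1, ha=False)  # all-in-one: OK
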
@@ -489,6 +494,10 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
     # SSH keys
     private_key, public_key = make_ssh_key()
 
+    num_control, num_compute = inv.get_node_counts()
+    if num_control > 1 and not ds['global_params']['ha_enabled']:
+        num_control = 1
+
     # Make easier/faster variables to index in the file editor
     if 'performance' in ds_opts:
         perf = True
@@ -602,9 +611,12 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
             if 'OS::TripleO::Services::NeutronDhcpAgent' in line:
                 output_line = ''
             elif 'NeutronDhcpAgentsPerNetwork' in line:
-                num_control, num_compute = inv.get_node_counts()
+                if num_compute == 0:
+                    num_dhcp_agents = num_control
+                else:
+                    num_dhcp_agents = num_compute
                 output_line = ("  NeutronDhcpAgentsPerNetwork: {}"
-                               .format(num_compute))
+                               .format(num_dhcp_agents))
             elif 'ComputeServices' in line:
                 output_line = ("  ComputeServices:\n"
                                "    - OS::TripleO::Services::NeutronDhcpAgent")
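
The NeutronDhcpAgentsPerNetwork change reduces to a small fallback: with zero
computes, the DHCP agents are counted against the controllers, since that is
where the compute services end up running after the merge in the next hunk.
A minimal sketch; the helper name is hypothetical:

    def dhcp_agents_per_network(num_control, num_compute):
        # All-in-one (no computes): schedule agents on the controller(s);
        # otherwise keep the previous behavior of one agent per compute.
        return num_compute if num_compute else num_control

    assert dhcp_agents_per_network(1, 0) == 1  # all-in-one
    assert dhcp_agents_per_network(3, 2) == 2  # HA with two computes
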
@@ -670,6 +682,41 @@ def prep_env(ds, ns, inv, opnfv_env, net_env, tmp_dir):
 
         print(output_line)
 
+    # Merge compute services into control services if only a single
+    # node deployment
+    if num_compute == 0:
+        logging.info("All in one deployment. Checking if service merging "
+                     "required into control services")
+        with open(tmp_opnfv_env, 'r') as fh:
+            data = yaml.safe_load(fh)
+        param_data = data['parameter_defaults']
+        # Check to see if any parameters are set for Compute
+        for param in param_data.keys():
+            if param != 'ComputeServices' and param.startswith('Compute'):
+                logging.warning("Compute parameter set, but will not be used "
+                                "in deployment: {}. Please use Controller "
+                                "based parameters when using All-in-one "
+                                "deployments".format(param))
+        if ('ControllerServices' in param_data and 'ComputeServices' in
+                param_data):
+            logging.info("Services detected in environment file. Merging...")
+            ctrl_services = param_data['ControllerServices']
+            cmp_services = param_data['ComputeServices']
+            param_data['ControllerServices'] = list(set().union(
+                ctrl_services, cmp_services))
+            for dup_service in DUPLICATE_COMPUTE_SERVICES:
+                if dup_service in param_data['ControllerServices']:
+                    param_data['ControllerServices'].remove(dup_service)
+            param_data.pop('ComputeServices')
+            logging.debug("Merged controller services: {}".format(
+                pprint.pformat(param_data['ControllerServices'])
+            ))
+            with open(tmp_opnfv_env, 'w') as fh:
+                yaml.safe_dump(data, fh, default_flow_style=False)
+        else:
+            logging.info("No services detected in env file, not merging "
+                         "services")
+
     logging.info("opnfv-environment file written to {}".format(tmp_opnfv_env))
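
Extracted from the hunk above, the merge itself is a set union followed by
removal of the Compute-side Neutron services that would otherwise duplicate
their controller-side equivalents. A runnable sketch with sample data; note
that set() discards the original list ordering, as the patch itself does:

    DUPLICATE_COMPUTE_SERVICES = [
        'OS::TripleO::Services::ComputeNeutronCorePlugin',
        'OS::TripleO::Services::ComputeNeutronMetadataAgent',
        'OS::TripleO::Services::ComputeNeutronOvsAgent',
        'OS::TripleO::Services::ComputeNeutronL3Agent'
    ]

    def merge_services(ctrl_services, cmp_services):
        # Union the two lists, then strip the compute-only Neutron agents.
        merged = list(set().union(ctrl_services, cmp_services))
        return [s for s in merged if s not in DUPLICATE_COMPUTE_SERVICES]

    ctrl = ['OS::TripleO::Services::NeutronApi']
    cmp = ['OS::TripleO::Services::NovaCompute',
           'OS::TripleO::Services::ComputeNeutronOvsAgent']
    print(sorted(merge_services(ctrl, cmp)))
    # ['OS::TripleO::Services::NeutronApi',
    #  'OS::TripleO::Services::NovaCompute']
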
diff --git a/apex/tests/config/inventory-virt-1-compute-node.yaml b/apex/tests/config/inventory-virt-1-compute-node.yaml
new file mode 100644
index 00000000..4c2dc5d4
--- /dev/null
+++ b/apex/tests/config/inventory-virt-1-compute-node.yaml
@@ -0,0 +1,14 @@
+---
+nodes:
+  node0:
+    arch: x86_64
+    capabilities: profile:compute
+    cpu: 4
+    disk: 41
+    ipmi_ip: 192.168.122.1
+    ipmi_pass: password
+    ipmi_user: admin
+    mac_address: 00:a8:58:29:f9:99
+    memory: 10240
+    pm_port: 6230
+    pm_type: pxe_ipmitool
diff --git a/apex/tests/test_apex_inventory.py b/apex/tests/test_apex_inventory.py
index 71979465..38a4271a 100644
--- a/apex/tests/test_apex_inventory.py
+++ b/apex/tests/test_apex_inventory.py
@@ -56,10 +56,15 @@ class TestInventory:
             os.path.join(TEST_DUMMY_CONFIG, 'inventory-virt.yaml'),
             virtual=True, ha=True)
 
+    def test_inventory_valid_allinone_count(self):
+        i = Inventory(os.path.join(TEST_DUMMY_CONFIG,
+                      'inventory-virt-1-node.yaml'), ha=False)
+        assert_equal(list(i.get_node_counts()), [1, 0])
+
     def test_inventory_invalid_noha_count(self):
         assert_raises(ApexInventoryException, Inventory,
                       os.path.join(TEST_DUMMY_CONFIG,
-                                   'inventory-virt-1-node.yaml'),
+                                   'inventory-virt-1-compute-node.yaml'),
                       virtual=True, ha=False)
 
     def test_inventory_virtual(self):
diff --git a/apex/tests/test_apex_overcloud_deploy.py b/apex/tests/test_apex_overcloud_deploy.py
index 41f2e01a..a9e4bda5 100644
--- a/apex/tests/test_apex_overcloud_deploy.py
+++ b/apex/tests/test_apex_overcloud_deploy.py
@@ -7,6 +7,7 @@
 #      http://www.apache.org/licenses/LICENSE-2.0
 ##############################################################################
 
+import mock
 import os
 import sys
 import unittest
@@ -193,23 +194,6 @@ class TestOvercloudDeploy(unittest.TestCase):
         assert_not_in('enable_congress.yaml', result_cmd)
         assert_not_in('enable_barometer.yaml', result_cmd)
 
-    @patch('apex.overcloud.deploy.prep_sriov_env')
-    @patch('apex.overcloud.deploy.prep_storage_env')
-    @patch('apex.overcloud.deploy.build_sdn_env_list')
-    def test_create_deploy_cmd_raises(self, mock_sdn_list, mock_prep_storage,
-                                      mock_prep_sriov):
-        mock_sdn_list.return_value = []
-        ds = {'deploy_options': MagicMock(),
-              'global_params': MagicMock()}
-        ds['deploy_options'].__getitem__.side_effect = \
-            lambda i: 'master' if i == 'os_version' else MagicMock()
-        ns = {}
-        inv = MagicMock()
-        inv.get_node_counts.return_value = (0, 0)
-        virt = False
-        assert_raises(ApexDeployException, create_deploy_cmd,
-                      ds, ns, inv, '/tmp', virt)
-
     @patch('apex.builders.overcloud_builder.inject_opendaylight')
     @patch('apex.overcloud.deploy.virt_utils')
     @patch('apex.overcloud.deploy.shutil')
@@ -374,16 +358,24 @@ class TestOvercloudDeploy(unittest.TestCase):
         assert_in('-----BEGIN PRIVATE KEY-----', priv)
         assert_in('ssh-rsa', pub)
 
+    @patch('apex.overcloud.deploy.yaml')
     @patch('apex.overcloud.deploy.fileinput')
     @patch('apex.overcloud.deploy.shutil')
-    def test_prep_env(self, mock_shutil, mock_fileinput):
+    @patch('builtins.open', mock_open())
+    def test_prep_env(self, mock_shutil, mock_fileinput, mock_yaml):
         mock_fileinput.input.return_value = \
             ['CloudDomain', 'replace_private_key', 'replace_public_key',
              'opendaylight::vpp_routing_node', 'ControllerExtraConfig',
              'NovaComputeExtraConfig', 'ComputeKernelArgs', 'HostCpusList',
              'ComputeExtraConfigPre', 'resource_registry',
              'NovaSchedulerDefaultFilters']
-        ds = {'deploy_options':
+        mock_yaml.safe_load.return_value = {
+            'parameter_defaults': {
+                'ControllerServices': [1, 2, 3],
+                'ComputeServices': [3, 4, 5]
+            }}
+        ds = {'global_params': {'ha_enabled': False},
+              'deploy_options':
               {'sdn_controller': 'opendaylight',
                'odl_vpp_routing_node': 'test',
                'dataplane': 'ovs_dpdk',
@@ -405,7 +397,8 @@ class TestOvercloudDeploy(unittest.TestCase):
                    {'members': ['ext_nic']},
                    'compute':
                    {'members': ['ext_nic']}}}]}}
-        inv = None
+        inv = MagicMock()
+        inv.get_node_counts.return_value = (1, 0)
         try:
             # Swap stdout
             saved_stdout = sys.stdout
@@ -421,6 +414,12 @@ class TestOvercloudDeploy(unittest.TestCase):
             assert_in('ssh-rsa', output)
             assert_in('ComputeKernelArgs: \'test=test \'', output)
             assert_in('fdio::vpp_cpu_main_core: \'test\'', output)
+            mock_yaml.safe_dump.assert_called_with(
+                {'parameter_defaults': {
+                    'ControllerServices': [1, 2, 3, 4, 5],
+                }},
+                mock.ANY, default_flow_style=False
+            )
         finally:
             # put stdout back
             sys.stdout = saved_stdout
@@ -430,7 +429,8 @@ class TestOvercloudDeploy(unittest.TestCase):
     def test_prep_env_round_two(self, mock_shutil, mock_fileinput):
         mock_fileinput.input.return_value = \
             ['NeutronVPPAgentPhysnets']
-        ds = {'deploy_options':
+        ds = {'global_params': {'ha_enabled': False},
+              'deploy_options':
               {'sdn_controller': False,
                'dataplane': 'fdio',
                'sriov': 'xxx',
@@ -448,7 +448,8 @@ class TestOvercloudDeploy(unittest.TestCase):
                    {'members': ['ext_nic']},
                    'compute':
                    {'members': ['ext_nic']}}}]}}
-        inv = None
+        inv = MagicMock()
+        inv.get_node_counts.return_value = (3, 2)
         try:
             # Swap stdout
             saved_stdout = sys.stdout
@@ -474,7 +475,8 @@ class TestOvercloudDeploy(unittest.TestCase):
         mock_fileinput.input.return_value = \
             ['OS::TripleO::Services::NeutronDhcpAgent',
              'NeutronDhcpAgentsPerNetwork', 'ComputeServices']
-        ds = {'deploy_options':
+        ds = {'global_params': {'ha_enabled': False},
+              'deploy_options':
               {'sdn_controller': 'opendaylight',
                'dataplane': 'fdio',
                'sriov': 'xxx',
@@ -515,7 +517,8 @@ class TestOvercloudDeploy(unittest.TestCase):
         mock_fileinput.input.return_value = \
             ['NeutronNetworkVLANRanges',
              'NeutronNetworkType', 'NeutronBridgeMappings']
-        ds = {'deploy_options':
+        ds = {'global_params': {'ha_enabled': False},
+              'deploy_options':
               {'sdn_controller': False,
                'dataplane': 'ovs',
                'sriov': 'xxx',
@@ -566,7 +569,8 @@ class TestOvercloudDeploy(unittest.TestCase):
              'NeutronNetworkType', 'NeutronBridgeMappings',
              'OpenDaylightProviderMappings']
-        ds = {'deploy_options':
+        ds = {'global_params': {'ha_enabled': False},
+              'deploy_options':
               {'sdn_controller': 'opendaylight',
                'dataplane': 'ovs',
                'sriov': 'xxx',
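
The reworked test_prep_env patches yaml inside apex.overcloud.deploy so it can
feed a canned environment in through safe_load and then assert on what
safe_dump writes back, with mock.ANY matching the file handle. The assertion
pattern in miniature, using stand-in data:

    from unittest import mock

    mock_yaml = mock.MagicMock()
    merged = {'parameter_defaults': {'ControllerServices': [1, 2, 3, 4, 5]}}
    # The code under test would call safe_dump with the merged environment:
    mock_yaml.safe_dump(merged, mock.MagicMock(), default_flow_style=False)
    mock_yaml.safe_dump.assert_called_with(
        merged, mock.ANY, default_flow_style=False)
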
diff --git a/docs/release/installation/baremetal.rst b/docs/release/installation/baremetal.rst
index d8f90792..ff55bc16 100644
--- a/docs/release/installation/baremetal.rst
+++ b/docs/release/installation/baremetal.rst
@@ -150,9 +150,13 @@ IPMI configuration information gathered in section
    template to ``/etc/opnfv-apex/inventory.yaml``.
 
 2. The nodes dictionary contains a definition block for each baremetal host
-   that will be deployed. 1 or more compute nodes and 3 controller nodes are
-   required. (The example file contains blocks for each of these already).
+   that will be deployed. 0 or more compute nodes and 1 or 3 controller nodes
+   are required. (The example file contains blocks for each of these already).
    It is optional at this point to add more compute nodes into the node list.
+   By specifying 0 compute nodes in the inventory file, the deployment will
+   automatically deploy "all-in-one" nodes, which means the compute will run
+   alongside the controller in a single overcloud node. Specifying 3 control
+   nodes will result in a highly-available service model.
 
 3. Edit the following values for each node:
diff --git a/docs/release/installation/introduction.rst b/docs/release/installation/introduction.rst
index 8dbf8f2f..76ed0acb 100644
--- a/docs/release/installation/introduction.rst
+++ b/docs/release/installation/introduction.rst
@@ -12,7 +12,7 @@ Preface
 
 Apex uses Triple-O from the RDO Project OpenStack distribution as a
 provisioning tool. The Triple-O image based life cycle installation
-tool provisions an OPNFV Target System (3 controllers, 2 or more
+tool provisions an OPNFV Target System (1 or 3 controllers, 0 or more
 compute nodes) with OPNFV specific configuration provided by the Apex
 deployment tool chain.
 
@@ -37,6 +37,5 @@ will prepare a host to the same ready state for OPNFV deployment.
 
 ``opnfv-deploy`` instantiates a Triple-O Undercloud VM server using libvirt
 as its provider. This VM is then configured and used to provision the
-OPNFV target deployment (3 controllers, n compute nodes). These nodes can
-be either virtual or bare metal. This guide contains instructions for
-installing either method.
+OPNFV target deployment. These nodes can be either virtual or bare metal.
+This guide contains instructions for installing either method.
diff --git a/docs/release/installation/virtual.rst b/docs/release/installation/virtual.rst
index af8aece2..5682f364 100644
--- a/docs/release/installation/virtual.rst
+++ b/docs/release/installation/virtual.rst
@@ -12,11 +12,14 @@ The virtual deployment operates almost the same way as the bare metal
 deployment with a few differences mainly related to power management.
 ``opnfv-deploy`` still deploys an undercloud VM. In addition to the undercloud
 VM a collection of VMs (3 control nodes + 2 compute for an HA deployment or 1
-control node and 1 or more compute nodes for a Non-HA Deployment) will be
+control node and 0 or more compute nodes for a Non-HA Deployment) will be
 defined for the target OPNFV deployment. All overcloud VMs are registered
 with a Virtual BMC emulator which will service power management (IPMI)
 commands. The overcloud VMs are still provisioned with the same disk images
-and configuration that baremetal would use.
+and configuration that baremetal would use. Using 0 compute nodes for a
+virtual deployment will automatically deploy "all-in-one" nodes, which means
+the compute will run alongside the controller in a single overcloud node.
+Specifying 3 control nodes will result in a highly-available service model.
 
 To Triple-O these nodes look like they have just built and registered the same
 way as bare metal nodes, the main difference is the use of a libvirt driver for
@@ -67,7 +70,7 @@ environment will deploy with the following architecture:
 
   - 1 undercloud VM
 
   - The option of 3 control and 2 or more compute VMs (HA Deploy / default)
-    or 1 control and 1 or more compute VM (Non-HA deploy / pass -n)
+    or 1 control and 0 or more compute VMs (Non-HA deploy)
 
   - 1-5 networks: provisioning, private tenant networking, external, storage
     and internal API. The API, storage and tenant networking networks can be
@@ -83,7 +86,8 @@ Follow the steps below to execute:
    password: 'opnfvapex'. It is also useful in some cases to surround the
    deploy command with ``nohup``. For example:
    ``nohup <deploy command> &``, will allow a deployment to continue even if
-   ssh access to the Jump Host is lost during deployment.
+   ssh access to the Jump Host is lost during deployment. By specifying
+   ``--virtual-computes 0``, the deployment will proceed as all-in-one.
 
 2. It will take approximately 45 minutes to an hour to stand up undercloud,
    define the target virtual machines, configure the deployment and execute