20 files changed, 411 insertions, 34 deletions
diff --git a/.gitignore b/.gitignore
index c8064d486..4e90f3248 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,14 +2,14 @@
 .cache*
 .project
 .pydevproject
-ci/config/
-deploy/autodeploy.log
+**/ci/config/
+**/deploy/autodeploy.log
 *~
 .*.sw?
-/docs_build/
-/docs_output/
-/releng/
-mcp/deploy/images/
-mcp/scripts/mcp.rsa*
-mcp/scripts/user-data.sh
-mcp/scripts/net_mcpcontrol.xml
+**/docs_build/
+**/docs_output/
+**/releng/
+**/mcp/deploy/images/
+**/mcp/scripts/mcp.rsa*
+**/mcp/scripts/user-data.sh
+**/mcp/scripts/net_mcpcontrol.xml
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 80667e815..0f4239608 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -38,7 +38,7 @@ $(notify "USAGE:" 2)
 $(basename "$0") -b base-uri -l lab-name -p pod-name -s deploy-scenario \\
   [-B PXE Bridge [-B Mgmt Bridge [-B Internal Bridge [-B Public Bridge]]]] \\
   [-S storage-dir] [-L /path/to/log/file.tar.gz] \\
-  [-f [-f]] [-F] [-e] [-d] [-D]
+  [-f[f]] [-F] [-e | -E[E]] [-d] [-D]
 
 $(notify "OPTIONS:" 2)
   -b  Base-uri for the stack-configuration structure
@@ -46,6 +46,7 @@ $(notify "OPTIONS:" 2)
   -d  Dry-run
   -D  Debug logging
   -e  Do not launch environment deployment
+  -E  Remove existing VCP VMs (use twice to redeploy baremetal nodes)
   -f  Deploy on existing Salt master (use twice to also skip config sync)
   -F  Do only create a Salt master
   -h  Print this message and exit
@@ -82,6 +83,10 @@ $(notify "Input parameters to the build script are:" 2)
 -d Dry-run - Produce deploy config files, but do not execute deploy
 -D Debug logging - Enable extra logging in sh deploy scripts (set -x)
 -e Do not launch environment deployment
+-E Remove existing VCP VMs. It will destroy and undefine all VCP VMs
+   currently defined on cluster KVM nodes. If specified twice (e.g. -E -E),
+   baremetal nodes (VCP too, implicitly) will be removed, then reprovisioned.
+   Only applicable for baremetal deploys.
 -f Deploy on existing Salt master. It will skip infrastructure VM creation,
    but it will still sync reclass configuration from current repo to Salt
    Master node. If specified twice (e.g. -f -f), config sync will also be
@@ -150,6 +155,7 @@ DRY_RUN=${DRY_RUN:-0}
 USE_EXISTING_INFRA=${USE_EXISTING_INFRA:-0}
 INFRA_CREATION_ONLY=${INFRA_CREATION_ONLY:-0}
 NO_DEPLOY_ENVIRONMENT=${NO_DEPLOY_ENVIRONMENT:-0}
+ERASE_ENV=${ERASE_ENV:-0}
 
 source "${DEPLOY_DIR}/globals.sh"
 
@@ -162,7 +168,7 @@ source "${DEPLOY_DIR}/globals.sh"
 
 # set +x
 OPNFV_BRIDGE_IDX=0
-while getopts "b:B:dDfFl:L:p:s:S:he" OPTION
+while getopts "b:B:dDfEFl:L:p:s:S:he" OPTION
 do
   case $OPTION in
     b)
@@ -200,6 +206,9 @@ do
     e)
       NO_DEPLOY_ENVIRONMENT=1
       ;;
+    E)
+      ((ERASE_ENV+=1))
+      ;;
     l)
       TARGET_LAB=${OPTARG}
       ;;
@@ -423,8 +432,9 @@ else
     for state in "${cluster_states[@]}"; do
       notify "[STATE] Applying state: ${state}\n" 2
       # shellcheck disable=SC2086,2029
-      wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} \
-        sudo /root/fuel/mcp/config/states/${state}"
+      wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} sudo \
+        CI_DEBUG=$CI_DEBUG ERASE_ENV=$ERASE_ENV \
+        /root/fuel/mcp/config/states/${state}"
     done
 fi
diff --git a/docs/release/installation/installation.instruction.rst b/docs/release/installation/installation.instruction.rst
index b27963055..502c75097 100644
--- a/docs/release/installation/installation.instruction.rst
+++ b/docs/release/installation/installation.instruction.rst
@@ -249,7 +249,7 @@ In this figure there are examples of two virtual deploys:
    the deploy script will skip creating a virsh bridge for it
 
 **Note**: A virtual network "mcpcontrol" is always created.
 For virtual deploys, "mcpcontrol" is also used
-for PXE, leaving the PXE bridge unused.
+for Admin, leaving the PXE/Admin bridge unused.
 
 
 Automatic Installation of a Baremetal POD
diff --git a/docs/release/userguide/img/horizon_login.png b/docs/release/userguide/img/horizon_login.png
new file mode 100644
index 000000000..641ca6c6a
--- /dev/null
+++ b/docs/release/userguide/img/horizon_login.png
Binary files differ
diff --git a/docs/release/userguide/img/salt_services_ip.png b/docs/release/userguide/img/salt_services_ip.png
new file mode 100644
index 000000000..504beb3e2
--- /dev/null
+++ b/docs/release/userguide/img/salt_services_ip.png
Binary files differ
diff --git a/docs/release/userguide/img/saltstack.png b/docs/release/userguide/img/saltstack.png
new file mode 100644
index 000000000..d57452c65
--- /dev/null
+++ b/docs/release/userguide/img/saltstack.png
Binary files differ
diff --git a/docs/release/userguide/index.rst b/docs/release/userguide/index.rst
new file mode 100644
index 000000000..d4330d08c
--- /dev/null
+++ b/docs/release/userguide/index.rst
@@ -0,0 +1,18 @@
+.. _fuel-userguide:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+.. _fuel-release-userguide-label:
+
+**************************
+User guide for Fuel\@OPNFV
+**************************
+
+.. toctree::
+   :numbered:
+   :maxdepth: 2
+
+   userguide.rst
+
diff --git a/docs/release/userguide/userguide.rst b/docs/release/userguide/userguide.rst
new file mode 100644
index 000000000..f00e66357
--- /dev/null
+++ b/docs/release/userguide/userguide.rst
@@ -0,0 +1,267 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+========
+Abstract
+========
+
+This document contains details about how to use OPNFV Fuel - Euphrates
+release - after it was deployed. For details on how to deploy it, check the
+installation instructions in the :ref:`references` section.
+
+This is a unified documentation for both x86_64 and aarch64
+architectures. All information is common for both architectures
+except when explicitly stated.
+
+
+
+================
+Network Overview
+================
+
+Fuel uses several networks to deploy and administer the cloud:
+
++------------------+-------------------+---------------------------------------------------------+
+| Network name     | Deploy Type       | Description                                             |
+|                  |                   |                                                         |
++==================+===================+=========================================================+
+| **PXE/ADMIN**    | baremetal only    | Used for booting the nodes via PXE                      |
++------------------+-------------------+---------------------------------------------------------+
+| **MCPCONTROL**   | baremetal &       | Used to provision the infrastructure VMs (Salt & MaaS). |
+|                  | virtual           | On virtual deploys, it is used for Admin too (on target |
+|                  |                   | VMs), leaving the PXE/Admin bridge unused               |
++------------------+-------------------+---------------------------------------------------------+
+| **Mgmt**         | baremetal &       | Used for internal communication between                 |
+|                  | virtual           | OpenStack components                                    |
++------------------+-------------------+---------------------------------------------------------+
+| **Internal**     | baremetal &       | Used for VM data communication within the               |
+|                  | virtual           | cloud deployment                                        |
++------------------+-------------------+---------------------------------------------------------+
+| **Public**       | baremetal &       | Used to provide Virtual IPs for public endpoints        |
+|                  | virtual           | that are used to connect to OpenStack services APIs.    |
+|                  |                   | Used by Virtual machines to access the Internet         |
++------------------+-------------------+---------------------------------------------------------+
+
+
+These networks - except mcpcontrol - can be Linux bridges configured on the
+Jumpserver before the deploy. If they do not exist at deploy time, they will
+be created by the scripts as virsh networks.
+
+Mcpcontrol exists only on the Jumpserver and needs to be virtual because a
+DHCP server runs on this network and associates static host entry IPs for
+the Salt and MaaS VMs.
+
+
+
+===================
+Accessing the Cloud
+===================
+
+Access to any component of the deployed cloud is done from the Jumpserver, as
+user *ubuntu*, using the ssh key */var/lib/opnfv/mcp.rsa*. The example below
+is a connection to the Salt master.
+
+    .. code-block:: bash
+
+        $ ssh -o StrictHostKeyChecking=no -i /var/lib/opnfv/mcp.rsa -l ubuntu 10.20.0.2
+
+**Note**: The Salt master IP is not hardcoded; it is configurable via
+INSTALLER_IP during deployment.
+
+
+The Fuel baremetal deploy has a Virtualized Control Plane (VCP), which means
+that the controller services are installed in VMs on the baremetal targets
+(kvm servers). These VMs can also be accessed with virsh console: user
+*opnfv*, password *opnfv_secret*. This method does not apply to
+infrastructure VMs (Salt master and MaaS).
+
+The example below is a connection to a controller VM. The connection is made
+from the baremetal server kvm01.
+
+    .. code-block:: bash
+
+        $ ssh -o StrictHostKeyChecking=no -i /var/lib/opnfv/mcp.rsa -l ubuntu x.y.z.141
+        ubuntu@kvm01:~$ virsh console ctl01
+
+User *ubuntu* has sudo rights. User *opnfv* has sudo rights only on aarch64 deploys.
+
+
+=============================
+Exploring the Cloud with Salt
+=============================
+
+To gather information about the cloud, the salt commands can be used. Salt is
+built around a master-minion model, where the salt-master pushes configuration
+to the minions and tells them which actions to execute.
+
+For example, tell salt to execute a ping to 8.8.8.8 on all the nodes:
+
+.. figure:: img/saltstack.png
+
+Complex filters can be applied to the target, like compound queries or node
+roles. For more information about Salt see the :ref:`references` section.
+
+Some examples are listed below. Note that these commands are issued from the
+Salt master as the *root* user.
+
+
+#. View the IPs of all the components
+
+    .. code-block:: bash
+
+        root@cfg01:~$ salt "*" network.ip_addrs
+        cfg01.baremetal-mcp-ocata-odl-ha.local:
+            - 10.20.0.2
+            - 172.16.10.100
+        mas01.baremetal-mcp-ocata-odl-ha.local:
+            - 10.20.0.3
+            - 172.16.10.3
+            - 192.168.11.3
+        .........................
+
+
+#. View the interfaces of all the components and put the output in a YAML file
+
+    .. code-block:: bash
+
+        root@cfg01:~$ salt "*" network.interfaces --out yaml --output-file interfaces.yaml
+        root@cfg01:~# cat interfaces.yaml
+        cfg01.baremetal-mcp-ocata-odl-ha.local:
+          enp1s0:
+            hwaddr: 52:54:00:72:77:12
+            inet:
+            - address: 10.20.0.2
+              broadcast: 10.20.0.255
+              label: enp1s0
+              netmask: 255.255.255.0
+            inet6:
+            - address: fe80::5054:ff:fe72:7712
+              prefixlen: '64'
+              scope: link
+            up: true
+        .........................
+
+
+#. View installed packages on the MaaS node
+
+    .. code-block:: bash
+
+        root@cfg01:~# salt "mas*" pkg.list_pkgs
+        mas01.baremetal-mcp-ocata-odl-ha.local:
+            ----------
+            accountsservice:
+                0.6.40-2ubuntu11.3
+            acl:
+                2.2.52-3
+            acpid:
+                1:2.0.26-1ubuntu2
+            adduser:
+                3.113+nmu3ubuntu4
+            anerd:
+                1
+        .........................
+
+
+#. Execute any Linux command on all nodes (list the content of */var/log* in this example)
+
+    .. code-block:: bash
+
+        root@cfg01:~# salt "*" cmd.run 'ls /var/log'
+        cfg01.baremetal-mcp-ocata-odl-ha.local:
+            alternatives.log
+            apt
+            auth.log
+            boot.log
+            btmp
+            cloud-init-output.log
+            cloud-init.log
+        .........................
+
+
+#. Execute any Linux command on nodes using compound queries filter
+
+    .. code-block:: bash
+
+        root@cfg01:~# salt -C '* and cfg01*' cmd.run 'ls /var/log'
+        cfg01.baremetal-mcp-ocata-odl-ha.local:
+            alternatives.log
+            apt
+            auth.log
+            boot.log
+            btmp
+            cloud-init-output.log
+            cloud-init.log
+        .........................
+
+
+#. Execute any Linux command on nodes using role filter
+
+    .. code-block:: bash
+
+        root@cfg01:~# salt -I 'nova:compute' cmd.run 'ls /var/log'
+        cmp001.baremetal-mcp-ocata-odl-ha.local:
+            alternatives.log
+            apache2
+            apt
+            auth.log
+            btmp
+            ceilometer
+            cinder
+            cloud-init-output.log
+            cloud-init.log
+        .........................
+
+
+
+===================
+Accessing OpenStack
+===================
+
+Once the deployment is complete, the OpenStack CLI is accessible from the
+controller VMs (ctl01..03). OpenStack credentials are at */root/keystonercv3*.
+
+    .. code-block:: bash
+
+        root@ctl01:~# source keystonercv3
+        root@ctl01:~# openstack image list
+        +--------------------------------------+-----------------------------------------------+--------+
+        | ID                                   | Name                                          | Status |
+        +======================================+===============================================+========+
+        | 152930bf-5fd5-49c2-b3a1-cae14973f35f | CirrosImage                                   | active |
+        | 7b99a779-78e4-45f3-9905-64ae453e3dcb | Ubuntu16.04                                   | active |
+        +--------------------------------------+-----------------------------------------------+--------+
+
+
+The OpenStack Dashboard, Horizon, is available at http://<controller VIP>:8078,
+e.g. http://10.16.0.101:8078. The administrator credentials are *admin*/*opnfv_secret*.
+
+.. figure:: img/horizon_login.png
+
+
+A full list of IPs/services is available at <proxy public VIP>:8090 for baremetal deploys.
+
+.. figure:: img/salt_services_ip.png
+
+For virtual deploys, the most commonly used IPs are in the table below.
+
++-----------+--------------+---------------+
+| Component | IP pattern   | Default value |
++===========+==============+===============+
+| gtw01     | x.y.z.110    | 172.16.10.110 |
++-----------+--------------+---------------+
+| ctl01     | x.y.z.100    | 172.16.10.100 |
++-----------+--------------+---------------+
+| cmp001    | x.y.z.105    | 172.16.10.105 |
++-----------+--------------+---------------+
+| cmp002    | x.y.z.106    | 172.16.10.106 |
++-----------+--------------+---------------+
+
+
+.. _references:
+
+==========
+References
+==========
+
+1) `Installation instructions <http://docs.opnfv.org/en/stable-euphrates/submodules/fuel/docs/release/installation/installation.instruction.html>`_
+2) `Saltstack Documentation <https://docs.saltstack.com/en/latest/topics>`_
+3) `Saltstack Formulas <http://salt-formulas.readthedocs.io/en/latest/develop/overview-reclass.html>`_
diff --git a/mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml b/mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml
index 0d47682c1..1766f9700 100644
--- a/mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml
+++ b/mcp/config/scenario/baremetal/os-nosdn-ovs-ha.yaml
@@ -13,7 +13,6 @@ cluster:
     - virtual_control_plane
     - dpdk
     - openstack_ha
-    - neutron_compute
     - networks
 virtual:
   nodes:
diff --git a/mcp/config/states/maas b/mcp/config/states/maas
index eea3e0ef6..7ccf0188e 100755
--- a/mcp/config/states/maas
+++ b/mcp/config/states/maas
@@ -8,6 +8,7 @@
 ##############################################################################
 CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
+ERASE_ENV=${ERASE_ENV:-0}
 
 # shellcheck disable=SC1090
 source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
 
@@ -52,6 +53,17 @@ function maas_fixup() {
   return 0
 }
 
+# Optionally destroy MaaS machines from a previous run
+if [ "${ERASE_ENV}" -gt 1 ]; then
+  dnodes=$(salt 'mas01*' --out yaml state.apply maas.machines.status | \
+    grep -Pzo '\s+system_id: \K.+\n')
+  for node_system_id in ${dnodes}; do
+    salt -C 'mas01*' state.apply maas.machines.delete \
+      pillar="{'system_id': '${node_system_id}'}"
+    sleep 30
+  done
+fi
+
 # MaaS rack/region controller, node commissioning
 salt -C 'mas01*' cmd.run "add-apt-repository ppa:maas/stable"
diff --git a/mcp/config/states/neutron_compute b/mcp/config/states/neutron_compute
index f1285479e..c6b90e20d 100755
--- a/mcp/config/states/neutron_compute
+++ b/mcp/config/states/neutron_compute
@@ -9,6 +9,4 @@
 CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
 
-salt -I 'neutron:compute' state.sls neutron || true
-salt -I 'neutron:compute' file.append /etc/sudoers.d/neutron_sudoers \
-    args='neutron ALL = (root) NOPASSWD: /usr/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf'
+salt -I 'neutron:compute' state.sls neutron
diff --git a/mcp/config/states/virtual_control_plane b/mcp/config/states/virtual_control_plane
index cfd5e421c..c355126f7 100755
--- a/mcp/config/states/virtual_control_plane
+++ b/mcp/config/states/virtual_control_plane
@@ -8,10 +8,25 @@
 ##############################################################################
 CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
+ERASE_ENV=${ERASE_ENV:-0}
 
 # shellcheck disable=SC1090
 source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
 
+# Optionally destroy VCP VMs from a previous run
+if [ "${ERASE_ENV}" -eq 1 ]; then
+  kvm_vms=$(salt --out yaml 'kvm*' virt.list_domains | \
+    sed -e 's/- //g' -e 's/:.*$//g')
+  for line in ${kvm_vms}; do
+    if [[ "${line}" =~ ^kvm ]]; then
+      kvm_node=${line}
+    elif [ -n "${kvm_node}" ]; then
+      salt "${kvm_node}" virt.purge dirs=True "${line}" || true
+    fi
+  done
+fi
+
+# KVM, compute node prereqs (libvirt first), VCP deployment
 # patch the networking module for Debian based distros
 debian_ip_source=/usr/lib/python2.7/dist-packages/salt/modules/debian_ip.py
 salt -C 'kvm* or cmp*' file.line $debian_ip_source \
diff --git a/mcp/patches/0001-opendaylight-formula-neutron.patch b/mcp/patches/0001-opendaylight-formula-neutron.patch
index 157271aa3..8d02cd992 100644
--- a/mcp/patches/0001-opendaylight-formula-neutron.patch
+++ b/mcp/patches/0001-opendaylight-formula-neutron.patch
@@ -60,7 +60,7 @@ index b61e313..02da3b1 100644
 +password = {{ server.backend.password }}
 +{%- endif %}
  diff --git a/neutron/files/ocata/neutron-generic.conf.Debian b/neutron/files/ocata/neutron-generic.conf.Debian
-index 123386d..85b8076 100644
+index 123386d..d77f6c8 100644
  --- a/neutron/files/ocata/neutron-generic.conf.Debian
  +++ b/neutron/files/ocata/neutron-generic.conf.Debian
  @@ -37,7 +37,7 @@ auth_strategy = keystone
@@ -72,6 +72,15 @@ index 123386d..85b8076 100644
  {% endif %}
+@@ -668,7 +668,7 @@ root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+ # needs to execute commands in Dom0 in the hypervisor of XenServer, this item
+ # should be set to 'xenapi_root_helper', so that it will keep a XenAPI session
+ # to pass commands to Dom0. (string value)
+-root_helper_daemon = sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
++#root_helper_daemon = <None>
+ 
+ # Seconds between nodes reporting state to server; should be less than
+ # agent_down_time, best if it is half or less than agent_down_time. (floating
 @@ -2092,3 +2092,8 @@ heartbeat_rate = 2
 # Sets the list of available ciphers. value should be a string in the OpenSSL
 # cipher list format. (string value)
@@ -82,7 +91,7 @@ index 123386d..85b8076 100644
 +ovsdb_connection = {{ neutron.backend.ovsdb_connection }}
 +{%- endif %}
  diff --git a/neutron/files/ocata/neutron-server.conf.Debian b/neutron/files/ocata/neutron-server.conf.Debian
-index 79376a2..c9630b8 100644
+index 79376a2..a7a4645 100644
  --- a/neutron/files/ocata/neutron-server.conf.Debian
  +++ b/neutron/files/ocata/neutron-server.conf.Debian
  @@ -50,7 +50,7 @@ core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
@@ -94,6 +103,15 @@ index 79376a2..c9630b8 100644
  {%- if server.lbaas is defined -%},lbaasv2{%- endif -%}
  {%- if fwaas.get('enabled', False) -%},{{ fwaas[fwaas.api_version]['service_plugin'] }}{%- endif -%}
  {%- if server.get('qos', 'True') -%},neutron.services.qos.qos_plugin.QoSPlugin{%- endif -%}
+@@ -703,7 +703,7 @@ root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+ # needs to execute commands in Dom0 in the hypervisor of XenServer, this item
+ # should be set to 'xenapi_root_helper', so that it will keep a XenAPI session
+ # to pass commands to Dom0. (string value)
+-root_helper_daemon = sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
++#root_helper_daemon = <None>
+ 
+ # Seconds between nodes reporting state to server; should be less than
+ # agent_down_time, best if it is half or less than agent_down_time. (floating
 @@ -2245,3 +2245,8 @@ username = {{ server.identity.user }}
 password = {{ server.identity.password }}
 auth_url=http://{{ server.identity.host }}:35357
diff --git a/mcp/patches/patch.sh b/mcp/patches/patch.sh
index ca5c14111..1da3bc597 100755
--- a/mcp/patches/patch.sh
+++ b/mcp/patches/patch.sh
@@ -14,7 +14,7 @@ if [ -r "$1" ]; then
     if [[ ! "${p_dest}" =~ '^#' ]] && [[ "${p_dest}" =~ $2 ]] && \
       ! patch --dry-run -Rd "${p_dest}" -r - -s -p1 < \
       "/root/fuel/mcp/patches/${p_file}" > /dev/null; then
-      patch -fd "${p_dest}" -p1 < "/root/fuel/mcp/patches/${p_file}"
+      patch -d "${p_dest}" -p1 < "/root/fuel/mcp/patches/${p_file}"
     fi
   done < "$1"
 fi
diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-common/openstack_compute.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-common/openstack_compute.yml
index a78850fc0..7163a8108 100644
--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-common/openstack_compute.yml
+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-common/openstack_compute.yml
@@ -7,7 +7,6 @@
 ##############################################################################
 ---
 classes:
-  - system.linux.system.repo.mcp.openstack
   - system.linux.system.repo.mcp.extra
   - system.linux.storage.loopback
   - system.glusterfs.client.cluster
diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-odl-ha/openstack/compute.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-odl-ha/openstack/compute.yml
index fb78d3467..4418b0fde 100644
--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-odl-ha/openstack/compute.yml
+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-odl-ha/openstack/compute.yml
@@ -7,6 +7,7 @@
 ##############################################################################
 ---
 classes:
+  - system.linux.system.repo.mcp.openstack
   - cluster.baremetal-mcp-ocata-common.openstack_compute
   - cluster.baremetal-mcp-ocata-odl-ha.openstack.compute_init
   - cluster.baremetal-mcp-ocata-odl-ha.infra
diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-dpdk-ha/openstack/compute.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-dpdk-ha/openstack/compute.yml
index aa6b4e34f..e610dc930 100644
--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-dpdk-ha/openstack/compute.yml
+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-dpdk-ha/openstack/compute.yml
@@ -7,6 +7,7 @@
 ##############################################################################
 ---
 classes:
+  - system.linux.system.repo.mcp.openstack
   - cluster.baremetal-mcp-ocata-common.openstack_compute
   - cluster.baremetal-mcp-ocata-ovs-dpdk-ha.infra
   - system.neutron.compute.nfv.dpdk
diff --git a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/compute.yml b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/compute.yml
index 128abb0e5..9ed3a80ea 100644
--- a/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/compute.yml
+++ b/mcp/reclass/classes/cluster/baremetal-mcp-ocata-ovs-ha/openstack/compute.yml
@@ -10,3 +10,16 @@ classes:
   - cluster.baremetal-mcp-ocata-common.openstack_compute
   - cluster.baremetal-mcp-ocata-ovs-ha.openstack.compute_init
   - cluster.baremetal-mcp-ocata-ovs-ha.infra
+parameters:
+  nova:
+    compute:
+      libvirt_service: libvirtd
+      libvirt_bin: /etc/default/libvirtd
+  linux:
+    system:
+      repo:
+        uca:
+          source: "deb http://ubuntu-cloud.archive.canonical.com/ubuntu xenial-updates/ocata main"
+          architectures: amd64
+          key_id: EC4926EA
+          key_server: keyserver.ubuntu.com
diff --git a/mcp/reclass/classes/system b/mcp/reclass/classes/system
-Subproject 4b335b98fae73d59af0eb4bf81e33369d55a94c
+Subproject 4edd3b5eec839a06a3369c8c0d47bbb79681e17
diff --git a/mcp/scripts/salt.sh b/mcp/scripts/salt.sh
index a03d25fc4..ab096f3ee 100755
--- a/mcp/scripts/salt.sh
+++ b/mcp/scripts/salt.sh
@@ -12,19 +12,27 @@ CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
 
 F_GIT_ROOT=$(git rev-parse --show-toplevel)
-OPNFV_TMP_DIR="/home/${SALT_MASTER_USER}/fuel"
+F_GIT_DIR=$(cd "${F_GIT_ROOT}/mcp" && git rev-parse --git-dir)
+F_GIT_SUBD=${F_GIT_ROOT#${F_GIT_DIR%%/.git*}}
+OPNFV_TMP_DIR="/home/${SALT_MASTER_USER}/opnfv"
+OPNFV_GIT_DIR="/root/opnfv"
 OPNFV_FUEL_DIR="/root/fuel"
 OPNFV_RDIR="reclass/classes/cluster/all-mcp-ocata-common"
+LOCAL_GIT_DIR="${F_GIT_ROOT%${F_GIT_SUBD}}"
 LOCAL_PDF_RECLASS=$1
+NODE_MASK='*'
 
-# push to cfg01 current git repo first (including submodules), at ~ubuntu/fuel
-# later we move it to ~root/fuel and delete the temporary clone
+[[ "${CLUSTER_DOMAIN}" =~ virtual ]] || NODE_MASK='mas01*'
+
+# push to cfg01 current git repo first (including submodules), at ~ubuntu/opnfv
+# later we move it to ~root/opnfv (and ln as ~root/fuel); delete the temp clone
+remote_tmp="${SSH_SALT}:$(basename "${OPNFV_TMP_DIR}")"
 rsync -Erl --delete -e "ssh ${SSH_OPTS}" \
   --exclude-from="${F_GIT_ROOT}/.gitignore" \
-  "${F_GIT_ROOT}/" "${SSH_SALT}:$(basename "${OPNFV_TMP_DIR}")/"
+  "${LOCAL_GIT_DIR}/" "${remote_tmp}/"
 if [ -n "${LOCAL_PDF_RECLASS}" ] && [ -f "${LOCAL_PDF_RECLASS}" ]; then
   rsync -e "ssh ${SSH_OPTS}" "${LOCAL_PDF_RECLASS}" \
-    "${SSH_SALT}:$(basename "${OPNFV_TMP_DIR}")/mcp/${OPNFV_RDIR}/opnfv/"
+    "${remote_tmp}${F_GIT_SUBD}/mcp/${OPNFV_RDIR}/opnfv/"
 fi
 
 # ssh to cfg01
@@ -32,14 +40,19 @@ fi
 ssh ${SSH_OPTS} "${SSH_SALT}" bash -s -e << SALT_INSTALL_END
   sudo -i
   set -e
+  export CI_DEBUG=${CI_DEBUG}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
   echo -n 'Checking out cloud-init has finished running ...'
   while [ ! -f /var/lib/cloud/instance/boot-finished ]; do echo -n '.'; sleep 1; done
   echo ' done'
 
   mkdir -p /srv/salt /usr/share/salt-formulas/reclass
-  rm -rf ${OPNFV_FUEL_DIR}
-  mv ${OPNFV_TMP_DIR} ${OPNFV_FUEL_DIR} && chown -R root.root ${OPNFV_FUEL_DIR}
+  rm -rf ${OPNFV_GIT_DIR}
+  mv ${OPNFV_TMP_DIR} ${OPNFV_GIT_DIR} && chown -R root.root ${OPNFV_GIT_DIR}
+  find ${OPNFV_GIT_DIR} -name '.git' -type f | while read f_git; do
+    sed -i 's@${LOCAL_GIT_DIR}@${OPNFV_GIT_DIR}@g' \$f_git
+  done
+  ln -sf ${OPNFV_GIT_DIR}${F_GIT_SUBD} ${OPNFV_FUEL_DIR}
   ln -sf ${OPNFV_FUEL_DIR}/mcp/reclass /srv/salt
   ln -sf ${OPNFV_FUEL_DIR}/mcp/deploy/scripts /srv/salt
   cd /srv/salt/${OPNFV_RDIR} && rm -f arch && ln -sf "\$(uname -i)" arch
@@ -50,6 +63,7 @@ ssh ${SSH_OPTS} "${SSH_SALT}" bash -s -e << SALT_INSTALL_END
 
   cd /srv/salt/scripts
   export DEBIAN_FRONTEND=noninteractive
+  OLD_DOMAIN=\$(grep -Pzo "id: cfg01\.\K(\S*)" /etc/salt/minion.d/minion.conf) || true
   BOOTSTRAP_SALTSTACK_OPTS=" -r -dX stable 2016.11 " \
     MASTER_HOSTNAME=cfg01.${CLUSTER_DOMAIN} DISTRIB_REVISION=nightly \
     EXTRA_FORMULAS="nfs" \
@@ -61,12 +75,24 @@ ssh ${SSH_OPTS} "${SSH_SALT}" bash -s -e << SALT_INSTALL_END
 
   cd ${OPNFV_FUEL_DIR}/mcp/patches && ./patch.sh patches.list reclass
 
   salt-call state.apply salt
-  salt '*' saltutil.sync_all
-  salt '*' state.apply salt | grep -Fq 'No response' && salt '*' state.apply salt
+
+  # In case scenario changed (and implicitly domain name), re-register minions
+  if [ -n "\${OLD_DOMAIN}" ] && [ "\${OLD_DOMAIN}" != "${CLUSTER_DOMAIN}" ]; then
+    salt "*.\${OLD_DOMAIN}" cmd.run "grep \${OLD_DOMAIN} -Rl /etc/salt | \
+      xargs --no-run-if-empty sed -i 's/\${OLD_DOMAIN}/${CLUSTER_DOMAIN}/g'; \
+      service salt-minion restart" || true
+    salt-key -yd "*.\${OLD_DOMAIN}"
+    salt-key -Ay
+  fi
+
+  # Init specific to VMs on FN (all for virtual, cfg|mas for baremetal)
+  salt -C "${NODE_MASK} or cfg01*" saltutil.sync_all
+  salt -C "${NODE_MASK} or cfg01*" state.apply salt | \
+    grep -Fq 'No response' && salt -C "${NODE_MASK} or cfg01*" state.apply salt
 
   salt -C 'I@salt:master' state.sls linux
-  salt -C '* and not cfg01*' state.sls linux || true
-  salt -C '* and not cfg01*' pkg.upgrade refresh=False
+  salt -C "${NODE_MASK} and not cfg01*" state.sls linux || true
+  salt -C "${NODE_MASK} and not cfg01*" pkg.upgrade refresh=False
 
-  salt '*' state.sls ntp
+  salt -C "${NODE_MASK} or cfg01*" state.sls ntp
 SALT_INSTALL_END
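
Taken together, the ci/deploy.sh changes above enable erase-and-redeploy runs. As a usage sketch only (the base URI, lab name and pod name below are hypothetical placeholders, not values taken from this change; the scenario is the one touched by this diff):

    # Sketch: one -E purges existing VCP VMs before deploying; a second -E
    # also deletes the MaaS machine entries so baremetal nodes get
    # reprovisioned. Both forms apply to baremetal deploys only.
    ci/deploy.sh -b file:///home/ubuntu/securedlab \
                 -l my-lab -p my-pod \
                 -s os-nosdn-ovs-ha \
                 -E -E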
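
The NODE_MASK change in mcp/scripts/salt.sh can be read the same way; a minimal sketch of how the compound targets expand, mirroring the lines added above (illustration only, not additional code from this change):

    NODE_MASK='*'                                   # virtual deploys: all minions
    [[ "${CLUSTER_DOMAIN}" =~ virtual ]] || NODE_MASK='mas01*'   # baremetal: mas01 only
    # every early state then targets the foundation-node VMs plus the Salt master:
    salt -C "${NODE_MASK} or cfg01*" state.sls ntp
    # baremetal expands to 'mas01* or cfg01*'; virtual expands to '* or cfg01*'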