-rw-r--r--  build/bash_completion_apex                    |  56
-rw-r--r--  build/rpm_specs/opnfv-apex-common.spec        |  10
-rw-r--r--  ci/PR_revision.log                            |   4
-rwxr-xr-x  ci/deploy.sh                                  |  19
-rwxr-xr-x  ci/util.sh                                    |  18
-rw-r--r--  config/deploy/deploy_settings.yaml            |  70
-rw-r--r--  docs/installationprocedure/architecture.rst   |  58
-rw-r--r--  docs/installationprocedure/baremetal.rst      |   3
-rw-r--r--  docs/installationprocedure/references.rst     |   2
-rw-r--r--  docs/releasenotes/release-notes.rst           | 219
-rwxr-xr-x  lib/overcloud-deploy-functions.sh             |  16
11 files changed, 390 insertions, 85 deletions
diff --git a/build/bash_completion_apex b/build/bash_completion_apex
new file mode 100644
index 00000000..b3c963e3
--- /dev/null
+++ b/build/bash_completion_apex
@@ -0,0 +1,56 @@
+# bash/zsh completion support for OPNFV Apex
+##############################################################################
+# Copyright (c) 2016 Dan Radez (Red Hat) and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Pieces of this script are derived from the git bash completion script
+
+___main () {
+    local cur prev opts
+    COMPREPLY=()
+    cur="${COMP_WORDS[COMP_CWORD]}"
+    prev="${COMP_WORDS[COMP_CWORD-1]}"
+    opts=" -h $(${COMP_WORDS[0]} -h | grep -Eo '^ [^ ]+')"
+    if [[ ! $opts =~ $prev ]]; then
+        COMPREPLY=($(compgen -W "${opts}" -- ${cur}))
+    fi
+}
+
+# these functions are setup like this in the thought that
+# deploy and util will eventually diverge from each other
+# for now they can use the same main logic so it's just
+# abstracted to another function
+__deploy_main () {
+    ___main
+}
+
+
+__util_main () {
+    ___main
+}
+
+
+__apex_func_wrap () {
+    local cur words cword prev
+    _get_comp_words_by_ref -n =: cur words cword prev
+    $1
+}
+
+# Setup function for bash completion
+__apex_complete () {
+    local wrapper="__apex_wrap${2}"
+    eval "$wrapper () { __apex_func_wrap $2 ; }"
+    complete -o bashdefault -o default -o nospace -F $wrapper $1 2>/dev/null \
+        || complete -o default -o nospace -F $wrapper $1
+}
+
+# run completion setup
+__apex_complete ./deploy.sh   __deploy_main
+__apex_complete opnfv-deploy  __deploy_main
+__apex_complete ./util.sh     __util_main
+__apex_complete opnfv-util    __util_main
diff --git a/build/rpm_specs/opnfv-apex-common.spec b/build/rpm_specs/opnfv-apex-common.spec
index 8fd241b4..6ad5782f 100644
--- a/build/rpm_specs/opnfv-apex-common.spec
+++ b/build/rpm_specs/opnfv-apex-common.spec
@@ -23,7 +23,7 @@ https://wiki.opnfv.org/apex
 
 %build
 rst2html docs/installationprocedure/index.rst docs/installation-instructions.html
-rst2html docs/release-notes/release-notes.rst docs/release-notes.html
+rst2html docs/releasenotes/release-notes.rst docs/release-notes.html
 
 %global __python %{__python3}
 
@@ -33,6 +33,9 @@ install ci/deploy.sh %{buildroot}%{_bindir}/opnfv-deploy
 install ci/clean.sh %{buildroot}%{_bindir}/opnfv-clean
 install ci/util.sh %{buildroot}%{_bindir}/opnfv-util
 
+mkdir -p %{buildroot}%{_sysconfdir}/bash_completion.d/
+install build/bash_completion_apex %{buildroot}%{_sysconfdir}/bash_completion.d/apex
+
 mkdir -p %{buildroot}%{_sysconfdir}/opnfv-apex/
 install config/deploy/os-nosdn-nofeature-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-noha.yaml
 install config/deploy/os-nosdn-fdio-noha.yaml %{buildroot}%{_sysconfdir}/opnfv-apex/os-nosdn-fdio-noha.yaml
@@ -81,7 +84,7 @@ install lib/installer/domain.xml %{buildroot}%{_var}/opt/opnfv/lib/installer/
 mkdir -p %{buildroot}%{_docdir}/opnfv/
 install LICENSE.rst %{buildroot}%{_docdir}/opnfv/
 install docs/installation-instructions.html %{buildroot}%{_docdir}/opnfv/
-install docs/release-notes/index.rst %{buildroot}%{_docdir}/opnfv/release-notes.rst
+install docs/releasenotes/index.rst %{buildroot}%{_docdir}/opnfv/release-notes.rst
 install docs/release-notes.html %{buildroot}%{_docdir}/opnfv/
 install config/deploy/deploy_settings.yaml %{buildroot}%{_docdir}/opnfv/deploy_settings.yaml.example
 install config/network/network_settings.yaml %{buildroot}%{_docdir}/opnfv/network_settings.yaml.example
@@ -105,6 +108,7 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
 %{python3_sitelib}/apex/
 %{_var}/opt/opnfv/lib/installer/onos/onos_gw_mac_update.sh
 %{_var}/opt/opnfv/lib/installer/domain.xml
+%{_sysconfdir}/bash_completion.d/apex
 %{_sysconfdir}/opnfv-apex/os-nosdn-nofeature-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-fdio-noha.yaml
 %{_sysconfdir}/opnfv-apex/os-nosdn-ovs-noha.yaml
@@ -132,6 +136,8 @@ install config/inventory/pod_example_settings.yaml %{buildroot}%{_docdir}/opnfv/
 %doc %{_docdir}/opnfv/inventory.yaml.example
 
 %changelog
+* Fri Sep 16 2016 Dan Radez <dradez@redhat.com> - 3.0-13
+- adding bash completion script
 * Tue Aug 30 2016 Tim Rozet <trozet@redhat.com> - 3.0-12
 - Add clean library.
 * Mon Aug 1 2016 Tim Rozet <trozet@redhat.com> - 3.0-11
diff --git a/ci/PR_revision.log b/ci/PR_revision.log
index 15787b95..f8b11a71 100644
--- a/ci/PR_revision.log
+++ b/ci/PR_revision.log
@@ -38,4 +38,6 @@
 76,Add networking-vpp ML2 mechanism driver
 77,Update FDIO to use opendaylight_v2 mechanism driver
 78,Fix spelling mistake in specs filter
-79,Fix controller and compute ip array
\ No newline at end of file
+79,Fix controller and compute ip array
+80,Change TenantNIC and PublicNIC to be role specific
+81,Fix duplicate NeutronServicePlugins
diff --git a/ci/deploy.sh b/ci/deploy.sh
index b3a518fc..cd90511c 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -78,11 +78,11 @@ done
 
 display_usage() {
   echo -e "Usage:\n$0 [arguments] \n"
-  echo -e "   -d|--deploy-settings : Full path to deploy settings yaml file. Optional. Defaults to null"
-  echo -e "   -i|--inventory : Full path to inventory yaml file. Required only for baremetal"
-  echo -e "   -n|--net-settings : Full path to network settings file. Optional."
-  echo -e "   -p|--ping-site : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8"
-  echo -e "   -v|--virtual : Virtualize overcloud nodes instead of using baremetal."
+  echo -e "   --deploy-settings | -d : Full path to deploy settings yaml file. Optional. Defaults to null"
+  echo -e "   --inventory | -i : Full path to inventory yaml file. Required only for baremetal"
+  echo -e "   --net-settings | -n : Full path to network settings file. Optional."
+  echo -e "   --ping-site | -p : site to use to verify IP connectivity. Optional. Defaults to 8.8.8.8"
+  echo -e "   --virtual | -v : Virtualize overcloud nodes instead of using baremetal."
   echo -e "   --flat : disable Network Isolation and use a single flat network for the underlay network."
   echo -e "   --no-post-config : disable Post Install configuration."
   echo -e "   --debug : enable debug output."
@@ -97,7 +97,6 @@ display_usage() {
 parse_cmdline() {
   echo -e "\n\n${blue}This script is used to deploy the Apex Installer and Provision OPNFV Target System${reset}\n\n"
   echo "Use -h to display help"
-  sleep 2
 
   while [ "${1:0:1}" = "-" ]
   do
@@ -172,6 +171,7 @@ parse_cmdline() {
       ;;
     esac
   done
+  sleep 2
 
   if [[ ! -z "$NETSETS" && "$net_isolation_enabled" == "FALSE" ]]; then
     echo -e "${red}INFO: Single flat network requested. Only admin_network settings will be used!${reset}"
@@ -226,11 +226,10 @@ main() {
     exit 1
   fi
   #Correct the time on the server prior to launching any VMs
-  ntpdate $ntp_server
-  if [ $? == 0 ]; then
+  if ntpdate $ntp_server; then
     hwclock --systohc
-  else
-    echo -e "${red} ERROR: ntpdate failed to update the time on the server. ${reset}"
+  else
+    echo "${blue}WARNING: ntpdate failed to update the time on the server. ${reset}"
   fi
   setup_undercloud_vm
   if [ "$virtual" == "TRUE" ]; then
diff --git a/ci/util.sh b/ci/util.sh
--- a/ci/util.sh
+++ b/ci/util.sh
@@ -23,19 +23,19 @@ resolve_cmd() {
 display_usage() {
   echo -e "Usage:\n$0 subcommand [ arguments ]\n"
   echo -e "Arguments:\n"
-  echo -e "   undercloud [ user [ command ] ]   Connect to Undercloud VM as user and optionally execute a command\n"
-  echo -e "   user      Optional: Defaults to 'stack'\n"
-  echo -e "   command   Optional: Defaults to none\n"
+  echo -e "   undercloud [ user [ command ] ]   Connect to Undercloud VM as user and optionally execute a command"
+  echo -e "   user      Optional: Defaults to 'stack'"
+  echo -e "   command   Optional: Defaults to none"
   echo -e ""
-  echo -e "   opendaylight                      Connect to OpenDaylight Karaf console\n"
+  echo -e "   opendaylight                      Connect to OpenDaylight Karaf console"
   echo -e ""
-  echo -e "   overcloud [ node [ command ] ]    Connect to an Overcloud node and optionally execute a command\n"
-  echo -e "   node      Required: in format controller|compute<number>. Example: controller0\n"
-  echo -e "   command   Optional: Defaults to none\n"
+  echo -e "   overcloud [ node [ command ] ]    Connect to an Overcloud node and optionally execute a command"
+  echo -e "   node      Required: in format controller|compute<number>. Example: controller0"
+  echo -e "   command   Optional: Defaults to none"
   echo -e ""
-  echo -e "   debug-stack                       Print parsed deployment failures to stdout \n"
+  echo -e "   debug-stack                       Print parsed deployment failures to stdout"
   echo -e ""
-  echo -e "   mock-detached on | off            Add firewall rules to the jump host to mock a detached deployment \n"
+  echo -e "   mock-detached on | off            Add firewall rules to the jump host to mock a detached deployment\n"
 }
 
 ##translates the command line argument
diff --git a/config/deploy/deploy_settings.yaml b/config/deploy/deploy_settings.yaml
index 82cc0811..e7821f18 100644
--- a/config/deploy/deploy_settings.yaml
+++ b/config/deploy/deploy_settings.yaml
@@ -1,10 +1,80 @@
+# The only global parameter at this time is ha_enabled, which will use
+# the tripleo ha architecture described here:
+# https://github.com/beekhof/osp-ha-deploy/blob/master/HA-keepalived.md
+# with 3 controllers by default
+#
+# If ha_enabled is false, there will only be one controller.
 global_params:
   ha_enabled: true
 
 deploy_options:
+  # Which SDN controller to use. Valid options are 'opendaylight', 'onos',
+  # 'opendaylight-external', 'opencontrail' or false. A value of false will
+  # use Neutron's OVS ML2 controller.
   sdn_controller: opendaylight
+
+  # Which version of ODL to use. This is only valid if 'opendaylight' was used
+  # above. If 'Boron' is specified, ODL Boron will be used. If no value is specified,
+  # Lithium will be used.
+  #odl_version: Boron
+
+  # Whether to configure ODL L3 support. This will disable the Neutron L3 Agent and
+  # use ODL instead.
   sdn_l3: false
+
+  # Whether to install and configure Tacker (VNF Manager)
   tacker: true
+
+  # Whether to configure Congress (policy as a service) datasources
+  # Note: Congress is already installed by default
   congress: false
+
+  # Whether to configure ODL or ONOS with Service Function Chaining support. This
+  # requires the opnfv-apex-opendaylight-sfc package to be installed, since it
+  # uses a different overcloud image.
   sfc: false
+
+  # Whether to configure ODL with SDNVPN support.
   vpn: false
+
+  # Which dataplane to use for overcloud tenant networks. Valid options are
+  # 'ovs', 'ovs_dpdk' and 'fdio'.
+  dataplane : ovs
+
+  # Whether to install and configure fdio functionality in the overcloud.
+  # The dataplane should be specified as fdio if this is set to true.
+  vpp: false
+
+  # Whether to run vsperf after the install has completed
+  #vsperf: false
+
+  # Set performance options on specific roles. The valid roles are 'Compute', 'Controller'
+  # and 'Storage', and the valid sections are 'kernel' and 'nova'
+  #performance:
+  #  Controller:
+  #    kernel:
+  #      # In this example, these three settings will be passed to the kernel boot line.
+  #      # Any key/value pair can be entered here, so care should be taken to ensure that machines
+  #      # do not fail to boot.
+  #      #
+  #      # isolcpus is generally used to push host processes off a particular core,
+  #      # so that it can be dedicated to a specific process. On control nodes
+  #      # this could be an ovs_dpdk process.
+  #      isolcpus: 1
+  #      # Hugepages are required for ovs_dpdk support.
+  #      hugepage: 2M
+  #      # intel_iommu is also required for ovs_dpdk support.
+  #      intel_iommu: 'on'
+  #  Compute:
+  #    nova:
+  #      # This is currently the only available option in the nova section. It will
+  #      # add the provided string to vcpu_pin_set in nova.conf. This is used to pin
+  #      # guest VMs to a set of CPU cores, and is described in more detail here:
+  #      # http://docs.openstack.org/mitaka/config-reference/compute/config-options.html
+  #      libvirtpin: 1
+  #    kernel:
+  #      # On compute nodes, isolcpus is usually used to reserve cores for use either by VMs
+  #      # or ovs_dpdk
+  #      isolcpus: 0
+  #      hugepage: 2M
+  #      intel_iommu: 'on'
diff --git a/docs/installationprocedure/architecture.rst b/docs/installationprocedure/architecture.rst
index c63da27c..f1861d0a 100644
--- a/docs/installationprocedure/architecture.rst
+++ b/docs/installationprocedure/architecture.rst
@@ -23,6 +23,64 @@
 The overcloud is OPNFV. Configuration will be passed into undercloud and
 the undercloud will use OpenStack's orchestration component, named Heat, to
 execute a deployment that will provision the target OPNFV nodes.
 
+Apex High Availability Architecture
+===================================
+
+Undercloud
+----------
+
+The undercloud is not Highly Available. End users do not depend on the
+undercloud. It is only for management purposes.
+
+Overcloud
+---------
+
+Apex will deploy three control nodes in an HA deployment. Each of these nodes
+will run the following services:
+
+- Stateless OpenStack services
+- MariaDB / Galera
+- RabbitMQ
+- OpenDaylight
+- HA Proxy
+- Pacemaker & VIPs
+
+Stateless OpenStack services
+  All running stateless OpenStack services are load balanced by HA Proxy.
+  Pacemaker monitors the services and ensures that they are running.
+
+Stateful OpenStack services
+  All running stateful OpenStack services are load balanced by HA Proxy.
+  They are monitored by Pacemaker in an active/passive failover configuration.
+
+MariaDB / Galera
+  The MariaDB database is replicated across the control nodes using Galera.
+  Pacemaker is responsible for a proper start up of the Galera cluster. HA
+  Proxy provides an active/passive failover methodology for connections to the
+  database.
+
+RabbitMQ
+  The message bus is managed by Pacemaker to ensure proper start up and
+  establishment of clustering across cluster members.
+
+OpenDaylight
+  OpenDaylight is currently installed on all three control nodes but only
+  started on the first control node. OpenDaylight's HA capabilities are not yet
+  mature enough to be enabled.
+
+HA Proxy
+  HA Proxy is monitored by Pacemaker to ensure it is running across all nodes
+  and available to balance connections.
+
+Pacemaker & VIPs
+  Pacemaker has relationships and constraints set up to ensure proper service
+  start up order and that Virtual IPs associated with specific services are
+  running on the proper host.
+
+VM Migration is configured and VMs can be evacuated as needed, or as invoked
+by tools such as Heat as part of a monitored stack deployment in the overcloud.
+
+
 OPNFV Scenario Architecture
 ===========================
diff --git a/docs/installationprocedure/baremetal.rst b/docs/installationprocedure/baremetal.rst
index eb48a289..d41c77e0 100644
--- a/docs/installationprocedure/baremetal.rst
+++ b/docs/installationprocedure/baremetal.rst
@@ -205,7 +205,8 @@ Edit the 2 settings files in /etc/opnfv-apex/. These files have comments to
 help you customize them.
 
 1. deploy_settings.yaml
-   This file includes basic configuration options deployment.
+   This file includes basic configuration options for deployment, and also
+   documents all available options.
    Alternatively, there are pre-built deploy_settings files available in
    (``/etc/opnfv-apex/``). These files are named with the naming convention
    os-sdn_controller-enabled_feature-[no]ha.yaml. These files can be used in
diff --git a/docs/installationprocedure/references.rst b/docs/installationprocedure/references.rst
index f1eac0aa..a63a8421 100644
--- a/docs/installationprocedure/references.rst
+++ b/docs/installationprocedure/references.rst
@@ -18,7 +18,7 @@ OPNFV
 
 `OPNFV Apex project page <https://wiki.opnfv.org/apex>`_
 
-`OPNFV Apex release notes <http://artifacts.opnfv.org/apex/colorado/docs/release-notes/release-notes.html#references>`_
+`OPNFV Apex release notes <http://artifacts.opnfv.org/apex/colorado/docs/releasenotes/release-notes.html#references>`_
 
 OpenStack
 ---------
diff --git a/docs/releasenotes/release-notes.rst b/docs/releasenotes/release-notes.rst
index 4a262ed3..96d654a8 100644
--- a/docs/releasenotes/release-notes.rst
+++ b/docs/releasenotes/release-notes.rst
@@ -20,7 +20,7 @@ All Apex and "common" entities are protected by the Apache License
 ( http://www.apache.org/licenses/ )
 
 
-Version history
+Version History
 ===============
 
 
@@ -28,13 +28,16 @@ Version history
 | **Date**    | **Ver.**  | **Authors**     | **Comment**          |
 |             |           |                 |                      |
 +-------------+-----------+-----------------+----------------------+
+| 2016-09-20  | 2.1.0     | Tim Rozet       | More updates for     |
+|             |           |                 | Colorado             |
++-------------+-----------+-----------------+----------------------+
 | 2016-08-11  | 2.0.0     | Dan Radez       | Updates for Colorado |
 +-------------+-----------+-----------------+----------------------+
 | 2015-09-17  | 1.0.0     | Dan Radez       | Rewritten for        |
 |             |           |                 | RDO Manager update   |
 +-------------+-----------+-----------------+----------------------+
 
-Important notes
+Important Notes
 ===============
 
 This is the OPNFV Colorado release that implements the deploy stage of the
@@ -51,19 +54,23 @@ Summary
 
 Colorado release with the Apex deployment toolchain will establish an OPNFV
 target system on a Pharos compliant lab infrastructure. The current definition
-of an OPNFV target system is and OpenStack Liberty combined with OpenDaylight
-Beryllium. The system is deployed with OpenStack High Availability (HA) for
-most OpenStack services. OpenDaylight is deployed in non-HA form as HA support
-is not availble for OpenDaylight at the time of the Colorado release. Ceph
+of an OPNFV target system is OpenStack Mitaka combined with an SDN
+controller, such as OpenDaylight. The system is deployed with OpenStack High
+Availability (HA) for most OpenStack services. SDN controllers are deployed
+only on the first controller (see HAIssues_ for known HA SDN issues). Ceph
 storage is used as Cinder backend, and is the only supported storage for
 Colorado. Ceph is setup as 3 OSDs and 3 Monitors, one OSD+Mon per Controller
-node.
+node in an HA setup. Apex also supports non-HA deployments, which deploy a
+single controller and n number of compute nodes. Furthermore, Apex is
+capable of deploying scenarios in a bare metal or virtual fashion. Virtual
+deployments use multiple VMs on the jump host and internal networking to
+simulate a bare metal deployment.
 
 - Documentation is built by Jenkins
 - .iso image is built by Jenkins
 - .rpm packages are built by Jenkins
 - Jenkins deploys a Colorado release with the Apex deployment toolchain
-  baremetal, which includes 3 control+network nodes, and 2 compute nodes.
+  bare metal, which includes 3 control+network nodes, and 2 compute nodes.
 Release Data
 ============
@@ -78,7 +85,7 @@
 | **Release designation**              | colorado.1.0                         |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| **Release date**                     | 2016-09-14                           |
+| **Release date**                     | 2016-09-22                           |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
 | **Purpose of the delivery**          | OPNFV Colorado release               |
@@ -95,11 +102,11 @@ deployment toolchain. It is based on following upstream versions:
 
 - OpenStack (Mitaka release)
 
-- OpenDaylight (Beryllium release)
+- OpenDaylight (Beryllium/Boron releases)
 
 - CentOS 7
 
-Document version changes
+Document Version Changes
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
 This is the first tracked version of Colorado release with the Apex
@@ -111,24 +118,45 @@ The following documentation is provided with this release:
 
 - OPNFV Release Notes for the Colorado release with the Apex deployment
   toolchain - ver. 1.0.0 (this document)
 
-Feature additions
+Feature Additions
 ~~~~~~~~~~~~~~~~~
 
 +--------------------------------------+--------------------------------------+
 | **JIRA REFERENCE**                   | **SLOGAN**                           |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| JIRA: APEX-32                        | Build.sh integration of RDO Manager  |
-|                                      |                                      |
+| JIRA: APEX-107                       | OpenDaylight HA - OVSDB Clustering   |
 +--------------------------------------+--------------------------------------+
-| JIRA: APEX-6                         | Deploy.sh integration of RDO Manager |
-|                                      |                                      |
+| JIRA: APEX-108                       | Migrate to OpenStack Mitaka          |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-30                        | Support VLAN tagged deployments      |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-105                       | Enable Huge Page Configuration       |
+|                                      | Options                              |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-111                       | Allow RAM to be specified for        |
+|                                      | Control/Compute in Virtual           |
+|                                      | Deployments                          |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-119                       | Enable OVS DPDK as a deployment      |
+|                                      | Scenario in Apex                     |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-126                       | Tacker Service deployed by Apex      |
 +--------------------------------------+--------------------------------------+
-| JIRA: APEX-34                        | Migrate and update Release           |
-|                                      | Documentation for Colorado           |
+| JIRA: APEX-135                       | Congress Service deployed by Apex    |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-127                       | Nova Instance CPU Pinning            |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-130                       | IPv6 Underlay Deployment             |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-133                       | FDIO with Honeycomb Agent            |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-141                       | Integrate VSPERF into Apex           |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-172                       | Enable ONOS SFC                      |
 +--------------------------------------+--------------------------------------+
 
-Bug corrections
+Bug Corrections
 ~~~~~~~~~~~~~~~
 
 **JIRA TICKETS:**
 
@@ -137,24 +165,74 @@
 | **JIRA REFERENCE**                   | **SLOGAN**                           |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-|                                      |                                      |
-|                                      |                                      |
+| JIRA: APEX-86                        | Need ability to specify number of    |
+|                                      | compute nodes                        |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-180                       | Baremetal deployment error: Failed to|
+|                                      | mount root partition /dev/sda on     |
+|                                      | /mnt/rootfs                          |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-161                       | Heat autoscaling stack creation fails|
+|                                      | for non-admin users                  |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-198                       | Missing NAT iptables rule for public |
+|                                      | network in instack VM                |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-147                       | Installer doesn't generate/distribute|
+|                                      | SSH keys between compute nodes       |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-109                       | ONOS routes local subnet traffic to  |
+|                                      | GW                                   |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-146                       | Swift service present in available   |
+|                                      | endpoints                            |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-160                       | Enable force_metadata to support     |
+|                                      | subnets with VM as the router        |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-114                       | OpenDaylight GUI is not available    |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-100                       | DNS1 and DNS2 should be handled in   |
+|                                      | nic bridging                         |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-100                       | DNS1 and DNS2 should be handled in   |
+|                                      | nic bridging                         |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-155                       | NIC Metric value not used when       |
+|                                      | bridging NICs                        |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-136                       | 2 network deployment fails           |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-89                        | Deploy Ceph OSDs on compute nodes    |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-137                       | added arping as dependency for       |
+|                                      | ONOS deployments                     |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-121                       | VM Storage deletion intermittently   |
+|                                      | fails                                |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-182                       | Nova services not correctly deployed |
++--------------------------------------+--------------------------------------+
+| JIRA: APEX-153                       | brbm bridge not created in jumphost  |
 +--------------------------------------+--------------------------------------+
 
 Deliverables
 ------------
 
-Software deliverables
+Software Deliverables
 ~~~~~~~~~~~~~~~~~~~~~
 
-Apex .iso file
-Apex overcloud .rpm (opnfv-apex)
-Apex undercloud .rpm (opnfv-apex-undercloud)
-Apex common .rpm (opnfv-apex-common)
-build.sh - Builds the above artifacts
-opnfv-deploy - Automatically deploys Target OPNFV System
-opnfv-clean - Automatically resets a Target OPNFV Deployment
-
-Documentation deliverables
+- Apex .iso file
+- Apex overcloud .rpm (opnfv-apex) - For nosdn and OpenDaylight Scenarios
+- Apex overcloud onos .rpm (opnfv-apex-onos) - ONOS Scenarios
+- Apex overcloud ODL SFC .rpm (opnfv-apex-opendaylight-sfc) - ODL SFC Scenario
+- Apex undercloud .rpm (opnfv-apex-undercloud)
+- Apex common .rpm (opnfv-apex-common)
+- build.sh - Builds the above artifacts
+- opnfv-deploy - Automatically deploys Target OPNFV System
+- opnfv-clean - Automatically resets a Target OPNFV Deployment
+- opnfv-util - Utility to connect to or debug Overcloud nodes + OpenDaylight
+
+Documentation Deliverables
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 - OPNFV Installation instructions for the Colorado release with the Apex
   deployment toolchain - ver. 1.0.0
@@ -173,10 +251,11 @@ System Limitations
 
 **Storage:** Ceph is the only supported storage configuration.
 
-**Min master requirements:** At least 16GB of RAM
+**Min master requirements:** At least 16GB of RAM for baremetal jumphost,
+24GB for virtual deployments (noHA).
 
-Known issues
+Known Issues
 ------------
 
 **JIRA TICKETS:**
 
@@ -185,37 +264,23 @@
 | **JIRA REFERENCE**                   | **SLOGAN**                           |
 |                                      |                                      |
 +--------------------------------------+--------------------------------------+
-| JIRA: APEX-89                        | Deploy Ceph OSDs on the compute      |
-|                                      | nodes also                           |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-27                        | OpenContrail Support                 |
-|                                      |                                      |
+| JIRA: APEX-203                       | Swift proxy enabled and fails in noha|
+|                                      | deployments                          |
 +--------------------------------------+--------------------------------------+
-| JIRA: APEX-30                        | Support for VLAN tagged network      |
-|                                      | deployment architecture              |
+| JIRA: APEX-215                       | Keystone services not configured and |
+|                                      | the error is silently ignored (VLAN  |
+|                                      | Deployments)                         |
 +--------------------------------------+--------------------------------------+
-| JIRA: APEX-100                       | DNS1 and DNS2 not handled in         |
-|                                      | nic bridging                         |
+| JIRA: APEX-208                       | Need ability to specify which NIC to |
+|                                      | place VLAN on                        |
 +--------------------------------------+--------------------------------------+
-| JIRA: APEX-47                        | Integrate Tacker as part of SFC      |
-|                                      | Experimental Feature                 |
+| JIRA: APEX-254                       | Add dynamic hugepages configuration  |
 +--------------------------------------+--------------------------------------+
-| JIRA: APEX-84                        | --flat option no longer working      |
-|                                      |                                      |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-51                        | Integrate SDNVPN as a deploy option  |
-|                                      |                                      |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-99                        | Syntax error when                    |
-|                                      | running opnfv-deploy                 |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-86                        | Compute node count configurable      |
-|                                      | for virtual deployments              |
-+--------------------------------------+--------------------------------------+
-| JIRA: APEX-141                       | Adding VSPERF support                |
-|                                      |                                      |
+| JIRA: APEX-138                       | Unclear error message when interface |
+|                                      | set to dhcp                          |
 +--------------------------------------+--------------------------------------+
+
 Workarounds
 -----------
 **-**
@@ -223,6 +288,30 @@ Workarounds
 Scenario specific release notes
 ===============================
 
+Scenario os-odl_l3-nofeature known issues
+-----------------------------------------
+
+* `APEX-112 <https://jira.opnfv.org/browse/APEX-112>`_:
+   ODL routes local subnet traffic to GW
+
+Scenario os-odl_l2-nofeature known issues
+-----------------------------------------
+
+* `APEX-149 <https://jira.opnfv.org/browse/APEX-149>`_:
+   Openflow rules are populated very slowly
+
+Scenario os-odl_l2-sfc-noha known issues
+----------------------------------------
+
+* `APEX-275 <https://jira.opnfv.org/browse/APEX-275>`_:
+   Metadata fails in Boron
+
+Scenario os-nosdn-ovs known issues
+----------------------------------
+
+* `APEX-274 <https://jira.opnfv.org/browse/APEX-274>`_:
+   OVS DPDK scenario does not create vhost user ports
+
 Scenario os-odl_l2-fdio-noha known issues
 -----------------------------------------
@@ -244,6 +333,18 @@ Scenario os-odl_l2-fdio-noha known issues
   none on controller
 * `FDS-82 <https://jira.opnfv.org/browse/FDS-82>`_:
   Nova list shows no vms but there are some on computes in paused state
+* `APEX-217 <https://jira.opnfv.org/browse/APEX-217>`_:
+   qemu not configured with correct group:user
+
+.. _HAIssues:
+
+General HA scenario known issues
+--------------------------------
+
+* `COPPER-22 <https://jira.opnfv.org/browse/COPPER-22>`_:
+   Congress service HA deployment is not yet supported/verified.
+* `APEX-276 <https://jira.opnfv.org/browse/APEX-276>`_:
+   ODL HA unstable and crashes frequently
 
 Test Result
 ===========
@@ -268,4 +369,4 @@ http://wiki.opnfv.org/releases/Colorado
 
 :Authors: Tim Rozet (trozet@redhat.com)
 :Authors: Dan Radez (dradez@redhat.com)
-:Version: 1.0.0
+:Version: 2.1.0
diff --git a/lib/overcloud-deploy-functions.sh b/lib/overcloud-deploy-functions.sh
index 498fd135..c7301fdd 100755
--- a/lib/overcloud-deploy-functions.sh
+++ b/lib/overcloud-deploy-functions.sh
@@ -178,8 +178,20 @@ EOI
 
   # set NIC heat params and resource registry
   ssh -T ${SSH_OPTIONS[@]} "stack@$UNDERCLOUD" <<EOI
-sudo sed -i '/TenantNIC:/c\  TenantNIC: '${private_network_compute_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
-sudo sed -i '/PublicNIC:/c\  PublicNIC: '${public_network_compute_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+if [ -n "${private_network_compute_interface}" ]; then
+  sudo sed -i '/ComputeTenantNIC:/c\  ComputeTenantNIC: '${private_network_compute_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+fi
+if [ -n "${private_network_controller_interface}" ]; then
+  sudo sed -i '/ControllerTenantNIC:/c\  ControllerTenantNIC: '${private_network_controller_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+fi
+# TODO: PublicNIC is not used today, however, in the future, we'll bind public nic to DPDK as well for certain scenarios. At that time,
+# we'll need to make sure public network is enabled.
+if [ -n "${public_network_compute_interface}" ]; then
+  sudo sed -i '/ComputePublicNIC:/c\  ComputePublicNIC: '${public_network_compute_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+fi
+if [ -n "${public_network_controller_interface}" ]; then
+  sudo sed -i '/ControllerPublicNIC:/c\  ControllerPublicNIC: '${public_network_controller_interface} /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml
+fi
 EOI
 
   DEPLOY_OPTIONS+=" -e /usr/share/openstack-tripleo-heat-templates/environments/numa.yaml"
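
For reference, a deploy settings file that enables the performance options documented in the new config/deploy/deploy_settings.yaml comments could look like the sketch below. It is only an illustration assembled from the commented example in this patch; the isolcpus values, hugepage size, and the choice of the ovs_dpdk dataplane are placeholders to be tuned per deployment, not recommended defaults.

global_params:
  ha_enabled: true

deploy_options:
  sdn_controller: opendaylight
  sdn_l3: false
  tacker: true
  congress: false
  sfc: false
  vpn: false
  # hugepages and intel_iommu are required when the dataplane is ovs_dpdk
  dataplane: ovs_dpdk
  performance:
    Controller:
      kernel:
        # passed to the kernel boot line: dedicate core 1 to an ovs_dpdk process
        isolcpus: 1
        hugepage: 2M
        intel_iommu: 'on'
    Compute:
      nova:
        # added to vcpu_pin_set in nova.conf to pin guest VMs to specific cores
        libvirtpin: 1
      kernel:
        # reserve core 0 on compute nodes for VMs or ovs_dpdk
        isolcpus: 0
        hugepage: 2M
        intel_iommu: 'on'

Similarly, once opnfv-apex-common installs build/bash_completion_apex as /etc/bash_completion.d/apex, a new login shell (or sourcing that file directly) should let opnfv-deploy and opnfv-util tab-complete the options reported by their -h output.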