-rw-r--r--  INFO                                                                |   5
-rw-r--r--  config/installers/fuel/pod_config.yml.j2                            | 261
-rw-r--r--  config/pdf/idf-pod1.schema.yaml                                     |  87
-rw-r--r--  config/utils/gen_config_lib.py                                      | 224
-rwxr-xr-x  config/utils/generate_config.py                                     | 101
-rwxr-xr-x  config/utils/validate_schema.py                                     |  18
-rw-r--r--  docs/release/scenario-lifecycle/From OS-BASIC to NOSDN-FDIO.png     | bin 0 -> 51232 bytes
-rw-r--r--  docs/release/scenario-lifecycle/From OS-BASIC to NOSDN-OVS.png      | bin 0 -> 50055 bytes
-rw-r--r--  docs/release/scenario-lifecycle/ODL Generic Scenarios Evolution.png | bin 0 -> 95014 bytes
-rw-r--r--  docs/release/scenario-lifecycle/create-sdf.png                      | bin 0 -> 402973 bytes
-rw-r--r--  docs/release/scenario-lifecycle/creating-scenarios.rst              | 104
-rw-r--r--  docs/release/scenario-lifecycle/current-status.rst                  |  75
-rw-r--r--  docs/release/scenario-lifecycle/deployment-options.rst              | 128
-rw-r--r--  docs/release/scenario-lifecycle/feature-compatibility-nosdn.png     | bin 0 -> 24694 bytes
-rw-r--r--  docs/release/scenario-lifecycle/feature-compatibility-odl.png       | bin 0 -> 32124 bytes
-rw-r--r--  docs/release/scenario-lifecycle/generic-scenarios.rst               |  53
-rw-r--r--  docs/release/scenario-lifecycle/index.rst                           |  24
-rw-r--r--  docs/release/scenario-lifecycle/mano-scenarios.rst                  |  31
-rw-r--r--  docs/release/scenario-lifecycle/parent-child-relations.rst          |  62
-rw-r--r--  docs/release/scenario-lifecycle/parent-child.png                    | bin 0 -> 26454 bytes
-rw-r--r--  docs/release/scenario-lifecycle/pdf-and-sdf.png                     | bin 0 -> 189523 bytes
-rw-r--r--  docs/release/scenario-lifecycle/scenario-descriptor-files.rst       | 228
-rw-r--r--  docs/release/scenario-lifecycle/scenario-overview.rst               | 166
-rw-r--r--  docs/release/scenario-lifecycle/scenario-tree+idea.png              | bin 0 -> 128763 bytes
-rw-r--r--  docs/release/scenario-lifecycle/scenario-tree-danube.png            | bin 0 -> 80299 bytes
-rw-r--r--  docs/release/scenario-lifecycle/scenario-tree.png                   | bin 0 -> 81067 bytes
-rw-r--r--  docs/release/scenario-lifecycle/sibling.png                         | bin 0 -> 32538 bytes
-rw-r--r--  docs/release/scenario-lifecycle/specific-scenarios.rst              |  34
-rw-r--r--  docs/release/scenario-lifecycle/workflows.rst                       |  70
-rw-r--r--  labs/arm/idf-pod6.yaml                                              |  28
-rw-r--r--  labs/lf/idf-pod4.yaml                                               |  74
-rw-r--r--  labs/lf/idf-pod5.yaml                                               |  22
-rw-r--r--  labs/lf/pod4.yaml                                                   | 161
-rw-r--r--  labs/lf/pod5.yaml                                                   |  14
-rw-r--r--  labs/zte/idf-pod1.yaml                                              |   2
35 files changed, 1752 insertions, 220 deletions
diff --git a/INFO b/INFO
index 3f003bc9..2ebe872e 100644
--- a/INFO
+++ b/INFO
@@ -2,8 +2,8 @@ Project: Testbed infrastructure (Pharos)
Project Creation Date: January 8, 2015
Project Category: Integration & Testing
Lifecycle State: Mature
-Primary Contact: Jack Morgan (jack.morgan@intel.com)
-Project Lead: Jack Morgan (jack.morgan@intel.com)
+Primary Contact: Julien Zhang (zhang.jun3g@zte.com.cn)
+Project Lead: Julien Zhang (zhang.jun3g@zte.com.cn)
Jira Project Name: Testbed infrastructure project
Jira Project Prefix: PHAROS
Mailing list tag: [pharos]
@@ -25,4 +25,5 @@ Trevor Cooper <trevor.cooper@intel.com>
Link to TSC approval of the project: http://meetbot.opnfv.org/meetings/opnfv-meeting/
Link(s) to approval of additional committers:
http://lists.opnfv.org/pipermail/opnfv-tech-discuss/2016-May/010567.html
+https://lists.opnfv.org/pipermail/opnfv-tsc/2018-February/004105.html
Via email, RT Ticket: 23593
diff --git a/config/installers/fuel/pod_config.yml.j2 b/config/installers/fuel/pod_config.yml.j2
index dfa9679d..9a6b4be1 100644
--- a/config/installers/fuel/pod_config.yml.j2
+++ b/config/installers/fuel/pod_config.yml.j2
@@ -5,158 +5,171 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-{%- set net_admin = conf.idf.net_config.admin.network %}
-{%- set net_mgmt = conf.idf.net_config.mgmt.network %}
-{%- set net_private = conf.idf.net_config.private.network %}
-{%- set net_public = conf.idf.net_config.public.network %}
-{%- set net_public_mask = conf.idf.net_config.public.mask %}
-{%- set vlan_mgmt = conf.idf.net_config.mgmt.vlan %}
-{%- set vlan_private = conf.idf.net_config.private.vlan %}
-{%- if conf.idf.net_config.public.dns is defined %}
- {%- set dns_public = conf.idf.net_config.public.dns %}
+{%- set net = conf.idf.net_config %}
+{%- set net_admin = [net.admin.network, net.admin.mask] | join("/") %}
+{%- set net_mgmt = [net.mgmt.network, net.mgmt.mask] | join("/") %}
+{%- set net_private = [net.private.network, net.private.mask] | join("/") %}
+{%- set net_public = [net.public.network, net.public.mask] | join("/") %}
+{%- set networks = [net_admin, net_mgmt, net_private, net_public] %}
+
+{%- set vlan_mgmt = net.mgmt.vlan %}
+{%- set vlan_private = net.private.vlan %}
+{%- set pxe_interface = net.admin.interface %}
+
+{%- if net.public.dns is defined %}
+ {%- set dns_public = net.public.dns %}
+{%- else %}
+ {%- set dns_public = [ '8.8.8.8', '8.8.4.4' ] %}
{%- endif %}
-{%- set pxe_interface = conf.idf.net_config.admin.interface %}
-{%- if conf.idf.net_config.public.gateway is defined %}
- {%- set net_public_gw = conf.idf.net_config.public.gateway %}
+
+{%- if net.public.gateway is defined %}
+ {%- set net_public_gw = net.public.gateway %}
{%- endif %}
{%- if conf.idf.fuel.network.public_pool is defined %}
{%- set net_public_pool_start = conf.idf.fuel.network.public_pool.start_ip %}
{%- set net_public_pool_end = conf.idf.fuel.network.public_pool.end_ip %}
{%- endif %}
+
{%- if conf.idf.fuel.maas is defined %}
{%- set maas_timeout_comissioning = conf.idf.fuel.maas.timeout_comissioning %}
{%- set maas_timeout_deploying = conf.idf.fuel.maas.timeout_deploying %}
+{%- else %}
+ {%- set maas_timeout_comissioning = 10 %}
+ {%- set maas_timeout_deploying = 15 %}
{%- endif %}
-{%- if dns_public is not defined %}
- {%- set dns_public = [ '8.8.8.8', '8.8.4.4' ] %}
-{%- endif %}
-{%- if net_public_gw is not defined %}
- {%- set net_public_gw = net_public | ipaddr_index('1') %}
-{%- endif %}
+{%- set cmp_nodes = 3 %}
+
+{%- set net_admin_hosts = [
+ 'opnfv_infra_config_pxe_address',
+ 'opnfv_infra_maas_node01_deploy_address',
+ 'opnfv_infra_maas_pxe_start_address'] %}
+
+{%- set net_mgmt_hosts = [
+ 'opnfv_infra_config_address',
+ 'opnfv_infra_maas_node01_address',
+ 'opnfv_openstack_proxy_control_address',
+ 'opnfv_openstack_proxy_node01_control_address',
+ 'opnfv_openstack_proxy_node02_control_address',
+ 'opnfv_openstack_gateway_node01_address',
+ 'opnfv_openstack_gateway_node02_address',
+ 'opnfv_openstack_gateway_node03_address',
+ 'opnfv_infra_kvm_address',
+ 'opnfv_infra_kvm_node01_address',
+ 'opnfv_infra_kvm_node02_address',
+ 'opnfv_infra_kvm_node03_address',
+ 'opnfv_openstack_database_address',
+ 'opnfv_openstack_database_node01_address',
+ 'opnfv_openstack_database_node02_address',
+ 'opnfv_openstack_database_node03_address',
+ 'opnfv_openstack_message_queue_address',
+ 'opnfv_openstack_message_queue_node01_address',
+ 'opnfv_openstack_message_queue_node02_address',
+ 'opnfv_openstack_message_queue_node03_address',
+ 'opnfv_openstack_telemetry_address',
+ 'opnfv_openstack_telemetry_node01_address',
+ 'opnfv_openstack_telemetry_node02_address',
+ 'opnfv_openstack_telemetry_node03_address',
+ 'opnfv_openstack_control_address',
+ 'opnfv_openstack_control_node01_address',
+ 'opnfv_openstack_control_node02_address',
+ 'opnfv_openstack_control_node03_address',
+ 'opnfv_opendaylight_server_node01_single_address',
+ 'opnfv_stacklight_monitor_address',
+ 'opnfv_stacklight_monitor_node01_address',
+ 'opnfv_stacklight_monitor_node02_address',
+ 'opnfv_stacklight_monitor_node03_address',
+ 'opnfv_stacklight_log_address',
+ 'opnfv_stacklight_log_node01_address',
+ 'opnfv_stacklight_log_node02_address',
+ 'opnfv_stacklight_log_node03_address',
+ 'opnfv_stacklight_telemetry_address',
+ 'opnfv_stacklight_telemetry_node01_address',
+ 'opnfv_stacklight_telemetry_node02_address',
+ 'opnfv_stacklight_telemetry_node03_address'] %}
+
+{%- set net_public_hosts = [
+ 'opnfv_openstack_proxy_address',
+ 'opnfv_openstack_proxy_node01_address',
+ 'opnfv_openstack_proxy_node02_address',
+ 'opnfv_openstack_gateway_node01_external_address',
+ 'opnfv_openstack_gateway_node02_external_address',
+ 'opnfv_openstack_gateway_node03_external_address',
+ 'opnfv_openstack_control_node01_external_address',
+ 'opnfv_openstack_control_node02_external_address',
+ 'opnfv_openstack_control_node03_external_address'] %}
+
+{%- set net_private_hosts = [
+ 'opnfv_openstack_gateway_node01_tenant_address',
+ 'opnfv_openstack_gateway_node02_tenant_address',
+ 'opnfv_openstack_gateway_node03_tenant_address'] %}
+
+{%- set hosts = {
+ net_admin: net_admin_hosts,
+ net_mgmt: net_mgmt_hosts,
+ net_private: net_private_hosts,
+ net_public: net_public_hosts } %}
+
+{%- set start_ip = {
+ net_admin: 1,
+ net_mgmt: 1,
+ net_private: 1,
+ net_public: 1 } %}
+
+{%- set total_public_hosts = net_public_hosts | length + cmp_nodes %}
{%- if net_public_pool_start is not defined or net_public_pool_end is not defined %}
- {%- set net_public_pool_start = net_public | ipaddr_index('80') %}
- {%- set net_public_pool_end = net_public | ipaddr_index('100') %}
-{%- endif %}
-{%- if maas_timeout_comissioning is not defined or maas_timeout_deploying is not defined %}
- {%- set maas_timeout_comissioning = 10 %}
- {%- set maas_timeout_deploying = 15 %}
+ {%- set net_public_pool_start = net_public | ipnet_hostaddr(total_public_hosts + start_ip[net_public] +1) %}
+ {%- set net_public_pool_end = net_public | ipnet_hostmax -1 %}
{%- endif %}
+
---
parameters:
_param:
+ opnfv_maas_timeout_comissioning: {{ maas_timeout_comissioning }}
+ opnfv_maas_timeout_deploying: {{ maas_timeout_deploying }}
+
opnfv_jump_bridge_admin: {{ conf.idf.fuel.jumphost.bridges.admin }}
opnfv_jump_bridge_mgmt: {{ conf.idf.fuel.jumphost.bridges.mgmt }}
opnfv_jump_bridge_private: {{ conf.idf.fuel.jumphost.bridges.private }}
opnfv_jump_bridge_public: {{ conf.idf.fuel.jumphost.bridges.public }}
- opnfv_infra_config_address: {{ net_mgmt | ipaddr_index(100) }}
- opnfv_infra_config_pxe_address: {{ net_admin | ipaddr_index(2) }}
- opnfv_infra_maas_node01_address: {{ net_mgmt | ipaddr_index(3) }}
- opnfv_infra_maas_node01_deploy_address: {{ net_admin | ipaddr_index(3) }}
- opnfv_infra_kvm_address: {{ net_mgmt | ipaddr_index(140) }}
- opnfv_infra_kvm_node01_address: {{ net_mgmt | ipaddr_index(141) }}
- opnfv_infra_kvm_node02_address: {{ net_mgmt | ipaddr_index(142) }}
- opnfv_infra_kvm_node03_address: {{ net_mgmt | ipaddr_index(143) }}
-
- opnfv_infra_maas_pxe_network_address: {{ net_admin }}
- opnfv_infra_maas_pxe_start_address: {{ net_admin | ipaddr_index(4) }}
- opnfv_infra_maas_pxe_end_address: {{ net_admin | ipaddr_index(100) }}
-
- opnfv_openstack_gateway_node01_address: {{ net_mgmt | ipaddr_index(124) }}
- opnfv_openstack_gateway_node02_address: {{ net_mgmt | ipaddr_index(125) }}
- opnfv_openstack_gateway_node03_address: {{ net_mgmt | ipaddr_index(126) }}
- opnfv_openstack_gateway_node01_tenant_address: {{ net_private | ipaddr_index(124) }}
- opnfv_openstack_gateway_node02_tenant_address: {{ net_private | ipaddr_index(125) }}
- opnfv_openstack_gateway_node03_tenant_address: {{ net_private | ipaddr_index(126) }}
- opnfv_openstack_gateway_node01_external_address: {{ net_public | ipaddr_index(124) }}
- opnfv_openstack_gateway_node02_external_address: {{ net_public | ipaddr_index(125) }}
- opnfv_openstack_gateway_node03_external_address: {{ net_public | ipaddr_index(126) }}
- opnfv_openstack_proxy_address: {{ net_public | ipaddr_index(103) }}
- opnfv_openstack_proxy_node01_address: {{ net_public | ipaddr_index(104) }}
- opnfv_openstack_proxy_node02_address: {{ net_public | ipaddr_index(105) }}
- opnfv_openstack_proxy_control_address: {{ net_mgmt | ipaddr_index(103) }}
- opnfv_openstack_proxy_node01_control_address: {{ net_mgmt | ipaddr_index(104) }}
- opnfv_openstack_proxy_node02_control_address: {{ net_mgmt | ipaddr_index(105) }}
- opnfv_openstack_control_address: {{ net_mgmt | ipaddr_index(10) }}
- opnfv_openstack_control_node01_address: {{ net_mgmt | ipaddr_index(11) }}
- opnfv_openstack_control_node02_address: {{ net_mgmt | ipaddr_index(12) }}
- opnfv_openstack_control_node03_address: {{ net_mgmt | ipaddr_index(13) }}
- opnfv_openstack_control_node01_external_address: {{ net_public | ipaddr_index(11) }}
- opnfv_openstack_control_node02_external_address: {{ net_public | ipaddr_index(12) }}
- opnfv_openstack_control_node03_external_address: {{ net_public | ipaddr_index(13) }}
- opnfv_openstack_database_address: {{ net_mgmt | ipaddr_index(50) }}
- opnfv_openstack_database_node01_address: {{ net_mgmt | ipaddr_index(51) }}
- opnfv_openstack_database_node02_address: {{ net_mgmt | ipaddr_index(52) }}
- opnfv_openstack_database_node03_address: {{ net_mgmt | ipaddr_index(53) }}
- opnfv_openstack_message_queue_address: {{ net_mgmt | ipaddr_index(40) }}
- opnfv_openstack_message_queue_node01_address: {{ net_mgmt | ipaddr_index(41) }}
- opnfv_openstack_message_queue_node02_address: {{ net_mgmt | ipaddr_index(42) }}
- opnfv_openstack_message_queue_node03_address: {{ net_mgmt | ipaddr_index(43) }}
- opnfv_openstack_telemetry_address: {{ net_mgmt | ipaddr_index(75) }}
- opnfv_openstack_telemetry_node01_address: {{ net_mgmt | ipaddr_index(76) }}
- opnfv_openstack_telemetry_node02_address: {{ net_mgmt | ipaddr_index(77) }}
- opnfv_openstack_telemetry_node03_address: {{ net_mgmt | ipaddr_index(78) }}
- opnfv_openstack_compute_node01_single_address: {{ net_mgmt | ipaddr_index(101) }}
- opnfv_openstack_compute_node02_single_address: {{ net_mgmt | ipaddr_index(102) }}
- opnfv_openstack_compute_node03_single_address: {{ net_mgmt | ipaddr_index(103) }}
- opnfv_openstack_compute_node01_control_address: {{ net_mgmt | ipaddr_index(101) }}
- opnfv_openstack_compute_node02_control_address: {{ net_mgmt | ipaddr_index(102) }}
- opnfv_openstack_compute_node03_control_address: {{ net_mgmt | ipaddr_index(103) }}
- opnfv_openstack_compute_node01_tenant_address: {{ net_private | ipaddr_index(101) }}
- opnfv_openstack_compute_node02_tenant_address: {{ net_private | ipaddr_index(102) }}
- opnfv_openstack_compute_node03_tenant_address: {{ net_private | ipaddr_index(103) }}
- opnfv_openstack_compute_node01_external_address: {{ net_public | ipaddr_index(101) }}
- opnfv_openstack_compute_node02_external_address: {{ net_public | ipaddr_index(102) }}
-
- opnfv_opendaylight_server_node01_single_address: {{ net_mgmt | ipaddr_index(111) }}
-
- opnfv_net_public: {{ net_public }}/{{ net_public_mask }}
- opnfv_net_public_mask: {{ net_public_mask | netmask }}
+ opnfv_infra_maas_pxe_network_address: {{ net.admin.network }}
+ opnfv_infra_maas_pxe_end_address: {{ net_admin | ipnet_hostmax }}
+ opnfv_net_public: {{ net_public }}
+ opnfv_net_public_mask: {{ net_public | ipnet_netmask }}
opnfv_net_public_gw: {{ net_public_gw }}
opnfv_net_public_pool_start: {{ net_public_pool_start }}
opnfv_net_public_pool_end: {{ net_public_pool_end }}
opnfv_name_servers: {{ dns_public }}
opnfv_dns_server01: '{{ dns_public[0] }}'
-
opnfv_net_mgmt_vlan: {{ vlan_mgmt }}
opnfv_net_tenant_vlan: {{ vlan_private }}
- opnfv_maas_timeout_comissioning: {{ maas_timeout_comissioning }}
- opnfv_maas_timeout_deploying: {{ maas_timeout_deploying }}
+{%- for network in networks %}
+{%- for key in hosts[network] %}
+{%- set i = loop.index + start_ip[network] %}
+ {{key}}: {{ network | ipnet_hostaddr(i) }}
+{%- endfor %}
+{%- endfor %}
+
+{%- for cmp in range(1, cmp_nodes +1) %}
+ {%- set n = '%02d' | format(cmp) %}
+ {%- set mgmt = net_mgmt_hosts | length + start_ip[net_mgmt] + loop.index %}
+ {%- set pub = net_public_hosts | length + start_ip[net_public] + loop.index %}
+ {%- set pri = net_private_hosts | length + start_ip[net_private] + loop.index %}
+ opnfv_openstack_compute_node{{n}}_single_address: {{ net_mgmt | ipnet_hostaddr(mgmt) }}
+ opnfv_openstack_compute_node{{n}}_control_address: {{ net_mgmt | ipnet_hostaddr(mgmt) }}
+ opnfv_openstack_compute_node{{n}}_tenant_address: {{ net_private | ipnet_hostaddr(pri) }}
+ opnfv_openstack_compute_node{{n}}_external_address: {{ net_public | ipnet_hostaddr(pub) }}
+{%- endfor %}
- opnfv_maas_node01_architecture: '{{ conf.nodes.0.node.arch | dpkg_arch }}/generic'
- opnfv_maas_node01_power_address: {{ conf.nodes.0.remote_management.address.rsplit('/')[0] }}
- opnfv_maas_node01_power_type: {{ conf.nodes.0.remote_management.type }}
- opnfv_maas_node01_power_user: {{ conf.nodes.0.remote_management.user }}
- opnfv_maas_node01_power_password: {{ conf.nodes.0.remote_management.pass }}
- opnfv_maas_node01_interface_mac: '{{ conf.nodes.0.interfaces[pxe_interface].mac_address }}'
-
- opnfv_maas_node02_architecture: '{{ conf.nodes.1.node.arch | dpkg_arch }}/generic'
- opnfv_maas_node02_power_address: {{ conf.nodes.1.remote_management.address.rsplit('/')[0] }}
- opnfv_maas_node02_power_type: {{ conf.nodes.1.remote_management.type }}
- opnfv_maas_node02_power_user: {{ conf.nodes.1.remote_management.user }}
- opnfv_maas_node02_power_password: {{ conf.nodes.1.remote_management.pass }}
- opnfv_maas_node02_interface_mac: '{{ conf.nodes.1.interfaces[pxe_interface].mac_address }}'
-
- opnfv_maas_node03_architecture: '{{ conf.nodes.2.node.arch | dpkg_arch }}/generic'
- opnfv_maas_node03_power_address: {{ conf.nodes.2.remote_management.address.rsplit('/')[0] }}
- opnfv_maas_node03_power_type: {{ conf.nodes.2.remote_management.type }}
- opnfv_maas_node03_power_user: {{ conf.nodes.2.remote_management.user }}
- opnfv_maas_node03_power_password: {{ conf.nodes.2.remote_management.pass }}
- opnfv_maas_node03_interface_mac: '{{ conf.nodes.2.interfaces[pxe_interface].mac_address }}'
-
- opnfv_maas_node04_architecture: '{{ conf.nodes.3.node.arch | dpkg_arch }}/generic'
- opnfv_maas_node04_power_address: {{ conf.nodes.3.remote_management.address.rsplit('/')[0] }}
- opnfv_maas_node04_power_type: {{ conf.nodes.3.remote_management.type }}
- opnfv_maas_node04_power_user: {{ conf.nodes.3.remote_management.user }}
- opnfv_maas_node04_power_password: {{ conf.nodes.3.remote_management.pass }}
- opnfv_maas_node04_interface_mac: '{{ conf.nodes.3.interfaces[pxe_interface].mac_address }}'
-
- opnfv_maas_node05_architecture: '{{ conf.nodes.4.node.arch | dpkg_arch }}/generic'
- opnfv_maas_node05_power_address: {{ conf.nodes.4.remote_management.address.rsplit('/')[0] }}
- opnfv_maas_node05_power_type: {{ conf.nodes.4.remote_management.type }}
- opnfv_maas_node05_power_user: {{ conf.nodes.4.remote_management.user }}
- opnfv_maas_node05_power_password: {{ conf.nodes.4.remote_management.pass }}
- opnfv_maas_node05_interface_mac: '{{ conf.nodes.4.interfaces[pxe_interface].mac_address }}'
+{%- for node in conf.nodes %}
+ {%- set n = '%02d' | format(loop.index) %}
+ opnfv_maas_node{{n}}_architecture: '{{ node.node.arch | dpkg_arch }}/generic'
+ opnfv_maas_node{{n}}_power_address: {{ node.remote_management.address.rsplit('/')[0] }}
+ opnfv_maas_node{{n}}_power_type: {{ node.remote_management.type }}
+ opnfv_maas_node{{n}}_power_user: {{ node.remote_management.user }}
+ opnfv_maas_node{{n}}_power_password: {{ node.remote_management.pass }}
+ opnfv_maas_node{{n}}_interface_mac: '{{ node.interfaces[pxe_interface].mac_address }}'
+{%- endfor %}
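
For reference, a minimal Python sketch of the address arithmetic the refactored loops above perform, using a hypothetical management network and a two-entry host list; the ipnet_hostaddr filter (added in config/utils/gen_config_lib.py later in this patch) indexes into the network the same way:

    from ipaddress import IPv4Network

    net_mgmt = '172.16.10.0/24'   # hypothetical IDF value
    net_mgmt_hosts = ['opnfv_infra_config_address',
                      'opnfv_infra_maas_node01_address']
    start_ip = {net_mgmt: 1}

    for index, key in enumerate(net_mgmt_hosts, start=1):
        # mirrors: {%- set i = loop.index + start_ip[network] %}
        #          {{key}}: {{ network | ipnet_hostaddr(i) }}
        print('{}: {}'.format(key, IPv4Network(net_mgmt)[index + start_ip[net_mgmt]]))
    # opnfv_infra_config_address: 172.16.10.2
    # opnfv_infra_maas_node01_address: 172.16.10.3
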
diff --git a/config/pdf/idf-pod1.schema.yaml b/config/pdf/idf-pod1.schema.yaml
index 0705fcc5..857749c4 100644
--- a/config/pdf/idf-pod1.schema.yaml
+++ b/config/pdf/idf-pod1.schema.yaml
@@ -14,14 +14,14 @@ definitions:
v0.1:
# NOTE: I hope this is going away soon, so I won't model it yet
type: 'object'
- daisy:
- v0.1:
- type: 'object'
- # NOTE: To be properly modeled by Daisy maintainers
compass:
v0.1:
type: 'object'
# NOTE: To be properly modeled by Compass4NFV maintainers
+ daisy:
+ v0.1:
+ type: 'object'
+ # NOTE: To be properly modeled by Daisy maintainers
fuel:
v0.1:
type: 'object'
@@ -73,8 +73,67 @@ definitions:
additionalProperties: false
required: ['node']
additionalProperties: false
+ reclass: # Optional
+ type: 'object'
+ properties:
+ node:
+ type: 'array'
+ items:
+ type: 'object'
+ properties:
+ compute_params:
+ type: 'object'
+ properties:
+ common: # Optional
+ type: 'object'
+ properties: &compute_params_common_properties
+ nova_cpu_pinning: # Optional
+ type: 'string'
+ compute_hugepages_size:
+ type: 'string'
+ enum: ['2M', '1G']
+ compute_hugepages_count:
+ type: 'number'
+ compute_hugepages_mount:
+ type: 'string'
+ compute_kernel_isolcpu: # Optional
+ type: 'string'
+ compute_ovs_pmd_cpu_mask: # Optional
+ type: ['string', 'number']
+ compute_ovs_memory_channels: # Optional
+ type: ['string', 'number']
+ required: ['compute_hugepages_size', 'compute_hugepages_count',
+ 'compute_hugepages_mount']
+ additionalProperties: false
+ dpdk: # Optional
+ type: 'object'
+ properties:
+ <<: *compute_params_common_properties
+ compute_dpdk_driver:
+ type: 'string'
+ compute_ovs_dpdk_socket_mem:
+ type: ['string', 'number']
+ compute_ovs_dpdk_lcore_mask:
+ type: ['string', 'number']
+ dpdk0_driver:
+ type: 'string'
+ dpdk0_n_rxq:
+ type: 'number'
+ required: ['compute_dpdk_driver', 'dpdk0_driver', 'dpdk0_n_rxq',
+ 'compute_ovs_dpdk_socket_mem',
+ 'compute_ovs_dpdk_lcore_mask']
+ additionalProperties: false
+ additionalProperties: false
+ required: ['compute_params']
+ additionalProperties: false
+ required: ['node']
+ additionalProperties: false
required: ['jumphost', 'network']
additionalProperties: false
+ osa:
+ v0.1:
+ type: 'object'
+ # NOTE: To be properly modeled by XCI maintainers
##############################################################################
# Top-level structure:
@@ -94,14 +153,16 @@ properties:
type: 'array'
items:
type: 'string'
- enum: ['apex', 'compass4nfv', 'daisy', 'fuel', 'joid']
+ enum: ['apex', 'compass4nfv', 'daisy', 'fuel', 'joid', 'osa']
net_config:
type: 'object'
- fuel:
+ compass:
type: 'object'
daisy:
type: 'object'
- compass:
+ fuel:
+ type: 'object'
+ osa:
type: 'object'
required: ['version']
additionalProperties: false
@@ -114,16 +175,20 @@ properties:
properties:
net_config:
$ref: '#/definitions/net_config/v0.1'
- fuel:
- $ref: '#/definitions/fuel/v0.1'
- daisy:
- $ref: '#/definitions/daisy/v0.1'
compass:
$ref: '#/definitions/compass/v0.1'
+ daisy:
+ $ref: '#/definitions/daisy/v0.1'
+ fuel:
+ $ref: '#/definitions/fuel/v0.1'
+ osa:
+ $ref: '#/definitions/osa/v0.1'
fuel:
required: ['net_config']
daisy:
required: ['net_config']
+ osa:
+ required: ['net_config']
# Do not allow any properties not defined here. This lets us catch typos.
additionalProperties: false
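
The dpdk section above reuses the common compute parameters through a YAML anchor and merge key. A minimal PyYAML sketch of that mechanism, reduced to two of the keys:

    import yaml

    SNIPPET = """
    common: &compute_params_common_properties
      compute_hugepages_size:
        type: string
    dpdk:
      <<: *compute_params_common_properties
      compute_dpdk_driver:
        type: string
    """

    # The merge key pulls the anchored mapping into 'dpdk', so both keys appear:
    print(yaml.safe_load(SNIPPET)['dpdk'])
    # {'compute_hugepages_size': {'type': 'string'},
    #  'compute_dpdk_driver': {'type': 'string'}}
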
diff --git a/config/utils/gen_config_lib.py b/config/utils/gen_config_lib.py
new file mode 100644
index 00000000..1e7229be
--- /dev/null
+++ b/config/utils/gen_config_lib.py
@@ -0,0 +1,224 @@
+##############################################################################
+# Copyright (c) 2018 OPNFV and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+"""Library for generate_config functions and custom jinja2 filters"""
+
+import logging
+from ipaddress import IPv4Network, IPv4Address
+
+
+def load_custom_filters(environment):
+    """Load all defined filters into the jinja2 environment"""
+
+ # TODO deprecate ipaddr_index and netmask for the better ipnet ones
+ filter_list = {
+ 'dpkg_arch': filter_dpkg_arch,
+ 'storage_size_num': filter_storage_size_num,
+ 'ipnet_hostaddr': filter_ipnet_hostaddr,
+ 'ipnet_hostmin': filter_ipnet_hostmin,
+ 'ipnet_hostmax': filter_ipnet_hostmax,
+ 'ipnet_broadcast': filter_ipnet_broadcast,
+ 'ipnet_netmask': filter_ipnet_netmask,
+ 'ipnet_contains_ip': filter_ipnet_contains_ip,
+ 'ipnet_contains_iprange': filter_ipnet_contains_iprange,
+ 'ipnet_range_size': filter_ipnet_range_size,
+ 'ipaddr_index': filter_ipaddr_index,
+ 'netmask': filter_netmask
+ }
+
+ for name, function in filter_list.items():
+ environment.filters[name] = function
+
+
+def filter_dpkg_arch(arch, to_dpkg=True):
+    """Convert processor arch to DPKG-compatible arch and vice-versa"""
+
+ # Processor architecture (as reported by $(uname -m))
+ # vs DPKG architecture mapping
+ dpkg_arch_table = {
+ 'aarch64': 'arm64',
+ 'x86_64': 'amd64',
+ }
+ arch_dpkg_table = dict(
+ zip(dpkg_arch_table.values(), dpkg_arch_table.keys()))
+
+ if to_dpkg:
+ return dpkg_arch_table[arch]
+ else:
+ return arch_dpkg_table[arch]
+
+
+def filter_storage_size_num(size_str):
+ """Convert human-readable size string to a string convertible to float"""
+
+ # pattern: '^[1-9][\d\.]*[MGT]B?$', multiplier=1000 (not KiB)
+ if size_str.endswith('B'):
+ size_str = size_str[:-1]
+ try:
+ size_num = 1000000
+ for multiplier in ['M', 'G', 'T']:
+ if size_str.endswith(multiplier):
+ return '{:.2f}'.format(size_num * float(size_str[:-1]))
+ size_num = size_num * 1000
+ return '{:.2f}'.format(float(size_str))
+ except ValueError as ex:
+ logging.error(size_str + " is not a valid size string")
+ raise
+
+
+def filter_ipnet_hostaddr(network_cidr, index):
+    """Return the host IP address at the given index of an IP network"""
+ try:
+ network_cidr_str = unicode(network_cidr)
+ except NameError as ex:
+ network_cidr_str = str(network_cidr)
+ try:
+ return IPv4Network(network_cidr_str)[index]
+ except ValueError as ex:
+ logging.error(network_cidr_str + " is not a valid network address")
+ raise
+ except IndexError as ex:
+        logging.error(network_cidr_str + " does not have enough range for "
+ + str(index) + " host IPs.")
+ raise
+
+
+def filter_ipnet_broadcast(network_cidr):
+ """Return broadcast IP address from given IP network"""
+ try:
+ network_cidr_str = unicode(network_cidr)
+ except NameError as ex:
+ network_cidr_str = str(network_cidr)
+ try:
+ return IPv4Network(network_cidr_str).broadcast_address
+ except ValueError as ex:
+ logging.error(network_cidr_str + " is not a valid network address")
+ raise
+
+
+def filter_ipnet_hostmin(network_cidr):
+ """Return the first host IP address from given IP network"""
+ try:
+ network_cidr_str = unicode(network_cidr)
+ except NameError as ex:
+ network_cidr_str = str(network_cidr)
+ try:
+ return IPv4Network(network_cidr_str)[1]
+ except ValueError as ex:
+ logging.error(network_cidr_str + " is not a valid network address")
+ raise
+
+
+def filter_ipnet_hostmax(network_cidr):
+ """Return the last host IP address from given IP network"""
+ try:
+ network_cidr_str = unicode(network_cidr)
+ except NameError as ex:
+ network_cidr_str = str(network_cidr)
+ try:
+ return IPv4Network(network_cidr_str)[-2]
+ except ValueError as ex:
+ logging.error(network_cidr_str + " is not a valid network address")
+ raise
+
+
+def filter_ipnet_netmask(network_cidr):
+ """Return the IP netmask from given IP network"""
+ try:
+ network_cidr_str = unicode(network_cidr)
+ except NameError as ex:
+ network_cidr_str = str(network_cidr)
+ try:
+ return IPv4Network(network_cidr_str).netmask
+ except ValueError as ex:
+ logging.error(network_cidr_str + " is not a valid network address")
+ raise
+
+
+def filter_ipnet_contains_ip(network_cidr, ip_address):
+    """Check if an IP network contains a given IP address"""
+ try:
+ network_cidr_str = unicode(network_cidr)
+ ip_address_str = unicode(ip_address)
+ except NameError as ex:
+ network_cidr_str = str(network_cidr)
+ ip_address_str = str(ip_address)
+ try:
+ return IPv4Address(ip_address_str) in IPv4Network(network_cidr_str)
+ except ValueError as ex:
+ logging.error(network_cidr_str + " is not a valid network address")
+ raise
+
+
+def filter_ipnet_contains_iprange(network_cidr, range_start, range_end):
+    """Check if an IP network contains a given IP range"""
+ try:
+ network_cidr_str = unicode(network_cidr)
+ range_start_str = unicode(range_start)
+ range_end_str = unicode(range_end)
+ except NameError as ex:
+ network_cidr_str = str(network_cidr)
+ range_start_str = str(range_start)
+ range_end_str = str(range_end)
+ try:
+ ipnet = IPv4Network(network_cidr_str)
+ return (IPv4Address(range_start_str) in ipnet
+ and IPv4Address(range_end_str) in ipnet)
+ except ValueError as ex:
+ logging.error(network_cidr_str + " is not a valid network address")
+ raise
+
+
+def filter_ipnet_range_size(network_cidr, range_start, range_end):
+ """Get the size of an IP range between two IP addresses"""
+ try:
+ network_cidr_str = unicode(network_cidr)
+ range_start_str = unicode(range_start)
+ range_end_str = unicode(range_end)
+ except NameError as ex:
+ network_cidr_str = str(network_cidr)
+ range_start_str = str(range_start)
+ range_end_str = str(range_end)
+ try:
+ ipnet = IPv4Network(network_cidr_str)
+ ip1 = IPv4Address(range_start_str)
+ ip2 = IPv4Address(range_end_str)
+
+ if ip1 in ipnet and ip2 in ipnet:
+ index1 = list(ipnet.hosts()).index(ip1)
+ index2 = list(ipnet.hosts()).index(ip2)
+ ip_range_size = index2 - index1 + 1
+ return ip_range_size
+ else:
+ raise ValueError
+ except ValueError as ex:
+ logging.error(range_start_str + " and " + range_end_str +
+ " are not valid IP addresses for range inside " +
+ network_cidr_str)
+ raise
+
+
+# This filter is too simple and does not take network mask into account.
+# TODO Deprecate for filter_ipnet_hostaddr
+def filter_ipaddr_index(base_address, index):
+ """Return IP address in given network at given index"""
+ try:
+ base_address_str = unicode(base_address)
+ except NameError as ex:
+ base_address_str = str(base_address)
+ return IPv4Address(base_address_str) + int(index)
+
+
+# TODO deprecate for filter_ipnet_netmask
+def filter_netmask(prefix):
+ """Get netmask from prefix length integer"""
+ try:
+ prefix_str = unicode(prefix)
+ except NameError as ex:
+ prefix_str = str(prefix)
+ return IPv4Network("1.0.0.0/"+prefix_str).netmask
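
A hypothetical usage sketch for the new filter library (run from config/utils/): register the filters on a jinja2 Environment and render an inline template; the real tool loads templates from disk instead:

    from jinja2 import Environment
    import gen_config_lib

    ENV = Environment()
    gen_config_lib.load_custom_filters(ENV)

    TEMPLATE = ENV.from_string(
        "gateway: {{ '10.0.0.0/24' | ipnet_hostaddr(1) }}\n"
        "netmask: {{ '10.0.0.0/24' | ipnet_netmask }}\n"
        "arch: {{ 'aarch64' | dpkg_arch }}")
    print(TEMPLATE.render())
    # gateway: 10.0.0.1
    # netmask: 255.255.255.0
    # arch: arm64
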
diff --git a/config/utils/generate_config.py b/config/utils/generate_config.py
index b2b52f0b..f45f7888 100755
--- a/config/utils/generate_config.py
+++ b/config/utils/generate_config.py
@@ -7,89 +7,76 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-"""This module does blah blah."""
+"""Generate configuration from PDF/IDF and jinja2 installer template"""
+
import argparse
-import ipaddress
import logging
-import os
+from os.path import abspath, exists, isfile, split
+from subprocess import CalledProcessError, check_output
+import gen_config_lib
import yaml
from jinja2 import Environment, FileSystemLoader
-from subprocess import CalledProcessError, check_output
+
PARSER = argparse.ArgumentParser()
PARSER.add_argument("--yaml", "-y", type=str, required=True)
-PARSER.add_argument("--jinja2", "-j", type=str, required=True)
+PARSER.add_argument("--jinja2", "-j", type=str, required=True, action='append')
+PARSER.add_argument("--includesdir", "-i", action='append', default=['/'])
+PARSER.add_argument("--batch", "-b", action='store_true')
+PARSER.add_argument("--verbose", "-v", action='count')
ARGS = PARSER.parse_args()
-# Processor architecture vs DPKG architecture mapping
-DPKG_ARCH_TABLE = {
- 'aarch64': 'arm64',
- 'x86_64': 'amd64',
-}
-ARCH_DPKG_TABLE = dict(zip(DPKG_ARCH_TABLE.values(), DPKG_ARCH_TABLE.keys()))
+LOADER = yaml.CSafeLoader if yaml.__with_libyaml__ else yaml.SafeLoader
+ARGS.jinja2 = [abspath(x) for x in ARGS.jinja2]
-# Custom filter to allow simple IP address operations returning
-# a new address from an upper or lower (negative) index
-def ipaddr_index(base_address, index):
- """Return IP address in given network at given index"""
- try:
- base_address_str = unicode(base_address)
- #pylint: disable=unused-variable
- except NameError as ex:
- base_address_str = str(base_address)
- return ipaddress.ip_address(base_address_str) + int(index)
+logging.basicConfig()
+LOGGER = logging.getLogger('generate_config')
+if ARGS.verbose:
+ LOGGER.setLevel(logging.INFO)
-# Custom filter to transform a prefix netmask to IP address format netmask
-def netmask(prefix):
- """Get netmask from prefix length integer"""
- try:
- prefix_str = unicode(prefix)
- except NameError as ex:
- prefix_str = str(prefix)
- return ipaddress.IPv4Network("1.0.0.0/"+prefix_str).netmask
-
-# Custom filter to convert between processor architecture
-# (as reported by $(uname -m)) and DPKG-style architecture
-def dpkg_arch(arch, to_dpkg=True):
- """Return DPKG-compatible from processor arch and vice-versa"""
- if to_dpkg:
- return DPKG_ARCH_TABLE[arch]
- else:
- return ARCH_DPKG_TABLE[arch]
-
-ENV = Environment(loader=FileSystemLoader(os.path.dirname(ARGS.jinja2)))
-ENV.filters['ipaddr_index'] = ipaddr_index
-ENV.filters['netmask'] = netmask
-ENV.filters['dpkg_arch'] = dpkg_arch
+ENV = Environment(
+ loader=FileSystemLoader(ARGS.includesdir),
+ extensions=['jinja2.ext.do']
+)
+gen_config_lib.load_custom_filters(ENV)
# Run `eyaml decrypt` on the whole file, but only if PDF data is encrypted
# Note: eyaml return code is 0 even if keys are not available
try:
- if os.path.isfile(ARGS.yaml) and 'ENC[PKCS7' in open(ARGS.yaml).read():
- DICT = yaml.safe_load(check_output(['eyaml', 'decrypt',
- '-f', ARGS.yaml]))
+ if isfile(ARGS.yaml) and 'ENC[PKCS7' in open(ARGS.yaml).read():
+ DICT = yaml.load(check_output(['eyaml', 'decrypt',
+ '-f', ARGS.yaml]), Loader=LOADER)
except CalledProcessError as ex:
- logging.error('eyaml decryption failed! Fallback to raw data.')
+ LOGGER.error('eyaml decryption failed! Fallback to raw data.')
except OSError as ex:
- logging.warn('eyaml not found, skipping decryption. Fallback to raw data.')
+ LOGGER.warn('eyaml not found, skipping decryption. Fallback to raw data.')
try:
DICT['details']
except (NameError, TypeError) as ex:
with open(ARGS.yaml) as _:
- DICT = yaml.safe_load(_)
+ DICT = yaml.load(_, Loader=LOADER)
# If an installer descriptor file (IDF) exists, include it (temporary)
-IDF_PATH = '/idf-'.join(os.path.split(ARGS.yaml))
-if os.path.exists(IDF_PATH):
+IDF_PATH = '/idf-'.join(split(ARGS.yaml))
+if exists(IDF_PATH):
with open(IDF_PATH) as _:
- IDF = yaml.safe_load(_)
+ IDF = yaml.load(_, Loader=LOADER)
DICT['idf'] = IDF['idf']
# Print dictionary generated from yaml (uncomment for debug)
# print(DICT)
-# Render template and print generated conf to console
-TEMPLATE = ENV.get_template(os.path.basename(ARGS.jinja2))
-
-#pylint: disable=superfluous-parens
-print(TEMPLATE.render(conf=DICT))
+for _j2 in ARGS.jinja2:
+ TEMPLATE = ENV.get_template(_j2)
+ OUTPUT = TEMPLATE.render(conf=DICT)
+ # Render template and write generated conf to file or stdout
+ if ARGS.batch:
+ if _j2.endswith('.j2'):
+ LOGGER.info('Parsing {}'.format(_j2))
+ with open(_j2[:-3], 'w') as _:
+ _.write(OUTPUT)
+ else:
+ LOGGER.warn('Skipping {}, name does not end in ".j2"'.format(_j2))
+ else:
+ # pylint: disable=superfluous-parens
+ print(OUTPUT)
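
For illustration, an assumed invocation of the reworked CLI using paths from this patch: --jinja2/-j may now be repeated, and --batch/-b writes each rendered file next to its template (dropping the ".j2" suffix) instead of printing to stdout:

    from subprocess import check_call

    # Equivalent to:
    #   config/utils/generate_config.py -y labs/lf/pod4.yaml \
    #       -j config/installers/fuel/pod_config.yml.j2 -b
    check_call([
        'config/utils/generate_config.py',
        '--yaml', 'labs/lf/pod4.yaml',   # PDF; labs/lf/idf-pod4.yaml is merged in if present
        '--jinja2', 'config/installers/fuel/pod_config.yml.j2',
        '--batch',                       # writes config/installers/fuel/pod_config.yml
    ])
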
diff --git a/config/utils/validate_schema.py b/config/utils/validate_schema.py
index 42f475d8..1676e15d 100755
--- a/config/utils/validate_schema.py
+++ b/config/utils/validate_schema.py
@@ -11,21 +11,20 @@ import argparse
import jsonschema
import yaml
+
PARSER = argparse.ArgumentParser()
PARSER.add_argument("--yaml", "-y", type=str, required=True)
PARSER.add_argument("--schema", "-s", type=str, required=True)
ARGS = PARSER.parse_args()
+LOADER = yaml.CSafeLoader if yaml.__with_libyaml__ else yaml.SafeLoader
with open(ARGS.yaml) as _:
- _DICT = yaml.safe_load(_)
+ _DICT = yaml.load(_, Loader=LOADER)
with open(ARGS.schema) as _:
- _SCHEMA = yaml.safe_load(_)
+ _SCHEMA = yaml.load(_, Loader=LOADER)
+
-# Draft 4 (latest supported by py-jsonschema) does not support value-based
-# decisions properly, see related github issue:
-# https://github.com/json-schema-org/json-schema-spec/issues/64
-# Workaround: build 'version_x.y: true' on the fly based on 'version: x.y'
def schema_version_workaround(node):
"""Traverse nested dictionaries and handle 'version' key where found."""
if 'version' in node:
@@ -33,9 +32,14 @@ def schema_version_workaround(node):
for item in node.items():
if type(item) is dict:
schema_version_workaround(item)
+
+# Draft 4 (latest supported by py-jsonschema) does not support value-based
+# decisions properly, see related github issue:
+# https://github.com/json-schema-org/json-schema-spec/issues/64
+# Workaround: build 'version_x.y: true' on the fly based on 'version: x.y'
schema_version_workaround(_DICT)
if 'idf' in _DICT:
- schema_version_workaround(_DICT['idf'])
+ schema_version_workaround(_DICT['idf'])
_VALIDATOR = jsonschema.Draft4Validator(_SCHEMA)
for error in _VALIDATOR.iter_errors(_DICT):
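
A short sketch of what the Draft-4 workaround does to the loaded dictionaries, using an assumed equivalent of the helper (its body is only partially visible in this hunk):

    def schema_version_workaround(node):
        """Expand 'version: x.y' into a 'version_x.y: true' sibling key."""
        if 'version' in node:
            node['version_' + str(node['version'])] = True

    _DICT = {'version': 1.0, 'idf': {'version': 0.1}}
    schema_version_workaround(_DICT)
    if 'idf' in _DICT:
        schema_version_workaround(_DICT['idf'])

    print(_DICT)
    # {'version': 1.0, 'version_1.0': True,
    #  'idf': {'version': 0.1, 'version_0.1': True}}
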
diff --git a/docs/release/scenario-lifecycle/From OS-BASIC to NOSDN-FDIO.png b/docs/release/scenario-lifecycle/From OS-BASIC to NOSDN-FDIO.png
new file mode 100644
index 00000000..8ac99508
--- /dev/null
+++ b/docs/release/scenario-lifecycle/From OS-BASIC to NOSDN-FDIO.png
Binary files differ
diff --git a/docs/release/scenario-lifecycle/From OS-BASIC to NOSDN-OVS.png b/docs/release/scenario-lifecycle/From OS-BASIC to NOSDN-OVS.png
new file mode 100644
index 00000000..15f3893d
--- /dev/null
+++ b/docs/release/scenario-lifecycle/From OS-BASIC to NOSDN-OVS.png
Binary files differ
diff --git a/docs/release/scenario-lifecycle/ODL Generic Scenarios Evolution.png b/docs/release/scenario-lifecycle/ODL Generic Scenarios Evolution.png
new file mode 100644
index 00000000..6f2e0a44
--- /dev/null
+++ b/docs/release/scenario-lifecycle/ODL Generic Scenarios Evolution.png
Binary files differ
diff --git a/docs/release/scenario-lifecycle/create-sdf.png b/docs/release/scenario-lifecycle/create-sdf.png
new file mode 100644
index 00000000..c8a44ba8
--- /dev/null
+++ b/docs/release/scenario-lifecycle/create-sdf.png
Binary files differ
diff --git a/docs/release/scenario-lifecycle/creating-scenarios.rst b/docs/release/scenario-lifecycle/creating-scenarios.rst
new file mode 100644
index 00000000..dbff9c18
--- /dev/null
+++ b/docs/release/scenario-lifecycle/creating-scenarios.rst
@@ -0,0 +1,104 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Creating Scenarios
+--------------------
+
+General
+^^^^^^^^^
+
+A new scenario needs to be created when a new combination of upstream
+components or features shall be supported that cannot be provided by the
+existing scenarios alongside their existing features.
+
+Typically new scenarios are created as children of existing scenarios.
+They start as specific scenarios and, as they mature, they either merge
+their features back into the parent or are promoted to generic scenarios.
+
+Scenario Owners
+^^^^^^^^^^^^^^^^
+
+Each scenario must have an "owner". Scenario owners have the following responsibilities:
+
+* The scenario owner is responsible for the contents and usage of the scenario.
+* He shall define the contents for the scenario deployment:
+
+ * The components and their versions that need to be deployed
+ * Options for the deployment of such components, e.g. settings, optional features, ..
+ * Which installers to use
+ * Deployment options (HA, NOHA, hardware types, ..)
+
+* He shall define the usage of the scenario in the development process:
+
+ * Initiate integration to CI
+ * Define which testcases to run
+  * Applies for the scenario to join a release
+
+* The owner maintains the Scenario Descriptor File (SDF)
+* Drives for the scenario to be supported by more installers
+
+The scenario owner of a specific scenario typically comes from the feature project
+that develops the features introduced by the scenario.
+
+The scenario owner of a generic scenario will need to drive more integration tasks than
+feature development. Thus he typically will come from a project with a broader scope
+than a single feature, e.g. a testing project.
+The scenario owner of a generic scenario needs to cover issues of all installers, so
+only in exceptional cases he will come from an installer project.
+
+Creating Generic Scenarios
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Generic scenarios provide stable and mature deployments of an OPNFV release. Therefore
+it is important to have generic scenarios in place that provide the main capabilities
+needed for NFV environments. On the other hand the number of generic scenarios needs
+to be limited because of resources.
+
+* Creation of a new generic scenario needs TSC consensus.
+* Typically the generic scenario is created by promoting an existing specific
+  scenario. Thus only the additional information needs to be provided.
+* The scenario owner needs to verify that the scenario fulfills the above requirements.
+* Since specific scenarios typically are owned by the project that initiated them,
+  and generic scenarios provide a much broader set of features, in many cases a
+  change of owner is appropriate. It will then usually make sense to assign
+  a testing expert as scenario owner.
+
+Creating Specific Scenarios
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+As already stated, typically specific scenarios are created as children of existing
+scenarios. The parent can be a generic or a specific scenario.
+
+Creation of specific scenarios shall be very easy and can be done any time. However,
+support might be low priority during final release preparation, e.g. after MS6.
+
+* The PTL of the project developing the feature(s) or integrating a component etc. can
+ request the scenario (tbd from whom: CI or release manager, no need for TSC)
+* The PTL shall provide some justification why a new scenario is needed.
+  It will be appropriate to discuss that justification in the weekly technical
+ discussion meeting.
+* The PTL should have prepared that by finding support from one of the installers.
+* The PTL should explain from which "parent scenario" (see below) the work will start,
+ and what are the planned additions.
+* The PTL shall assign a unique name. Naming rules will be set by TSC.
+* The PTL shall provide a rough time schedule: when the scenario wants to join
+  a release, when he expects the scenario to merge into other scenarios, and when
+  he expects the features to be made available in generic scenarios.
+ A scenario can join a release at the MS0 after its creation.
+* The PTL should explain the infrastructure requirements and clarify that sufficient
+ resources are available for the scenario.
+* The PTL shall assign a scenario owner.
+* The scenario owner shall maintain the scenario descriptor file according to the
+ template.
+* The scenario owner shall drive the necessary discussions with installers and testing
+ teams to get their support.
+* In case the scenario needs new keywords in the SDF, the scenario owner shall discuss
+ those with the installer teams and CI.
+* The scenario owner shall initiate the integration of the scenario in CI and
+  its participation in releases.
+* When the scenario joins a release this needs to be done in time for the relevant
+ milestones.
+
+
diff --git a/docs/release/scenario-lifecycle/current-status.rst b/docs/release/scenario-lifecycle/current-status.rst
new file mode 100644
index 00000000..c8da13a5
--- /dev/null
+++ b/docs/release/scenario-lifecycle/current-status.rst
@@ -0,0 +1,75 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Current Status
+---------------
+
+This chapter summarizes the scenario analysis to provide some background.
+It also defines the way to introduce the scenario processes.
+
+Arno
+^^^^^^^^
+
+In Arno release, the scenario concept was not created yet.
+Looking back, we can say we had one scenario with OpenStack, ODL and KVM,
+which could be deployed in two ways, by the two installers available in Arno.
+
+Brahmaputra
+^^^^^^^^^^^^^^^^
+
+In Brahmaputra, we added options for SDN (ONOS, OCL) and some optional
+features (sfc, sdnvpn, kvm, l3 enabled ODL).
+Thus we had 9 scenarios that planned to participate in the release, some of
+them deployable with 2 installers. Not all of them succeeded.
+
+Colorado
+^^^^^^^^^^^^
+
+In Colorado more components and features were added to a total of 17
+combinations of components and features. Some were supported by one
+of the four installers, others by multiple installers. In addition HA
+and NOHA options were defined.
+This led to 28 combinations that planned to participate.
+
+Danube
+^^^^^^^^^^
+
+In Danube the number of combinations of components and features increased
+to 24, but since installer support increased and more scenarios planned
+to provide HA and NOHA options, the number of combinations was 54.
+
+In addition to that, some scenarios were defined later during development
+and some scenarios worked on ARM support.
+
+This created the need to better understand relationships and
+incompatibilities of the scenarios to drive for a manageable process
+for scenarios.
+
+As a result the relationship between the scenarios can be
+visualized by a scenario tree.
+
+.. figure:: scenario-tree-danube.png
+
+The process for generic and specific scenarios is not in place for the
+Danube release yet. But the different branches of the scenario tree
+provide the candidates to define generic scenarios during the timeframe
+of the next release.
+
+Euphrates
+^^^^^^^^^^
+
+tbd: statistics on Euphrates Scenarios
+
+During the Euphrates timeframe, dynamic POD allocation is introduced in CI.
+This is a prerequisite to make use of the SDF in the CI pipeline.
+Therefore in this timeframe, scenario processes are introduced only in
+a documentation way and as support for release management.
+
+Also the definition of generic scenarios can be done.
+
+
+
+
+
diff --git a/docs/release/scenario-lifecycle/deployment-options.rst b/docs/release/scenario-lifecycle/deployment-options.rst
new file mode 100644
index 00000000..2c0a3429
--- /dev/null
+++ b/docs/release/scenario-lifecycle/deployment-options.rst
@@ -0,0 +1,128 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Deployment Options
+-------------------
+
+What are deployment options?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. Editors note: Some installers call it settings. Prefer options, because it allows
+.. cases with multiple options.
+
+During the analysis of scenario definitions in the Colorado and Danube releases, it
+became apparent that HA and NOHA deployments of otherwise identical scenarios
+shouldn't be called different scenarios.
+
+This understanding leads to the definition of another kind of attributes
+in scenario definitions. Many scenarios can be deployed in different ways:
+
+* **HA** configuration of OpenStack modules (that is redundancy using multiple
+ controllers running OpenStack services) versus NOHA with only a single controller
+ running a single instance of each OpenStack service
+* Some scenarios can be deployed on Intel and on ARM **hardware**.
+* We can see the **installation tools** in the same way. Independent of the installer
+ that was used for the deployment of a scenario, the same functionality will be
+ provided and we can run the same testcases.
+
+Please note that a scenario can support multiple deployment options. And a scenario
+definition must specify at least one option of each type.
+
+In the future there will be more deployment options, e.g. redundancy models or other
+clustering options of SDN controllers, or upscaling compute or control nodes.
+
+The CI pipeline needs to test all configuration options of a scenario.
+
+* Development cycles (verify-jobs, daily, weekly) don't need to run all
+ options each time
+* Release testing must cover all those combinations of configuration options that
+ will be part of the release. Typically the HA configurations are released on
+ bare metal with the allowed hardware options and all installers that can deploy
+  those. Release of a NOHA option should be an exception, e.g. for scenarios
+  that are not mature yet.
+* Virtual deployments are not mentioned here. All scenarios should allow virtual
+ deployment where applicable.
+ But in release testing, bare metal deployment will be necessary.
+ CI will use virtual deployments as much as appropriate for resource reasons.
+
+
+Deployment options or new scenarios
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In general we can say that a different scenario is needed when the set of components
+is changed (or in some cases a general deploy-time configuration of a component). If
+we deploy the same components in a different way, we can define this via deployment
+options.
+
+**Examples**
+
+* Deploying a different SDN controller or data plane (OVS/FD.IO) requires a
+  different scenario.
+* HA/NOHA will deploy the same components on different number of nodes, so it is a
+ deployment option.
+* Different hardware types should not lead to new scenarios. Typically the same
+ scenario can be deployed on multiple hardware.
+
+
+HA and NOHA
+^^^^^^^^^^^^^
+
+Both HA and NOHA options of a scenario are important.
+
+* HA deployment is important to be released in major OPNFV releases, because
+ telco deployments typically have strong requirements on availability.
+* NOHA deployments require fewer resources and are sufficient for many use cases.
+  For instance, sandbox testing becomes easier, and automatic verification
+  in the CI pipeline can make use of it.
+* Generic scenarios shall support the HA and NOHA option.
+* Specific scenarios can focus on the NOHA option if their features are independent
+ from the controller redundancy. But before merging with generic scenarios, they
+ should provide both options.
+
+
+Hardware types
+^^^^^^^^^^^^^^^^^
+
+In its first releases, OPNFV could be deployed on Intel hardware only. Later, support
+for ARM hardware was added and now 5 scenarios can already be deployed on both.
+
+
+Virtual deployment
+^^^^^^^^^^^^^^^^^^^^^^
+
+Many, but not all scenarios can be deployed on virtual PODs. Therefore the scenario
+definition shall specify whether virtual deployment is possible.
+
+Typically a virtual HA deployment shall look very much the same as a bare-metal HA
+deployment, that is, the distribution of modules on nodes/VMs is similar. But there
+might be cases where there are differences. Thus, the scenario specification needs
+to provide the data for each separately.
+
+
+Deployment tools
+^^^^^^^^^^^^^^^^^^^
+
+Deployment tools (installers) are in a very similar relation to the scenarios.
+Each scenario can be deployed by one or more installers. Thus we can specify the
+installers for a scenario as a deployment option.
+
+However, the installers need additional detailed information for the deployment.
+Not every installer supports the same HA, hardware, or virtualization options,
+or the same distribution of modules. Each deployment may look slightly different
+per installer.
+
+The scenario definition needs to provide such information in a way it can be easily
+consumed by the installers.
+
+
+
+Other deployment options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This set of deployment options is based on what is required by Danube scenarios.
+Future releases will most likely introduce additional deployment options.
+
+
+
diff --git a/docs/release/scenario-lifecycle/feature-compatibility-nosdn.png b/docs/release/scenario-lifecycle/feature-compatibility-nosdn.png
new file mode 100644
index 00000000..09520aa1
--- /dev/null
+++ b/docs/release/scenario-lifecycle/feature-compatibility-nosdn.png
Binary files differ
diff --git a/docs/release/scenario-lifecycle/feature-compatibility-odl.png b/docs/release/scenario-lifecycle/feature-compatibility-odl.png
new file mode 100644
index 00000000..600082a6
--- /dev/null
+++ b/docs/release/scenario-lifecycle/feature-compatibility-odl.png
Binary files differ
diff --git a/docs/release/scenario-lifecycle/generic-scenarios.rst b/docs/release/scenario-lifecycle/generic-scenarios.rst
new file mode 100644
index 00000000..f159c0c9
--- /dev/null
+++ b/docs/release/scenario-lifecycle/generic-scenarios.rst
@@ -0,0 +1,53 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Generic Scenarios
+------------------
+
+Generic scenarios provide a stable environment for users who want to base their
+products on them.
+
+* Generic scenarios provide a basic combination of upstream components together
+ with the superset of possible mature features that can be deployed on them.
+* Generic scenarios should be supported by all installers.
+* All generic scenarios in a release should have the same common major versions
+ of the included upstream components.
+ These upstream versions can then be seen as the upstream versions for the
+ release. E.g. that way we can say: “OPNFV xxx contains OpenStack abc,
+  ODL def, ONOS ghi, OVS jkl”.
+ But most installers cannot directly reference any
+ upstream version. This may lead to minor differences.
+  Nevertheless, features and test cases require all installers to use the same
+ major versions.
+* Generic scenarios should use stable sources
+ and lock the versions before the release by either pointing to a tag or sha1.
+ According to the LF badging program it should be possible to reproduce
+ the release from source again.
+ Thus the upstream repos should be in safe locations.
+ Also only tagged source versions should be used for the release, so the
+ release can be reproduced identically for different purposes such as
+  reproducing a bug reported by users and issuing the fix appropriately,
+ even after the upstream project has applied patches.
+ .. Editors note: There is discussion ongoing in INFRA and SEC working groups how
+ .. to realize this. Thus the description is still a bit vague. Details will be
+ .. added later either here or in some INFRA document.
+* Generic scenarios should be stable and mature. Therefore they will be tested more
+ thoroughly and run special release testing so a high level of stability can be
+ provided.
+* Generic scenarios will live through many OPNFV releases.
+* More resources will be allocated to maintaining generic scenarios and they will
+ have priority for CI resources.
+ .. Editors note: Discussion ongoing in INFRA about toolchain issues.
+
+Note: in some cases it might be difficult for an installer to support all generic
+scenarios immediately. In this case an exception can be defined, but the installer
+has to provide a plan for how to achieve support for all generic scenarios.
+
+Note: in some cases, upstream projects don't have a proper CI process with
+tagged stable versions. Also, some installers' way of working doesn't allow
+selecting the repo and tag. Thus a stepwise approach will be necessary to
+fulfill this requirement.
+
+
diff --git a/docs/release/scenario-lifecycle/index.rst b/docs/release/scenario-lifecycle/index.rst
new file mode 100644
index 00000000..c1a9a528
--- /dev/null
+++ b/docs/release/scenario-lifecycle/index.rst
@@ -0,0 +1,24 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+**********************
+Scenario Lifecycle
+**********************
+
+Contents:
+
+.. toctree::
+ :numbered:
+ :maxdepth: 4
+
+ scenario-overview.rst
+ generic-scenarios.rst
+ specific-scenarios.rst
+ parent-child-relations.rst
+ creating-scenarios.rst
+ deployment-options.rst
+ mano-scenarios.rst
+ current-status.rst
+ scenario-descriptor-files.rst
+ workflows.rst
diff --git a/docs/release/scenario-lifecycle/mano-scenarios.rst b/docs/release/scenario-lifecycle/mano-scenarios.rst
new file mode 100644
index 00000000..0eee1431
--- /dev/null
+++ b/docs/release/scenario-lifecycle/mano-scenarios.rst
@@ -0,0 +1,31 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+MANO Scenarios
+---------------
+
+Since OPNFV organizes all deployments using scenarios, MANO components also need
+to be covered by scenarios.
+
+On the other side, all NFVI/VIM-level scenarios need to be orchestrated using a
+set of components from the NFVO and VNFM layers.
+
+The idea here is therefore to specify for a MANO scenario:
+
+* The MANO components to deploy
+* A list of supported NFVI/VIM level scenarios that can be orchestrated
+ using this MANO scenario.
+
+The MANO test cases will define the VNFs to use.
+
+MANO scenarios will have more work to do if they require new nodes to deploy on.
+They should include this aspect in their resource planning/requests and contact
+Infra/Pharos in case a change of the Pharos spec is needed and new PODs need
+to be made available based on the amended spec.
+
+More details need to be investigated as we gain experience with the MANO scenarios.
+
+
+
diff --git a/docs/release/scenario-lifecycle/parent-child-relations.rst b/docs/release/scenario-lifecycle/parent-child-relations.rst
new file mode 100644
index 00000000..ca156190
--- /dev/null
+++ b/docs/release/scenario-lifecycle/parent-child-relations.rst
@@ -0,0 +1,62 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Parent - Child Relations
+-------------------------
+
+In many cases, development adds a feature to an existing scenario by adding additional
+components. This is called creating a child scenario from a parent.
+
+* Parent scenarios typically are more stable than children.
+* Children should plan to merge their feature back to the parent.
+* Merge back will often add components to the parent.
+
+.. figure:: parent-child.png
+
+* Child scenarios can be part of releases.
+* Child scenarios should merge back to their parent after 2 releases.
+* If a child scenario lives through several releases, it might be desirable
+  to “rebase/cherrypick” it to follow changes in the parent scenario.
+* Child scenarios typically support a smaller number of deployment options than
+  their parent.
+
+Child scenarios are specific scenarios. Parent scenarios can be generic or specific
+scenarios.
+
+Child scenarios can be created any time. If they want to join a release, they have
+to be created before MS0 of that release.
+
+
+Siblings
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In some cases it could make more sense to create a sibling rather than a child,
+e.g. if it is expected that merging back to the parent will be difficult
+because the content of the new scenario is incompatible with the content
+of the parent scenario.
+In that case, the new scenario should become a new branch instead of
+merging back to the parent.
+
+.. figure:: sibling.png
+
+Typically the sibling uses alternative components/solutions to those of the
+parent; in the long term it might evolve into a new generic scenario, that is,
+a new branch in the scenario tree.
+
+Creation of the sibling shall not be gated. It should be covered in the scope of
+an approved project, so there should be no big surprises.
+
+But at a certain point the new scenario will want to change its status from a
+specific scenario to a generic scenario. This move will need TSC approval.
+As part of the application, the scenario owner shall demonstrate that the scenario
+fulfills the requirements of a generic scenario (see later).
+
+Examples: SDN controller options, container technologies, data plane solutions,
+MANO solutions.
+
+Please note that from time to time, the TSC will need to review the
+set of generic scenarios and "branches" in the scenario tree.
+
+
diff --git a/docs/release/scenario-lifecycle/parent-child.png b/docs/release/scenario-lifecycle/parent-child.png
new file mode 100644
index 00000000..2f711496
--- /dev/null
+++ b/docs/release/scenario-lifecycle/parent-child.png
Binary files differ
diff --git a/docs/release/scenario-lifecycle/pdf-and-sdf.png b/docs/release/scenario-lifecycle/pdf-and-sdf.png
new file mode 100644
index 00000000..729c5a44
--- /dev/null
+++ b/docs/release/scenario-lifecycle/pdf-and-sdf.png
Binary files differ
diff --git a/docs/release/scenario-lifecycle/scenario-descriptor-files.rst b/docs/release/scenario-lifecycle/scenario-descriptor-files.rst
new file mode 100644
index 00000000..b6c44f75
--- /dev/null
+++ b/docs/release/scenario-lifecycle/scenario-descriptor-files.rst
@@ -0,0 +1,228 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Scenario Descriptor Files
+----------------------------
+
+What are Scenario Descriptor Files?
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Every scenario is described in its own descriptor file.
+The scenario descriptor file will be used by several parties:
+
+* Installer tools will read from it the list of components to be installed
+ and the configuration (e.g. deployment options and necessary details) to use.
+* The dynamic CI process will read from it the prerequisites of the scenario
+  to select a resource that has the capabilities needed for the deployment.
+  It will also select the installer from the list of supported installers,
+  together with the other deployment options, in a combination they support.
+
+ The dynamic CI process will provide the installers with the deployment option
+ to use for a particular deployment.
+
+* The scenario owner needs to provide the descriptor file.
+
+  When compiling it, the scenario owner typically needs to work together with
+  the installer teams, so that the installers will support the required
+  components and options.
+* The testing framework can read from the scenario descriptor file necessary
+ information to know which features can be tested on the scenario.
+* The scenario descriptor file will also contain some maintenance information.
+
+
+Structure of the file
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The scenario descriptor file is a yaml file. The syntax will allow specifying
+additional descriptor files, to improve readability or to share common
+configurations across multiple scenarios.
+
+The file has following main sections:
+
+* metadata (owner, history, description)
+* list of components (names, versions, submodules)
+* deployment options (HA/NOHA, hardware&virtualization, installers, including
+ possible combinations and necessary details)
+* other prerequisites (e.g. memory requirement more than pharos spec)
+* list of features to be tested
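+
+As a rough illustration, the overall skeleton could look as follows; the
+section names are indicative only, since the exact syntax is still to be
+defined:
+
+::
+
+    metadata:
+      ...
+    components:
+      ...
+    deployment-options:
+      ...
+    prerequisites:
+      ...
+    testcases:
+      ...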
+
+More information will be provided in the next version of this document. The file
+will be defined based on the installer-specific files for scenario specification
+used by the four installers in the Danube release. This will ensure that the
+information needed by the installers is covered.
+
+All scenario files will be stored in a central repo, e.g. Octopus. There will
+also be a commented template to help create scenario descriptor files.
+
+
+Metadata
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In the Danube timeframe, only the Fuel installer has some metadata in its
+descriptor file. The new template contains:
+
+* Unique name
+
+  This is a free-form name; the recommendation is to use fish names, matching
+  the OPNFV release naming scheme based on rivers.
+
+* A free text title
+
+  This should be a short, descriptive text stating the main purpose of the scenario.
+
+* A version number for the descriptor file
+
+  Three digits, separated by dots, as used by Fuel in Danube.
+
+* Creation date
+* Comment
+
+ The file should contain a clear description of the purpose of the scenario,
+ including the main benefits and major features.
+ If applicable, the parent scenario should be mentioned.
+
+* First OPNFV version to use the scenario
+* Author/Owner
+
+* A list of additional contact persons, e.g. from installers or major components
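+
+A metadata section following this template could look as shown below; all
+key names and values are illustrative:
+
+::
+
+    metadata:
+      name: herring
+      title: OpenStack with OpenDaylight and feature X
+      version: 1.0.0
+      created: 2017-06-01
+      comment: Child of the generic ODL scenario, adding feature X.
+      first-release: Euphrates
+      owner: Jane Doe
+      contacts:
+        - John Doe (installer team)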
+
+Components
+^^^^^^^^^^^^^^^^
+
+In this section all components are listed together with their versions.
+For some components, submodules can be listed in addition.
+
+More details will be added.
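+
+A minimal sketch of such a component list; the names, versions and key
+names are illustrative:
+
+::
+
+    components:
+      - name: openstack
+        version: ocata
+        submodules:
+          - nova
+          - neutron
+      - name: opendaylight
+        version: carbon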
+
+
+Deployment options
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This section will list the supported deployment options. In each category at least
+one option must be supported.
+
+* hardware (CPU) types (Intel or ARM)
+* virtualization (bare-metal or vPOD)
+* availability (HA or NOHA)
+
+  This subsection also needs to specify what an HA deployment needs, e.g.:
+
+::
+
+   availability:
+
+     - type: HA
+       nodes:
+         - name: host1
+           roles:
+             - openstack-controller
+             - odl
+             - ceph-adm
+             - ceph-mon
+         - name: host2
+           roles:
+             - openstack-controller
+             - odl
+             - ceph-adm
+             - ceph-mon
+         - name: host3
+           roles:
+             - openstack-controller
+             - odl
+             - ceph-adm
+             - ceph-mon
+         - name: host4
+           roles:
+             - openstack-compute
+             - ceph-osd
+         - name: host5
+           roles:
+             - openstack-compute
+             - ceph-osd
+     - type: NOHA
+       nodes:
+         - name: host1
+           roles:
+             - openstack-controller
+             - odl
+             - ceph-adm
+             - ceph-mon
+         - name: host2
+           roles:
+             - openstack-compute
+             - ceph-osd
+         - name: host3
+           roles:
+             - openstack-compute
+             - ceph-osd
+
+
+
+* deployment tool (apex, compass, fuel, daisy, joid)
+
+  In the section for each deployment tool, the supported combinations of the
+  first three options have to be listed, e.g.:
+
+::
+
+ deployment-tools:
+
+ - type: fuel
+ cpu: intel
+ pod: baremetal
+ availability: HA
+ - type: fuel
+ cpu: intel
+ pod: virtual
+ availability: HA
+ - type: fuel
+ cpu: intel
+ pod: virtual
+ availability: NOHA
+
+Please note that this allows easy definition of other availability options
+including scaling and redundant configuration of SDN controllers.
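+
+For instance, a scaled SDN controller setup could be expressed as just
+another availability type; the type name and layout below are purely
+illustrative:
+
+::
+
+    availability:
+
+      - type: HA-SCALE
+        nodes:
+          - name: host1
+            roles:
+              - openstack-controller
+              - odl
+          - name: host2
+            roles:
+              - odl
+          - name: host3
+            roles:
+              - odl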
+
+
+Prerequisites
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+This section will list additional prerequisites. Currently there is only
+one case where a scenario has additional prerequisites beyond the Pharos
+spec, e.g. a component could require more RAM on the nodes than the Pharos
+spec defines.
+In general it should be preferred to issue such requirements to Pharos
+using the Pharos change request process, but in some cases it might be
+better to specify additional prerequisites.
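+
+A minimal sketch of such a prerequisite; the key name is hypothetical, as
+the exact syntax is not yet defined:
+
+::
+
+    prerequisites:
+      # more RAM per node than the Pharos spec requires
+      node-memory: 64G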
+
+Another use case for these prerequisites will be the usage of specialized
+hardware, e.g. for acceleration. This needs further study.
+
+The section can be empty or omitted.
+
+
+Testcases
+^^^^^^^^^^^^^^^^
+
+This section will provide information for functest and yardstick to decide
+on the proper test cases for the scenario.
+
+More details will be added.
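+
+For illustration only, since the exact syntax is not yet defined; the key
+names and feature names are hypothetical:
+
+::
+
+    testcases:
+      - framework: functest
+        features:
+          - odl
+      - framework: yardstick
+        features:
+          - ha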
+
+
+Shared settings
+^^^^^^^^^^^^^^^^
+
+This descriptor file might get quite long and complex. Also some of the settings
+will be shared between several scenarios, e.g. a long OpenStack module list.
+
+Therefore it shall be possible to reference another file, like a macro.
+In that case, the content of the referenced file is included at that point, e.g.:
+
+::
+
+ availability:
+
+ - type: HA
+ file: odl-ha-configuration.yaml
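+
+The referenced file would then carry the expanded settings, e.g. the full HA
+node/role list shown earlier; the content below is illustrative:
+
+::
+
+    # odl-ha-configuration.yaml
+    nodes:
+      - name: host1
+        roles:
+          - openstack-controller
+          - odl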
+
+
diff --git a/docs/release/scenario-lifecycle/scenario-overview.rst b/docs/release/scenario-lifecycle/scenario-overview.rst
new file mode 100644
index 00000000..4a7ff7a0
--- /dev/null
+++ b/docs/release/scenario-lifecycle/scenario-overview.rst
@@ -0,0 +1,166 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+.. Scenario Lifecycle
+.. ==========================================
+
+Note: This document is still a work in progress.
+
+Overview
+-------------
+
+Problem Statement:
+^^^^^^^^^^^^^^^^^^^
+
+OPNFV provides the NFV reference platform in different variants, where
+each variant is called a "scenario".
+
+OPNFV introduces scenarios in order to provide a way to deploy the stack
+using different combinations of upstream components, or to provide
+different sets of pre-defined configuration options for these
+components.
+
+In some cases a scenario is introduced in order to provide isolation of
+a specific development effort from other ongoing development efforts,
+similar to the purpose of a branch in a code repository.
+
+A certain amount of effort and resources is required in order to include
+a scenario in a release. The number of scenarios has increased over
+time, so it is necessary to identify ways to manage scenarios and to
+prevent their number from growing indefinitely. To enable this, we have
+to clearly define how to handle the lifecycle of scenarios, i.e. how to
+create them, how to terminate them, etc.
+
+
+Scenario types:
+^^^^^^^^^^^^^^^^^^^
+Some OPNFV scenarios have an experimental nature, since they introduce
+new technologies or features that are not yet mature or well integrated
+enough to provide a stable release. Nevertheless there also needs to be
+a way to provide the user with the opportunity to try these new features
+in an OPNFV release context.
+
+Other scenarios are used to provide stable environments for users
+desiring a certain combination of upstream components or interested in
+particular capabilities or use cases.
+
+The new OPNFV scenario lifecycle process proposed herein will support
+this by defining two types of scenarios:
+
+* **Generic scenarios** cover a stable set of common features provided
+  by different components and target long-term usage and maintenance of
+  the scenario. Only stable versions of upstream components are allowed to
+  be deployed in a generic scenario. Across all generic scenarios in a
+  given OPNFV release, the same version of a given upstream component
+  should be deployed. Creation of generic scenarios and promotion of a
+  specific scenario to generic require TSC approval, see section 5.
+  Generic scenarios will get priority over specific scenarios in terms of
+  maintenance effort and CI resources.
+
+* **Specific scenarios** are needed during development to introduce new
+  upstream components or new features. They are typically derived from a
+  generic scenario and are intended to bring their features back into the
+  parent generic scenario once they are mature enough. It is also possible
+  that multiple specific scenarios are merged before bringing them back to
+  the parent scenario, for example in order to test and develop the
+  integration of two specific features in isolation. Specific scenarios
+  can consume unreleased upstream versions or apply midstream patches.
+  Creation of specific scenarios is not gated, but if a project intends to
+  release a specific scenario, it has to indicate that in its release plan
+  at milestone MS1. The scenario itself can be created at any time, by
+  means of a simple request by a PTL to the release manager.
+
+OPNFV scenarios are deployed using one of the OPNFV installer tools.
+Deploying a scenario will normally be supported by multiple installers.
+The capabilities provided by the resulting deployments should be
+identical. The set of tests to run and their results should be the same,
+independent of the installer that was used. Performance or other
+behavioral aspects outside the scope of existing OPNFV tests could be
+different.
+
+
+Parent-child and sibling relations:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When a developer decides to define a new scenario, they will typically
+take one of the existing scenarios and make some changes, such as:
+
+* add additional components
+* change a deploy-time configuration
+* use a component in a more experimental version or with midstream
+  patches applied
+
+In this case the already existing scenario is called a "parent" and the
+new scenario would be a "child".
+
+Typically parent scenarios are generic scenarios, but it is possible to
+derive from specific scenarios as well. It is expected that the child
+scenario develops its additions over some time up to a sufficient
+maturity, and then merges back to the parent. This way a continuous
+evolution of the generic scenarios as well as a manageable overall
+number of scenarios is ensured.
+
+In some cases a child scenario will diverge from its parent in a way
+that cannot easily be combined with the parent. Therefore, it is also
+possible to "promote" a scenario from specific to generic. If this is
+foreseeable upfront, the specific scenario can also be derived as a
+sibling rather than a child.
+
+Promoting a scenario from specific to generic or creating a new generic
+scenario requires TSC approval. This document defines a process for
+this, see section 5.
+
+
+Scenario deployment options:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Many OPNFV scenarios can be deployed in different variants that do not
+justify creation of separate scenarios. An example would be HA (high
+availability) or non-HA configuration of otherwise identical scenarios.
+HA configurations deploy some components according to a redundancy
+model. Deployment options will also be used if the same scenario can be
+deployed on multiple types of hardware, e.g. Intel and ARM.
+
+In these cases multiple deployment options are defined for the same
+scenario. The set of distinguishable deployment option types (e.g.
+redundancy, processor architecture, etc.) will be pre-determined and
+each scenario will have to define at least one option for each option
+type.
+
+It is emphasized that virtual deployments vs. bare-metal deployments are
+intentionally not considered as deployment options. This should be a
+transparent feature of the installer based on the same scenario
+definition.
+
+For generic scenarios, there are certain expectations on the set of
+supported deployment options, e.g. a generic scenario should support at
+least an HA deployment and preferably both HA and non-HA.
+
+
+Scenario descriptor file:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Every scenario will be described in a scenario descriptor yaml file.
+This file shall contain all the necessary information for its different users,
+such as the installers (which components to deploy etc.) and
+the CI process (resource requirements in order to identify the right POD,
+machines, etc.).
+
+The scenario descriptor file will also document which installers
+can be used for a scenario and how the CI process can trigger its automatic
+deployment via one of the supported installers.
+
+
+MANO scenarios:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+In early OPNFV releases, scenarios covered components of the infrastructure,
+that is NFVI and VIM.
+With the introduction of MANO, an additional dimension for scenarios is needed.
+The same MANO components need to be used together with each of the infrastructure
+scenarios. Thus MANO scenarios will define the MANO components and a list of
+infrastructure scenarios to work with. Please note that MANO scenarios follow
+the same lifecycle and rules for generic and specific scenarios as the
+infrastructure scenarios.
+
diff --git a/docs/release/scenario-lifecycle/scenario-tree+idea.png b/docs/release/scenario-lifecycle/scenario-tree+idea.png
new file mode 100644
index 00000000..b6d4d8ac
--- /dev/null
+++ b/docs/release/scenario-lifecycle/scenario-tree+idea.png
Binary files differ
diff --git a/docs/release/scenario-lifecycle/scenario-tree-danube.png b/docs/release/scenario-lifecycle/scenario-tree-danube.png
new file mode 100644
index 00000000..54c111e1
--- /dev/null
+++ b/docs/release/scenario-lifecycle/scenario-tree-danube.png
Binary files differ
diff --git a/docs/release/scenario-lifecycle/scenario-tree.png b/docs/release/scenario-lifecycle/scenario-tree.png
new file mode 100644
index 00000000..619b5a34
--- /dev/null
+++ b/docs/release/scenario-lifecycle/scenario-tree.png
Binary files differ
diff --git a/docs/release/scenario-lifecycle/sibling.png b/docs/release/scenario-lifecycle/sibling.png
new file mode 100644
index 00000000..82d48052
--- /dev/null
+++ b/docs/release/scenario-lifecycle/sibling.png
Binary files differ
diff --git a/docs/release/scenario-lifecycle/specific-scenarios.rst b/docs/release/scenario-lifecycle/specific-scenarios.rst
new file mode 100644
index 00000000..5f426e7d
--- /dev/null
+++ b/docs/release/scenario-lifecycle/specific-scenarios.rst
@@ -0,0 +1,34 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Specific Scenarios
+------------------
+
+Specific scenarios are used for OPNFV development and help to isolate a path of development.
+
+* Specific scenarios typically focus on a feature or topic.
+* Specific scenarios allow development of their main feature to advance without
+  destabilizing other features.
+* Specific scenarios provide additional flexibility in their handling, to keep
+  the development agile.
+* Specific scenarios can use new versions of their main upstream component or even
+  apply midstream patches during OPNFV deployment, i.e. the deployable artifact
+  is created via cross-community CI or even only in OPNFV and not upstream.
+* Specific scenarios should have a limited lifetime. After a few releases, the
+  feature development should have matured and the feature should be made available
+  in different configurations if possible. Typically the scenario should then be
+  merged with other scenarios, ideally with generic scenarios.
+* Normally specific scenarios will be released within the major OPNFV releases. But
+ they don't need to fulfill maturity requirements (stable upstream versions and repos,
+ stability testing), and can deviate in the used upstream versions.
+* In exceptional cases we might release a specific scenario independently, if there
+  is a need. Thus specific scenarios provide a way to a more DevOps-like process.
+* Specific scenarios will likely have a shorter support period after release, as
+  they are of interest to a smaller user community than generic scenarios.
+* They will be granted less CI resources than generic scenarios, e.g. for periodic
+ CI jobs.
+* We may need to prioritize resources post-release for maintenance / regression testing.
+
+
diff --git a/docs/release/scenario-lifecycle/workflows.rst b/docs/release/scenario-lifecycle/workflows.rst
new file mode 100644
index 00000000..c07b0f70
--- /dev/null
+++ b/docs/release/scenario-lifecycle/workflows.rst
@@ -0,0 +1,70 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2017 OPNFV Ulrich Kleber (Huawei)
+
+
+Workflows
+----------
+
+Summary
+^^^^^^^^
+
+The general principle can be summarized by the following diagram:
+
+.. figure:: pdf-and-sdf.png
+
+Workflows for Scenario Owners
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The scenario owner creates the descriptor file based on the template.
+
+.. figure:: create-sdf.png
+
+Create new scenario from scratch
++++++++++++++++++++++++++++++++++++++++++++++++++
+
+This workflow will be exceptional.
+Most scenarios can more easily start as children of an existing scenario;
+thus the author (scenario owner) can derive the SDF from the parent.
+But scenarios introducing new technologies affecting the whole architecture,
+e.g. containers, or higher-level scenarios (e.g. MANO and Multisite, which
+reference existing scenarios) can start without a parent.
+
+The following steps need to be done:
+
+  #. (Project team) Define set of components that need to be deployed
+  #. (Project) Find installers that can deploy the components
+  #. (Project&installer&CI) Agree on new keywords in SDF (e.g. component, feature name)
+  #. (Project) Assign owner
+  #. (Owner) Edit SDF, submit to Octopus repo
+  #. (Owner) Register scenario to participate in release as appropriate
+  #. (Owner&CI-team) Adapt Jenkins triggers, so the new scenario can be scheduled in valid installer/POD/options combination(s)
+  #. (Installer-team) Test deployment of components
+  #. (Project-team) Define test cases; register them in the test DB
+
+Create child scenario by adding feature to existing scenario
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Add additional installer to a specific scenario
+++++++++++++++++++++++++++++++++++++++++++++++++
+
+Add additional hardware or availability option to a scenario
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Merge child scenario back to parent
+++++++++++++++++++++++++++++++++++++
+
+Promote specific scenario to generic scenario
+++++++++++++++++++++++++++++++++++++++++++++++
+
+Introduce SDF for existing Danube/Euphrates scenarios
+++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+
+Workflows for Installers
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Workflows for CI Tools
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+
diff --git a/labs/arm/idf-pod6.yaml b/labs/arm/idf-pod6.yaml
index 80e49d1f..d9f9e735 100644
--- a/labs/arm/idf-pod6.yaml
+++ b/labs/arm/idf-pod6.yaml
@@ -30,7 +30,7 @@ idf:
network: 10.2.0.0
mask: 24
private:
- interface: 1
+ interface: 2
vlan: 2325
network: 10.1.0.0
mask: 24
@@ -81,3 +81,29 @@ idf:
busaddr: *busaddr
- interfaces: *interfaces
busaddr: *busaddr
+ reclass:
+ node:
+ - compute_params: &compute_params
+ common:
+ nova_cpu_pinning: &nova_cpu_pinning_common "1,2,3,4,5,6,7,8,9,10,11,12"
+ compute_hugepages_size: 2M
+ compute_hugepages_count: 8192
+ compute_hugepages_mount: /mnt/hugepages_2M
+ compute_kernel_isolcpu: *nova_cpu_pinning_common
+ dpdk:
+ nova_cpu_pinning: "5-7,13-15"
+ compute_hugepages_size: 2M
+ compute_hugepages_count: 8192
+ compute_hugepages_mount: /mnt/hugepages_2M
+ compute_kernel_isolcpu: 2,3,5,6,7,10,11,13,14,15
+ compute_dpdk_driver: vfio
+ compute_ovs_pmd_cpu_mask: "0xc04"
+ compute_ovs_dpdk_socket_mem: "2048"
+ compute_ovs_dpdk_lcore_mask: "0x8"
+ compute_ovs_memory_channels: "2"
+ dpdk0_driver: vfio-pci
+ dpdk0_n_rxq: 2
+ - compute_params: *compute_params
+ - compute_params: *compute_params
+ - compute_params: *compute_params
+ - compute_params: *compute_params
diff --git a/labs/lf/idf-pod4.yaml b/labs/lf/idf-pod4.yaml
new file mode 100644
index 00000000..09939d2b
--- /dev/null
+++ b/labs/lf/idf-pod4.yaml
@@ -0,0 +1,74 @@
+##############################################################################
+# Copyright (c) 2018 Linux Foundation, Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### LF POD 4 installer descriptor file ###
+
+idf:
+ version: 0.1
+ net_config:
+ oob:
+ interface: 0
+ ip-range: 172.30.8.83-172.30.8.88
+ vlan: 410
+ admin:
+ interface: 0
+ vlan: native
+ network: 192.168.12.0
+ mask: 24
+ mgmt:
+ interface: 1
+ vlan: 450
+ network: 192.168.3.0
+ mask: 24
+ storage:
+ interface: 2
+ vlan: 451
+ network: 192.168.4.0
+ mask: 24
+ private:
+ interface: 1
+ vlan: 452
+ network: 192.168.5.0
+ mask: 24
+ public:
+ interface: 2
+ vlan: 414
+ network: 172.30.12.64
+ mask: 26
+ gateway: 172.30.12.1
+ dns:
+ - 8.8.8.8
+ - 8.8.4.4
+ fuel:
+ jumphost:
+ bridges:
+ admin: 'pxebr'
+ mgmt: 'br-ctl'
+ private: ~
+ public: ~
+ network:
+ node:
+ # Ordered-list, index should be in sync with node index in PDF
+ - interfaces: &interfaces
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'eno1'
+ - 'eno3'
+ - 'eno4'
+ busaddr: &busaddr
+ # Bus-info reported by `ethtool -i ethX`
+ - '0000:04:00.0'
+ - '0000:02:00.0'
+ - '0000:02:00.1'
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
diff --git a/labs/lf/idf-pod5.yaml b/labs/lf/idf-pod5.yaml
index fb93fde9..c1af4ed8 100644
--- a/labs/lf/idf-pod5.yaml
+++ b/labs/lf/idf-pod5.yaml
@@ -18,30 +18,32 @@ idf:
admin:
interface: 0
vlan: native
- network: 10.20.0.128
- mask: 25
- gateway: 10.20.0.129
- dns: 10.20.0.129
+ network: 192.168.11.0
+ mask: 24
mgmt:
interface: 1
vlan: 450
- network: 192.168.0.128
- mask: 25
+ network: 192.168.0.0
+ mask: 24
storage:
interface: 2
vlan: 451
- network: 192.168.1.128
- mask: 25
+ network: 192.168.1.0
+ mask: 24
private:
interface: 1
vlan: 452
- network: 192.168.2.128
- mask: 25
+ network: 192.168.2.0
+ mask: 24
public:
interface: 2
vlan: 415
network: 172.30.13.64
mask: 26
+ gateway: 172.30.13.1
+ dns:
+ - 8.8.8.8
+ - 8.8.4.4
fuel:
jumphost:
bridges:
diff --git a/labs/lf/pod4.yaml b/labs/lf/pod4.yaml
new file mode 100644
index 00000000..2163c9c4
--- /dev/null
+++ b/labs/lf/pod4.yaml
@@ -0,0 +1,161 @@
+##############################################################################
+# Copyright (c) 2018 Linux Foundation, Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### LF POD 4 descriptor file ###
+
+version: 1.0
+details:
+ pod_owner: Trevor Bramwell
+ contact: tbramwell@linuxfoundation.org
+ lab: Linux Foundation
+ location: Portland, Oregon, USA
+ type: development
+ link: https://wiki.opnfv.org/display/pharos/LF+POD+4
+jumphost:
+ name: pod4-jump
+ node: &nodeparams
+ type: baremetal
+ vendor: Intel Corporation
+ model: S2600WT2R
+ arch: x86_64
+ cpus: 88
+ cpu_cflags: haswell
+ cores: 22
+ memory: 62G
+ disks: &disks
+ - name: 'disk1'
+ disk_capacity: 480G
+ disk_type: ssd
+ disk_interface: sata
+ disk_rotation: 0
+ os: centos-7
+ remote_params: &remote_params
+ type: ipmi
+ versions:
+ - 2.0
+ user: admin
+ pass: octopus
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.83
+ mac_address: "a4:bf:01:01:b0:bb"
+ interfaces:
+ - mac_address: "a4:bf:01:01:b0:b9"
+ speed: 1gb
+ address: 192.168.12.1
+ name: 'nic1'
+ features: 'dpdk|sriov'
+ - mac_address: "00:1e:67:fd:9a:04"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic2'
+ - mac_address: "00:1e:67:fd:9a:05"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic3'
+nodes:
+ - name: pod4-node1
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.84
+ mac_address: "a4:bf:01:01:ab:b6"
+ interfaces:
+ - mac_address: "a4:bf:01:01:ab:b4"
+ speed: 1gb
+ features: 'dpdk|sriov'
+ name: 'nic1'
+ - mac_address: "00:1e:67:fd:9b:32"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic2'
+ - mac_address: "00:1e:67:fd:9b:33"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic3'
+ - name: pod4-node2
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.85
+ mac_address: "a4:bf:01:01:b6:97"
+ interfaces:
+ - mac_address: "a4:bf:01:01:b6:95"
+ speed: 1gb
+ features: 'dpdk|sriov'
+ name: 'nic1'
+ - mac_address: "00:1e:67:fd:98:e2"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic2'
+ - mac_address: "00:1e:67:fd:98:e3"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic3'
+ - name: pod4-node3
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.86
+ mac_address: "a4:bf:01:01:66:fe"
+ interfaces:
+ - mac_address: "a4:bf:01:01:66:fc"
+ speed: 1gb
+ features: 'dpdk|sriov'
+ name: 'nic1'
+ - mac_address: "00:1e:67:fd:9c:c8"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic2'
+ - mac_address: "00:1e:67:fd:9c:c9"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic3'
+ - name: pod4-node4
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.87
+ mac_address: "a4:bf:01:01:b2:f5"
+ interfaces:
+ - mac_address: "a4:bf:01:01:b2:f3"
+ speed: 1gb
+ features: 'dpdk|sriov'
+ name: 'nic1'
+ - mac_address: "00:1e:67:fd:9b:38"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic2'
+ - mac_address: "00:1e:67:fd:9b:39"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic3'
+ - name: pod4-node5
+ node: *nodeparams
+ disks: *disks
+ remote_management:
+ <<: *remote_params
+ address: 172.30.8.88
+ mac_address: "a4:bf:01:01:b5:11"
+ interfaces:
+ - mac_address: "a4:bf:01:01:b5:0f"
+ speed: 1gb
+ features: 'dpdk|sriov'
+ name: 'nic1'
+ - mac_address: "00:1e:67:fd:99:40"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic2'
+ - mac_address: "00:1e:67:fd:99:41"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic3'
diff --git a/labs/lf/pod5.yaml b/labs/lf/pod5.yaml
index 61ca3ebe..47ebc9e8 100644
--- a/labs/lf/pod5.yaml
+++ b/labs/lf/pod5.yaml
@@ -18,7 +18,7 @@ details:
link: https://wiki.opnfv.org/display/pharos/LF+POD+5
jumphost:
name: pod5-jump
- node: &nodeparas
+ node: &nodeparams
type: baremetal
vendor: Intel Corporation
model: S2600WT2R
@@ -48,7 +48,7 @@ jumphost:
- mac_address: "a4:bf:01:01:ad:71"
speed: 1gb
features: 'dpdk|sriov'
- address: 10.20.0.129
+ address: 192.168.11.1
name: 'nic1'
- mac_address: "00:1e:67:fd:9c:c2"
speed: 10gb
@@ -60,7 +60,7 @@ jumphost:
name: 'nic3'
nodes:
- name: pod5-node1
- node: *nodeparas
+ node: *nodeparams
disks: *disks
remote_management:
<<: *remote_params
@@ -80,7 +80,7 @@ nodes:
features: 'dpdk|sriov'
name: 'nic3'
- name: pod5-node2
- node: *nodeparas
+ node: *nodeparams
disks: *disks
remote_management:
<<: *remote_params
@@ -100,7 +100,7 @@ nodes:
features: 'dpdk|sriov'
name: 'nic3'
- name: pod5-node3
- node: *nodeparas
+ node: *nodeparams
disks: *disks
remote_management:
<<: *remote_params
@@ -120,7 +120,7 @@ nodes:
features: 'dpdk|sriov'
name: 'nic3'
- name: pod5-node4
- node: *nodeparas
+ node: *nodeparams
disks: *disks
remote_management:
<<: *remote_params
@@ -140,7 +140,7 @@ nodes:
features: 'dpdk|sriov'
name: 'nic3'
- name: pod5-node5
- node: *nodeparas
+ node: *nodeparams
disks: *disks
remote_management:
<<: *remote_params
diff --git a/labs/zte/idf-pod1.yaml b/labs/zte/idf-pod1.yaml
index 9118ae6a..a7428fd9 100644
--- a/labs/zte/idf-pod1.yaml
+++ b/labs/zte/idf-pod1.yaml
@@ -47,7 +47,7 @@ idf:
jumphost:
bridges:
admin: 'br6'
- mgmt: 'br-mgmt'
+ mgmt: ~
private: ~
public: 'br-external'
network: