Diffstat

-rw-r--r--  .gitignore | 6
-rw-r--r--  .gitmodules | 92
-rw-r--r--  .gitreview | 2
-rw-r--r--  CONTRIBUTING.rst | 23
-rw-r--r--  INFO.yaml | 6
-rw-r--r--  LICENSE | 209
-rw-r--r--  LICENSE.rst | 90
-rw-r--r--  README.rst | 173
-rw-r--r--  ci/README.rst | 149
-rwxr-xr-x  ci/build.sh | 107
-rwxr-xr-x  ci/deploy.sh | 161
m---------  docker | 0
-rw-r--r--  docs/LICENSE | 395
-rw-r--r--  docs/conf.py | 8
-rw-r--r--  docs/conf.yaml | 10
-rw-r--r--  docs/index.rst | 18
l---------  docs/release/developer-guide/img/README.rst | 1
-rwxr-xr-x  docs/release/developer-guide/img/detail_fuel.png | bin 0 -> 40664 bytes
-rwxr-xr-x  docs/release/developer-guide/img/overview_fuel.png | bin 0 -> 53292 bytes
-rwxr-xr-x  docs/release/developer-guide/img/overview_mcp.png | bin 0 -> 50856 bytes
-rwxr-xr-x  docs/release/developer-guide/img/symbol_gerrit.png | bin 0 -> 20012 bytes
-rwxr-xr-x  docs/release/developer-guide/img/symbol_git_blue.png | bin 0 -> 2373 bytes
-rwxr-xr-x  docs/release/developer-guide/img/symbol_git_orange.png | bin 0 -> 1956 bytes
-rwxr-xr-x  docs/release/developer-guide/img/symbol_git_red.png | bin 0 -> 2396 bytes
-rwxr-xr-x  docs/release/developer-guide/img/symbol_jenkins.png | bin 0 -> 2666 bytes
-rwxr-xr-x  docs/release/developer-guide/img/symbol_k8.png | bin 0 -> 2142 bytes
-rwxr-xr-x  docs/release/developer-guide/img/symbol_os.png | bin 0 -> 2396 bytes
-rwxr-xr-x  docs/release/developer-guide/img/symbol_salt.png | bin 0 -> 5339 bytes
-rwxr-xr-x  docs/release/developer-guide/img/symbol_trigger.png | bin 0 -> 1917 bytes
-rwxr-xr-x  docs/release/developer-guide/img/symbol_user.png | bin 0 -> 2521 bytes
-rw-r--r--  docs/release/installation/img/README.rst | 14
-rw-r--r--  docs/release/installation/img/arm_pod5.png | bin 178079 -> 0 bytes
-rw-r--r--  docs/release/installation/img/fuel_baremetal.png | bin 272115 -> 0 bytes
-rwxr-xr-x  docs/release/installation/img/fuel_baremetal_ha.png | bin 0 -> 279736 bytes
-rwxr-xr-x  docs/release/installation/img/fuel_baremetal_noha.png | bin 0 -> 187877 bytes
-rwxr-xr-x  docs/release/installation/img/fuel_hybrid_noha.png | bin 0 -> 186931 bytes
-rw-r--r--  docs/release/installation/img/fuel_virtual.png | bin 216442 -> 0 bytes
-rwxr-xr-x  docs/release/installation/img/fuel_virtual_noha.png | bin 0 -> 234038 bytes
-rw-r--r--  docs/release/installation/img/lf_pod2.png | bin 178795 -> 0 bytes
-rw-r--r--  docs/release/installation/index.rst | 9
-rw-r--r--  docs/release/installation/installation.instruction.rst | 1655
-rw-r--r--  docs/release/release-notes/index.rst | 9
-rw-r--r--  docs/release/release-notes/release-notes.rst | 230
-rw-r--r--  docs/release/scenarios/index.rst | 17
-rw-r--r--  docs/release/scenarios/os-nosdn-fdio-noha/index.rst | 14
-rw-r--r--  docs/release/scenarios/os-nosdn-fdio-noha/os-nosdn-fdio-noha.rst | 39
-rw-r--r--  docs/release/scenarios/os-nosdn-nofeature-ha/index.rst | 14
-rw-r--r--  docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst | 40
-rw-r--r--  docs/release/scenarios/os-nosdn-nofeature-noha/index.rst | 14
-rw-r--r--  docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst | 38
-rw-r--r--  docs/release/scenarios/os-nosdn-onap-ha/index.rst | 14
-rw-r--r--  docs/release/scenarios/os-nosdn-onap-ha/os-nosdn-ovs-ha.rst | 48
-rw-r--r--  docs/release/scenarios/os-nosdn-onap-noha/index.rst | 14
-rw-r--r--  docs/release/scenarios/os-nosdn-onap-noha/os-nosdn-ovs-noha.rst | 45
-rw-r--r--  docs/release/scenarios/os-nosdn-ovs-ha/index.rst | 6
-rw-r--r--  docs/release/scenarios/os-nosdn-ovs-ha/os-nosdn-ovs-ha.rst | 15
-rw-r--r--  docs/release/scenarios/os-nosdn-ovs-noha/index.rst | 6
-rw-r--r--  docs/release/scenarios/os-nosdn-ovs-noha/os-nosdn-ovs-noha.rst | 15
-rw-r--r--  docs/release/scenarios/os-odl-nofeature-ha/index.rst | 14
-rw-r--r--  docs/release/scenarios/os-odl-nofeature-ha/os-odl-nofeature-ha.rst | 40
-rw-r--r--  docs/release/scenarios/os-odl-nofeature-noha/index.rst | 14
-rw-r--r--  docs/release/scenarios/os-odl-nofeature-noha/os-odl-nofeature-noha.rst | 39
-rw-r--r--  docs/release/scenarios/os-odl-ovs-noha/index.rst | 14
-rw-r--r--  docs/release/scenarios/os-odl-ovs-noha/os-odl-ovs-noha.rst | 40
-rw-r--r--  docs/release/scenarios/os-ovn-nofeature-ha/index.rst | 14
-rw-r--r--  docs/release/scenarios/os-ovn-nofeature-ha/os-ovn-nofeature-ha.rst | 40
-rw-r--r--  docs/release/scenarios/os-ovn-nofeature-noha/index.rst | 14
-rw-r--r--  docs/release/scenarios/os-ovn-nofeature-noha/os-ovn-nofeature-noha.rst | 39
-rw-r--r--  docs/release/userguide/img/saltstack.png | bin 14373 -> 0 bytes
-rw-r--r--  docs/release/userguide/index.rst | 10
-rw-r--r--  docs/release/userguide/userguide.rst | 1159
-rw-r--r--  docs/requirements.txt | 12
-rw-r--r--  mcp/config/labs/local/idf-pod1.yaml | 79
-rw-r--r--  mcp/config/labs/local/idf-virtual1.yaml | 103
-rw-r--r--  mcp/config/labs/local/pod1.yaml | 199
-rw-r--r--  mcp/config/labs/local/virtual1.yaml | 127
-rw-r--r--  mcp/config/scenario/.gitignore | 3
-rw-r--r--  mcp/config/scenario/README.rst | 21
-rw-r--r--  mcp/config/scenario/defaults.yaml | 53
-rw-r--r--  mcp/config/scenario/defaults.yaml.j2 | 158
-rw-r--r--  mcp/config/scenario/k8-calico-iec-noha.yaml | 27
-rw-r--r--  mcp/config/scenario/k8-calico-iec-vcp-noha.yaml | 28
-rw-r--r--  mcp/config/scenario/k8-calico-nofeature-noha.yaml | 28
-rw-r--r--  mcp/config/scenario/os-nosdn-fdio-ha.yaml (renamed from mcp/config/scenario/os-nosdn-ovs-ha.yaml.j2) | 41
-rw-r--r--  mcp/config/scenario/os-nosdn-fdio-noha.yaml | 60
-rw-r--r--  mcp/config/scenario/os-nosdn-nofeature-ha.yaml (renamed from mcp/config/scenario/os-nosdn-nofeature-ha.yaml.j2) | 37
-rw-r--r--  mcp/config/scenario/os-nosdn-nofeature-noha.yaml.j2 | 52
-rw-r--r--  mcp/config/scenario/os-nosdn-onap-ha.yaml | 48
-rw-r--r--  mcp/config/scenario/os-nosdn-onap-noha.yaml | 41
-rw-r--r--  mcp/config/scenario/os-nosdn-ovs-ha.yaml | 44
-rw-r--r--  mcp/config/scenario/os-nosdn-ovs-noha.yaml | 25
-rw-r--r--  mcp/config/scenario/os-odl-bgpvpn-noha.yaml | 38
-rw-r--r--  mcp/config/scenario/os-odl-nofeature-ha.yaml (renamed from mcp/config/scenario/os-odl-nofeature-ha.yaml.j2) | 37
-rw-r--r--  mcp/config/scenario/os-odl-nofeature-noha.yaml | 22
-rw-r--r--  mcp/config/scenario/os-odl-ovs-noha.yaml | 31
-rw-r--r--  mcp/config/scenario/os-odl-sfc-noha.yaml (renamed from mcp/patches/patches.list) | 34
-rw-r--r--  mcp/config/scenario/os-ovn-nofeature-ha.yaml (renamed from mcp/config/scenario/os-ovn-nofeature-ha.yaml.j2) | 37
-rw-r--r--  mcp/config/scenario/os-ovn-nofeature-noha.yaml | 20
-rwxr-xr-x  mcp/config/states/akraino_iec | 62
-rwxr-xr-x  mcp/config/states/baremetal_init | 31
-rwxr-xr-x  mcp/config/states/kubernetes | 39
-rwxr-xr-x  mcp/config/states/maas | 77
-rwxr-xr-x  mcp/config/states/onap | 65
-rwxr-xr-x  mcp/config/states/opendaylight | 8
-rwxr-xr-x  mcp/config/states/openstack_ha | 64
-rwxr-xr-x  mcp/config/states/openstack_noha | 30
-rwxr-xr-x  mcp/config/states/quagga (renamed from mcp/config/states/dpdk) | 8
-rwxr-xr-x [-rw-r--r--]  mcp/config/states/tacker (renamed from mcp/scripts/user-data.mcp.sh.j2) | 11
-rwxr-xr-x  mcp/config/states/virtual_control_plane | 16
-rwxr-xr-x  mcp/config/states/virtual_init | 43
-rw-r--r--  mcp/deploy/images/.gitkeep | 0
m---------  mcp/deploy/scripts | 0
-rw-r--r--  mcp/patches/0011-system.repo-Debian-Add-keyserver-proxy-support.patch | 181
-rw-r--r--  mcp/patches/Makefile | 2
-rw-r--r--  mcp/patches/README.rst | 158
-rw-r--r--  mcp/patches/config.mk | 3
-rw-r--r--  mcp/patches/docker/0001-tasks.py-Allow-passing-extra-build-arg.patch | 59
-rw-r--r--  mcp/patches/docker/0002-OPNFV-tag-convention-alignment.patch | 78
-rw-r--r--  mcp/patches/docker/0003-OPNFV-package-installation-Ubuntu-user.patch | 112
-rw-r--r--  mcp/patches/docker/0004-reclass-Set-ignore_overwritten_missing_references.patch | 34
-rw-r--r--  mcp/patches/docker/0005-Add-saltminion-maas-build-target.patch | 108
-rw-r--r--  mcp/patches/docker/0006-Use-archive.repo.saltstack.com-repos.patch | 42
-rw-r--r--  mcp/patches/fuel-patch-copyright.template | 2
-rwxr-xr-x  mcp/patches/patch.sh | 19
-rw-r--r--  mcp/patches/reclass-system-salt-model/0001-Use-keystone-v3-endpoints-by-default.patch | 4
-rw-r--r--  mcp/patches/reclass-system-salt-model/0003-system.repo-Pin-glusterfs-with-higher-prio.patch | 33
-rw-r--r--  mcp/patches/salt-formula-aodh/0001-Extend-apache-service-state.patch | 47
-rw-r--r--  mcp/patches/salt-formula-cinder/0001-Support-stein-version.patch | 25
-rw-r--r--  mcp/patches/salt-formula-horizon/0001-Support-stein-version.patch | 35
-rw-r--r--  mcp/patches/salt-formula-horizon/0002-Align-packages-with-Stein-reqs.patch | 30
-rw-r--r--  mcp/patches/salt-formula-keystone/0001-Handle-extra-environment-variables.patch (renamed from mcp/patches/0008-Handle-extra-environment-variables.patch) | 3
-rw-r--r--  mcp/patches/salt-formula-linux/0001-Set-ovs-bridges-as-L3-interfaces.patch (renamed from mcp/patches/0015-Set-ovs-bridges-as-L3-interfaces.patch) | 63
-rw-r--r--  mcp/patches/salt-formula-linux/0002-network-Bring-in-basic-VPP-support.patch | 139
-rw-r--r--  mcp/patches/salt-formula-linux/0003-OVS-Fix-Debian-service-deps-OVS-bridge-ifup.patch | 97
-rw-r--r--  mcp/patches/salt-formula-linux/0004-dpdk-Handle-per-port-memory-model.patch | 25
-rw-r--r--  mcp/patches/salt-formula-linux/0005-network-RHEL-Set-bridge-for-member-interfaces.patch | 44
-rw-r--r--  mcp/patches/salt-formula-linux/0006-dpdk-Remove-invalid-vhost-options.patch | 25
-rw-r--r--  mcp/patches/salt-formula-maas/0001-maas-region-skip-credentials-update.patch (renamed from mcp/patches/0002-maas-region-skip-credentials-update.patch) | 8
-rw-r--r--  mcp/patches/salt-formula-maas/0002-maas-region-allow-timeout-override.patch (renamed from mcp/patches/0010-maas-region-allow-timeout-override.patch) | 22
-rw-r--r--  mcp/patches/salt-formula-maas/0003-Extend-wait_for-maas.py-wait_for_-attempts-arg.patch | 268
-rw-r--r--  mcp/patches/salt-formula-maas/0004-curtin-Tune-default-salt-minion-config.patch | 43
-rw-r--r--  mcp/patches/salt-formula-maas/0005-Implement-tags-support.patch | 95
-rw-r--r--  mcp/patches/salt-formula-maas/0006-curtin-Add-Bionic-support.patch | 231
-rw-r--r--  mcp/patches/salt-formula-maas/0007-region-s-syncdb-migrate-for-MaaS-2.4-compatibility.patch | 30
-rw-r--r--  mcp/patches/salt-formula-neutron/0001-Bring-in-basic-VPP-support.patch | 195
-rw-r--r--  mcp/patches/salt-formula-neutron/0002-Align-packages-with-stein-reqs.patch | 114
-rw-r--r--  mcp/patches/salt-formula-rabbitmq/0001-Stop-epmd.socket-before-relaunching-rabbit-service.patch | 34
-rw-r--r--  mcp/patches/salt-formula-redis/0001-Add-Ubuntu-Bionic-support.patch | 44
-rw-r--r--  mcp/patches/scripts/0001-salt-master-setup.sh-Allow-arm64-salt-bootstrap.patch | 59
-rw-r--r--  mcp/patches/scripts/0003-salt-master-setup-Group-APT-install-formulas.patch | 68
-rw-r--r--  mcp/reclass/classes/cluster/.gitignore | 27
-rw-r--r--  mcp/reclass/classes/cluster/README.rst | 16
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/backports.yml | 74
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/fdio_repo.yml | 118
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/infra/config_pdf.yml.j2 | 44
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/infra/maas.yml.j2 | 172
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/init.yml.j2 | 53
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/opnfv/init.yml.j2 | 17
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/opnfv/lab_proxy_pdf.yml.j2 (renamed from mcp/reclass/classes/cluster/mcp-common-ha/include/lab_proxy_pdf.yml.j2) | 0
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/opnfv/maas_proxy.yml.j2 (renamed from mcp/reclass/classes/cluster/mcp-common-ha/include/maas_proxy.yml.j2) | 0
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/passwords.yml | 64
-rw-r--r--  mcp/reclass/classes/cluster/all-mcp-arch-common/uca_repo.yml | 68
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/glusterfs_repo.yml | 24
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/infra/config.yml.j2 | 66
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/infra/init.yml.j2 | 6
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml.j2 (renamed from mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml) | 58
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm_pdf.yml.j2 | 2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/infra/maas.yml.j2 | 174
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute.yml | 42
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute_pdf.yml.j2 | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_control.yml.j2 | 79
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_control_init.yml | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_database.yml | 4
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_init.yml.j2 | 85
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_interface_vcp_biport.yml.j2 | 7
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_interface_vcp_triport.yml.j2 | 8
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_message_queue.yml | 2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_proxy.yml.j2 | 13
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-ha/openstack_telemetry.yml.j2 | 11
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-noha/haproxy_openstack_api.yml | 33
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-noha/infra/config.yml.j2 | 27
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-noha/infra/init.yml.j2 (renamed from mcp/reclass/classes/cluster/mcp-common-noha/infra/init.yml) | 21
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-noha/init_options.yml | 2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-noha/openstack_compute.yml | 35
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-noha/openstack_compute_pdf.yml.j2 | 53
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-noha/openstack_control.yml.j2 (renamed from mcp/reclass/classes/cluster/mcp-common-noha/openstack_control.yml) | 116
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-noha/openstack_control_pdf.yml.j2 | 32
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-noha/openstack_gateway.yml | 7
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-noha/openstack_gateway_pdf.yml.j2 | 94
-rw-r--r--  mcp/reclass/classes/cluster/mcp-common-noha/openstack_init.yml.j2 | 72
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/infra/config.yml | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/infra/init.yml | 14
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/infra/init_vcp.yml.j2 | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/infra/kvm.yml.j2 | 14
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/infra/maas.yml | 11
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/init.yml | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/compute.yml | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/compute_pdf.yml.j2 | 36
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/control.yml | 13
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/database.yml | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/init.yml | 14
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/message_queue.yml | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/proxy.yml | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/telemetry.yml | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-noha/infra/config.yml.j2 | 58
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-noha/infra/init.yml | 14
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-noha/infra/maas.yml | 11
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-noha/init.yml | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/compute.yml.j2 | 68
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/control.yml | 76
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/gateway.yml | 68
-rw-r--r--  mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/init.yml | 30
-rw-r--r--  mcp/reclass/classes/cluster/mcp-iec-noha/akraino/iec.yml.j2 | 117
-rw-r--r--  mcp/reclass/classes/cluster/mcp-iec-noha/akraino/init.yml | 32
-rw-r--r--  mcp/reclass/classes/cluster/mcp-iec-noha/infra/config.yml.j2 | 57
-rw-r--r--  mcp/reclass/classes/cluster/mcp-iec-noha/infra/init.yml.j2 | 31
-rw-r--r--  mcp/reclass/classes/cluster/mcp-iec-noha/infra/kvm.yml.j2 | 112
-rw-r--r--  mcp/reclass/classes/cluster/mcp-iec-noha/infra/maas.yml | 11
-rw-r--r--  mcp/reclass/classes/cluster/mcp-iec-noha/init.yml | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-k8s-calico-noha/infra/config.yml.j2 | 79
-rw-r--r--  mcp/reclass/classes/cluster/mcp-k8s-calico-noha/infra/init.yml | 13
-rw-r--r--  mcp/reclass/classes/cluster/mcp-k8s-calico-noha/init.yml | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/common.yml.j2 | 75
-rw-r--r--  mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/compute.yml | 12
-rw-r--r--  mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/control.yml | 99
-rw-r--r--  mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/init.yml.j2 | 108
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-ha/infra/config.yml.j2 (renamed from mcp/reclass/classes/cluster/mcp-odl-ha/infra/config.yml) | 19
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2 | 19
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-ha/infra/maas.yml.j2 | 7
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-ha/opendaylight/control.yml.j2 | 7
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-ha/openstack/compute.yml | 17
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-ha/openstack/control.yml | 8
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-ha/openstack/init.yml.j2 | 26
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-noha/infra/config.yml.j2 | 2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-noha/infra/maas.yml | 11
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-noha/init.yml | 3
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-noha/opendaylight/control.yml.j2 (renamed from mcp/reclass/classes/cluster/mcp-odl-noha/opendaylight/control.yml) | 15
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-noha/opendaylight/control_pdf.yml.j2 | 39
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-noha/openstack/compute.yml.j2 | 91
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-noha/openstack/control.yml | 8
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-noha/openstack/gateway.yml | 25
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-noha/openstack/gateway.yml.j2 | 57
-rw-r--r--  mcp/reclass/classes/cluster/mcp-odl-noha/openstack/init.yml | 2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovn-ha/infra/maas.yml | 2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/compute.yml | 8
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/control.yml | 42
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/database.yml | 2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/init.yml | 2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/message_queue.yml | 2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/proxy.yml | 2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/telemetry.yml | 2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovn-noha/infra/maas.yml | 11
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovn-noha/init.yml | 1
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovn-noha/openstack/compute.yml.j2 | 28
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovn-noha/openstack/control.yml | 6
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovs-dpdk-ha/infra/maas.yml | 2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovs-dpdk-ha/openstack/compute.yml | 4
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/infra/maas.yml | 11
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/init.yml | 1
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/openstack/compute.yml.j2 | 3
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/openstack/gateway.yml.j2 (renamed from mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/openstack/gateway.yml) | 8
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovs-ha/infra/maas.yml | 2
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovs-ha/openstack/compute.yml | 5
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovs-noha/infra/maas.yml | 11
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovs-noha/init.yml | 1
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/compute.yml.j2 | 9
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/control.yml | 6
-rw-r--r--  mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/gateway.yml.j2 (renamed from mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/gateway.yml) | 5
m---------  mcp/reclass/classes/system | 0
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-fdio-ha.local.yml | 18
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-fdio-noha.local.yml (renamed from mcp/config/scenario/os-nosdn-nofeature-noha.yaml) | 28
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-iec-noha.local.yml | 18
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-k8s-calico-noha.local.yml | 18
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-odl-ha.local.yml | 2
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-odl-noha.local.yml | 2
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-ovn-ha.local.yml | 2
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-ovn-noha.local.yml | 2
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-ha.local.yml | 2
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-noha.local.yml | 2
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-ovs-ha.local.yml | 2
-rw-r--r--  mcp/reclass/nodes/cfg01.mcp-ovs-noha.local.yml | 2
-rw-r--r--  mcp/salt-formulas/maas/machines/delete.sls | 20
-rw-r--r--  mcp/salt-formulas/maas/machines/mark_broken_fixed.sls | 20
-rw-r--r--  mcp/salt-formulas/maas/machines/override_failed_testing.sls | 20
-rw-r--r--  mcp/salt-formulas/maas/pxe_nat.sls | 37
-rw-r--r--  mcp/salt-formulas/opendaylight/server.sls | 113
-rw-r--r--  mcp/salt-formulas/opnfv/route_wrapper.sls | 27
m---------  mcp/salt-formulas/salt-formula-aodh | 0
m---------  mcp/salt-formulas/salt-formula-apache | 0
-rw-r--r--  mcp/salt-formulas/salt-formula-armband/armband/files/nova-libvirt-aarch64-rollup.diff | 27
-rw-r--r--  mcp/salt-formulas/salt-formula-armband/armband/init.sls | 7
-rw-r--r--  mcp/salt-formulas/salt-formula-armband/armband/nova_config.sls | 37
-rw-r--r--  mcp/salt-formulas/salt-formula-armband/armband/nova_libvirt.sls | 9
-rw-r--r--  mcp/salt-formulas/salt-formula-armband/armband/qemu_efi.sls | 4
-rw-r--r--  mcp/salt-formulas/salt-formula-armband/armband/vgabios.sls | 9
m---------  mcp/salt-formulas/salt-formula-barbican | 0
m---------  mcp/salt-formulas/salt-formula-ceilometer | 0
m---------  mcp/salt-formulas/salt-formula-cinder | 0
m---------  mcp/salt-formulas/salt-formula-etcd | 0
m---------  mcp/salt-formulas/salt-formula-glance | 0
m---------  mcp/salt-formulas/salt-formula-gnocchi | 0
m---------  mcp/salt-formulas/salt-formula-heat | 0
m---------  mcp/salt-formulas/salt-formula-horizon | 0
m---------  mcp/salt-formulas/salt-formula-keystone | 0
m---------  mcp/salt-formulas/salt-formula-kubernetes | 0
m---------  mcp/salt-formulas/salt-formula-linux | 0
m---------  mcp/salt-formulas/salt-formula-maas | 0
m---------  mcp/salt-formulas/salt-formula-neutron | 0
m---------  mcp/salt-formulas/salt-formula-nfs | 0
m---------  mcp/salt-formulas/salt-formula-nova | 0
-rw-r--r--  mcp/salt-formulas/salt-formula-opendaylight/metadata/service/server/cluster.yml | 60
-rw-r--r--  mcp/salt-formulas/salt-formula-opendaylight/metadata/service/server/single.yml (renamed from mcp/metadata/service/opendaylight/server/single.yml) | 8
-rw-r--r--  mcp/salt-formulas/salt-formula-opendaylight/metadata/service/support.yml (renamed from mcp/metadata/service/opendaylight/support.yml) | 0
-rw-r--r--  mcp/salt-formulas/salt-formula-opendaylight/opendaylight/config.sls | 94
-rw-r--r--  mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/jetty.xml (renamed from mcp/salt-formulas/opendaylight/files/jetty.xml) | 0
-rw-r--r--  mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/netvirt-dhcpservice-config.xml | 23
-rw-r--r--  mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/netvirt-natservice-config.xml | 23
-rw-r--r--  mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/setenv.shell (renamed from mcp/salt-formulas/opendaylight/files/setenv.shell) | 0
-rw-r--r--  mcp/salt-formulas/salt-formula-opendaylight/opendaylight/init.sls (renamed from mcp/salt-formulas/opendaylight/init.sls) | 0
-rw-r--r--  mcp/salt-formulas/salt-formula-opendaylight/opendaylight/map.jinja (renamed from mcp/salt-formulas/opendaylight/map.jinja) | 21
-rw-r--r--  mcp/salt-formulas/salt-formula-opendaylight/opendaylight/repo.sls | 56
-rw-r--r--  mcp/salt-formulas/salt-formula-opendaylight/opendaylight/server.sls | 51
m---------  mcp/salt-formulas/salt-formula-oslo-templates | 0
m---------  mcp/salt-formulas/salt-formula-panko | 0
-rw-r--r--  mcp/salt-formulas/salt-formula-quagga/metadata/service/server/single.yml | 12
-rw-r--r--  mcp/salt-formulas/salt-formula-quagga/quagga/init.sls | 11
-rw-r--r--  mcp/salt-formulas/salt-formula-quagga/quagga/map.jinja | 21
-rw-r--r--  mcp/salt-formulas/salt-formula-quagga/quagga/server.sls | 41
m---------  mcp/salt-formulas/salt-formula-rabbitmq | 0
m---------  mcp/salt-formulas/salt-formula-redis | 0
-rw-r--r--  mcp/salt-formulas/salt-formula-tacker/metadata/service/server/single.yml (renamed from mcp/metadata/service/tacker/server/single.yml) | 3
-rw-r--r--  mcp/salt-formulas/salt-formula-tacker/tacker/files/tacker.conf (renamed from mcp/salt-formulas/tacker/files/tacker.conf) | 0
-rw-r--r--  mcp/salt-formulas/salt-formula-tacker/tacker/files/tacker.systemd (renamed from mcp/salt-formulas/tacker/files/tacker.systemd) | 0
-rw-r--r--  mcp/salt-formulas/salt-formula-tacker/tacker/init.sls (renamed from mcp/salt-formulas/tacker/init.sls) | 0
-rw-r--r--  mcp/salt-formulas/salt-formula-tacker/tacker/map.jinja (renamed from mcp/salt-formulas/tacker/map.jinja) | 0
-rw-r--r--  mcp/salt-formulas/salt-formula-tacker/tacker/server.sls (renamed from mcp/salt-formulas/tacker/server.sls) | 0
-rw-r--r--  mcp/scripts/.gitignore | 4
-rw-r--r--  mcp/scripts/docker-compose/docker-compose.yaml.j2 | 102
-rwxr-xr-x  mcp/scripts/docker-compose/files/entrypoint.sh | 104
-rw-r--r--  mcp/scripts/docker-compose/files/entrypoint_maas.sh.j2 | 62
-rw-r--r--  mcp/scripts/docker-compose/files/hosts.j2 | 7
-rw-r--r--  mcp/scripts/docker-compose/files/opnfv_master.conf | 21
-rw-r--r--  mcp/scripts/globals.sh | 6
-rw-r--r--  mcp/scripts/lib.sh | 518
-rw-r--r--  mcp/scripts/lib_jump_common.sh | 213
-rw-r--r--  mcp/scripts/lib_jump_deploy.sh | 591
-rw-r--r--  mcp/scripts/lib_template.sh | 26
m---------  mcp/scripts/pharos | 0
-rw-r--r--  mcp/scripts/requirements_deb.yaml | 24
-rw-r--r--  mcp/scripts/requirements_rpm.yaml | 20
-rwxr-xr-x  mcp/scripts/salt.sh | 133
-rw-r--r--  mcp/scripts/user-data.sh.j2 (renamed from mcp/scripts/user-data.admin.sh.j2) | 12
-rw-r--r--  mcp/scripts/virsh_net/net_mcpcontrol.xml.j2 | 20
-rw-r--r--  mcp/scripts/xdf_data.sh.j2 | 78
-rw-r--r--  onboarding.txt | 15
-rw-r--r--  prototypes/sfc_tacker/README.rst | 62
-rwxr-xr-x  prototypes/sfc_tacker/poc.tacker-up.sh | 385
-rw-r--r--  tox.ini | 26
358 files changed, 11862 insertions, 4587 deletions
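
The diffstat above is the cgit rendering of git's ``--stat``/``--summary`` output for this commit. A minimal sketch of how the same listing can be regenerated from a local checkout (the commit reference ``HEAD`` is illustrative, not taken from this page):

.. code-block:: console

    jenkins@jumpserver:~/fuel$ git show --stat --summary HEAD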
diff --git a/.gitignore b/.gitignore
index fe7a86422..4a787d9b4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,8 +9,8 @@
**/docs_build/
**/docs_output/
**/releng/
-**/mcp/deploy/images/
-**/mcp/scripts/user-data.sh
**/mcp/scripts/virsh_net/*.xml
+**/mcp/scripts/docker-compose/*.yaml
**/mcp/scripts/*.img
-**/net_map.j2
+.tox/
+docs/_build/*
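
The updated ignore list drops the removed ``mcp/deploy/scripts`` and ``user-data.sh`` entries and instead covers YAML rendered from ``mcp/scripts/docker-compose/docker-compose.yaml.j2``, plus local ``.tox/`` and ``docs/_build/`` artifacts. A quick way to confirm which pattern catches a given path is ``git check-ignore -v``; the paths below are illustrative and assume the corresponding files were generated locally:

.. code-block:: console

    jenkins@jumpserver:~/fuel$ git check-ignore -v mcp/scripts/docker-compose/docker-compose.yaml
    jenkins@jumpserver:~/fuel$ git check-ignore -v docs/_build/html/index.html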
diff --git a/.gitmodules b/.gitmodules
index c7c44587c..c628f7794 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -2,11 +2,95 @@
path = mcp/reclass/classes/system
url = https://github.com/Mirantis/reclass-system-salt-model
branch = master
-[submodule "scripts"]
- path = mcp/deploy/scripts
- url = https://github.com/salt-formulas/salt-formulas-scripts
- branch = master
[submodule "pharos"]
path = mcp/scripts/pharos
url = https://github.com/opnfv/pharos
branch = master
+[submodule "docker"]
+ path = docker
+ url = https://github.com/epcim/docker-salt-formulas
+ branch = master
+[submodule "salt-formula-linux"]
+ path = mcp/salt-formulas/salt-formula-linux
+ url = https://github.com/salt-formulas/salt-formula-linux
+ branch = master
+[submodule "salt-formula-keystone"]
+ path = mcp/salt-formulas/salt-formula-keystone
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/keystone
+ branch = master
+[submodule "salt-formula-maas"]
+ path = mcp/salt-formulas/salt-formula-maas
+ url = https://github.com/salt-formulas/salt-formula-maas
+ branch = master
+[submodule "salt-formula-redis"]
+ path = mcp/salt-formulas/salt-formula-redis
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/redis
+ branch = master
+[submodule "salt-formula-cinder"]
+ path = mcp/salt-formulas/salt-formula-cinder
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/cinder
+ branch = master
+[submodule "salt-formula-heat"]
+ path = mcp/salt-formulas/salt-formula-heat
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/heat
+ branch = master
+[submodule "salt-formula-nova"]
+ path = mcp/salt-formulas/salt-formula-nova
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/nova
+ branch = master
+[submodule "salt-formula-neutron"]
+ path = mcp/salt-formulas/salt-formula-neutron
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/neutron
+ branch = master
+[submodule "salt-formula-oslo-templates"]
+ path = mcp/salt-formulas/salt-formula-oslo-templates
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/oslo-templates
+ branch = master
+[submodule "salt-formula-horizon"]
+ path = mcp/salt-formulas/salt-formula-horizon
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/horizon
+ branch = master
+[submodule "salt-formula-gnocchi"]
+ path = mcp/salt-formulas/salt-formula-gnocchi
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/gnocchi
+ branch = master
+[submodule "salt-formula-etcd"]
+ path = mcp/salt-formulas/salt-formula-etcd
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/etcd
+ branch = master
+[submodule "salt-formula-kubernetes"]
+ path = mcp/salt-formulas/salt-formula-kubernetes
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/kubernetes
+ branch = master
+[submodule "salt-formula-apache"]
+ path = mcp/salt-formulas/salt-formula-apache
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/apache
+ branch = master
+[submodule "salt-formula-aodh"]
+ path = mcp/salt-formulas/salt-formula-aodh
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/aodh
+ branch = master
+[submodule "salt-formula-panko"]
+ path = mcp/salt-formulas/salt-formula-panko
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/panko
+ branch = master
+[submodule "salt-formula-barbican"]
+ path = mcp/salt-formulas/salt-formula-barbican
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/barbican
+ branch = master
+[submodule "salt-formula-ceilometer"]
+ path = mcp/salt-formulas/salt-formula-ceilometer
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/ceilometer
+ branch = master
+[submodule "salt-formula-glance"]
+ path = mcp/salt-formulas/salt-formula-glance
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/glance
+ branch = master
+[submodule "salt-formula-rabbitmq"]
+ path = mcp/salt-formulas/salt-formula-rabbitmq
+ url = https://gerrit.mcp.mirantis.com/salt-formulas/rabbitmq
+ branch = master
+[submodule "salt-formula-nfs"]
+ path = mcp/salt-formulas/salt-formula-nfs
+ url = https://github.com/salt-formulas/salt-formula-nfs
+ branch = master
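
With the salt-formula repositories now tracked as git submodules under ``mcp/salt-formulas`` (and the old ``scripts`` submodule dropped), an existing clone typically needs its submodule configuration refreshed before the new paths appear. A minimal sketch using standard git commands:

.. code-block:: console

    jenkins@jumpserver:~/fuel$ git submodule sync --recursive
    jenkins@jumpserver:~/fuel$ git submodule update --init --recursive

Leftover working-tree directories from the removed ``mcp/deploy/scripts`` submodule may need to be cleaned up manually in older checkouts.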
diff --git a/.gitreview b/.gitreview
index 49ca2619e..8687965ee 100644
--- a/.gitreview
+++ b/.gitreview
@@ -2,4 +2,4 @@
host=gerrit.opnfv.org
port=29418
project=fuel.git
-
+defaultbranch=master
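
The added ``defaultbranch=master`` line tells the ``git-review`` tool which branch to target when none is specified on the command line. Assuming ``git-review`` is installed on the workstation, a typical workflow against this Gerrit looks like:

.. code-block:: console

    jenkins@jumpserver:~/fuel$ git review -s    # set up the gerrit remote from .gitreview
    jenkins@jumpserver:~/fuel$ git review       # push the current change to refs/for/master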
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 000000000..226e0fc66
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,23 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. SPDX-License-Identifier: CC-BY-4.0
+.. (c) 2018 Ericsson AB, Mirantis Inc., Enea AB and others.
+
+OPNFV Fuel Contributing
+=======================
+
+Get on board by filling this out and submitting it for review.
+This is all optional, it's just to give you a taste of the workflow.
+
+| Full Name: <change me>
+| IRC Nick: <change me>
+| Linux Foundation ID: <change me>
+| Favourite Open Source project: <change me>
+| How would you like to help this project: <change me>
+
+References
+==========
+#. `OPNFV Contribution Guidelines`_
+#. `OPNFV Developer Getting Started`_
+
+.. _`OPNFV Contribution Guidelines`: https://wiki.opnfv.org/display/DEV/Contribution+Guidelines
+.. _`OPNFV Developer Getting Started`: https://wiki.opnfv.org/display/DEV/Developer+Getting+Started
diff --git a/INFO.yaml b/INFO.yaml
index 4c5a2edc6..541f6a539 100644
--- a/INFO.yaml
+++ b/INFO.yaml
@@ -1,5 +1,5 @@
---
-project: 'Fuel based OPNFV installer (Fuel@OPNFV)'
+project: 'Fuel based OPNFV installer (OPNFV Fuel)'
project_creation_date: '2015.07.07'
project_category: 'Integration and testing'
lifecycle_state: 'Incubation'
@@ -41,6 +41,10 @@ committers:
email: 'guillermo.herrero@enea.com'
company: 'enea.com'
id: 'gherrero'
+ - name: 'Cristina Pauna'
+ email: 'Cristina.Pauna@enea.com'
+ company: 'enea.com'
+ id: 'CristinaPauna'
tsc:
# yamllint disable rule:line-length
approval: 'http//meetbot.opnfv.org/meetings/opnfv-meeting/2015/opnfv-meeting.2015-07-07-13.59.log.html'
diff --git a/LICENSE b/LICENSE
index 143e209aa..d64569567 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,13 +1,202 @@
-Copyright 2015 Open Platform for NFV Project, Inc. and its contributors
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
- http://www.apache.org/licenses/LICENSE-2.0
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/LICENSE.rst b/LICENSE.rst
deleted file mode 100644
index 67ef17d2a..000000000
--- a/LICENSE.rst
+++ /dev/null
@@ -1,90 +0,0 @@
-LICENSE
-=======
-This document is protected/licensed under the following conditions
-(c) Jonas Bjurel (Ericsson AB)
-Licensed under a Creative Commons Attribution 4.0 International License.
-You should have received a copy of the license along with this work.
-If not, see <http://creativecommons.org/licenses/by/4.0/>.
-
-Open Platform for NFV Project Software Licence
-==============================================
-Any software developed by the "Open Platform for NFV" Project is licenced under the
-Apache License, Version 2.0 (the "License");
-you may not use the content of this software bundle except in compliance with the License.
-You may obtain a copy of the License at <http://www.apache.org/licenses/LICENSE-2.0>
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Open Platform for NFV Project Documentation Licence
-===================================================
-Any documentation developed by the "Open Platform for NFV Project"
-is licensed under a Creative Commons Attribution 4.0 International License.
-You should have received a copy of the license along with this. If not,
-see <http://creativecommons.org/licenses/by/4.0/>.
-
-Unless required by applicable law or agreed to in writing, documentation
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Other applicable upstream project Licenses
-==================================================================
-You may not use the content of this software bundle except in compliance with the
-Licenses as listed below:
-
-+----------------+-----------------------------------------------------+
-| **Component** | **Licence** |
-+----------------+-----------------------------------------------------+
-| OpenStack | Apache License 2.0 |
-| | https://www.apache.org/licenses/LICENSE-2.0 |
-+----------------+-----------------------------------------------------+
-| OpenDaylight | Eclipse Public License 1.0 |
-| | https://www.eclipse.org/legal/epl-v10.html |
-+----------------+-----------------------------------------------------+
-| PostgreSQL | PostgreSQL Licence: |
-| | http://opensource.org/licenses/postgresql |
-+----------------+-----------------------------------------------------+
-| MongoDB | GNU AGPL v3.0. |
-| | http://www.fsf.org/licensing/licenses/agpl-3.0.html |
-+----------------+-----------------------------------------------------+
-| CoroSync | BSD 2-Clause |
-| | http://opensource.org/licenses/bsd-license.php |
-+----------------+-----------------------------------------------------+
-| Pacemaker | GPL v2 |
-| | https://www.gnu.org/licenses/gpl-2.0.html |
-+----------------+-----------------------------------------------------+
-| RabbitMQ | Mozilla Public License |
-| | https://www.rabbitmq.com/mpl.html |
-+----------------+-----------------------------------------------------+
-| Linux | GPLv3 |
-| | https://www.gnu.org/copyleft/gpl.html |
-+----------------+-----------------------------------------------------+
-| Ceph | GPL v2 |
-| | https://www.gnu.org/licenses/gpl-2.0.html |
-+----------------+-----------------------------------------------------+
-| Puppet | Apache License 2.0 |
-| | https://www.apache.org/licenses/LICENSE-2.0 |
-+----------------+-----------------------------------------------------+
-| Docker | Apache License 2.0 |
-| | https://www.apache.org/licenses/LICENSE-2.0 |
-+----------------+-----------------------------------------------------+
-| Fuel | Apache License 2.0 |
-| | https://www.apache.org/licenses/LICENSE-2.0 |
-+----------------+-----------------------------------------------------+
-| OpenJDK/JRE | GPL v2 |
-| | https://www.gnu.org/licenses/gpl-2.0.html |
-+----------------+-----------------------------------------------------+
-| Cobbler | GPL v2 |
-| | https://www.gnu.org/licenses/gpl-2.0.html |
-+----------------+-----------------------------------------------------+
-| Nailgun | Apache License 2.0 |
-| | https://www.apache.org/licenses/LICENSE-2.0 |
-+----------------+-----------------------------------------------------+
-| Astute | Apache License 2.0 |
-| | https://www.apache.org/licenses/LICENSE-2.0 |
-+----------------+-----------------------------------------------------+
diff --git a/README.rst b/README.rst
index 467c211bf..2987f0b32 100644
--- a/README.rst
+++ b/README.rst
@@ -1,3 +1,176 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. SPDX-License-Identifier: CC-BY-4.0
.. (c) 2018 Ericsson AB, Mirantis Inc., Enea AB and others.
+
+==========
+OPNFV Fuel
+==========
+
+|docs|
+
+.. |docs| image:: https://readthedocs.org/projects/opnfv-fuel/badge/?version=latest
+ :alt: OPNFV Fuel Documentation Status
+ :scale: 100%
+ :target: https://opnfv-fuel.readthedocs.io/en/latest/?badge=latest
+
+Description
+===========
+
+This is the OPNFV Hunter release that implements the deploy stage of the
+OPNFV CI pipeline via Fuel.
+
+Fuel is based on the `MCP`_ installation tool chain.
+More information available at `Mirantis Cloud Platform Documentation`_.
+
+The goal of the Fuel deployment process is to establish a lab ready platform
+accelerating further development of the OPNFV infrastructure.
+
+Release Notes
+=============
+
+- `OPNFV Fuel Release Notes on RTD`_
+
+Installation
+============
+
+- `OPNFV Fuel Installation Instruction on RTD`_
+
+Usage
+=====
+
+- `OPNFV Fuel User Guide on RTD`_
+
+Scenarios
+=========
+
+- `OPNFV Fuel Scenarios on RTD`_
+
+Contributing
+============
+
+- `OPNFV Fuel Contributing`_
+
+Support
+=======
+
+- `OPNFV Fuel Wiki Page`_
+- `OPNFV Community Support mailing list`_
+- `OPNFV Technical Discussion mailing list`_
+
+LICENSE
+=======
+
+| This document is protected/licensed under the following conditions
+| (c) Jonas Bjurel (Ericsson AB)
+| Licensed under a Creative Commons Attribution 4.0 International License.
+| You should have received a copy of the license along with this work.
+| If not, see <https://creativecommons.org/licenses/by/4.0/>.
+
+Open Platform for NFV Project Software Licence
+----------------------------------------------
+
+| Any software developed by the "Open Platform for NFV" Project is licenced under the
+| Apache License, Version 2.0 (the "License");
+| you may not use the content of this software bundle except in compliance with the License.
+| You may obtain a copy of the License at <https://www.apache.org/licenses/LICENSE-2.0>
+|
+| Unless required by applicable law or agreed to in writing, software
+| distributed under the License is distributed on an "AS IS" BASIS,
+| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+| See the License for the specific language governing permissions and
+| limitations under the License.
+
+Open Platform for NFV Project Documentation Licence
+---------------------------------------------------
+
+| Any documentation developed by the "Open Platform for NFV Project"
+| is licensed under a Creative Commons Attribution 4.0 International License.
+| You should have received a copy of the license along with this. If not,
+| see <https://creativecommons.org/licenses/by/4.0/>.
+|
+| Unless required by applicable law or agreed to in writing, documentation
+| distributed under the License is distributed on an "AS IS" BASIS,
+| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+| See the License for the specific language governing permissions and
+| limitations under the License.
+
+Other Applicable Upstream Project Licenses
+------------------------------------------
+
+You may not use the content of this software bundle except in compliance with the
+Licenses as listed below (non-exhaustive list, depending on end-user config):
+
++------------------+-------------------------------+
+| **Component** | **Licence** |
++------------------+-------------------------------+
+| `OpenStack`_ | `Apache License 2.0`_ |
++------------------+-------------------------------+
+| `OpenDaylight`_ | `Eclipse Public License 1.0`_ |
++------------------+-------------------------------+
+| `PostgreSQL`_ | `PostgreSQL Licence`_ |
++------------------+-------------------------------+
+| `MongoDB`_ | `GNU AGPL v3.0`_ |
++------------------+-------------------------------+
+| `RabbitMQ`_ | `Mozilla Public License`_ |
++------------------+-------------------------------+
+| `Linux`_ | `GPL v3`_ |
++------------------+-------------------------------+
+| `Docker`_ | `Apache License 2.0`_ |
++------------------+-------------------------------+
+| `OpenJDK`_/JRE | `GPL v2`_ |
++------------------+-------------------------------+
+| `SaltStack`_ | `Apache License 2.0`_ |
++------------------+-------------------------------+
+| `salt-formulas`_ | `Apache License 2.0`_ |
++------------------+-------------------------------+
+| `reclass`_ | `The Artistic Licence 2.0`_ |
++------------------+-------------------------------+
+| `MaaS`_ | `GNU AGPL v3.0`_ |
++------------------+-------------------------------+
+
+References
+==========
+
+For more information on the OPNFV Hunter release, please see:
+
+#. `OPNFV Home Page`_
+#. `OPNFV Documentation`_
+#. `OPNFV Software Downloads`_
+#. `OPNFV Hunter Wiki Page`_
+#. `Mirantis Cloud Platform Documentation`_
+
+.. _`OpenStack`: https://www.openstack.org
+.. _`OpenDaylight`: https://www.opendaylight.org
+.. _`PostgreSQL`: https://www.postgresql.org
+.. _`MongoDB`: https://www.mongodb.com
+.. _`RabbitMQ`: https://www.rabbitmq.com
+.. _`Linux`: https://www.linux.org
+.. _`Docker`: https://www.docker.com
+.. _`OpenJDK`: https://openjdk.java.net/
+.. _`SaltStack`: https://www.saltstack.com
+.. _`salt-formulas`: https://github.com/salt-formulas
+.. _`reclass`: https://reclass.pantsfullofunix.net
+.. _`MaaS`: https://maas.io
+.. _`MCP`: https://www.mirantis.com/software/mcp/
+.. _`Mirantis Cloud Platform Documentation`: https://docs.mirantis.com/mcp/latest/
+.. _`OPNFV Home Page`: https://www.opnfv.org
+.. _`OPNFV Hunter Wiki Page`: https://wiki.opnfv.org/display/SWREL/Hunter
+.. _`OPNFV Documentation`: https://docs.opnfv.org
+.. _`OPNFV Software Downloads`: https://www.opnfv.org/software/downloads
+.. _`OPNFV Fuel Contributing`: CONTRIBUTING.rst
+.. _`OPNFV Fuel Wiki Page`: https://wiki.opnfv.org/display/fuel/Fuel+Opnfv
+.. _`OPNFV Community Support mailing list`: https://lists.opnfv.org/g/opnfv-users
+.. _`OPNFV Technical Discussion mailing list`: https://lists.opnfv.org/g/opnfv-tech-discuss
+.. _`OPNFV Fuel Release Notes on RTD`: https://opnfv-fuel.readthedocs.io/en/latest/release/release-notes/index.html
+.. _`OPNFV Fuel Installation Instruction on RTD`: https://opnfv-fuel.readthedocs.io/en/latest/release/installation/index.html
+.. _`OPNFV Fuel User Guide on RTD`: https://opnfv-fuel.readthedocs.io/en/latest/release/userguide/userguide.html
+.. _`OPNFV Fuel Scenarios on RTD`: https://opnfv-fuel.readthedocs.io/en/latest/release/scenarios/index.html
+.. LICENSE links
+.. _`Apache License 2.0`: https://www.apache.org/licenses/LICENSE-2.0
+.. _`Eclipse Public License 1.0`: https://www.eclipse.org/legal/epl-v10.html
+.. _`PostgreSQL Licence`: https://opensource.org/licenses/postgresql
+.. _`GNU AGPL v3.0`: https://www.gnu.org/licenses/agpl-3.0.html
+.. _`Mozilla Public License`: https://www.rabbitmq.com/mpl.html
+.. _`GPL v3`: https://www.gnu.org/copyleft/gpl.html
+.. _`GPL v2`: https://www.gnu.org/licenses/gpl-2.0.html
+.. _`The Artistic Licence 2.0`: https://www.perlfoundation.org/artistic-license-20.html
diff --git a/ci/README.rst b/ci/README.rst
index dc860c003..c25c58f11 100644
--- a/ci/README.rst
+++ b/ci/README.rst
@@ -4,99 +4,100 @@
Abstract
========
-The fuel/ci directory holds all Fuel@OPNFV programatic abstractions for
-the OPNFV community release and continous integration pipeline.
-There is now only one Fuel@OPNFV autonomous script for this, complying to the
+
+The ``ci`` directory holds all OPNFV Fuel programatic abstractions for
+the OPNFV community release and continuous integration pipeline.
+There are now two OPNFV Fuel autonomous scripts for this, complying to the
OPNFV CI pipeline guideline:
- - deploy.sh
-USAGE
+- ``build.sh``
+- ``deploy.sh``
+
+Usage
=====
-For usage information of the CI/CD scripts, please run:
- .. code-block:: bash
+For usage information of the CI/CD deploy script, please run:
- $ ./deploy.sh -h
+.. code-block:: console
-Details on the CI/CD deployment framework
+ jenkins@jumpserver:~/fuel/ci$ ./deploy.sh -h
+
+Details on the CI/CD Deployment Framework
=========================================
-Overview and purpose
+Overview and Purpose
--------------------
-The CI/CD deployment script relies on a configuration structure, providing base
-installer configuration (part of fuel repo: mcp/config), per POD specific
-configuration (part of a separate classified POD configuration repo: securedlab
-and deployment scenario configuration (part of fuel repo: mcp/config/scenario).
-- The base installer configuration resembles the least common denominator of all
+The CI/CD deployment script relies on a configuration structure, providing:
+
+- per POD specific configuration (defaults to using Pharos OPNFV project
+ ``PDF``/``IDF`` files for all OPNFV CI PODs).
+ Pharos OPNFV git repository is included as a git submodule at
+ ``mcp/scripts/pharos``.
+ Optionally, a custom configuration structure can be used via the ``-b``
+ deploy argument.
+ The POD specific parameters follow the ``PDF``/``IDF`` formats defined by
+ the Pharos OPNFV project.
+- deployment scenario configuration, part of fuel repo: ``mcp/config/scenario``.
+ Provides a high level, POD/HW environment independent scenario configuration
+ for a specific deployment. It defines what features shall be deployed - as
+ well as needed overrides of the base installer, POD/HW environment
+ configurations. Objects allowed to override are governed by the OPNFV Fuel
+ project.
+- base installer configuration, part of fuel repo: ``mcp/config/states``,
+ ``mcp/reclass``.
+ The base installer configuration resembles the least common denominator of all
HW/POD environment and deployment scenarios. These configurations are
- normally carried by the the installer projects in this case (Fuel@OPNFV).
-- Per POD specific configuration specifies POD unique parameters, the POD
- parameter possible to alter is governed by the Fuel@OPNFV project.
-- Deployment scenario configuration - provides a high level, POD/HW environment
- independent scenario configuration for a specifiv deployment. It defines what
- features shall be deployed - as well needed overrides of the base
- installer, POD/HW environment configurations. Objects allowed to override
- are governed by the Fuel@OPNFV project.
-
-Executing a deployment
+ normally carried by the installer project, in this case OPNFV Fuel.
+
+Executing a Deployment
----------------------
-deploy.sh must be executed locally at the target lab/pod/jumpserver
+
+``deploy.sh`` must be executed locally on the target lab/pod/jumpserver.
A configuration structure must be provided - see the section below.
 It is straightforward to execute a deployment task - as an example:
- .. code-block:: bash
+.. code-block:: console
+
+ jenkins@jumpserver:~/fuel/ci$ ./deploy.sh -b file:///home/jenkins/config \
+ -l lf \
+ -p pod2 \
+ -s os-nosdn-nofeature-ha
- $ sudo deploy.sh -b file:///home/jenkins/config
- -l lf -p pod2 -s os-nosdn-nofeature-ha
+The ``-b`` argument should be expressed in URI style (e.g. ``file://...`` or
+``http://...``). The resources can thus be local or remote.
--b and -i arguments should be expressed in URI style (eg: file://...
-or http://...). The resources can thus be local or remote.
+If ``-b`` is not used, the Pharos OPNFV project git submodule local path URI
+is used for the default configuration structure.
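+
+For example, a remote configuration structure may be used instead of the
+local Pharos submodule (the URL below is purely illustrative):
+
+.. code-block:: console
+
+   jenkins@jumpserver:~/fuel/ci$ ./deploy.sh -b http://example.org/pod-configs \
+                                             -l lf \
+                                             -p pod2 \
+                                             -s os-nosdn-nofeature-noha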
-Configuration repository structure
+Configuration Repository Structure
----------------------------------
+
The CI deployment engine relies on a configuration directory/file structure
-pointed to by the -b option described above.
-Normally this points to the secure classified OPNFV securedlab repo to which
-only jenkins and andmins have access to, but you may point to any local or
-remote strcture fullfilling the diectory/file structure below.
-The reason that this configuration structure needs to be secure/hidden
-is that there are security sensitive information in the various configuration
-files.
-
-FIXME: Below information is out of date and should be refreshed after PDF
-support is fully implemented.
-
-A local stripped version of this configuration structure with virtual
-deployment configurations also exist under build/config/.
+pointed to by the ``-b`` option described above.
+Normally this points to the ``mcp/scripts/pharos`` git repo submodule, but you
+may point to any local or remote structure fulfilling the directory/file
+structure below.
+This configuration structure supports optional encryption of certain
+security-sensitive data, a mechanism described in the Pharos documentation.
+
 The configuration directory and file structure should adhere to the following:
- .. code-block:: bash
-
- TOP
- !
- +---- labs
- !
- +---- lab-name-1
- ! !
- ! +---- pod-name-1
- ! ! !
- ! ! +---- fuel
- ! ! !
- ! ! +---- config
- ! ! !
- ! ! +---- dea-pod-override.yaml
- ! ! !
- ! ! +---- dha.yaml
- ! !
- ! +---- pod-name-2
- ! !
- !
- +---- lab-name-2
- ! !
-
-
-Creating a deployment scenario
-------------------------------
-Please find `mcp/config/README.rst` for instructions on how to create a new
-deployment scenario.
+.. code-block:: console
+
+ TOP
+ !
+ +---- labs
+ !
+ +---- lab-name-1
+ ! !
+ ! +---- pod1.yaml
+ ! !
+ ! +---- idf-pod1.yaml
+ ! !
+ ! +---- pod2.yaml
+ ! !
+ ! +---- idf-pod2.yaml
+ !
+ +---- lab-name-2
+ ! !
diff --git a/ci/build.sh b/ci/build.sh
new file mode 100755
index 000000000..3da67053b
--- /dev/null
+++ b/ci/build.sh
@@ -0,0 +1,107 @@
+#!/bin/bash -e
+# shellcheck disable=SC1004,SC1090
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+##############################################################################
+# BEGIN of Exit handlers
+#
+do_exit () {
+ local RC=$?
+ if [ ${RC} -eq 0 ]; then
+ notify_n "[OK] MCP: Docker build finished succesfully!" 2
+ else
+ notify_n "[ERROR] MCP: Docker build threw a fatal error!"
+ fi
+}
+#
+# End of Exit handlers
+##############################################################################
+
+##############################################################################
+# BEGIN of variables to customize
+#
+CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
+MCP_REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..")
+DEPLOY_DIR=$(cd "${MCP_REPO_ROOT_PATH}/mcp/scripts"; pwd)
+DOCKER_DIR=$(cd "${MCP_REPO_ROOT_PATH}/docker"; pwd)
+DOCKER_TAG=${1:-latest}
+DOCKER_PUSH=${2---push} # pass an empty second arg to disable push
+CACHE_INVALIDATE=${CACHE_INVALIDATE:-0}
+SALT_VERSION='stable 2017.7'
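+
+# Example invocations (tag value below is illustrative; the two positional
+# arguments map to DOCKER_TAG and DOCKER_PUSH above):
+#   ./build.sh              # build images tagged 'latest' and push them
+#   ./build.sh mytag ''     # build images tagged 'mytag', skip the push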
+
+source "${DEPLOY_DIR}/globals.sh"
+source "${DEPLOY_DIR}/lib.sh"
+source "${DEPLOY_DIR}/lib_jump_common.sh"
+
+[ ! "${TERM:-unknown}" = 'unknown' ] || export TERM=vt220
+[ "${CACHE_INVALIDATE}" = 0 ] || CACHE_INVALIDATE=$(date +%s)
+
+export LC_ALL=en_US.utf-8
+export LANG=en_US.utf-8
+
+#
+# END of variables to customize
+##############################################################################
+
+##############################################################################
+# BEGIN of main
+#
+
+# Enable the automatic exit trap
+trap do_exit SIGINT SIGTERM EXIT
+
+# Set no restrictive umask so that Jenkins can remove any residuals
+umask 0000
+
+pushd "${DEPLOY_DIR}" > /dev/null
+
+# Install distro packages and pip-managed prerequisites
+notify "[NOTE] Installing required build-time distro and pip pkgs" 2
+jumpserver_pkg_install 'build'
+PYTHON_BIN_PATH="$(python3 -m site --user-base)/bin"
+PATH="$PATH:$PYTHON_BIN_PATH"
+# Clone git submodules and apply our patches
+make -C "${MCP_REPO_ROOT_PATH}/mcp/patches" deepclean patches-import
+python3 -m pip install --upgrade pipenv --user
+docker_install
+
+popd > /dev/null
+pushd "${DOCKER_DIR}" > /dev/null
+
+env PIPENV_HIDE_EMOJIS=1 VIRTUALENV_ALWAYS_COPY=1 python3 -m pipenv --three install
+env PIPENV_HIDE_EMOJIS=1 VIRTUALENV_ALWAYS_COPY=1 python3 -m pipenv install invoke
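+
+# Build (and optionally push) the two OPNFV Fuel container images: a
+# reclass-based Salt Master image and a MaaS-enabled Salt Minion image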
+# shellcheck disable=SC2086
+env PIPENV_HIDE_EMOJIS=1 python3 -m pipenv run \
+ invoke build saltmaster-reclass \
+ --require 'salt salt-formulas opnfv reclass tini-saltmaster' \
+ --dist=ubuntu \
+ --dist-rel=bionic \
+ --formula-rev=nightly \
+ --opnfv-tag="${DOCKER_TAG}" \
+ --salt="${SALT_VERSION}" \
+ --build-arg-extra " \
+ CACHE_INVALIDATE=\"${CACHE_INVALIDATE}\"" \
+ ${DOCKER_PUSH}
+
+env PIPENV_HIDE_EMOJIS=1 python3 -m pipenv run \
+ invoke build saltminion-maas \
+ --require 'maas' \
+ --dist=ubuntu \
+ --dist-rel=bionic \
+ --opnfv-tag="${DOCKER_TAG}" \
+ --salt="${SALT_VERSION}" \
+ --build-arg-extra " \
+ CACHE_INVALIDATE=\"${CACHE_INVALIDATE}\"" \
+ ${DOCKER_PUSH}
+
+popd > /dev/null
+
+#
+# END of main
+##############################################################################
diff --git a/ci/deploy.sh b/ci/deploy.sh
index 10b639e3c..d04c4b23c 100755
--- a/ci/deploy.sh
+++ b/ci/deploy.sh
@@ -16,9 +16,9 @@ do_exit () {
local RC=$?
cleanup_mounts > /dev/null 2>&1
if [ ${RC} -eq 0 ]; then
- notify_n "[OK] MCP: Openstack installation finished succesfully!" 2
+ notify_n "[OK] MCP: Installation of $DEPLOY_SCENARIO finished succesfully!" 2
else
- notify_n "[ERROR] MCP: Openstack installation threw a fatal error!"
+ notify_n "[ERROR] MCP: Installation of $DEPLOY_SCENARIO threw a fatal error!"
fi
}
#
@@ -32,35 +32,38 @@ usage ()
{
cat << EOF
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-$(notify "$(basename "$0"): Deploy the Fuel@OPNFV MCP stack" 3)
+$(notify "$(basename "$0"): Deploy the OPNFV Fuel MCP stack" 3)
$(notify "USAGE:" 2)
$(basename "$0") -l lab-name -p pod-name -s deploy-scenario \\
[-b Lab Config Base URI] \\
[-S storage-dir] [-L /path/to/log/file.tar.gz] \\
- [-f[f]] [-F] [-e | -E[E]] [-d] [-D] [-N]
+ [-f] [-F[F]] [-e[e] | -E[E]] [-d] [-D] [-N] [-m] \\
+ [-o operating-system]
$(notify "OPTIONS:" 2)
-b Base-uri for the stack-configuration structure
-d Dry-run
-D Debug logging
- -e Do not launch environment deployment
+ -e Do not launch environment deployment (use twice to skip cloud setup)
-E Remove existing VCP VMs (use twice to redeploy baremetal nodes)
- -f Deploy on existing Salt master (use twice to also skip config sync)
- -F Do only create a Salt master
+ -f Deploy on existing Salt master (use twice or more to skip states)
+ -F Same as -e, do not launch environment deployment (legacy option)
-h Print this message and exit
-l Lab-name
-p Pod-name
+ -o Use specified operating system for jumpserver/VCP VMs
-P Skip installation of package dependencies
-s Deploy-scenario short-name
- -S Storage dir for VM images
+ -S Storage dir for VM images and other deploy artifacts
-L Deployment log path and file name
+ -m Use single socket CPU compute nodes (only affects virtual computes)
-N Experimental: Do not virtualize control plane (novcp)
$(notify_i "Description:" 2)
-Deploys the Fuel@OPNFV stack on the indicated lab resource.
+Deploys the OPNFV Fuel stack on the indicated lab resource.
-This script provides the Fuel@OPNFV deployment abstraction.
+This script provides the OPNFV Fuel deployment abstraction.
It depends on the OPNFV official configuration directory/file structure
and provides a fairly simple mechanism to execute a deployment.
@@ -73,36 +76,45 @@ $(notify_i "Input parameters to the build script are:" 2)
<base-uri>/labs/<lab-name>/idf-<pod-name>.yaml
The default is using the git submodule tracking 'OPNFV Pharos' in
<./mcp/scripts/pharos>.
- An example config is provided inside current repo in
- <./mcp/config>, automatically linked as <./mcp/scripts/pharos/labs/local>.
-d Dry-run - Produce deploy config files, but do not execute deploy
-D Debug logging - Enable extra logging in sh deploy scripts (set -x)
-e Do not launch environment deployment
+ If specified twice (e.g. -e -e), only the operating system and networks
+ will be provisioned, skipping cloud installation.
-E Remove existing VCP VMs. It will destroy and undefine all VCP VMs
currently defined on cluster KVM nodes. If specified twice (e.g. -E -E),
baremetal nodes (VCP too, implicitly) will be removed, then reprovisioned.
Only applicable for baremetal deploys.
+ If specified 3 times, a complete uninstallation (cleanup) will be performed
+ on the jumpserver (even for virtual deploys): VMs, virsh networks,
+ containers, networks, services etc.
-f Deploy on existing Salt master. It will skip infrastructure VM creation,
but it will still sync reclass configuration from current repo to Salt
- Master node. If specified twice (e.g. -f -f), config sync will also be
- skipped.
--F Do only create a Salt master
+ Master node.
+ Each additional use skips one more state file. For example, -fff would
+ skip the first 3 state files (e.g. virtual_init, maas, baremetal_init).
+-F Same as -e, do not launch environment deployment (legacy option)
-h Print this message and exit
-L Deployment log path and name, eg. -L /home/jenkins/job.log.tar.gz
-l Lab name as defined in the configuration directory, e.g. lf
- For the sample configuration in <./mcp/config>, lab name is 'local'.
-p POD name as defined in the configuration directory, e.g. pod2
- For the sample configuration in <./mcp/config>, POD name is 'virtual1'
- for virtual deployments or 'pod1' for baremetal (based on lf-pod2).
+-m Use single socket compute nodes. Instead of using the default NUMA-enabled
+ topology for virtual compute nodes created via libvirt, configure a
+ single guest CPU socket.
-N Experimental: Instead of virtualizing the control plane (VCP), deploy
control plane directly on baremetal nodes
+-o Operating system to be preinstalled on jumpserver VMs (for virtual/hybrid
+ deployments) and/or VCP VMs (for baremetal deployments).
+ Defaults to 'ubuntu1804' (Bionic).
-P Skip installing dependency distro packages on current host
This flag should only be used if you have kept back older packages that
would be upgraded and that is undesirable on the current system.
Note that without the required packages, deploy will fail.
-s Deployment-scenario, this points to a short deployment scenario name, which
has to be defined in config directory (e.g. os-odl-nofeature-ha).
--S Storage dir for VM images, default is mcp/deploy/images
+-S Storage dir for VM images, default is /var/lib/opnfv/tmpdir
+ It is recommended to store the deploy artifacts on a fast disk, outside of
+ the current git repository (so clean operations won't erase it).
$(notify_i "[NOTE] sudo & virsh priviledges are needed for this script to run" 3)
@@ -111,7 +123,8 @@ Example:
$(notify_i "sudo $(basename "$0") \\
-b file:///home/jenkins/securedlab \\
-l lf -p pod2 \\
- -s os-odl-nofeature-ha" 2)
+ -s os-odl-nofeature-ha \\
+ -S /home/jenkins/tmpdir" 2)
EOF
}
@@ -123,24 +136,30 @@ EOF
# BEGIN of variables to customize
#
CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
-REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..")
-DEPLOY_DIR=$(cd "${REPO_ROOT_PATH}/mcp/scripts"; pwd)
-STORAGE_DIR=$(cd "${REPO_ROOT_PATH}/mcp/deploy/images"; pwd)
+MCP_REPO_ROOT_PATH=$(readlink -f "$(dirname "${BASH_SOURCE[0]}")/..")
+DEPLOY_DIR=$(cd "${MCP_REPO_ROOT_PATH}/mcp/scripts"; pwd)
+MCP_STORAGE_DIR='/var/lib/opnfv/tmpdir'
URI_REGEXP='(file|https?|ftp)://.*'
-BASE_CONFIG_URI="file://${REPO_ROOT_PATH}/mcp/scripts/pharos"
+BASE_CONFIG_URI="file://${MCP_REPO_ROOT_PATH}/mcp/scripts/pharos"
+OPNFV_BRANCH=$(sed -ne 's/defaultbranch=//p' "${MCP_REPO_ROOT_PATH}/.gitreview")
+DEF_DOCKER_TAG=$(basename "${OPNFV_BRANCH/master/latest}")
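+# e.g. branch 'master' maps to Docker tag 'latest'; a stable branch such as
+# 'stable/hunter' would map to 'hunter' (illustrative examples)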
# Customize deploy workflow
DRY_RUN=${DRY_RUN:-0}
USE_EXISTING_PKGS=${USE_EXISTING_PKGS:-0}
USE_EXISTING_INFRA=${USE_EXISTING_INFRA:-0}
-INFRA_CREATION_ONLY=${INFRA_CREATION_ONLY:-0}
-NO_DEPLOY_ENVIRONMENT=${NO_DEPLOY_ENVIRONMENT:-0}
+MCP_NO_DEPLOY_ENVIRONMENT=${MCP_NO_DEPLOY_ENVIRONMENT:-0}
ERASE_ENV=${ERASE_ENV:-0}
MCP_VCP=${MCP_VCP:-1}
+MCP_DOCKER_TAG=${MCP_DOCKER_TAG:-${DEF_DOCKER_TAG}}
+MCP_CMP_SS=${MCP_CMP_SS:-0}
+MCP_OS=${MCP_OS:-ubuntu1804}
source "${DEPLOY_DIR}/globals.sh"
source "${DEPLOY_DIR}/lib.sh"
source "${DEPLOY_DIR}/lib_template.sh"
+source "${DEPLOY_DIR}/lib_jump_common.sh"
+source "${DEPLOY_DIR}/lib_jump_deploy.sh"
#
# END of variables to customize
@@ -150,7 +169,7 @@ source "${DEPLOY_DIR}/lib_template.sh"
# BEGIN of main
#
set +x
-while getopts "b:dDfEFl:L:Np:Ps:S:he" OPTION
+while getopts "b:dDfEFl:L:No:p:Ps:S:he" OPTION
do
case $OPTION in
b)
@@ -170,11 +189,8 @@ do
f)
((USE_EXISTING_INFRA+=1))
;;
- F)
- INFRA_CREATION_ONLY=1
- ;;
- e)
- NO_DEPLOY_ENVIRONMENT=1
+ F|e)
+ ((MCP_NO_DEPLOY_ENVIRONMENT+=1))
;;
E)
((ERASE_ENV+=1))
@@ -185,9 +201,15 @@ do
L)
DEPLOY_LOG="${OPTARG}"
;;
+ m)
+ MCP_CMP_SS=1
+ ;;
N)
MCP_VCP=0
;;
+ o)
+ MCP_OS=${OPTARG}
+ ;;
p)
TARGET_POD=${OPTARG}
;;
@@ -199,7 +221,7 @@ do
;;
S)
if [[ ${OPTARG} ]]; then
- STORAGE_DIR="${OPTARG}"
+ MCP_STORAGE_DIR="${OPTARG}"
fi
;;
h)
@@ -236,14 +258,22 @@ pushd "${DEPLOY_DIR}" > /dev/null
# scenario, etc.
# Install required packages on jump server
+sudo mkdir -p "${MCP_STORAGE_DIR}"
+sudo chown -R "${USER}:${USER}" "${MCP_STORAGE_DIR}"
if [ ${USE_EXISTING_PKGS} -eq 1 ]; then
notify "[NOTE] Skipping distro pkg installation" 2
else
notify "[NOTE] Installing required distro pkgs" 2
- jumpserver_pkg_install
+ jumpserver_pkg_install 'deploy'
+ docker_install "${MCP_STORAGE_DIR}"
+ virtinst_install "${MCP_STORAGE_DIR}"
+ # Ubuntu 18.04 cloud image requires newer e2fsprogs
+ if [[ "${MCP_OS:-}" =~ ubuntu1804 ]]; then
+ e2fsprogs_install "${MCP_STORAGE_DIR}"
+ fi
fi
-if ! virsh list >/dev/null 2>&1; then
+if ! ${VIRSH} list >/dev/null 2>&1; then
notify_e "[ERROR] This script requires hypervisor access"
fi
@@ -251,7 +281,7 @@ fi
./sysinfo_print.sh
# Clone git submodules and apply our patches
-make -C "${REPO_ROOT_PATH}/mcp/patches" deepclean patches-import
+make -C "${MCP_REPO_ROOT_PATH}/mcp/patches" deepclean patches-import
# Check scenario file existence
SCENARIO_DIR="$(readlink -f "../config/scenario")"
@@ -264,59 +294,74 @@ fi
generate_ssh_key
export MAAS_SSH_KEY="$(cat "$(basename "${SSH_KEY}").pub")"
-MCP_DPDK_MODE=$([[ "$DEPLOY_SCENARIO" =~ ovs ]] && echo 1 || echo 0)
# Expand jinja2 templates based on PDF data and env vars
-export MCP_VCP MCP_DPDK_MODE MCP_JUMP_ARCH=$(uname -i)
-do_templates_scenario "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
- "${BASE_CONFIG_URI}" "${SCENARIO_DIR}"
-do_templates_cluster "${STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
- "${REPO_ROOT_PATH}" \
- "${SCENARIO_DIR}/defaults.yaml" \
+[[ "${DEPLOY_SCENARIO}" =~ -ha$ ]] || MCP_VCP=0
+export MCP_REPO_ROOT_PATH MCP_VCP MCP_STORAGE_DIR MCP_DOCKER_TAG MCP_CMP_SS \
+ MCP_JUMP_ARCH=$(uname -i) MCP_DEPLOY_SCENARIO="${DEPLOY_SCENARIO}" \
+ MCP_NO_DEPLOY_ENVIRONMENT MCP_OS MCP_KERNEL_VER
+do_templates_scenario "${MCP_STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
+ "${BASE_CONFIG_URI}" "${SCENARIO_DIR}" \
"${SCENARIO_DIR}/${DEPLOY_SCENARIO}.yaml"
+do_templates_cluster "${MCP_STORAGE_DIR}" "${TARGET_LAB}" "${TARGET_POD}" \
+ "${MCP_REPO_ROOT_PATH}" \
+ "${SCENARIO_DIR}/defaults.yaml"
# Determine additional data (e.g. jump bridge names) based on XDF
source "${DEPLOY_DIR}/xdf_data.sh"
# Jumpserver prerequisites check
notify "[NOTE] Using bridges: ${OPNFV_BRIDGES[*]}" 2
-jumpserver_check_requirements "${virtual_nodes[*]}" "${OPNFV_BRIDGES[@]}"
+jumpserver_check_requirements "${cluster_states[*]}" "${virtual_nodes[*]}" \
+ "${OPNFV_BRIDGES[@]}"
# Infra setup
if [ ${DRY_RUN} -eq 1 ]; then
notify "[NOTE] Dry run, skipping all deployment tasks" 2
exit 0
+elif [ ${ERASE_ENV} -gt 2 ]; then
+ notify "[NOTE] Uninstall / cleanup all jumpserver Fuel resources" 2
+ cleanup_all "${MCP_STORAGE_DIR}" "${OPNFV_BRIDGES[@]}"
+ exit 0
elif [ ${USE_EXISTING_INFRA} -gt 0 ]; then
- notify "[NOTE] Use existing infra" 2
- check_connection
+ notify "[NOTE] Use existing infra: skip first ${USE_EXISTING_INFRA} states" 2
+ notify "[STATE] Skipping: ${cluster_states[*]::${USE_EXISTING_INFRA}}" 2
else
- prepare_vms "${base_image}" "${STORAGE_DIR}" "${virtual_repos_pkgs}" \
- "${virtual_nodes[@]}"
+ prepare_vms "${base_image}" "${MCP_STORAGE_DIR}" "${virtual_repos_pkgs}"
create_networks "${OPNFV_BRIDGES[@]}"
do_sysctl_cfg
- create_vms "${STORAGE_DIR}" "${virtual_nodes_data}" "${OPNFV_BRIDGES[@]}"
- update_mcpcontrol_network
+ do_udev_cfg
+ create_vms "${MCP_STORAGE_DIR}" "${virtual_nodes_data}" "${OPNFV_BRIDGES[@]}"
start_vms "${virtual_nodes[@]}"
- check_connection
-fi
-if [ ${USE_EXISTING_INFRA} -lt 2 ]; then
- wait_for 5 "./salt.sh ${STORAGE_DIR}/pod_config.yml ${virtual_nodes[*]}"
+
+ # https://github.com/docker/libnetwork/issues/1743
+ # rm -f /var/lib/docker/network/files/local-kv.db
+ sudo systemctl restart docker
+ prepare_containers "${MCP_STORAGE_DIR}"
fi
+start_containers "${MCP_STORAGE_DIR}"
+check_connection
+
# Openstack cluster setup
set +x
-if [ ${INFRA_CREATION_ONLY} -eq 1 ] || [ ${NO_DEPLOY_ENVIRONMENT} -eq 1 ]; then
+if [ ${MCP_NO_DEPLOY_ENVIRONMENT} -eq 1 ]; then
notify "[NOTE] Skip openstack cluster setup" 2
else
- for state in "${cluster_states[@]}"; do
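+ # Each '-f' increments USE_EXISTING_INFRA, so the array slice below skips
+ # that many leading state files (e.g. virtual_init) when resuming a deploy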
+ for state in "${cluster_states[@]:${USE_EXISTING_INFRA}}"; do
notify "[STATE] Applying state: ${state}" 2
# shellcheck disable=SC2086,2029
wait_for 5 "ssh ${SSH_OPTS} ${SSH_SALT} sudo \
CI_DEBUG=$CI_DEBUG ERASE_ENV=$ERASE_ENV \
/root/fuel/mcp/config/states/${state}"
+ if [ "${state}" = 'maas' ]; then
+ # For hybrid PODs (virtual + baremetal nodes), the virtual nodes
+ # should be reset to force a DHCP request from MaaS DHCP
+ reset_vms "${virtual_nodes[@]}"
+ fi
done
-fi
-./log.sh "${DEPLOY_LOG}"
+ ./log.sh "${DEPLOY_LOG}"
+fi
popd > /dev/null
diff --git a/docker b/docker
new file mode 160000
+Subproject d580f1ef272a29268a1825e7f810979ade6b2b7
diff --git a/docs/LICENSE b/docs/LICENSE
new file mode 100644
index 000000000..d2a14a60a
--- /dev/null
+++ b/docs/LICENSE
@@ -0,0 +1,395 @@
+Attribution 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More_considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution 4.0 International Public License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution 4.0 International Public License ("Public License"). To the
+extent this Public License may be interpreted as a contract, You are
+granted the Licensed Rights in consideration of Your acceptance of
+these terms and conditions, and the Licensor grants You such rights in
+consideration of benefits the Licensor receives from making the
+Licensed Material available under these terms and conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ d. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ e. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ f. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ g. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ h. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ i. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ j. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ k. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part; and
+
+ b. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ 4. If You Share Adapted Material You produce, the Adapter's
+ License You apply must not prevent recipients of the Adapted
+ Material from complying with this Public License.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+ Rights (but not its individual contents) is Adapted Material; and
+
+ c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public
+licenses. Notwithstanding, Creative Commons may elect to apply one of
+its public licenses to material it publishes and in those instances
+will be considered the "Licensor". The text of the Creative Commons
+public licenses is dedicated to the public domain under the CC0 Public
+Domain Dedication. Except for the limited purpose of indicating that
+material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the
+public licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/docs/conf.py b/docs/conf.py
new file mode 100644
index 000000000..f0c2a7876
--- /dev/null
+++ b/docs/conf.py
@@ -0,0 +1,8 @@
+##############################################################################
+# Copyright (c) 2018 Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from docs_conf.conf import * # noqa: F401,F403
diff --git a/docs/conf.yaml b/docs/conf.yaml
new file mode 100644
index 000000000..a7ce79465
--- /dev/null
+++ b/docs/conf.yaml
@@ -0,0 +1,10 @@
+##############################################################################
+# Copyright (c) 2018 Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+project_cfg: opnfv
+project: fuel
diff --git a/docs/index.rst b/docs/index.rst
new file mode 100644
index 000000000..943e1d5ca
--- /dev/null
+++ b/docs/index.rst
@@ -0,0 +1,18 @@
+.. _fuel:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+====
+FUEL
+====
+
+.. toctree::
+ :numbered:
+ :maxdepth: 2
+
+ release/release-notes/index
+ release/installation/index
+ release/userguide/index
+ release/scenarios/index
diff --git a/docs/release/developer-guide/img/README.rst b/docs/release/developer-guide/img/README.rst
new file mode 120000
index 000000000..1104109df
--- /dev/null
+++ b/docs/release/developer-guide/img/README.rst
@@ -0,0 +1 @@
+../../installation/img/README.rst \ No newline at end of file
diff --git a/docs/release/developer-guide/img/detail_fuel.png b/docs/release/developer-guide/img/detail_fuel.png
new file mode 100755
index 000000000..02af61aa7
--- /dev/null
+++ b/docs/release/developer-guide/img/detail_fuel.png
Binary files differ
diff --git a/docs/release/developer-guide/img/overview_fuel.png b/docs/release/developer-guide/img/overview_fuel.png
new file mode 100755
index 000000000..6b879d756
--- /dev/null
+++ b/docs/release/developer-guide/img/overview_fuel.png
Binary files differ
diff --git a/docs/release/developer-guide/img/overview_mcp.png b/docs/release/developer-guide/img/overview_mcp.png
new file mode 100755
index 000000000..037b293b9
--- /dev/null
+++ b/docs/release/developer-guide/img/overview_mcp.png
Binary files differ
diff --git a/docs/release/developer-guide/img/symbol_gerrit.png b/docs/release/developer-guide/img/symbol_gerrit.png
new file mode 100755
index 000000000..aea346e25
--- /dev/null
+++ b/docs/release/developer-guide/img/symbol_gerrit.png
Binary files differ
diff --git a/docs/release/developer-guide/img/symbol_git_blue.png b/docs/release/developer-guide/img/symbol_git_blue.png
new file mode 100755
index 000000000..569ed3f7b
--- /dev/null
+++ b/docs/release/developer-guide/img/symbol_git_blue.png
Binary files differ
diff --git a/docs/release/developer-guide/img/symbol_git_orange.png b/docs/release/developer-guide/img/symbol_git_orange.png
new file mode 100755
index 000000000..32f672985
--- /dev/null
+++ b/docs/release/developer-guide/img/symbol_git_orange.png
Binary files differ
diff --git a/docs/release/developer-guide/img/symbol_git_red.png b/docs/release/developer-guide/img/symbol_git_red.png
new file mode 100755
index 000000000..f288afe0b
--- /dev/null
+++ b/docs/release/developer-guide/img/symbol_git_red.png
Binary files differ
diff --git a/docs/release/developer-guide/img/symbol_jenkins.png b/docs/release/developer-guide/img/symbol_jenkins.png
new file mode 100755
index 000000000..20fde4141
--- /dev/null
+++ b/docs/release/developer-guide/img/symbol_jenkins.png
Binary files differ
diff --git a/docs/release/developer-guide/img/symbol_k8.png b/docs/release/developer-guide/img/symbol_k8.png
new file mode 100755
index 000000000..0cbc31005
--- /dev/null
+++ b/docs/release/developer-guide/img/symbol_k8.png
Binary files differ
diff --git a/docs/release/developer-guide/img/symbol_os.png b/docs/release/developer-guide/img/symbol_os.png
new file mode 100755
index 000000000..c2c8b262b
--- /dev/null
+++ b/docs/release/developer-guide/img/symbol_os.png
Binary files differ
diff --git a/docs/release/developer-guide/img/symbol_salt.png b/docs/release/developer-guide/img/symbol_salt.png
new file mode 100755
index 000000000..e9011ae0c
--- /dev/null
+++ b/docs/release/developer-guide/img/symbol_salt.png
Binary files differ
diff --git a/docs/release/developer-guide/img/symbol_trigger.png b/docs/release/developer-guide/img/symbol_trigger.png
new file mode 100755
index 000000000..e7dc10ffd
--- /dev/null
+++ b/docs/release/developer-guide/img/symbol_trigger.png
Binary files differ
diff --git a/docs/release/developer-guide/img/symbol_user.png b/docs/release/developer-guide/img/symbol_user.png
new file mode 100755
index 000000000..6384f8205
--- /dev/null
+++ b/docs/release/developer-guide/img/symbol_user.png
Binary files differ
diff --git a/docs/release/installation/img/README.rst b/docs/release/installation/img/README.rst
index bc8d9bede..bf630445b 100644
--- a/docs/release/installation/img/README.rst
+++ b/docs/release/installation/img/README.rst
@@ -1,12 +1,18 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. SPDX-License-Identifier: CC-BY-4.0
-.. (c) 2017 Ericsson AB, Mirantis Inc., Enea AB and others.
+.. (c) 2018 Ericsson AB, Mirantis Inc., Enea AB and others.
+
+:orphan:
Image Editor
============
-All files in this directory have been created using `draw.io <http://draw.io>`_.
+
+All files in this directory have been created using `draw.io`_.
Image Sources
=============
-Image sources are embedded in each `png` file.
-To edit an image, import the `png` file using `draw.io <http://draw.io>`_.
+
+Image sources are embedded in each ``png`` file.
+To edit an image, import the ``png`` file using `draw.io`_.
+
+.. _`draw.io`: https://draw.io
diff --git a/docs/release/installation/img/arm_pod5.png b/docs/release/installation/img/arm_pod5.png
deleted file mode 100644
index 87edb8f45..000000000
--- a/docs/release/installation/img/arm_pod5.png
+++ /dev/null
Binary files differ
diff --git a/docs/release/installation/img/fuel_baremetal.png b/docs/release/installation/img/fuel_baremetal.png
deleted file mode 100644
index 27e762021..000000000
--- a/docs/release/installation/img/fuel_baremetal.png
+++ /dev/null
Binary files differ
diff --git a/docs/release/installation/img/fuel_baremetal_ha.png b/docs/release/installation/img/fuel_baremetal_ha.png
new file mode 100755
index 000000000..af5f00f8a
--- /dev/null
+++ b/docs/release/installation/img/fuel_baremetal_ha.png
Binary files differ
diff --git a/docs/release/installation/img/fuel_baremetal_noha.png b/docs/release/installation/img/fuel_baremetal_noha.png
new file mode 100755
index 000000000..4b5aef050
--- /dev/null
+++ b/docs/release/installation/img/fuel_baremetal_noha.png
Binary files differ
diff --git a/docs/release/installation/img/fuel_hybrid_noha.png b/docs/release/installation/img/fuel_hybrid_noha.png
new file mode 100755
index 000000000..f2debfef3
--- /dev/null
+++ b/docs/release/installation/img/fuel_hybrid_noha.png
Binary files differ
diff --git a/docs/release/installation/img/fuel_virtual.png b/docs/release/installation/img/fuel_virtual.png
deleted file mode 100644
index d7664865d..000000000
--- a/docs/release/installation/img/fuel_virtual.png
+++ /dev/null
Binary files differ
diff --git a/docs/release/installation/img/fuel_virtual_noha.png b/docs/release/installation/img/fuel_virtual_noha.png
new file mode 100755
index 000000000..710988acb
--- /dev/null
+++ b/docs/release/installation/img/fuel_virtual_noha.png
Binary files differ
diff --git a/docs/release/installation/img/lf_pod2.png b/docs/release/installation/img/lf_pod2.png
deleted file mode 100644
index da419d87c..000000000
--- a/docs/release/installation/img/lf_pod2.png
+++ /dev/null
Binary files differ
diff --git a/docs/release/installation/index.rst b/docs/release/installation/index.rst
index 784eec252..866044eb5 100644
--- a/docs/release/installation/index.rst
+++ b/docs/release/installation/index.rst
@@ -1,17 +1,10 @@
-.. _fuel-installation:
-
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
.. (c) Open Platform for NFV Project, Inc. and its contributors
-.. _fuel-release-installation-label:
-
-****************************************
-Installation instruction for Fuel\@OPNFV
-****************************************
+.. _fuel-installation:
.. toctree::
- :numbered:
:maxdepth: 2
installation.instruction.rst
diff --git a/docs/release/installation/installation.instruction.rst b/docs/release/installation/installation.instruction.rst
index 1961b25f0..8c013dd65 100644
--- a/docs/release/installation/installation.instruction.rst
+++ b/docs/release/installation/installation.instruction.rst
@@ -2,618 +2,1411 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) Open Platform for NFV Project, Inc. and its contributors
-========
+***********************************
+OPNFV Fuel Installation Instruction
+***********************************
+
Abstract
========
-This document describes how to install the Fraser release of
+This document describes how to install the ``Iruya`` release of
OPNFV when using Fuel as a deployment tool, covering its usage,
limitations, dependencies and required system resources.
-This is an unified documentation for both x86_64 and aarch64
+
+This is a unified document for both ``x86_64`` and ``aarch64``
architectures. All information is common for both architectures
except when explicitly stated.
-============
Introduction
============
This document provides guidelines on how to install and
-configure the Fraser release of OPNFV when using Fuel as a
+configure the ``Iruya`` release of OPNFV when using Fuel as a
deployment tool, including required software and hardware configurations.
Although the available installation options provide a high degree of
freedom in how the system is set up, including architecture, services
and features, etc., said permutations may not provide an OPNFV
compliant reference architecture. This document provides a
-step-by-step guide that results in an OPNFV Fraser compliant
+step-by-step guide that results in an OPNFV ``Iruya`` compliant
deployment.
The audience of this document is assumed to have good knowledge of
networking and Unix/Linux administration.
-=======
-Preface
-=======
-
-Before starting the installation of the Fraser release of
+Before starting the installation of the ``Iruya`` release of
OPNFV, using Fuel as a deployment tool, some planning must be
done.
Preparations
============
-Prior to installation, a number of deployment specific parameters must be collected, those are:
+Prior to installation, a number of deployment-specific parameters must be
+collected; those are:
#. Provider sub-net and gateway information
-#. Provider VLAN information
+#. Provider ``VLAN`` information
-#. Provider DNS addresses
+#. Provider ``DNS`` addresses
-#. Provider NTP addresses
-
-#. Network overlay you plan to deploy (VLAN, VXLAN, FLAT)
-
-#. How many nodes and what roles you want to deploy (Controllers, Storage, Computes)
-
-#. Monitoring options you want to deploy (Ceilometer, Syslog, etc.).
-
-#. Other options not covered in the document are available in the links above
+#. Provider ``NTP`` addresses
+#. How many nodes and what roles you want to deploy (Controllers, Computes)
This information will be needed for the configuration procedures
provided in this document.
-=========================================
-Hardware Requirements for Virtual Deploys
-=========================================
-
-The following minimum hardware requirements must be met for the virtual
-installation of Fraser using Fuel:
-
-+----------------------------+--------------------------------------------------------+
-| **HW Aspect** | **Requirement** |
-| | |
-+============================+========================================================+
-| **1 Jumpserver** | A physical node (also called Foundation Node) that |
-| | will host a Salt Master VM and each of the VM nodes in |
-| | the virtual deploy |
-+----------------------------+--------------------------------------------------------+
-| **CPU** | Minimum 1 socket with Virtualization support |
-+----------------------------+--------------------------------------------------------+
-| **RAM** | Minimum 32GB/server (Depending on VNF work load) |
-+----------------------------+--------------------------------------------------------+
-| **Disk** | Minimum 100GB (SSD or SCSI (15krpm) highly recommended)|
-+----------------------------+--------------------------------------------------------+
-
-
-===========================================
-Hardware Requirements for Baremetal Deploys
-===========================================
-
-The following minimum hardware requirements must be met for the baremetal
-installation of Fraser using Fuel:
-
-+-------------------------+------------------------------------------------------+
-| **HW Aspect** | **Requirement** |
-| | |
-+=========================+======================================================+
-| **# of nodes** | Minimum 5 |
-| | |
-| | - 3 KVM servers which will run all the controller |
-| | services |
-| | |
-| | - 2 Compute nodes |
-| | |
-+-------------------------+------------------------------------------------------+
-| **CPU** | Minimum 1 socket with Virtualization support |
-+-------------------------+------------------------------------------------------+
-| **RAM** | Minimum 16GB/server (Depending on VNF work load) |
-+-------------------------+------------------------------------------------------+
-| **Disk** | Minimum 256GB 10kRPM spinning disks |
-+-------------------------+------------------------------------------------------+
-| **Networks** | 4 VLANs (PUBLIC, MGMT, STORAGE, PRIVATE) - can be |
-| | a mix of tagged/native |
-| | |
-| | 1 Un-Tagged VLAN for PXE Boot - ADMIN Network |
-| | |
-| | Note: These can be allocated to a single NIC - |
-| | or spread out over multiple NICs |
-+-------------------------+------------------------------------------------------+
-| **1 Jumpserver** | A physical node (also called Foundation Node) that |
-| | hosts the Salt Master and MaaS VMs |
-+-------------------------+------------------------------------------------------+
-| **Power management** | All targets need to have power management tools that |
-| | allow rebooting the hardware and setting the boot |
-| | order (e.g. IPMI) |
-+-------------------------+------------------------------------------------------+
-
-**NOTE:** All nodes including the Jumpserver must have the same architecture (either x86_64 or aarch64).
-
-**NOTE:** For aarch64 deployments an UEFI compatible firmware with PXE support is needed (e.g. EDK2).
-
-===============================
+Hardware Requirements
+=====================
+
+Minimum hardware requirements depend on the deployment type.
+
+.. WARNING::
+
+ If ``baremetal`` nodes are present in the cluster, the architecture of the
+ nodes running the control plane (``kvm01``, ``kvm02``, ``kvm03`` for
+ ``HA`` scenarios, respectively ``ctl01``, ``gtw01``, ``odl01`` for
+ ``noHA`` scenarios) and the ``jumpserver`` architecture must be the same
+ (either ``x86_64`` or ``aarch64``).
+
+.. TIP::
+
+ The compute nodes may have different architectures, but extra
+ configuration might be required for scheduling VMs on the appropriate host.
+ This use-case is not tested in OPNFV CI, so it is considered experimental.
+
+Hardware Requirements for ``virtual`` Deploys
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following minimum hardware requirements must be met for the ``virtual``
+installation of ``Iruya`` using Fuel:
+
++------------------+------------------------------------------------------+
+| **HW Aspect** | **Requirement** |
+| | |
++==================+======================================================+
+| **1 Jumpserver** | A physical node (also called Foundation Node) that |
+| | will host a Salt Master container and each of the VM |
+| | nodes in the virtual deploy |
++------------------+------------------------------------------------------+
+| **CPU** | Minimum 1 socket with Virtualization support |
++------------------+------------------------------------------------------+
+| **RAM** | Minimum 32GB/server (Depending on VNF work load) |
++------------------+------------------------------------------------------+
+| **Disk** | Minimum 100GB (SSD or 15krpm SCSI highly recommended)|
++------------------+------------------------------------------------------+
+
+Hardware Requirements for ``baremetal`` Deploys
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following minimum hardware requirements must be met for the ``baremetal``
+installation of ``Iruya`` using Fuel:
+
++------------------+------------------------------------------------------+
+| **HW Aspect** | **Requirement** |
+| | |
++==================+======================================================+
+| **1 Jumpserver** | A physical node (also called Foundation Node) that |
+| | hosts the Salt Master and MaaS containers |
++------------------+------------------------------------------------------+
+| **# of nodes** | Minimum 5 |
+| | |
+| | - 3 KVM servers which will run all the controller |
+| | services |
+| | |
+| | - 2 Compute nodes |
+| | |
+| | .. WARNING:: |
+| | |
+| | ``kvm01``, ``kvm02``, ``kvm03`` nodes and the |
+| | ``jumpserver`` must have the same architecture |
+| | (either ``x86_64`` or ``aarch64``). |
+| | |
+| | .. NOTE:: |
+| | |
+|                  |    ``aarch64`` nodes should run a ``UEFI``           |
+| | compatible firmware with PXE support |
+| | (e.g. ``EDK2``). |
++------------------+------------------------------------------------------+
+| **CPU** | Minimum 1 socket with Virtualization support |
++------------------+------------------------------------------------------+
+| **RAM** | Minimum 16GB/server (Depending on VNF work load) |
++------------------+------------------------------------------------------+
+| **Disk** | Minimum 256GB 10kRPM spinning disks |
++------------------+------------------------------------------------------+
+| **Networks**     | Minimum 4                                            |
+| | |
+| | - 3 VLANs (``public``, ``mgmt``, ``private``) - |
+| | can be a mix of tagged/native |
+| | |
+| | - 1 Un-Tagged VLAN for PXE Boot - |
+| | ``PXE/admin`` Network |
+| | |
+| | .. NOTE:: |
+| | |
+| | These can be allocated to a single NIC |
+| | or spread out over multiple NICs. |
+| | |
+| | .. WARNING:: |
+| | |
+| | No external ``DHCP`` server should be present |
+| | in the ``PXE/admin`` network segment, as it |
+| | would interfere with ``MaaS`` ``DHCP`` during |
+| | ``baremetal`` node commissioning/deploying. |
++------------------+------------------------------------------------------+
+| **Power mgmt** | All targets need to have power management tools that |
+| | allow rebooting the hardware (e.g. ``IPMI``). |
++------------------+------------------------------------------------------+
+
+Hardware Requirements for ``hybrid`` (``baremetal`` + ``virtual``) Deploys
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The following minimum hardware requirements must be met for the ``hybrid``
+installation of ``Iruya`` using Fuel:
+
++------------------+------------------------------------------------------+
+| **HW Aspect** | **Requirement** |
+| | |
++==================+======================================================+
+| **1 Jumpserver** | A physical node (also called Foundation Node) that |
+| | hosts the Salt Master and MaaS containers, and |
+| | each of the virtual nodes defined in ``PDF`` |
++------------------+------------------------------------------------------+
+| **# of nodes** | .. NOTE:: |
+| | |
+| | Depends on ``PDF`` configuration. |
+| | |
+| | If the control plane is virtualized, minimum |
+| | baremetal requirements are: |
+| | |
+| | - 2 Compute nodes |
+| | |
+| | If the computes are virtualized, minimum |
+| | baremetal requirements are: |
+| | |
+| | - 3 KVM servers which will run all the controller |
+| | services |
+| | |
+| | .. WARNING:: |
+| | |
+| | ``kvm01``, ``kvm02``, ``kvm03`` nodes and the |
+| | ``jumpserver`` must have the same architecture |
+| | (either ``x86_64`` or ``aarch64``). |
+| | |
+| | .. NOTE:: |
+| | |
+|                  |    ``aarch64`` nodes should run a ``UEFI``           |
+| | compatible firmware with PXE support |
+| | (e.g. ``EDK2``). |
++------------------+------------------------------------------------------+
+| **CPU** | Minimum 1 socket with Virtualization support |
++------------------+------------------------------------------------------+
+| **RAM** | Minimum 16GB/server (Depending on VNF work load) |
++------------------+------------------------------------------------------+
+| **Disk** | Minimum 256GB 10kRPM spinning disks |
++------------------+------------------------------------------------------+
+| **Networks** | Same as for ``baremetal`` deployments |
++------------------+------------------------------------------------------+
+| **Power mgmt** | Same as for ``baremetal`` deployments |
++------------------+------------------------------------------------------+
+
Help with Hardware Requirements
-===============================
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Calculate hardware requirements:
-For information on compatible hardware types available for use,
-please see `Fuel OpenStack Hardware Compatibility List <https://www.mirantis.com/software/hardware-compatibility/>`_
-
When choosing the hardware on which you will deploy your OpenStack
environment, you should think about:
-- CPU -- Consider the number of virtual machines that you plan to deploy in your cloud environment and the CPUs per virtual machine.
+- CPU -- Consider the number of virtual machines that you plan to deploy in
+ your cloud environment and the CPUs per virtual machine.
-- Memory -- Depends on the amount of RAM assigned per virtual machine and the controller node.
+- Memory -- Depends on the amount of RAM assigned per virtual machine and the
+ controller node.
-- Storage -- Depends on the local drive space per virtual machine, remote volumes that can be attached to a virtual machine, and object storage.
+- Storage -- Depends on the local drive space per virtual machine, remote
+ volumes that can be attached to a virtual machine, and object storage.
-- Networking -- Depends on the Choose Network Topology, the network bandwidth per virtual machine, and network storage.
+- Networking -- Depends on the chosen network topology, the network bandwidth
+  per virtual machine, and network storage.
-================================================
-Top of the Rack (TOR) Configuration Requirements
-================================================
+Top of the Rack (``TOR``) Configuration Requirements
+====================================================
The switching infrastructure provides connectivity for the OPNFV
infrastructure operations, tenant networks (East/West) and provider
connectivity (North/South); it also provides needed connectivity for
the Storage Area Network (SAN).
+
To avoid traffic congestion, it is strongly suggested that three
physically separated networks are used, that is: 1 physical network
for administration and control, one physical network for tenant private
and public networks, and one physical network for SAN.
+
The switching connectivity can (but does not need to) be fully redundant,
in such case it comprises a redundant 10GE switch pair for each of the
three physically separated networks.
-The physical TOR switches are **not** automatically configured from
-the Fuel OPNFV reference platform. All the networks involved in the OPNFV
-infrastructure as well as the provider networks and the private tenant
-VLANs needs to be manually configured.
+.. WARNING::
-Manual configuration of the Fraser hardware platform should
-be carried out according to the `OPNFV Pharos Specification
-<https://wiki.opnfv.org/display/pharos/Pharos+Specification>`_.
+ The physical ``TOR`` switches are **not** automatically configured from
+ the OPNFV Fuel reference platform. All the networks involved in the OPNFV
+ infrastructure as well as the provider networks and the private tenant
+   VLANs need to be manually configured.
+
+Manual configuration of the ``Iruya`` hardware platform should
+be carried out according to the `OPNFV Pharos Specification`_.
-============================
OPNFV Software Prerequisites
============================
+.. NOTE::
+
+ All prerequisites described in this chapter apply to the ``jumpserver``
+ node.
+
+OS Distribution Support
+~~~~~~~~~~~~~~~~~~~~~~~
+
The Jumpserver node should be pre-provisioned with an operating system,
-according to the Pharos specification. Relevant network bridges should
-also be pre-configured (e.g. admin_br, mgmt_br, public_br).
+according to the `OPNFV Pharos specification`_.
+
+OPNFV Fuel has been validated by CI using the following distributions
+installed on the Jumpserver:
+
+- ``CentOS 7`` (recommended by Pharos specification);
+- ``Ubuntu Xenial 16.04``;
+
+.. TOPIC:: ``aarch64`` notes
+
+   For an ``aarch64`` Jumpserver, the minimum required ``libvirt``
+   version is ``3.x``; ``3.5`` or newer is highly recommended.
+
+ .. TIP::
+
+      For ``CentOS 7`` (``aarch64``), the distro-provided packages are
+      already new enough.
+
+ .. WARNING::
+
+      For ``Ubuntu 16.04`` (``arm64``), the distro packages are too old, so
+      3rd party repositories should be used.
+
+ For convenience, Armband provides a DEB repository holding all the
+ required packages.
+
+ To add and enable the Armband repository on an Ubuntu 16.04 system,
+   create a new sources list file ``/etc/apt/sources.list.d/armband.list``
+ with the following contents:
+
+ .. code-block:: console
+
+ jenkins@jumpserver:~$ cat /etc/apt/sources.list.d/armband.list
+ deb http://linux.enea.com/mcp-repos/rocky/xenial rocky-armband main
+
+ jenkins@jumpserver:~$ sudo apt-key adv --keyserver keys.gnupg.net \
+ --recv 798AB1D1
+ jenkins@jumpserver:~$ sudo apt-get update
+
+OS Distribution Packages
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default, the ``deploy.sh`` script will automatically install the required
+distribution package dependencies on the Jumpserver, so the end user does
+not have to manually install them before starting the deployment.
+
+This includes Python, QEMU, libvirt etc.
+
+.. SEEALSO::
+
+ To disable automatic package installation (and/or upgrade) during
+ deployment, check out the ``-P`` deploy argument.
+
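+For illustration, once the OPNFV Fuel repository has been cloned (see the
+deploy steps below), a run that skips the automatic package handling would
+simply append the flag to the usual arguments (lab/POD/scenario values are
+placeholders here):
+
+.. code-block:: console
+
+  jenkins@jumpserver:~/fuel$ ci/deploy.sh -l <lab_name> -p <pod_name> \
+                                          -s <scenario> -P
+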
+.. WARNING::
+
+ The install script expects ``libvirt`` to be already running on the
+ Jumpserver.
+
+If the ``libvirt`` packages are missing, the script will install them;
+however, depending on the OS distribution, the user might have to start the
+``libvirt`` daemon (``libvirtd``) service manually and then run the deploy
+script again.
-- The admin bridge (admin_br) is mandatory for the baremetal nodes PXE booting during Fuel installation.
-- The management bridge (mgmt_br) is required for testing suites (e.g. functest/yardstick), it is
- suggested to pre-configure it for debugging purposes.
-- The public bridge (public_br) is also nice to have for debugging purposes, but not mandatory.
+Therefore, it is recommended to install ``libvirt`` explicitly on the
+Jumpserver before the deployment.
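+
+A minimal sketch for ``Ubuntu Xenial`` is shown below (package names differ
+on other distributions, e.g. ``libvirt`` on ``CentOS 7``, where the daemon
+must also be started via ``systemctl start libvirtd``); the ``virsh`` call
+merely verifies that the daemon is reachable:
+
+.. code-block:: console
+
+  jenkins@jumpserver:~$ sudo apt-get install libvirt-bin
+  jenkins@jumpserver:~$ sudo virsh list --all
+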
-The user running the deploy script on the Jumpserver should belong to "sudo" and "libvirt" groups,
-and have passwordless sudo access.
+While not mandatory, upgrading the kernel on the Jumpserver is also highly
+recommended.
-The following example adds the groups to the user "jenkins"
+.. code-block:: console
-.. code-block:: bash
+ jenkins@jumpserver:~$ sudo apt-get install \
+ linux-image-generic-hwe-16.04-edge libvirt-bin
+ jenkins@jumpserver:~$ sudo reboot
- $ sudo usermod -aG sudo jenkins
- $ sudo usermod -aG libvirt jenkins
- $ reboot
- $ groups
+User Requirements
+~~~~~~~~~~~~~~~~~
+
+The user running the deploy script on the Jumpserver should belong to
+``sudo`` and ``libvirt`` groups, and have passwordless sudo access.
+
+.. NOTE::
+
+ Throughout this documentation, we will use the ``jenkins`` username for
+ this role.
+
+The following example adds the groups to the user ``jenkins``:
+
+.. code-block:: console
+
+ jenkins@jumpserver:~$ sudo usermod -aG sudo jenkins
+ jenkins@jumpserver:~$ sudo usermod -aG libvirt jenkins
+ jenkins@jumpserver:~$ sudo reboot
+ jenkins@jumpserver:~$ groups
jenkins sudo libvirt
- $ sudo visudo
+ jenkins@jumpserver:~$ sudo visudo
...
%jenkins ALL=(ALL) NOPASSWD:ALL
-The folder containing the temporary deploy artifacts (/home/jenkins/tmpdir in the examples below)
-needs to have mask 777 in order for libvirt to be able to use them.
+Local Artifact Storage
+~~~~~~~~~~~~~~~~~~~~~~
-.. code-block:: bash
+The folder containing the temporary deploy artifacts (``/home/jenkins/tmpdir``
+in the examples below) needs to have its permissions set to ``777``, in order
+for ``libvirt`` to be able to use them.
- $ mkdir -p -m 777 /home/jenkins/tmpdir
+.. code-block:: console
-For an AArch64 Jumpserver, the "libvirt" minimum required version is 3.x, 3.5 or newer highly recommended.
-While not mandatory, upgrading the kernel and QEMU on the Jumpserver is also highly recommended
-(especially on AArch64 Jumpservers).
+ jenkins@jumpserver:~$ mkdir -p -m 777 /home/jenkins/tmpdir
+
+Network Configuration
+~~~~~~~~~~~~~~~~~~~~~
+
+Relevant Linux bridges should also be pre-configured for certain networks,
+depending on the type of the deployment.
+
++------------+---------------+----------------------------------------------+
+| Network | Linux Bridge | Linux Bridge necessity based on deploy type |
+| | +--------------+---------------+---------------+
+| | | ``virtual`` | ``baremetal`` | ``hybrid`` |
++============+===============+==============+===============+===============+
+| PXE/admin | ``admin_br`` | absent | present | present |
++------------+---------------+--------------+---------------+---------------+
+| management | ``mgmt_br`` | optional | optional, | optional, |
+| | | | recommended, | recommended, |
+| | | | required for | required for |
+| | | | ``functest``, | ``functest``, |
+| | | | ``yardstick`` | ``yardstick`` |
++------------+---------------+--------------+---------------+---------------+
+| internal | ``int_br`` | optional | optional | present |
++------------+---------------+--------------+---------------+---------------+
+| public | ``public_br`` | optional | optional, | optional, |
+| | | | recommended, | recommended, |
+| | | | useful for | useful for |
+| | | | debugging | debugging |
++------------+---------------+--------------+---------------+---------------+
-For CentOS 7.4 (AArch64), distro provided packages are already new enough.
-For Ubuntu 16.04 (arm64), distro packages are too old and 3rd party repositories should be used.
-For convenience, Armband provides a DEB repository holding all the required packages.
+.. TIP::
-To add and enable the Armband repository on an Ubuntu 16.04 system,
-create a new sources list file `/apt/sources.list.d/armband.list` with the following contents:
+   IP addresses should be assigned to the created bridge interfaces (not
+   to one of their ports).
-.. code-block:: bash
+.. WARNING::
- $ cat /etc/apt/sources.list.d/armband.list
- //for OpenStack Queens release
- deb http://linux.enea.com/mcp-repos/queens/xenial queens-armband main
+ ``PXE/admin`` bridge (``admin_br``) **must** have an IP address.
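+
+If the bridges are created manually, a minimal non-persistent sketch using
+``iproute2`` could look like the following (the IP address, prefix and
+physical port name below are examples only and must match the POD's
+``PXE/admin`` addressing defined in ``IDF``):
+
+.. code-block:: console
+
+  jenkins@jumpserver:~$ sudo ip link add name admin_br type bridge
+  jenkins@jumpserver:~$ sudo ip link set admin_br up
+  jenkins@jumpserver:~$ sudo ip addr add 192.168.11.1/24 dev admin_br
+  jenkins@jumpserver:~$ sudo ip link set eno2 master admin_br
+
+For a persistent setup, the equivalent configuration should instead go into
+the distribution's own network configuration (e.g.
+``/etc/network/interfaces`` on ``Ubuntu Xenial`` or ``ifcfg`` files on
+``CentOS 7``).
+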
- $ apt-get update
+Changes ``deploy.sh`` Will Perform to Jumpserver OS
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Fuel@OPNFV has been validated by CI using the following distributions
-installed on the Jumpserver:
+.. WARNING::
-- CentOS 7 (recommended by Pharos specification);
-- Ubuntu Xenial;
+   The install script will alter the Jumpserver ``sysctl`` configuration and
+   disable the ``net.bridge.bridge-nf-call`` flags.
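+
+For reference, the ``net.bridge.bridge-nf-call-*`` family of flags (available
+once the ``br_netfilter`` module is loaded; the exact set toggled by the
+script may differ) can be inspected before and after the deployment:
+
+.. code-block:: console
+
+  jenkins@jumpserver:~$ sudo sysctl net.bridge.bridge-nf-call-iptables \
+                                    net.bridge.bridge-nf-call-ip6tables \
+                                    net.bridge.bridge-nf-call-arptables
+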
-**NOTE**: The install script expects 'libvirt' to be already running on the Jumpserver. In case libvirt
-packages are missing, the script will install them; but depending on the OS distribution, the user
-might have to start the 'libvirtd' service manually, then run the deploy script again. Therefore, it
-is recommended to install libvirt-bin explicitly on the Jumpserver before the deployment.
+.. WARNING::
-**NOTE**: It is also recommended to install the newer kernel on the Jumpserver before the deployment.
+ On Jumpservers running Ubuntu with AppArmor enabled, when deploying
+ on baremetal nodes (i.e. when MaaS is used), the install script
+ will disable certain conflicting AppArmor profiles that interfere with
+ MaaS services inside the container, e.g. ``ntpd``, ``named``, ``dhcpd``,
+ ``tcpdump``.
-**NOTE**: The install script will automatically install the rest of required distro package
-dependencies on the Jumpserver, unless explicitly asked not to (via -P deploy arg). This includes
-Python, QEMU, libvirt etc.
+.. WARNING::
-**NOTE**: The install script will alter Jumpserver sysconf and disable `net.bridge.bridge-nf-call`.
+ The install script will automatically install and/or upgrade the
+ required distribution package dependencies on the Jumpserver,
+ unless explicitly asked not to (via the ``-P`` deploy arg).
-.. code-block:: bash
+OPNFV Software Configuration (``XDF``)
+======================================
- $ apt-get install linux-image-generic-hwe-16.04-edge libvirt-bin
+.. versionadded:: 5.0.0
+.. versionchanged:: 7.0.0
+Unlike the old approach based on OpenStack Fuel, OPNFV Fuel no longer has a
+graphical user interface for configuring the environment; instead it relies
+on OPNFV-specific descriptor files, generically referred to as ``XDF``:
-==========================================
-OPNFV Software Installation and Deployment
-==========================================
+- ``PDF`` (POD Descriptor File) provides an abstraction of the target POD
+ with all its hardware characteristics and required parameters;
+- ``IDF`` (Installer Descriptor File) extends the ``PDF`` with POD related
+ parameters required by the OPNFV Fuel installer;
+- ``SDF`` (Scenario Descriptor File, **not** yet adopted) will later
+ replace embedded scenario definitions, describing the roles and layout of
+  the cluster environment for a given reference architecture;
-This section describes the process of installing all the components needed to
-deploy the full OPNFV reference platform stack across a server cluster.
+.. TIP::
-The installation is done with Mirantis Cloud Platform (MCP), which is based on
-a reclass model. This model provides the formula inputs to Salt, to make the deploy
-automatic based on deployment scenario.
-The reclass model covers:
+ For ``virtual`` deployments, if the ``public`` network will be accessed
+ from outside the ``jumpserver`` node, a custom ``PDF``/``IDF`` pair is
+ required for customizing ``idf.net_config.public`` and
+ ``idf.fuel.jumphost.bridges.public``.
- - Infrastructure node definition: Salt Master node (cfg01) and MaaS node (mas01)
- - OpenStack node definition: Controller nodes (ctl01, ctl02, ctl03) and Compute nodes (cmp001, cmp002)
- - Infrastructure components to install (software packages, services etc.)
- - OpenStack components and services (rabbitmq, galera etc.), as well as all configuration for them
+.. NOTE::
+ For OPNFV CI PODs, as well as simple (no ``public`` bridge) ``virtual``
+ deployments, ``PDF``/``IDF`` files are already available in the
+ `pharos git repo`_. They can be used as a reference for user-supplied
+ inputs or to kick off a deployment right away.
-Automatic Installation of a Virtual POD
-=======================================
++----------+------------------------------------------------------------------+
+| LAB/POD | ``PDF``/``IDF`` availability based on deploy type |
+| +------------------------+--------------------+--------------------+
+| | ``virtual`` | ``baremetal`` | ``hybrid`` |
++==========+========================+====================+====================+
+| OPNFV CI | available in | available in | N/A, as currently |
+| POD | `pharos git repo`_ | `pharos git repo`_ | there are 0 hybrid |
+| | (e.g. | (e.g. ``lf-pod2``, | PODs in OPNFV CI |
+| | ``ericsson-virtual1``) | ``arm-pod5``) | |
++----------+------------------------+--------------------+--------------------+
+| local or | ``user-supplied`` | ``user-supplied`` | ``user-supplied`` |
+| new POD | | | |
++----------+------------------------+--------------------+--------------------+
+
+.. TIP::
+
+   Both ``PDF`` and ``IDF`` structures are modelled as ``yaml`` schemas in
+   the `pharos git repo`_, which is also included as a git submodule in
+   OPNFV Fuel.
+
+ .. SEEALSO::
-For virtual deploys all the targets are VMs on the Jumpserver. The deploy script will:
+ - ``mcp/scripts/pharos/config/pdf/pod1.schema.yaml``
+ - ``mcp/scripts/pharos/config/pdf/idf-pod1.schema.yaml``
- - Create a Salt Master VM on the Jumpserver which will drive the installation
- - Create the bridges for networking with virsh (only if a real bridge does not already exist for a given network)
- - Install OpenStack on the targets
- - Leverage Salt to install & configure OpenStack services
+ Schema files are also used during the initial deployment phase to validate
+ the user-supplied input ``PDF``/``IDF`` files.
-.. figure:: img/fuel_virtual.png
- :align: center
- :alt: Fuel@OPNFV Virtual POD Network Layout Examples
+``PDF``
+~~~~~~~
- Fuel@OPNFV Virtual POD Network Layout Examples
+The Pod Descriptor File is a hardware description of the POD
+infrastructure. The information is modeled under a ``yaml`` structure.
- +-----------------------+------------------------------------------------------------------------+
- | cfg01 | Salt Master VM |
- +-----------------------+------------------------------------------------------------------------+
- | ctl01 | Controller VM |
- +-----------------------+------------------------------------------------------------------------+
- | cmp001/cmp002 | Compute VMs |
- +-----------------------+------------------------------------------------------------------------+
- | gtw01 | Gateway VM with neutron services (dhcp agent, L3 agent, metadata, etc) |
- +-----------------------+------------------------------------------------------------------------+
- | odl01 | VM on which ODL runs (for scenarios deployed with ODL) |
- +-----------------------+------------------------------------------------------------------------+
+The hardware description covers the ``jumphost`` node and a set of ``nodes``
+for the cluster target boards. For each node the following characteristics
+are defined:
+- Node parameters including ``CPU`` features and total memory;
+- A list of available disks;
+- Remote management parameters;
+- Network interfaces list including name, ``MAC`` address, link speed,
+ advanced features;
-In this figure there are examples of two virtual deploys:
- - Jumphost 1 has only virsh bridges, created by the deploy script
- - Jumphost 2 has a mix of Linux and virsh bridges; When Linux bridge exists for a specified network,
- the deploy script will skip creating a virsh bridge for it
+.. SEEALSO::
-**Note**: A virtual network "mcpcontrol" is always created for initial connection
-of the VMs on Jumphost.
+ A reference file with the expected ``yaml`` structure is available at:
+ - ``mcp/scripts/pharos/config/pdf/pod1.yaml``
-Automatic Installation of a Baremetal POD
-=========================================
+ For more information on ``PDF``, see the `OPNFV PDF Wiki Page`_.
-The baremetal installation process can be done by editing the information about
-hardware and environment in the reclass files, or by using the files Pod Descriptor
-File (PDF) and Installer Descriptor File (IDF) as described in the OPNFV Pharos project.
-These files contain all the information about the hardware and network of the deployment
-that will be fed to the reclass model during deployment.
+.. WARNING::
-The installation is done automatically with the deploy script, which will:
+   The fixed IPs defined in ``PDF`` are ignored by the OPNFV Fuel installer
+   script, which will instead assign addresses based on the network ranges
+   defined in ``IDF``.
- - Create a Salt Master VM on the Jumpserver which will drive the installation
- - Create a MaaS Node VM on the Jumpserver which will provision the targets
- - Install OpenStack on the targets
- - Leverage MaaS to provision baremetal nodes with the operating system
- - Leverage Salt to configure the operating system on the baremetal nodes
- - Leverage Salt to install & configure OpenStack services
+ For more details on the way IP addresses are assigned, see
+ :ref:`OPNFV Fuel User Guide <fuel-userguide>`.
-.. figure:: img/fuel_baremetal.png
- :align: center
- :alt: Fuel@OPNFV Baremetal POD Network Layout Example
-
- Fuel@OPNFV Baremetal POD Network Layout Example
-
- +-----------------------+---------------------------------------------------------+
- | cfg01 | Salt Master VM |
- +-----------------------+---------------------------------------------------------+
- | mas01 | MaaS Node VM |
- +-----------------------+---------------------------------------------------------+
- | kvm01..03 | Baremetals which hold the VMs with controller functions |
- +-----------------------+---------------------------------------------------------+
- | cmp001/cmp002 | Baremetal compute nodes |
- +-----------------------+---------------------------------------------------------+
- | prx01/prx02 | Proxy VMs for Nginx |
- +-----------------------+---------------------------------------------------------+
- | msg01..03 | RabbitMQ Service VMs |
- +-----------------------+---------------------------------------------------------+
- | dbs01..03 | MySQL service VMs |
- +-----------------------+---------------------------------------------------------+
- | mdb01..03 | Telemetry VMs |
- +-----------------------+---------------------------------------------------------+
- | odl01 | VM on which ODL runs (for scenarios deployed with ODL) |
- +-----------------------+---------------------------------------------------------+
- | Tenant VM | VM running in the cloud |
- +-----------------------+---------------------------------------------------------+
-
-In the baremetal deploy all bridges but "mcpcontrol" are Linux bridges. For the Jumpserver, it is
-required to pre-configure at least the admin_br bridge for the PXE/Admin.
-For the targets, the bridges are created by the deploy script.
-
-**Note**: A virtual network "mcpcontrol" is always created for initial connection
-of the VMs on Jumphost.
+``PDF``/``IDF`` Role (hostname) Mapping
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Upcoming ``SDF`` support will introduce a series of possible node roles.
+Until that happens, the role mapping logic is hardcoded, based on node index
+in ``PDF``/``IDF`` (which should also be in sync, i.e. the parameters of the
+``n``-th cluster node defined in ``PDF`` should be the ``n``-th node in
+``IDF`` structures too).
-Steps to Start the Automatic Deploy
-===================================
++-------------+------------------+----------------------+
+| Node index | ``HA`` scenario | ``noHA`` scenario |
++=============+==================+======================+
+| 1st | ``kvm01`` | ``ctl01`` |
++-------------+------------------+----------------------+
+| 2nd | ``kvm02`` | ``gtw01`` |
++-------------+------------------+----------------------+
+| 3rd | ``kvm03`` | ``odl01``/``unused`` |
++-------------+------------------+----------------------+
+| 4th, | ``cmp001``, | ``cmp001``, |
+| 5th, | ``cmp002``, | ``cmp002``, |
+| ... | ``...`` | ``...`` |
++-------------+------------------+----------------------+
-These steps are common both for virtual and baremetal deploys.
+.. TIP::
-#. Clone the Fuel code from gerrit
+ To switch node role(s), simply reorder the node definitions in
+ ``PDF``/``IDF`` (make sure to keep them in sync).
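+
+As a schematic sketch (node attributes elided), the ordering of the ``PDF``
+``nodes`` list, together with the matching per-node lists in ``IDF``,
+directly determines the mapping above:
+
+.. code-block:: yaml
+
+  nodes:
+    - ~  # 1st node: kvm01 (HA) / ctl01 (noHA)
+    - ~  # 2nd node: kvm02 (HA) / gtw01 (noHA)
+    - ~  # 3rd node: kvm03 (HA) / odl01 or unused (noHA)
+    - ~  # 4th node: cmp001
+    - ~  # 5th node: cmp002
+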
- For x86_64
+``IDF``
+~~~~~~~
- .. code-block:: bash
+The Installer Descriptor File extends the ``PDF`` with POD related parameters
+required by the installer. This information may differ for each installer
+type and is not considered part of the POD infrastructure.
+
+``idf.*`` Overview
+------------------
+
+The ``IDF`` file must be named after the ``PDF`` it attaches to, with the
+prefix ``idf-``.
+
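+For example, the ``lf-pod2`` ``PDF`` stored in the `pharos git repo`_ as
+``labs/lf/pod2.yaml`` is accompanied by an ``IDF`` named
+``labs/lf/idf-pod2.yaml``.
+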
+.. SEEALSO::
+
+ A reference file with the expected ``yaml`` structure is available at:
+
+ - ``mcp/scripts/pharos/config/pdf/idf-pod1.yaml``
+
+The file follows a ``yaml`` structure and at least two sections
+(``idf.net_config`` and ``idf.fuel``) are expected.
+
+The ``idf.fuel`` section defines several sub-sections required by the OPNFV
+Fuel installer:
+
+- ``jumphost``: List of bridge names for each network on the Jumpserver;
+- ``network``: List of device name and bus address info of all the target nodes.
+ The order must be aligned with the order defined in the ``PDF`` file.
+ The OPNFV Fuel installer relies on the ``IDF`` model to setup all node NICs
+ by defining the expected device name and bus address;
+- ``maas``: Defines the target nodes commission timeout and deploy timeout;
+- ``reclass``: Defines compute parameter tuning, including huge pages, ``CPU``
+ pinning and other ``DPDK`` settings;
+
+.. code-block:: yaml
+
+ ---
+ idf:
+ version: 0.1 # fixed, the only supported version (mandatory)
+ net_config: # POD network configuration overview (mandatory)
+ oob: ... # mandatory
+ admin: ... # mandatory
+ mgmt: ... # mandatory
+ storage: ... # mandatory
+ private: ... # mandatory
+ public: ... # mandatory
+ fuel: # OPNFV Fuel specific section (mandatory)
+ jumphost: # OPNFV Fuel jumpserver bridge configuration (mandatory)
+ bridges: # Bridge name mapping (mandatory)
+ admin: 'admin_br' # <PXE/admin bridge name> or ~
+ mgmt: 'mgmt_br' # <mgmt bridge name> or ~
+ private: ~ # <private bridge name> or ~
+ public: 'public_br' # <public bridge name> or ~
+ trunks: ... # Trunked networks (optional)
+ maas: # MaaS timeouts (optional)
+ timeout_comissioning: 10 # commissioning timeout in minutes
+ timeout_deploying: 15 # deploy timeout in minutes
+ network: # Cluster nodes network (mandatory)
+ interface_mtu: 1500 # Cluster-level MTU (optional)
+ ntp_strata_host1: 1.pool.ntp.org # NTP1 (optional)
+ ntp_strata_host2: 0.pool.ntp.org # NTP2 (optional)
+ node: ... # List of per-node cfg (mandatory)
+ reclass: # Additional params (mandatory)
+ node: ... # List of per-node cfg (mandatory)
+
+``idf.net_config``
+------------------
+
+``idf.net_config`` was introduced as a mechanism to map all the usual cluster
+networks (internal and provider networks, e.g. ``mgmt``) to their ``VLAN``
+tags, ``CIDR`` and a physical interface index (used to match networks to
+interface names, like ``eth0``, on the cluster nodes).
+
+
+.. WARNING::
+
+ The mapping between one network segment (e.g. ``mgmt``) and its ``CIDR``/
+ ``VLAN`` is not configurable on a per-node basis, but instead applies to
+ all the nodes in the cluster.
+
+For each network, the following parameters are currently supported:
+
++--------------------------+--------------------------------------------------+
+| ``idf.net_config.*`` key | Details |
++==========================+==================================================+
+| ``interface`` | The index of the interface to use for this net. |
+| | For each cluster node (if network is present), |
+| | OPNFV Fuel will determine the underlying physical|
+| | interface by picking the element at index |
+| | ``interface`` from the list of network interface |
+| | names defined in |
+| | ``idf.fuel.network.node.*.interfaces``. |
+| | Required for each network. |
+| | |
+| | .. NOTE:: |
+| | |
+| | The interface index should be the |
+| | same on all cluster nodes. This can be |
+| | achieved by ordering them accordingly in |
+| | ``PDF``/``IDF``. |
++--------------------------+--------------------------------------------------+
+| ``vlan`` | ``VLAN`` tag (integer) or the string ``native``. |
+| | Required for each network. |
++--------------------------+--------------------------------------------------+
+| ``ip-range`` | When specified, all cluster IPs dynamically |
+| | allocated by OPNFV Fuel for that network will be |
+| | assigned inside this range. |
+| | Required for ``oob``, optional for others. |
+| | |
+| | .. NOTE:: |
+| | |
+| | For now, only range start address is used. |
++--------------------------+--------------------------------------------------+
+| ``network`` | Network segment address. |
+| | Required for each network, except ``oob``. |
++--------------------------+--------------------------------------------------+
+| ``mask`` | Network segment mask. |
+| | Required for each network, except ``oob``. |
++--------------------------+--------------------------------------------------+
+| ``gateway`` | Gateway IP address. |
+| | Required for ``public``, N/A for others. |
++--------------------------+--------------------------------------------------+
+| ``dns`` | List of DNS IP addresses. |
+| | Required for ``public``, N/A for others. |
++--------------------------+--------------------------------------------------+
+
+Sample ``public`` network configuration block:
+
+.. code-block:: yaml
+
+ idf:
+ net_config:
+ public:
+ interface: 1
+ vlan: native
+ network: 10.0.16.0
+ ip-range: 10.0.16.100-10.0.16.253
+ mask: 24
+ gateway: 10.0.16.254
+ dns:
+ - 8.8.8.8
+ - 8.8.4.4
+
+.. TOPIC:: ``hybrid`` POD notes
+
+ Interface indexes must be the same for all nodes, which is problematic
+ when mixing ``virtual`` nodes (where all interfaces were untagged
+ so far) with ``baremetal`` nodes (where interfaces usually carry
+ tagged VLANs).
+
+ .. TIP::
+
+ To achieve this, a special ``jumpserver`` network layout is used:
+ ``mgmt``, ``storage``, ``private``, ``public`` are trunked together
+ in a single ``trunk`` bridge:
+
+ - without decapsulating them (if they are also tagged on ``baremetal``);
+ a ``trunk.<vlan_tag>`` interface should be created on the
+ ``jumpserver`` for each tagged VLAN so the kernel won't drop the
+ packets;
+ - by decapsulating them first (if they are also untagged on
+ ``baremetal`` nodes);
+
+ The ``trunk`` bridge is then used for all bridges OPNFV Fuel
+ is aware of in ``idf.fuel.jumphost.bridges``, e.g. for a ``trunk`` where
+ only ``mgmt`` network is not decapsulated:
+
+ .. code-block:: yaml
+
+ idf:
+ fuel:
+ jumphost:
+ bridges:
+ admin: 'admin_br'
+ mgmt: 'trunk'
+ private: 'trunk'
+ public: 'trunk'
+ trunks:
+ # mgmt network is not decapsulated for jumpserver infra nodes,
+ # to align with the VLAN configuration of baremetal nodes.
+ mgmt: True
+
+.. WARNING::
+
+ The Linux kernel limits the name of network interfaces to 16 characters.
+ Extra care is required when choosing bridge names, so appending the
+ ``VLAN`` tag won't lead to an interface name length exceeding that limit.
+
+``idf.fuel.network``
+--------------------
+
+``idf.fuel.network`` allows mapping the cluster networks (e.g. ``mgmt``) to
+their physical interface name (e.g. ``eth0``) and bus address on the cluster
+nodes.
+
+``idf.fuel.network.node`` should be a list with the same number (and order) of
+elements as the cluster nodes defined in ``PDF``, e.g. the second cluster node
+in ``PDF`` will use the interface name and bus address defined in the second
+list element.
+
+Below is a sample configuration block for a single node with two interfaces:
+
+.. code-block:: yaml
+
+ idf:
+ fuel:
+ network:
+ node:
+ # Ordered-list, index should be in sync with node index in PDF
+ - interfaces:
+ # Ordered-list, index should be in sync with interface index
+ # in PDF
+ - 'ens3'
+ - 'ens4'
+ busaddr:
+ # Bus-info reported by `ethtool -i ethX`
+ - '0000:00:03.0'
+ - '0000:00:04.0'
+
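+The bus address of a given interface can be read on the node itself, e.g.
+(the interface name below is just an example):
+
+.. code-block:: console
+
+  $ ethtool -i ens3 | grep bus-info
+  bus-info: 0000:00:03.0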
+
+``idf.fuel.reclass``
+--------------------
+
+``idf.fuel.reclass`` provides a way of overriding default values in the
+reclass cluster model.
+
+This currently covers only compute parameter tuning, including huge
+pages, ``CPU`` pinning and other ``DPDK`` settings.
+
+``idf.fuel.reclass.node`` should be a list with the same number (and order) of
+elements as the cluster nodes defined in ``PDF``, e.g. the second cluster node
+in ``PDF`` will use the parameters defined in the second list element.
+
+The following parameters are currently supported:
+
++---------------------------------+-------------------------------------------+
+| ``idf.fuel.reclass.node.*`` key | Details                                   |
++=================================+===========================================+
+| ``nova_cpu_pinning``            | List of CPU cores nova will be pinned to. |
+|                                 |                                           |
+|                                 | .. NOTE::                                 |
+|                                 |                                           |
+|                                 |    Currently disabled.                    |
++---------------------------------+-------------------------------------------+
+| ``compute_hugepages_size``      | Size of each persistent huge page.        |
+|                                 |                                           |
+|                                 | Usual values are ``2M`` and ``1G``.       |
++---------------------------------+-------------------------------------------+
+| ``compute_hugepages_count``     | Total number of persistent huge pages.    |
++---------------------------------+-------------------------------------------+
+| ``compute_hugepages_mount``     | Mount point to use for huge pages.        |
++---------------------------------+-------------------------------------------+
+| ``compute_kernel_isolcpu``      | List of CPU cores isolated from the Linux |
+|                                 | scheduler.                                |
++---------------------------------+-------------------------------------------+
+| ``compute_dpdk_driver``         | Kernel module to provide userspace I/O    |
+|                                 | support.                                  |
++---------------------------------+-------------------------------------------+
+| ``compute_ovs_pmd_cpu_mask``    | Hexadecimal mask of CPUs to run ``DPDK``  |
+|                                 | Poll-mode drivers.                        |
++---------------------------------+-------------------------------------------+
+| ``compute_ovs_dpdk_socket_mem`` | Huge page memory (in ``MB``) to be used   |
+|                                 | by the ``OVS-DPDK`` daemon, as a          |
+|                                 | comma-separated list with one element     |
+|                                 | per ``NUMA`` node.                        |
++---------------------------------+-------------------------------------------+
+| ``compute_ovs_dpdk_lcore_mask`` | Hexadecimal mask of the ``DPDK`` lcores   |
+|                                 | used to run ``DPDK`` processes.           |
++---------------------------------+-------------------------------------------+
+| ``compute_ovs_memory_channels`` | Number of memory channels to be used.     |
++---------------------------------+-------------------------------------------+
+| ``dpdk0_driver``                | NIC driver to use for the physical        |
+|                                 | network interface.                        |
++---------------------------------+-------------------------------------------+
+| ``dpdk0_n_rxq``                 | Number of ``RX`` queues.                  |
++---------------------------------+-------------------------------------------+
+
+Sample ``compute_params`` configuration block (for a single node):
+
+.. code-block:: yaml
+
+ idf:
+ fuel:
+ reclass:
+ node:
+ - compute_params:
+ common: &compute_params_common
+ compute_hugepages_size: 2M
+ compute_hugepages_count: 2048
+ compute_hugepages_mount: /mnt/hugepages_2M
+ dpdk:
+ <<: *compute_params_common
+ compute_dpdk_driver: uio
+ compute_ovs_pmd_cpu_mask: "0x6"
+ compute_ovs_dpdk_socket_mem: "1024"
+ compute_ovs_dpdk_lcore_mask: "0x8"
+ compute_ovs_memory_channels: "2"
+ dpdk0_driver: igb_uio
+ dpdk0_n_rxq: 2
+
+``SDF``
+~~~~~~~
+
+Scenario Descriptor Files are not yet implemented in the OPNFV Fuel ``Iruya``
+release.
+
+Instead, the embedded OPNFV Fuel scenario files are locally available in
+``mcp/config/scenario``.
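+
+They can be listed directly from the OPNFV Fuel git tree:
+
+.. code-block:: console
+
+  jenkins@jumpserver:~/fuel$ ls mcp/config/scenario/
+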
- $ git clone https://git.opnfv.org/fuel
- $ cd fuel
+OPNFV Software Installation and Deployment
+==========================================
- For aarch64
+This section describes the process of installing all the components needed to
+deploy the full OPNFV reference platform stack across a server cluster.
- .. code-block:: bash
+Deployment Types
+~~~~~~~~~~~~~~~~
- $ git clone https://git.opnfv.org/armband
- $ cd armband
+.. WARNING::
-#. Checkout the Fraser release
+   OPNFV releases previous to ``Gambia`` used to rely on the ``virtual``
+   keyword being part of the POD name (e.g. ``ericsson-virtual2``) to
+   configure the deployment type as ``virtual``; otherwise ``baremetal``
+   was implied.
- .. code-block:: bash
+``Gambia`` and newer releases are more flexible, supporting a mix of
+``baremetal`` and ``virtual`` nodes, so the type of deployment is now
+automatically determined based on the cluster node types in ``PDF``:
- $ git checkout opnfv-6.2.1
++---------------------------------+-------------------------------------------+
+| ``PDF`` has nodes of type | Deployment type |
++---------------+-----------------+ |
+| ``baremetal`` | ``virtual`` | |
++===============+=================+===========================================+
+| yes | no | ``baremetal`` |
++---------------+-----------------+-------------------------------------------+
+| yes | yes | ``hybrid`` |
++---------------+-----------------+-------------------------------------------+
+| no | yes | ``virtual`` |
++---------------+-----------------+-------------------------------------------+
-#. Start the deploy script
+Based on that, the deployment script will later enable/disable certain extra
+nodes (e.g. ``mas01``) and/or ``STATE`` files (e.g. ``maas``).
- Besides the basic options, there are other recommended deploy arguments:
+``HA`` vs ``noHA``
+~~~~~~~~~~~~~~~~~~
- - use **-D** option to enable the debug info
- - use **-S** option to point to a tmp dir where the disk images are saved. The images will be
- re-used between deploys
- - use **|& tee** to save the deploy log to a file
+High availability of OpenStack services is determined based on scenario name,
+e.g. ``os-nosdn-nofeature-noha`` vs ``os-nosdn-nofeature-ha``.
- .. code-block:: bash
+.. TIP::
- $ ci/deploy.sh -l <lab_name> \
- -p <pod_name> \
- -b <URI to configuration repo containing the PDF file> \
- -s <scenario> \
- -D \
- -S <Storage directory for disk images> |& tee deploy.log
+ ``HA`` scenarios imply a virtualized control plane (``VCP``) for the
+ OpenStack services running on the 3 ``kvm`` nodes.
- **NOTE**: The deployment uses the OPNFV Pharos project as input (PDF and IDF files)
- for hardware and network configuration of all current OPNFV PODs.
- When deploying a new POD, one can pass the `-b` flag to the deploy script to override
- the path for the labconfig directory structure containing the PDF and IDF (see below).
+ .. SEEALSO::
-Examples
---------
-#. Virtual deploy
+ An experimental feature argument (``-N``) is supported by the deploy
+ script for disabling ``VCP``, although it might not be supported by
+      all scenarios and is not being continuously validated by OPNFV CI/CD.
- To start a virtual deployment, it is required to have the `virtual` keyword
- while specifying the pod name to the installer script.
+.. WARNING::
- It will create the required bridges and networks, configure Salt Master and
- install OpenStack.
+ ``virtual`` ``HA`` deployments are not officially supported, due to
+ poor performance and various limitations of nested virtualization on
+ both ``x86_64`` and ``aarch64`` architectures.
- .. code-block:: bash
+ .. TIP::
- $ ci/deploy.sh -l ericsson \
- -p virtual3 \
- -s os-nosdn-nofeature-noha \
- -D \
- -S /home/jenkins/tmpdir |& tee deploy.log
+ ``virtual`` ``HA`` deployments without ``VCP`` are supported, but
+ highly experimental.
- Once the deployment is complete, the OpenStack Dashboard, Horizon, is
- available at http://<controller VIP>:8078
- The administrator credentials are **admin** / **opnfv_secret**.
++-------------------------------+-------------------------+-------------------+
+| Feature | ``HA`` scenario | ``noHA`` scenario |
++===============================+=========================+===================+
+| ``VCP`` | yes, | no |
+| (Virtualized Control Plane) | disabled with ``-N`` | |
++-------------------------------+-------------------------+-------------------+
+| OpenStack APIs SSL | yes | no |
++-------------------------------+-------------------------+-------------------+
+| Storage | ``GlusterFS`` | ``NFS`` |
++-------------------------------+-------------------------+-------------------+
- A simple (and generic) sample PDF/IDF set of configuration files may
- be used for virtual deployments by setting lab/POD name to 'local-virtual1'.
- This sample configuration is x86_64 specific and hardcodes certain parameters,
- like public network address space, so a dedicated PDF/IDF is highly recommended.
+Steps to Start the Automatic Deploy
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- .. code-block:: bash
+These steps are common to ``virtual``, ``baremetal`` and ``hybrid`` deploys,
+whether ``x86_64``, ``aarch64`` or ``mixed`` (``x86_64`` and ``aarch64``):
- $ ci/deploy.sh -l local \
- -p virtual1 \
- -s os-nosdn-nofeature-noha \
- -D \
- -S /home/jenkins/tmpdir |& tee deploy.log
+- Clone the OPNFV Fuel code from gerrit
+- Checkout the ``Iruya`` release tag
+- Start the deploy script
-#. Baremetal deploy
+.. NOTE::
- A x86 deploy on pod2 from Linux Foundation lab
+ The deployment uses the OPNFV Pharos project as input (``PDF`` and
+ ``IDF`` files) for hardware and network configuration of all current
+ OPNFV PODs.
- .. code-block:: bash
+ When deploying a new POD, one may pass the ``-b`` flag to the deploy
+ script to override the path for the labconfig directory structure
+ containing the ``PDF`` and ``IDF`` (``<URI to configuration repo ...>`` is
+ the absolute path to a local or remote directory structure, populated
+ similar to `pharos git repo`_, i.e. ``PDF``/``IDF`` reside in a
+ subdirectory called ``labs/<lab_name>``).
- $ ci/deploy.sh -l lf \
- -p pod2 \
- -s os-nosdn-nofeature-ha \
- -D \
- -S /home/jenkins/tmpdir |& tee deploy.log
+.. code-block:: console
- .. figure:: img/lf_pod2.png
- :align: center
- :alt: Fuel@OPNFV LF POD2 Network Layout
+ jenkins@jumpserver:~$ git clone https://git.opnfv.org/fuel
+ jenkins@jumpserver:~$ cd fuel
+ jenkins@jumpserver:~/fuel$ git checkout opnfv-9.0.0
+ jenkins@jumpserver:~/fuel$ ci/deploy.sh -l <lab_name> \
+ -p <pod_name> \
+ -b <URI to configuration repo containing the PDF/IDF files> \
+ -s <scenario> \
+ -D \
+ -S <Storage directory for deploy artifacts> |& tee deploy.log
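+
+When the ``PDF``/``IDF`` pair lives in a local directory tree instead of the
+`pharos git repo`_, the ``-b`` argument takes a ``file://`` URI (the path
+below is just an example):
+
+.. code-block:: console
+
+  jenkins@jumpserver:~/fuel$ ci/deploy.sh -b file:///home/jenkins/labconfig \
+                                          -l <lab_name> \
+                                          -p <pod_name> \
+                                          -s <scenario> \
+                                          -D \
+                                          -S /home/jenkins/tmpdir |& tee deploy.log
+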
- Fuel@OPNFV LF POD2 Network Layout
+.. TIP::
- An aarch64 deploy on pod5 from Arm lab
+ Besides the basic options, there are other recommended deploy arguments:
- .. code-block:: bash
+ - use ``-D`` option to enable the debug info
+ - use ``-S`` option to point to a tmp dir where the disk images are saved.
+ The deploy artifacts will be re-used on subsequent (re)deployments.
+ - use ``|& tee`` to save the deploy log to a file
- $ ci/deploy.sh -l arm \
- -p pod5 \
- -s os-nosdn-nofeature-ha \
- -D \
- -S /home/jenkins/tmpdir |& tee deploy.log
+Typical Cluster Examples
+~~~~~~~~~~~~~~~~~~~~~~~~
- .. figure:: img/arm_pod5.png
- :align: center
- :alt: Fuel@OPNFV ARM POD5 Network Layout
+Common cluster layouts usually fall into one of the cases described below,
+categorized by deployment type (``baremetal``, ``virtual`` or ``hybrid``) and
+high availability (``HA`` or ``noHA``).
- Fuel@OPNFV ARM POD5 Network Layout
+A simplified overview of the steps ``deploy.sh`` will automatically perform is:
- Once the deployment is complete, the SaltStack Deployment Documentation is
- available at http://<proxy public VIP>:8090
+- create a Salt Master Docker container on the jumpserver, which will drive
+ the rest of the installation;
+- ``baremetal`` or ``hybrid`` only: create a ``MaaS`` container node,
+ which will be leveraged using Salt to handle OS provisioning on the
+ ``baremetal`` nodes;
+- leverage Salt to install & configure OpenStack;
- When deploying a new POD, one can pass the `-b` flag to the deploy script to override
- the path for the labconfig directory structure containing the PDF and IDF.
+.. NOTE::
- .. code-block:: bash
+ A Docker network ``mcpcontrol`` is always created for initial connection
+ of the infrastructure containers (``cfg01``, ``mas01``) on Jumphost.
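+
+   Once ``deploy.sh`` is running, this can be verified on the ``jumpserver``
+   with standard Docker tooling, e.g.:
+
+   .. code-block:: console
+
+      jenkins@jumpserver:~$ docker network ls | grep mcpcontrol
+      jenkins@jumpserver:~$ docker ps
+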
- $ ci/deploy.sh -b file://<absolute_path_to_labconfig> \
- -l <lab_name> \
- -p <pod_name> \
- -s <scenario> \
- -D \
- -S <tmp_folder> |& tee deploy.log
+.. WARNING::
- - <absolute_path_to_labconfig> is the absolute path to a local directory, populated
- similar to Pharos, i.e. PDF/IDF reside in <absolute_path_to_labconfig>/labs/<lab_name>
- - <lab_name> is the same as the directory in the path above
- - <pod_name> is the name used for the PDF (<pod_name>.yaml) and IDF (idf-<pod_name>.yaml) files
+   A single cluster deployment per ``jumpserver`` node is currently supported,
+   regardless of its type (``virtual``, ``baremetal`` or ``hybrid``).
+Once the deployment is complete, the following should be accessible:
++---------------+----------------------------------+---------------------------+
+| Resource | ``HA`` scenario | ``noHA`` scenario |
++===============+==================================+===========================+
+| ``Horizon`` | ``https://<prx public VIP>`` | ``http://<ctl VIP>:8078`` |
+| (Openstack | | |
+| Dashboard) | | |
++---------------+----------------------------------+---------------------------+
+| ``SaltStack`` | ``http://<prx public VIP>:8090`` | N/A |
+| Deployment | | |
+| Documentation | | |
++---------------+----------------------------------+---------------------------+
-Pod and Installer Descriptor Files
-==================================
+.. SEEALSO::
-Descriptor files provide the installer with an abstraction of the target pod
-with all its hardware characteristics and required parameters. This information
-is split into two different files:
-Pod Descriptor File (PDF) and Installer Descriptor File (IDF).
+ For more details on locating and importing the generated SSL certificate,
+ see :ref:`OPNFV Fuel User Guide <fuel-userguide>`.
-The Pod Descriptor File is a hardware description of the pod
-infrastructure. The information is modeled under a yaml structure.
-A reference file with the expected yaml structure is available at
-*mcp/config/labs/local/pod1.yaml*
+``virtual`` ``noHA`` POD
+------------------------
-The hardware description is arranged into a main "jumphost" node and a "nodes"
-set for all target boards. For each node the following characteristics
-are defined:
+In the following figure there are two generic examples of ``virtual`` deploys,
+each on a separate Jumphost node, both behind the same ``TOR`` switch:
-- Node parameters including CPU features and total memory.
-- A list of available disks.
-- Remote management parameters.
-- Network interfaces list including mac address, speed, advanced features and name.
+- Jumphost 1 has only virsh bridges (created by the deploy script);
+- Jumphost 2 has a mix of Linux (manually created) and ``libvirt`` managed
+ bridges (created by the deploy script);
-**Note**: The fixed IPs are ignored by the MCP installer script and it will instead
-assign based on the network ranges defined in IDF.
+.. figure:: img/fuel_virtual_noha.png
+ :align: center
+ :width: 60%
+ :alt: OPNFV Fuel Virtual noHA POD Network Layout Examples
-The Installer Descriptor File extends the PDF with pod related parameters
-required by the installer. This information may differ per each installer type
-and it is not considered part of the pod infrastructure.
-The IDF file must be named after the PDF with the prefix "idf-". A reference file with the expected
-structure is available at *mcp/config/labs/local/idf-pod1.yaml*
-
-The file follows a yaml structure and two sections "net_config" and "fuel" are expected.
-
-The "net_config" section describes all the internal and provider networks
-assigned to the pod. Each used network is expected to have a vlan tag, IP subnet and
-attached interface on the boards. Untagged vlans shall be defined as "native".
-
-The "fuel" section defines several sub-sections required by the Fuel installer:
-
-- jumphost: List of bridge names for each network on the Jumpserver.
-- network: List of device name and bus address info of all the target nodes.
- The order must be aligned with the order defined in PDF file. Fuel installer relies on the IDF model
- to setup all node NICs by defining the expected device name and bus address.
-- maas: Defines the target nodes commission timeout and deploy timeout. (optional)
-- reclass: Defines compute parameter tuning, including huge pages, cpu pinning
- and other DPDK settings. (optional)
-
-The following parameters can be defined in the IDF files under "reclass". Those value will
-overwrite the default configuration values in Fuel repository:
-
-- nova_cpu_pinning: List of CPU cores nova will be pinned to. Currently disabled.
-- compute_hugepages_size: Size of each persistent huge pages. Usual values are '2M' and '1G'.
-- compute_hugepages_count: Total number of persistent huge pages.
-- compute_hugepages_mount: Mount point to use for huge pages.
-- compute_kernel_isolcpu: List of certain CPU cores that are isolated from Linux scheduler.
-- compute_dpdk_driver: Kernel module to provide userspace I/O support.
-- compute_ovs_pmd_cpu_mask: Hexadecimal mask of CPUs to run DPDK Poll-mode drivers.
-- compute_ovs_dpdk_socket_mem: Set of amount huge pages in MB to be used by OVS-DPDK daemon
- taken for each NUMA node. Set size is equal to NUMA nodes count, elements are divided by comma.
-- compute_ovs_dpdk_lcore_mask: Hexadecimal mask of DPDK lcore parameter used to run DPDK processes.
-- compute_ovs_memory_channels: Number of memory channels to be used.
-- dpdk0_driver: NIC driver to use for physical network interface.
-- dpdk0_n_rxq: Number of RX queues.
-
-
-The full description of the PDF and IDF file structure are available as yaml schemas.
-The schemas are defined as a git submodule in Fuel repository. Input files provided
-to the installer will be validated against the schemas.
-
-- *mcp/scripts/pharos/config/pdf/pod1.schema.yaml*
-- *mcp/scripts/pharos/config/pdf/idf-pod1.schema.yaml*
+ OPNFV Fuel Virtual noHA POD Network Layout Examples
-=============
-Release Notes
-=============
+ +-------------+------------------------------------------------------------+
+ | ``cfg01`` | Salt Master Docker container |
+ +-------------+------------------------------------------------------------+
+ | ``ctl01`` | Controller VM |
+ +-------------+------------------------------------------------------------+
+ | ``gtw01`` | Gateway VM with neutron services |
+ | | (``DHCP`` agent, ``L3`` agent, ``metadata`` agent etc) |
+ +-------------+------------------------------------------------------------+
+ | ``odl01`` | VM on which ``ODL`` runs |
+ | | (for scenarios deployed with ODL) |
+ +-------------+------------------------------------------------------------+
+ | ``cmp001``, | Compute VMs |
+ | ``cmp002`` | |
+ +-------------+------------------------------------------------------------+
-Please refer to the :ref:`Release Notes <fuel-release-notes-label>` article.
+.. TIP::
-==========
-References
-==========
+ If external access to the ``public`` network is not required, there is
+ little to no motivation to create a custom ``PDF``/``IDF`` set for a
+ virtual deployment.
-OPNFV
+ Instead, the existing virtual PODs definitions in `pharos git repo`_ can
+ be used as-is:
-1) `OPNFV Home Page <http://www.opnfv.org>`_
-2) `OPNFV documentation <http://docs.opnfv.org>`_
-3) `Software downloads <https://www.opnfv.org/software/download>`_
+ - ``ericsson-virtual1`` for ``x86_64``;
+ - ``arm-virtual2`` for ``aarch64``;
-OpenStack
+.. code-block:: console
-4) `OpenStack Queens Release Artifacts <http://www.openstack.org/software/queens>`_
-5) `OpenStack Documentation <http://docs.openstack.org>`_
+ # example deploy cmd for an x86_64 virtual cluster
+ jenkins@jumpserver:~/fuel$ ci/deploy.sh -l ericsson \
+ -p virtual1 \
+ -s os-nosdn-nofeature-noha \
+ -D \
+ -S /home/jenkins/tmpdir |& tee deploy.log
-OpenDaylight
+``baremetal`` ``noHA`` POD
+--------------------------
-6) `OpenDaylight Artifacts <http://www.opendaylight.org/software/downloads>`_
+.. WARNING::
-Fuel
+ These scenarios are not tested in OPNFV CI, so they are considered
+ experimental.
-7) `Mirantis Cloud Platform Documentation <https://docs.mirantis.com/mcp/latest>`_
+.. figure:: img/fuel_baremetal_noha.png
+ :align: center
+ :width: 60%
+ :alt: OPNFV Fuel Baremetal noHA POD Network Layout Example
+
+ OPNFV Fuel Baremetal noHA POD Network Layout Example
+
+ +-------------+------------------------------------------------------------+
+ | ``cfg01`` | Salt Master Docker container |
+ +-------------+------------------------------------------------------------+
+ | ``mas01`` | MaaS Node Docker container |
+ +-------------+------------------------------------------------------------+
+ | ``ctl01`` | Baremetal controller node |
+ +-------------+------------------------------------------------------------+
+ | ``gtw01`` | Baremetal Gateway with neutron services |
+ | | (dhcp agent, L3 agent, metadata, etc) |
+ +-------------+------------------------------------------------------------+
+ | ``odl01`` | Baremetal node on which ODL runs |
+ | | (for scenarios deployed with ODL, otherwise unused |
+ +-------------+------------------------------------------------------------+
+ | ``cmp001``, | Baremetal Computes |
+ | ``cmp002`` | |
+ +-------------+------------------------------------------------------------+
+ | Tenant VM | VM running in the cloud |
+ +-------------+------------------------------------------------------------+
+
+``baremetal`` ``HA`` POD
+------------------------
+
+.. figure:: img/fuel_baremetal_ha.png
+ :align: center
+ :width: 60%
+ :alt: OPNFV Fuel Baremetal HA POD Network Layout Example
+
+ OPNFV Fuel Baremetal HA POD Network Layout Example
+
+ +---------------------------+----------------------------------------------+
+ | ``cfg01`` | Salt Master Docker container |
+ +---------------------------+----------------------------------------------+
+ | ``mas01`` | MaaS Node Docker container |
+ +---------------------------+----------------------------------------------+
+ | ``kvm01``, | Baremetals which hold the VMs with |
+ | ``kvm02``, | controller functions |
+ | ``kvm03`` | |
+ +---------------------------+----------------------------------------------+
+ | ``prx01``, | Proxy VMs for Nginx |
+ | ``prx02`` | |
+ +---------------------------+----------------------------------------------+
+ | ``msg01``, | RabbitMQ Service VMs |
+ | ``msg02``, | |
+ | ``msg03`` | |
+ +---------------------------+----------------------------------------------+
+ | ``dbs01``, | MySQL service VMs |
+ | ``dbs02``, | |
+ | ``dbs03`` | |
+ +---------------------------+----------------------------------------------+
+ | ``mdb01``, | Telemetry VMs |
+ | ``mdb02``, | |
+ | ``mdb03`` | |
+ +---------------------------+----------------------------------------------+
+ | ``odl01`` | VM on which ``OpenDaylight`` runs |
+ | | (for scenarios deployed with ``ODL``) |
+ +---------------------------+----------------------------------------------+
+ | ``cmp001``, | Baremetal Computes |
+ | ``cmp002`` | |
+ +---------------------------+----------------------------------------------+
+ | Tenant VM | VM running in the cloud |
+ +---------------------------+----------------------------------------------+
+
+.. code-block:: console
+
+ # x86_x64 baremetal deploy on pod2 from Linux Foundation lab (lf-pod2)
+ jenkins@jumpserver:~/fuel$ ci/deploy.sh -l lf \
+ -p pod2 \
+ -s os-nosdn-nofeature-ha \
+ -D \
+ -S /home/jenkins/tmpdir |& tee deploy.log
+
+.. code-block:: console
+
+ # aarch64 baremetal deploy on pod5 from Enea ARM lab (arm-pod5)
+ jenkins@jumpserver:~/fuel$ ci/deploy.sh -l arm \
+ -p pod5 \
+ -s os-nosdn-nofeature-ha \
+ -D \
+ -S /home/jenkins/tmpdir |& tee deploy.log
+
+``hybrid`` ``noHA`` POD
+-----------------------
+
+.. figure:: img/fuel_hybrid_noha.png
+ :align: center
+ :width: 60%
+ :alt: OPNFV Fuel Hybrid noHA POD Network Layout Examples
+
+ OPNFV Fuel Hybrid noHA POD Network Layout Examples
+
+ +-------------+------------------------------------------------------------+
+ | ``cfg01`` | Salt Master Docker container |
+ +-------------+------------------------------------------------------------+
+ | ``mas01`` | MaaS Node Docker container |
+ +-------------+------------------------------------------------------------+
+ | ``ctl01`` | Controller VM |
+ +-------------+------------------------------------------------------------+
+ | ``gtw01`` | Gateway VM with neutron services |
+ | | (``DHCP`` agent, ``L3`` agent, ``metadata`` agent etc) |
+ +-------------+------------------------------------------------------------+
+ | ``odl01`` | VM on which ``ODL`` runs |
+ | | (for scenarios deployed with ODL) |
+ +-------------+------------------------------------------------------------+
+ | ``cmp001``, | Baremetal Computes |
+ | ``cmp002`` | |
+ +-------------+------------------------------------------------------------+
+
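+A ``hybrid`` deploy is launched the same way, the split between ``baremetal``
+and ``virtual`` nodes being driven by the ``PDF``/``IDF`` pair; the lab and
+POD names below are only placeholders:
+
+.. code-block:: console
+
+   # example deploy cmd for a hybrid cluster (placeholder lab/POD)
+   jenkins@jumpserver:~/fuel$ ci/deploy.sh -l <lab_name> \
+                                           -p <pod_name> \
+                                           -s os-odl-nofeature-noha \
+                                           -D \
+                                           -S /home/jenkins/tmpdir |& tee deploy.log
+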
+Automatic Deploy Breakdown
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When an automatic deploy is started, the following operations are performed
+sequentially by the deploy script:
+
++------------------+----------------------------------------------------------+
+| **Deploy stage** | **Details** |
++==================+==========================================================+
+| Argument         | Environment variables and command line arguments passed |
+| Parsing | to ``deploy.sh`` are interpreted |
++------------------+----------------------------------------------------------+
+| Distribution | Install and/or configure mandatory requirements on the |
+| Package | ``jumpserver`` node: |
+| Installation | |
+| | - ``Docker`` (from upstream and not distribution repos, |
+| | as the version included in ``Ubuntu`` ``Xenial`` is |
+| | outdated); |
+| | - ``docker-compose`` (from upstream, as the version |
+| | included in both ``CentOS 7`` and |
+| | ``Ubuntu Xenial 16.04`` has dependency issues on most |
+| | systems); |
+| | - ``virt-inst`` (from upstream, as the version included |
+| | in ``Ubuntu Xenial 16.04`` is outdated and lacks |
+| | certain required features); |
+|                  | - other miscellaneous requirements, depending on         |
+| | ``jumpserver`` distribution OS; |
+| | |
+| | .. SEEALSO:: |
+| | |
+| | - ``mcp/scripts/requirements_deb.yaml`` (``Ubuntu``) |
+| | - ``mcp/scripts/requirements_rpm.yaml`` (``CentOS``) |
+| | |
+| | .. WARNING:: |
+| | |
+|                  |     Minimum required ``Docker`` version is ``17.x``.     |
+| | |
+| | .. WARNING:: |
+| | |
+|                  |     Minimum required ``virt-inst`` version is ``1.4``.   |
++------------------+----------------------------------------------------------+
+| Patch | For each ``git`` submodule in OPNFV Fuel repository, |
+| Apply | if a subdirectory with the same name exists under |
+| | ``mcp/patches``, all patches in that subdirectory are |
+| | applied using ``git-am`` to the respective ``git`` |
+| | submodule. |
+| | |
+| | This allows OPNFV Fuel to alter upstream repositories |
+| | contents before consuming them, including: |
+| | |
+| | - ``Docker`` container build process customization; |
+| | - ``salt-formulas`` customization; |
+| | - ``reclass.system`` customization; |
+| | |
+| | .. SEEALSO:: |
+| | |
+| | - ``mcp/patches/README.rst`` |
++------------------+----------------------------------------------------------+
+| SSH RSA Keypair  | If not already present, an RSA keypair is generated on   |
+| Generation | the ``jumpserver`` node at: |
+| | |
+| | - ``/var/lib/opnfv/mcp.rsa{,.pub}`` |
+| | |
+| | The public key will be added to the ``authorized_keys`` |
+| | list for ``ubuntu`` user, so the private key can be used |
+| | for key-based logins on: |
+| | |
+| | - ``cfg01``, ``mas01`` infrastructure nodes; |
+| | - all cluster nodes (``baremetal`` and/or ``virtual``), |
+| | including ``VCP`` VMs; |
++------------------+----------------------------------------------------------+
+| ``j2`` | Based on ``XDF`` (``PDF``, ``IDF``, ``SDF``) and |
+| Expansion | additional deployment configuration determined during |
+| | ``argument parsing`` stage described above, all jinja2 |
+| | templates are expanded, including: |
+| | |
+| | - various classes in ``reclass.cluster``; |
+| | - docker-compose ``yaml`` for Salt Master bring-up; |
+| | - ``libvirt`` network definitions (``xml``); |
++------------------+----------------------------------------------------------+
+| Jumpserver | Basic validation that common ``jumpserver`` requirements |
+| Requirements     | are satisfied, e.g. ``PXE/admin`` is a Linux bridge if   |
+| Check | ``baremetal`` nodes are defined in the ``PDF``. |
++------------------+----------------------------------------------------------+
+| Infrastructure   | .. NOTE::                                                |
+| Setup | |
+| | All steps apply to and only to the ``jumpserver``. |
+| | |
+| | - prepare virtual machines; |
+| | - (re)create ``libvirt`` managed networks; |
+| | - apply ``sysctl`` configuration; |
+| | - apply ``udev`` configuration; |
+| | - create & start virtual machines prepared earlier; |
+| | - create & start Salt Master (``cfg01``) Docker |
+| | container; |
++------------------+----------------------------------------------------------+
+| ``STATE`` | Based on deployment type, scenario and other parameters, |
+| Files | a ``STATE`` file list is constructed, then executed |
+| | sequentially. |
+| | |
+| | .. TIP:: |
+| | |
+| | The table below lists all current ``STATE`` files |
+| | and their intended action. |
+| | |
+| | .. SEEALSO:: |
+| | |
+| | For more information on how the list of ``STATE`` |
+| | files is constructed, see |
+| | :ref:`OPNFV Fuel User Guide <fuel-userguide>`. |
++------------------+----------------------------------------------------------+
+| Log | Contents of ``/var/log`` are recursively gathered from |
+| Collection | all the nodes, then archived together for later |
+| | inspection. |
++------------------+----------------------------------------------------------+
+
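+As noted in the ``SSH RSA Keypair Generation`` stage above, the generated key
+can be used for manual logins on the infrastructure and cluster nodes, e.g.
+to reach the Salt Master (``cfg01``) after the deploy; the address below is a
+placeholder:
+
+.. code-block:: console
+
+   jenkins@jumpserver:~$ ssh -o StrictHostKeyChecking=no \
+                             -i /var/lib/opnfv/mcp.rsa ubuntu@<cfg01 address>
+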
+``STATE`` Files Overview
+------------------------
+
++---------------------------+-------------------------------------------------+
+| ``STATE`` file | Targets involved and main intended action |
++===========================+=================================================+
+| ``virtual_init`` | ``cfg01``: reclass node generation |
+| | |
+| | ``jumpserver`` VMs (if present): basic OS |
+| | config |
++---------------------------+-------------------------------------------------+
+| ``maas``                  | ``mas01``: OS, MaaS configuration               |
+|                           |                                                 |
+|                           | ``baremetal`` node commissioning and deploy     |
+| | |
+| | .. NOTE:: |
+| | |
+| | Skipped if no ``baremetal`` nodes are |
+| | defined in ``PDF`` (``virtual`` deploy). |
++---------------------------+-------------------------------------------------+
+| ``baremetal_init`` | ``kvm``, ``cmp``: OS install, config |
++---------------------------+-------------------------------------------------+
+| ``dpdk`` | ``cmp``: configure OVS-DPDK |
++---------------------------+-------------------------------------------------+
+| ``networks`` | ``ctl``: create OpenStack networks |
++---------------------------+-------------------------------------------------+
+| ``neutron_gateway`` | ``gtw01``: configure Neutron gateway |
++---------------------------+-------------------------------------------------+
+| ``opendaylight`` | ``odl01``: install & configure ``ODL`` |
++---------------------------+-------------------------------------------------+
+| ``openstack_noha`` | cluster nodes: install OpenStack without ``HA`` |
++---------------------------+-------------------------------------------------+
+| ``openstack_ha`` | cluster nodes: install OpenStack with ``HA`` |
++---------------------------+-------------------------------------------------+
+| ``virtual_control_plane`` | ``kvm``: create ``VCP`` VMs |
+| | |
+| | ``VCP`` VMs: basic OS config |
+| | |
+| | .. NOTE:: |
+| | |
+| | Skipped if ``-N`` deploy argument is used. |
++---------------------------+-------------------------------------------------+
+| ``tacker`` | ``ctl``: install & configure Tacker |
++---------------------------+-------------------------------------------------+
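+
+Between ``STATE`` runs, a quick sanity check of minion connectivity can be
+performed from the Salt Master; ``test.ping`` is a generic Salt command, not
+an OPNFV Fuel specific helper:
+
+.. code-block:: console
+
+   # on the Salt Master container (cfg01), reachable via SSH as shown earlier
+   ubuntu@cfg01:~$ sudo salt '*' test.ping
+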
-Salt
+Release Notes
+=============
-8) `Saltstack Documentation <https://docs.saltstack.com/en/latest/topics>`_
-9) `Saltstack Formulas <http://salt-formulas.readthedocs.io/en/latest/develop/overview-reclass.html>`_
+Please refer to the :ref:`OPNFV Fuel Release Notes <fuel-releasenotes>`
+article.
-Reclass
+References
+==========
-10) `Reclass model <http://reclass.pantsfullofunix.net>`_
+For more information on the OPNFV ``Iruya`` 9.0 release, please see:
+
+#. `OPNFV Home Page`_
+#. `OPNFV Documentation`_
+#. `OPNFV Software Downloads`_
+#. `OPNFV Iruya Wiki Page`_
+#. `OpenStack Rocky Release Artifacts`_
+#. `OpenStack Documentation`_
+#. `OpenDaylight Artifacts`_
+#. `Mirantis Cloud Platform Documentation`_
+#. `Saltstack Documentation`_
+#. `Saltstack Formulas`_
+#. `Reclass`_
+
+.. FIXME: cleanup unused refs, extend above list
+.. _`OpenDaylight`: https://www.opendaylight.org
+.. _`OpenDaylight Artifacts`: https://www.opendaylight.org/software/downloads
+.. _`MCP`: https://www.mirantis.com/software/mcp/
+.. _`Mirantis Cloud Platform Documentation`: https://docs.mirantis.com/mcp/latest/
+.. _`fuel git repository`: https://git.opnfv.org/fuel
+.. _`pharos git repo`: https://git.opnfv.org/pharos
+.. _`OpenStack Documentation`: https://docs.openstack.org/rocky
+.. _`OpenStack Rocky Release Artifacts`: https://www.openstack.org/software/rocky
+.. _`OPNFV Home Page`: https://www.opnfv.org
+.. _`OPNFV Iruya Wiki Page`: https://wiki.opnfv.org/display/SWREL/Iruya
+.. _`OPNFV Documentation`: https://docs.opnfv.org
+.. _`OPNFV Software Downloads`: https://www.opnfv.org/software/downloads
+.. _`Apache License 2.0`: https://www.apache.org/licenses/LICENSE-2.0
+.. _`Saltstack Documentation`: https://docs.saltstack.com/en/latest/topics/
+.. _`Saltstack Formulas`: https://salt-formulas.readthedocs.io/en/latest/
+.. _`Reclass`: https://reclass.pantsfullofunix.net
+.. _`OPNFV Pharos Specification`: https://wiki.opnfv.org/display/pharos/Pharos+Specification
+.. _`OPNFV PDF Wiki Page`: https://wiki.opnfv.org/display/INF/POD+Descriptor
diff --git a/docs/release/release-notes/index.rst b/docs/release/release-notes/index.rst
index 4b1e4fa77..d4560558b 100644
--- a/docs/release/release-notes/index.rst
+++ b/docs/release/release-notes/index.rst
@@ -1,17 +1,10 @@
-.. _fuel-releasenotes:
-
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
.. (c) Open Platform for NFV Project, Inc. and its contributors
-.. _fuel-release-notes-label:
-
-*****************************
-Release notes for Fuel\@OPNFV
-*****************************
+.. _fuel-releasenotes:
.. toctree::
- :numbered:
:maxdepth: 2
release-notes.rst
diff --git a/docs/release/release-notes/release-notes.rst b/docs/release/release-notes/release-notes.rst
index 9a4c8d52d..846200faa 100644
--- a/docs/release/release-notes/release-notes.rst
+++ b/docs/release/release-notes/release-notes.rst
@@ -2,152 +2,170 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) Open Platform for NFV Project, Inc. and its contributors
-========
+************************
+OPNFV Fuel Release Notes
+************************
+
Abstract
========
-This document compiles the release notes for the Fraser release of
-OPNFV when using Fuel as a deployment tool. This is an unified documentation
-for both x86_64 and aarch64 architectures. All information is common for
-both architectures except when explicitly stated.
+This document provides the release notes for the ``Iruya`` release with the
+Fuel deployment toolchain.
+Starting with the ``Gambia`` release, both ``x86_64`` and ``aarch64``
+architectures are supported at the same time by the ``fuel`` codebase.
+
+License
+=======
+
+All Fuel and "common" entities are protected by the `Apache License 2.0`_.
-===============
Important Notes
===============
-These notes provides release information for the use of Fuel as deployment
-tool for the Fraser release of OPNFV.
+This is the OPNFV ``Iruya`` release that implements the deploy stage of the
+OPNFV CI pipeline via Fuel.
+
+Fuel is based on the `MCP`_ installation toolchain.
+More information is available at `Mirantis Cloud Platform Documentation`_.
-The goal of the Fraser release and this Fuel-based deployment process is
+The goal of the ``Iruya`` release and this Fuel-based deployment process is
to establish a lab ready platform accelerating further development
of the OPNFV infrastructure.
-Carefully follow the installation-instructions.
+Carefully follow the installation instructions.
-=======
Summary
=======
-For Fraser, the typical use of Fuel as an OpenStack installer is
-supplemented with OPNFV unique components such as:
-
-- `OpenDaylight <http://www.opendaylight.org/software>`_
-- `Open vSwitch for NFV <https://wiki.opnfv.org/ovsnfv>`_
+The ``Iruya`` release with the Fuel deployment toolchain will establish an
+OPNFV target system on a Pharos compliant lab infrastructure. The current
+definition of an OPNFV target system is OpenStack Stein combined with an SDN
+controller, such as OpenDaylight. The system is deployed with OpenStack High
+Availability (HA) for most OpenStack services.
-As well as OPNFV-unique configurations of the Hardware and Software stack.
+Fuel also supports non-HA deployments, which deploy a single controller,
+one gateway node and a number of compute nodes.
-This Fraser artifact provides Fuel as the deployment stage tool in the
-OPNFV CI pipeline including:
+Fuel supports ``x86_64``, ``aarch64`` or ``mixed`` architecture clusters.
-- Documentation built by Jenkins
+Furthermore, Fuel is capable of deploying scenarios in a ``baremetal``,
+``virtual`` or ``hybrid`` fashion. ``virtual`` deployments use multiple VMs on
+the Jump Host and internal networking to simulate the ``baremetal`` deployment.
- - overall OPNFV documentation
+For ``Iruya``, the typical use of Fuel as an OpenStack installer is
+supplemented with OPNFV unique components such as:
- - this document (release notes)
+- `OpenDaylight`_
- - installation instructions
+As well as OPNFV-unique configurations of the Hardware and Software stack.
-- Automated deployment of Fraser with running on baremetal or a nested
- hypervisor environment (KVM)
+This ``Iruya`` artifact provides Fuel as the deployment stage tool in the
+OPNFV CI pipeline including:
-- Automated validation of the Fraser deployment
+- Automated (Jenkins, RTD) documentation build & publish (multiple documents);
+- Automated (Jenkins) build & publish of Salt Master Docker image;
+- Automated (Jenkins) deployment of ``Iruya`` running on baremetal or a nested
+ hypervisor environment (KVM);
+- Automated (Jenkins) validation of the ``Iruya`` deployment
-============
Release Data
============
+--------------------------------------+--------------------------------------+
-| **Project** | fuel/armband |
+| **Project** | fuel |
| | |
+--------------------------------------+--------------------------------------+
-| **Repo/tag** | opnfv-6.2.1 |
+| **Repo/tag** | opnfv-9.0.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release designation** | Fraser 6.2 |
+| **Release designation** | Iruya 9.0 |
| | |
+--------------------------------------+--------------------------------------+
-| **Release date** | June 29 2018 |
+| **Release date** | January 31, 2020 |
| | |
+--------------------------------------+--------------------------------------+
-| **Purpose of the delivery** | Fraser alignment to Released |
-| | MCP baseline + features and |
-| | bug-fixes for the following |
-| | feaures: |
-| | |
-| | - Open vSwitch for NFV |
-| | - OpenDaylight |
-| | - DPDK |
+| **Purpose of the delivery** | OPNFV Iruya 9.0 release |
+--------------------------------------+--------------------------------------+
Version Change
-==============
+--------------
Module Version Changes
-----------------------
-This is the Fraser 6.2 release.
-It is based on following upstream versions:
+~~~~~~~~~~~~~~~~~~~~~~
+
+This is the first tracked version of the ``Iruya`` release with the Fuel
+deployment toolchain. It is based on the following upstream versions:
+
+- MCP (``Q1`19`` GA release)
-- MCP Base Release
+- OpenStack (``Stein`` release)
-- OpenStack Pike Release
+- OpenDaylight (``Neon`` release)
-- OpenDaylight Oxygen Release
+- Ubuntu (``18.04`` release)
Document Changes
-----------------
-This is the Fraser 6.2 release.
+~~~~~~~~~~~~~~~~
+
+This is the ``Iruya`` 9.0 release.
It comes with the following documentation:
-- :ref:`fuel-release-installation-label`
+- :ref:`OPNFV Fuel Installation Instruction <fuel-installation>`
- Release notes (This document)
-- :ref:`fuel-release-userguide-label`
+- :ref:`OPNFV Fuel Userguide <fuel-userguide>`
Reason for Version
-==================
+------------------
Feature Additions
------------------
+~~~~~~~~~~~~~~~~~
-**JIRA TICKETS:**
-None
+Due to the reduced schedule, this is a maintenance release.
Bug Corrections
----------------
+~~~~~~~~~~~~~~~
-**JIRA TICKETS:**
-
-`Fraser 6.2 bug fixes <https://jira.opnfv.org/issues/?filter=12318>`_
-
-(Also See respective Integrated feature project's bug tracking)
-
-Deliverables
-============
+N/A
Software Deliverables
----------------------
-
-- `Fuel@x86_64 installer script files <https://git.opnfv.org/fuel>`_
+~~~~~~~~~~~~~~~~~~~~~
-- `Fuel@aarch64 installer script files <https://git.opnfv.org/armband>`_
+- `fuel git repository`_ with multiarch (``x86_64``, ``aarch64`` or ``mixed``)
+ installer script files
Documentation Deliverables
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
-- :ref:`fuel-release-installation-label`
+- :ref:`OPNFV Fuel Installation Instruction <fuel-installation>`
- Release notes (This document)
-- :ref:`fuel-release-userguide-label`
+- :ref:`OPNFV Fuel Userguide <fuel-userguide>`
+
+Scenario Matrix
+---------------
+
++-------------------------+---------------+-------------+------------+
+| | ``baremetal`` | ``virtual`` | ``hybrid`` |
++=========================+===============+=============+============+
+| os-nosdn-nofeature-noha | | ``x86_64`` | |
++-------------------------+---------------+-------------+------------+
+| os-nosdn-nofeature-ha | ``x86_64``, | | |
+| | ``aarch64`` | | |
++-------------------------+---------------+-------------+------------+
+| os-odl-nofeature-noha | | ``x86_64`` | |
++-------------------------+---------------+-------------+------------+
+| os-odl-nofeature-ha     | ``x86_64``,   |             |            |
+|                         | ``aarch64``   |             |            |
++-------------------------+---------------+-------------+------------+
-=========================================
Known Limitations, Issues and Workarounds
=========================================
System Limitations
-==================
+------------------
- **Max number of blades:** 1 Jumpserver, 3 Controllers, 20 Compute blades
@@ -159,54 +177,46 @@ System Limitations
Known Issues
-============
-
-**JIRA TICKETS:**
-
-`Known issues <https://jira.opnfv.org/issues/?filter=12317>`_
+------------
-(Also See respective Integrated feature project's bug tracking)
+None
Workarounds
-===========
-
-**JIRA TICKETS:**
+-----------
None
-(Also See respective Integrated feature project's bug tracking)
-
-============
Test Results
============
-The Fraser 6.2 release with the Fuel deployment tool has undergone QA test
+
+The ``Iruya`` 9.0 release with the Fuel deployment tool has undergone QA test
+runs; see the separate test results.
-==========
References
==========
-For more information on the OPNFV Fraser 6.2 release, please see:
-
-OPNFV
-=====
-
-1) `OPNFV Home Page <http://www.opnfv.org>`_
-2) `OPNFV Documentation <http://docs.opnfv.org>`_
-3) `OPNFV Software Downloads <https://www.opnfv.org/software/download>`_
-
-OpenStack
-=========
-
-4) `OpenStack Pike Release Artifacts <http://www.openstack.org/software/pike>`_
-
-5) `OpenStack Documentation <http://docs.openstack.org>`_
-
-OpenDaylight
-============
-
-6) `OpenDaylight Artifacts <http://www.opendaylight.org/software/downloads>`_
-
-Fuel
-====
-7) `Mirantis Cloud Platform Documentation <https://docs.mirantis.com/mcp/latest>`_
+For more information on the OPNFV ``Iruya`` 9.0 release, please see:
+
+#. `OPNFV Home Page`_
+#. `OPNFV Documentation`_
+#. `OPNFV Software Downloads`_
+#. `OPNFV Iruya Wiki Page`_
+#. `OpenStack Stein Release Artifacts`_
+#. `OpenStack Documentation`_
+#. `OpenDaylight Artifacts`_
+#. `Mirantis Cloud Platform Documentation`_
+
+.. FIXME: cleanup unused refs, extend above list
+.. _`OpenDaylight`: https://www.opendaylight.org
+.. _`Vector Packet Processing`: https://wiki.fd.io/view/VPP
+.. _`OpenDaylight Artifacts`: https://www.opendaylight.org/software/downloads
+.. _`MCP`: https://www.mirantis.com/software/mcp/
+.. _`Mirantis Cloud Platform Documentation`: https://docs.mirantis.com/mcp/latest/
+.. _`fuel git repository`: https://git.opnfv.org/fuel
+.. _`OpenStack Documentation`: https://docs.openstack.org/stein
+.. _`OpenStack Stein Release Artifacts`: https://www.openstack.org/software/stein
+.. _`OPNFV Home Page`: https://www.opnfv.org
+.. _`OPNFV Iruya Wiki Page`: https://wiki.opnfv.org/display/SWREL/Iruya
+.. _`OPNFV Documentation`: https://docs.opnfv.org
+.. _`OPNFV Software Downloads`: https://www.opnfv.org/software/downloads
+.. _`Apache License 2.0`: https://www.apache.org/licenses/LICENSE-2.0
diff --git a/docs/release/scenarios/index.rst b/docs/release/scenarios/index.rst
new file mode 100644
index 000000000..651e0fdd4
--- /dev/null
+++ b/docs/release/scenarios/index.rst
@@ -0,0 +1,17 @@
+.. _fuel-scenarios:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Open Platform for NFV Project, Inc. and its contributors
+
+********************
+OPNFV Fuel Scenarios
+********************
+
+.. toctree::
+ :maxdepth: 2
+
+ os-nosdn-nofeature-noha/index.rst
+ os-nosdn-nofeature-ha/index.rst
+ os-odl-nofeature-noha/index.rst
+ os-odl-nofeature-ha/index.rst
diff --git a/docs/release/scenarios/os-nosdn-fdio-noha/index.rst b/docs/release/scenarios/os-nosdn-fdio-noha/index.rst
new file mode 100644
index 000000000..e64d9607e
--- /dev/null
+++ b/docs/release/scenarios/os-nosdn-fdio-noha/index.rst
@@ -0,0 +1,14 @@
+.. _os-nosdn-fdio-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+===========================================
+os-nosdn-fdio-noha overview and description
+===========================================
+
+.. toctree::
+ :maxdepth: 2
+
+.. include:: os-nosdn-fdio-noha.rst
diff --git a/docs/release/scenarios/os-nosdn-fdio-noha/os-nosdn-fdio-noha.rst b/docs/release/scenarios/os-nosdn-fdio-noha/os-nosdn-fdio-noha.rst
new file mode 100644
index 000000000..df306d767
--- /dev/null
+++ b/docs/release/scenarios/os-nosdn-fdio-noha/os-nosdn-fdio-noha.rst
@@ -0,0 +1,39 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+This document provides scenario level details for Hunter 8.0 of
+deployment with no SDN controller and VPP enabled as virtual switch.
+
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a Queens OpenStack
+deployment with no SDN controller enabled and VPP as virtual switch.
+
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon. It also installs
+VPP on the compute nodes as virtual switch.
+
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by setting os-nosdn-fdio-noha as scenario
+deploy parameter.
+
+
+Limitations, Issues and Workarounds
+===================================
+
+Tested on virtual deploy only.
+
+References
+==========
+
+For more information on the OPNFV Hunter release, please visit
+https://www.opnfv.org/software
diff --git a/docs/release/scenarios/os-nosdn-nofeature-ha/index.rst b/docs/release/scenarios/os-nosdn-nofeature-ha/index.rst
new file mode 100644
index 000000000..88e6c8336
--- /dev/null
+++ b/docs/release/scenarios/os-nosdn-nofeature-ha/index.rst
@@ -0,0 +1,14 @@
+.. _os-nosdn-nofeature-ha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+==============================================
+os-nosdn-nofeature-ha overview and description
+==============================================
+
+.. toctree::
+ :maxdepth: 2
+
+.. include:: os-nosdn-nofeature-ha.rst
diff --git a/docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst b/docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst
new file mode 100644
index 000000000..6a793702c
--- /dev/null
+++ b/docs/release/scenarios/os-nosdn-nofeature-ha/os-nosdn-nofeature-ha.rst
@@ -0,0 +1,40 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+This document provides scenario level details for an Iruya 9.0 deployment
+with no SDN controller and no extra features enabled.
+
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a Stein OpenStack
+deployment without any NFV features or SDN controller enabled.
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon.
+
+All services are in HA, meaning that there are multiple cloned instances of
+each service, and they are balanced by HA Proxy using a Virtual IP Address
+per service.
+
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by setting os-nosdn-nofeature-ha as scenario
+deploy parameter.
+
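+For illustration, a matching deploy command (lab and POD names are
+placeholders, see the OPNFV Fuel installation instruction for all options):
+
+.. code-block:: console
+
+   jenkins@jumpserver:~/fuel$ ci/deploy.sh -l <lab_name> \
+                                           -p <pod_name> \
+                                           -s os-nosdn-nofeature-ha
+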
+Limitations, Issues and Workarounds
+===================================
+
+None
+
+References
+==========
+
+For more information on the OPNFV Iruya release, please visit
+https://www.opnfv.org/software
diff --git a/docs/release/scenarios/os-nosdn-nofeature-noha/index.rst b/docs/release/scenarios/os-nosdn-nofeature-noha/index.rst
new file mode 100644
index 000000000..8243b74eb
--- /dev/null
+++ b/docs/release/scenarios/os-nosdn-nofeature-noha/index.rst
@@ -0,0 +1,14 @@
+.. _os-nosdn-nofeature-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+================================================
+os-nosdn-nofeature-noha overview and description
+================================================
+
+.. toctree::
+ :maxdepth: 2
+
+.. include:: os-nosdn-nofeature-noha.rst
diff --git a/docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst b/docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst
new file mode 100644
index 000000000..b66e8500b
--- /dev/null
+++ b/docs/release/scenarios/os-nosdn-nofeature-noha/os-nosdn-nofeature-noha.rst
@@ -0,0 +1,38 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+This document provides scenario level details for an Iruya 9.0 deployment
+with no SDN controller and no extra features enabled.
+
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a Stein OpenStack
+deployment without any NFV features or SDN controller enabled.
+
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon.
+
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by setting os-nosdn-nofeature-noha as scenario
+deploy parameter.
+
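+For illustration, a matching deploy command (lab and POD names are
+placeholders, see the OPNFV Fuel installation instruction for all options):
+
+.. code-block:: console
+
+   jenkins@jumpserver:~/fuel$ ci/deploy.sh -l <lab_name> \
+                                           -p <pod_name> \
+                                           -s os-nosdn-nofeature-noha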
+
+Limitations, Issues and Workarounds
+===================================
+
+Tested on virtual deploy only.
+
+References
+==========
+
+For more information on the OPNFV Iruya release, please visit
+https://www.opnfv.org/software
diff --git a/docs/release/scenarios/os-nosdn-onap-ha/index.rst b/docs/release/scenarios/os-nosdn-onap-ha/index.rst
new file mode 100644
index 000000000..bb2ae139c
--- /dev/null
+++ b/docs/release/scenarios/os-nosdn-onap-ha/index.rst
@@ -0,0 +1,14 @@
+.. _os-nosdn-onap-ha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB, Tieto and others
+
+=========================================
+os-nosdn-onap-ha overview and description
+=========================================
+
+.. toctree::
+ :maxdepth: 2
+
+.. include:: os-nosdn-ovs-ha.rst
diff --git a/docs/release/scenarios/os-nosdn-onap-ha/os-nosdn-ovs-ha.rst b/docs/release/scenarios/os-nosdn-onap-ha/os-nosdn-ovs-ha.rst
new file mode 100644
index 000000000..2d2569369
--- /dev/null
+++ b/docs/release/scenarios/os-nosdn-onap-ha/os-nosdn-ovs-ha.rst
@@ -0,0 +1,48 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB, Tieto and others
+
+This document provides scenario level details for Hunter
+deployment, with no SDN controller, with ONAP deployed on top of OPNFV.
+
+
+Introduction
+============
+
+This scenario is used primarily to deploy a Queens OpenStack deployment,
+without any NFV features or SDN controller enabled, with an ONAP deployment
+managed by the OPNFV Auto project. This scenario is a "specific" scenario
+created from the "generic" scenario os-nosdn-nofeature-ha.
+
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon. It also installs
+an ONAP deployment, managed by the OPNFV Auto project.
+
+All services are in HA, meaning that there are multiple cloned instances of
+each service, and they are balanced by HA Proxy using a Virtual IP Address
+per service.
+
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by setting os-nosdn-onap-ha as scenario
+deploy parameter and refer to the Auto Project documentation for further
+setup instructions at https://wiki.opnfv.org/display/AUTO/Auto+Documentation .
+
+Limitations, Issues and Workarounds
+===================================
+
+None
+
+References
+==========
+
+For more information on the OPNFV Hunter release, please visit
+https://www.opnfv.org/software
+For setup instructions visit the Auto Project at,
+https://wiki.opnfv.org/display/AUTO/Auto+Documentation
diff --git a/docs/release/scenarios/os-nosdn-onap-noha/index.rst b/docs/release/scenarios/os-nosdn-onap-noha/index.rst
new file mode 100644
index 000000000..7211f576d
--- /dev/null
+++ b/docs/release/scenarios/os-nosdn-onap-noha/index.rst
@@ -0,0 +1,14 @@
+.. _os-nosdn-onap-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+===========================================
+os-nosdn-onap-noha overview and description
+===========================================
+
+.. toctree::
+ :maxdepth: 2
+
+.. include:: os-nosdn-ovs-noha.rst
diff --git a/docs/release/scenarios/os-nosdn-onap-noha/os-nosdn-ovs-noha.rst b/docs/release/scenarios/os-nosdn-onap-noha/os-nosdn-ovs-noha.rst
new file mode 100644
index 000000000..a6f46e4f5
--- /dev/null
+++ b/docs/release/scenarios/os-nosdn-onap-noha/os-nosdn-ovs-noha.rst
@@ -0,0 +1,45 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB, Tieto and others
+
+This document provides scenario level details for Hunter
+deployment, with no SDN controller, with ONAP deployed on top of OPNFV.
+
+
+Introduction
+============
+
+This scenario is used primarily to deploy a Queens OpenStack deployment,
+without any NFV features or SDN controller enabled, with an ONAP deployment
+managed by the OPNFV Auto project. This scenario is a "specific" scenario
+created from the "generic" scenario os-nosdn-nofeature-noha.
+
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon. It also installs
+an ONAP deployment, managed by the OPNFV Auto project.
+
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by setting os-nosdn-onap-noha as scenario
+deploy parameter and refer to the Auto Project documentation for further
+setup instructions at https://wiki.opnfv.org/display/AUTO/Auto+Documentation .
+
+
+Limitations, Issues and Workarounds
+===================================
+
+Tested on virtual deployment only.
+
+References
+==========
+
+For more information on the OPNFV Hunter release, please visit
+https://www.opnfv.org/software
+For setup instructions visit the Auto Project at,
+https://wiki.opnfv.org/display/AUTO/Auto+Documentation
diff --git a/docs/release/scenarios/os-nosdn-ovs-ha/index.rst b/docs/release/scenarios/os-nosdn-ovs-ha/index.rst
index af0105b81..c9c9b9985 100644
--- a/docs/release/scenarios/os-nosdn-ovs-ha/index.rst
+++ b/docs/release/scenarios/os-nosdn-ovs-ha/index.rst
@@ -2,15 +2,13 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2017 Mirantis Inc., Enea Software AB and others
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
========================================
os-nosdn-ovs-ha overview and description
========================================
.. toctree::
- :numbered:
:maxdepth: 2
- os-nosdn-ovs-ha.rst
-
+.. include:: os-nosdn-ovs-ha.rst
diff --git a/docs/release/scenarios/os-nosdn-ovs-ha/os-nosdn-ovs-ha.rst b/docs/release/scenarios/os-nosdn-ovs-ha/os-nosdn-ovs-ha.rst
index eda89412f..1121cc1e2 100644
--- a/docs/release/scenarios/os-nosdn-ovs-ha/os-nosdn-ovs-ha.rst
+++ b/docs/release/scenarios/os-nosdn-ovs-ha/os-nosdn-ovs-ha.rst
@@ -1,15 +1,14 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c)2017 Mirantis Inc., Enea Software AB and others
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
-This document provides scenario level details for Fraser 6.2 of
+This document provides scenario level details for Hunter 8.1 of
deployment with no SDN controller and no extra features enabled.
-============
Introduction
============
-This scenario is used primarily to validate and deploy a Pike OpenStack
+This scenario is used primarily to validate and deploy a Queens OpenStack
deployment without any NFV features or SDN controller enabled.
Scenario components and composition
@@ -27,8 +26,8 @@ per service.
Scenario usage overview
=======================
-Simply deploy this scenario by using the os-nosdn-ovs-ha.yaml deploy
-settings file.
+Simply deploy this scenario by setting os-nosdn-ovs-ha as scenario
+deploy parameter.
Limitations, Issues and Workarounds
===================================
@@ -38,5 +37,5 @@ None
References
==========
-For more information on the OPNFV Fraser release, please visit
-http://www.opnfv.org/software
+For more information on the OPNFV Hunter release, please visit
+https://www.opnfv.org/software
diff --git a/docs/release/scenarios/os-nosdn-ovs-noha/index.rst b/docs/release/scenarios/os-nosdn-ovs-noha/index.rst
index 066abc938..135cefca0 100644
--- a/docs/release/scenarios/os-nosdn-ovs-noha/index.rst
+++ b/docs/release/scenarios/os-nosdn-ovs-noha/index.rst
@@ -2,15 +2,13 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2017 Mirantis Inc., Enea Software AB and others
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
==========================================
os-nosdn-ovs-noha overview and description
==========================================
.. toctree::
- :numbered:
:maxdepth: 2
- os-nosdn-ovs-noha.rst
-
+.. include:: os-nosdn-ovs-noha.rst
diff --git a/docs/release/scenarios/os-nosdn-ovs-noha/os-nosdn-ovs-noha.rst b/docs/release/scenarios/os-nosdn-ovs-noha/os-nosdn-ovs-noha.rst
index 0e5239675..f72206d7f 100644
--- a/docs/release/scenarios/os-nosdn-ovs-noha/os-nosdn-ovs-noha.rst
+++ b/docs/release/scenarios/os-nosdn-ovs-noha/os-nosdn-ovs-noha.rst
@@ -1,15 +1,14 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
-.. (c) 2017 Mirantis Inc., Enea Software AB and others
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
-This document provides scenario level details for Fraser 6.2 of
+This document provides scenario level details for Hunter 8.1 of
deployment with no SDN controller and no extra features enabled.
-============
Introduction
============
-This scenario is used primarily to validate and deploy a Pike OpenStack
+This scenario is used primarily to validate and deploy a Queens OpenStack
deployment without any NFV features or SDN controller enabled.
@@ -24,8 +23,8 @@ the DPDK-enabled Open vSwitch component.
Scenario usage overview
=======================
-Simply deploy this scenario by using the os-nosdn-ovs-noha.yaml deploy
-settings file.
+Simply deploy this scenario by setting os-nosdn-ovs-noha as scenario
+deploy parameter.
Limitations, Issues and Workarounds
@@ -36,5 +35,5 @@ Tested on virtual deploy only.
References
==========
-For more information on the OPNFV Fraser release, please visit
-http://www.opnfv.org/software
+For more information on the OPNFV Hunter release, please visit
+https://www.opnfv.org/software
diff --git a/docs/release/scenarios/os-odl-nofeature-ha/index.rst b/docs/release/scenarios/os-odl-nofeature-ha/index.rst
new file mode 100644
index 000000000..998671cbd
--- /dev/null
+++ b/docs/release/scenarios/os-odl-nofeature-ha/index.rst
@@ -0,0 +1,14 @@
+.. _os-odl-nofeature-ha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+============================================
+os-odl-nofeature-ha overview and description
+============================================
+
+.. toctree::
+ :maxdepth: 2
+
+.. include:: os-odl-nofeature-ha.rst
diff --git a/docs/release/scenarios/os-odl-nofeature-ha/os-odl-nofeature-ha.rst b/docs/release/scenarios/os-odl-nofeature-ha/os-odl-nofeature-ha.rst
new file mode 100644
index 000000000..e5d942ab5
--- /dev/null
+++ b/docs/release/scenarios/os-odl-nofeature-ha/os-odl-nofeature-ha.rst
@@ -0,0 +1,40 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+This document provides scenario level details for an Iruya 9.0 deployment
+with the OpenDaylight controller.
+
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a Stein OpenStack
+deployment with OpenDaylight Neon controller enabled.
+
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon. All services
+are in HA, meaning that there are multiple cloned instances of each service,
+and they are balanced by HA Proxy using a Virtual IP Address per service.
+OpenDaylight is installed as an SDN controller on one of the controller nodes.
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by setting os-odl-nofeature-ha as scenario
+deploy parameter.
+
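+For illustration, a matching deploy command (lab and POD names are
+placeholders, see the OPNFV Fuel installation instruction for all options):
+
+.. code-block:: console
+
+   jenkins@jumpserver:~/fuel$ ci/deploy.sh -l <lab_name> \
+                                           -p <pod_name> \
+                                           -s os-odl-nofeature-ha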
+
+Limitations, Issues and Workarounds
+===================================
+
+None
+
+References
+==========
+
+For more information on the OPNFV Iruya release, please visit
+https://www.opnfv.org/software
diff --git a/docs/release/scenarios/os-odl-nofeature-noha/index.rst b/docs/release/scenarios/os-odl-nofeature-noha/index.rst
new file mode 100644
index 000000000..bbe12cc52
--- /dev/null
+++ b/docs/release/scenarios/os-odl-nofeature-noha/index.rst
@@ -0,0 +1,14 @@
+.. _os-odl-nofeature-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+==============================================
+os-odl-nofeature-noha overview and description
+==============================================
+
+.. toctree::
+ :maxdepth: 2
+
+.. include:: os-odl-nofeature-noha.rst
diff --git a/docs/release/scenarios/os-odl-nofeature-noha/os-odl-nofeature-noha.rst b/docs/release/scenarios/os-odl-nofeature-noha/os-odl-nofeature-noha.rst
new file mode 100644
index 000000000..f9140b240
--- /dev/null
+++ b/docs/release/scenarios/os-odl-nofeature-noha/os-odl-nofeature-noha.rst
@@ -0,0 +1,39 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+This document provides scenario level details for an Iruya 9.0 deployment
+with the OpenDaylight controller.
+
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a Stein OpenStack
+deployment with OpenDaylight Neon controller enabled.
+
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon. It also installs
+OpenDaylight as an SDN controller on the dedicated node.
+
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by setting os-odl-nofeature-noha as scenario
+deploy parameter.
+
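+For illustration, a matching deploy command (lab and POD names are
+placeholders, see the OPNFV Fuel installation instruction for all options):
+
+.. code-block:: console
+
+   jenkins@jumpserver:~/fuel$ ci/deploy.sh -l <lab_name> \
+                                           -p <pod_name> \
+                                           -s os-odl-nofeature-noha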
+
+Limitations, Issues and Workarounds
+===================================
+
+Tested on virtual deploy only.
+
+References
+==========
+
+For more information on the OPNFV Iruya release, please visit
+https://www.opnfv.org/software
diff --git a/docs/release/scenarios/os-odl-ovs-noha/index.rst b/docs/release/scenarios/os-odl-ovs-noha/index.rst
new file mode 100644
index 000000000..b05d8583c
--- /dev/null
+++ b/docs/release/scenarios/os-odl-ovs-noha/index.rst
@@ -0,0 +1,14 @@
+.. _os-odl-ovs-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+========================================
+os-odl-ovs-noha overview and description
+========================================
+
+.. toctree::
+ :maxdepth: 2
+
+.. include:: os-odl-ovs-noha.rst
diff --git a/docs/release/scenarios/os-odl-ovs-noha/os-odl-ovs-noha.rst b/docs/release/scenarios/os-odl-ovs-noha/os-odl-ovs-noha.rst
new file mode 100644
index 000000000..92d5c1602
--- /dev/null
+++ b/docs/release/scenarios/os-odl-ovs-noha/os-odl-ovs-noha.rst
@@ -0,0 +1,40 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+This document provides scenario level details for Hunter 8.1 of
+deployment with OpenDaylight controller and DPDK feature enabled.
+
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a Queens OpenStack
+deployment with DPDK feature and OpenDaylight Fluorine controller enabled.
+
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon. It also installs
+the DPDK-enabled Open vSwitch component along with OpenDaylight as a SDN
+controller on the dedicated node.
+
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by setting up os-odl-ovs-noha as scenario
+deploy parameter.
+
+
+Limitations, Issues and Workarounds
+===================================
+
+Tested on virtual deploy only.
+
+References
+==========
+
+For more information on the OPNFV Hunter release, please visit
+https://www.opnfv.org/software
diff --git a/docs/release/scenarios/os-ovn-nofeature-ha/index.rst b/docs/release/scenarios/os-ovn-nofeature-ha/index.rst
new file mode 100644
index 000000000..5a9b2cdfe
--- /dev/null
+++ b/docs/release/scenarios/os-ovn-nofeature-ha/index.rst
@@ -0,0 +1,14 @@
+.. _os-ovn-nofeature-ha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+============================================
+os-ovn-nofeature-ha overview and description
+============================================
+
+.. toctree::
+ :maxdepth: 2
+
+.. include:: os-ovn-nofeature-ha.rst
diff --git a/docs/release/scenarios/os-ovn-nofeature-ha/os-ovn-nofeature-ha.rst b/docs/release/scenarios/os-ovn-nofeature-ha/os-ovn-nofeature-ha.rst
new file mode 100644
index 000000000..fe16a527d
--- /dev/null
+++ b/docs/release/scenarios/os-ovn-nofeature-ha/os-ovn-nofeature-ha.rst
@@ -0,0 +1,40 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+This document provides scenario level details for Hunter 8.0 of deployment
+with Open Virtual Network (OVN) providing Layers 2 and 3 networking and no
+extra features enabled.
+
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a Queens OpenStack
+deployment with OVN enabled and without any NFV features.
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon, plus OVN.
+
+All services are in HA, meaning that there are multiple cloned instances of
+each service, and they are balanced by HA Proxy using a Virtual IP Address
+per service.
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by setting os-ovn-nofeature-ha as scenario
+deploy parameter.
+
+Limitations, Issues and Workarounds
+===================================
+
+None
+
+References
+==========
+
+For more information on the OPNFV Hunter release, please visit
+https://www.opnfv.org/software
diff --git a/docs/release/scenarios/os-ovn-nofeature-noha/index.rst b/docs/release/scenarios/os-ovn-nofeature-noha/index.rst
new file mode 100644
index 000000000..ba823f3b5
--- /dev/null
+++ b/docs/release/scenarios/os-ovn-nofeature-noha/index.rst
@@ -0,0 +1,14 @@
+.. _os-ovn-nofeature-noha:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International Licence.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+==============================================
+os-ovn-nofeature-noha overview and description
+==============================================
+
+.. toctree::
+ :maxdepth: 2
+
+.. include:: os-ovn-nofeature-noha.rst
diff --git a/docs/release/scenarios/os-ovn-nofeature-noha/os-ovn-nofeature-noha.rst b/docs/release/scenarios/os-ovn-nofeature-noha/os-ovn-nofeature-noha.rst
new file mode 100644
index 000000000..885406a78
--- /dev/null
+++ b/docs/release/scenarios/os-ovn-nofeature-noha/os-ovn-nofeature-noha.rst
@@ -0,0 +1,39 @@
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) 2018 Mirantis Inc., Enea Software AB and others
+
+This document provides scenario level details for Hunter 8.0 of deployment
+with Open Virtual Network (OVN) providing Layers 2 and 3 networking and no
+extra features enabled.
+
+Introduction
+============
+
+This scenario is used primarily to validate and deploy a Queens OpenStack
+deployment with OVN enabled and without any NFV features.
+
+
+Scenario components and composition
+===================================
+
+This scenario is composed of common OpenStack services enabled by default,
+including Nova, Neutron, Glance, Cinder, Keystone, Horizon, plus OVN.
+
+
+Scenario usage overview
+=======================
+
+Simply deploy this scenario by setting os-ovn-nofeature-noha as scenario
+deploy parameter.
+
+
+Limitations, Issues and Workarounds
+===================================
+
+Tested on virtual deploy only.
+
+References
+==========
+
+For more information on the OPNFV Hunter release, please visit
+https://www.opnfv.org/software
diff --git a/docs/release/userguide/img/saltstack.png b/docs/release/userguide/img/saltstack.png
deleted file mode 100644
index d57452c65..000000000
--- a/docs/release/userguide/img/saltstack.png
+++ /dev/null
Binary files differ
diff --git a/docs/release/userguide/index.rst b/docs/release/userguide/index.rst
index d4330d08c..ab616d317 100644
--- a/docs/release/userguide/index.rst
+++ b/docs/release/userguide/index.rst
@@ -1,18 +1,10 @@
-.. _fuel-userguide:
-
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. http://creativecommons.org/licenses/by/4.0
.. (c) Open Platform for NFV Project, Inc. and its contributors
-.. _fuel-release-userguide-label:
-
-**************************
-User guide for Fuel\@OPNFV
-**************************
+.. _fuel-userguide:
.. toctree::
- :numbered:
:maxdepth: 2
userguide.rst
-
diff --git a/docs/release/userguide/userguide.rst b/docs/release/userguide/userguide.rst
index 584948f15..5a467ba7e 100644
--- a/docs/release/userguide/userguide.rst
+++ b/docs/release/userguide/userguide.rst
@@ -2,406 +2,1011 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) Open Platform for NFV Project, Inc. and its contributors
-========
+*********************
+OPNFV Fuel User Guide
+*********************
+
Abstract
========
-This document contains details about how to use OPNFV Fuel - Fraser
-release - after it was deployed. For details on how to deploy check the
-installation instructions in the :ref:`fuel_userguide_references` section.
+This document contains details about using the OPNFV Fuel ``Iruya`` release
+after it was deployed. For details on how to deploy OpenStack, check
+the installation instructions in the :ref:`fuel_userguide_references` section.
-This is an unified documentation for both x86_64 and aarch64
+This is a unified documentation for both ``x86_64`` and ``aarch64``
architectures. All information is common for both architectures
except when explicitly stated.
-
-
-================
Network Overview
================
Fuel uses several networks to deploy and administer the cloud:
-+------------------+---------------------------------------------------------+
-| Network name | Description |
-| | |
-+==================+=========================================================+
-| **PXE/ADMIN** | Used for booting the nodes via PXE and/or Salt |
-| | control network |
-+------------------+---------------------------------------------------------+
-| **MCPCONTROL** | Used to provision the infrastructure VMs (Salt & MaaS) |
-+------------------+---------------------------------------------------------+
-| **Mgmt** | Used for internal communication between |
-| | OpenStack components |
-+------------------+---------------------------------------------------------+
-| **Internal** | Used for VM data communication within the |
-| | cloud deployment |
-+------------------+---------------------------------------------------------+
-| **Public** | Used to provide Virtual IPs for public endpoints |
-| | that are used to connect to OpenStack services APIs. |
-| | Used by Virtual machines to access the Internet |
-+------------------+---------------------------------------------------------+
++------------------+----------------------------------------------------------+
+| Network name | Description |
+| | |
++==================+==========================================================+
+| **PXE/admin** | Used for booting the nodes via PXE and/or Salt |
+| | control network |
++------------------+----------------------------------------------------------+
+| **mcpcontrol** | Docker network used to provision the infrastructure |
+| | hosts (Salt & MaaS) |
++------------------+----------------------------------------------------------+
+| **management** | Used for internal communication between |
+| | OpenStack components |
++------------------+----------------------------------------------------------+
+| **internal** | Used for VM data communication within the |
+| | cloud deployment |
++------------------+----------------------------------------------------------+
+| **public** | Used to provide Virtual IPs for public endpoints |
+| | that are used to connect to OpenStack services APIs. |
+| | Used by Virtual machines to access the Internet |
++------------------+----------------------------------------------------------+
+
+These networks - except ``mcpcontrol`` - can be Linux bridges configured
+before the deploy on the Jumpserver.
+If they don't exist at deploy time, they will be created by the scripts as
+``libvirt`` managed networks (except ``mcpcontrol``, which will be handled by
+Docker using the ``bridge`` driver).
+
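+To inspect what was created on a running ``jumpserver``, the standard
+``libvirt`` and ``iproute2`` tools can be used (illustrative only, the output
+depends on the deployment type):
+
+.. code-block:: console
+
+   # libvirt managed networks created by the deploy scripts (if any)
+   jenkins@jumpserver:~$ sudo virsh net-list --all
+   # Linux bridges present on the jumpserver
+   jenkins@jumpserver:~$ ip link show type bridge
+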
+Network ``mcpcontrol``
+~~~~~~~~~~~~~~~~~~~~~~
+
+``mcpcontrol`` is a virtual network, managed by Docker. Its only purpose is to
+provide a simple method of assigning an arbitrary ``INSTALLER_IP`` to the Salt
+master node (``cfg01``), to maintain backwards compatibility with old OPNFV
+Fuel behavior. Normally, end-users only need to change the ``INSTALLER_IP`` if
+the default CIDR (``10.20.0.0/24``) overlaps with existing lab networks.
+
+``mcpcontrol`` uses the Docker bridge driver, so the Salt master (``cfg01``)
+and the MaaS container (``mas01``, when present) get assigned predefined IPs
+(``.2`` and ``.3``, respectively), while the jumpserver gets ``.1``.
+
++------------------+---------------------------+-----------------------------+
+| Host | Offset in IP range | Default address |
++==================+===========================+=============================+
+| ``jumpserver`` | 1st | ``10.20.0.1`` |
++------------------+---------------------------+-----------------------------+
+| ``cfg01`` | 2nd | ``10.20.0.2`` |
++------------------+---------------------------+-----------------------------+
+| ``mas01`` | 3rd | ``10.20.0.3`` |
++------------------+---------------------------+-----------------------------+
+
+This network is limited to the ``jumpserver`` host and does not require any
+manual setup.
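+
+As a quick sanity check (assuming the default ``10.20.0.0/24`` CIDR), the
+Salt master should answer pings on this network as soon as its container is
+up:
+
+.. code-block:: console
+
+   jenkins@jumpserver:~$ ping -c1 10.20.0.2  # cfg01 on mcpcontrol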
+
+Network ``PXE/admin``
+~~~~~~~~~~~~~~~~~~~~~
+
+.. TIP::
+
+ ``PXE/admin`` does not usually use an IP range offset in ``IDF``.
+
+.. NOTE::
+
+ During ``MaaS`` commissioning phase, IP addresses are handed out by
+ ``MaaS``'s DHCP.
+
+.. WARNING::
+
+   Default addresses in the table below correspond to a ``PXE/admin`` CIDR of
+ ``192.168.11.0/24`` (the usual value used in OPNFV labs).
+
+ This is defined in ``IDF`` and can easily be changed to something else.
+
+.. TODO: detail MaaS DHCP range start/end
+
++------------------+-----------------------+---------------------------------+
+| Host | Offset in IP range | Default address |
++==================+=======================+=================================+
+| ``jumpserver`` | 1st | ``192.168.11.1`` |
+| | | (manual assignment) |
++------------------+-----------------------+---------------------------------+
+| ``cfg01`` | 2nd | ``192.168.11.2`` |
++------------------+-----------------------+---------------------------------+
+| ``mas01`` | 3rd | ``192.168.11.3`` |
++------------------+-----------------------+---------------------------------+
+| ``prx01``, | 4th, | ``192.168.11.4``, |
+| ``prx02`` | 5th | ``192.168.11.5`` |
++------------------+-----------------------+---------------------------------+
+| ``gtw01``, | ... | ``...`` |
+| ``gtw02``, | | |
+| ``gtw03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``kvm01``, | | |
+| ``kvm02``, | | |
+| ``kvm03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``dbs01``, | | |
+| ``dbs02``, | | |
+| ``dbs03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``msg01``, | | |
+| ``msg02``, | | |
+| ``msg03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``mdb01``, | | |
+| ``mdb02``, | | |
+| ``mdb03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``ctl01``, | | |
+| ``ctl02``, | | |
+| ``ctl03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``odl01``, | | |
+| ``odl02``, | | |
+| ``odl03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``mon01``, | | |
+| ``mon02``, | | |
+| ``mon03``, | | |
+| ``log01``, | | |
+| ``log02``, | | |
+| ``log03``, | | |
+| ``mtr01``, | | |
+| ``mtr02``, | | |
+| ``mtr03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``cmp001``, | | |
+| ``cmp002``, | | |
+| ``...`` | | |
++------------------+-----------------------+---------------------------------+
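+
+To verify the ``jumpserver`` address on the ``PXE/admin`` bridge (the bridge
+name is lab-specific, e.g. ``admin_br`` or ``pxebr``, depending on the
+``IDF``), something like the following can be used:
+
+.. code-block:: console
+
+   jenkins@jumpserver:~$ ip -4 addr show dev admin_br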
+
+Network ``management``
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. TIP::
+
+ ``management`` often has an IP range offset defined in ``IDF``.
+
+.. WARNING::
+
+   Default addresses in the table below correspond to a ``management`` IP
+   range of ``172.16.10.10-172.16.10.254`` (one of the commonly used values
+   in OPNFV labs). This is defined in ``IDF`` and can easily be changed to
+   something else. Since the ``jumpserver`` address is manually assigned, it
+   is usually not subject to the IP range restriction in ``IDF``.
+
++------------------+-----------------------+---------------------------------+
+| Host | Offset in IP range | Default address |
++==================+=======================+=================================+
+| ``jumpserver`` | N/A | ``172.16.10.1`` |
+| | | (manual assignment) |
++------------------+-----------------------+---------------------------------+
+| ``cfg01`` | 1st | ``172.16.10.11`` |
++------------------+-----------------------+---------------------------------+
+| ``mas01`` | 2nd | ``172.16.10.12`` |
++------------------+-----------------------+---------------------------------+
+| ``prx`` | 3rd, | ``172.16.10.13``, |
+| | | |
+| ``prx01``, | 4th, | ``172.16.10.14``, |
+| ``prx02`` | 5th | ``172.16.10.15`` |
++------------------+-----------------------+---------------------------------+
+| ``gtw01``, | ... | ``...`` |
+| ``gtw02``, | | |
+| ``gtw03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``kvm``, | | |
+| | | |
+| ``kvm01``, | | |
+| ``kvm02``, | | |
+| ``kvm03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``dbs``, | | |
+| | | |
+| ``dbs01``, | | |
+| ``dbs02``, | | |
+| ``dbs03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``msg``, | | |
+| | | |
+| ``msg01``, | | |
+| ``msg02``, | | |
+| ``msg03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``mdb``, | | |
+| | | |
+| ``mdb01``, | | |
+| ``mdb02``, | | |
+| ``mdb03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``ctl``, | | |
+| | | |
+| ``ctl01``, | | |
+| ``ctl02``, | | |
+| ``ctl03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``odl``, | | |
+| | | |
+| ``odl01``, | | |
+| ``odl02``, | | |
+| ``odl03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``mon``, | | |
+| | | |
+| ``mon01``, | | |
+| ``mon02``, | | |
+| ``mon03``, | | |
+| | | |
+| ``log``, | | |
+| | | |
+| ``log01``, | | |
+| ``log02``, | | |
+| ``log03``, | | |
+| | | |
+| ``mtr``, | | |
+| | | |
+| ``mtr01``, | | |
+| ``mtr02``, | | |
+| ``mtr03`` | | |
++------------------+-----------------------+---------------------------------+
+| ``cmp001``, | | |
+| ``cmp002``, | | |
+| ``...`` | | |
++------------------+-----------------------+---------------------------------+
+
+Network ``internal``
+~~~~~~~~~~~~~~~~~~~~
+
+.. TIP::
+
+ ``internal`` does not usually use an IP range offset in ``IDF``.
+
+.. WARNING::
+
+   Default addresses in the table below correspond to an ``internal`` CIDR of
+ ``10.1.0.0/24`` (the usual value used in OPNFV labs).
+ This is defined in ``IDF`` and can easily be changed to something else.
+
++------------------+------------------------+--------------------------------+
+| Host | Offset in IP range | Default address |
++==================+========================+================================+
+| ``jumpserver`` | N/A | ``10.1.0.1`` |
+| | | (manual assignment, optional) |
++------------------+------------------------+--------------------------------+
+| ``gtw01``, | 1st, | ``10.1.0.2``, |
+| ``gtw02``, | 2nd, | ``10.1.0.3``, |
+| ``gtw03`` | 3rd | ``10.1.0.4`` |
++------------------+------------------------+--------------------------------+
+| ``cmp001``, | 4th, | ``10.1.0.5``, |
+| ``cmp002``, | 5th, | ``10.1.0.6``, |
+| ``...`` | ... | ``...`` |
++------------------+------------------------+--------------------------------+
+
+Network ``public``
+~~~~~~~~~~~~~~~~~~
+
+.. TIP::
+
+ ``public`` often has an IP range offset defined in ``IDF``.
+
+.. WARNING::
+
+   Default addresses in the table below correspond to a ``public`` IP range
+   of ``172.30.10.100-172.30.10.254`` (one of the commonly used values in
+   OPNFV labs). This is defined in ``IDF`` and can easily be changed to
+   something else. Since the ``jumpserver`` address is manually assigned, it
+   is usually not subject to the IP range restriction in ``IDF``.
+
++------------------+------------------------+--------------------------------+
+| Host | Offset in IP range | Default address |
++==================+========================+================================+
+| ``jumpserver`` | N/A | ``172.30.10.72`` |
+| | | (manual assignment, optional) |
++------------------+------------------------+--------------------------------+
+| ``prx``, | 1st, | ``172.30.10.101``, |
+| | | |
+| ``prx01``, | 2nd, | ``172.30.10.102``, |
+| ``prx02`` | 3rd | ``172.30.10.103`` |
++------------------+------------------------+--------------------------------+
+| ``gtw01``, | 4th, | ``172.30.10.104``, |
+| ``gtw02``, | 5th, | ``172.30.10.105``, |
+| ``gtw03`` | 6th | ``172.30.10.106`` |
++------------------+------------------------+--------------------------------+
+| ``ctl01``, | ... | ``...`` |
+| ``ctl02``, | | |
+| ``ctl03`` | | |
++------------------+------------------------+--------------------------------+
+| ``odl``, | | |
++------------------+------------------------+--------------------------------+
+| ``cmp001``, | | |
+| ``cmp002``, | | |
+| ``...`` | | |
++------------------+------------------------+--------------------------------+
+
+Accessing the Salt Master Node (``cfg01``)
+==========================================
+
+The Salt Master node (``cfg01``) runs an ``sshd`` server listening on
+``0.0.0.0:22``.
+To log in as the ``ubuntu`` user, use the RSA private key
+``/var/lib/opnfv/mcp.rsa``:
-These networks - except mcpcontrol - can be linux bridges configured before the deploy on the
-Jumpserver. If they don't exists at deploy time, they will be created by the scripts as virsh
-networks.
+.. code-block:: console
-Mcpcontrol exists only on the Jumpserver and needs to be virtual because a DHCP server runs
-on this network and associates static host entry IPs for Salt and Maas VMs.
+ jenkins@jumpserver:~$ ssh -o StrictHostKeyChecking=no \
+ -i /var/lib/opnfv/mcp.rsa \
+ -l ubuntu 10.20.0.2
+ ubuntu@cfg01:~$
+.. NOTE::
+ User ``ubuntu`` has sudo rights.
-===================
-Accessing the Cloud
-===================
+.. TIP::
-Access to any component of the deployed cloud is done from Jumpserver to user *ubuntu* with
-ssh key */var/lib/opnfv/mcp.rsa*. The example below is a connection to Salt master.
+   The Salt master IP (``10.20.0.2``) is not hard set; it is configurable via
+   ``INSTALLER_IP`` during deployment.
- .. code-block:: bash
+.. TIP::
- $ ssh -o StrictHostKeyChecking=no -i /var/lib/opnfv/mcp.rsa -l ubuntu 10.20.0.2
+ Starting with the ``Gambia`` release, ``cfg01`` is containerized, so this
+ also works (from ``jumpserver`` only):
-**Note**: The Salt master IP is not hard set, it is configurable via INSTALLER_IP during deployment
+.. code-block:: console
-Logging in to cluster nodes is possible from the Jumpserver and from Salt master. On the Salt master
-cluster hostnames can be used instead of IP addresses:
+ jenkins@jumpserver:~$ docker exec -it fuel bash
+ root@cfg01:~$
- .. code-block:: bash
+Accessing the MaaS Node (``mas01``)
+===================================
- $ sudo -i
- $ ssh -i mcp.rsa ubuntu@ctl01
+Starting with the ``Hunter`` release, the MaaS node (``mas01``) is
+containerized and no longer runs an ``sshd`` server. To access it (from
+``jumpserver`` only):
-User *ubuntu* has sudo rights.
+.. code-block:: console
+ jenkins@jumpserver:~$ docker exec -it maas bash
+ root@mas01:~$
-=============================
-Exploring the Cloud with Salt
-=============================
+Accessing Cluster Nodes
+=======================
-To gather information about the cloud, the salt commands can be used. It is based
-around a master-minion idea where the salt-master pushes config to the minions to
-execute actions.
+Logging in to cluster nodes is possible from the Jumpserver, the Salt Master
+node etc.
-For example tell salt to execute a ping to 8.8.8.8 on all the nodes.
+.. code-block:: console
-.. figure:: img/saltstack.png
+ jenkins@jumpserver:~$ ssh -i /var/lib/opnfv/mcp.rsa ubuntu@192.168.11.52
-Complex filters can be done to the target like compound queries or node roles.
-For more information about Salt see the :ref:`fuel_userguide_references` section.
+.. TIP::
-Some examples are listed below. Note that these commands are issued from Salt master
-as *root* user.
+ ``/etc/hosts`` on ``cfg01`` has all the cluster hostnames, which can be
+ used instead of IP addresses.
+ ``/root/.ssh/config`` on ``cfg01`` configures the default user and key:
+   ``ubuntu`` and ``/root/fuel/mcp/scripts/mcp.rsa``, respectively.
-#. View the IPs of all the components
+.. code-block:: console
- .. code-block:: bash
-
- root@cfg01:~$ salt "*" network.ip_addrs
- cfg01.mcp-pike-odl-ha.local:
- - 10.20.0.2
- - 172.16.10.100
- mas01.mcp-pike-odl-ha.local:
- - 10.20.0.3
- - 172.16.10.3
- - 192.168.11.3
- .........................
+ root@cfg01:~$ ssh ctl01
+Debugging ``MaaS`` Commissioning/Deployment Issues
+==================================================
-#. View the interfaces of all the components and put the output in a file with yaml format
+One of the most common issues when setting up a new POD is ``MaaS`` failing to
+commission/deploy the nodes, usually timing out after a couple of retries.
- .. code-block:: bash
+Such failures might indicate a misconfiguration in the ``PDF``/``IDF``, in
+the ``TOR`` switch configuration, or even faulty hardware.
- root@cfg01:~$ salt "*" network.interfaces --out yaml --output-file interfaces.yaml
- root@cfg01:~# cat interfaces.yaml
- cfg01.mcp-pike-odl-ha.local:
- enp1s0:
- hwaddr: 52:54:00:72:77:12
- inet:
- - address: 10.20.0.2
- broadcast: 10.20.0.255
- label: enp1s0
- netmask: 255.255.255.0
- inet6:
- - address: fe80::5054:ff:fe72:7712
- prefixlen: '64'
- scope: link
- up: true
- .........................
+Here are a couple of pointers for isolating the problem.
+Accessing the ``MaaS`` Dashboard
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#. View installed packages in MaaS node
+The ``MaaS`` web-based dashboard is available at
+``http://<jumpserver IP address>:5240/MAAS``.
- .. code-block:: bash
+The administrator credentials are ``opnfv``/``opnfv_secret``.
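+
+A quick way to check that the dashboard is reachable from the ``jumpserver``
+(without opening a browser) is a plain HTTP request, e.g.:
+
+.. code-block:: console
+
+   jenkins@jumpserver:~$ curl -sSI http://<jumpserver IP address>:5240/MAAS
+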
- root@cfg01:~# salt "mas*" pkg.list_pkgs
- mas01.mcp-pike-odl-ha.local:
- ----------
- accountsservice:
- 0.6.40-2ubuntu11.3
- acl:
- 2.2.52-3
- acpid:
- 1:2.0.26-1ubuntu2
- adduser:
- 3.113+nmu3ubuntu4
- anerd:
- 1
- .........................
+Ensure Commission/Deploy Timeouts Are Not Too Small
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Some hardware takes longer to boot or to run the initial scripts during the
+commissioning/deployment phases. If that's the case, ``MaaS`` will time out
+waiting for the process to finish. ``MaaS`` logs will reflect that, and the
+issue is usually easy to observe on the nodes' serial console - if the node
+seems to PXE-boot the OS live image and starts executing cloud-init/curtin
+hooks without any critical errors, but is then powered down/shut off, most
+likely the timeout was hit.
-#. Execute any linux command on all nodes (list the content of */var/log* in this example)
+To access the serial console of a node, see your board manufacturer's
+documentation. Some hardware no longer has a physical serial connector these
+days; it is usually replaced by a vendor-specific software-based interface.
- .. code-block:: bash
+If the board supports ``SOL`` (Serial Over LAN) over ``IPMI`` lanplus protocol,
+a simpler solution to hook to the serial console is to use ``ipmitool``.
- root@cfg01:~# salt "*" cmd.run 'ls /var/log'
- cfg01.mcp-pike-odl-ha.local:
- alternatives.log
- apt
- auth.log
- boot.log
- btmp
- cloud-init-output.log
- cloud-init.log
- .........................
+.. TIP::
+ Early boot stage output might not be shown over ``SOL``, but only over
+ the video console provided by the (vendor-specific) interface.
-#. Execute any linux command on nodes using compound queries filter
+.. code-block:: console
- .. code-block:: bash
+ jenkins@jumpserver:~$ ipmitool -H <host BMC IP> -U <user> -P <pass> \
+ -I lanplus sol activate
- root@cfg01:~# salt -C '* and cfg01*' cmd.run 'ls /var/log'
- cfg01.mcp-pike-odl-ha.local:
- alternatives.log
- apt
- auth.log
- boot.log
- btmp
- cloud-init-output.log
- cloud-init.log
- .........................
+To work around this, simply set a larger timeout in the ``IDF``.
+Check Jumpserver Network Configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-#. Execute any linux command on nodes using role filter
+.. code-block:: console
- .. code-block:: bash
+ jenkins@jumpserver:~$ brctl show
+ jenkins@jumpserver:~$ ifconfig -a
- root@cfg01:~# salt -I 'nova:compute' cmd.run 'ls /var/log'
- cmp001.mcp-pike-odl-ha.local:
- alternatives.log
- apache2
- apt
- auth.log
- btmp
- ceilometer
- cinder
- cloud-init-output.log
- cloud-init.log
- .........................
++-----------------------+------------------------------------------------+
+| Configuration item | Expected behavior |
++=======================+================================================+
+| IP addresses assigned | IP addresses should be assigned to the bridge, |
+| to bridge ports | and not to individual bridge ports |
++-----------------------+------------------------------------------------+
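+
+A quick way to check this on the ``jumpserver`` (bridge and port names below
+are hypothetical and lab-specific):
+
+.. code-block:: console
+
+   jenkins@jumpserver:~$ ip -4 addr show dev admin_br  # IP expected here
+   jenkins@jumpserver:~$ ip -4 addr show dev eno2      # no IP expected here
+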
+Check Network Connectivity Between Nodes on the Jumpserver
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+``cfg01`` is a Docker container running on the ``jumpserver``, connected to
+Docker networks (created automatically by ``docker-compose`` when the
+container is brought up), which in turn are connected using veth pairs to
+their ``libvirt`` managed counterparts (or to manually created bridges).
-===================
-Accessing Openstack
-===================
+For example, the ``mgmt`` network(s) should look like the output below for a
+``virtual`` deployment.
-Once the deployment is complete, Openstack CLI is accessible from controller VMs (ctl01..03).
-Openstack credentials are at */root/keystonercv3*.
+.. code-block:: console
- .. code-block:: bash
+ jenkins@jumpserver:~$ brctl show mgmt
+ bridge name bridge id STP enabled interfaces
+ mgmt 8000.525400064f77 yes mgmt-nic
+ veth_mcp2
+ vnet8
- root@ctl01:~# source keystonercv3
- root@ctl01:~# openstack image list
- +--------------------------------------+-----------------------------------------------+--------+
- | ID | Name | Status |
- +======================================+===============================================+========+
- | 152930bf-5fd5-49c2-b3a1-cae14973f35f | CirrosImage | active |
- | 7b99a779-78e4-45f3-9905-64ae453e3dcb | Ubuntu16.04 | active |
- +--------------------------------------+-----------------------------------------------+--------+
+ jenkins@jumpserver:~$ docker network ls
+ NETWORK ID NAME DRIVER SCOPE
+ 81a0fdb3bd78 docker-compose_mgmt macvlan local
+ [...]
+ jenkins@jumpserver:~$ docker network inspect docker-compose_mgmt
+ [
+ {
+ "Name": "docker-compose_mgmt",
+ [...]
+ "Options": {
+ "parent": "veth_mcp3"
+ },
+ }
+ ]
-The OpenStack Dashboard, Horizon, is available at http://<proxy public VIP>
-The administrator credentials are *admin*/*opnfv_secret*.
+Before investigating the rest of the cluster networking configuration, the
+first thing to check is that ``cfg01`` has network connectivity to other
+jumpserver-hosted nodes, e.g. ``mas01``, and to the jumpserver itself
+(provided that the jumpserver has an IP address in that particular network
+segment).
-.. figure:: img/horizon_login.png
+.. code-block:: console
+ jenkins@jumpserver:~$ docker exec -it fuel bash
+ root@cfg01:~# ifconfig -a | grep inet
+ inet addr:10.20.0.2 Bcast:0.0.0.0 Mask:255.255.255.0
+ inet addr:172.16.10.2 Bcast:0.0.0.0 Mask:255.255.255.0
+ inet addr:192.168.11.2 Bcast:0.0.0.0 Mask:255.255.255.0
-A full list of IPs/services is available at <proxy public VIP>:8090 for baremetal deploys.
+For each network of interest (``mgmt``, ``PXE/admin``), check
+that ``cfg01`` can ping the jumpserver IP in that network segment.
-.. figure:: img/salt_services_ip.png
+.. NOTE::
-==============================
-Guest Operating System Support
-==============================
+ ``mcpcontrol`` is set up at container bringup, so it should always be
+ available, while the other networks are configured by Salt as part of the
+ ``virtual_init`` STATE file.
-There are a number of possibilities regarding the guest operating systems which can be spawned
-on the nodes. The current system spawns virtual machines for VCP VMs on the KVM nodes and VMs
-requested by users in OpenStack compute nodes. Currently the system supports the following
-UEFI-images for the guests:
-
-+------------------+-------------------+------------------+
-| OS name | x86_64 status | aarch64 status |
-+==================+===================+==================+
-| Ubuntu 17.10 | untested | Full support |
-+------------------+-------------------+------------------+
-| Ubuntu 16.04 | Full support | Full support |
-+------------------+-------------------+------------------+
-| Ubuntu 14.04 | untested | Full support |
-+------------------+-------------------+------------------+
-| Fedora atomic 27 | untested | Full support |
-+------------------+-------------------+------------------+
-| Fedora cloud 27 | untested | Full support |
-+------------------+-------------------+------------------+
-| Debian | untested | Full support |
-+------------------+-------------------+------------------+
-| Centos 7 | untested | Not supported |
-+------------------+-------------------+------------------+
-| Cirros 0.3.5 | Full support | Full support |
-+------------------+-------------------+------------------+
-| Cirros 0.4.0 | Full support | Full support |
-+------------------+-------------------+------------------+
-
-
-The above table covers only UEFI image and implies OVMF/AAVMF firmware on the host. An x86 deployment
-also supports non-UEFI images, however that choice is up to the underlying hardware and the administrator
-to make.
-
-The images for the above operating systems can be found in their respective websites.
+.. code-block:: console
+ root@cfg01:~# ping -c1 10.20.0.1 # mcpcontrol jumpserver IP
+ root@cfg01:~# ping -c1 10.20.0.3 # mcpcontrol mas01 IP
-=================
-OpenStack Storage
-=================
+.. TIP::
-OpenStack Cinder is the project behind block storage in OpenStack and Fuel@OPNFV supports LVM out of the box.
-By default x86 supports 2 additional block storage devices and ARMBand supports only one.
-More devices can be supported if the OS-image created has additional properties allowing block storage devices
-to be spawned as SCSI drives. To do this, add the properties below to the server:
+   The ``mcpcontrol`` CIDR is configurable via the ``INSTALLER_IP`` env var
+   during deployment. However, IP offsets inside that segment are hard set to
+   ``.1`` for the jumpserver, ``.2`` for ``cfg01`` and ``.3`` for the
+   ``mas01`` node.
- .. code-block:: bash
+.. code-block:: console
- openstack image set --property hw_disk_bus='scsi' --property hw_scsi_model='virtio-scsi' <image>
+ root@cfg01:~# salt 'mas*' pillar.item --out yaml \
+ _param:infra_maas_node01_deploy_address \
+ _param:infra_maas_node01_address
+ mas01.mcp-ovs-noha.local:
+ _param:infra_maas_node01_address: 172.16.10.12
+ _param:infra_maas_node01_deploy_address: 192.168.11.3
-The choice regarding which bus to use for the storage drives is an important one. Virtio-blk is the default
-choice for Fuel@OPNFV which attaches the drives in /dev/vdX. However, since we want to be able to attach a
-larger number of volumes to the virtual machines, we recommend the switch to SCSI drives which are attached
-in /dev/sdX instead. Virtio-scsi is a little worse in terms of performance but the ability to add a larger
-number of drives combined with added features like ZFS, Ceph et al, leads us to suggest the use of virtio-scsi in Fuel@OPNFV for both architectures.
+ root@cfg01:~# ping -c1 192.168.11.1 # PXE/admin jumpserver IP
+ root@cfg01:~# ping -c1 192.168.11.3 # PXE/admin mas01 IP
+ root@cfg01:~# ping -c1 172.16.10.1 # mgmt jumpserver IP
+ root@cfg01:~# ping -c1 172.16.10.12 # mgmt mas01 IP
-More details regarding the differences and performance of virtio-blk vs virtio-scsi are beyond the scope
-of this manual but can be easily found in other sources online like `4`_ or `5`_.
+.. TIP::
-.. _4: https://mpolednik.github.io/2017/01/23/virtio-blk-vs-virtio-scsi/
+   Jumpserver IP addresses for the ``PXE/admin``, ``mgmt`` and ``public``
+   bridges are user-chosen and manually set, so the above snippets should be
+   adjusted accordingly if the user chose an IP other than ``.1`` in each
+   CIDR.
-.. _5 : https://www.ovirt.org/develop/release-management/features/storage/virtio-scsi/
+Alternatively, a quick ``nmap`` scan would work just as well.
-Additional configuration for configuring images in openstack can be found in the OpenStack Glance documentation.
+.. code-block:: console
+ root@cfg01:~# apt update && apt install -y nmap
+ root@cfg01:~# nmap -sn 10.20.0.0/24 # expected: cfg01, mas01, jumpserver
+ root@cfg01:~# nmap -sn 192.168.11.0/24 # expected: cfg01, mas01, jumpserver
+ root@cfg01:~# nmap -sn 172.16.10.0/24 # expected: cfg01, mas01, jumpserver
+Check ``DHCP`` Reaches Cluster Nodes
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-===================
-Openstack Endpoints
-===================
+One common symptom observed during failed commissioning is that ``DHCP`` does
+not work as expected between cluster nodes (baremetal nodes in the cluster; or
+virtual machines on the jumpserver in case of ``hybrid`` deployments) and
+the ``MaaS`` node.
-For each Openstack service three endpoints are created: admin, internal and public.
+To confirm or rule out this possibility, monitor the serial console output of
+one (or more) cluster nodes during ``MaaS`` commissioning. If the node is
+properly configured to attempt PXE boot, yet it times out waiting for an IP
+address from the ``mas01`` ``DHCP`` server, it's worth checking that ``DHCP``
+packets reach the ``jumpserver`` and, from there, the ``mas01`` container.
- .. code-block:: bash
+.. code-block:: console
- ubuntu@ctl01:~$ openstack endpoint list --service keystone
- +----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
- | ID | Region | Service Name | Service Type | Enabled | Interface | URL |
- +----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
- | 008fec57922b4e9e8bf02c770039ae77 | RegionOne | keystone | identity | True | internal | http://172.16.10.26:5000/v3 |
- | 1a1f3c3340484bda9ef7e193f50599e6 | RegionOne | keystone | identity | True | admin | http://172.16.10.26:35357/v3 |
- | b0a47d42d0b6491b995d7e6230395de8 | RegionOne | keystone | identity | True | public | https://10.0.15.2:5000/v3 |
- +----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
+ jenkins@jumpserver:~$ sudo apt update && sudo apt install -y dhcpdump
+ jenkins@jumpserver:~$ sudo dhcpdump -i admin_br
-MCP sets up all Openstack services to talk to each other over unencrypted
-connections on the internal management network. All admin/internal endpoints use
-plain http, while the public endpoints are https connections terminated via nginx
-at the VCP proxy VMs.
+.. TIP::
-To access the public endpoints an SSL certificate has to be provided. For
-convenience, the installation script will copy the required certificate into
-to the cfg01 node at /etc/ssl/certs/os_cacert.
+ If ``DHCP`` requests are present, but no replies are sent, ``iptables``
+ might be interfering on the jumpserver.
+
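+If ``iptables`` is suspected, a quick place to look is the packet counters of
+the ``FORWARD`` chain on the ``jumpserver``, e.g.:
+
+.. code-block:: console
+
+   jenkins@jumpserver:~$ sudo iptables -L FORWARD -n -v
+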
+Check ``MaaS`` Logs
+~~~~~~~~~~~~~~~~~~~
-Copy the certificate from the cfg01 node to the client that will access the https
-endpoints and place it under /etc/ssl/certs. The SSL connection will be established
-automatically after.
+If networking looks fine, yet nodes still fail to commission and/or deploy,
+``MaaS`` logs might offer more details about the failure:
- .. code-block:: bash
+* ``/var/log/maas/maas.log``
+* ``/var/log/maas/rackd.log``
+* ``/var/log/maas/regiond.log``
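+
+Since ``mas01`` is a container, these logs can be inspected directly from the
+``jumpserver``, e.g.:
+
+.. code-block:: console
+
+   jenkins@jumpserver:~$ docker exec -it maas tail -f /var/log/maas/regiond.log
+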
- $ ssh -o StrictHostKeyChecking=no -i /var/lib/opnfv/mcp.rsa -l ubuntu 10.20.0.2 \
- "cat /etc/ssl/certs/os_cacert" | sudo tee /etc/ssl/certs/os_cacert
+.. TIP::
+   If the problem is with the cluster node and not with the ``MaaS`` server,
+   the node's kernel logs usually contain useful information.
+   These are saved via ``rsyslog`` on the ``mas01`` node, under
+   ``/var/log/maas/rsyslog``.
+Recovering Failed Deployments
=============================
-Reclass model viewer tutorial
+
+The first deploy attempt might fail for various reasons. If the problem
+is not systemic (i.e. fixing it will not introduce incompatible configuration
+changes, like setting a different ``INSTALLER_IP``), the environment can
+safely be reused and the deployment process can pick up from where it left
+off.
+
+Leveraging these mechanisms requires a minimum understanding of how the
+deploy process works, at least for manual ``STATE`` runs.
+
+Automatic (re)deploy
+~~~~~~~~~~~~~~~~~~~~
+
+OPNFV Fuel's ``deploy.sh`` script offers a dedicated argument for this, ``-f``,
+which will skip executing the first ``N`` ``STATE`` files, where ``N`` is the
+number of ``-f`` occurrences in the argument list.
+
+.. TIP::
+
+ The list of ``STATE`` files to be executed for a specific environment
+ depends on the OPNFV scenario chosen, deployment type (``virtual``,
+ ``baremetal`` or ``hybrid``) and the presence/absence of a ``VCP``
+ (virtualized control plane).
+
+For example, let's consider a ``baremetal`` environment with ``VCP`` and the
+simple scenario ``os-nosdn-nofeature-ha``, where ``deploy.sh`` failed while
+executing the ``openstack_ha`` ``STATE`` file.
+
+The simplest redeploy approach (which usually works for **any** combination of
+deployment type/VCP/scenario) is to issue the same deploy command as the
+original attempt used, then adding a single ``-f``:
+
+.. code-block:: console
+
+ jenkins@jumpserver:~/fuel$ ci/deploy.sh -l <lab_name> -p <pod_name> \
+ -s <scenario> [...] \
+ -f # skips running the virtual_init STATE file
+
+All ``STATE`` files are re-entrant, so the above is equivalent to (but a
+little slower than) skipping all ``STATE`` files before the ``openstack_ha``
+one, like:
+
+.. code-block:: console
+
+ jenkins@jumpserver:~/fuel$ ci/deploy.sh -l <lab_name> -p <pod_name> \
+ -s <scenario> [...] \
+ -ffff # skips virtual_init, maas, baremetal_init, virtual_control_plane
+
+.. TIP::
+
+ For fine tuning the infrastructure setup steps executed during deployment,
+ see also the ``-e`` and ``-P`` deploy arguments.
+
+.. NOTE::
+
+   On rare occasions, the cluster cannot be redeployed idempotently (e.g.
+   broken MySQL/Galera cluster), in which case some cleanup is due before
+   (re)running the ``STATE`` files. See the ``-E`` deploy argument, which
+   either forces a ``MaaS`` node deletion followed by a redeployment of all
+   baremetal nodes when used twice (``-EE``), or only erases the ``VCP`` VMs
+   when used once (``-E``).
+
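+A sketch of such a cleanup run, reusing the same deploy arguments as the
+original attempt, could look like:
+
+.. code-block:: console
+
+   jenkins@jumpserver:~/fuel$ ci/deploy.sh -l <lab_name> -p <pod_name> \
+                                           -s <scenario> [...] \
+                                           -EE  # also redeploy baremetal nodes via MaaS
+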
+Manual ``STATE`` Run
+~~~~~~~~~~~~~~~~~~~~
+
+Instead of running the full ``deploy.sh``, one could execute the ``STATE``
+files one by one (or partially) from the ``cfg01`` node.
+
+However, this requires a better understanding of how the list of ``STATE``
+files to be executed is constructed for a specific scenario, depending on the
+deployment type and on whether the cluster has baremetal nodes; this is
+implemented in:
+
+* ``mcp/config/scenario/defaults.yaml.j2``
+* ``mcp/config/scenario/<scenario-name>.yaml``
+
+For the example presented above (``baremetal`` with ``VCP``,
+``os-nosdn-nofeature-ha``), the list of ``STATE`` files would be:
+
+* ``virtual_init``
+* ``maas``
+* ``baremetal_init``
+* ``virtual_control_plane``
+* ``openstack_ha``
+* ``networks``
+
+To execute one (or more) of the remaining ``STATE`` files after a failure:
+
+.. code-block:: console
+
+ jenkins@jumpserver:~$ docker exec -it fuel bash
+ root@cfg01:~$ cd ~/fuel/mcp/config/states
+ root@cfg01:~/fuel/mcp/config/states$ ./openstack_ha
+ root@cfg01:~/fuel/mcp/config/states$ CI_DEBUG=true ./networks
+
+For even finer granularity, one can also run the commands in a ``STATE`` file
+one by one manually, e.g. if the execution failed applying the ``rabbitmq``
+sls:
+
+.. code-block:: console
+
+ root@cfg01:~$ salt -I 'rabbitmq:server' state.sls rabbitmq
+
+Exploring the Cloud with Salt
=============================
+Salt commands can be used to gather information about the cloud.
+Salt is based on a master-minion model, where the Salt master pushes
+configuration to the minions, which execute the requested actions.
+
+For example, to tell Salt to execute a ping to ``8.8.8.8`` on all the nodes:
+
+.. code-block:: console
+
+ root@cfg01:~$ salt "*" network.ping 8.8.8.8
+ ^^^ target
+ ^^^^^^^^^^^^ function to execute
+ ^^^^^^^ argument passed to the function
+
+.. TIP::
+
+   Complex filters can be applied to the target, like compound queries or
+   node roles.
+
+For more information about Salt see the :ref:`fuel_userguide_references`
+section.
+
+Some examples are listed below. Note that these commands are issued from the
+Salt master as the ``root`` user.
+
+View the IPs of All the Components
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+ root@cfg01:~$ salt "*" network.ip_addrs
+ cfg01.mcp-odl-ha.local:
+ - 10.20.0.2
+ - 172.16.10.100
+ mas01.mcp-odl-ha.local:
+ - 10.20.0.3
+ - 172.16.10.3
+ - 192.168.11.3
+ .........................
+
+View the Interfaces of All the Components and Put the Output in a ``yaml`` File
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+ root@cfg01:~$ salt "*" network.interfaces --out yaml --output-file interfaces.yaml
+ root@cfg01:~# cat interfaces.yaml
+ cfg01.mcp-odl-ha.local:
+ enp1s0:
+ hwaddr: 52:54:00:72:77:12
+ inet:
+ - address: 10.20.0.2
+ broadcast: 10.20.0.255
+ label: enp1s0
+ netmask: 255.255.255.0
+ inet6:
+ - address: fe80::5054:ff:fe72:7712
+ prefixlen: '64'
+ scope: link
+ up: true
+ .........................
+
+View Installed Packages on MaaS Node
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+ root@cfg01:~# salt "mas*" pkg.list_pkgs
+ mas01.mcp-odl-ha.local:
+ ----------
+ accountsservice:
+ 0.6.40-2ubuntu11.3
+ acl:
+ 2.2.52-3
+ acpid:
+ 1:2.0.26-1ubuntu2
+ adduser:
+ 3.113+nmu3ubuntu4
+ anerd:
+ 1
+ .........................
+
+Execute Any Linux Command on All Nodes (e.g. ``ls /var/log``)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+ root@cfg01:~# salt "*" cmd.run 'ls /var/log'
+ cfg01.mcp-odl-ha.local:
+ alternatives.log
+ apt
+ auth.log
+ boot.log
+ btmp
+ cloud-init-output.log
+ cloud-init.log
+ .........................
+
+Execute Any Linux Command on Nodes Using Compound Queries Filter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+ root@cfg01:~# salt -C '* and cfg01*' cmd.run 'ls /var/log'
+ cfg01.mcp-odl-ha.local:
+ alternatives.log
+ apt
+ auth.log
+ boot.log
+ btmp
+ cloud-init-output.log
+ cloud-init.log
+ .........................
+
+Execute Any Linux Command on Nodes Using Role Filter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+ root@cfg01:~# salt -I 'nova:compute' cmd.run 'ls /var/log'
+ cmp001.mcp-odl-ha.local:
+ alternatives.log
+ apache2
+ apt
+ auth.log
+ btmp
+ ceilometer
+ cinder
+ cloud-init-output.log
+ cloud-init.log
+ .........................
-In order to get a better understanding on the reclass model Fuel uses, the `reclass-doc
-<https://github.com/jirihybek/reclass-doc>`_ can be used to visualise the reclass model.
-A simplified installation can be done with the use of a docker ubuntu container. This
-approach will avoid installing packages on the host, which might collide with other packages.
-After the installation is done, a webbrowser on the host can be used to view the results.
+Accessing OpenStack
+===================
-**NOTE**: The host can be any device with Docker package already installed.
- The user which runs the docker needs to have root priviledges.
+Once the deployment is complete, the OpenStack CLI is accessible from the
+controller VMs (``ctl01`` ... ``ctl03``).
+OpenStack credentials are at ``/root/keystonercv3``.
-**Instructions**
+.. code-block:: console
+ root@ctl01:~# source keystonercv3
+ root@ctl01:~# openstack image list
+ +--------------------------------------+-----------------------------------------------+--------+
+ | ID | Name | Status |
+ +======================================+===============================================+========+
+ | 152930bf-5fd5-49c2-b3a1-cae14973f35f | CirrosImage | active |
+ | 7b99a779-78e4-45f3-9905-64ae453e3dcb | Ubuntu16.04 | active |
+ +--------------------------------------+-----------------------------------------------+--------+
-#. Create a new directory at any location
+The OpenStack Dashboard, Horizon, is available at ``http://<proxy public VIP>``.
+The administrator credentials are ``admin``/``opnfv_secret``.
- .. code-block:: bash
+.. figure:: img/horizon_login.png
+ :width: 60%
+ :align: center
- $ mkdir -p modeler
+A full list of IPs/services is available at ``<proxy public VIP>:8090`` for
+``baremetal`` deploys.
+.. figure:: img/salt_services_ip.png
+ :width: 60%
+ :align: center
-#. Place fuel repo in the above directory
+Guest Operating System Support
+==============================
- .. code-block:: bash
+There are a number of possibilities regarding the guest operating systems
+that can be spawned on the nodes.
+The current system spawns the ``VCP`` virtual machines on the KVM nodes and
+the VMs requested by users on the OpenStack compute nodes. Currently, the
+system supports the following ``UEFI`` images for the guests:
+
++------------------+-------------------+--------------------+
+| OS name | ``x86_64`` status | ``aarch64`` status |
++==================+===================+====================+
+| Ubuntu 17.10 | untested | Full support |
++------------------+-------------------+--------------------+
+| Ubuntu 16.04 | Full support | Full support |
++------------------+-------------------+--------------------+
+| Ubuntu 14.04 | untested | Full support |
++------------------+-------------------+--------------------+
+| Fedora atomic 27 | untested | Full support |
++------------------+-------------------+--------------------+
+| Fedora cloud 27 | untested | Full support |
++------------------+-------------------+--------------------+
+| Debian | untested | Full support |
++------------------+-------------------+--------------------+
+| Centos 7 | untested | Not supported |
++------------------+-------------------+--------------------+
+| Cirros 0.3.5 | Full support | Full support |
++------------------+-------------------+--------------------+
+| Cirros 0.4.0 | Full support | Full support |
++------------------+-------------------+--------------------+
+
+The above table covers only ``UEFI`` images and implies ``OVMF``/``AAVMF``
+firmware on the host. An ``x86_64`` deployment also supports ``non-UEFI``
+images, however that choice is up to the underlying hardware and the
+administrator to make.
+
+The images for the above operating systems can be found in their respective
+websites.
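+
+As an illustration (assuming the controller has Internet access and using the
+publicly available Cirros 0.4.0 image as an example), registering a guest
+image in Glance could look like:
+
+.. code-block:: console
+
+   root@ctl01:~# source keystonercv3
+   root@ctl01:~# wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
+   root@ctl01:~# openstack image create --disk-format qcow2 \
+                                        --container-format bare \
+                                        --file cirros-0.4.0-x86_64-disk.img \
+                                        CirrosImage
+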
- $ cd modeler
- $ git clone https://gerrit.opnfv.org/gerrit/fuel && cd fuel
+OpenStack Storage
+=================
+
+OpenStack Cinder is the project behind block storage in OpenStack; OPNFV
+Fuel supports LVM out of the box.
+
+By default ``x86_64`` supports 2 additional block storage devices, while
+``aarch64`` supports only one.
+
+More devices can be supported if the OS-image created has additional
+properties allowing block storage devices to be spawned as ``SCSI`` drives.
+To do this, add the properties below to the server:
+
+.. code-block:: console
+
+ root@ctl01:~$ openstack image set --property hw_disk_bus='scsi' \
+ --property hw_scsi_model='virtio-scsi' \
+ <image>
+
+The choice regarding which bus to use for the storage drives is an important
+one. ``virtio-blk`` is the default choice for OPNFV Fuel; it attaches the
+drives as ``/dev/vdX``. However, since we want to be able to attach a
+larger number of volumes to the virtual machines, we recommend switching to
+``SCSI`` drives, which are attached as ``/dev/sdX`` instead.
+
+``virtio-scsi`` is slightly worse in terms of performance, but the ability to
+attach a larger number of drives, combined with support for features like
+ZFS, Ceph et al., leads us to suggest the use of ``virtio-scsi`` in OPNFV
+Fuel for both architectures.
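+
+For reference, attaching an additional volume to a guest (hypothetical names)
+follows the usual OpenStack workflow:
+
+.. code-block:: console
+
+   root@ctl01:~# source keystonercv3
+   root@ctl01:~# openstack volume create --size 10 test-volume
+   root@ctl01:~# openstack server add volume <instance name or ID> test-volume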
+
+More details regarding the differences and performance of ``virtio-blk`` vs
+``virtio-scsi`` are beyond the scope of this manual but can be easily found
+in other sources online like `VirtIO SCSI`_ or `VirtIO performance`_.
+Additional configuration for configuring images in OpenStack can be found in
+the OpenStack Glance documentation.
-#. Create a container and mount the above host directory
+OpenStack Endpoints
+===================
+
+For each OpenStack service three endpoints are created: ``admin``, ``internal``
+and ``public``.
+
+.. code-block:: console
+
+ ubuntu@ctl01:~$ openstack endpoint list --service keystone
+ +----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
+ | ID | Region | Service Name | Service Type | Enabled | Interface | URL |
+ +----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
+ | 008fec57922b4e9e8bf02c770039ae77 | RegionOne | keystone | identity | True | internal | http://172.16.10.26:5000/v3 |
+ | 1a1f3c3340484bda9ef7e193f50599e6 | RegionOne | keystone | identity | True | admin | http://172.16.10.26:35357/v3 |
+ | b0a47d42d0b6491b995d7e6230395de8 | RegionOne | keystone | identity | True | public | https://10.0.15.2:5000/v3 |
+ +----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
+
+MCP sets up all OpenStack services to talk to each other over unencrypted
+connections on the internal management network. All admin/internal endpoints
+use plain http, while the public endpoints are https connections terminated
+via nginx at the ``VCP`` proxy VMs.
- .. code-block:: bash
+To access the public endpoints an SSL certificate has to be provided. For
+convenience, the installation script will copy the required certificate
+to the ``cfg01`` node at ``/etc/ssl/certs/os_cacert``.
- $ docker run --privileged -it -v <absolute_path>/modeler:/host ubuntu bash
+Copy the certificate from the ``cfg01`` node to the client that will access
+the https endpoints and place it under ``/etc/ssl/certs/``.
+The SSL connection will then be established automatically.
+.. code-block:: console
-#. Install all the required packages inside the container.
+ jenkins@jumpserver:~$ ssh -o StrictHostKeyChecking=no -i /var/lib/opnfv/mcp.rsa -l ubuntu 10.20.0.2 \
+ "cat /etc/ssl/certs/os_cacert" | sudo tee /etc/ssl/certs/os_cacert
- .. code-block:: bash
+Reclass Model Viewer Tutorial
+=============================
- $ apt-get update
- $ apt-get install -y npm nodejs
- $ npm install -g reclass-doc
- $ cd /host/fuel/mcp/reclass
- $ ln -s /usr/bin/nodejs /usr/bin/node
- $ reclass-doc --output /host /host/fuel/mcp/reclass
+In order to get a better understanding of the ``reclass`` model Fuel uses,
+the `reclass-doc`_ tool can be used to visualise it.
+To avoid installing packages on the ``jumpserver`` or another host, the
+``cfg01`` Docker container can be used. Since the ``fuel`` git repository
+located on the ``jumpserver`` is already mounted inside the ``cfg01`` container,
+the results can be visualized using a web browser on the ``jumpserver`` at the
+end of the procedure.
-#. View the results from the host by using a browser. The file to open should be now at modeler/index.html
+.. code-block:: console
- .. figure:: img/reclass_doc.png
+ jenkins@jumpserver:~$ docker exec -it fuel bash
+ root@cfg01:~$ apt-get update
+ root@cfg01:~$ apt-get install -y npm nodejs
+ root@cfg01:~$ npm install -g reclass-doc
+ root@cfg01:~$ ln -s /usr/bin/nodejs /usr/bin/node
+ root@cfg01:~$ reclass-doc --output ~/fuel/mcp/reclass/modeler \
+ ~/fuel/mcp/reclass
+The generated documentation should be available on the ``jumpserver`` inside
+``fuel`` git repo subpath ``mcp/reclass/modeler/index.html``.
+
+.. figure:: img/reclass_doc.png
+ :width: 60%
+ :align: center
.. _fuel_userguide_references:
-==========
References
==========
-1) :ref:`fuel-release-installation-label`
-2) `Saltstack Documentation <https://docs.saltstack.com/en/latest/topics>`_
-3) `Saltstack Formulas <http://salt-formulas.readthedocs.io/en/latest/>`_
-4) `Virtio performance <https://mpolednik.github.io/2017/01/23/virtio-blk-vs-virtio-scsi/>`_
-5) `Virtio SCSI <https://www.ovirt.org/develop/release-management/features/storage/virtio-scsi/>`_
+#. :ref:`OPNFV Fuel Installation Instruction <fuel-installation>`
+#. `Saltstack Documentation`_
+#. `Saltstack Formulas`_
+#. `VirtIO performance`_
+#. `VirtIO SCSI`_
+
+.. _`Saltstack Documentation`: https://docs.saltstack.com/en/latest/topics/
+.. _`Saltstack Formulas`: https://salt-formulas.readthedocs.io/en/latest/
+.. _`VirtIO performance`: https://mpolednik.github.io/2017/01/23/virtio-blk-vs-virtio-scsi/
+.. _`VirtIO SCSI`: https://www.ovirt.org/develop/release-management/features/storage/virtio-scsi/
+.. _`reclass-doc`: https://github.com/jirihybek/reclass-doc
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 000000000..208831f0f
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+lfdocs-conf
+sphinx_opnfv_theme
+# Uncomment the following line if your project uses Sphinx to document
+# HTTP APIs
+# sphinxcontrib-httpdomain
diff --git a/mcp/config/labs/local/idf-pod1.yaml b/mcp/config/labs/local/idf-pod1.yaml
deleted file mode 100644
index b916707a1..000000000
--- a/mcp/config/labs/local/idf-pod1.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 Linux Foundation, Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-### LF POD 2 installer descriptor file ###
-
-idf:
- version: 0.1
- net_config:
- # NOTE: Network names are likely to change after the PDF spec is updated
- oob:
- interface: 0
- ip-range: 172.30.8.65-172.30.8.75
- vlan: 410
- admin:
- interface: 0
- vlan: native
- network: 192.168.11.0 # Untagged, 'PXE/Admin' on wiki, different IP
- mask: 24
- mgmt:
- interface: 0
- vlan: 300
- network: 10.167.4.0 # Tagged, 'vlan 300' on wiki
- ip-range: 10.167.4.10-10.167.4.254 # Some IPs are in use by lab infra
- mask: 24
- storage:
- interface: 3
- vlan: 301
- network: 10.2.0.0 # Tagged, not the same with 'storage' on wiki
- mask: 24
- private:
- interface: 1
- vlan: 1000
- network: 10.1.0.0 # Tagged, not the same with 'private' on wiki
- mask: 24
- public:
- interface: 2
- vlan: native
- network: 172.30.10.0 # Untagged, 'public' on wiki
- ip-range: 172.30.10.100-172.30.10.254 # Some IPs are in use by lab infra
- mask: 24
- gateway: 172.30.10.1
- dns:
- - 8.8.8.8
- - 8.8.4.4
- fuel:
- jumphost:
- bridges:
- admin: 'pxebr'
- mgmt: 'br-ctl'
- private: ~
- public: ~
- network:
- node:
- # Ordered-list, index should be in sync with node index in PDF
- - interfaces: &interfaces
- # Ordered-list, index should be in sync with interface index in PDF
- - 'enp6s0'
- - 'enp7s0'
- - 'enp8s0'
- - 'enp9s0'
- busaddr: &busaddr
- # Bus-info reported by `ethtool -i ethX`
- - '0000:06:00.0'
- - '0000:07:00.0'
- - '0000:08:00.0'
- - '0000:09:00.0'
- - interfaces: *interfaces
- busaddr: *busaddr
- - interfaces: *interfaces
- busaddr: *busaddr
- - interfaces: *interfaces
- busaddr: *busaddr
- - interfaces: *interfaces
- busaddr: *busaddr
diff --git a/mcp/config/labs/local/idf-virtual1.yaml b/mcp/config/labs/local/idf-virtual1.yaml
deleted file mode 100644
index 402af9852..000000000
--- a/mcp/config/labs/local/idf-virtual1.yaml
+++ /dev/null
@@ -1,103 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 Ericsson AB, Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-### Fuel@OPNFV sample VIRTUAL installer descriptor file ###
-
-idf:
- version: 0.0 # Intentionally invalid to indicate this is experimental
- net_config:
- # NOTE: Network names are likely to change after the PDF spec is updated
- oob:
- interface: 0
- ip-range: ~
- vlan: native
- # All networks (except OOB) are virtual networks managed by `libvirt`
- # Interface indexes are based on Fuel installer defaults
- admin:
- interface: 0 # when used, will be first vnet interface, untagged
- vlan: native
- network: 192.168.11.0
- mask: 24
- mgmt:
- interface: 1 # when used, will be second vnet interface, untagged
- vlan: native
- network: 172.16.10.0
- ip-range: 172.16.10.10-172.16.10.254 # Some IPs are in use by lab infra
- mask: 24
- storage:
- interface: 4 # when used, will be fifth vnet interface, untagged
- vlan: native
- network: 192.168.20.0
- mask: 24
- private:
- interface: 2 # when used, will be third vnet interface, untagged
- vlan: 1000-1999
- network: 10.1.0.0
- mask: 24
- public:
- interface: 3 # when used, will be fourth vnet interface, untagged
- vlan: native
- network: 10.16.0.0
- ip-range: 10.16.0.100-10.16.0.254 # Some IPs are in use by lab infra
- mask: 24
- gateway: 10.16.0.1
- dns:
- - 8.8.8.8
- - 8.8.4.4
- fuel:
- jumphost:
- bridges:
- admin: ~
- mgmt: ~
- private: ~
- public: ~
- network:
- ntp_strata_host1: 1.se.pool.ntp.org
- ntp_strata_host2: 0.se.pool.ntp.org
- node:
- # Ordered-list, index should be in sync with node index in PDF
- - interfaces: &interfaces
- # Ordered-list, index should be in sync with interface index in PDF
- - 'ens3'
- - 'ens4'
- - 'ens5'
- - 'ens6'
- busaddr: &busaddr
- # Bus-info reported by `ethtool -i ethX`
- - '0000:00:03.0'
- - '0000:00:04.0'
- - '0000:00:05.0'
- - '0000:00:06.0'
- - interfaces: *interfaces
- busaddr: *busaddr
- - interfaces: *interfaces
- busaddr: *busaddr
- - interfaces: *interfaces
- busaddr: *busaddr
- - interfaces: *interfaces
- busaddr: *busaddr
- reclass:
- node:
- - compute_params: &compute_params
- common: &compute_params_common
- compute_hugepages_size: 2M
- compute_hugepages_count: 2048
- compute_hugepages_mount: /mnt/hugepages_2M
- dpdk:
- <<: *compute_params_common
- compute_dpdk_driver: uio
- compute_ovs_pmd_cpu_mask: "0x6"
- compute_ovs_dpdk_socket_mem: "1024"
- compute_ovs_dpdk_lcore_mask: "0x8"
- compute_ovs_memory_channels: "2"
- dpdk0_driver: igb_uio
- dpdk0_n_rxq: 2
- - compute_params: *compute_params
- - compute_params: *compute_params
- - compute_params: *compute_params
- - compute_params: *compute_params
diff --git a/mcp/config/labs/local/pod1.yaml b/mcp/config/labs/local/pod1.yaml
deleted file mode 100644
index 219b2a683..000000000
--- a/mcp/config/labs/local/pod1.yaml
+++ /dev/null
@@ -1,199 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 Linux Foundation, Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-### LF POD 2 descriptor file ###
-
-version: 1.0
-details:
- pod_owner: Trevor Bramwell
- contact: tbramwell@linuxfoundation.org
- lab: LF Pharos Lab
- location: Portland
- type: production
- link: https://wiki.opnfv.org/display/pharos/LF+POD+2
-##############################################################################
-jumphost:
- name: pod2-jump
- node:
- type: baremetal
- vendor: Cisco Systems Inc
- model: UCSB-B200-M4
- arch: x86_64
- cpus: 2
- cpu_cflags: haswell
- cores: 8
- memory: 128G
- disks: &disks
- - name: 'disk1'
- disk_capacity: 2400G
- disk_type: hdd
- disk_interface: sas
- disk_rotation: 0
- os: centos-7
- remote_params: &remote_params
- type: ipmi
- versions:
- - 2.0
- user: admin
- pass: octopus
- remote_management:
- <<: *remote_params
- address: 172.30.8.83
- mac_address: "a8:9d:21:c9:c4:9e"
- interfaces:
- - mac_address: "00:25:b5:a0:00:1a"
- speed: 40gb
- features: 'dpdk|sriov'
- address: 192.168.11.1
- name: 'nic1'
- - mac_address: "00:25:b5:a0:00:1b"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic2'
- - mac_address: "00:25:b5:a0:00:1c"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic3'
- - mac_address: "00:25:b5:a0:00:1d"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic4'
-##############################################################################
-nodes:
- - name: pod2-node1
- node: &nodeparams
- type: baremetal
- vendor: Cisco Systems Inc
- model: UCSB-B200-M4
- arch: x86_64
- cpus: 2
- cpu_cflags: haswell
- cores: 8
- memory: 32G
- disks: *disks
- remote_management:
- <<: *remote_params
- address: 172.30.8.75
- mac_address: "a8:9d:21:c9:8b:56"
- interfaces:
- - mac_address: "00:25:b5:a0:00:2a"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic1'
- - mac_address: "00:25:b5:a0:00:2b"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic2'
- - mac_address: "00:25:b5:a0:00:2c"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic3'
- - mac_address: "00:25:b5:a0:00:2d"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic4'
- ############################################################################
- - name: pod2-node2
- node: *nodeparams
- disks: *disks
- remote_management:
- <<: *remote_params
- address: 172.30.8.65
- mac_address: "a8:9d:21:c9:4d:26"
- interfaces:
- - mac_address: "00:25:b5:a0:00:3a"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic1'
- - mac_address: "00:25:b5:a0:00:3b"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic2'
- - mac_address: "00:25:b5:a0:00:3c"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic3'
- - mac_address: "00:25:b5:a0:00:3d"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic4'
- ############################################################################
- - name: pod2-node3
- node: *nodeparams
- disks: *disks
- remote_management:
- <<: *remote_params
- address: 172.30.8.74
- mac_address: "a8:9d:21:c9:3a:92"
- interfaces:
- - mac_address: "00:25:b5:a0:00:4a"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic1'
- - mac_address: "00:25:b5:a0:00:4b"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic2'
- - mac_address: "00:25:b5:a0:00:4c"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic3'
- - mac_address: "00:25:b5:a0:00:4d"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic4'
- ############################################################################
- - name: pod2-node4
- node: *nodeparams
- disks: *disks
- remote_management:
- <<: *remote_params
- address: 172.30.8.73
- mac_address: "74:a2:e6:a4:14:9c"
- interfaces:
- - mac_address: "00:25:b5:a0:00:5a"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic1'
- - mac_address: "00:25:b5:a0:00:5b"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic2'
- - mac_address: "00:25:b5:a0:00:5c"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic3'
- - mac_address: "00:25:b5:a0:00:5d"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic4'
- ############################################################################
- - name: pod2-node5
- node: *nodeparams
- disks: *disks
- remote_management:
- <<: *remote_params
- address: 172.30.8.72
- mac_address: "a8:9d:21:a0:15:9c"
- interfaces:
- - mac_address: "00:25:b5:a0:00:6a"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic1'
- - mac_address: "00:25:b5:a0:00:6b"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic2'
- - mac_address: "00:25:b5:a0:00:6c"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic3'
- - mac_address: "00:25:b5:a0:00:6d"
- speed: 40gb
- features: 'dpdk|sriov'
- name: 'nic4'
diff --git a/mcp/config/labs/local/virtual1.yaml b/mcp/config/labs/local/virtual1.yaml
deleted file mode 100644
index b293b9775..000000000
--- a/mcp/config/labs/local/virtual1.yaml
+++ /dev/null
@@ -1,127 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 Ericsson AB, Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-### Fuel@OPNFV sample VIRTUAL POD descriptor file ###
-### NOTE: This is subject to change as vPDF is not yet officially supported ###
-
-version: 0.0 # Intentionally invalid to indicate this is experimental
-details:
- pod_owner: Fuel@OPNFV
- contact: Fuel@OPNFV
- lab: Example Lab
- location: Example Location
- type: development
- link: https://wiki.opnfv.org/display/pharos/
-##############################################################################
-jumphost:
- name: virtual1-jump
- node:
- type: baremetal
- vendor: HP
- model: ProLiant BL460c Gen8
- arch: x86_64
- cpus: 2
- cpu_cflags: ivybridge
- cores: 10
- memory: 64G
- disks:
- - name: 'disk1'
- disk_capacity: 800G
- disk_type: hdd
- disk_interface: scsi
- disk_rotation: 15000
- os: ubuntu-16.04
- remote_management:
- type: ipmi
- versions:
- - 1.0
- - 2.0
- user: changeme
- pass: changeme
- address: 0.0.0.0
- mac_address: "00:00:00:00:00:00"
- interfaces:
- - name: 'nic1'
- speed: 10gb
- features: 'dpdk|sriov'
- mac_address: "00:00:00:00:00:00"
- vlan: native
- - name: 'nic2'
- speed: 10gb
- features: 'dpdk|sriov'
- mac_address: "00:00:00:00:00:00"
- vlan: native
-##############################################################################
-nodes:
- - name: node-1 # noha ctl01 or ha (novcp) kvm01
- node: &nodeparams
- # Fuel overrides certain params (e.g. cpus, mem) based on node role later
- type: virtual
- vendor: libvirt
- model: virt
- arch: x86_64
- cpus: 1
- cpu_cflags: ivybridge
- cores: 8
- memory: 6G
- disks: &disks
- - name: 'disk1'
- disk_capacity: 100G
- disk_type: hdd
- disk_interface: scsi # virtio-scsi
- disk_rotation: 15000
- remote_management: &remotemgmt
- type: libvirt
- user: changeme
- pass: changeme
- address: 127.0.0.1 # Not used currently, will be 'qemu:///system' later
- interfaces: &interfaces
- - name: 'nic1'
- speed: 10gb
- features: 'dpdk|sriov'
- mac_address: "00:00:00:00:00:00" # MACs will be assigned by libvirt
- vlan: native
- - name: 'nic2'
- speed: 10gb
- features: 'dpdk|sriov'
- mac_address: "00:00:00:00:00:00"
- vlan: native
- - name: 'nic3'
- speed: 10gb
- features: 'dpdk|sriov'
- mac_address: "00:00:00:00:00:00"
- vlan: native
- - name: 'nic4'
- speed: 10gb
- features: 'dpdk|sriov'
- mac_address: "00:00:00:00:00:00"
- vlan: native
- ############################################################################
- - name: node-2 # noha gtw01 or ha (novcp) kvm02
- node: *nodeparams
- disks: *disks
- remote_management: *remotemgmt
- interfaces: *interfaces
- ############################################################################
-  - name: node-3  # noha odl01 / unused or ha (novcp) kvm03
- node: *nodeparams
- disks: *disks
- remote_management: *remotemgmt
- interfaces: *interfaces
- ############################################################################
- - name: node-4 # cmp001
- node: *nodeparams
- disks: *disks
- remote_management: *remotemgmt
- interfaces: *interfaces
- ############################################################################
- - name: node-5 # cmp002
- node: *nodeparams
- disks: *disks
- remote_management: *remotemgmt
- interfaces: *interfaces
diff --git a/mcp/config/scenario/.gitignore b/mcp/config/scenario/.gitignore
index 46c7f92c8..0e5ba3d09 100644
--- a/mcp/config/scenario/.gitignore
+++ b/mcp/config/scenario/.gitignore
@@ -1 +1,2 @@
-*-ha.yaml
+defaults.yaml
+os-nosdn-nofeature-noha.yaml
diff --git a/mcp/config/scenario/README.rst b/mcp/config/scenario/README.rst
index e03182f11..0a5391637 100644
--- a/mcp/config/scenario/README.rst
+++ b/mcp/config/scenario/README.rst
@@ -1,22 +1,25 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. SPDX-License-Identifier: CC-BY-4.0
-.. (c) 2017 Mirantis Inc., Enea AB and others.
+.. (c) 2018 Mirantis Inc., Enea AB and others.
-Fuel@OPNFV Scenario Configuration
+OPNFV Fuel Scenario Configuration
=================================
-Abstract:
----------
+Abstract
+--------
+
This directory contains configuration files for different OPNFV deployment
-feature scenarios used by Fuel@OPNFV, e.g.:
+feature scenarios used by OPNFV Fuel, e.g.:
- High availability configuration;
- Type of SDN controller to be deployed;
- OPNFV collaboration project features to be deployed;
- Provisioning of any other services;
-- POD configuration (baremetal, virtual);
+- POD configuration (``baremetal``, ``virtual``);
+
+NOTES
+-----
-NOTES:
-------
This directory is highly likely to change and/or be replaced/complemented
-by the new PDF (Pod Descriptor File) info in Pharos OPNFV git repo.
+by the new ``SDF`` (Scenario Descriptor File) info in Pharos OPNFV git repo
+in upcoming OPNFV releases.
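Editor's note: every scenario file touched below follows the same shape: a 'cluster' section naming the deployment domain and the ordered list of state scripts (from mcp/config/states/) to apply, plus a 'virtual' section listing control/compute node names and per-VM sizing overrides. A minimal sketch of reading one of them, assuming PyYAML and the repository layout from this change (the real consumer is the deploy tooling, not this snippet):

import yaml

with open("mcp/config/scenario/os-nosdn-nofeature-ha.yaml") as f:
    scenario = yaml.safe_load(f)

print(scenario["cluster"]["domain"])       # mcp-ovs-ha.local
print(scenario["cluster"]["states"])       # e.g. ['openstack_ha', 'networks']
nodes = scenario["virtual"]["nodes"]
for role in ("control", "compute"):
    print(role, nodes.get(role, []))       # kvm01..kvm03 / cmp001..cmp002
print(scenario["virtual"]["cmp001"])       # per-VM overrides: disks, vcpus, ram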
diff --git a/mcp/config/scenario/defaults.yaml b/mcp/config/scenario/defaults.yaml
deleted file mode 100644
index 05e86965e..000000000
--- a/mcp/config/scenario/defaults.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-x86_64:
- base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
- default:
- vcpus: 2
- ram: 4096
- common:
- apt:
- keys:
- - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.11/SALTSTACK-GPG-KEY.pub
- repos:
- # <repo name> <repo prio> deb [arch=<arch>] <repo url> <repo dist> <repo comp>
- - saltstack 500 deb [arch=amd64] http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.11 xenial main
- pkg:
- install:
- - linux-generic-hwe-16.04
- - salt-minion
- control:
- apt: ~
- pkg:
- install:
- - cloud-init
-aarch64:
- base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-uefi1.img
- default:
- vcpus: 6
- ram: 4096
- common:
- apt:
- keys:
- - https://linux.enea.com/mcp-repos/queens/xenial/archive-mcpqueens.key
- - https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.11/SALTSTACK-GPG-KEY.pub
- repos:
- # <repo name> <repo prio> deb [arch=<arch>] <repo url> <repo dist> <repo comp>
- - saltstack 500 deb [arch=amd64] http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.11 xenial main
- - armband 1201 deb [arch=arm64] http://linux.enea.com/mcp-repos/queens/xenial queens-armband main
- pkg:
- install:
- - linux-generic-hwe-16.04
- - python-futures
- - salt-minion
- control:
- apt: ~
- pkg:
- install:
- - cloud-init
diff --git a/mcp/config/scenario/defaults.yaml.j2 b/mcp/config/scenario/defaults.yaml.j2
new file mode 100644
index 000000000..48082a1dc
--- /dev/null
+++ b/mcp/config/scenario/defaults.yaml.j2
@@ -0,0 +1,158 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+---
+x86_64:
+ default:
+ disks: 100G # ';'-separated list of disk drives to create
+ vcpus: 4
+ ram: 8192
+ cluster: &arch_default_cluster_states
+ states:
+ - virtual_init
+{%- if nm.cluster.has_baremetal_nodes %}
+ - maas
+ - baremetal_init
+{%- if conf.MCP_VCP %}
+ - virtual_control_plane
+{%- endif %}
+{%- endif %}
+ ubuntu1804:
+ base_image: https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img
+ common:
+ repo:
+ keys:
+ - https://archive.repo.saltstack.com/apt/ubuntu/18.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub
+ repos:
+ # <repo name> <repo prio> deb [arch=<arch>] <repo url> <repo dist> <repo comp>
+ - saltstack 500 deb [arch=amd64] http://archive.repo.saltstack.com/apt/ubuntu/18.04/amd64/2017.7 bionic main
+ pkg:
+ install:
+ {%- if '-iec-' not in conf.MCP_DEPLOY_SCENARIO and conf.MCP_KERNEL_VER %}
+ - linux-image-{{ conf.MCP_KERNEL_VER }}-generic
+ - linux-headers-{{ conf.MCP_KERNEL_VER }}-generic
+ {%- endif %}
+ - salt-minion
+ - ifupdown
+ - cloud-init
+ - dnsmasq
+ control:
+ repo: ~
+ pkg:
+ install:
+ - cloud-init
+ ubuntu1604:
+ base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
+ common:
+ repo:
+ keys:
+ - https://archive.repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub
+ repos:
+ # <repo name> <repo prio> deb [arch=<arch>] <repo url> <repo dist> <repo comp>
+ - saltstack 500 deb [arch=amd64] http://archive.repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7 xenial main
+ pkg:
+ install:
+ - linux-generic-hwe-16.04
+ - salt-minion
+ control:
+ repo: ~
+ pkg:
+ install:
+ - cloud-init
+ centos7:
+ base_image: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+ common:
+ repo:
+ keys:
+ - https://archive.repo.saltstack.com/yum/redhat/$releasever/x86_64/2017.7/SALTSTACK-GPG-KEY.pub
+ repos:
+ # <repo name> <repo prio> rpm <repo url>
+ - saltstack 0 rpm https://archive.repo.saltstack.com/yum/redhat/$releasever/x86_64/2017.7
+ pkg:
+ install:
+ - salt-minion
+ - epel-release
+ - git
+ control:
+ repo: ~
+ pkg:
+ install:
+ - cloud-init
+aarch64:
+ default:
+ disks: 100G # ';'-separated list of disk drives to create
+ vcpus: 6
+ ram: 4096
+ cluster: *arch_default_cluster_states
+ ubuntu1804:
+ base_image: https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-arm64.img
+ common:
+ repo:
+ keys:
+ - https://archive.repo.saltstack.com/apt/ubuntu/18.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub
+ repos:
+ # <repo name> <repo prio> deb [arch=<arch>] <repo url> <repo dist> <repo comp>
+ - saltstack 500 deb [arch=amd64] http://archive.repo.saltstack.com/apt/ubuntu/18.04/amd64/2017.7 bionic main
+ pkg:
+ install:
+ {%- if '-iec-' not in conf.MCP_DEPLOY_SCENARIO and conf.MCP_KERNEL_VER %}
+ - linux-image-{{ conf.MCP_KERNEL_VER }}-generic
+ - linux-headers-{{ conf.MCP_KERNEL_VER }}-generic
+ {%- endif %}
+ - salt-minion
+ - ifupdown
+ - cloud-init
+ - dnsmasq
+ control:
+ repo: ~
+ pkg:
+ install:
+ - cloud-init
+ ubuntu1604:
+ base_image: https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-arm64-uefi1.img
+ common:
+ repo:
+ keys:
+ - https://linux.enea.com/mcp-repos/rocky/xenial/archive-mcprocky.key
+ - https://archive.repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub
+ repos:
+ # <repo name> <repo prio> deb [arch=<arch>] <repo url> <repo dist> <repo comp>
+ - saltstack 500 deb [arch=amd64] http://archive.repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7 xenial main
+ - armband_3 1201 deb [arch=arm64] http://linux.enea.com/mcp-repos/rocky/xenial rocky-armband main
+ pkg:
+ install:
+ {%- if '-iec-' in conf.MCP_DEPLOY_SCENARIO %}
+ - linux-generic-hwe-16.04
+ {%- endif %}
+ - python-futures
+ - salt-minion
+ control:
+ repo: ~
+ pkg:
+ install:
+ - cloud-init
+ centos7:
+ base_image: https://cloud.centos.org/altarch/7/images/CentOS-7-aarch64-GenericCloud.qcow2.xz
+ common:
+ repo:
+ keys:
+ - https://archive.repo.saltstack.com/yum/redhat/$releasever/x86_64/2017.7/SALTSTACK-GPG-KEY.pub
+ repos:
+ # <repo name> <repo prio> rpm <repo url>
+ - openstack-rocky 0 rpm http://mirror.centos.org/altarch/$releasever/cloud/aarch64/openstack-rocky
+ - saltstack 0 rpm https://archive.repo.saltstack.com/yum/redhat/$releasever/x86_64/2017.7
+ pkg:
+ install:
+ - salt-minion
+ - epel-release
+ - git
+ control:
+ repo: ~
+ pkg:
+ install:
+ - cloud-init
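Editor's note: the new defaults.yaml.j2 gates the kernel packages on the deploy scenario name and on conf.MCP_KERNEL_VER. A minimal sketch of how that conditional behaves when rendered, assuming the Jinja2 Python package and made-up values for the two variables (the real template is rendered by the deploy scripts together with net_map.j2, which this snippet does not reproduce):

from jinja2 import Template

snippet = """
pkg:
  install:
{%- if '-iec-' not in conf.MCP_DEPLOY_SCENARIO and conf.MCP_KERNEL_VER %}
    - linux-image-{{ conf.MCP_KERNEL_VER }}-generic
    - linux-headers-{{ conf.MCP_KERNEL_VER }}-generic
{%- endif %}
    - salt-minion
"""

# Attribute access on a plain dict works in Jinja2 (it falls back to item lookup)
conf = {"MCP_DEPLOY_SCENARIO": "os-nosdn-nofeature-noha", "MCP_KERNEL_VER": "4.15.0-45"}
print(Template(snippet).render(conf=conf))   # kernel image/headers included
conf["MCP_DEPLOY_SCENARIO"] = "k8-calico-iec-noha"
print(Template(snippet).render(conf=conf))   # '-iec-' scenario: kernel lines dropped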
diff --git a/mcp/config/scenario/k8-calico-iec-noha.yaml b/mcp/config/scenario/k8-calico-iec-noha.yaml
new file mode 100644
index 000000000..93f007ba9
--- /dev/null
+++ b/mcp/config/scenario/k8-calico-iec-noha.yaml
@@ -0,0 +1,27 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+cluster:
+ domain: mcp-iec-noha.local
+ states:
+ - akraino_iec
+virtual:
+ nodes:
+ control:
+ - iec01
+ - iec02
+ - iec03
+ iec01:
+ vcpus: 8
+ ram: 10240
+ iec02:
+ vcpus: 8
+ ram: 10240
+ iec03:
+ vcpus: 8
+ ram: 10240
diff --git a/mcp/config/scenario/k8-calico-iec-vcp-noha.yaml b/mcp/config/scenario/k8-calico-iec-vcp-noha.yaml
new file mode 100644
index 000000000..dc0c00bf6
--- /dev/null
+++ b/mcp/config/scenario/k8-calico-iec-vcp-noha.yaml
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+cluster:
+ domain: mcp-iec-noha.local
+ states:
+ - virtual_control_plane
+ - akraino_iec
+virtual:
+ nodes:
+ control:
+ - kvm01
+ - kvm02
+ - kvm03
+ kvm01:
+ vcpus: 8
+ ram: 10240
+ kvm02:
+ vcpus: 8
+ ram: 10240
+ kvm03:
+ vcpus: 8
+ ram: 10240
diff --git a/mcp/config/scenario/k8-calico-nofeature-noha.yaml b/mcp/config/scenario/k8-calico-nofeature-noha.yaml
new file mode 100644
index 000000000..a47298409
--- /dev/null
+++ b/mcp/config/scenario/k8-calico-nofeature-noha.yaml
@@ -0,0 +1,28 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+cluster:
+ domain: mcp-k8s-calico-noha.local
+ states:
+ - kubernetes
+virtual:
+ nodes:
+ control:
+ - ctl01
+ compute:
+ - cmp001
+ - cmp002
+ ctl01:
+ vcpus: 8
+ ram: 14336
+ cmp001:
+ vcpus: 6
+ ram: 12288
+ cmp002:
+ vcpus: 6
+ ram: 12288
diff --git a/mcp/config/scenario/os-nosdn-ovs-ha.yaml.j2 b/mcp/config/scenario/os-nosdn-fdio-ha.yaml
index f9ab7c3fb..b010f7ab6 100644
--- a/mcp/config/scenario/os-nosdn-ovs-ha.yaml.j2
+++ b/mcp/config/scenario/os-nosdn-fdio-ha.yaml
@@ -5,42 +5,25 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-{%- import 'net_map.j2' as nm with context %}
---
cluster:
- domain: mcp-ovs-dpdk-ha.local
+ domain: mcp-fdio-ha.local
states:
-{%- if nm.cluster.has_baremetal_nodes %}
- - maas
- - baremetal_init
-{%- endif %}
-{%- if conf.MCP_VCP %}
- - virtual_control_plane
-{%- endif %}
- - dpdk
- openstack_ha
- networks
virtual:
nodes:
- - cfg01
-{%- if nm.cluster.has_baremetal_nodes %}
- - mas01
-{%- endif %}
-{#- Most likely, controllers will always have the same type and number (3) #}
-{%- if conf.nodes[nm.ctl01.idx].node.type == 'virtual' %}
- - kvm01
- - kvm02
- - kvm03
-{%- endif %}
-{#- Later, we might have mixed computes here, for hybrid multi-arch testing #}
-{%- if conf.nodes[nm.cmp001.idx].node.type == 'virtual' %}
- - cmp001
- - cmp002
-{%- endif %}
+ control:
+ - kvm01
+ - kvm02
+ - kvm03
+ compute:
+ - cmp001
+ - cmp002
+ # Below values are only used for each node if said node is virtual
cfg01:
vcpus: 4
ram: 6144
- # Below values are only used when nodes are defined in virtual.nodes above
mas01:
vcpus: 4
ram: 6144
@@ -55,8 +38,10 @@ virtual:
vcpus: 4
ram: 14336
cmp001:
+ disks: 100G;100G
vcpus: 4
- ram: 8192
+ ram: 14336
cmp002:
+ disks: 100G;100G
vcpus: 4
- ram: 8192
+ ram: 14336
diff --git a/mcp/config/scenario/os-nosdn-fdio-noha.yaml b/mcp/config/scenario/os-nosdn-fdio-noha.yaml
new file mode 100644
index 000000000..64b4aaf57
--- /dev/null
+++ b/mcp/config/scenario/os-nosdn-fdio-noha.yaml
@@ -0,0 +1,60 @@
+##############################################################################
+# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+cluster:
+ domain: mcp-fdio-noha.local
+ states:
+ - openstack_noha
+ - neutron_gateway
+ - networks
+virtual:
+ nodes:
+ control:
+ - ctl01
+ - gtw01
+ compute:
+ - cmp001
+ - cmp002
+ ctl01:
+ vcpus: 4
+ ram: 14336
+ gtw01:
+ vcpus: 8
+ ram: 14336
+ cpu_topology:
+ sockets: 1
+ cores: 4
+ threads: 2
+ numa:
+ cell0:
+ memory: 14680064
+ cpus: 0-7
+ cmp001:
+ disks: 100G;100G
+ vcpus: 8
+ ram: 14336
+ cpu_topology:
+ sockets: 1
+ cores: 4
+ threads: 2
+ numa:
+ cell0:
+ memory: 14680064
+ cpus: 0-7
+ cmp002:
+ disks: 100G;100G
+ vcpus: 8
+ ram: 14336
+ cpu_topology:
+ sockets: 1
+ cores: 4
+ threads: 2
+ numa:
+ cell0:
+ memory: 14680064
+ cpus: 0-7
diff --git a/mcp/config/scenario/os-nosdn-nofeature-ha.yaml.j2 b/mcp/config/scenario/os-nosdn-nofeature-ha.yaml
index 26938262e..d06d2ae0b 100644
--- a/mcp/config/scenario/os-nosdn-nofeature-ha.yaml.j2
+++ b/mcp/config/scenario/os-nosdn-nofeature-ha.yaml
@@ -5,41 +5,22 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-{%- import 'net_map.j2' as nm with context %}
---
cluster:
domain: mcp-ovs-ha.local
states:
-{%- if nm.cluster.has_baremetal_nodes %}
- - maas
- - baremetal_init
-{%- endif %}
-{%- if conf.MCP_VCP %}
- - virtual_control_plane
-{%- endif %}
- openstack_ha
- networks
virtual:
nodes:
- - cfg01
-{%- if nm.cluster.has_baremetal_nodes %}
- - mas01
-{%- endif %}
-{#- Most likely, controllers will always have the same type and number (3) #}
-{%- if conf.nodes[nm.ctl01.idx].node.type == 'virtual' %}
- - kvm01
- - kvm02
- - kvm03
-{%- endif %}
-{#- Later, we might have mixed computes here, for hybrid multi-arch testing #}
-{%- if conf.nodes[nm.cmp001.idx].node.type == 'virtual' %}
- - cmp001
- - cmp002
-{%- endif %}
- cfg01:
- vcpus: 4
- ram: 6144
- # Below values are only used when nodes are defined in virtual.nodes above
+ control:
+ - kvm01
+ - kvm02
+ - kvm03
+ compute:
+ - cmp001
+ - cmp002
+ # Below values are only used for each node if said node is virtual
mas01:
vcpus: 4
ram: 6144
@@ -54,8 +35,10 @@ virtual:
vcpus: 4
ram: 14336
cmp001:
+ disks: 100G;100G
vcpus: 4
ram: 8192
cmp002:
+ disks: 100G;100G
vcpus: 4
ram: 8192
diff --git a/mcp/config/scenario/os-nosdn-nofeature-noha.yaml.j2 b/mcp/config/scenario/os-nosdn-nofeature-noha.yaml.j2
new file mode 100644
index 000000000..a1f0ddaa6
--- /dev/null
+++ b/mcp/config/scenario/os-nosdn-nofeature-noha.yaml.j2
@@ -0,0 +1,52 @@
+##############################################################################
+# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+---
+cluster:
+ domain: mcp-ovs-noha.local
+ states:
+ - openstack_noha
+ - neutron_gateway
+ - networks
+virtual:
+ nodes:
+ control:
+ - ctl01
+ - gtw01
+ compute:
+ - cmp001
+ - cmp002
+ ctl01:
+ disks: {{ conf.nodes[nm.ctl01.idx].disks | map(attribute='disk_capacity') | join(';') }}
+ vcpus: 4
+ ram: 14336
+ gtw01:
+ ram: 2048
+{%- if nm.cmp001.idx < conf.nodes | length %}
+ {%- set cmp_node = conf.nodes[nm.cmp001.idx].node %}
+ {%- set cmp_ram = ([16384, ('%.0f' | format((cmp_node.memory | storage_size_num | float) * 1024 / 1000000000) | int)] | sort)[-1] %}
+ {%- set cmp_sockets = ([2, cmp_node.cpus | int] | sort)[-1] %}
+ {%- set cmp_threads = 2 %}
+ {%- set cmp_cores = ([2, ('%.0f' | format((cmp_node.cores | float) / cmp_threads)) | int] | sort)[-1] %}
+ {%- for cmpi in range(1, 3) %}
+ cmp00{{ cmpi }}:
+ disks: {{ conf.nodes[nm.cmp001.idx].disks | map(attribute='disk_capacity') | join(';') }}
+ vcpus: {{ cmp_sockets * cmp_cores * cmp_threads }}
+ ram: {{ cmp_ram }}
+ cpu_topology:
+ sockets: {{ cmp_sockets }}
+ cores: {{ cmp_cores }}
+ threads: {{ cmp_threads }}
+ numa:
+ {%- for ci in range(0, cmp_sockets) %}
+ cell{{ ci }}:
+ memory: {{ '%.0f' | format((cmp_ram | float) * 1024 / cmp_sockets) }}
+ cpus: {{ cmp_cores * cmp_threads * (ci | int) }}-{{ cmp_cores * cmp_threads * (ci | int + 1) - 1 }}
+ {%- endfor %}
+ {%- endfor %}
+{%- endif %}
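Editor's note: the cmp001/cmp002 sizing in the template above is derived from the PDF compute node: RAM is scaled from node.memory and floored at 16384 MiB, the CPU topology is at least 2 sockets x 2 cores x 2 threads, and each socket becomes one NUMA cell holding an equal share of RAM (expressed in KiB, hence the extra *1024). A hand-worked sketch of the same arithmetic in plain Python, assuming a PDF node with memory '32G', cpus: 2 and cores: 8, and assuming the project's storage_size_num filter converts '32G' to bytes:

# Assumed PDF values for the cmp001 node (hypothetical, for illustration only)
mem_bytes = 32 * 10**9          # storage_size_num('32G') -- assumption
sockets_pdf, cores_pdf, threads = 2, 8, 2

cmp_ram = max(16384, round(mem_bytes * 1024 / 1e9))   # MiB, floored at 16 GiB -> 32768
cmp_sockets = max(2, sockets_pdf)                     # -> 2
cmp_cores = max(2, round(cores_pdf / threads))        # -> 4
vcpus = cmp_sockets * cmp_cores * threads             # -> 16

for ci in range(cmp_sockets):                         # one NUMA cell per socket
    cell_mem_kib = round(cmp_ram * 1024 / cmp_sockets)
    first = cmp_cores * threads * ci
    last = cmp_cores * threads * (ci + 1) - 1
    print(f"cell{ci}: memory={cell_mem_kib} cpus={first}-{last}")
# cell0: memory=16777216 cpus=0-7
# cell1: memory=16777216 cpus=8-15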
diff --git a/mcp/config/scenario/os-nosdn-onap-ha.yaml b/mcp/config/scenario/os-nosdn-onap-ha.yaml
new file mode 100644
index 000000000..653599f6c
--- /dev/null
+++ b/mcp/config/scenario/os-nosdn-onap-ha.yaml
@@ -0,0 +1,48 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB, Tieto and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# Scenario details:
+# Specific scenario for ONAP deployment on top of OPNFV
+# Scenario is based on generic os-nosdn-nofeature-ha scenario
+# Responsible: Auto project
+---
+cluster:
+ domain: mcp-ovs-ha.local
+ states:
+ - openstack_ha
+ - networks
+ - onap
+virtual:
+ nodes:
+ control:
+ - kvm01
+ - kvm02
+ - kvm03
+ compute:
+ - cmp001
+ - cmp002
+ # Below values are only used for each node if said node is virtual
+ mas01:
+ vcpus: 4
+ ram: 6144
+ kvm01:
+ vcpus: 4
+ ram: 14336
+ kvm02:
+ vcpus: 4
+ ram: 14336
+ kvm03:
+ vcpus: 4
+ ram: 14336
+ cmp001:
+ disks: 100G;100G
+ vcpus: 32
+ ram: 102400
+ cmp002:
+ disks: 100G;100G
+ vcpus: 32
+ ram: 102400
diff --git a/mcp/config/scenario/os-nosdn-onap-noha.yaml b/mcp/config/scenario/os-nosdn-onap-noha.yaml
new file mode 100644
index 000000000..5a08a2cd6
--- /dev/null
+++ b/mcp/config/scenario/os-nosdn-onap-noha.yaml
@@ -0,0 +1,41 @@
+##############################################################################
+# Copyright (c) 2017-2018 Mirantis Inc., Enea AB, Tieto and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# Scenario details:
+# Specific scenario for ONAP deployment on top of OPNFV
+# Scenario is based on generic os-nosdn-nofeature-noha scenario
+# Responsible: Auto project
+---
+cluster:
+ domain: mcp-ovs-noha.local
+ states:
+ - openstack_noha
+ - neutron_gateway
+ - networks
+ - onap
+virtual:
+ nodes:
+ control:
+ - ctl01
+ - gtw01
+ compute:
+ - cmp001
+ - cmp002
+ # Below values are only used for each node if said node is virtual
+ ctl01:
+ vcpus: 4
+ ram: 16384
+ gtw01:
+ ram: 2048
+ cmp001:
+ disks: 100G;100G
+ vcpus: 36
+ ram: 122880
+ cmp002:
+ disks: 100G;100G
+ vcpus: 36
+ ram: 122880
diff --git a/mcp/config/scenario/os-nosdn-ovs-ha.yaml b/mcp/config/scenario/os-nosdn-ovs-ha.yaml
new file mode 100644
index 000000000..72a55ea6f
--- /dev/null
+++ b/mcp/config/scenario/os-nosdn-ovs-ha.yaml
@@ -0,0 +1,44 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+cluster:
+ domain: mcp-ovs-dpdk-ha.local
+ states:
+ - openstack_ha
+ - networks
+virtual:
+ nodes:
+ control:
+ - kvm01
+ - kvm02
+ - kvm03
+ compute:
+ - cmp001
+ - cmp002
+ # Below values are only used for each node if said node is virtual
+ mas01:
+ vcpus: 4
+ ram: 6144
+ # NOTE: We might need to add more RAM here
+ kvm01:
+ vcpus: 4
+ ram: 14336
+ kvm02:
+ vcpus: 4
+ ram: 14336
+ kvm03:
+ vcpus: 4
+ ram: 14336
+ cmp001:
+ disks: 100G;100G
+ vcpus: 4
+ ram: 10240
+ cmp002:
+ disks: 100G;100G
+ vcpus: 4
+ ram: 10240
diff --git a/mcp/config/scenario/os-nosdn-ovs-noha.yaml b/mcp/config/scenario/os-nosdn-ovs-noha.yaml
index 19e7946d1..b109200e1 100644
--- a/mcp/config/scenario/os-nosdn-ovs-noha.yaml
+++ b/mcp/config/scenario/os-nosdn-ovs-noha.yaml
@@ -9,25 +9,26 @@
cluster:
domain: mcp-ovs-dpdk-noha.local
states:
- - dpdk
- openstack_noha
- neutron_gateway
- networks
virtual:
nodes:
- - cfg01
- - ctl01
- - cmp001
- - cmp002
- - gtw01
+ control:
+ - ctl01
+ - gtw01
+ compute:
+ - cmp001
+ - cmp002
ctl01:
- vcpus: 4
ram: 14336
gtw01:
- ram: 2048
+ ram: 4096
cmp001:
- vcpus: 4
- ram: 8192
+ disks: 100G;100G
+ vcpus: 6
+ ram: 14336
cmp002:
- vcpus: 4
- ram: 8192
+ disks: 100G;100G
+ vcpus: 6
+ ram: 14336
diff --git a/mcp/config/scenario/os-odl-bgpvpn-noha.yaml b/mcp/config/scenario/os-odl-bgpvpn-noha.yaml
new file mode 100644
index 000000000..a083e154e
--- /dev/null
+++ b/mcp/config/scenario/os-odl-bgpvpn-noha.yaml
@@ -0,0 +1,38 @@
+##############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+cluster:
+ domain: mcp-odl-noha.local
+ states:
+ - opendaylight
+ - openstack_noha
+ - neutron_gateway
+ - quagga
+ - networks
+virtual:
+ nodes:
+ control:
+ - ctl01
+ - gtw01
+ - odl01
+ compute:
+ - cmp001
+ - cmp002
+ ctl01:
+ ram: 14336
+ gtw01:
+ vcpus: 2
+ ram: 2048
+ odl01:
+ ram: 6144
+ cmp001:
+ disks: 100G;100G
+ ram: 12288
+ cmp002:
+ disks: 100G;100G
+ ram: 12288
diff --git a/mcp/config/scenario/os-odl-nofeature-ha.yaml.j2 b/mcp/config/scenario/os-odl-nofeature-ha.yaml
index c48f18c51..f295df2ec 100644
--- a/mcp/config/scenario/os-odl-nofeature-ha.yaml.j2
+++ b/mcp/config/scenario/os-odl-nofeature-ha.yaml
@@ -5,42 +5,23 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-{%- import 'net_map.j2' as nm with context %}
---
cluster:
domain: mcp-odl-ha.local
states:
-{%- if nm.cluster.has_baremetal_nodes %}
- - maas
- - baremetal_init
-{%- endif %}
-{%- if conf.MCP_VCP %}
- - virtual_control_plane
-{%- endif %}
- opendaylight
- openstack_ha
- networks
virtual:
nodes:
- - cfg01
-{%- if nm.cluster.has_baremetal_nodes %}
- - mas01
-{%- endif %}
-{#- Most likely, controllers will always have the same type and number (3) #}
-{%- if conf.nodes[nm.ctl01.idx].node.type == 'virtual' %}
- - kvm01
- - kvm02
- - kvm03
-{%- endif %}
-{#- Later, we might have mixed computes here, for hybrid multi-arch testing #}
-{%- if conf.nodes[nm.cmp001.idx].node.type == 'virtual' %}
- - cmp001
- - cmp002
-{%- endif %}
- cfg01:
- vcpus: 4
- ram: 6144
- # Below values are only used when nodes are defined in virtual.nodes above
+ control:
+ - kvm01
+ - kvm02
+ - kvm03
+ compute:
+ - cmp001
+ - cmp002
+ # Below values are only used for each node if said node is virtual
mas01:
vcpus: 4
ram: 6144
@@ -55,8 +36,10 @@ virtual:
vcpus: 4
ram: 14336
cmp001:
+ disks: 100G;100G
vcpus: 4
ram: 8192
cmp002:
+ disks: 100G;100G
vcpus: 4
ram: 8192
diff --git a/mcp/config/scenario/os-odl-nofeature-noha.yaml b/mcp/config/scenario/os-odl-nofeature-noha.yaml
index ec04ee87a..1bb47bbc7 100644
--- a/mcp/config/scenario/os-odl-nofeature-noha.yaml
+++ b/mcp/config/scenario/os-odl-nofeature-noha.yaml
@@ -15,17 +15,21 @@ cluster:
- networks
virtual:
nodes:
- - cfg01
- - ctl01
- - cmp001
- - cmp002
- - gtw01
- - odl01
+ control:
+ - ctl01
+ - gtw01
+ - odl01
+ compute:
+ - cmp001
+ - cmp002
ctl01:
- vcpus: 4
ram: 14336
gtw01:
+ vcpus: 2
ram: 2048
odl01:
- vcpus: 4
- ram: 5120
+ ram: 6144
+ cmp001:
+ disks: 100G;100G
+ cmp002:
+ disks: 100G;100G
diff --git a/mcp/config/scenario/os-odl-ovs-noha.yaml b/mcp/config/scenario/os-odl-ovs-noha.yaml
index 083e7e646..d47ad9668 100644
--- a/mcp/config/scenario/os-odl-ovs-noha.yaml
+++ b/mcp/config/scenario/os-odl-ovs-noha.yaml
@@ -9,30 +9,31 @@
cluster:
domain: mcp-odl-noha.local
states:
- - dpdk
- opendaylight
- openstack_noha
- neutron_gateway
- networks
virtual:
nodes:
- - cfg01
- - ctl01
- - cmp001
- - cmp002
- - gtw01
- - odl01
+ control:
+ - ctl01
+ - gtw01
+ - odl01
+ compute:
+ - cmp001
+ - cmp002
ctl01:
- vcpus: 4
ram: 14336
gtw01:
- ram: 1024
+ vcpus: 2
+ ram: 2048
odl01:
- vcpus: 4
- ram: 5120
+ ram: 6144
cmp001:
- vcpus: 4
- ram: 8192
+ disks: 100G;100G
+ vcpus: 6
+ ram: 14336
cmp002:
- vcpus: 4
- ram: 8192
+ disks: 100G;100G
+ vcpus: 6
+ ram: 14336
diff --git a/mcp/patches/patches.list b/mcp/config/scenario/os-odl-sfc-noha.yaml
index baa15d79d..735855433 100644
--- a/mcp/patches/patches.list
+++ b/mcp/config/scenario/os-odl-sfc-noha.yaml
@@ -5,8 +5,32 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-/usr/share/salt-formulas/env: 0002-maas-region-skip-credentials-update.patch
-/usr/share/salt-formulas/env: 0008-Handle-extra-environment-variables.patch
-/usr/share/salt-formulas/env: 0010-maas-region-allow-timeout-override.patch
-/usr/share/salt-formulas/env: 0011-system.repo-Debian-Add-keyserver-proxy-support.patch
-/usr/share/salt-formulas/env: 0015-Set-ovs-bridges-as-L3-interfaces.patch
+---
+cluster:
+ domain: mcp-odl-noha.local
+ states:
+ - opendaylight
+ - openstack_noha
+ - neutron_gateway
+ - tacker
+ - networks
+virtual:
+ nodes:
+ control:
+ - ctl01
+ - gtw01
+ - odl01
+ compute:
+ - cmp001
+ - cmp002
+ ctl01:
+ ram: 14336
+ gtw01:
+ vcpus: 2
+ ram: 2048
+ odl01:
+ ram: 6144
+ cmp001:
+ disks: 100G;100G
+ cmp002:
+ disks: 100G;100G
diff --git a/mcp/config/scenario/os-ovn-nofeature-ha.yaml.j2 b/mcp/config/scenario/os-ovn-nofeature-ha.yaml
index 218b5dece..69c5b9495 100644
--- a/mcp/config/scenario/os-ovn-nofeature-ha.yaml.j2
+++ b/mcp/config/scenario/os-ovn-nofeature-ha.yaml
@@ -5,41 +5,22 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-{%- import 'net_map.j2' as nm with context %}
---
cluster:
domain: mcp-ovn-ha.local
states:
-{%- if nm.cluster.has_baremetal_nodes %}
- - maas
- - baremetal_init
-{%- endif %}
-{%- if conf.MCP_VCP %}
- - virtual_control_plane
-{%- endif %}
- openstack_ha
- networks
virtual:
nodes:
- - cfg01
-{%- if nm.cluster.has_baremetal_nodes %}
- - mas01
-{%- endif %}
-{#- Most likely, controllers will always have the same type and number (3) #}
-{%- if conf.nodes[nm.ctl01.idx].node.type == 'virtual' %}
- - kvm01
- - kvm02
- - kvm03
-{%- endif %}
-{#- Later, we might have mixed computes here, for hybrid multi-arch testing #}
-{%- if conf.nodes[nm.cmp001.idx].node.type == 'virtual' %}
- - cmp001
- - cmp002
-{%- endif %}
- cfg01:
- vcpus: 4
- ram: 6144
- # Below values are only used when nodes are defined in virtual.nodes above
+ control:
+ - kvm01
+ - kvm02
+ - kvm03
+ compute:
+ - cmp001
+ - cmp002
+ # Below values are only used for each node if said node is virtual
mas01:
vcpus: 4
ram: 6144
@@ -54,8 +35,10 @@ virtual:
vcpus: 4
ram: 14336
cmp001:
+ disks: 100G;100G
vcpus: 4
ram: 8192
cmp002:
+ disks: 100G;100G
vcpus: 4
ram: 8192
diff --git a/mcp/config/scenario/os-ovn-nofeature-noha.yaml b/mcp/config/scenario/os-ovn-nofeature-noha.yaml
index 7036340bf..e7ff2a7d8 100644
--- a/mcp/config/scenario/os-ovn-nofeature-noha.yaml
+++ b/mcp/config/scenario/os-ovn-nofeature-noha.yaml
@@ -13,16 +13,18 @@ cluster:
- networks
virtual:
nodes:
- - cfg01
- - ctl01
- - cmp001
- - cmp002
+ control:
+ - ctl01
+ compute:
+ - cmp001
+ - cmp002
ctl01:
- vcpus: 4
ram: 14336
cmp001:
- vcpus: 5
- ram: 8192
+ disks: 100G;100G
+ vcpus: 6
+ ram: 10240
cmp002:
- vcpus: 5
- ram: 8192
+ disks: 100G;100G
+ vcpus: 6
+ ram: 10240
diff --git a/mcp/config/states/akraino_iec b/mcp/config/states/akraino_iec
new file mode 100755
index 000000000..efe0d4df0
--- /dev/null
+++ b/mcp/config/states/akraino_iec
@@ -0,0 +1,62 @@
+#!/bin/bash -e
+# shellcheck disable=SC1090
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
+SCRIPTS_DIR=$(dirname "${BASH_SOURCE[0]}")/../../scripts
+
+source "${SCRIPTS_DIR}/lib.sh"
+source "${SCRIPTS_DIR}/xdf_data.sh"
+source "${SCRIPTS_DIR}/globals.sh"
+
+IEC_REPO_URI='https://gerrit.akraino.org/r/iec'
+IEC_USER_L='ubuntu'
+# shellcheck disable=SC2154
+if [[ "${base_image}" =~ centos ]]; then
+ IEC_USER='centos'
+else
+ IEC_USER=${IEC_USER_L}
+fi
+IEC_REPO_PATH=/var/lib/akraino/iec
+IEC_SCRIPTS_PATH="${IEC_REPO_PATH}/src/foundation/scripts"
+
+POD_NETWORK_CIDR='100.100.0.0/16' # Avoid overlapping Fuel's PXE/admin net
+
+# shellcheck disable=SC2174
+mkdir -p -m 777 "$(dirname ${IEC_REPO_PATH})"
+[ -e "${IEC_REPO_PATH}" ] || su - "${IEC_USER_L}" -c \
+ "git clone '${IEC_REPO_URI}' '${IEC_REPO_PATH}'"
+# shellcheck disable=SC2086
+wait_for 3.0 "! salt-cp 'iec*' -C '${IEC_REPO_PATH}/' \
+ '$(dirname ${IEC_REPO_PATH})' | grep -e False"
+salt -C 'iec*' cmd.run "chown -R ${IEC_USER}:${IEC_USER} ${IEC_REPO_PATH}"
+
+salt -C 'iec*' cmd.run runas="${IEC_USER}" "${IEC_SCRIPTS_PATH}/k8s_common.sh"
+
+IEC_MASTER_IP=$(salt --out txt -C 'iec* and *01*' pillar.get \
+ _param:single_address | cut -d ' ' -f2)
+salt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" \
+ stdin="$(sed -z 's/\n/\\n/g' "${SCRIPTS_DIR}/$(basename "${SSH_KEY}")")" \
+ "mkdir -p .ssh && touch .ssh/id_rsa && chmod 600 .ssh/id_rsa && \
+ cat > .ssh/id_rsa && \
+ ${IEC_SCRIPTS_PATH}/k8s_master.sh ${IEC_MASTER_IP} ${POD_NETWORK_CIDR}"
+
+KUBE_NODE_CNT=$(salt --out txt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" \
+ 'kubectl get nodes | grep -c -e "^iec"' | cut -d ' ' -f2)
+if [ "${KUBE_NODE_CNT}" != "$(salt-key | grep -c -e '^iec')" ]; then
+ KUBE_JOIN_CMD=$(salt --out txt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" \
+ 'kubeadm token create --print-join-command' | cut -d ' ' -f2-)
+ salt -C 'iec* and not *01*' cmd.run "${KUBE_JOIN_CMD}"
+fi
+salt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" 'kubectl get nodes'
+
+salt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" \
+ "${IEC_SCRIPTS_PATH}/setup-cni.sh '' ${POD_NETWORK_CIDR}"
+salt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" "${IEC_SCRIPTS_PATH}/nginx.sh"
+salt -C 'iec* and *01*' cmd.run runas="${IEC_USER}" "${IEC_SCRIPTS_PATH}/helm.sh"
diff --git a/mcp/config/states/baremetal_init b/mcp/config/states/baremetal_init
index 6a44b788b..dcedfbeda 100755
--- a/mcp/config/states/baremetal_init
+++ b/mcp/config/states/baremetal_init
@@ -11,28 +11,31 @@ CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
# shellcheck disable=SC1090
source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
+source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/xdf_data.sh"
+
+cluster_nodes_query="${control_nodes_query} or cmp*"
# KVM, compute node prereqs
# patch the networking module for Debian based distros
debian_ip_source=/usr/lib/python2.7/dist-packages/salt/modules/debian_ip.py
-salt -C 'kvm* or cmp*' file.line $debian_ip_source \
+salt -C "${cluster_nodes_query}" file.line $debian_ip_source \
content='iface = iface.lower()' mode='delete'
-salt -C 'kvm* or cmp*' file.replace $debian_ip_source \
+salt -C "${cluster_nodes_query}" file.replace $debian_ip_source \
pattern="^\s{8}__salt__\['pkg.install'\]\('vlan'\)" \
repl="\n if not __salt__['pkg.version']('vlan'):\n __salt__['pkg.install']('vlan')"
-salt -C 'kvm* or cmp*' pkg.install bridge-utils
-salt -C 'kvm*' state.apply linux.network,linux.system.kernel
-wait_for 5.0 "salt -C 'kvm* or cmp*' state.apply salt.minion"
-wait_for 5.0 "salt -C 'cmp*' state.apply linux.system"
-# wrap distro `route` binary to silence errors when route already exists
-wait_for 5.0 "salt -C 'kvm* or cmp*' state.apply opnfv.route_wrapper"
-wait_for 5.0 "salt -C 'cmp*' state.apply linux.network"
-wait_for 30.0 "salt -C 'kvm* or cmp*' test.ping"
+salt -C "${cluster_nodes_query}" state.apply linux.system.repo
+salt -C "${cluster_nodes_query}" pkg.install force_yes=true bridge-utils,python-jinja2
+salt -C "${cluster_nodes_query}" service.restart salt-minion
+wait_for 5.0 "salt -C '${cluster_nodes_query}' state.apply salt.minion"
+salt -C "${cluster_nodes_query}" file.remove /etc/resolv.conf
+salt -C "${cluster_nodes_query}" file.touch /etc/resolv.conf
+wait_for 5.0 "salt -C '${cluster_nodes_query}' state.apply linux,ntp"
+wait_for 30.0 "salt -C '${cluster_nodes_query}' test.ping"
-salt -C 'kvm* or cmp*' system.reboot
-wait_for 90.0 "salt -C 'kvm* or cmp*' test.ping"
+salt -C "${cluster_nodes_query}" cmd.run 'reboot'
+wait_for 90.0 "salt -C '${cluster_nodes_query}' test.ping"
-salt -C 'kvm* or cmp*' state.apply linux,ntp
-salt -C 'kvm* or cmp*' pkg.upgrade refresh=False
+salt -C "${cluster_nodes_query}" state.apply linux,ntp
+salt -C "${cluster_nodes_query}" pkg.upgrade refresh=False dist_upgrade=True
diff --git a/mcp/config/states/kubernetes b/mcp/config/states/kubernetes
new file mode 100755
index 000000000..0894b10a6
--- /dev/null
+++ b/mcp/config/states/kubernetes
@@ -0,0 +1,39 @@
+#!/bin/bash -e
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
+
+# Create and distribute SSL certificates for services
+salt-call state.sls salt.minion
+
+# Install etcd
+salt -I 'etcd:server' state.sls etcd.server.service
+salt -I 'etcd:server' cmd.run ". /var/lib/etcd/configenv && etcdctl cluster-health"
+
+# Install Kubernetes and Calico
+salt -I 'kubernetes:master' state.sls kubernetes.master.kube-addons
+salt -I 'kubernetes:pool' state.sls kubernetes.pool
+salt -I 'kubernetes:pool' cmd.run "calicoctl node status"
+salt -I 'kubernetes:pool' cmd.run "calicoctl get ippool"
+
+# Setup NAT for Calico
+salt -I 'kubernetes:master' state.sls etcd.server.setup
+
+# Run whole master to check consistency
+salt -I 'kubernetes:master' state.sls kubernetes exclude=kubernetes.master.setup
+
+# Register addons
+salt -I 'kubernetes:master' state.sls kubernetes.master.setup
+
+# Upload config
+K8S_CONFIG=kubernetes.config
+K8S_HOST_ID=$(salt -I 'kubernetes:master' --out=yaml cp.push \
+ /etc/kubernetes/admin-kube-config \
+ upload_path="$K8S_CONFIG" | cut -d':' -f1)
+cd /opt && ln -sf "/var/cache/salt/master/minions/${K8S_HOST_ID}/files/${K8S_CONFIG}"
diff --git a/mcp/config/states/maas b/mcp/config/states/maas
index ec2458234..28ef4cae0 100755
--- a/mcp/config/states/maas
+++ b/mcp/config/states/maas
@@ -1,7 +1,7 @@
#!/bin/bash -e
-# shellcheck disable=SC1090,SC2155
+# shellcheck disable=SC1090
##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
@@ -17,69 +17,18 @@ source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
bm_nodes=$(salt --out yaml 'mas01*' pillar.get maas:region:machines | \
awk '/^\s+\w+[[:digit:]]+:$/ {gsub(/:$/, "*"); printf "%s ", $1}')
-# Wait for MaaS commissioning/deploy to finish, retry on failure
-function maas_fixup() {
- local statuscmd="salt 'mas01*' --out yaml state.apply maas.machines.status"
- local ncount=$(salt --out yaml 'mas01*' pillar.get maas:region:machines | \
- grep -cE '^\s{2}\w+:$')
-
- # wait_for has 10sec timeout * 96 = 16 min > 15min for Failed state
- wait_for 96 "${statuscmd} | tee /dev/stderr | " \
- "grep -Eq '((Deployed|Ready): ${ncount}|status: (Failed|Allocated))'"
- local statusout=$(eval "${statuscmd}")
-
- local fcnodes=$(echo "${statusout}" | \
- grep -Pzo 'status: Failed commissioning\n\s+system_id: \K.+\n')
- local ftnodes=$(echo "${statusout}" | \
- grep -Pzo 'status: Failed testing\n\s+system_id: \K.+\n')
- for node_system_id in ${fcnodes}; do
- salt -C 'mas01*' state.apply maas.machines.delete \
- pillar="{'system_id': '${node_system_id}'}"
- sleep 10
- done
- for node_system_id in ${ftnodes}; do
- salt -C 'mas01*' state.apply maas.machines.override_failed_testing \
- pillar="{'system_id': '${node_system_id}'}"
- sleep 10
- done
- if [ -n "${fcnodes}" ] || [ -n "${ftnodes}" ]; then
- salt -C 'mas01*' state.apply maas.machines
- return 1
- fi
-
- local fdnodes=$(echo "${statusout}" | \
- grep -Pzo 'status: (Failed deployment|Allocated)\n\s+system_id: \K.+\n')
- local rnodes=$(echo "${statusout}" | \
- grep -Pzo 'status: Ready\n\s+system_id: \K.+\n')
- for node_system_id in ${fdnodes}; do
- salt -C 'mas01*' state.apply maas.machines.mark_broken_fixed \
- pillar="{'system_id': '${node_system_id}'}"
- sleep 10
- done
- if [ -n "${fdnodes}" ] || [ -n "${rnodes}" ]; then
- salt -C 'mas01*' state.apply maas.machines.storage
- salt -C 'mas01*' state.apply maas.machines.deploy
- return 1
- fi
-
- return 0
-}
+wait_for 60.0 "salt --out yaml -C 'mas01*' service.status maas-fixup | fgrep -q 'false'"
# Optionally destroy MaaS machines from a previous run
if [ "${ERASE_ENV}" -gt 1 ]; then
- set +e; dnodes=$(salt 'mas01*' --out yaml state.apply maas.machines.status | \
- grep -Pzo '\s+system_id: \K.+\n'); set -e
cleanup_uefi
- for node_system_id in ${dnodes}; do
- salt -C 'mas01*' state.apply maas.machines.delete \
- pillar="{'system_id': '${node_system_id}'}"
- sleep 10
+ for node_hostname in ${bm_nodes//\*/}; do
+ salt -C 'mas01*' maasng.delete_machine "${node_hostname}" || true
done
fi
# MaaS rack/region controller, node commissioning
-salt -C 'mas01*' state.apply linux,salt,openssh,ntp
-salt -C 'mas01*' state.apply maas.pxe_nat
+wait_for 10.0 "salt -C 'mas01*' state.apply salt,iptables"
salt -C 'mas01*' state.apply maas.cluster
wait_for 10 "salt -C 'mas01*' state.apply maas.region"
@@ -94,13 +43,15 @@ salt-key --out yaml | awk '!/^(minions|- cfg01|- mas01)/ {print $2}' | \
xargs --no-run-if-empty -I{} salt-key -yd {}
# MaaS node deployment
-wait_for 10 maas_fixup
-
-salt -C 'mas01*' pillar.item\
- maas:region:admin:username \
- maas:region:admin:password
+if [ -n "${bm_nodes}" ]; then
+ notify "[NOTE] MaaS operations might take a long time, please be patient" 2
+ salt -C 'mas01*' state.apply maas.machines.wait_for_ready_or_deployed
+ salt -C 'mas01*' state.apply maas.machines.storage
+ salt -C 'mas01*' state.apply maas.machines.deploy
+ salt -C 'mas01*' state.apply maas.machines.wait_for_deployed
+fi
# Check all baremetal nodes are available
-wait_for 5.0 "(for n in ${bm_nodes}; do salt \${n} test.ping 2>/dev/null || exit; done)"
+wait_for 10.0 "(for n in ${bm_nodes}; do salt \${n} test.ping 2>/dev/null || exit; done)"
wait_for 10.0 "salt -C '* and not cfg01* and not mas01*' saltutil.sync_all"
diff --git a/mcp/config/states/onap b/mcp/config/states/onap
new file mode 100755
index 000000000..d196074d9
--- /dev/null
+++ b/mcp/config/states/onap
@@ -0,0 +1,65 @@
+#!/bin/bash -e
+##############################################################################
+# Copyright (c) 2018 Tieto
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# Deploy ONAP on top of OPNFV installed by Fuel/MCP
+# ONAP installation is managed by OPNFV Auto project
+
+AUTO_INSTALL_DIR=/opt/auto
+AUTO_REPO='https://gerrit.opnfv.org/gerrit/auto'
+ONAP_INSTALL_SCRIPT='ci/deploy-onap-fuel.sh'
+
+echo "Clone Auto Repo"
+salt -C 'I@nova:controller and *01*' cmd.run "\
+ rm -rf $AUTO_INSTALL_DIR; \
+ git clone $AUTO_REPO $AUTO_INSTALL_DIR"
+
+echo "ONAP installation starts at $(date)"
+echo "It can take several hours to finish."
+
+# detect compute HW configuration, i.e. minimal values available across
+# all compute nodes
+CMP_COUNT=$(salt -C 'I@nova:compute' grains.get id --out txt | wc -l)
+CMP_MIN_MEM=$(salt -C 'I@nova:compute' grains.get mem_total --out txt |\
+ sed -re 's/^[^:]+: ([0-9]+)$/\1/g' | sort -n | head -n1)
+CMP_MIN_CPUS=$(salt -C 'I@nova:compute' grains.get num_cpus --out txt |\
+ sed -re 's/^[^:]+: ([0-9]+)$/\1/g' | sort -n | head -n1)
+# check disk size for storage of instances; if shared storage is mounted,
+# then return its size, otherwise sum up available space of root disk of all
+# compute nodes
+STORAGE_PATH='/var/lib/nova/instances'
+MOUNT_COUNT=$(salt "cmp*" mount.is_mounted $STORAGE_PATH --out txt |\
+ grep True | wc -l)
+if [ $MOUNT_COUNT -eq $CMP_COUNT ] ; then
+ CMP_STORAGE_TOTAL=$(salt "cmp*" cmd.run "df -BGB $STORAGE_PATH" --out txt |\
+ grep "$STORAGE_PATH" |\
+ sed -re 's/^.* +([0-9]+)GB +([0-9]+GB +){2}.*$/\1/g' |\
+ sort -n | head -n1)
+else
+ CMP_STORAGE_TOTAL=0
+ for STORAGE in $(salt "cmp*" cmd.run "df -BGB /" --out txt | grep '/$' |\
+ sed -re 's/^.* +([0-9]+GB +){2}([0-9]+)GB +.*$/\2/g') ; do
+ CMP_STORAGE_TOTAL=$(($CMP_STORAGE_TOTAL+$STORAGE));
+ done
+fi
+
+# Deploy ONAP with detected configuration
+# execute installation from the 1st controller node
+CTL01=$(salt -C 'I@nova:controller and *01*' grains.get id --out txt |\
+ head -n1 | cut -d':' -f1)
+ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
+ -i /root/fuel/mcp/scripts/mcp.rsa -l ubuntu $CTL01 "bash -s" <<COMMANDS
+ sudo -i
+ source /root/keystonercv3
+ cd $AUTO_INSTALL_DIR
+ export CMP_COUNT=$CMP_COUNT
+ export CMP_MIN_MEM=$CMP_MIN_MEM
+ export CMP_MIN_CPUS=$CMP_MIN_CPUS
+ export CMP_STORAGE_TOTAL=$CMP_STORAGE_TOTAL
+ export AUTO_INSTALL_DIR=$AUTO_INSTALL_DIR
+ $ONAP_INSTALL_SCRIPT | tee $AUTO_INSTALL_DIR/auto_deploy.log
+COMMANDS
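Editor's note: the new 'onap' state above sizes the ONAP deployment from the weakest compute node: CMP_MIN_MEM and CMP_MIN_CPUS are minima across all computes, and CMP_STORAGE_TOTAL is the (smallest reported) shared-storage size when /var/lib/nova/instances is mounted on every compute, otherwise the sum of the computes' root-disk sizes. A minimal sketch of that aggregation, assuming the per-node figures have already been collected (in the script they come from Salt grains and 'df' output):

# Hypothetical per-compute figures; real values come from Salt, not this dict
computes = {
    "cmp001": {"mem_mb": 122880, "cpus": 36, "disk_gb": 400, "shared_mount": False},
    "cmp002": {"mem_mb": 131072, "cpus": 40, "disk_gb": 400, "shared_mount": False},
}

cmp_count = len(computes)
cmp_min_mem = min(n["mem_mb"] for n in computes.values())
cmp_min_cpus = min(n["cpus"] for n in computes.values())

if all(n["shared_mount"] for n in computes.values()):
    # shared instance storage: every node reports the same share, take the smallest
    cmp_storage_total = min(n["disk_gb"] for n in computes.values())
else:
    # no shared storage: add up the root disks of all computes
    cmp_storage_total = sum(n["disk_gb"] for n in computes.values())

print(cmp_count, cmp_min_mem, cmp_min_cpus, cmp_storage_total)  # 2 122880 36 800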
diff --git a/mcp/config/states/opendaylight b/mcp/config/states/opendaylight
index de15d0cef..ae8b4cc92 100755
--- a/mcp/config/states/opendaylight
+++ b/mcp/config/states/opendaylight
@@ -14,8 +14,14 @@ source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
# Get OpenDaylight server options with prefix odl_
function odl() {
- salt --out txt -I 'opendaylight:server' pillar.get "opendaylight:server:odl_$1" | cut -d ' ' -f2
+ salt --out txt -C 'I@opendaylight:server and *01*' pillar.get "opendaylight:server:odl_$1" | cut -d ' ' -f2
}
wait_for 5.0 "salt -I 'opendaylight:server' state.sls opendaylight"
wait_for 20 "salt --out yaml -C 'I@neutron:server and *01*' network.connect $(odl bind_ip) $(odl rest_port) | fgrep -q 'result: true'"
+
+# https://bugs.launchpad.net/networking-odl/+bug/1822559
+FILE=/usr/lib/python3/dist-packages/networking_odl/cmd/set_ovs_hostconfigs.py
+PFILE=/var/tmp/odl_hostconfig.patch
+salt -I 'linux:network:bridge:openvswitch' pkg.install python3-networking-odl
+salt -I 'linux:network:bridge:openvswitch' cmd.run "patch -R -s --dry-run $FILE < $PFILE || patch $FILE < $PFILE"
diff --git a/mcp/config/states/openstack_ha b/mcp/config/states/openstack_ha
index 73c44bb66..11da4e33a 100755
--- a/mcp/config/states/openstack_ha
+++ b/mcp/config/states/openstack_ha
@@ -15,8 +15,8 @@ source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
wait_for 5.0 "salt -I 'keepalived:cluster' state.sls keepalived -b 1"
wait_for 5.0 "salt -I 'keepalived:cluster' pillar.get keepalived:cluster:instance:VIP:address"
-salt -C 'I@rabbitmq:server and *01*' state.sls rabbitmq
-salt -I 'rabbitmq:server' state.sls rabbitmq
+wait_for 5.0 "salt -I 'rabbitmq:cluster:role:master' state.sls rabbitmq"
+salt -I 'rabbitmq:cluster:role:slave' state.sls rabbitmq
salt -I 'rabbitmq:server' cmd.run "rabbitmqctl cluster_status"
salt -I 'glusterfs:server' state.sls glusterfs.server.service
@@ -34,36 +34,68 @@ salt -I 'haproxy:proxy' state.sls haproxy
salt -I 'haproxy:proxy' service.status haproxy
salt -I 'haproxy:proxy' service.restart rsyslog
-set +e; salt -I 'keystone:server' state.sls keystone.server -b 1; set -e
+salt -I 'keystone:server:role:primary' state.sls keystone.server
+salt -I 'keystone:server:role:secondary' state.sls keystone.server
salt -I 'keystone:server' service.restart apache2
-salt -I 'keystone:server' state.sls keystone.server -b 1
wait_for 30.0 "salt -I 'keystone:client' state.sls keystone.client"
salt -I 'keystone:server' cmd.run ". /root/keystonercv3; openstack service list"
-salt -I 'glance:server' state.sls glance -b 1
-salt -I 'nova:controller' state.sls nova -b 1
-salt -I 'heat:server' state.sls heat -b 1
+salt -I 'glance:server:role:primary' state.sls glance
+salt -I 'glance:server:role:secondary' state.sls glance
+salt -I 'nova:controller:role:primary' state.sls nova
+salt -I 'nova:controller:role:secondary' state.sls nova
+salt -I 'heat:server:role:primary' state.sls heat
+salt -I 'heat:server:role:secondary' state.sls heat
-wait_for 5.0 "salt -I 'cinder:controller' state.sls cinder -b 1"
+salt -I 'cinder:controller:role:primary' state.sls cinder
+salt -I 'cinder:controller:role:secondary' state.sls cinder
wait_for 3.0 "salt -I 'cinder:volume' state.sls cinder"
-salt -I 'neutron:server' state.sls neutron -b 1
+salt -I 'neutron:server:role:primary' state.sls neutron
+salt -I 'neutron:server:role:secondary' state.sls neutron
salt -I 'neutron:gateway' state.sls neutron.gateway
+if salt 'cmp*' match.pillar 'neutron:compute:backend:engine:ovn' \
+ --out yaml --static | grep -q -e 'true' ; then
+ salt -I 'neutron:compute' state.sls neutron.compute
+fi
-salt -I 'nova:compute' state.sls nova
+salt -I 'nova:compute' state.sls nova,armband
+
+salt -I 'barbican:server:role:primary' state.sls barbican
+salt -I 'barbican:server:role:secondary' state.sls barbican
+salt -I 'barbican:client' state.sls barbican
+
+# remove config files coming from packages
+for service in gnocchi panko; do
+ salt -I "${service}:server" pkg.install ${service}-api
+ salt -I "${service}:server" file.remove "/etc/apache2/sites-enabled/${service}-api.conf"
+done
salt -I 'redis:cluster:role:master' state.sls redis
salt -I 'redis:server' state.sls redis
-salt -I 'gnocchi:server' state.sls gnocchi -b 1
-salt -I 'panko:server' state.sls panko -b 1
-salt -I 'aodh:server' state.sls aodh -b 1
+salt -I 'gnocchi:server:role:primary' state.sls gnocchi
+salt -I 'gnocchi:server:role:secondary' state.sls gnocchi
+salt -I 'panko:server:role:primary' state.sls panko
+salt -I 'panko:server:role:secondary' state.sls panko
+salt -I 'aodh:server:role:primary' state.sls aodh
+salt -I 'aodh:server:role:secondary' state.sls aodh
salt -I 'ceilometer:server' state.sls ceilometer
salt -I 'ceilometer:agent' state.sls ceilometer
-salt -I 'horizon:server' state.sls horizon
+wait_for 3.0 "salt -I 'horizon:server' state.sls apache,horizon"
salt -I 'nginx:server' state.sls nginx
+# Workaround Horizon missing CSS/JS, see FUEL-324
+if ! salt -C 'I@horizon:server and *01*' --out=yaml pkg.version openstack-dashboard | grep -qE ':.*mcp'; then
+ salt -I 'horizon:server' file.symlink \
+ /var/lib/openstack-dashboard/static \
+ /usr/share/openstack-dashboard/static
+ salt -I 'horizon:server' cmd.run "python3 /usr/share/openstack-dashboard/manage.py collectstatic --noinput"
+ salt -I 'horizon:server' cmd.run "python3 /usr/share/openstack-dashboard/manage.py compress --force"
+ salt -I 'horizon:server' service.reload apache2
+fi
+
cluster_public_host=$(salt -C 'I@nginx:server and *01*' --out=yaml \
pillar.get _param:cluster_public_host | awk '{print $2; exit}')
dashboard_host=$(salt -C 'I@nginx:server and *01*' --out=yaml cp.push \
@@ -71,7 +103,3 @@ dashboard_host=$(salt -C 'I@nginx:server and *01*' --out=yaml cp.push \
upload_path='certs/os_cacert' | cut -d':' -f1)
cd /etc/ssl/certs && \
ln -sf "/var/cache/salt/master/minions/${dashboard_host}/files/certs/os_cacert"
-
-# glance v1 api is required by orchestra tests
-salt -I 'glance:server' ini.set_option /etc/glance/glance-api.conf '{DEFAULT: {enable_v1_api: True}}'
-salt -I 'glance:server' service.restart glance-api
diff --git a/mcp/config/states/openstack_noha b/mcp/config/states/openstack_noha
index 70db238be..6b503504b 100755
--- a/mcp/config/states/openstack_noha
+++ b/mcp/config/states/openstack_noha
@@ -23,17 +23,14 @@ salt -I 'mysql:server' state.sls mysql
salt -I 'memcached:server' state.sls memcached
salt -I 'haproxy:proxy' state.sls haproxy
-set +e; salt -I 'keystone:server' state.sls keystone.server; set -e
-salt -I 'keystone:server' service.restart apache2
salt -I 'keystone:server' state.sls keystone.server
+salt -I 'keystone:server' service.restart apache2
salt -I 'keystone:server' state.sls keystone.client
salt -I 'keystone:server' cmd.run ". /root/keystonercv3; openstack service list"
salt -I 'glance:server' state.sls glance
-# apply nova state twice to complete broken db sync
-salt -I 'nova:controller' state.sls nova
salt -I 'nova:controller' state.sls nova
salt -I 'heat:server' state.sls heat
@@ -41,10 +38,19 @@ salt -I 'heat:server' state.sls heat
salt -I 'cinder:controller' state.sls cinder
wait_for 3 "salt -I 'cinder:volume' state.sls cinder"
-salt -I 'neutron:server' state.sls neutron
+salt -I 'neutron:server' state.sls etcd,neutron
salt -I 'neutron:compute' state.sls neutron
-salt -I 'nova:compute' state.sls nova
+salt -I 'nova:compute' state.sls nova,armband
+
+salt -I 'barbican:server' state.sls barbican
+salt -I 'barbican:client' state.sls barbican
+
+# remove config files coming from packages
+for service in gnocchi panko; do
+ salt -I "${service}:server" pkg.install ${service}-api
+ salt -I "${service}:server" file.remove "/etc/apache2/sites-enabled/${service}-api.conf"
+done
salt -I 'redis:server' state.sls redis
salt -I 'gnocchi:server' state.sls gnocchi
@@ -53,4 +59,14 @@ salt -I 'aodh:server' state.sls aodh
salt -I 'ceilometer:server' state.sls ceilometer
salt -I 'ceilometer:agent' state.sls ceilometer
-salt -I 'horizon:server' state.sls horizon
+salt -I 'horizon:server' state.sls apache,horizon,nginx
+
+# Workaround Horizon missing CSS/JS, see FUEL-324
+if ! salt -C 'I@horizon:server and *01*' --out=yaml pkg.version openstack-dashboard | grep -qE ':.*mcp'; then
+ salt -I 'horizon:server' file.symlink \
+ /var/lib/openstack-dashboard/static \
+ /usr/share/openstack-dashboard/static
+ salt -I 'horizon:server' cmd.run "python3 /usr/share/openstack-dashboard/manage.py collectstatic --noinput"
+ salt -I 'horizon:server' cmd.run "python3 /usr/share/openstack-dashboard/manage.py compress --force"
+ salt -I 'horizon:server' service.reload apache2
+fi
diff --git a/mcp/config/states/dpdk b/mcp/config/states/quagga
index 281b78fed..e3c9de7da 100755
--- a/mcp/config/states/dpdk
+++ b/mcp/config/states/quagga
@@ -1,6 +1,6 @@
#!/bin/bash -e
##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Intracom Telecom and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
@@ -9,8 +9,4 @@
CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
-# shellcheck disable=SC1090
-source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
-
-salt -I 'nova:compute' alternatives.set ovs-vswitchd /usr/lib/openvswitch-switch-dpdk/ovs-vswitchd-dpdk
-salt -I 'nova:compute' service.restart openvswitch-switch
+salt -I 'quagga:server' state.sls quagga -b 1
diff --git a/mcp/scripts/user-data.mcp.sh.j2 b/mcp/config/states/tacker
index bd80961e6..bd8bc9991 100644..100755
--- a/mcp/scripts/user-data.mcp.sh.j2
+++ b/mcp/config/states/tacker
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -e
##############################################################################
# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
@@ -6,8 +6,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
-rm /etc/salt/minion_id
-rm -f /etc/salt/pki/minion/minion_master.pub
-echo "id: $(hostname).{{ conf.cluster.domain }}" > /etc/salt/minion
-echo "master: {{ conf.SALT_MASTER }}" >> /etc/salt/minion
-service salt-minion restart
+
+CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
+
+salt -I 'tacker:server' state.sls tacker -b 1
diff --git a/mcp/config/states/virtual_control_plane b/mcp/config/states/virtual_control_plane
index 18e6d1cd2..f2e861ac2 100755
--- a/mcp/config/states/virtual_control_plane
+++ b/mcp/config/states/virtual_control_plane
@@ -27,13 +27,13 @@ if [ "${ERASE_ENV}" -eq 1 ]; then
fi
# KVM libvirt first, VCP deployment
-wait_for 5.0 "salt -C 'kvm*' state.sls libvirt"
+wait_for 5.0 "salt -C 'kvm*' state.sls armband,libvirt"
salt -C 'kvm* or cmp*' state.apply salt
-wait_for 10.0 "salt -C 'kvm*' state.sls salt.control"
+wait_for 10.0 "salt -C 'kvm*' state.sls salt.control,linux.system.kernel"
vcp_nodes=$(salt --out yaml 'kvm01*' pillar.get salt:control:cluster:internal:node | \
- awk '/\s+\w+:$/ {gsub(/:$/, "*"); printf "%s ", $1}')
+ awk '/\s+\w+[[:digit:]]+:$/ {gsub(/:$/, "*"); printf "%s ", $1}')
# Check all vcp nodes are available
wait_for 25.0 "(for n in ${vcp_nodes}; do salt \${n} test.ping 2>/dev/null || exit; done)"
@@ -48,14 +48,16 @@ cd /srv/salt/env/prd/maas/files && ln -sf \
salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' cp.get_file \
"salt://maas/files/$(basename "${APT_CONF_D_CURTIN}")" "${APT_CONF_D_CURTIN}"
+salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.sls linux.system.repo
+salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' pkg.install force_yes=true python-jinja2
+salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' service.restart salt-minion
wait_for 10.0 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply salt"
wait_for 10.0 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' state.apply linux,ntp"
wait_for 10.0 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' ssh.set_auth_key ${SUDO_USER} \
$(awk 'NR==1{print $2}' "$(eval echo "~${SUDO_USER}/.ssh/authorized_keys")")"
-salt -C 'prx*' state.apply opnfv.route_wrapper
-salt -C 'prx*' system.reboot
-wait_for 30.0 "salt -C 'prx*' test.ping"
+salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' cmd.run 'reboot'
+wait_for 30.0 "salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' test.ping"
-salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' pkg.upgrade refresh=False
+salt -C 'E@^(?!cfg01|mas01|kvm|cmp00).*' pkg.upgrade refresh=False dist_upgrade=True
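The awk filter above walks the YAML returned by pillar.get, keeps only the indented keys that end in digits (the VCP node names) and rewrites the trailing colon into a Salt glob. A standalone sketch with made-up node names (it requires GNU awk for the \s and \w classes, just like the script itself):

    printf '  ctl01:\n    name: ctl01\n  prx01:\n    name: prx01\n' | \
        awk '/\s+\w+[[:digit:]]+:$/ {gsub(/:$/, "*"); printf "%s ", $1}'
    # prints: ctl01* prx01*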
diff --git a/mcp/config/states/virtual_init b/mcp/config/states/virtual_init
new file mode 100755
index 000000000..b5bb18955
--- /dev/null
+++ b/mcp/config/states/virtual_init
@@ -0,0 +1,43 @@
+#!/bin/bash -e
+# shellcheck disable=SC1090
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
+
+# shellcheck disable=SC1090
+source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/lib.sh"
+source "$(dirname "${BASH_SOURCE[0]}")/../../scripts/xdf_data.sh"
+
+CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
+# shellcheck disable=SC2154,SC2086,SC2116
+LOCAL_VIRT_NODES=$(echo ${virtual_nodes[*]}) # unquoted to filter space
+[[ ! "${cluster_states[*]}" =~ maas ]] || LOCAL_VIRT_NODES='mas01'
+NODE_MASK="${LOCAL_VIRT_NODES// /|}"
+
+wait_for 5.0 "salt-call state.sls reclass,linux.network,salt.minion \
+ exclude='[{id: reclass_packages}, {id: /etc/reclass/reclass-config.yml}]'"
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' saltutil.refresh_pillar"
+
+# NOTE: domain name changes are not yet supported without a clean redeploy
+
+# Init specific to VMs on FN (all for virtual, mas for baremetal)
+wait_for 3.0 "(for n in ${LOCAL_VIRT_NODES}; do salt -C \${n}.* test.ping || exit; done)"
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' saltutil.sync_all"
+[[ ! "${NODE_MASK}" =~ mas01 ]] || exit 0
+
+if [[ "${base_image}" =~ centos ]]; then
+ # CentOS uses an older kernel, skip non-existing sysctl options
+ EXCLUDE_IDS="exclude='[{id: linux_kernel_net.core.netdev_budget_usecs}]'"
+fi
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.sls linux ${EXCLUDE_IDS}"
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' pkg.upgrade refresh=False dist_upgrade=True"
+
+salt -C "E@^(${NODE_MASK}).*" cmd.run 'reboot'
+wait_for 90.0 "salt -C 'E@^(${NODE_MASK}).*' test.ping"
+wait_for 3.0 "salt -C 'E@^(${NODE_MASK}).*' state.apply salt,ntp"
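virtual_init derives its Salt targets from the list of foundation-node VMs: the node names are joined with '|' and dropped into an extended-regex compound matcher (E@^(...).*), so a single target expression covers every local VM. A minimal sketch of the mask construction with placeholder node names:

    virtual_nodes=(ctl01 gtw01 odl01)
    LOCAL_VIRT_NODES="${virtual_nodes[*]}"            # "ctl01 gtw01 odl01"
    NODE_MASK="${LOCAL_VIRT_NODES// /|}"              # "ctl01|gtw01|odl01"
    salt -C "E@^(${NODE_MASK}).*" test.ping           # regex-matches all three minions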
diff --git a/mcp/deploy/images/.gitkeep b/mcp/deploy/images/.gitkeep
deleted file mode 100644
index e69de29bb..000000000
--- a/mcp/deploy/images/.gitkeep
+++ /dev/null
diff --git a/mcp/deploy/scripts b/mcp/deploy/scripts
deleted file mode 160000
-Subproject 4c9818260372459977be1cc4a3869b871ddba0a
diff --git a/mcp/patches/0011-system.repo-Debian-Add-keyserver-proxy-support.patch b/mcp/patches/0011-system.repo-Debian-Add-keyserver-proxy-support.patch
deleted file mode 100644
index 87cd219cd..000000000
--- a/mcp/patches/0011-system.repo-Debian-Add-keyserver-proxy-support.patch
+++ /dev/null
@@ -1,181 +0,0 @@
-::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-:
-: All rights reserved. This program and the accompanying materials
-: are made available under the terms of the Apache License, Version 2.0
-: which accompanies this distribution, and is available at
-: http://www.apache.org/licenses/LICENSE-2.0
-::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
-Date: Sun, 3 Jun 2018 19:28:18 +0200
-Subject: [PATCH] system.repo: Debian: Use proxy for keyservers
-
-Previously, when fetching GPG keys for APT keyring, either using
-public key download & import (as for default repos) or via keyserver,
-we relied on simple `curl` calls or passed it down to Salt aptpkg
-module.
-To be able to retrieve APT keys behind a proxy, one used to have to
-configure the proxy for the Salt minion, which does not yet have
-`no_proxy` support (either *all* or *no* traffic hits the proxy).
-
-When `linux:system:proxy` http(s) proxies are set:
-- no longer pass key configuration to Salt aptpkg (until it properly
- supports `no_proxy`);
-- handle all keys explicitly with `curl` and `apt-key`;
-- set 'http(s)_proxy' env vars for `cmd.wait` calls;
-
-If `linux:system:proxy` is not defined, the behavior is
-unchanged for backwards compatibility.
-
-NOTE: If present, per-repo proxies are also used for keyserver access.
-
-system.repo: Fix conditions order for Debian proxy
-
-Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
----
- linux/system/repo.sls | 65 +++++++++++++++++++++++++++++++++++--------
- 1 file changed, 53 insertions(+), 12 deletions(-)
-
-diff --git a/linux/system/repo.sls b/linux/system/repo.sls
-index 303ea9c..5dfc4c1 100644
---- a/linux/system/repo.sls
-+++ b/linux/system/repo.sls
-@@ -5,9 +5,15 @@ linux_repo_prereq_pkgs:
- pkg.installed:
- - pkgs: {{ system.pkgs }}
-
-+{%- set proxies = {'system': {}, 'repo': {}} %}
-+
- # global proxy setup
--{%- if system.proxy.get('pkg', {}).get('enabled', False) %}
- {%- if grains.os_family == 'Debian' %}
-+{%- if system.proxy.get('pkg', {}).get('enabled', False) %}
-+
-+{%- do proxies.system.update({'https': system.proxy.get('pkg', {}).get('https', None) | default(system.proxy.get('https', None), true)}) %}
-+{%- do proxies.system.update({'http': system.proxy.get('pkg', {}).get('http', None) | default(system.proxy.get('http', None), true)}) %}
-+{%- do proxies.system.update({'ftp': system.proxy.get('pkg', {}).get('ftp', None) | default(system.proxy.get('ftp', None), true)}) %}
-
- /etc/apt/apt.conf.d/99proxies-salt:
- file.managed:
-@@ -15,9 +21,9 @@ linux_repo_prereq_pkgs:
- - source: salt://linux/files/apt.conf.d_proxies
- - defaults:
- external_host: False
-- https: {{ system.proxy.get('pkg', {}).get('https', None) | default(system.proxy.get('https', None), true) }}
-- http: {{ system.proxy.get('pkg', {}).get('http', None) | default(system.proxy.get('http', None), true) }}
-- ftp: {{ system.proxy.get('pkg', {}).get('ftp', None) | default(system.proxy.get('ftp', None), true) }}
-+ https: {{ proxies.system.https }}
-+ http: {{ proxies.system.http }}
-+ ftp: {{ proxies.system.ftp }}
-
- {%- else %}
-
-@@ -25,9 +31,6 @@ linux_repo_prereq_pkgs:
- file.absent
-
- {%- endif %}
--{%- endif %}
--
--{% set default_repos = {} %}
-
- {%- if system.purge_repos|default(False) %}
-
-@@ -38,6 +41,10 @@ purge_sources_list_d_repos:
-
- {%- endif %}
-
-+{%- endif %}
-+
-+{% set default_repos = {} %}
-+
- {%- for name, repo in system.repo.items() %}
- {%- set name=repo.get('name', name) %}
- {%- if grains.os_family == 'Debian' %}
-@@ -45,16 +52,20 @@ purge_sources_list_d_repos:
- # per repository proxy setup
- {%- if repo.get('proxy', {}).get('enabled', False) %}
- {%- set external_host = repo.proxy.get('host', None) or repo.source.split('/')[2] %}
-+{%- do proxies.repo.update({'https': repo.proxy.get('https', None) or system.proxy.get('pkg', {}).get('https', None) | default(system.proxy.get('https', None), true)}) %}
-+{%- do proxies.repo.update({'http': repo.proxy.get('http', None) or system.proxy.get('pkg', {}).get('http', None) | default(system.proxy.get('http', None), true)}) %}
-+{%- do proxies.repo.update({'ftp': repo.proxy.get('ftp', None) or system.proxy.get('pkg', {}).get('ftp', None) | default(system.proxy.get('ftp', None), true)}) %}
- /etc/apt/apt.conf.d/99proxies-salt-{{ name }}:
- file.managed:
- - template: jinja
- - source: salt://linux/files/apt.conf.d_proxies
- - defaults:
- external_host: {{ external_host }}
-- https: {{ repo.proxy.get('https', None) or system.proxy.get('pkg', {}).get('https', None) | default(system.proxy.get('https', None), True) }}
-- http: {{ repo.proxy.get('http', None) or system.proxy.get('pkg', {}).get('http', None) | default(system.proxy.get('http', None), True) }}
-- ftp: {{ repo.proxy.get('ftp', None) or system.proxy.get('pkg', {}).get('ftp', None) | default(system.proxy.get('ftp', None), True) }}
-+ https: {{ proxies.repo.https }}
-+ http: {{ proxies.repo.http }}
-+ ftp: {{ proxies.repo.ftp }}
- {%- else %}
-+{%- do proxies.repo.update({'https': None, 'http': None, 'ftp': None}) %}
- /etc/apt/apt.conf.d/99proxies-salt-{{ name }}:
- file.absent
- {%- endif %}
-@@ -110,6 +121,13 @@ linux_repo_{{ name }}_key:
- {% else %}
- - pkgrepo: linux_repo_{{ name }}
- {% endif %}
-+ - env:
-+{%- if proxies.repo.get('https', None) or proxies.system.get('https', None) %}
-+ - https_proxy: {{ proxies.repo.get('https', None) or proxies.system.get('https', None) }}
-+{%- endif %}
-+{%- if proxies.repo.get('http', None) or proxies.system.get('http', None) %}
-+ - http_proxy: {{ proxies.repo.get('http', None) or proxies.system.get('http', None) }}
-+{%- endif %}
-
- {%- endif %} {# 2 #}
-
-@@ -120,6 +138,10 @@ linux_repo_{{ name }}_key:
-
- {%- if repo.get('enabled', True) %}
-
-+{%- set use_proxy = ( ( proxies.repo.get('https', None) or proxies.system.get('https', None) or
-+ proxies.repo.get('http', None) or proxies.system.get('http', None) ) and
-+ repo.key_id is defined and repo.key_server is defined ) %}
-+
- linux_repo_{{ name }}:
- pkgrepo.managed:
- {%- if repo.ppa is defined %}
-@@ -132,10 +154,10 @@ linux_repo_{{ name }}:
- {%- endif %}
- - file: /etc/apt/sources.list.d/{{ name }}.list
- - clean_file: {{ repo.clean|default(True) }}
-- {%- if repo.key_id is defined %}
-+ {%- if not use_proxy and repo.key_id is defined %}
- - keyid: {{ repo.key_id }}
- {%- endif %}
-- {%- if repo.key_server is defined %}
-+ {%- if not use_proxy and repo.key_server is defined %}
- - keyserver: {{ repo.key_server }}
- {%- endif %}
- {%- if repo.key_url is defined and (grains['saltversioninfo'] >= [2017, 7] or repo.key_url.startswith('salt://')) %}
-@@ -157,6 +179,25 @@ linux_repo_{{ name }}:
- {%- endif %}
- {%- endif %}
-
-+{%- if use_proxy and repo.key_id is defined and repo.key_server is defined %}
-+
-+linux_repo_{{ name }}_key:
-+ cmd.run:
-+ - name: "apt-key adv --keyserver {{ repo.key_server }} --recv {{ repo.key_id }}"
-+ - unless: 'test -e /etc/apt/sources.list.d/{{ name }}.list'
-+ - require_in:
-+ - pkgrepo: linux_repo_{{ name }}
-+ - env:
-+{%- if proxies.repo.get('https', None) or proxies.system.get('https', None) %}
-+ - https_proxy: {{ proxies.repo.get('https', None) or proxies.system.get('https', None) }}
-+{%- endif %}
-+{%- if proxies.repo.get('http', None) or proxies.system.get('http', None) %}
-+ - http_proxy: {{ proxies.repo.get('http', None) or proxies.system.get('http', None) }}
-+{%- endif %}
-+
-+{%- endif %}
-+
-+{#- repo.enabled is false #}
- {%- else %}
-
- linux_repo_{{ name }}_absent:
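The removed patch's core idea was to keep the Salt minion itself proxy-free and export the proxy only for the key-fetching command, since the keyserver is often the only thing that needs to leave the local network. A sketch of the manual equivalent (proxy URL, keyserver and key id are placeholders):

    # fetch a repo signing key through an HTTP proxy without proxying the whole minion
    http_proxy=http://proxy.example.com:8080 \
    https_proxy=http://proxy.example.com:8080 \
        apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 0xDEADBEEF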
diff --git a/mcp/patches/Makefile b/mcp/patches/Makefile
index be3eb9ee5..e0a1c34ce 100644
--- a/mcp/patches/Makefile
+++ b/mcp/patches/Makefile
@@ -25,7 +25,7 @@ FPATCHES = $(shell find ${F_PATCH_DIR} -name '*.patch')
# In order to keep things sort of separate, we should only pass up (to main
# Makefile) the fully-patched repos, and gather any fingerprinting info here.
-# Fuel@OPNFV relies on upstream git repos (one per component) in 1 of 2 ways:
+# OPNFV Fuel relies on upstream git repos (one per component) in 1 of 2 ways:
# - pinned down to tag objects (e.g. "9.0.1")
# - tracking upstream remote HEAD on a stable or master branch
# FIXME(alav): Should we support mixed cases? (e.g. pin down only fuel-main)
diff --git a/mcp/patches/README.rst b/mcp/patches/README.rst
index 735b70341..28af0e8f7 100644
--- a/mcp/patches/README.rst
+++ b/mcp/patches/README.rst
@@ -1,30 +1,30 @@
.. This work is licensed under a Creative Commons Attribution 4.0 International License.
.. SPDX-License-Identifier: CC-BY-4.0
-.. (c) 2017 Mirantis Inc., Enea AB and others.
+.. (c) 2018 Mirantis Inc., Enea AB and others.
==========================================
-Fuel@OPNFV submodule fetching and patching
+OPNFV Fuel Submodule Fetching and Patching
==========================================
This directory holds submodule fetching/patching scripts, intended for
-working with upstream Fuel/MCP components (e.g.: reclass-system-salt-model) in
-developing/applying OPNFV patches (backports, custom fixes etc.).
+working with upstream Fuel/MCP components (e.g.: ``reclass-system-salt-model``)
+in developing/applying OPNFV patches (backports, custom fixes etc.).
The scripts should be friendly to the following 2 use-cases:
- - development work: easily cloning, binding repos to specific commits,
- remote tracking, patch development etc.;
- - to provide parent build scripts an easy method of tracking upstream
- references and applying OPNFV patches on top;
+- development work: easily cloning, binding repos to specific commits,
+ remote tracking, patch development etc.;
+- to provide parent build scripts an easy method of tracking upstream
+ references and applying OPNFV patches on top;
Also, we need to support at least the following modes of operations:
- - submodule bind - each submodule patches will be based on the commit ID
- saved in the .gitmodules config file;
- - remote tracking - each submodule will sync with the upstream remote
- and patches will be applied on top of <sub_remote>/<sub_branch>/HEAD;
+- submodule bind - each submodule patches will be based on the commit ID
+ saved in the ``.gitmodules`` config file;
+- remote tracking - each submodule will sync with the upstream remote
+ and patches will be applied on top of ``<sub_remote>/<sub_branch>/HEAD``;
-Workflow (development)
+Workflow (Development)
======================
The standard development workflow should look as follows:
@@ -32,114 +32,116 @@ The standard development workflow should look as follows:
Decide whether remote tracking should be active or not
------------------------------------------------------
-NOTE: Setting the following var to any non-empty str enables remote track.
+.. NOTE::
-NOTE: Leaving unset will enable remote track for anything but stable branch.
+ Setting the following var to any non-empty str enables remote track.
- .. code-block:: bash
+.. code-block:: console
- $ export FUEL_TRACK_REMOTES=""
+ developer@machine:~/fuel$ export FUEL_TRACK_REMOTES=""
Initialize git submodules
-------------------------
-All Fuel sub-projects are registered as submodules.
+All Fuel direct dependency projects are registered as submodules.
If remote tracking is active, upstream remote is queried and latest remote
-branch HEAD is fetched. Otherwise, checkout commit IDs from .gitmodules.
+branch ``HEAD`` is fetched. Otherwise, checkout commit IDs from ``.gitmodules``.
- .. code-block:: bash
+.. code-block:: console
- $ make sub
+ developer@machine:~/fuel$ make -C mcp/patches sub
-Apply patches from `patches/<sub-project>/*` to respective submodules
----------------------------------------------------------------------
+Apply patches from ``patches/<sub-project>/*`` to respective submodules
+-----------------------------------------------------------------------
This will result in creation of:
-- a tag called `${FUEL_MAIN_TAG}-opnfv-root` at the same commit as Fuel@OPNFV
- upstream reference (bound to git submodule OR tracking remote HEAD);
-- a new branch `opnfv-fuel` which will hold all the OPNFV patches,
- each patch is applied on this new branch with `git-am`;
-- a tag called `${FUEL_MAIN_TAG}-opnfv` at `opnfv-fuel/HEAD`;
+- a tag called ``${F_OPNFV_TAG}-root`` at the same commit as OPNFV Fuel
+ upstream reference (bound to git submodule OR tracking remote ``HEAD``);
+- a new branch ``nightly`` which will hold all the OPNFV patches,
+ each patch is applied on this new branch with ``git-am``;
+- a tag called ``${F_OPNFV_TAG}`` at ``nightly/HEAD``;
+- for each (sub)directory of ``patches/<sub-project>``, another pair of tags
+ ``${F_OPNFV_TAG}-<sub-directory>-fuel/patch-root`` and
+ ``${F_OPNFV_TAG}-<sub-directory>-fuel/patch`` are also created;
- .. code-block:: bash
+.. code-block:: console
- $ make patches-import
+ developer@machine:~/fuel$ make -C mcp/patches patches-import
Modify sub-projects for whatever you need
-----------------------------------------
-Commit your changes when you want them taken into account in the build.
+To add/change OPNFV-specific patches for a sub-project:
-Re-create patches
+- commit your changes inside the git submodule(s);
+- move the git tag to the new reference so ``make patches-export`` will
+ pick up the new commit later;
+
+.. code-block:: console
+
+ developer@machine:~/fuel$ cd ./path/to/submodule
+ developer@machine:~/fuel/path/to/submodule$ # ...
+ developer@machine:~/fuel/path/to/submodule$ git commit
+ developer@machine:~/fuel/path/to/submodule$ git tag -f ${F_OPNFV_TAG}-fuel/patch
+
+Re-create Patches
-----------------
-Each commit on `opnfv-fuel` branch of each subproject will be
-exported to `patches/subproject/` via `git format-patch`.
+Each commit on ``nightly`` branch of each subproject will be
+exported to ``patches/subproject/`` via ``git format-patch``.
+
+.. NOTE::
+
+ Only commit submodule file changes when you need to bump upstream refs.
-NOTE: Only commit (-f) submodules when you need to bump upstream ref.
+.. WARNING::
-NOTE: DO NOT commit patched submodules!
+ DO NOT commit patched submodules!
- .. code-block:: bash
+.. code-block:: console
- $ make patches-export
+ developer@machine:~/fuel$ make -C mcp/patches patches-export patches-copyright
-Clean workbench branches and tags
+Clean Workbench Branches and Tags
---------------------------------
- .. code-block:: bash
+.. code-block:: console
- $ make clean
+ developer@machine:~/fuel$ make -C mcp/patches clean
-De-initialize submodules and force a clean clone
+De-initialize Submodules and Force a Clean Clone
------------------------------------------------
- .. code-block:: bash
+.. code-block:: console
- $ make deepclean
+ developer@machine:~/fuel$ make -C mcp/patches deepclean
-Sub-project maintenance
+Sub-project Maintenance
=======================
-Adding a new submodule
+Adding a New Submodule
----------------------
-If you need to add another subproject, you can do it with `git submodule`.
-Make sure that you specify branch (with `-b`), short name (with `--name`):
-
- .. code-block:: bash
-
- $ git submodule -b master add --name reclass-system-salt-model
- https://github.com/Mirantis/reclass-system-salt-model
- relative/path/to/submodule
-
-Working with remote tracking for upgrading Fuel components
-----------------------------------------------------------
-
-Enable remote tracking as described above, which at `make sub` will update
-ALL submodules (e.g. reclass-system-salt-model) to remote branch (set in
-.gitmodules) HEAD.
+If you need to add another subproject, you can do it with ``git submodule``.
+Make sure that you specify the branch (with ``-b``) and a short name (with ``--name``):

-* If upstream has NOT already tagged a new version, we can still work on
- our patches, make sure they apply etc., then check for new upstream
- changes (and that our patches still apply on top of them) by:
+.. code-block:: console
-* If upstream has already tagged a new version we want to pick up, checkout
- the new tag in each submodule:
+ developer@machine:~/fuel$ git submodule -b master add --name reclass-system-salt-model \
+ https://github.com/Mirantis/reclass-system-salt-model \
+ mcp/reclass/classes/system
-* Once satisfied with the patch and submodule changes, commit them:
+Working with Remote Tracking
+----------------------------
- - enforce FUEL_TRACK_REMOTES to "yes" if you want to constatly use the
- latest remote branch HEAD (as soon as upstream pushes a change on that
- branch, our next build will automatically include it - risk of our
- patches colliding with new upstream changes);
- - stage patch changes if any;
- - if submodule tags have been updated (relevant when remote tracking is
- disabled, i.e. we have a stable upstream baseline), add submodules;
+Enable remote tracking as described above; ``make sub`` will then update
+ALL submodules (e.g. ``reclass-system-salt-model``) to the ``HEAD`` of the
+remote branch set in ``.gitmodules``.
- .. code-block:: bash
+.. WARNING::
- $ make deepclean patches-import
- $ git submodule foreach 'git checkout <newtag>'
- $ make deepclean sub && git add -f relative/path/to/submodule
+   Enforce ``FUEL_TRACK_REMOTES`` to ``yes`` only if you want to constantly
+ use the latest remote branch ``HEAD`` (as soon as upstream pushes a change
+ on that branch, our next build will automatically include it - risk of our
+ patches colliding with new upstream changes) - for **ALL** submodules.
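A short usage sketch of the remote-tracking flow described above, combining the variable and make targets already shown in this README (run from the repository root):

    developer@machine:~/fuel$ export FUEL_TRACK_REMOTES=yes
    developer@machine:~/fuel$ make -C mcp/patches deepclean sub
    developer@machine:~/fuel$ make -C mcp/patches patches-import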
diff --git a/mcp/patches/config.mk b/mcp/patches/config.mk
index 260cbf829..5e5d3b327 100644
--- a/mcp/patches/config.mk
+++ b/mcp/patches/config.mk
@@ -1,5 +1,5 @@
##############################################################################
-# Copyright (c) 2015,2016,2017 Ericsson AB, Enea AB and others.
+# Copyright (c) 2018 Ericsson AB, Enea AB and others.
# stefan.k.berg@ericsson.com
# jonas.bjurel@ericsson.com
# All rights reserved. This program and the accompanying materials
@@ -18,6 +18,5 @@ F_GIT_DIR := $(shell git rev-parse --git-dir)
F_PATCH_DIR := $(shell pwd)
F_OPNFV_TAG := master-opnfv
-# for the patches applying purposes (empty git config in docker build container)
export GIT_COMMITTER_NAME?=Fuel OPNFV
export GIT_COMMITTER_EMAIL?=fuel@opnfv.org
diff --git a/mcp/patches/docker/0001-tasks.py-Allow-passing-extra-build-arg.patch b/mcp/patches/docker/0001-tasks.py-Allow-passing-extra-build-arg.patch
new file mode 100644
index 000000000..9abbfb0b2
--- /dev/null
+++ b/mcp/patches/docker/0001-tasks.py-Allow-passing-extra-build-arg.patch
@@ -0,0 +1,59 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Sun, 9 Sep 2018 23:18:39 +0200
+Subject: [PATCH] tasks.py: Allow passing extra --build-arg
+
+E.g. for build args A and B with values "a" and "b c" respectively:
+$ invoke build saltmaster-reclass --build-arg-extra='A="a" B="b c"'
+will be passed down as:
+--build-arg A="a" --build-arg B="b c"
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ tasks.py | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/tasks.py b/tasks.py
+index d6bf3bc..59e217d 100644
+--- a/tasks.py
++++ b/tasks.py
+@@ -3,6 +3,7 @@
+
+
+ from invoke import Collection, task
++from shlex import split
+ from string import Template
+ import re
+ import ast
+@@ -25,7 +26,7 @@ def all(ctx, dry=False, push=False, dry_targets=False, filter=None, **kwargs):
+ dry=dry, push=push, dry_targets=dry_targets, filter=filter, **kwargs)
+
+ @task
+-def build(ctx, target, require=[], dist='debian', dist_rel='stretch', salt=None, formula_rev=None, push=False, dry=False, dry_targets=False, **kwargs):
++def build(ctx, target, require=[], dist='debian', dist_rel='stretch', salt=None, formula_rev=None, push=False, dry=False, dry_targets=False, build_arg_extra='', **kwargs):
+
+ kwargs['dist'] = dist
+ kwargs['dist_rel'] = dist_rel
+@@ -35,6 +36,7 @@ def build(ctx, target, require=[], dist='debian', dist_rel='stretch', salt=None,
+ kwargs['require'] = require
+ kwargs['salt'] = salt
+ kwargs['target'] = target
++ kwargs['build_arg_extra'] = ' --build-arg '.join([''] + split(build_arg_extra.replace('"', '"\\"')))
+ # command formating + update
+ fmt = {'tag': ''}
+ fmt.update(ctx.dockermake)
+@@ -60,6 +62,7 @@ def build(ctx, target, require=[], dist='debian', dist_rel='stretch', salt=None,
+ \t--requires ${requires} \
+ \t--build-arg SALT_VERSION="${salt}" \
+ \t--build-arg SALT_FORMULA_VERSION="${formula_rev}" \
++ \t${build_arg_extra} \
+ \t${push} ${options} \
+ ${fin}""").safe_substitute(fmt)
+ ctx.run(cmd.replace(' ', ''))
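In short, the string passed via --build-arg-extra is tokenized with shlex (so quoted values keep their spaces) and each token is prefixed with --build-arg before being substituted into the docker-make template. Usage sketch taken from the commit message above:

    invoke build saltmaster-reclass --build-arg-extra='A="a" B="b c"'
    # ends up in the generated docker-make call as:
    #   --build-arg A="a" --build-arg B="b c"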
diff --git a/mcp/patches/docker/0002-OPNFV-tag-convention-alignment.patch b/mcp/patches/docker/0002-OPNFV-tag-convention-alignment.patch
new file mode 100644
index 000000000..683dc4600
--- /dev/null
+++ b/mcp/patches/docker/0002-OPNFV-tag-convention-alignment.patch
@@ -0,0 +1,78 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Tue, 21 Aug 2018 02:42:35 +0200
+Subject: [PATCH] OPNFV tag convention alignment
+
+* (arch)- prefix Docker tags
+ Hacky implementation that only detects 'arm64', otherwise it will
+ default to 'amd64'.
+ Note that Docker arch notation uses 'arm64' instead of 'aarch64'.
+* Override Docker tag to align with OPNFV format
+* use Docker repository: opnfv/fuel
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ invoke.yml | 2 +-
+ tasks.py | 7 +++++--
+ 2 files changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/invoke.yml b/invoke.yml
+index 0bd793e..291f4c7 100644
+--- a/invoke.yml
++++ b/invoke.yml
+@@ -65,7 +65,7 @@ target:
+
+ dockermake:
+ destination: 'images'
+- repository: 'docker.io/epcim/salt'
++ repository: 'docker.io/opnfv/fuel'
+ # options: '--no-cache'
+ # options: '--bust-cache reclass'
+ # options: '--registry-user epcim'
+diff --git a/tasks.py b/tasks.py
+index 59e217d..8afbde9 100644
+--- a/tasks.py
++++ b/tasks.py
+@@ -7,6 +7,7 @@ from shlex import split
+ from string import Template
+ import re
+ import ast
++import platform
+
+ # TODOs:
+ # - WIP - use namespace to expose images/targets as inoke tasks # image = Collection('image')
+@@ -26,8 +27,9 @@ def all(ctx, dry=False, push=False, dry_targets=False, filter=None, **kwargs):
+ dry=dry, push=push, dry_targets=dry_targets, filter=filter, **kwargs)
+
+ @task
+-def build(ctx, target, require=[], dist='debian', dist_rel='stretch', salt=None, formula_rev=None, push=False, dry=False, dry_targets=False, build_arg_extra='', **kwargs):
++def build(ctx, target, require=[], dist='debian', dist_rel='stretch', salt=None, formula_rev=None, opnfv_tag='latest', push=False, dry=False, dry_targets=False, build_arg_extra='', **kwargs):
+
++ kwargs['arch'] = 'arm64' if platform.machine() == 'aarch64' else 'amd64'
+ kwargs['dist'] = dist
+ kwargs['dist_rel'] = dist_rel
+ kwargs['dry'] = True if dry_targets or dry else False
+@@ -37,6 +39,7 @@ def build(ctx, target, require=[], dist='debian', dist_rel='stretch', salt=None,
+ kwargs['salt'] = salt
+ kwargs['target'] = target
+ kwargs['build_arg_extra'] = ' --build-arg '.join([''] + split(build_arg_extra.replace('"', '"\\"')))
++ kwargs['opnfv_tag'] = opnfv_tag
+ # command formating + update
+ fmt = {'tag': ''}
+ fmt.update(ctx.dockermake)
+@@ -58,7 +61,7 @@ def build(ctx, target, require=[], dist='debian', dist_rel='stretch', salt=None,
+ # execute
+ cmd = Template("""
+ ${dry}docker-make -f DockerMake.${dist}.yml -u ${repository}: --name ${target} \
+- \t-t ${dist}-${dist_rel}${tag} \
++ \t-t ${arch}-${opnfv_tag} \
+ \t--requires ${requires} \
+ \t--build-arg SALT_VERSION="${salt}" \
+ \t--build-arg SALT_FORMULA_VERSION="${formula_rev}" \
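The tag prefix is derived from the build host's machine architecture, mapping Python's platform.machine() value 'aarch64' to Docker's 'arm64' notation and defaulting to 'amd64' otherwise. A shell sketch of the same mapping (an illustration only; the patch does this in tasks.py):

    # aarch64 hosts are tagged arm64 (Docker notation); everything else amd64
    if [ "$(uname -m)" = "aarch64" ]; then arch=arm64; else arch=amd64; fi
    echo "${arch}-latest"   # matches the '-t ${arch}-${opnfv_tag}' template above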
diff --git a/mcp/patches/docker/0003-OPNFV-package-installation-Ubuntu-user.patch b/mcp/patches/docker/0003-OPNFV-package-installation-Ubuntu-user.patch
new file mode 100644
index 000000000..27df86e8d
--- /dev/null
+++ b/mcp/patches/docker/0003-OPNFV-package-installation-Ubuntu-user.patch
@@ -0,0 +1,112 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Wed, 22 Aug 2018 01:35:06 +0200
+Subject: [PATCH] OPNFV package installation, Ubuntu user
+
+* Install OpenSSH server (and client), so other OPNFV projects can
+ easily connect to the Salt master node;
+* Install 'salt-formula-gnocchi' distro package (empty git formula
+ workaround);
+* While at it, create 'ubuntu' user so other OPNFV projects don't
+ have to switch to 'root' login;
+* Preinstall `salt_minion_dependency_packages` and
+ `salt_minion_reclass_dependencies`;
+* Pin dockermake to v0.8 to allow using python3.5 for virtualenv,
+ since python 3.6 is not easily available for Ubuntu Xenial
+ jump/build hosts.
+* Pin jinja2 to 2.11.0 to bypass [1].
+
+[1] https://github.com/saltstack/salt/issues/46594
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ DockerMake.yml | 39 ++++++++++++++++++++++++++++++++++++++-
+ Pipfile | 4 ++--
+ 2 files changed, 40 insertions(+), 3 deletions(-)
+
+diff --git a/DockerMake.yml b/DockerMake.yml
+index 2c75586..f30024c 100644
+--- a/DockerMake.yml
++++ b/DockerMake.yml
+@@ -29,6 +29,7 @@ common-cleanup:
+
+ common:
+ build: |
++ ARG CACHE_INVALIDATE="0"
+ RUN echo "Layer with common packages" \
+ && eval ${LAYER_PKGUPDT} \
+ && apt-get upgrade -qy \
+@@ -94,7 +95,7 @@ salt-formulas:
+ build: |
+ ARG SALT_FORMULA_VERSION="master"
+ ENV SALT_FORMULA_VERSION $SALT_FORMULA_VERSION
+- ARG SALT_FORMULA_SOURCES="https://github.com/salt-formulas https://github.com/saltstack-formulas"
++ ARG SALT_FORMULA_SOURCES="https://github.com/salt-formulas"
+ ENV SALT_FORMULA_SOURCES $SALT_FORMULA_SOURCES
+ ARG SALT_FORMULAS_BASE="/srv/salt/formula"
+ ENV SALT_FORMULAS_BASE $SALT_FORMULAS_BASE
+@@ -108,6 +109,42 @@ salt-formulas:
+ && bash -c 'source /srv/salt/formula-fetch.sh && setupPyEnv && fetchAll' \
+ && eval ${LAYER_CLEANUP}
+
++opnfv:
++ build: |
++ # Some formulas have empty git repos, use the package version
++ ARG SALT_FORMULA_VERSION="nightly"
++ ENV SALT_FORMULA_VERSION $SALT_FORMULA_VERSION
++ ARG SALT_FORMULA_LIST="salt-formula-gnocchi"
++ ENV SALT_FORMULA_LIST $SALT_FORMULA_LIST
++ ENV APT_REPOSITORY "deb [arch=amd64] http://apt.mirantis.com/xenial ${SALT_FORMULA_VERSION} salt"
++ ENV APT_REPOSITORY_GPG "http://apt.mirantis.com/public.gpg"
++ RUN echo "Layer extra salt-formulas packages" \
++ && echo "$APT_REPOSITORY" | tee /etc/apt/sources.list.d/salt-formulas.list >/dev/null \
++ && curl -sL $APT_REPOSITORY_GPG | $SUDO apt-key add - \
++ && eval ${LAYER_PKGUPDT} \
++ && ${LAYER_INSTALL} ${SALT_FORMULA_LIST} -y --fix-missing -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" \
++ && eval ${LAYER_CLEANUP}
++ RUN echo "Layer with OPNFV packages" \
++ && eval ${LAYER_PKGUPDT} \
++ && ${LAYER_INSTALL} \
++ gawk \
++ inetutils-ping \
++ kmod \
++ net-tools \
++ openssh-server \
++ python-futures \
++ python-m2crypto \
++ python-msgpack \
++ python-netaddr \
++ python-oauth \
++ python-psutil \
++ python-yaml \
++ && useradd -m ubuntu \
++ && echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' > /etc/sudoers.d/ubuntu \
++ && python -m pip install -U jinja2==2.11.0 \
++ && eval ${LAYER_CLEANUP}
++
++
+ wheel:
+ requires:
+ - base
+diff --git a/Pipfile b/Pipfile
+index d3e8d66..340e125 100644
+--- a/Pipfile
++++ b/Pipfile
+@@ -7,8 +7,8 @@ name = "pypi"
+
+ [packages]
+ pygithub = "*"
+-docker-make = {git = "https://github.com/avirshup/DockerMake"}
+-dockermake = {git = "https://github.com/avirshup/DockerMake"}
++docker-make = {git = "https://github.com/avirshup/DockerMake", ref = "aeac230fd5ab468d806bf42b120aa97f91eb40a2"}
++dockermake = {git = "https://github.com/avirshup/DockerMake", ref = "aeac230fd5ab468d806bf42b120aa97f91eb40a2"}
+
+ [requires]
+ python_version = "3.6"
diff --git a/mcp/patches/docker/0004-reclass-Set-ignore_overwritten_missing_references.patch b/mcp/patches/docker/0004-reclass-Set-ignore_overwritten_missing_references.patch
new file mode 100644
index 000000000..0af1e6967
--- /dev/null
+++ b/mcp/patches/docker/0004-reclass-Set-ignore_overwritten_missing_references.patch
@@ -0,0 +1,34 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Tue, 20 Nov 2018 23:16:44 +0200
+Subject: [PATCH] reclass: Set ignore_overwritten_missing_references
+
+The recent changes in `reclass` 1.6.x allow configuring this new
+option; however the default for it is broken - see [1].
+
+[1] https://github.com/salt-formulas/reclass/issues/77
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ files/reclass/reclass-config.yml | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/files/reclass/reclass-config.yml b/files/reclass/reclass-config.yml
+index 00aba49..f4da04c 100644
+--- a/files/reclass/reclass-config.yml
++++ b/files/reclass/reclass-config.yml
+@@ -3,6 +3,7 @@ inventory_base_uri: /srv/salt/reclass
+ pretty_print: True
+ output: yaml
+
++ignore_overwritten_missing_references: True
+ ignore_class_notfound: True
+ ignore_class_regexp:
+ - 'service.*'
diff --git a/mcp/patches/docker/0005-Add-saltminion-maas-build-target.patch b/mcp/patches/docker/0005-Add-saltminion-maas-build-target.patch
new file mode 100644
index 000000000..889fdcd43
--- /dev/null
+++ b/mcp/patches/docker/0005-Add-saltminion-maas-build-target.patch
@@ -0,0 +1,108 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Wed, 30 Jan 2019 17:21:03 +0100
+Subject: [PATCH] Add saltminion-maas build target
+
+---
+ DockerMake.yml | 64 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ invoke.yml | 5 ++++
+ 2 files changed, 69 insertions(+)
+
+diff --git a/DockerMake.yml b/DockerMake.yml
+index f30024c..a23541c 100644
+--- a/DockerMake.yml
++++ b/DockerMake.yml
+@@ -50,6 +50,70 @@ common:
+ && eval ${LAYER_CLEANUP}
+
+
++maas:
++ requires:
++ - python
++ build: |
++ ENV container docker
++ ARG SALT_VERSION="stable"
++ ENV SALT_VERSION $SALT_VERSION
++ ARG SALT_BOOTSTRAP_OPTS="-PdX ${SALT_VERSION}"
++ ENV SALT_BOOTSTRAP_OPTS $SALT_BOOTSTRAP_OPTS
++ RUN echo "Layer with MaaS, salt-minion packages" \
++ && eval ${LAYER_PKGUPDT} \
++ && apt-get upgrade -qy \
++ && ${LAYER_INSTALL} maas-common systemd \
++ && find /etc/systemd/system \
++ /lib/systemd/system \
++ -path '*.wants/*' \
++ -not -name '*journald*' \
++ -not -name '*systemd-tmpfiles*' \
++ -not -name '*systemd-user-sessions*' \
++ -exec rm \{} \; \
++ && systemctl set-default multi-user.target \
++ && ${LAYER_INSTALL} \
++ at \
++ avahi-utils \
++ dbconfig-pgsql \
++ ipmitool \
++ iptables \
++ iptables-persistent \
++ iputils-ping \
++ kmod \
++ lsb-release \
++ maas-cli \
++ maas-dns \
++ maas-rack-controller \
++ maas-region-api \
++ postgresql \
++ python-futures \
++ python-m2crypto \
++ python-msgpack \
++ python-netaddr \
++ python-oauth \
++ python-pip \
++ python-psutil \
++ python-setuptools \
++ python-yaml \
++ sysfsutils \
++ tcpdump \
++ && apt-get download maas-region-controller \
++ && dpkg-deb --extract maas-region-controller*.deb maas-region-controller \
++ && dpkg-deb --control maas-region-controller*.deb maas-region-controller/DEBIAN \
++ && mkdir -p /var/lib/opnfv/etc \
++ && mv maas-region-controller/DEBIAN/postinst /var/lib/opnfv/maas-region-controller.postinst \
++ && dpkg-deb --build maas-region-controller \
++ && dpkg -i maas-region-controller.deb \
++ && rm -rf maas* \
++ && mv /var/lib/maas /var/lib/postgresql /var/lib/opnfv/ \
++ && mv /etc/maas /etc/ssh /var/lib/opnfv/etc/ \
++ && mv /var/lib/opnfv/maas-region-controller.postinst /var/lib/dpkg/info/ \
++ && python -m pip install -U jinja2==2.11.0 \
++ && curl -qL https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh | $SUDO sh -s -- -A cfg01 ${SALT_BOOTSTRAP_OPTS} \
++ && eval ${LAYER_CLEANUP}
++ ENTRYPOINT ["/bin/bash", "-c", "/entrypoint.sh && exec /lib/systemd/systemd"]
++
++
+ salt:
+ requires:
+ - python
+diff --git a/invoke.yml b/invoke.yml
+index 291f4c7..a2f4614 100644
+--- a/invoke.yml
++++ b/invoke.yml
+@@ -62,6 +62,11 @@ target:
+ # - salt
+ # require:
+ # - saltclass
++ saltminion-maas:
++ matrix:
++ - dist
++ require:
++ - maas
+
+ dockermake:
+ destination: 'images'
diff --git a/mcp/patches/docker/0006-Use-archive.repo.saltstack.com-repos.patch b/mcp/patches/docker/0006-Use-archive.repo.saltstack.com-repos.patch
new file mode 100644
index 000000000..1889e3a9b
--- /dev/null
+++ b/mcp/patches/docker/0006-Use-archive.repo.saltstack.com-repos.patch
@@ -0,0 +1,42 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Thu, 18 Jun 2020 16:33:35 +0200
+Subject: [PATCH] Use archive.repo.saltstack.com repos
+
+Saltstack packages for 2017.7 have been archived, so use the appropriate
+repository URL.
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ DockerMake.yml | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/DockerMake.yml b/DockerMake.yml
+index a23541c..5910613 100644
+--- a/DockerMake.yml
++++ b/DockerMake.yml
+@@ -109,7 +109,7 @@ maas:
+ && mv /etc/maas /etc/ssh /var/lib/opnfv/etc/ \
+ && mv /var/lib/opnfv/maas-region-controller.postinst /var/lib/dpkg/info/ \
+ && python -m pip install -U jinja2==2.11.0 \
+- && curl -qL https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh | $SUDO sh -s -- -A cfg01 ${SALT_BOOTSTRAP_OPTS} \
++ && curl -qL https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh | $SUDO sh -s -- -A cfg01 -R archive.repo.saltstack.com ${SALT_BOOTSTRAP_OPTS} \
+ && eval ${LAYER_CLEANUP}
+ ENTRYPOINT ["/bin/bash", "-c", "/entrypoint.sh && exec /lib/systemd/systemd"]
+
+@@ -127,7 +127,7 @@ salt:
+ RUN echo "Layer salt" \
+ && eval ${LAYER_PKGUPDT} \
+ && mkdir -p /var/run/salt /var/cache/salt /var/log/salt /etc/salt/pki/master/minions /srv/salt/formula /srv/salt/env \
+- && curl -qL https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh | $SUDO sh -s -- ${SALT_BOOTSTRAP_PKGS} -M -A localhost ${SALT_BOOTSTRAP_OPTS} \
++ && curl -qL https://raw.githubusercontent.com/saltstack/salt-bootstrap/stable/bootstrap-salt.sh | $SUDO sh -s -- ${SALT_BOOTSTRAP_PKGS} -R archive.repo.saltstack.com -M -A localhost ${SALT_BOOTSTRAP_OPTS} \
+ && if ! getent passwd salt > /dev/null;then useradd --system salt; fi \
+ && chown -R salt:salt /etc/salt /var/cache/salt /var/log/salt /var/run/salt \
+ && eval ${LAYER_CLEANUP}
diff --git a/mcp/patches/fuel-patch-copyright.template b/mcp/patches/fuel-patch-copyright.template
index e92458082..d50bdbe8a 100644
--- a/mcp/patches/fuel-patch-copyright.template
+++ b/mcp/patches/fuel-patch-copyright.template
@@ -1,5 +1,5 @@
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
:
: All rights reserved. This program and the accompanying materials
: are made available under the terms of the Apache License, Version 2.0
diff --git a/mcp/patches/patch.sh b/mcp/patches/patch.sh
deleted file mode 100755
index bb48dcd07..000000000
--- a/mcp/patches/patch.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash -e
-##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-
-CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
-
-if [ -r "$1" ]; then
- while IFS=': ' read -r p_dest p_file; do
- if ! patch --dry-run -Rd "${p_dest}" -r - -s -p1 < \
- "/root/fuel/mcp/patches/${p_file}" > /dev/null; then
- patch -d "${p_dest}" -p1 < "/root/fuel/mcp/patches/${p_file}"
- fi
- done < <(grep -vE '^#' "${1}" | grep -E "^.*${2}.*: ")
-fi
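The deleted helper used the usual reverse-dry-run trick to keep patching idempotent: if 'patch --dry-run -R' succeeds, the patch is already in place and nothing is done; only a failing reverse dry-run triggers the real forward apply. A minimal sketch with placeholder paths:

    # apply a patch only once, no matter how often the script is re-run
    if ! patch --dry-run -R -d /path/to/target -p1 -s < /path/to/fix.patch >/dev/null; then
        patch -d /path/to/target -p1 < /path/to/fix.patch
    fi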
diff --git a/mcp/patches/reclass-system-salt-model/0001-Use-keystone-v3-endpoints-by-default.patch b/mcp/patches/reclass-system-salt-model/0001-Use-keystone-v3-endpoints-by-default.patch
index 514ea59e4..f003c2031 100644
--- a/mcp/patches/reclass-system-salt-model/0001-Use-keystone-v3-endpoints-by-default.patch
+++ b/mcp/patches/reclass-system-salt-model/0001-Use-keystone-v3-endpoints-by-default.patch
@@ -20,10 +20,10 @@ Change-Id: I7e9a1b180f4e0ddb24ec72ed9f08c9e2580c7897
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/keystone/client/single.yml b/keystone/client/single.yml
-index a79ed7ec..86b4e09e 100644
+index b8ab7f1b..98cf359c 100644
--- a/keystone/client/single.yml
+++ b/keystone/client/single.yml
-@@ -4,7 +4,7 @@ classes:
+@@ -5,7 +5,7 @@ classes:
- system.keystone.client.service.glance
- system.keystone.client.service.heat
- system.keystone.client.service.heat-cfn
diff --git a/mcp/patches/reclass-system-salt-model/0003-system.repo-Pin-glusterfs-with-higher-prio.patch b/mcp/patches/reclass-system-salt-model/0003-system.repo-Pin-glusterfs-with-higher-prio.patch
deleted file mode 100644
index 91219f6fa..000000000
--- a/mcp/patches/reclass-system-salt-model/0003-system.repo-Pin-glusterfs-with-higher-prio.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-:
-: All rights reserved. This program and the accompanying materials
-: are made available under the terms of the Apache License, Version 2.0
-: which accompanies this distribution, and is available at
-: http://www.apache.org/licenses/LICENSE-2.0
-::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
-Date: Mon, 18 Jun 2018 15:53:31 +0200
-Subject: [PATCH] system.repo: Pin glusterfs with higher prio
-
-When both glusterfs PPA repo and MCP repos are used on the same node,
-the MCP repos used to take priority and install a predefined version
-no matter the configuration for GlusterFS PPA.
-
-Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
----
- linux/system/repo/glusterfs.yml | 4 ++++
- 1 file changed, 4 insertions(+)
-
-diff --git a/linux/system/repo/glusterfs.yml b/linux/system/repo/glusterfs.yml
-index fb331f0f..71d063ac 100644
---- a/linux/system/repo/glusterfs.yml
-+++ b/linux/system/repo/glusterfs.yml
-@@ -9,3 +9,7 @@ parameters:
- architectures: amd64
- key_id: 3FE869A9
- key_server: keyserver.ubuntu.com
-+ pin:
-+ - package: '*'
-+ pin: release o=LP-PPA-gluster-glusterfs-${_param:glusterfs_version}
-+ priority: 1100
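For context, the dropped pin rendered into an apt preferences stanza with a priority above 1000, which lets the GlusterFS PPA win even over already-installed package versions from the MCP repos. A sketch of the resulting stanza and how to verify it (the version suffix stands in for the glusterfs_version parameter):

    # apt preferences stanza generated from the removed pin (sketch only):
    #   Package: *
    #   Pin: release o=LP-PPA-gluster-glusterfs-5
    #   Pin-Priority: 1100
    apt-cache policy glusterfs-server   # shows which candidate apt would install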
diff --git a/mcp/patches/salt-formula-aodh/0001-Extend-apache-service-state.patch b/mcp/patches/salt-formula-aodh/0001-Extend-apache-service-state.patch
new file mode 100644
index 000000000..9bf85efa0
--- /dev/null
+++ b/mcp/patches/salt-formula-aodh/0001-Extend-apache-service-state.patch
@@ -0,0 +1,47 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Michael Polenchuk <mpolenchuk@mirantis.com>
+Date: Wed, 10 Jul 2019 10:18:58 +0400
+Subject: [PATCH] Extend apache service state
+
+diff --git a/aodh/server.sls b/aodh/server.sls
+index 04b0395..258e5fe 100644
+--- a/aodh/server.sls
++++ b/aodh/server.sls
+@@ -183,20 +183,16 @@ aodh_api_config:
+
+ {%- endif %}
+
+-aodh_apache_restart:
+- service.running:
+- - enable: true
+- - name: apache2
+- {%- if grains.get('noservices') %}
+- - onlyif: /bin/false
+- {%- endif %}
+- - watch:
+- - file: /etc/aodh/aodh.conf
+- {%- if pillar.get('apache', {}).get('server', {}).get('site', {}).aodh is defined %}
+- - apache_enable_aodh_wsgi
+- {%- else %}
+- - file: aodh_api_apache_config
+- {%- endif %}
++extend:
++ apache_service:
++ service.running:
++ - watch:
++ - file: /etc/aodh/aodh.conf
++ {%- if pillar.get('apache', {}).get('server', {}).get('site', {}).aodh is defined %}
++ - apache_enable_aodh_wsgi
++ {%- else %}
++ - file: aodh_api_apache_config
++ {%- endif %}
+
+ {%- endif %}
+
diff --git a/mcp/patches/salt-formula-cinder/0001-Support-stein-version.patch b/mcp/patches/salt-formula-cinder/0001-Support-stein-version.patch
new file mode 100644
index 000000000..bae93ce6f
--- /dev/null
+++ b/mcp/patches/salt-formula-cinder/0001-Support-stein-version.patch
@@ -0,0 +1,25 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Michael Polenchuk <mpolenchuk@mirantis.com>
+Date: Tue, 9 Jul 2019 14:23:14 +0400
+Subject: [PATCH] Support stein version
+
+diff --git a/cinder/controller.sls b/cinder/controller.sls
+index 31cb53c..f58e261 100644
+--- a/cinder/controller.sls
++++ b/cinder/controller.sls
+@@ -72,7 +72,7 @@ cinder_controller_packages:
+ {%- endif %}
+
+ {# Starting from ocata api running undder apache, so dedicated loggong.conf is not needed #}
+-{%- if controller.version not in ('ocata','pike','queens', 'rocky') %}
++{%- if controller.version not in ('ocata','pike','queens', 'rocky', 'stein') %}
+ {%- do cinder_log_services.append('cinder-api') %}
+ {%- endif %}
+
diff --git a/mcp/patches/salt-formula-horizon/0001-Support-stein-version.patch b/mcp/patches/salt-formula-horizon/0001-Support-stein-version.patch
new file mode 100644
index 000000000..3ae93638d
--- /dev/null
+++ b/mcp/patches/salt-formula-horizon/0001-Support-stein-version.patch
@@ -0,0 +1,35 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Tue, 9 Jul 2019 14:54:50 +0400
+Subject: [PATCH] Support stein version
+
+---
+ horizon/files/local_settings/stein_settings.py | 1 +
+ horizon/files/policy/stein | 1 +
+ 2 files changed, 2 insertions(+)
+ create mode 120000 horizon/files/local_settings/stein_settings.py
+ create mode 120000 horizon/files/policy/stein
+
+diff --git a/horizon/files/local_settings/stein_settings.py b/horizon/files/local_settings/stein_settings.py
+new file mode 120000
+index 0000000..183e9a8
+--- /dev/null
++++ b/horizon/files/local_settings/stein_settings.py
+@@ -0,0 +1 @@
++rocky_settings.py
+\ No newline at end of file
+diff --git a/horizon/files/policy/stein b/horizon/files/policy/stein
+new file mode 120000
+index 0000000..c8dbb8c
+--- /dev/null
++++ b/horizon/files/policy/stein
+@@ -0,0 +1 @@
++rocky
+\ No newline at end of file
diff --git a/mcp/patches/salt-formula-horizon/0002-Align-packages-with-Stein-reqs.patch b/mcp/patches/salt-formula-horizon/0002-Align-packages-with-Stein-reqs.patch
new file mode 100644
index 000000000..90a0057cb
--- /dev/null
+++ b/mcp/patches/salt-formula-horizon/0002-Align-packages-with-Stein-reqs.patch
@@ -0,0 +1,30 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Mon, 30 Dec 2019 16:15:04 +0100
+Subject: [PATCH] Align packages with Stein reqs
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ horizon/map.jinja | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/horizon/map.jinja b/horizon/map.jinja
+index 3779d69..0cebe0c 100644
+--- a/horizon/map.jinja
++++ b/horizon/map.jinja
+@@ -6,7 +6,7 @@
+
+ {%- if not salt['pillar.get']('horizon:server:cache:backend') %}
+ {%- if salt['pillar.get']('horizon:server:version') not in ['mitaka','newton'] %}
+- {%- set cache_pkgs = ['python-pylibmc'] %}
++ {%- set cache_pkgs = ['python3-pylibmc'] %}
+ {%- set default_cache_backend = 'django.core.cache.backends.memcached.PyLibMCCache' %}
+ {%- else %}
+ {% if grains['os_family'] == 'RedHat' %}
diff --git a/mcp/patches/0008-Handle-extra-environment-variables.patch b/mcp/patches/salt-formula-keystone/0001-Handle-extra-environment-variables.patch
index 8df3227f2..2108587b1 100644
--- a/mcp/patches/0008-Handle-extra-environment-variables.patch
+++ b/mcp/patches/salt-formula-keystone/0001-Handle-extra-environment-variables.patch
@@ -11,6 +11,9 @@ Date: Mon, 12 Mar 2018 17:43:09 +0400
Subject: [PATCH] Handle extra environment variables
Change-Id: Ieae46ac65041630759c82238a8a5ce0535c454b2
+---
+ keystone/files/keystonercv3 | 3 +++
+ 1 file changed, 3 insertions(+)
diff --git a/keystone/files/keystonercv3 b/keystone/files/keystonercv3
index 1b7f378..984c8a2 100644
diff --git a/mcp/patches/0015-Set-ovs-bridges-as-L3-interfaces.patch b/mcp/patches/salt-formula-linux/0001-Set-ovs-bridges-as-L3-interfaces.patch
index a7b366b94..9479a990b 100644
--- a/mcp/patches/0015-Set-ovs-bridges-as-L3-interfaces.patch
+++ b/mcp/patches/salt-formula-linux/0001-Set-ovs-bridges-as-L3-interfaces.patch
@@ -1,5 +1,5 @@
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
:
: All rights reserved. This program and the accompanying materials
: are made available under the terms of the Apache License, Version 2.0
@@ -10,21 +10,29 @@ From: Michael Polenchuk <mpolenchuk@mirantis.com>
Date: Wed, 28 Feb 2018 17:54:28 +0400
Subject: [PATCH] Set ovs bridges as L3 interfaces
-Change-Id: I1e83129cc184cf481bea21d7aa452bf60d9e0499
+---
+ linux/files/ovs_bridge | 22 ++++++++++++++++++++++
+ linux/files/ovs_port | 8 +++++++-
+ linux/network/interface.sls | 34 +++++++++++++++++++++++++++++++++-
+ 3 files changed, 62 insertions(+), 2 deletions(-)
+ create mode 100644 linux/files/ovs_bridge
diff --git a/linux/files/ovs_bridge b/linux/files/ovs_bridge
new file mode 100644
-index 0000000..c609e45
+index 0000000..4718b91
--- /dev/null
+++ b/linux/files/ovs_bridge
-@@ -0,0 +1,19 @@
-+auto {{ bridge_name }}
+@@ -0,0 +1,22 @@
++# With systemd, adding OVS bridges as 'auto' can cause race conditions
++# https://github.com/openvswitch/ovs/blob/master/debian/openvswitch-switch.README.Debian
++# auto {{ bridge_name }}
+allow-ovs {{ bridge_name }}
-+iface {{ bridge_name }} inet static
++iface {{ bridge_name }} inet {{ bridge.get('proto', 'static' if bridge.address is defined else 'manual') }}
+ ovs_type OVSBridge
++ {%- if bridge.address is defined %}
+ address {{ bridge.address }}
+ netmask {{ bridge.netmask }}
-+ mtu {{ bridge.get('mtu', '1500') }}
++ {%- endif %}
+ {%- if bridge.use_interfaces is defined %}
+ ovs_ports {{ bridge.use_interfaces|join(' ') }}
+ {%- endif %}
@@ -38,14 +46,15 @@ index 0000000..c609e45
+ dns-nameservers {{ bridge.name_servers | join(' ') }}
+ {%- endif %}
diff --git a/linux/files/ovs_port b/linux/files/ovs_port
-index 222ca8e..efb0307 100644
+index 222ca8e..41821b7 100644
--- a/linux/files/ovs_port
+++ b/linux/files/ovs_port
-@@ -1,6 +1,11 @@
+@@ -1,6 +1,12 @@
+-auto {{ port_name }}
+# With systemd, adding OVS bridges as 'auto' can cause race conditions
+# https://github.com/openvswitch/ovs/blob/master/debian/openvswitch-switch.README.Debian
++# OVS ports will be automatically ifup-ed when ifup-ing the OVS bridge
+# auto {{ port_name }}
--auto {{ port_name }}
allow-{{ port.bridge }} {{ port_name }}
iface {{ port_name }} inet {{ port.get('proto', 'manual') }}
+{%- if '.' in port_name %}
@@ -55,14 +64,13 @@ index 222ca8e..efb0307 100644
mtu {{ port.get('mtu', '1500') }}
ovs_bridge {{ port.bridge }}
diff --git a/linux/network/interface.sls b/linux/network/interface.sls
-index 180f912..dcb295b 100644
+index a39fc37..8bce092 100644
--- a/linux/network/interface.sls
+++ b/linux/network/interface.sls
-@@ -91,6 +91,34 @@ add_int_{{ int_name }}_to_ovs_dpdk_bridge_{{ interface_name }}:
+@@ -92,6 +92,35 @@ add_int_{{ int_name }}_to_ovs_dpdk_bridge_{{ interface_name }}:
ovs_bridge_{{ interface_name }}:
openvswitch_bridge.present:
- name: {{ interface_name }}
-+{%- if interface.get('proto', 'manual') == 'static' %}
+ file.managed:
+ - name: /etc/network/interfaces.u/ifcfg-{{ interface_name }}
+ - makedirs: True
@@ -82,14 +90,35 @@ index 180f912..dcb295b 100644
+
+ovs_bridge_up_{{ interface_name }}:
+ cmd.run:
-+ - name: ifup {{ interface_name }}
++ - name: ifup --ignore-errors {{ interface_name }}
+ - require:
+ - file: ovs_bridge_{{ interface_name }}
+ - openvswitch_bridge: ovs_bridge_{{ interface_name }}
+ - file: linux_interfaces_final_include
-+ - unless:
-+ - ip link show {{ interface_name }} | grep -q '\<UP\>'
-+{%- endif %}
++ {%- if network.noifupdown|d(false) or interface.noifupdown|d(false) %}
++ - onlyif: /bin/false
++ {%- else %}
++ - unless: grep -qFx up /sys/class/net/{{ interface_name }}/operstate
++ {%- endif %}
{# add linux network interface into OVS bridge #}
{%- for int_name, int in network.interface.items() %}
+@@ -102,7 +131,7 @@ ovs_bridge_{{ interface_name }}:
+
+ add_int_{{ int_name }}_to_ovs_bridge_{{ interface_name }}:
+ cmd.run:
+- - unless: ovs-vsctl show | grep {{ int_name }}
++ - unless: ovs-vsctl list-ports {{ interface_name }} | grep -qFx {{ int_name }}
+ - name: ovs-vsctl{%- if network.ovs_nowait %} --no-wait{%- endif %} add-port {{ interface_name }} {{ int_name }}
+
+ {%- endif %}
+@@ -176,6 +205,9 @@ ovs_port_{{ interface_name }}_line2:
+ ovs_port_up_{{ interface_name }}:
+ cmd.run:
+ - name: ifup {{ interface_name }}
++ {%- if network.noifupdown|d(false) or interface.noifupdown|d(false) %}
++ - onlyif: /bin/false
++ {%- endif %}
+ - require:
+ - file: ovs_port_{{ interface_name }}
+ - file: ovs_port_{{ interface_name }}_line1
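Editor's note (illustration only, not part of the patch): with the reworked ovs_bridge template above, a bridge that defines an address would be rendered into an interfaces(5) stanza roughly like the following; the bridge name, address and port are invented placeholders:

    allow-ovs br-mgmt
    iface br-mgmt inet static
        ovs_type OVSBridge
        address 10.20.0.2
        netmask 255.255.255.0
        ovs_ports eth1

Because the stanza uses `allow-ovs` instead of `auto`, the bridge is only brought up by the explicit `ifup --ignore-errors` state added above, or via `ifup --allow=ovs` (see the networking.service drop-in introduced by a later patch in this series).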
diff --git a/mcp/patches/salt-formula-linux/0002-network-Bring-in-basic-VPP-support.patch b/mcp/patches/salt-formula-linux/0002-network-Bring-in-basic-VPP-support.patch
new file mode 100644
index 000000000..5166be56a
--- /dev/null
+++ b/mcp/patches/salt-formula-linux/0002-network-Bring-in-basic-VPP-support.patch
@@ -0,0 +1,139 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Mon, 10 Dec 2018 16:16:26 +0100
+Subject: [PATCH] network: Bring in basic VPP support
+
+For now, we only care about DPDK-backed VPP ports.
+
+TODO:
+- README.rst: VPP port usage
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ linux/files/vpp_commands.txt | 4 +++
+ linux/files/vpp_startup.conf | 47 ++++++++++++++++++++++++++++++++++++
+ linux/network/dpdk.sls | 38 +++++++++++++++++++++++++++++
+ 3 files changed, 89 insertions(+)
+ create mode 100644 linux/files/vpp_commands.txt
+ create mode 100644 linux/files/vpp_startup.conf
+
+diff --git a/linux/files/vpp_commands.txt b/linux/files/vpp_commands.txt
+new file mode 100644
+index 0000000..2ce4726
+--- /dev/null
++++ b/linux/files/vpp_commands.txt
+@@ -0,0 +1,4 @@
++{%- from "linux/map.jinja" import network with context %}
++{%- if network.vpp.commands is defined %}
++{{ network.vpp.commands }}
++{%- endif %}
+diff --git a/linux/files/vpp_startup.conf b/linux/files/vpp_startup.conf
+new file mode 100644
+index 0000000..fcfc1fd
+--- /dev/null
++++ b/linux/files/vpp_startup.conf
+@@ -0,0 +1,47 @@
++{%- from "linux/map.jinja" import network with context %}
++unix {
++ cli-listen /run/vpp/cli.sock
++ log /var/log/vpp.log
++ full-coredump
++ nodaemon
++ startup-config /etc/vpp/commands.txt
++{%- if network.vpp.gid is defined %}
++ gid {{ network.vpp.gid }}
++{%- endif %}
++}
++api-trace {
++ on
++}
++{%- if network.vpp.gid is defined %}
++api-segment {
++ gid {{ network.vpp.gid }}
++}
++{%- endif %}
++cpu {
++{%- if network.vpp.main_core is defined %}
++ main-core {{ network.vpp.main_core }}
++{%- endif %}
++{%- if network.vpp.corelist_workers is defined %}
++ corelist-workers {{ network.vpp.corelist_workers }}
++{%- endif %}
++{%- if network.vpp.skip_core is defined %}
++ skip-core {{ network.vpp.skip_core }}
++{%- endif %}
++{%- if network.vpp.workers is defined %}
++ workers {{ network.vpp.workers }}
++{%- endif %}
++}
++dpdk {
++{%- if network.vpp.decimal_interface_names is defined %}
++ decimal-interface-names
++{%- endif %}
++{%- if network.vpp.dpdk_socket_mem is defined %}
++ socket-mem {{ network.vpp.dpdk_socket_mem }}
++{%- endif %}
++ ## Whitelist specific interface by specifying PCI address
++{%- for interface_name, interface in network.interface.items() %}
++{%- if 'dpdk_vpp_port' in interface.type and interface.pci is defined %}
++ dev {{ interface.pci }}
++{%- endif %}
++{%- endfor %}
++}
+diff --git a/linux/network/dpdk.sls b/linux/network/dpdk.sls
+index 786f7c8..09453c6 100644
+--- a/linux/network/dpdk.sls
++++ b/linux/network/dpdk.sls
+@@ -32,6 +32,45 @@ linux_network_dpdk_service:
+ - watch:
+ - file: /etc/dpdk/interfaces
+
++{%- if network.vpp is defined %}
++
++vpp_pkgs:
++ pkg.installed:
++ - pkgs:
++ - vpp
++ - vpp-plugin-core
++ - vpp-plugin-dpdk
++ - bridge-utils
++
++/etc/vpp/commands.txt:
++ file.managed:
++ - source: salt://linux/files/vpp_commands.txt
++ - template: jinja
++ - require:
++ - pkg: vpp_pkgs
++
++/etc/vpp/startup.conf:
++ file.managed:
++ - source: salt://linux/files/vpp_startup.conf
++ - template: jinja
++ - require:
++ - pkg: vpp_pkgs
++
++/etc/sysctl.d/80-vpp.conf:
++ file.managed:
++ - contents:
++ - '# Disabled by salt-formula-linux'
++
++linux_network_vpp_service:
++ service.running:
++ - enable: true
++ - name: vpp
++ - watch:
++ - file: /etc/vpp/startup.conf
++ - file: /etc/vpp/commands.txt
++
++{%- endif %}
++
+ {%- if network.openvswitch is defined %}
+
+ openvswitch_dpdk_pkgs:
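Editor's note: a rough sketch of the pillar data the new VPP bits expect, with key names matching the template lookups above; all values and the dpdk0 interface name are invented for illustration:

    linux:
      network:
        vpp:
          main_core: 1
          corelist_workers: "2,3"
          dpdk_socket_mem: "1024,1024"
          gid: vpp
        interface:
          dpdk0:
            type: dpdk_vpp_port
            pci: "0000:05:00.0"

With `linux:network:vpp` present, dpdk.sls installs the VPP packages, renders /etc/vpp/startup.conf and /etc/vpp/commands.txt, and enables the vpp service; the optional `commands` key feeds verbatim CLI lines into commands.txt.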
diff --git a/mcp/patches/salt-formula-linux/0003-OVS-Fix-Debian-service-deps-OVS-bridge-ifup.patch b/mcp/patches/salt-formula-linux/0003-OVS-Fix-Debian-service-deps-OVS-bridge-ifup.patch
new file mode 100644
index 000000000..70b699a76
--- /dev/null
+++ b/mcp/patches/salt-formula-linux/0003-OVS-Fix-Debian-service-deps-OVS-bridge-ifup.patch
@@ -0,0 +1,97 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Fri, 25 Jan 2019 21:20:04 +0100
+Subject: [PATCH] OVS: Fix Debian service deps, OVS bridge ifup
+
+Fix OVS vs Linux bridge race condition:
+- OVS services should start before networking service;
+- OVS services should start after DPDK service (if present);
+- networking service should ifup OVS bridges (and automatically their
+ OVS ports if present) after Linux interfaces/bridges;
+- br-prv should be handled by OVS to avoid another race condition,
+ so use 'allow-ovs br-prv' instead of 'auto';
+
+NOTE:
+- OVS ports/bridges should NOT be configured as auto for this to work;
+- OVS services correspond to OVS 2.9 or newer, since before that
+ ovsdb-server was called openvswitch-nonetwork.
+- we also need to take care of one particularly ugly circular dep:
+ ovs-vswitchd --> ovsdb-server -(default dep)-> sysinit.target -->
+ cloud-init.service --> networking.service --> ovs-vswitchd
+ We'll just set 'DefaultDependencies=no' for ovs services, although
+ this might require explicitly adding back some of the indirect
+ dependencies of sysinit.target.
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ linux/network/dpdk.sls | 2 +-
+ linux/network/interface.sls | 36 ++++++++++++++++++++++++++++++++++++
+ 2 files changed, 37 insertions(+), 1 deletion(-)
+
+diff --git a/linux/network/dpdk.sls b/linux/network/dpdk.sls
+index 09453c6..e866909 100644
+--- a/linux/network/dpdk.sls
++++ b/linux/network/dpdk.sls
+@@ -199,7 +199,7 @@ linux_network_dpdk_bridge_interface_{{ interface_name }}:
+ /etc/network/interfaces.u/ifcfg-{{ interface_name }}:
+ file.managed:
+ - contents: |
+- auto {{ interface_name }}
++ allow-ovs {{ interface_name }}
+ iface {{ interface_name }} inet static
+ address {{ interface.address }}
+ netmask {{ interface.netmask }}
+diff --git a/linux/network/interface.sls b/linux/network/interface.sls
+index 8bce092..11db5be 100644
+--- a/linux/network/interface.sls
++++ b/linux/network/interface.sls
+@@ -24,6 +24,42 @@ linux_network_bridge_pkgs:
+ - pkgs: {{ network.bridge_pkgs }}
+ {%- endif %}
+
++{%- if network.bridge == 'openvswitch' and grains.os_family == 'Debian' %}
++
++{# create drop-in dpdk, networking dependency for ovs services #}
++/etc/systemd/system/ovsdb-server.service.d/override.conf:
++ file.managed:
++ - makedirs: true
++ - require:
++ - pkg: linux_network_bridge_pkgs
++ - contents: |
++ [Unit]
++ After=dpdk.service
++ Before=networking.service
++ DefaultDependencies=no
++
++/etc/systemd/system/ovs-vswitchd.service.d/override.conf:
++ file.managed:
++ - makedirs: true
++ - require:
++ - pkg: linux_network_bridge_pkgs
++ - contents: |
++ [Unit]
++ Before=networking.service
++ DefaultDependencies=no
++
++{# Debian/Ubuntu won't automatically ifup OVS bridges, workaround #}
++/etc/systemd/system/networking.service.d/ovs_workaround.conf:
++ file.managed:
++ - makedirs: true
++ - require:
++ - pkg: linux_network_bridge_pkgs
++ - contents: |
++ [Service]
++ ExecStart=/sbin/ifup --allow=ovs -a --read-environment
++
++{%- endif %}
++
+ {%- endif %}
+
+ {%- for f in network.get('concat_iface_files', []) %}
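Editor's note: a quick way to sanity-check the resulting unit ordering on a deployed Debian/Ubuntu node (manual verification only, not part of the patch):

    systemctl cat ovs-vswitchd.service      # shows the Before=networking.service drop-in
    systemd-analyze critical-chain networking.service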
diff --git a/mcp/patches/salt-formula-linux/0004-dpdk-Handle-per-port-memory-model.patch b/mcp/patches/salt-formula-linux/0004-dpdk-Handle-per-port-memory-model.patch
new file mode 100644
index 000000000..b54650007
--- /dev/null
+++ b/mcp/patches/salt-formula-linux/0004-dpdk-Handle-per-port-memory-model.patch
@@ -0,0 +1,25 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Michael Polenchuk <mpolenchuk@mirantis.com>
+Date: Tue, 30 Apr 2019 12:59:54 +0400
+Subject: [PATCH] [dpdk] Handle per port memory model
+
+
+diff --git a/linux/network/dpdk.sls b/linux/network/dpdk.sls
+index 786f7c8..c6f3020 100644
+--- a/linux/network/dpdk.sls
++++ b/linux/network/dpdk.sls
+@@ -49,6 +49,7 @@ linux_network_dpdk_ovs_service:
+ - unless: 'ovs-vsctl get Open_vSwitch . other_config | grep "dpdk-init=\"true\""'
+
+ {%- set ovs_options = [
++ 'per-port-memory="'+network.openvswitch.per_port_memory|d('false')+'"',
+ "pmd-cpu-mask=\""+network.openvswitch.pmd_cpu_mask+"\"",
+ "dpdk-socket-mem=\""+network.openvswitch.dpdk_socket_mem+"\"",
+ "dpdk-lcore-mask=\""+network.openvswitch.dpdk_lcore_mask+"\"",
diff --git a/mcp/patches/salt-formula-linux/0005-network-RHEL-Set-bridge-for-member-interfaces.patch b/mcp/patches/salt-formula-linux/0005-network-RHEL-Set-bridge-for-member-interfaces.patch
new file mode 100644
index 000000000..67004601c
--- /dev/null
+++ b/mcp/patches/salt-formula-linux/0005-network-RHEL-Set-bridge-for-member-interfaces.patch
@@ -0,0 +1,44 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Tue, 11 Jun 2019 03:57:29 +0200
+Subject: [PATCH] network: RHEL: Set bridge for member interfaces
+
+For RHEL, bridge member interfaces need to be passed the name of
+their bridge, so look up all bridges, find the one containing the
+current interface (if any) and use it when calling
+`network.managed`.
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ linux/network/interface.sls | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/linux/network/interface.sls b/linux/network/interface.sls
+index 11db5be..ccac6d7 100644
+--- a/linux/network/interface.sls
++++ b/linux/network/interface.sls
+@@ -303,6 +303,17 @@ linux_interface_{{ interface_name }}:
+ - cmd: ovs_port_up_{{ network }}
+ {%- endfor %}
+ {%- endif %}
++ {%- if grains.os_family == 'RedHat' %}
++ {%- set br_todo = [] %}
++ {%- for br_name, br_info in network.interface.items() %}
++ {%- if br_info.type == 'bridge' and interface_name in br_info.get('use_interfaces', []) %}
++ {%- do br_todo.append(br_info.get('name', br_name)) %}
++ {%- endif %}
++ {%- endfor %}
++ {%- if br_todo %}
++ - bridge: {{ br_todo | first }}
++ {%- endif %}
++ {%- endif %}
+ {%- if interface.type == 'bridge' %}
+ - bridge: {{ interface_name }}
+ - delay: 0
diff --git a/mcp/patches/salt-formula-linux/0006-dpdk-Remove-invalid-vhost-options.patch b/mcp/patches/salt-formula-linux/0006-dpdk-Remove-invalid-vhost-options.patch
new file mode 100644
index 000000000..acc3c181f
--- /dev/null
+++ b/mcp/patches/salt-formula-linux/0006-dpdk-Remove-invalid-vhost-options.patch
@@ -0,0 +1,25 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Michael Polenchuk <mpolenchuk@mirantis.com>
+Date: Tue, 30 Jul 2019 13:24:41 +0400
+Subject: [PATCH] [dpdk] Remove invalid vhost options
+
+diff --git a/linux/network/dpdk.sls b/linux/network/dpdk.sls
+index 786f7c8..c929e43 100644
+--- a/linux/network/dpdk.sls
++++ b/linux/network/dpdk.sls
+@@ -52,7 +52,7 @@ linux_network_dpdk_ovs_service:
+ "pmd-cpu-mask=\""+network.openvswitch.pmd_cpu_mask+"\"",
+ "dpdk-socket-mem=\""+network.openvswitch.dpdk_socket_mem+"\"",
+ "dpdk-lcore-mask=\""+network.openvswitch.dpdk_lcore_mask+"\"",
+- "dpdk-extra=\"-n "+network.openvswitch.memory_channels+" --vhost-owner libvirt-qemu:kvm --vhost-perm 0664\""
++ "dpdk-extra=\"-n "+network.openvswitch.memory_channels+"\""
+ ]
+ %}
+
diff --git a/mcp/patches/0002-maas-region-skip-credentials-update.patch b/mcp/patches/salt-formula-maas/0001-maas-region-skip-credentials-update.patch
index 1d226e4cd..eb607cf81 100644
--- a/mcp/patches/0002-maas-region-skip-credentials-update.patch
+++ b/mcp/patches/salt-formula-maas/0001-maas-region-skip-credentials-update.patch
@@ -1,5 +1,5 @@
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
:
: All rights reserved. This program and the accompanying materials
: are made available under the terms of the Apache License, Version 2.0
@@ -17,12 +17,14 @@ updating credentials.
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
---
+ maas/region.sls | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/maas/region.sls b/maas/region.sls
-index d3227ca..8a2243d 100644
+index 066490c..de5054a 100644
--- a/maas/region.sls
+++ b/maas/region.sls
-@@ -6,10 +6,9 @@
+@@ -6,10 +6,9 @@ maas_region_packages:
- names: {{ region.pkgs }}
/etc/maas/regiond.conf:
diff --git a/mcp/patches/0010-maas-region-allow-timeout-override.patch b/mcp/patches/salt-formula-maas/0002-maas-region-allow-timeout-override.patch
index c6f9e3a52..3d8deff60 100644
--- a/mcp/patches/0010-maas-region-allow-timeout-override.patch
+++ b/mcp/patches/salt-formula-maas/0002-maas-region-allow-timeout-override.patch
@@ -1,5 +1,5 @@
::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
:
: All rights reserved. This program and the accompanying materials
: are made available under the terms of the Apache License, Version 2.0
@@ -35,18 +35,20 @@ node's PXE physical interface, but that overcomplicates things.
blocksize to be 1008. We can force it to be 1464 and gain some performance due
to MTU beeing 1500 (i.e. allow bigger packets).
-
JIRA: FUEL-316
Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
---
+ maas/region.sls | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
diff --git a/maas/region.sls b/maas/region.sls
+index de5054a..4a7f6cc 100644
--- a/maas/region.sls
+++ b/maas/region.sls
-@@ -19,6 +19,46 @@
- - require:
- - pkg: maas_region_packages
+@@ -38,6 +38,36 @@ restore_maas_database_{{ region.database.name }}:
+
+ {%- endif %}
+maas_timeout_commissioning:
+ file.replace:
@@ -68,16 +70,6 @@ diff --git a/maas/region.sls b/maas/region.sls
+ - require_in:
+ - service: maas_region_services
+
-+maas_ip_blksize_force:
-+ file.replace:
-+ - name: "/usr/lib/python3/dist-packages/tftp/bootstrap.py"
-+ - pattern: 'int_blksize = min\(\(int_blksize, MAX_BLOCK_SIZE\)\)'
-+ - repl: 'int_blksize = 1464'
-+ - require:
-+ - pkg: maas_region_packages
-+ - require_in:
-+ - service: maas_region_services
-+
+maas_interface_default_mode_dhcp:
+ file.replace:
+ - name: "/usr/lib/python3/dist-packages/maasserver/models/node.py"
diff --git a/mcp/patches/salt-formula-maas/0003-Extend-wait_for-maas.py-wait_for_-attempts-arg.patch b/mcp/patches/salt-formula-maas/0003-Extend-wait_for-maas.py-wait_for_-attempts-arg.patch
new file mode 100644
index 000000000..efd7cc2fd
--- /dev/null
+++ b/mcp/patches/salt-formula-maas/0003-Extend-wait_for-maas.py-wait_for_-attempts-arg.patch
@@ -0,0 +1,268 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Sun, 23 Sep 2018 03:57:27 +0200
+Subject: [PATCH] Extend wait_for maas.py, wait_for_* attempts arg
+
+1. maas.py: Extend wait_for states with timeout param
+
+Extend the wait_for states with a timeout parameter.
+The timeout value is taken from reclass pillar data if
+defined. Otherwise, the states use the default value.
+Based on Ting's PR [1], slightly refactored.
+
+2. maas.py: Extend `req_status` support to multiple values
+
+Previously, req_status could be one of the MaaS status strings, e.g.
+'Ready'. Extend matching to '|'-separated statuses (e.g.
+'Ready|Deployed') to allow idempotency in MaaS machine commissioning
+and deployment cycles.
+
+Also provide a `maas.machines.wait_for_ready_or_deployed` sls.
+
+3. maas.py: wait_for_*: Add attempts arg
+
+Introduce a new parameter that sets the maximum number of automatic
+recovery attempts for common failures during machine operations.
+If not present in pillar data, it defaults to 0 (OFF).
+
+Common error states, possible causes and automatic recovery patterns:
+* New
+  - usually indicates issues with BMC connectivity (no network route,
+    but on rare occasions it happens due to the MaaS API being flaky);
+  - fix: delete the machine, (re)process machine definitions;
+* Failed commissioning
+ - various causes, usually a simple retry works;
+ - fix: delete the machine, (re)process machine definitions;
+* Failed testing
+ - incompatible hardware, missing drivers etc.
+ - usually consistent and board-specific;
+ - fix: override failed testing
+* Allocated
+  - on rare occasions nodes get stuck in this state instead of 'Deploy';
+ - fix: mark-broken, mark-fixed, if it failed at least once before
+ perform a fio test (fixes another unrelated spurious issue with
+ encrypted disks from previous deployments), (re)deploy machines;
+* Failed deployment
+ - various causes, usually a simple retry works;
+ - fix: same as for nodes stuck in 'Allocated';
+
+[1] https://github.com/salt-formulas/salt-formula-maas/pull/34
+
+Change-Id: Ifb7dd9f8fcfbbed557e47d8fdffb1f963604fb15
+Signed-off-by: ting wu <ting.wu@enea.com>
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ README.rst | 9 +++-
+ _modules/maas.py | 52 +++++++++++++++++---
+ maas/machines/wait_for_deployed.sls | 2 +
+ maas/machines/wait_for_ready.sls | 3 ++
+ maas/machines/wait_for_ready_or_deployed.sls | 15 ++++++
+ maas/map.jinja | 4 ++
+ tests/pillar/maas_region.sls | 4 ++
+ 7 files changed, 81 insertions(+), 8 deletions(-)
+ create mode 100644 maas/machines/wait_for_ready_or_deployed.sls
+
+diff --git a/README.rst b/README.rst
+index 20da43e..78d8aef 100644
+--- a/README.rst
++++ b/README.rst
+@@ -622,12 +622,16 @@ Wait for status of selected machine's:
+ machines:
+ - kvm01
+ - kvm02
+- timeout: 1200 # in seconds
++ timeout: {{ region.timeout.ready }}
++ attempts: {{ region.timeout.attempts }}
+ req_status: "Ready"
+ - require:
+ - cmd: maas_login_admin
+ ...
+
++The timeout setting is taken from the reclass pillar data.
++If the pillar data is not defined, it will use the default value.
++
+ If module run w/\o any extra paremeters,
+ ``wait_for_machines_ready`` will wait for defined in salt
+ machines. In this case, it is usefull to skip some machines:
+@@ -642,7 +646,8 @@ machines. In this case, it is usefull to skip some machines:
+ module.run:
+ - name: maas.wait_for_machine_status
+ - kwargs:
+- timeout: 1200 # in seconds
++ timeout: {{ region.timeout.deployed }}
++ attempts: {{ region.timeout.attempts }}
+ req_status: "Deployed"
+ ignore_machines:
+ - kvm01 # in case it's broken or whatever
+diff --git a/_modules/maas.py b/_modules/maas.py
+index c02f104..bb70576 100644
+--- a/_modules/maas.py
++++ b/_modules/maas.py
+@@ -670,7 +670,7 @@ class DeployMachines(MaasObject):
+ if machine['status'] == self.DEPLOYED:
+ return
+ if machine['status'] != self.READY:
+- raise Exception('Not in ready state')
++ return
+ data = {
+ 'system_id': machine['system_id'],
+ }
+@@ -921,6 +921,7 @@ class MachinesStatus(MaasObject):
+ req_status: string; Polling status
+ machines: list; machine names
+ ignore_machines: list; machine names
++ attempts: max number of automatic hard retries
+ :ret: True
+ Exception - if something fail/timeout reached
+ """
+@@ -929,6 +930,8 @@ class MachinesStatus(MaasObject):
+ req_status = kwargs.get("req_status", "Ready")
+ to_discover = kwargs.get("machines", None)
+ ignore_machines = kwargs.get("ignore_machines", None)
++ attempts = kwargs.get("attempts", 0)
++ failed_attempts = {}
+ if not to_discover:
+ try:
+ to_discover = __salt__['config.get']('maas')['region'][
+@@ -943,11 +946,46 @@ class MachinesStatus(MaasObject):
+ while len(total) <= len(to_discover):
+ for m in to_discover:
+ for discovered in MachinesStatus.execute()['machines']:
+- if m == discovered['hostname'] and \
+- discovered['status'].lower() == req_status.lower():
+- if m in total:
++ if m == discovered['hostname'] and m in total:
++ req_status_list = req_status.lower().split('|')
++ if discovered['status'].lower() in req_status_list:
+ total.remove(m)
+-
++ elif attempts > 0 and (m not in failed_attempts or
++ failed_attempts[m] < attempts):
++ status = discovered['status']
++ sid = discovered['system_id']
++ cls._maas = _create_maas_client()
++ if status in ['Failed commissioning', 'New']:
++ LOG.info('Machine {0} deleted'.format(sid))
++ cls._maas.delete(u'api/2.0/machines/{0}/'
++ .format(sid))
++ Machine().process()
++ elif status in ['Failed testing']:
++ data = {}
++ LOG.info('Machine {0} overriden'.format(sid))
++ action = 'override_failed_testing'
++ cls._maas.post(u'api/2.0/machines/{0}/'
++ .format(sid), action, **data)
++ elif status in ['Failed deployment', 'Allocated']:
++ data = {}
++ LOG.info('Machine {0} mark broken'.format(sid))
++ cls._maas.post(u'api/2.0/machines/{0}/'
++ .format(sid), 'mark_broken', **data)
++ time.sleep(poll_time)
++ LOG.info('Machine {0} mark fixed'.format(sid))
++ cls._maas.post(u'api/2.0/machines/{0}/'
++ .format(sid), 'mark_fixed', **data)
++ if m in failed_attempts and failed_attempts[m]:
++ LOG.info('Machine {0} fio test'.format(sid))
++ data['testing_scripts'] = 'fio'
++ cls._maas.post(u'api/2.0/machines/{0}/'
++ .format(sid), 'commission', **data)
++ DeployMachines().process()
++ else:
++ continue
++ if m not in failed_attempts:
++ failed_attempts[m] = 0
++ failed_attempts[m] = failed_attempts[m] + 1
+ if len(total) <= 0:
+ LOG.debug(
+ "Machines:{} are:{}".format(to_discover, req_status))
+@@ -959,7 +997,9 @@ class MachinesStatus(MaasObject):
+ "Waiting status:{} "
+ "for machines:{}"
+ "\nsleep for:{}s "
+- "Timeout:{}s".format(req_status, total, poll_time, timeout))
++ "Timeout:{}s ({}s left)"
++ .format(req_status, total, poll_time, timeout,
++ timeout - (time.time() - started_at)))
+ time.sleep(poll_time)
+
+
+diff --git a/maas/machines/wait_for_deployed.sls b/maas/machines/wait_for_deployed.sls
+index ebeedac..a646fdb 100644
+--- a/maas/machines/wait_for_deployed.sls
++++ b/maas/machines/wait_for_deployed.sls
+@@ -9,5 +9,7 @@ wait_for_machines_deployed:
+ - name: maas.wait_for_machine_status
+ - kwargs:
+ req_status: "Deployed"
++ timeout: {{ region.timeout.deployed }}
++ attempts: {{ region.timeout.attempts }}
+ - require:
+ - cmd: maas_login_admin
+diff --git a/maas/machines/wait_for_ready.sls b/maas/machines/wait_for_ready.sls
+index c5d3c28..d8a2963 100644
+--- a/maas/machines/wait_for_ready.sls
++++ b/maas/machines/wait_for_ready.sls
+@@ -7,5 +7,8 @@ maas_login_admin:
+ wait_for_machines_ready:
+ module.run:
+ - name: maas.wait_for_machine_status
++ - kwargs:
++ timeout: {{ region.timeout.ready }}
++ attempts: {{ region.timeout.attempts }}
+ - require:
+ - cmd: maas_login_admin
+diff --git a/maas/machines/wait_for_ready_or_deployed.sls b/maas/machines/wait_for_ready_or_deployed.sls
+new file mode 100644
+index 0000000..db3dcc4
+--- /dev/null
++++ b/maas/machines/wait_for_ready_or_deployed.sls
+@@ -0,0 +1,15 @@
++{%- from "maas/map.jinja" import region with context %}
++
++maas_login_admin:
++ cmd.run:
++ - name: "maas-region apikey --username {{ region.admin.username }} > /var/lib/maas/.maas_credentials"
++
++wait_for_machines_ready_or_deployed:
++ module.run:
++ - name: maas.wait_for_machine_status
++ - kwargs:
++ req_status: "Ready|Deployed"
++ timeout: {{ region.timeout.ready }}
++ attempts: {{ region.timeout.attempts }}
++ - require:
++ - cmd: maas_login_admin
+diff --git a/maas/map.jinja b/maas/map.jinja
+index 0671435..1e6ac07 100644
+--- a/maas/map.jinja
++++ b/maas/map.jinja
+@@ -22,6 +22,10 @@ Debian:
+ bind:
+ host: 0.0.0.0
+ port: 80
++ timeout:
++ ready: 1200
++ deployed: 7200
++ attempts: 0
+ {%- endload %}
+
+ {%- set region = salt['grains.filter_by'](region_defaults, merge=salt['pillar.get']('maas:region', {})) %}
+diff --git a/tests/pillar/maas_region.sls b/tests/pillar/maas_region.sls
+index d3325eb..d710216 100644
+--- a/tests/pillar/maas_region.sls
++++ b/tests/pillar/maas_region.sls
+@@ -34,3 +34,7 @@ maas:
+ password: password
+ username: maas
+ salt_master_ip: 127.0.0.1
++ timeout:
++ deployed: 900
++ ready: 900
++ attempts: 2
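Editor's note: the knobs introduced here are driven from reclass pillar data shaped like the tests/pillar example above; map.jinja falls back to ready=1200, deployed=7200, attempts=0 when unset. The numbers below are illustrative:

    maas:
      region:
        timeout:
          ready: 1200
          deployed: 7200
          attempts: 2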
diff --git a/mcp/patches/salt-formula-maas/0004-curtin-Tune-default-salt-minion-config.patch b/mcp/patches/salt-formula-maas/0004-curtin-Tune-default-salt-minion-config.patch
new file mode 100644
index 000000000..978f4ecb6
--- /dev/null
+++ b/mcp/patches/salt-formula-maas/0004-curtin-Tune-default-salt-minion-config.patch
@@ -0,0 +1,43 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Mon, 5 Nov 2018 16:47:13 +0100
+Subject: [PATCH] curtin: Tune default salt-minion config
+
+* Sync AArch64 configuration with the changes introduced by commit
+ c6a12de on amd64
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ maas/files/curtin_userdata_arm64_generic_xenial | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/maas/files/curtin_userdata_arm64_generic_xenial b/maas/files/curtin_userdata_arm64_generic_xenial
+index af9a047..1b2e401 100644
+--- a/maas/files/curtin_userdata_arm64_generic_xenial
++++ b/maas/files/curtin_userdata_arm64_generic_xenial
+@@ -32,8 +32,16 @@ late_commands:
+ apt_03_update: ["curtin", "in-target", "--", "apt-get", "update"]
+ salt_01_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "salt-minion", "python-futures"]
+ salt_02_hostname_set: ["curtin", "in-target", "--", "echo", "{% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}"]
+- salt_03_hostname_get: ["curtin", "in-target", "--", "sh", "-c", "echo 'id: {% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}' >> /etc/salt/minion"]
+- salt_04_master: ["curtin", "in-target", "--", "sh", "-c", "echo 'master: {{ salt_master_ip }}' >> /etc/salt/minion"]
++ salt_03_hostname_get: ["curtin", "in-target", "--", "sh", "-c", "echo 'id: {% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}' >> /etc/salt/minion.d/minion.conf"]
++ salt_04_master: ["curtin", "in-target", "--", "sh", "-c", "echo 'master: {{ salt_master_ip }}' >> /etc/salt/minion.d/minion.conf"]
++ salt_05_max_event_size: ["curtin", "in-target", "--", "sh", "-c", "echo 'max_event_size: 100000000' >> /etc/salt/minion.d/minion.conf"]
++ salt_06_acceptance_wait_time_max: ["curtin", "in-target", "--", "sh", "-c", "echo 'acceptance_wait_time_max: 60' >> /etc/salt/minion.d/minion.conf"]
++ salt_07_acceptance_wait_time: ["curtin", "in-target", "--", "sh", "-c", "echo 'acceptance_wait_time: 10' >> /etc/salt/minion.d/minion.conf"]
++ salt_08_random_reauth_delay: ["curtin", "in-target", "--", "sh", "-c", "echo 'random_reauth_delay: 270' >> /etc/salt/minion.d/minion.conf"]
++ salt_09_recon_default: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_default: 1000' >> /etc/salt/minion.d/minion.conf"]
++ salt_10_recon_max: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_max: 60000' >> /etc/salt/minion.d/minion.conf"]
++ salt_11_recon_randomize: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_randomize: True' >> /etc/salt/minion.d/minion.conf"]
++ salt_12_auth_timeout: ["curtin", "in-target", "--", "sh", "-c", "echo 'auth_timeout: 60' >> /etc/salt/minion.d/minion.conf"]
+ {% raw %}
+ {{if third_party_drivers and driver}}
+ driver_00_key_get: curtin in-target -- sh -c "/bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg"
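Editor's note: taken together, the late_commands above leave the provisioned node with an /etc/salt/minion.d/minion.conf along these lines; the id and master values are deployment-specific placeholders:

    id: node01.example.org
    master: 10.20.0.2
    max_event_size: 100000000
    acceptance_wait_time_max: 60
    acceptance_wait_time: 10
    random_reauth_delay: 270
    recon_default: 1000
    recon_max: 60000
    recon_randomize: True
    auth_timeout: 60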
diff --git a/mcp/patches/salt-formula-maas/0005-Implement-tags-support.patch b/mcp/patches/salt-formula-maas/0005-Implement-tags-support.patch
new file mode 100644
index 000000000..09d21d022
--- /dev/null
+++ b/mcp/patches/salt-formula-maas/0005-Implement-tags-support.patch
@@ -0,0 +1,95 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Sat, 29 Dec 2018 23:09:01 +0100
+Subject: [PATCH] Implement 'tags' support
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ README.rst | 5 +++++
+ _modules/maas.py | 26 ++++++++++++++++++++++++++
+ maas/region.sls | 8 ++++++++
+ 3 files changed, 39 insertions(+)
+
+diff --git a/README.rst b/README.rst
+index 78d8aef..8c35458 100644
+--- a/README.rst
++++ b/README.rst
+@@ -181,6 +181,11 @@ Single MAAS region service [single UI/API]:
+ mac: '66:55:44:33:22:11'
+ commissioning_scripts:
+ 00-maas-05-simplify-network-interfaces: /etc/maas/files/commisioning_scripts/00-maas-05-simplify-network-interfaces
++ tags:
++ aarch64_hugepages_1g:
++ comment: 'Enable 1G pagesizes on aarch64'
++ definition: '//capability[@id="asimd"]'
++ kernel_opts: 'default_hugepagesz=1G hugepagesz=1G'
+ maas_config:
+ # domain: mydomain.local # This function broken
+ http_proxy: http://192.168.0.10:3142
+diff --git a/_modules/maas.py b/_modules/maas.py
+index bb70576..0cda8dd 100644
+--- a/_modules/maas.py
++++ b/_modules/maas.py
+@@ -876,6 +876,28 @@ class Domain(MaasObject):
+ return ret
+
+
++class Tags(MaasObject):
++ def __init__(self):
++ super(Tags, self).__init__()
++ self._all_elements_url = u'api/2.0/tags/'
++ self._create_url = u'api/2.0/tags/'
++ self._config_path = 'region.tags'
++ self._update_url = u'api/2.0/tags/{0}/'
++ self._update_key = 'name'
++
++ def fill_data(self, name, tag_data):
++ data = {
++ 'name': name,
++ }
++ for key in ['comment', 'definition', 'kernel_opts']:
++ if key in tag_data:
++ data[key] = tag_data[key]
++ return data
++
++ def update(self, new, old):
++ return new
++
++
+ class MachinesStatus(MaasObject):
+ @classmethod
+ def execute(cls, objects_name=None):
+@@ -1065,5 +1087,9 @@ def process_sshprefs():
+ return SSHPrefs().process()
+
+
++def process_tags():
++ return Tags().process()
++
++
+ def wait_for_machine_status(**kwargs):
+ return MachinesStatus.wait_for_machine_status(**kwargs)
+diff --git a/maas/region.sls b/maas/region.sls
+index 4a7f6cc..52fb952 100644
+--- a/maas/region.sls
++++ b/maas/region.sls
+@@ -444,4 +444,12 @@ maas_sshkey_{{ idx }}:
+ {% endfor %}
+ {%- endif %}
+
++{%- if region.get('tags', False) %}
++maas_tags:
++ module.run:
++ - name: maas.process_tags
++ - require:
++ - cmd: maas_login_admin
++{%- endif %}
++
+ {%- endif %}
diff --git a/mcp/patches/salt-formula-maas/0006-curtin-Add-Bionic-support.patch b/mcp/patches/salt-formula-maas/0006-curtin-Add-Bionic-support.patch
new file mode 100644
index 000000000..c69507b20
--- /dev/null
+++ b/mcp/patches/salt-formula-maas/0006-curtin-Add-Bionic-support.patch
@@ -0,0 +1,231 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Wed, 16 Oct 2019 15:02:39 +0200
+Subject: [PATCH] curtin: Add Bionic support
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ .../curtin_userdata_amd64_generic_bionic | 84 +++++++++++++++++++
+ .../curtin_userdata_arm64_generic_bionic | 79 +++++++++++++++++
+ maas/region.sls | 24 ++++++
+ 3 files changed, 187 insertions(+)
+ create mode 100644 maas/files/curtin_userdata_amd64_generic_bionic
+ create mode 100644 maas/files/curtin_userdata_arm64_generic_bionic
+
+diff --git a/maas/files/curtin_userdata_amd64_generic_bionic b/maas/files/curtin_userdata_amd64_generic_bionic
+new file mode 100644
+index 0000000..11af3cf
+--- /dev/null
++++ b/maas/files/curtin_userdata_amd64_generic_bionic
+@@ -0,0 +1,84 @@
++{%- from "maas/map.jinja" import cluster with context %}
++{%- raw %}
++#cloud-config
++debconf_selections:
++ maas: |
++ {{for line in str(curtin_preseed).splitlines()}}
++ {{line}}
++ {{endfor}}
++early_commands:
++ thin_tools_install: ["sh", "-c", "apt-get update --quiet && apt-get --assume-yes install thin-provisioning-tools"]
++{{if third_party_drivers and driver}}
++ {{py: key_string = ''.join(['\\x%x' % x for x in map(ord, driver['key_binary'])])}}
++ driver_00_get_key: /bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg
++ driver_01_add_key: ["apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"]
++ driver_02_add: ["add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"]
++ driver_03_update_install: ["sh", "-c", "apt-get update --quiet && apt-get --assume-yes install {{driver['package']}}"]
++ driver_04_load: ["sh", "-c", "depmod && modprobe {{driver['module']}}"]
++{{endif}}
++late_commands:
++ maas: [wget, '--no-proxy', {{node_disable_pxe_url|escape.json}}, '--post-data', {{node_disable_pxe_data|escape.json}}, '-O', '/dev/null']
++{% endraw %}
++
++{%- if cluster.get('saltstack_repo_key', False) %}
++ {% set salt_repo_key = salt['hashutil.base64_b64encode'](cluster.saltstack_repo_key) %}
++ apt_00_set_gpg: ["curtin", "in-target", "--", "sh", "-c", "echo '{{salt_repo_key}}' | base64 -d | apt-key add -"]
++{%- endif %}
++{% if cluster.saltstack_repo_bionic.startswith('deb') %}
++ {%- set saltstack_repo = cluster.saltstack_repo_bionic -%}
++{%- else %}
++ {%- set saltstack_repo = 'deb [arch=amd64] ' + cluster.saltstack_repo_bionic -%}
++{%- endif %}
++
++ apt_01_set_repo: ["curtin", "in-target", "--", "sh", "-c", "echo '{{ saltstack_repo }}' >> /etc/apt/sources.list.d/mcp_saltstack.list"]
++ apt_03_update: ["curtin", "in-target", "--", "apt-get", "update"]
++{%- if salt['pillar.get']('maas:cluster:curtin_vars:amd64:bionic:extra_pkgs:enabled')|default(false) %}
++ {% for pkg in pillar.maas.cluster.curtin_vars.amd64.bionic.extra_pkgs.pkgs -%}
++ apt_04_install_pkgs_{{ loop.index }}: ["curtin", "in-target", "--", "apt-get", "-y", "install", "{{ pkg }}"]
++ {% endfor %}
++{%- endif %}
++{%- if salt['pillar.get']('maas:cluster:curtin_vars:amd64:bionic:kernel_package:enabled')|default(false) %}
++ apt_05_kernel_old_absent: ["curtin", "in-target", "--", "sh", "-c", "dpkg -l '*linux-generic-*[0-9]*' '*linux-image-*[0-9]*' '*linux-headers-*[0-9]*' '*linux-image-extra-*[0-9]*' '*linux-modules-extra-*[0-9]*' 'linux-generic' 'linux-image-generic' 'linux-headers-generic' 2>/dev/null | grep -E '^ii' | awk '{print $2}' | grep -v '{{ pillar.maas.cluster.curtin_vars.amd64.bionic.kernel_package.value.split('-')[2:-1] | join('-') }}' | xargs dpkg --purge --force-depends"]
++{%- endif %}
++ salt_01_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "salt-minion", "ifupdown", "cloud-init", "dnsmasq"]
++ salt_02_hostname_set: ["curtin", "in-target", "--", "echo", "{% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}"]
++ salt_03_hostname_get: ["curtin", "in-target", "--", "sh", "-c", "echo 'id: {% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}' >> /etc/salt/minion.d/minion.conf"]
++ salt_04_master: ["curtin", "in-target", "--", "sh", "-c", "echo 'master: {{ salt_master_ip }}' >> /etc/salt/minion.d/minion.conf"]
++ salt_05_max_event_size: ["curtin", "in-target", "--", "sh", "-c", "echo 'max_event_size: 100000000' >> /etc/salt/minion.d/minion.conf"]
++ salt_06_acceptance_wait_time_max: ["curtin", "in-target", "--", "sh", "-c", "echo 'acceptance_wait_time_max: 60' >> /etc/salt/minion.d/minion.conf"]
++ salt_07_acceptance_wait_time: ["curtin", "in-target", "--", "sh", "-c", "echo 'acceptance_wait_time: 10' >> /etc/salt/minion.d/minion.conf"]
++ salt_08_random_reauth_delay: ["curtin", "in-target", "--", "sh", "-c", "echo 'random_reauth_delay: 270' >> /etc/salt/minion.d/minion.conf"]
++ salt_09_recon_default: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_default: 1000' >> /etc/salt/minion.d/minion.conf"]
++ salt_10_recon_max: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_max: 60000' >> /etc/salt/minion.d/minion.conf"]
++ salt_11_recon_randomize: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_randomize: True' >> /etc/salt/minion.d/minion.conf"]
++ salt_12_auth_timeout: ["curtin", "in-target", "--", "sh", "-c", "echo 'auth_timeout: 60' >> /etc/salt/minion.d/minion.conf"]
++
++ salt_20_bionic_nplan_stop: ["curtin", "in-target", "--", "systemctl", "stop", "systemd-networkd.socket", "systemd-networkd", "networkd-dispatcher", "systemd-networkd-wait-online", "systemd-resolved"]
++ salt_21_bionic_nplan_disable: ["curtin", "in-target", "--", "systemctl", "disable", "systemd-networkd.socket", "systemd-networkd", "networkd-dispatcher", "systemd-networkd-wait-online", "systemd-resolved"]
++ salt_22_bionic_nplan_mask: ["curtin", "in-target", "--", "systemctl", "mask", "systemd-networkd.socket", "systemd-networkd", "networkd-dispatcher", "systemd-networkd-wait-online", "systemd-resolved"]
++ salt_23_bionic_nplan_purge: ["curtin", "in-target", "--", "apt", "--assume-yes", "purge", "nplan", "netplan.io"]
++ salt_24_bionic_interfaces: ["curtin", "in-target", "--", "sh", "-c", "echo 'source /etc/network/interfaces.d/*' >> /etc/network/interfaces"]
++ salt_25_bionic_networking_unmask: ["curtin", "in-target", "--", "systemctl", "unmask", "networking.service"]
++ salt_26_bionic_networking_enable: ["curtin", "in-target", "--", "systemctl", "enable", "networking.service"]
++ salt_27_bionic_networking_start: ["curtin", "in-target", "--", "systemctl", "start", "networking.service"]
++
++{%- if salt['pillar.get']('maas:cluster:curtin_vars:amd64:bionic:kernel_package:enabled')|default(false) %}
++kernel:
++ package: {{ pillar.maas.cluster.curtin_vars.amd64.bionic.kernel_package.value }}
++{%- endif %}
++
++{% raw %}
++{{if third_party_drivers and driver}}
++ driver_00_key_get: curtin in-target -- sh -c "/bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg"
++ driver_02_key_add: ["curtin", "in-target", "--", "apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"]
++ driver_03_add: ["curtin", "in-target", "--", "add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"]
++ driver_04_update_install: ["curtin", "in-target", "--", "apt-get", "update", "--quiet"]
++ driver_05_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "{{driver['package']}}"]
++ driver_06_depmod: ["curtin", "in-target", "--", "depmod"]
++ driver_07_update_initramfs: ["curtin", "in-target", "--", "update-initramfs", "-u"]
++{{endif}}
++{% endraw %}
++
++{#
++# vim: ft=jinja
++#}
+diff --git a/maas/files/curtin_userdata_arm64_generic_bionic b/maas/files/curtin_userdata_arm64_generic_bionic
+new file mode 100644
+index 0000000..006d8c2
+--- /dev/null
++++ b/maas/files/curtin_userdata_arm64_generic_bionic
+@@ -0,0 +1,79 @@
++{%- from "maas/map.jinja" import cluster with context %}
++{% raw %}
++#cloud-config
++debconf_selections:
++ maas: |
++ {{for line in str(curtin_preseed).splitlines()}}
++ {{line}}
++ {{endfor}}
++early_commands:
++ thin_tools_install: ["sh", "-c", "apt-get update --quiet && apt-get --assume-yes install thin-provisioning-tools"]
++{{if third_party_drivers and driver}}
++ {{py: key_string = ''.join(['\\x%x' % x for x in map(ord, driver['key_binary'])])}}
++ driver_00_get_key: /bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg
++ driver_01_add_key: ["apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"]
++ driver_02_add: ["add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"]
++ driver_03_update_install: ["sh", "-c", "apt-get update --quiet && apt-get --assume-yes install {{driver['package']}}"]
++ driver_04_load: ["sh", "-c", "depmod && modprobe {{driver['module']}}"]
++{{endif}}
++late_commands:
++ maas: [wget, '--no-proxy', {{node_disable_pxe_url|escape.json}}, '--post-data', {{node_disable_pxe_data|escape.json}}, '-O', '/dev/null']
++{% endraw %}
++{%- if cluster.get('saltstack_repo_key', False) %}
++ {% set salt_repo_key = salt['hashutil.base64_b64encode'](cluster.saltstack_repo_key) %}
++ apt_00_set_gpg: ["curtin", "in-target", "--", "sh", "-c", "echo '{{salt_repo_key}}' | base64 -d | apt-key add -"]
++{%- endif %}
++{% if cluster.saltstack_repo_bionic.startswith('deb') %}
++ {%- set saltstack_repo = cluster.saltstack_repo_bionic -%}
++{%- else %}
++ {%- set saltstack_repo = 'deb [arch=amd64] ' + cluster.saltstack_repo_bionic -%}
++{%- endif %}
++{#- NOTE: Re-use amd64 repos on arm64 since most packages are arch independent #}
++ apt_01_set_repo: ["curtin", "in-target", "--", "sh", "-c", "echo '{{ saltstack_repo }}' >> /etc/apt/sources.list.d/mcp_saltstack.list"]
++ apt_03_update: ["curtin", "in-target", "--", "apt-get", "update"]
++{%- if salt['pillar.get']('maas:cluster:curtin_vars:arm64:bionic:extra_pkgs:enabled')|default(false) %}
++ {% for pkg in pillar.maas.cluster.curtin_vars.arm64.bionic.extra_pkgs.pkgs -%}
++ apt_04_install_pkgs_{{ loop.index }}: ["curtin", "in-target", "--", "apt-get", "-y", "install", "{{ pkg }}"]
++ {% endfor %}
++{%- endif %}
++{%- if salt['pillar.get']('maas:cluster:curtin_vars:arm64:bionic:kernel_package:enabled')|default(false) %}
++ apt_05_kernel_old_absent: ["curtin", "in-target", "--", "sh", "-c", "dpkg -l '*linux-generic-*[0-9]*' '*linux-image-*[0-9]*' '*linux-headers-*[0-9]*' '*linux-image-extra-*[0-9]*' '*linux-modules-extra-*[0-9]*' 'linux-generic' 'linux-image-generic' 'linux-headers-generic' 2>/dev/null | grep -E '^ii' | awk '{print $2}' | grep -v '{{ pillar.maas.cluster.curtin_vars.arm64.bionic.kernel_package.value.split('-')[2:-1] | join('-') }}' | xargs dpkg --purge --force-depends"]
++{%- endif %}
++ salt_01_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "salt-minion", "python-futures", "ifupdown", "cloud-init", "dnsmasq"]
++ salt_02_hostname_set: ["curtin", "in-target", "--", "echo", "{% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}"]
++ salt_03_hostname_get: ["curtin", "in-target", "--", "sh", "-c", "echo 'id: {% raw %}{{node.hostname}}{% endraw %}.{{pillar.linux.system.domain}}' >> /etc/salt/minion.d/minion.conf"]
++ salt_04_master: ["curtin", "in-target", "--", "sh", "-c", "echo 'master: {{ salt_master_ip }}' >> /etc/salt/minion.d/minion.conf"]
++ salt_05_max_event_size: ["curtin", "in-target", "--", "sh", "-c", "echo 'max_event_size: 100000000' >> /etc/salt/minion.d/minion.conf"]
++ salt_06_acceptance_wait_time_max: ["curtin", "in-target", "--", "sh", "-c", "echo 'acceptance_wait_time_max: 60' >> /etc/salt/minion.d/minion.conf"]
++ salt_07_acceptance_wait_time: ["curtin", "in-target", "--", "sh", "-c", "echo 'acceptance_wait_time: 10' >> /etc/salt/minion.d/minion.conf"]
++ salt_08_random_reauth_delay: ["curtin", "in-target", "--", "sh", "-c", "echo 'random_reauth_delay: 270' >> /etc/salt/minion.d/minion.conf"]
++ salt_09_recon_default: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_default: 1000' >> /etc/salt/minion.d/minion.conf"]
++ salt_10_recon_max: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_max: 60000' >> /etc/salt/minion.d/minion.conf"]
++ salt_11_recon_randomize: ["curtin", "in-target", "--", "sh", "-c", "echo 'recon_randomize: True' >> /etc/salt/minion.d/minion.conf"]
++ salt_12_auth_timeout: ["curtin", "in-target", "--", "sh", "-c", "echo 'auth_timeout: 60' >> /etc/salt/minion.d/minion.conf"]
++
++ salt_20_bionic_nplan_stop: ["curtin", "in-target", "--", "systemctl", "stop", "systemd-networkd.socket", "systemd-networkd", "networkd-dispatcher", "systemd-networkd-wait-online", "systemd-resolved"]
++ salt_21_bionic_nplan_disable: ["curtin", "in-target", "--", "systemctl", "disable", "systemd-networkd.socket", "systemd-networkd", "networkd-dispatcher", "systemd-networkd-wait-online", "systemd-resolved"]
++ salt_22_bionic_nplan_mask: ["curtin", "in-target", "--", "systemctl", "mask", "systemd-networkd.socket", "systemd-networkd", "networkd-dispatcher", "systemd-networkd-wait-online", "systemd-resolved"]
++ salt_23_bionic_nplan_purge: ["curtin", "in-target", "--", "apt", "--assume-yes", "purge", "nplan", "netplan.io"]
++ salt_24_bionic_interfaces: ["curtin", "in-target", "--", "sh", "-c", "echo 'source /etc/network/interfaces.d/*' >> /etc/network/interfaces"]
++ salt_25_bionic_networking_unmask: ["curtin", "in-target", "--", "systemctl", "unmask", "networking.service"]
++ salt_26_bionic_networking_enable: ["curtin", "in-target", "--", "systemctl", "enable", "networking.service"]
++ salt_27_bionic_networking_start: ["curtin", "in-target", "--", "systemctl", "start", "networking.service"]
++
++{%- if salt['pillar.get']('maas:cluster:curtin_vars:arm64:bionic:kernel_package:enabled')|default(false) %}
++kernel:
++ package: {{ pillar.maas.cluster.curtin_vars.arm64.bionic.kernel_package.value }}
++{%- endif %}
++
++{% raw %}
++{{if third_party_drivers and driver}}
++ driver_00_key_get: curtin in-target -- sh -c "/bin/echo -en '{{key_string}}' > /tmp/maas-{{driver['package']}}.gpg"
++ driver_02_key_add: ["curtin", "in-target", "--", "apt-key", "add", "/tmp/maas-{{driver['package']}}.gpg"]
++ driver_03_add: ["curtin", "in-target", "--", "add-apt-repository", "-y", "deb {{driver['repository']}} {{node.get_distro_series()}} main"]
++ driver_04_update_install: ["curtin", "in-target", "--", "apt-get", "update", "--quiet"]
++ driver_05_install: ["curtin", "in-target", "--", "apt-get", "-y", "install", "{{driver['package']}}"]
++ driver_06_depmod: ["curtin", "in-target", "--", "depmod"]
++ driver_07_update_initramfs: ["curtin", "in-target", "--", "update-initramfs", "-u"]
++{{endif}}
++{% endraw %}
+diff --git a/maas/region.sls b/maas/region.sls
+index 52fb952..ca876ee 100644
+--- a/maas/region.sls
++++ b/maas/region.sls
+@@ -138,6 +138,30 @@ maas_apache_headers:
+ - require:
+ - pkg: maas_region_packages
+
++/etc/maas/preseeds/curtin_userdata_amd64_generic_bionic:
++ file.managed:
++ - source: salt://maas/files/curtin_userdata_amd64_generic_bionic
++ - template: jinja
++ - user: root
++ - group: root
++ - mode: 644
++ - context:
++ salt_master_ip: {{ region.salt_master_ip }}
++ - require:
++ - pkg: maas_region_packages
++
++/etc/maas/preseeds/curtin_userdata_arm64_generic_bionic:
++ file.managed:
++ - source: salt://maas/files/curtin_userdata_arm64_generic_bionic
++ - template: jinja
++ - user: root
++ - group: root
++ - mode: 644
++ - context:
++ salt_master_ip: {{ region.salt_master_ip }}
++ - require:
++ - pkg: maas_region_packages
++
+ Configure /root/.pgpass for MAAS:
+ file.managed:
+ - name: /root/.pgpass
diff --git a/mcp/patches/salt-formula-maas/0007-region-s-syncdb-migrate-for-MaaS-2.4-compatibility.patch b/mcp/patches/salt-formula-maas/0007-region-s-syncdb-migrate-for-MaaS-2.4-compatibility.patch
new file mode 100644
index 000000000..f0a70ffc4
--- /dev/null
+++ b/mcp/patches/salt-formula-maas/0007-region-s-syncdb-migrate-for-MaaS-2.4-compatibility.patch
@@ -0,0 +1,30 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Mon, 27 Jan 2020 17:10:04 +0100
+Subject: [PATCH] region: s/syncdb/migrate/ for MaaS 2.4 compatibility
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ maas/region.sls | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/maas/region.sls b/maas/region.sls
+index ca876ee..31e9906 100644
+--- a/maas/region.sls
++++ b/maas/region.sls
+@@ -186,7 +186,7 @@ maas_region_services:
+ maas_region_syncdb:
+ cmd.run:
+ - names:
+- - maas-region syncdb --noinput
++ - maas-region migrate --noinput
+ - require:
+ - file: /etc/maas/regiond.conf
+ {%- if grains['saltversioninfo'][0] >= 2017 and grains['saltversioninfo'][1] >= 7 %}
diff --git a/mcp/patches/salt-formula-neutron/0001-Bring-in-basic-VPP-support.patch b/mcp/patches/salt-formula-neutron/0001-Bring-in-basic-VPP-support.patch
new file mode 100644
index 000000000..d790ede3d
--- /dev/null
+++ b/mcp/patches/salt-formula-neutron/0001-Bring-in-basic-VPP-support.patch
@@ -0,0 +1,195 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Thu, 6 Dec 2018 18:25:42 +0100
+Subject: [PATCH] Bring in basic VPP support
+
+TODO:
+- update README
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+
+diff --git a/neutron/agents/_vpp.sls b/neutron/agents/_vpp.sls
+new file mode 100644
+index 0000000..c1845d0
+--- /dev/null
++++ b/neutron/agents/_vpp.sls
+@@ -0,0 +1,25 @@
++{%- if pillar.neutron.gateway is defined %}
++{%- from "neutron/map.jinja" import gateway as neutron with context %}
++{%- else %}
++{%- from "neutron/map.jinja" import compute as neutron with context %}
++{%- endif %}
++
++{%- if 'vpp' in neutron.get('backend', {}).get('mechanism', []) %}
++
++/etc/neutron/plugins/ml2/ml2_conf.ini:
++ file.managed:
++ - source: salt://neutron/files/{{ neutron.version }}/_ml2_conf.vpp.ini
++ - mode: 0640
++ - user: root
++ - group: neutron
++ - template: jinja
++
++vpp:
++ service.running:
++ - enable: True
++
++vpp-agent:
++ service.running:
++ - enable: True
++
++{%- endif %}
+diff --git a/neutron/compute.sls b/neutron/compute.sls
+index 708a51d..3767011 100644
+--- a/neutron/compute.sls
++++ b/neutron/compute.sls
+@@ -111,7 +111,9 @@ neutron_metadata_agent:
+ {%- if compute.opendaylight is defined %}
+ {%- include "neutron/opendaylight/client.sls" %}
+ {%- else %}
++ {#- We can reuse this for ml2_vpp and ignore openvswitch_agent.ini #}
+ {%- include "neutron/ml2_ovs/init.sls" %}
++ {%- include "neutron/agents/_vpp.sls" %}
+ {%- endif %}
+
+ {%- elif compute.backend.engine == "ovn" %}
+diff --git a/neutron/files/rocky/_ml2_conf.vpp.ini b/neutron/files/rocky/_ml2_conf.vpp.ini
+new file mode 100644
+index 0000000..2373f64
+--- /dev/null
++++ b/neutron/files/rocky/_ml2_conf.vpp.ini
+@@ -0,0 +1,41 @@
++{%- if pillar.neutron.server is defined %}
++{%- from "neutron/map.jinja" import server as neutron with context %}
++{%- elif pillar.neutron.gateway is defined %}
++{%- from "neutron/map.jinja" import gateway as neutron with context %}
++{%- else %}
++{%- from "neutron/map.jinja" import compute as neutron with context %}
++{%- endif %}
++
++{%- if 'vpp' in neutron.get('backend', {}).get('mechanism', []) %}
++
++{%- set physnets_vpp = [] %}
++{%- set mechanism_vpp = neutron.backend.mechanism.vpp %}
++{%- for physnet, params in neutron.backend.get('physnets', {}).iteritems() %}
++{%- if params.get('vpp_interface', False) %}
++{%- do physnets_vpp.append([physnet, params.get('vpp_interface')]|join(":")) %}
++{%- endif %}
++{%- endfor %}
++{%- if not physnets_vpp %}
++{%- do physnets_vpp.append('physnet1:tap-0') %}
++{%- endif %}
++
++{%- if pillar.neutron.server is not defined %}
++[ml2]
++type_drivers = flat,vlan
++{%- endif %}
++
++[ml2_vpp]
++jwt_signing = False
++etcd_insecure_explicit_disable_https = True
++l3_hosts = {{ mechanism_vpp.get('l3_hosts', '127.0.0.1') }}
++enable_l3_ha = False
++gpe_locators =
++gpe_src_cidr =
++enable_vpp_restart = False
++etcd_pass = {{ mechanism_vpp.get('etcd_pass', '') }}
++etcd_user = {{ mechanism_vpp.get('etcd_user', '') }}
++etcd_port = {{ mechanism_vpp.get('etcd_port', 2379) }}
++etcd_host = {{ mechanism_vpp.get('etcd_host', '127.0.0.1') }}
++physnets = {{ ','.join(physnets_vpp) }}
++
++{%- endif %}
+diff --git a/neutron/files/rocky/ml2_conf.ini b/neutron/files/rocky/ml2_conf.ini
+index a9a598f..4429c80 100644
+--- a/neutron/files/rocky/ml2_conf.ini
++++ b/neutron/files/rocky/ml2_conf.ini
+@@ -27,6 +27,9 @@ agent_boot_time = {{ server.get('agent_boot_time', 180) }}
+ # List of network type driver entrypoints to be loaded from the
+ # neutron.ml2.type_drivers namespace. (list value)
+ #type_drivers = local,flat,vlan,gre,vxlan,geneve
++{%- if 'vpp' in server.backend.get('mechanism', []) %}
++type_drivers = flat,vlan
++{%- endif %}
+
+ # Ordered list of network_types to allocate as tenant networks. The default
+ # value 'local' is useful for single-box testing but provides no connectivity
+@@ -239,6 +242,7 @@ neutron_sync_mode = {{ _ovn.neutron_sync_mode|default('repair') }}
+ enable_distributed_floating_ip = {{ server.dvr|default('false') }}
+ {%- endif %}
+
++{%- include "neutron/files/rocky/_ml2_conf.vpp.ini" %}
+
+ {%- if server.backend.opendaylight|default(False) %}
+ [ml2_odl]
+diff --git a/neutron/gateway.sls b/neutron/gateway.sls
+index e51990a..95d4d07 100644
+--- a/neutron/gateway.sls
++++ b/neutron/gateway.sls
+@@ -40,6 +40,8 @@ haproxy:
+
+ {%- endif %}
+
++{%- include "neutron/agents/_vpp.sls" %}
++
+ {%- if gateway.l2gw is defined %}
+ {%- include "neutron/agents/_l2gw.sls" %}
+ {%- endif %}
+diff --git a/neutron/map.jinja b/neutron/map.jinja
+index 9e6cb36..17cd5b3 100644
+--- a/neutron/map.jinja
++++ b/neutron/map.jinja
+@@ -14,9 +14,13 @@
+ {%- do compute_pkgs_ovn.extend(['neutron-common', 'python-networking-ovn', 'haproxy']) %}
+ {%- endif %}
+ {%- set linuxbridge_enabled = pillar.neutron.compute is defined and pillar.neutron.compute.get('backend', {}).get('mechanism', {}).get('lb', {}).get('driver', {}) == "linuxbridge" %}
++{%- set vpp_enabled = 'vpp' in pillar.neutron.get('compute', {}).get('backend', {}).get('mechanism', []) %}
+ {%- if linuxbridge_enabled %}
+ {%- set pkgs_cmp = ['neutron-linuxbridge-agent'] %}
+ {%- set services_cmp = ['neutron-linuxbridge-agent'] %}
++{%- elif vpp_enabled %}
++{%- set pkgs_cmp = ['vpp-agent'] %}
++{%- set services_cmp = ['vpp-agent'] %}
+ {%- else %}
+ {%- set pkgs_cmp = ['neutron-openvswitch-agent', 'python-pycadf'] %}
+ {%- set services_cmp = ['neutron-openvswitch-agent'] %}
+@@ -73,6 +77,7 @@
+ {%- set opendaylight_enabled = pillar.neutron.gateway is defined and pillar.neutron.gateway.opendaylight is defined %}
+ {%- set linuxbridge_enabled = pillar.neutron.gateway is defined and pillar.neutron.gateway.get('backend', {}).get('mechanism', {}).get('lb', {}).get('driver', {}) == "linuxbridge" %}
+ {%- set dhcp_enabled = pillar.neutron.gateway is defined and pillar.neutron.gateway.get('dhcp_agent_enabled', True) %}
++{%- set vpp_enabled = 'vpp' in pillar.neutron.get('gateway', {}).get('backend', {}).get('mechanism', []) %}
+ {%- set pkgs_list = ['neutron-metadata-agent'] %}
+
+ {%- set services_list = ['neutron-metadata-agent'] %}
+@@ -83,6 +88,13 @@
+ {%- if linuxbridge_enabled %}
+ {%- do pkgs_list.extend(['neutron-linuxbridge-agent', 'neutron-l3-agent']) %}
+ {%- do services_list.extend(['neutron-linuxbridge-agent', 'neutron-l3-agent']) %}
++{%- elif vpp_enabled %}
++{%- do pkgs_list.extend(['vpp-agent']) %}
++{%- do services_list.extend(['vpp-agent']) %}
++{%- if 'vpp-router' not in pillar.neutron.gateway.backend.get('router', '') %}
++{%- do pkgs_list.extend(['neutron-l3-agent']) %}
++{%- do services_list.extend(['neutron-l3-agent']) %}
++{%- endif %}
+ {%- elif not opendaylight_enabled %}
+ {%- do pkgs_list.extend(['neutron-openvswitch-agent', 'neutron-l3-agent']) %}
+ {%- do services_list.extend(['neutron-openvswitch-agent', 'neutron-l3-agent']) %}
+@@ -132,6 +144,10 @@
+ {%- set server_services_list = ['neutron-server'] %}
+ {%- do server_services_list.append('neutron-rpc-server') if wsgi_enabled %}
+
++{%- if 'vpp' in pillar.neutron.get('server', {}).get('backend', {}).get('mechanism', []) %}
++{%- do server_pkgs_list.extend(['python3-networking-vpp']) %}
++{%- endif %}
++
+ {% set server = salt['grains.filter_by']({
+ 'BaseDefaults': default_params,
+ 'Debian': {
diff --git a/mcp/patches/salt-formula-neutron/0002-Align-packages-with-stein-reqs.patch b/mcp/patches/salt-formula-neutron/0002-Align-packages-with-stein-reqs.patch
new file mode 100644
index 000000000..6dcd91a3c
--- /dev/null
+++ b/mcp/patches/salt-formula-neutron/0002-Align-packages-with-stein-reqs.patch
@@ -0,0 +1,114 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Michael Polenchuk <mpolenchuk@mirantis.com>
+Date: Thu, 11 Jul 2019 11:52:53 +0400
+Subject: [PATCH] Align packages with stein reqs
+
+diff --git a/neutron/map.jinja b/neutron/map.jinja
+index 17cd5b3..ed1831a 100644
+--- a/neutron/map.jinja
++++ b/neutron/map.jinja
+@@ -11,7 +11,7 @@
+ {%- if os_family == "Debian" %}
+ {%- set compute_pkgs_ovn = ['ovn-common', 'ovn-host'] %}
+ {%- if pillar.neutron.compute is defined and pillar.neutron.compute.metadata is defined %}
+-{%- do compute_pkgs_ovn.extend(['neutron-common', 'python-networking-ovn', 'haproxy']) %}
++{%- do compute_pkgs_ovn.extend(['neutron-common', 'python3-networking-ovn', 'haproxy']) %}
+ {%- endif %}
+ {%- set linuxbridge_enabled = pillar.neutron.compute is defined and pillar.neutron.compute.get('backend', {}).get('mechanism', {}).get('lb', {}).get('driver', {}) == "linuxbridge" %}
+ {%- set vpp_enabled = 'vpp' in pillar.neutron.get('compute', {}).get('backend', {}).get('mechanism', []) %}
+@@ -22,7 +22,7 @@
+ {%- set pkgs_cmp = ['vpp-agent'] %}
+ {%- set services_cmp = ['vpp-agent'] %}
+ {%- else %}
+-{%- set pkgs_cmp = ['neutron-openvswitch-agent', 'python-pycadf'] %}
++{%- set pkgs_cmp = ['neutron-openvswitch-agent'] %}
+ {%- set services_cmp = ['neutron-openvswitch-agent'] %}
+ {%- endif %}
+ {%- endif %}
+@@ -33,7 +33,7 @@
+ 'Debian': {
+ 'pkgs': pkgs_cmp,
+ 'pkgs_ovn': compute_pkgs_ovn,
+- 'pkgs_bagpipe': ['python-networking-bagpipe'],
++ 'pkgs_bagpipe': ['python3-networking-bagpipe'],
+ 'services': services_cmp,
+ 'services_ovn': ['ovn-host'],
+ 'dpdk': false,
+@@ -138,9 +138,9 @@
+
+ {%- set sfc_enabled = pillar.neutron.get('server', {}).get('sfc', {}).get('enabled', False) %}
+ {%- set wsgi_enabled = pillar.neutron.get('server', {}).get('wsgi', {}).get('enabled', False) %}
+-{%- set server_pkgs_list = ['python-neutron-lbaas'] %}
++{%- set server_pkgs_list = [] %}
+ {%- do server_pkgs_list.append('uwsgi-plugin-python' if wsgi_enabled else 'neutron-server') %}
+-{%- do server_pkgs_list.append('python-networking-sfc') if sfc_enabled %}
++{%- do server_pkgs_list.append('python3-networking-sfc') if sfc_enabled %}
+ {%- set server_services_list = ['neutron-server'] %}
+ {%- do server_services_list.append('neutron-rpc-server') if wsgi_enabled %}
+
+@@ -152,11 +152,11 @@
+ 'BaseDefaults': default_params,
+ 'Debian': {
+ 'pkgs': server_pkgs_list,
+- 'pkgs_ovn': ['python-networking-ovn', 'ovn-common', 'ovn-central'],
++ 'pkgs_ovn': ['python3-networking-ovn', 'ovn-common', 'ovn-central'],
+ 'pkgs_ml2': ['neutron-plugin-ml2'],
+- 'pkgs_l2gw': ['python-networking-l2gw'],
+- 'pkgs_bgpvpn': ['python-networking-bgpvpn'],
+- 'pkgs_bagpipe': ['python-networking-bagpipe'],
++ 'pkgs_l2gw': ['python3-networking-l2gw'],
++ 'pkgs_bgpvpn': ['python3-networking-bgpvpn'],
++ 'pkgs_bagpipe': ['python3-networking-bagpipe'],
+ 'services': server_services_list,
+ 'services_ovn': ['openvswitch-switch', 'ovn-central'],
+ 'notification': {},
+@@ -204,7 +204,7 @@
+
+ {% set client = salt['grains.filter_by']({
+ 'Debian': {
+- 'pkgs': ['python-neutronclient'],
++ 'pkgs': ['python3-neutronclient'],
+ 'enabled': false
+ },
+ 'RedHat': {
+diff --git a/neutron/opendaylight/client.sls b/neutron/opendaylight/client.sls
+index fb5829f..be9aff6 100644
+--- a/neutron/opendaylight/client.sls
++++ b/neutron/opendaylight/client.sls
+@@ -4,7 +4,7 @@
+ {%- from "neutron/map.jinja" import compute as neutron with context %}
+ {%- endif %}
+
+-python-networking-odl:
++python3-networking-odl:
+ pkg.installed
+
+ {%- if not grains.get('noservices', False) %}
+@@ -31,6 +31,6 @@ neutron_odl_ovs_hostconfig:
+ cmd.run:
+ - name: 'neutron-odl-ovs-hostconfig {{ ovs_hostconfig|join(' ') }}'
+ - require:
+- - pkg: python-networking-odl
++ - pkg: python3-networking-odl
+
+ {%- endif %}
+diff --git a/neutron/server.sls b/neutron/server.sls
+index 99bbdef..692e0ce 100644
+--- a/neutron/server.sls
++++ b/neutron/server.sls
+@@ -115,7 +115,7 @@ ml2_plugin_link:
+ {%- endif %}
+
+ {%- if server.backend.get('opendaylight', False) %}
+-python-networking-odl:
++python3-networking-odl:
+ pkg.installed:
+ - require_in:
+ - pkg: neutron_server_packages
diff --git a/mcp/patches/salt-formula-rabbitmq/0001-Stop-epmd.socket-before-relaunching-rabbit-service.patch b/mcp/patches/salt-formula-rabbitmq/0001-Stop-epmd.socket-before-relaunching-rabbit-service.patch
new file mode 100644
index 000000000..f7b85f5ab
--- /dev/null
+++ b/mcp/patches/salt-formula-rabbitmq/0001-Stop-epmd.socket-before-relaunching-rabbit-service.patch
@@ -0,0 +1,34 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Fri, 18 Oct 2019 17:07:13 +0200
+Subject: [PATCH] Stop epmd.socket before relaunching rabbit service
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ rabbitmq/server/service.sls | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/rabbitmq/server/service.sls b/rabbitmq/server/service.sls
+index 3e95a30..05f2eb1 100644
+--- a/rabbitmq/server/service.sls
++++ b/rabbitmq/server/service.sls
+@@ -58,6 +58,12 @@ rabbitmq_limits_systemd:
+ - require:
+ - pkg: rabbitmq_server
+
++rabbitmq_epmd_socket:
++ service.dead:
++ - name: epmd.socket
++ - require:
++ - pkg: rabbitmq_server
++
+ {%- endif %}
+
+ {%- if server.secret_key is defined and not grains.get('noservices', False) %}
diff --git a/mcp/patches/salt-formula-redis/0001-Add-Ubuntu-Bionic-support.patch b/mcp/patches/salt-formula-redis/0001-Add-Ubuntu-Bionic-support.patch
new file mode 100644
index 000000000..1c06bd9d3
--- /dev/null
+++ b/mcp/patches/salt-formula-redis/0001-Add-Ubuntu-Bionic-support.patch
@@ -0,0 +1,44 @@
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+: Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+:
+: All rights reserved. This program and the accompanying materials
+: are made available under the terms of the Apache License, Version 2.0
+: which accompanies this distribution, and is available at
+: http://www.apache.org/licenses/LICENSE-2.0
+::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
+From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+Date: Thu, 24 Oct 2019 23:04:16 +0200
+Subject: [PATCH] Add Ubuntu Bionic support
+
+Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
+---
+ redis/map.jinja | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/redis/map.jinja b/redis/map.jinja
+index 299d5a4..7cbceb4 100755
+--- a/redis/map.jinja
++++ b/redis/map.jinja
+@@ -37,6 +37,9 @@
+ 'xenial': {
+ 'version': '3.0',
+ },
++ 'bionic': {
++ 'version': '3.0',
++ },
+ }, grain='oscodename', merge=salt['pillar.get']('redis:server'))) %}
+
+ {% set cluster = salt['grains.filter_by']({
+@@ -60,4 +63,12 @@
+ 'port': '26379'
+ }
+ },
++ 'bionic': {
++ 'pkgs': ['redis-sentinel'],
++ 'service': 'redis-sentinel',
++ 'sentinel': {
++ 'address': '127.0.0.1',
++ 'port': '26379'
++ }
++ },
+ }, grain='oscodename', merge=salt['pillar.get']('redis:cluster'))) %}
diff --git a/mcp/patches/scripts/0001-salt-master-setup.sh-Allow-arm64-salt-bootstrap.patch b/mcp/patches/scripts/0001-salt-master-setup.sh-Allow-arm64-salt-bootstrap.patch
deleted file mode 100644
index 498743b77..000000000
--- a/mcp/patches/scripts/0001-salt-master-setup.sh-Allow-arm64-salt-bootstrap.patch
+++ /dev/null
@@ -1,59 +0,0 @@
-::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-:
-: All rights reserved. This program and the accompanying materials
-: are made available under the terms of the Apache License, Version 2.0
-: which accompanies this distribution, and is available at
-: http://www.apache.org/licenses/LICENSE-2.0
-::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
-Date: Fri, 1 Sep 2017 00:48:26 +0200
-Subject: [PATCH] salt-master-setup.sh: Allow arm64 salt-bootstrap
-
-Upstream commit [1] broke Salt bootstrap on AArch64, by
-introducing an architecture condition that is too strict to allow
-Debian package installation (even if we provide our own repo).
-
-Add "arm64" to the list of supported architectures. This needs
-to be done on the fly, as the bootstrap script is fetched using
-`curl` from <salt-master-setup.sh>.
-
-[1] https://github.com/saltstack/salt-bootstrap/commit/caa6d7d
-
-Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
----
- salt-master-setup.sh | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
-diff --git a/salt-master-setup.sh b/salt-master-setup.sh
-index cba21fb..0dd3036 100755
---- a/salt-master-setup.sh
-+++ b/salt-master-setup.sh
-@@ -237,12 +237,12 @@ install_salt_master_pkg()
- debian)
- $SUDO apt-get install -y git
- which reclass || $SUDO apt install -qqq -y reclass
-- curl -L https://bootstrap.saltstack.com | $SUDO sh -s -- -M ${BOOTSTRAP_SALTSTACK_OPTS} &>/dev/null || true
-+ curl -L https://bootstrap.saltstack.com | sed 's@"amd64")@"amd64"|"arm64")@g' | $SUDO sh -s -- -M ${BOOTSTRAP_SALTSTACK_OPTS} &>/dev/null || true
- ;;
- rhel)
- yum install -y git
- which reclass || $SUDO yum install -y reclass
-- curl -L https://bootstrap.saltstack.com | $SUDO sh -s -- -M ${BOOTSTRAP_SALTSTACK_OPTS} &>/dev/null || true
-+ curl -L https://bootstrap.saltstack.com | sed 's@"amd64")@"amd64"|"arm64")@g' | $SUDO sh -s -- -M ${BOOTSTRAP_SALTSTACK_OPTS} &>/dev/null || true
- ;;
- esac
-
-@@ -310,10 +310,10 @@ install_salt_minion_pkg()
-
- case $PLATFORM_FAMILY in
- debian)
-- curl -L https://bootstrap.saltstack.com | $SUDO sh -s -- ${BOOTSTRAP_SALTSTACK_OPTS} &>/dev/null || true
-+ curl -L https://bootstrap.saltstack.com | sed 's@"amd64")@"amd64"|"arm64")@g' | $SUDO sh -s -- ${BOOTSTRAP_SALTSTACK_OPTS} &>/dev/null || true
- ;;
- rhel)
-- curl -L https://bootstrap.saltstack.com | $SUDO sh -s -- ${BOOTSTRAP_SALTSTACK_OPTS} &>/dev/null || true
-+ curl -L https://bootstrap.saltstack.com | sed 's@"amd64")@"amd64"|"arm64")@g' | $SUDO sh -s -- ${BOOTSTRAP_SALTSTACK_OPTS} &>/dev/null || true
- ;;
- esac
-
diff --git a/mcp/patches/scripts/0003-salt-master-setup-Group-APT-install-formulas.patch b/mcp/patches/scripts/0003-salt-master-setup-Group-APT-install-formulas.patch
deleted file mode 100644
index d5a9bf3c4..000000000
--- a/mcp/patches/scripts/0003-salt-master-setup-Group-APT-install-formulas.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-: Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-:
-: All rights reserved. This program and the accompanying materials
-: are made available under the terms of the Apache License, Version 2.0
-: which accompanies this distribution, and is available at
-: http://www.apache.org/licenses/LICENSE-2.0
-::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
-From: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
-Date: Tue, 30 Jan 2018 01:23:54 +0100
-Subject: [PATCH] salt-master-setup: Group APT install formulas
-
-Instead of calling `apt install` for each salt formula package that
-we miss, construct a list and install them all at once.
-
-While at it, disable colored output on terminals that don't support
-it, like vt220 (used in OPNFV CI).
-
-Signed-off-by: Alexandru Avadanii <Alexandru.Avadanii@enea.com>
----
- salt-master-init.sh | 6 ++++++
- salt-master-setup.sh | 12 +++++++-----
- 2 files changed, 13 insertions(+), 5 deletions(-)
-
-diff --git a/salt-master-init.sh b/salt-master-init.sh
-index a4ec138..db90a22 100755
---- a/salt-master-init.sh
-+++ b/salt-master-init.sh
-@@ -24,6 +24,12 @@ options() {
- export $(find $path -maxdepth 1 -name '*.env' 2> /dev/null | xargs --no-run-if-empty cat ) > /dev/null
- done;
-
-+ # If terminal does not support color output, stop using it
-+ if ! tput setaf 1 && tput sgr0; then
-+ SALT_OPTS="${SALT_OPTS/--force-color/--no-color}"
-+ return 0
-+ fi
-+
- export MAGENTA='\033[0;95m'
- export YELLOW='\033[1;33m'
- export BLUE='\033[0;35m'
-diff --git a/salt-master-setup.sh b/salt-master-setup.sh
-index 0dd3036..a0c6311 100755
---- a/salt-master-setup.sh
-+++ b/salt-master-setup.sh
-@@ -347,15 +347,17 @@ install_salt_formula_pkg()
- # Set essentials if FORMULAS_SALT_MASTER is not defined at all
- [ -z ${FORMULAS_SALT_MASTER+x} ] && declare -a FORMULAS_SALT_MASTER=("linux" "reclass" "salt" "memcached")
- for formula_service in "${FORMULAS_SALT_MASTER[@]}"; do
-- echo -e "\nConfiguring salt formula ${formula_service} ...\n"
-+ echo -e "Configuring salt formula ${formula_service} ..."
- [ ! -d "${FORMULAS_PATH}/env/${formula_service}" ] && \
-- if ! $SUDO apt-get install -y salt-formula-${formula_service}; then
-- echo -e "\nInstall salt-formula-${formula_service} failed.\n"
-- exit 1
-- fi
-+ _FORMULAS_SALT_MASTER="${_FORMULAS_SALT_MASTER} salt-formula-${formula_service}"
-+ # Create links first, install pkgs later
- [ ! -L "/srv/salt/reclass/classes/service/${formula_service}" ] && \
- ln -sf ${FORMULAS_PATH}/reclass/service/${formula_service} /srv/salt/reclass/classes/service/${formula_service}
- done
-+ if ! $SUDO apt-get install -qqq -y ${_FORMULAS_SALT_MASTER}; then
-+ echo -e "\nInstall ${_FORMULAS_SALT_MASTER} failed.\n"
-+ exit 1
-+ fi
- ;;
- rhel)
- # TODO
diff --git a/mcp/reclass/classes/cluster/.gitignore b/mcp/reclass/classes/cluster/.gitignore
index a7efc5744..9ab39928b 100644
--- a/mcp/reclass/classes/cluster/.gitignore
+++ b/mcp/reclass/classes/cluster/.gitignore
@@ -3,25 +3,26 @@ all-mcp-arch-common/init.yml
all-mcp-arch-common/opnfv/init.yml
mcp*common-*/infra/config.yml
mcp-common-ha/infra/init.yml
-mcp-common-ha/infra/maas.yml
+all-mcp-arch-common/infra/maas.yml
mcp-common-ha/infra/kvm_novcp.yml
-mcp-common-ha/include/maas_proxy.yml
-mcp-common-ha/openstack_control.yml
+all-mcp-arch-common/opnfv/maas_proxy.yml
+mcp-common-*/openstack_control.yml
mcp-common-ha/openstack_telemetry.yml
mcp*common-*/openstack_init.yml
mcp-common-ha/openstack_interface_vcp_biport.yml
mcp-common-ha/openstack_interface_vcp_triport.yml
mcp-common-ha/openstack_proxy.yml
-mcp-ovs-ha/infra/init_vcp.yml
-mcp-ovs-ha/infra/kvm.yml
-mcp-ovs-dpdk-ha/infra/init_vcp.yml
-mcp-ovs-dpdk-ha/infra/kvm.yml
-mcp-odl-ha/infra/init_vcp.yml
-mcp-odl-ha/infra/kvm.yml
+mcp-*-ha/infra/kvm.yml
+mcp-*-ha/infra/init_vcp.yml
mcp-odl-ha/infra/maas.yml
-mcp-ovn-ha/infra/init_vcp.yml
-mcp-ovn-ha/infra/kvm.yml
-mcp-odl-ha/opendaylight/control.yml
+mcp-odl-*/opendaylight/control.yml
mcp-odl-ha/openstack/init.yml
-mcp-odl-noha/infra/config.yml
+mcp-odl-*/infra/config.yml
mcp-*-noha/openstack/compute.yml
+mcp-common-noha/infra/init.yml
+mcp-*-noha/openstack/gateway.yml
+mcp-fdio-noha/infra/config.yml
+mcp-iec-noha/akraino/iec.yml
+mcp-iec-noha/infra/config.yml
+mcp-iec-noha/infra/init.yml
+mcp-iec-noha/infra/kvm.yml
diff --git a/mcp/reclass/classes/cluster/README.rst b/mcp/reclass/classes/cluster/README.rst
index 69234043a..2bb0f265b 100644
--- a/mcp/reclass/classes/cluster/README.rst
+++ b/mcp/reclass/classes/cluster/README.rst
@@ -2,22 +2,22 @@
.. http://creativecommons.org/licenses/by/4.0
.. (c) 2017 Mirantis Inc., Enea AB and others.
-Fuel@OPNFV Cluster Reclass Models
+OPNFV Fuel Cluster Reclass Models
=================================
Overview
--------
-#. Common classes (HA + noHA)
+#. Common classes (HA **and** noHA)
- - all-mcp-arch-common
+ - all-mcp-arch-common
-#. Common classes (HA baremetal/virtual, noHA virtual)
+#. Common classes (HA **or** noHA)
- - mcp-<release>-common-ha
- - mcp-<release>-common-noha
+ - mcp-common-ha
+ - mcp-common-noha
#. Cluster specific classes
- - mcp-<release>-*-{ha,noha}
- - mcp-<release>-*-{ha,noha}
+ - mcp-\*-ha
+ - mcp-\*-noha
diff --git a/mcp/reclass/classes/cluster/all-mcp-arch-common/backports.yml b/mcp/reclass/classes/cluster/all-mcp-arch-common/backports.yml
new file mode 100644
index 000000000..04b38abae
--- /dev/null
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/backports.yml
@@ -0,0 +1,74 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - system.linux.system.repo.keystorage.mirantis_com
+ - cluster.all-mcp-arch-common.uca_repo
+parameters:
+ _param:
+ backports_version: rocky
+ fakeinitscripts_ppa_key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+
+ xsFNBFwKq9ABEADqWu9anJFs3RJ87i53tU8lBC8JGa55YmRlN7LgvkPYMtXj3xOR
+ tBn8HJ3B0b2fKx2htUs+oWtFFCkNUmptnNz+tMVdwXt1lXSr2MEzO6PgBBAvak0j
+ GMLSsI4p60YqoPARMjPXvZ+VNcGZ6RSOKlNnEqSb+M76iaVaqEWBipDR1g+llCd9
+ lgUVQ8iKolw+5iCnPnjmm0GdE9iw7Az0aUIv3yXNaEZwnGb9egdoioY4OvkY9HqR
+ KkgsrTVBWiTOsoDctrPkLNsB1BZLA/Qkgv4Sih2Bc7atgid6SvvuGClex+9MdBPQ
+ r0nT03O0uiXQ4Zk/ULlXaE2ci9dhMD5SNspgZnEULcubqL/Xd2iq6DlW22iXmj2X
+ PSoF6YxrtxlocaC2ChKFGITR7yiudxDYSCyBzXBMP7zfLVwZC3IX309HaxJRPCk5
+ PEatmq0++z3lWfNXEjQ48Rt0mYTC5ktcJQGpSSp30hjrIfz5Jxa/FACQCJBGbr0/
+ jO6cB6TJpHDnwdsEvCLJmeI6+OYkEzExarL8Wg8DdQUo5uppS4zANAgMsUbVqFz5
+ 7WDlLMKPRAheEdZJIwCHXZrB3TibZTNUuafmQD+4a50cfKgNHlb+ks/5gbkxRdNj
+ DdZYI6gbh7PZcvIKOvakrEer8RIpqgSXyWPxIviyCGpp/+webUyapFwstQARAQAB
+ zRxMYXVuY2hwYWQgUFBBIGZvciBPUE5GViBGdWVswsF4BBMBAgAiBQJcCqvQAhsD
+ BgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRCVWdwe/i1a+JgbEADZPwsdXStw
+ kqS+bg+bL4sCK55LnYAPWWnqXLuqpEEXusuGYEyahu69SOidL3/AXY1iM9FnbBE0
+ qyycLQVOv/lt7Bs1WVg7M3gNjTsnCH7RbZsGVWDnOuZ1G0KP2o72dmrR9GYFArHA
+ MMc3YVoKAWhRBWHUKdSp/D68i/cfJ4V1PNhDpchOz4ytPjo2xyHyBW+wxLxNiC32
+ 3uZeT7EpO8UbhuFDd3+PLaNrI1p2mkYxdmTpVBLIdKdAMq1QYi0B1nLvJ7Cp2yck
+ 2HKrI6pb74l7dkQOxx+x/inAMbZKX/AvKSjzyJ+Fxc4TT28m79QLuHtORiaPWCep
+ HePcl/0Qu2n85qOtWbWFWCJwlmvfTkHw2u7PEjutTgX9zOLdEFliu3v9nhvec7Mk
+ AzwpilBD6eAHav8Yhx6CKNR5GReK3viJ8+lso/D/56ap7el+W+M6K59imJ/r8WVx
+ 79qPXTAB29Co8hC5ky2qqeHMHw39VqC/JpCYPjH7qZNyWWhXBwHcobktuCc+tXdq
+ t1qlTz0aU/DLGUW8Buk9R6ZZTvSUibT8tRqDYtVhyJ7u/2qCdqhFoculWr6e6DQF
+ KP41NGKN4LtqQh7HmFCswvBnlu7BpkVlBqlHEMpqRUbJd7fg0oGkEf6P8hhWwdd2
+ 0keWK/lCMRHDEN6+/1ppP7M90/JyUPXfFA==
+ =stQK
+ -----END PGP PUBLIC KEY BLOCK-----
+ linux:
+ system:
+ repo:
+ opnfv_fakeinitscripts:
+ source: "deb http://ppa.launchpad.net/opnfv-fuel/fakeinitscripts/ubuntu ${_param:linux_system_codename} main"
+ key: ${_param:fakeinitscripts_ppa_key}
+ mirantis_openstack:
+ # yamllint disable-line rule:line-length
+ source: "deb ${_param:linux_system_repo_url}/openstack-${_param:backports_version}/xenial xenial main"
+ key: ${_param:linux_system_repo_mirror_mirantis_key}
+ architectures: ${_param:linux_system_architecture}
+ clean_file: true
+ pinning:
+ 10:
+ enabled: true
+ pin: 'release o=Mirantis'
+ priority: 1101
+ package: '/jinja2|redis/'
+ 15:
+ enabled: true
+ pin: 'release o=Mirantis,l=extra-nightly'
+ priority: -1
+ package: 'mysql-common'
+ 20:
+ enabled: true
+ pin: 'release o=Mirantis,l=openstack-rocky-nightly'
+ priority: 1
+ mirantis_extra:
+ # yamllint disable-line rule:line-length
+ source: "deb ${_param:linux_system_repo_url}extra/${_param:linux_system_codename} ${_param:linux_system_codename} main"
+ key: ${_param:linux_system_repo_mirror_mirantis_key}
diff --git a/mcp/reclass/classes/cluster/all-mcp-arch-common/fdio_repo.yml b/mcp/reclass/classes/cluster/all-mcp-arch-common/fdio_repo.yml
new file mode 100644
index 000000000..2a3bfddc4
--- /dev/null
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/fdio_repo.yml
@@ -0,0 +1,118 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+parameters:
+ _param:
+ vpp_version: '19.08.1-*'
+ linux:
+ system:
+ repo:
+ fdio-ubuntu:
+ source: "deb https://packagecloud.io/fdio/1908/ubuntu/ ${_param:linux_system_codename} main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+
+ mQINBF1LI+cBEAC8PxQ5U6GUwSfd1iYf8UgTr4MKvQ9+rEhpZFBb/p6KYjeDKC+R
+ ZcSMO115J9vVMUFw05gZ76o4qAOe9u3WXEpTx8XGgCI7hx1r7TMhPNgbwUBRBunj
+ iRafzb+E8THX1lO2rE9W71AcFgBMS/+4LyofsVofBd7ldJaNeGUmOiOESpqmLkTz
+ KIv/w+XvJmeZViRupRY8RK/J6DDruE66UgPaQv3A0Bb3tMeKago0+dC74NEqDYez
+ eIKqZCh0xCxt+DhVXH4jykt5qka+Z4XWwac7jNG5nWK38af6pj/jYto9yQ3FVTkd
+ MBsq0haZiul4jtl1/29ydEfcPKojuxBrImUk1K5EEghTvKH8iX97oFn5df+dTpY9
+ oZaYjWoLPhqxC8ruTBQyU2Tj0J2k70LK7QtB5XXYVUb5pTHLAkabQIEajtYnYsUl
+ 538/SMVk++U/gMpfM/DTConIiA2Vr2lSRU/zD2O1fR4fXtCsHSqCc7wUiMfkXjWN
+ mPfspeb5wwBU3j6EVWejf264oSY5BDUMJlgw/fpm0OO/814tCJAvM8mpGxNMqmNc
+ MmJTvqH5VWsMswhP/oLkniNfOZ7uRmEHP+wdmeE+D9F/IRAp59Zw4YptuZw/yjum
+ caLpPa5g+XR5ThxnaehUDJPRr/jIBA7oncb61cLgvEL9yLwBiOpxDYO3ywARAQAB
+ tGZodHRwczovL3BhY2thZ2VjbG91ZC5pby9mZGlvLzE5MDggKGh0dHBzOi8vcGFj
+ a2FnZWNsb3VkLmlvL2RvY3MjZ3BnX3NpZ25pbmcpIDxzdXBwb3J0QHBhY2thZ2Vj
+ bG91ZC5pbz6JAk4EEwEKADgWIQQsCK0NKJY6yc/hkfO+1QWW1GNBDwUCXUsj5wIb
+ LwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRC+1QWW1GNBDyYWD/sHWt0oyD+f
+ k9wk8y2Ot8o2yrpRVeY1mr3EI+AKQlZ3f2ABeQ6cBJHPQiqq+YORsw+gGX7UKkIR
+ 83J1hSdRshPomwOOnml9kDGVfH+cF2F9UC4xvsGp9LovOoDptgeGXnndv1IIFFvl
+ G6WIVz1OHeOH8xLoaL5QamQ5SrZkbMHDqPZ/FiS8SVK3y81O2CwGPJg7c/MBl7bK
+ FrRveA5ZkpvfjbHlv+Hd8AN8EjWjzU9HRETHuG2wMRZbHFlKYQSWKbAVjE8MotSh
+ c7pVCGYOMFh/jiFWVd9/6nD61AWKqLk9M7hxg4baQbz5kB2A14vNQWCtT9xq8QvC
+ JtE5wSNkeW+2yvntxsFxwLwsXtVfFuETro3c2ocUv7tgmR7x81h2fSYg6JP6Ch49
+ u7URVpXU4NfAfw9Ii1PuHFPhLbJL0jHJKPWRkv2iUOv72tnb8Op2O7sAqSbdky8a
+ vNfMsr9aR53wFErTJnxitAMuUaEaS42SqmYP1H1De2ejw2NTsM0cFaNqF9tkYRaI
+ HOxLEkVtnWZ0eQkT+erRw0MRY9Y0btCRVTKAO8kW8WbNyBIQ8pynJsRHuMqWHgqC
+ pdzycyC70uyXMPhPtuaf7JdXbcH4Drg/x1CDA7KQPmOfmSI8RxzrFTigvVQ0zrmf
+ s0UoEAcFH269I8D4qmxsZajk91V93PK2dLkCDQRdSyPnARAAx5Y3uV3D8xp5HlUX
+ qVY2D1Mf75Qkn/oJbPAc/E9OetNA92ry1TLSq4chgLd46iMaCmLxXTLM7DRs9TMP
+ vqPM5kFI9ogKUxSwpjvrgDNscmPTUNA+/oyy4MCZROEGlP2wqGoISFC01MqQDm2i
+ 3yneqNcO/LFTJWpvjj3n+4GrEhQkc4o69QKZrjaWi79O58FMxLM/4ePHViPLMaX0
+ y1JAt/qp8A0mr1mBXUfG6SireC5JAqmbB+pxONIuf7tgLGqIHqRKo+Kgk6/bqGN+
+ UU/bGqLMVeTKHavp/vPjH24r70fL/j06IdEbYQxajjeIGp8hx1r0xRvw/4ktgfEf
+ LNqH7n8/tKdp6j/TEU4nJANzp+SLtewaWCNPlLvU9AkHngydbmknaVyjgZJe05UD
+ zdUBfFcHtJPiMcge7il+mhRA+7LSwiPQfRBFHWti/7z5D7sxSCt3o5XRIuzzjZKU
+ wjkoxh2x7b4Vt32UCVa2f/tAYAlWlEuPydZgvAsI0azym/TWmVVP+xQO9auGsvOK
+ 7H9/QgYmpIcOZwezyDWmySHC/8ju2bJXNmvUC9OP5oqMgfkfOEJr5xRihH9f1Xvw
+ DM4EU99ITYP36fD0kH6Xd6OdBPTk1W6R5Trr2zlxQfpts5JKyaH2PU4fJeXoQLFH
+ Al5m7f5Z9YayNKp1I1tZmZjgWasAEQEAAYkEbAQYAQoAIBYhBCwIrQ0oljrJz+GR
+ 877VBZbUY0EPBQJdSyPnAhsuAkAJEL7VBZbUY0EPwXQgBBkBCgAdFiEEC9r8C6QH
+ M4zVmwdGcMYVlT/IHRcFAl1LI+cACgkQcMYVlT/IHRetbxAAlKgcBsBuTtmdtzhQ
+ TkKIjm+eizcwkPVYl5WKZkp6uZV9hJFlpKWnnli2IosRy678NVdQFaT79/ImpH4t
+ uN5WNAh2WS15ZjYFCTOLygNNqGepXR0pwYG66oY8WKHBqk9FTOf5aSOl0gQvH2oX
+ x5HJI0dIJ3RcA+qozmXbRuF+uCmPlkfAQRSHyUpeveCwCUF4l568yCRhe4V63KI8
+ klfVRx0wBzfgAgwrz2+Bsfr7NehU4LJYfklGIvZI4CzGuH49J3wZXDDmtWj2V6pf
+ WF1gCqpn5J6BTj/FSPJ6o3c1esECb/CDNnxm+V6xknE0yl2niwG/uEVf5v8VpTZK
+ f9Ct9nSp7Y9hzvzsajALKcYmEVB8hjyUBCf/LS2TGw/oNnnRfJFb/L1hxJsFZOw/
+ kdaTuxJhM3M5UsM+snOxeGWe5sdfA/q0B1reVvbnjOE4aBLRm+NP/YJwZo7JMXGV
+ YBuOE1Qu4GYtAJ99G8/JoyEApc9sy+UPGNhdYApjDxsnbDVqDQ6Ge7tgtf+kFU8T
+ 9rCMDo/0riS+hSIeQn4XA65xDSBfP9WfEwLjYBl6P5aAKrV3yYs8qqeC0L4caOr/
+ 2abq5zp5ZnQb2XEqoKiNY8M+/rbaIPuZJPJg/tOr4u6WZdDClZzoFx3JTBk7sAOI
+ /iHa3HxNcd6JHgKDT4R0G3NvvW47ghAAr0YdMziI7dJZlIYjcO3kx7fRbUP5xOdn
+ W793aaC5SGmHXBARmhgaqwfj4HA0OYAajdoUiG1cvhiNb2hHOKaNOn+2aLABapDT
+ v2cJtpl5B+hZycYE2Iidg4ezTUYEMOLJW961XICXlYWeZVDx7QI9VTGyTyTuUZaf
+ UbKgrOtIEeqR9wgVM67iyhvEqkngeCVmewIPlBA9TrysDHH3a2r3kuG+N5NMXHDt
+ Cgkj78jvZzQE9mvgIZGjWIiS4WHj2IOBr34kSQkKZtcUxqsWTm8JORoM2USG/RMg
+ 5ODQGurz1sNkILPvUIbAVLBnLj5ePC6pV0wrDfWg98h1Y3HuNr/UsD6A1NyfY1hD
+ 6nHogzKntI6S3oGhxx3JwK1Bh9vagZJpxINYDJdo3pOkbYCE7XbiBAsHyAI7JIK1
+ rWzuzs/ofkcEfy7CdplNwIjhAwH3EmA7VfJ/R7WPEqqzWoM1ip0uhjerpbRU+Qf/
+ iRi32wIfnk1t7dTdd8nYtR299o7nKHb9rYRrVgIAaNf2SuCmCFJ2+DTD96s1ka95
+ i/uC/rNY0kKqoAWv1kn2Y4mPZJ8S1un/MjLSKO/P7f//fK15pwFGWQCozOXO1bCc
+ +9E6qetMManZflq+NUP3MsnPQb+2adtVyGSmYqFNIv8B+RSbJ0/7n0eRYmwGvuxR
+ Stl5YLE/10c=
+ =/RWK
+ -----END PGP PUBLIC KEY BLOCK-----
+ pin:
+ - pin: 'version ${_param:vpp_version}'
+ priority: 1200
+ package: 'vpp*'
+ networking-vpp:
+ source: "deb http://ppa.launchpad.net/opnfv-fuel/networking-vpp/ubuntu ${_param:linux_system_codename} main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1
+
+ mQINBFwKq9ABEADqWu9anJFs3RJ87i53tU8lBC8JGa55YmRlN7LgvkPYMtXj3xOR
+ tBn8HJ3B0b2fKx2htUs+oWtFFCkNUmptnNz+tMVdwXt1lXSr2MEzO6PgBBAvak0j
+ GMLSsI4p60YqoPARMjPXvZ+VNcGZ6RSOKlNnEqSb+M76iaVaqEWBipDR1g+llCd9
+ lgUVQ8iKolw+5iCnPnjmm0GdE9iw7Az0aUIv3yXNaEZwnGb9egdoioY4OvkY9HqR
+ KkgsrTVBWiTOsoDctrPkLNsB1BZLA/Qkgv4Sih2Bc7atgid6SvvuGClex+9MdBPQ
+ r0nT03O0uiXQ4Zk/ULlXaE2ci9dhMD5SNspgZnEULcubqL/Xd2iq6DlW22iXmj2X
+ PSoF6YxrtxlocaC2ChKFGITR7yiudxDYSCyBzXBMP7zfLVwZC3IX309HaxJRPCk5
+ PEatmq0++z3lWfNXEjQ48Rt0mYTC5ktcJQGpSSp30hjrIfz5Jxa/FACQCJBGbr0/
+ jO6cB6TJpHDnwdsEvCLJmeI6+OYkEzExarL8Wg8DdQUo5uppS4zANAgMsUbVqFz5
+ 7WDlLMKPRAheEdZJIwCHXZrB3TibZTNUuafmQD+4a50cfKgNHlb+ks/5gbkxRdNj
+ DdZYI6gbh7PZcvIKOvakrEer8RIpqgSXyWPxIviyCGpp/+webUyapFwstQARAQAB
+ tBxMYXVuY2hwYWQgUFBBIGZvciBPUE5GViBGdWVsiQI4BBMBAgAiBQJcCqvQAhsD
+ BgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRCVWdwe/i1a+JgbEADZPwsdXStw
+ kqS+bg+bL4sCK55LnYAPWWnqXLuqpEEXusuGYEyahu69SOidL3/AXY1iM9FnbBE0
+ qyycLQVOv/lt7Bs1WVg7M3gNjTsnCH7RbZsGVWDnOuZ1G0KP2o72dmrR9GYFArHA
+ MMc3YVoKAWhRBWHUKdSp/D68i/cfJ4V1PNhDpchOz4ytPjo2xyHyBW+wxLxNiC32
+ 3uZeT7EpO8UbhuFDd3+PLaNrI1p2mkYxdmTpVBLIdKdAMq1QYi0B1nLvJ7Cp2yck
+ 2HKrI6pb74l7dkQOxx+x/inAMbZKX/AvKSjzyJ+Fxc4TT28m79QLuHtORiaPWCep
+ HePcl/0Qu2n85qOtWbWFWCJwlmvfTkHw2u7PEjutTgX9zOLdEFliu3v9nhvec7Mk
+ AzwpilBD6eAHav8Yhx6CKNR5GReK3viJ8+lso/D/56ap7el+W+M6K59imJ/r8WVx
+ 79qPXTAB29Co8hC5ky2qqeHMHw39VqC/JpCYPjH7qZNyWWhXBwHcobktuCc+tXdq
+ t1qlTz0aU/DLGUW8Buk9R6ZZTvSUibT8tRqDYtVhyJ7u/2qCdqhFoculWr6e6DQF
+ KP41NGKN4LtqQh7HmFCswvBnlu7BpkVlBqlHEMpqRUbJd7fg0oGkEf6P8hhWwdd2
+ 0keWK/lCMRHDEN6+/1ppP7M90/JyUPXfFA==
+ =73aY
+ -----END PGP PUBLIC KEY BLOCK-----
diff --git a/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/config_pdf.yml.j2 b/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/config_pdf.yml.j2
index c129a0a12..1178843d9 100644
--- a/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/config_pdf.yml.j2
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/config_pdf.yml.j2
@@ -6,14 +6,15 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
---
classes:
- - service.git.client
- - system.linux.system.single
- - system.linux.system.repo.mcp.salt
- system.reclass.storage.salt
+{%- if nm.cluster.has_baremetal_nodes %}
+ - system.reclass.storage.system.infra_maas_single
+{%- endif %}
- system.salt.master.api
- - system.salt.master.pkg
+ - system.salt.master.single
- system.salt.minion.ca.salt_master
parameters:
_param:
@@ -23,14 +24,25 @@ parameters:
salt_master_base_environment: prd
# yamllint disable-line rule:line-length
salt_api_password_hash: "$6$sGnRlxGf$al5jMCetLP.vfI/fTl3Z0N7Za1aeiexL487jAtyRABVfT3NlwZxQGVhO7S1N8OwS/34VHYwZQA8lkXwKMN/GS1"
+ infra_maas_system_codename: bionic
+ linux:
+ system:
+ user:
+ salt:
+ home: /home/salt
salt:
master:
accept_policy: open_mode
file_recv: true
+ minion:
+ mine:
+ module:
+ x509.get_pem_entries: ['/etc/pki/all_cas/*']
reclass:
storage:
data_source:
engine: local
+{%- if nm.cmp_nodes > 0 %}
node:
# We support per-node (not only per-role) compute configuration via IDF
{%- for cmp in range(1, nm.cmp_nodes + 1) %}
@@ -48,37 +60,42 @@ parameters:
- cluster.${_param:cluster_name}.openstack.compute
params:
pxe_admin_address: {{ nm.net_admin | ipnet_hostaddr(admin) }}
- control_address: {{ nm.net_mgmt | ipnet_hostaddr(mgmt) }}
+ pxe_admin_interface: {{ conf.idf.fuel.network.node[i].interfaces[nm.idx_admin] }}
+ single_address: {{ nm.net_mgmt | ipnet_hostaddr(mgmt) }}
tenant_address: {{ nm.net_private | ipnet_hostaddr(pri) }}
external_address: {{ nm.net_public | ipnet_hostaddr(pub) }}
salt_master_host: ${_param:reclass_config_master}
- linux_system_codename: xenial
+ linux_system_codename: bionic
{#- No partial defaults, all or nothing. Defaults tuned for lf-pod2. #}
- {%- if 'dpdk' in conf.cluster.domain or conf.MCP_DPDK_MODE %}
+ {%- if '-ovs-' in conf.MCP_DEPLOY_SCENARIO or '-fdio-' in conf.MCP_DEPLOY_SCENARIO %}
+ {%- set private_speed = conf.nodes[i].interfaces[nm.idx_private].speed %}
+ {%- set private_pci = conf.idf.fuel.network.node[i].busaddr[nm.idx_private] %}
{%- if conf.idf.fuel.reclass is defined %}
{%- if conf.idf.fuel.reclass.node[i].compute_params.dpdk is defined %}
{#- Can't dump json here due to dpdk0_* below, explicitly create yaml #}
{%- set _dpdk = conf.idf.fuel.reclass.node[i].compute_params.dpdk %}
+ {%- set private_drv = _dpdk.dpdk0_driver %}
{%- for _i in _dpdk %}
{{ _i }}: '"{{ _dpdk[_i] }}"'
{%- endfor %}
{%- endif %}
{%- else %}
compute_hugepages_size: 2M
- compute_hugepages_count: 8192
+ compute_hugepages_count: 13312
compute_hugepages_mount: /mnt/hugepages_2M
- compute_kernel_isolcpu: 2,3,10,11
+ compute_kernel_isolcpu: 3,8,9,10,11
compute_dpdk_driver: uio
- compute_ovs_pmd_cpu_mask: '"0xc04"'
- compute_ovs_dpdk_socket_mem: '"2048,2048"'
- compute_ovs_dpdk_lcore_mask: '"0x8"'
+ compute_ovs_pmd_cpu_mask: '"0x708"'
+ compute_ovs_dpdk_socket_mem: '"4096,4096"'
+ compute_ovs_dpdk_lcore_mask: '"0x800"'
compute_ovs_memory_channels: '"2"'
dpdk0_driver: igb_uio
dpdk0_n_rxq: 2
{%- endif %}
dpdk0_name: {{ conf.idf.fuel.network.node[i].interfaces[nm.idx_private] }}
- dpdk0_pci: '"{{ conf.idf.fuel.network.node[i].busaddr[nm.idx_private] }}"'
+ dpdk0_pci: '"{{ private_pci }}"'
+ dpdk0_vpp: {{ ma.vpp_interface_str(private_speed, private_pci, private_drv or '') }}
{%- else %}
{%- if conf.idf.fuel.reclass is defined %}
{%- if conf.idf.fuel.reclass.node[i].compute_params.common is defined %}
@@ -96,3 +113,4 @@ parameters:
{%- endif %}
{%- endif %}
{%- endfor %}
+{%- endif %}
diff --git a/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/maas.yml.j2 b/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/maas.yml.j2
new file mode 100644
index 000000000..092febabb
--- /dev/null
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/infra/maas.yml.j2
@@ -0,0 +1,172 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+---
+# NOTE: pod_config is generated and transferred into its final location on
+# cfg01 only during deployment to prevent leaking sensitive data
+classes:
+ - system.maas.region.single
+ - service.maas.cluster.single
+ - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
+ - cluster.all-mcp-arch-common.opnfv.pod_config
+parameters:
+ _param:
+ linux_system_codename: bionic
+ maas_admin_username: opnfv
+ dns_server01: '{{ nm.dns_public[0] }}'
+ single_address: ${_param:infra_maas_node01_deploy_address}
+ hwe_kernel: 'ga-18.04'
+ opnfv_maas_timeout_comissioning: {{ nm.maas_timeout_comissioning }}
+ opnfv_maas_timeout_deploying: {{ nm.maas_timeout_deploying }}
+ maas:
+ region:
+ services:
+ - maas-regiond
+ - bind9
+{%- if '-ovs-' in conf.MCP_DEPLOY_SCENARIO or '-fdio-' in conf.MCP_DEPLOY_SCENARIO %}
+ tags:
+ aarch64_hugepages_1g:
+ comment: 'Enable 1G pagesizes on aarch64'
+ definition: '//capability[@id="asimd"]|//capability[@id="cp15_barrier"]'
+ kernel_opts: 'default_hugepagesz=1G hugepagesz=1G kpti=off'
+{%- endif %}
+ enable_iframe: False
+ timeout:
+ # Set maas.wait_for_<state> timeouts to ~2.5x of MaaS <state> timeout
+ ready: {{ nm.maas_timeout_comissioning * 150 }}
+ deployed: {{ nm.maas_timeout_deploying * 150 }}
+ attempts: 3
+ boot_sources_delete_all_others: true
+ boot_sources:
+ resources_mirror:
+ url: http://images.maas.io/ephemeral-v3/daily
+ keyring_file: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
+ boot_sources_selections:
+ bionic:
+ url: "http://images.maas.io/ephemeral-v3/daily"
+ os: "ubuntu"
+ release: "${_param:linux_system_codename}"
+ arches:
+{%- for arch in nm.cluster.arch %}
+ - "{{ arch | dpkg_arch }}"
+{%- endfor %}
+ subarches:
+ - "generic"
+ - "ga-18.04"
+ labels: '"*"'
+ fabrics:
+ pxe_admin:
+ name: 'pxe_admin'
+ description: Fabric for PXE/admin
+ vlans:
+ 0:
+ name: 'vlan 0'
+ description: PXE/admin VLAN
+ dhcp: true
+ primary_rack: "${linux:network:hostname}"
+ subnets:
+ {{ nm.net_admin }}:
+ name: {{ nm.net_admin }}
+ cidr: {{ nm.net_admin }}
+ gateway_ip: ${_param:single_address}
+ fabric: ${maas:region:fabrics:pxe_admin:name}
+ vlan: 0
+ ipranges:
+ 1:
+ start: {{ nm.net_admin_pool_start }}
+ end: {{ nm.net_admin_pool_end }}
+ type: dynamic
+ sshprefs:
+ - '{{ conf.MAAS_SSH_KEY }}'
+{%- if 'aarch64' in nm.cluster.arch %}
+ package_repositories:
+ armband:
+ name: armband
+ enabled: '1'
+ url: 'http://linux.enea.com/mcp-repos/${_param:armband_repo_version}/xenial'
+ distributions: '${_param:armband_repo_version}-armband'
+ components: 'main'
+ arches: 'arm64'
+ key: ${_param:armband_key}
+{%- endif %}
+ salt_master_ip: ${_param:reclass_config_master}
+ domain: ${_param:cluster_domain}
+ ~maas_config:
+ maas_name: mas01
+ active_discovery_interval: 600
+ ntp_external_only: true
+ upstream_dns: ${_param:dns_server01}
+ commissioning_distro_series: 'bionic'
+ default_distro_series: 'bionic'
+ default_osystem: 'ubuntu'
+ default_storage_layout: 'lvm'
+ enable_http_proxy: true
+ disk_erase_with_secure_erase: false
+ dnssec_validation: 'no'
+ enable_third_party_drivers: true
+ network_discovery: 'enabled'
+ default_min_hwe_kernel: ${_param:hwe_kernel}
+ kernel_opts: 'spectre_v2=off nopti kpti=off nospec_store_bypass_disable noibrs noibpb'
+ cluster:
+ saltstack_repo_bionic: "deb [arch=amd64] http://archive.repo.saltstack.com/apt/ubuntu/18.04/amd64/2017.7/ bionic main"
+ region:
+ host: ${_param:single_address}
+ port: 5240
+{%- if '-iec-' not in conf.MCP_DEPLOY_SCENARIO and conf.MCP_KERNEL_VER %}
+ curtin_vars:
+ amd64:
+ bionic: &curtin_vars_bionic
+ kernel_package:
+ enabled: True
+ value: 'linux-image-{{ conf.MCP_KERNEL_VER }}-generic'
+ extra_pkgs:
+ enabled: True
+ pkgs:
+ - linux-image-{{ conf.MCP_KERNEL_VER }}-generic
+ - linux-headers-{{ conf.MCP_KERNEL_VER }}-generic
+ - linux-modules-extra-{{ conf.MCP_KERNEL_VER }}-generic
+ arm64:
+ bionic:
+ <<: *curtin_vars_bionic
+{%- endif %}
+ linux:
+ system:
+ repo:
+ armband_3:
+ enabled: false
+ ~locale: ''
+ ~kernel:
+ sysctl:
+ net.ipv4.ip_forward: 1
+ iptables:
+ schema:
+ epoch: 1
+ service:
+ v4:
+ enabled: true
+ persistent_config: /etc/iptables/rules.v4
+ v6:
+ enabled: false
+ tables:
+ v4:
+ filter:
+ chains:
+ INPUT:
+ ruleset:
+ 10:
+ rule: -s ${_param:single_address}/${_param:opnfv_net_admin_mask}
+ 11:
+ rule: -d ${_param:single_address}/${_param:opnfv_net_admin_mask}
+ nat:
+ chains:
+ POSTROUTING:
+ policy: ACCEPT
+ ruleset:
+ 10:
+ rule: -s ${_param:single_address}/${_param:opnfv_net_admin_mask}
+ action: MASQUERADE
diff --git a/mcp/reclass/classes/cluster/all-mcp-arch-common/init.yml.j2 b/mcp/reclass/classes/cluster/all-mcp-arch-common/init.yml.j2
index 19475c717..fc5bbaa7b 100644
--- a/mcp/reclass/classes/cluster/all-mcp-arch-common/init.yml.j2
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/init.yml.j2
@@ -7,35 +7,54 @@
##############################################################################
---
classes:
+ - system.defaults
+ - system.linux.system.single
- cluster.all-mcp-arch-common.opnfv
+ - cluster.all-mcp-arch-common.passwords
parameters:
_param:
+ openstack_version: stein
+ armband_repo_version: rocky
+ mcp_version: nightly
+
+ banner_company_name: OPNFV
+
salt_control_trusty_image: '' # Dummy value, to keep reclass 1.5.2 happy
- salt_control_xenial_image: salt://salt/files/control/images/base_image_opnfv_fuel_vcp.img
+ salt_control_bionic_image: salt://salt/files/control/images/base_image_opnfv_fuel_vcp.img
- # VMs spawned on Foundation Node / Jump Host net ifaces (max 4)
# VCP VMs spawned on KVM Hosts net ifaces (max 3)
- # NOTE(armband): Only x86 VCP VMs spawned via salt.control names differ
-
{%- if conf.MCP_JUMP_ARCH == 'aarch64' %}
- opnfv_fn_vm_primary_interface: enp1s0
- opnfv_fn_vm_secondary_interface: enp2s0
- opnfv_fn_vm_tertiary_interface: enp3s0
- opnfv_fn_vm_quaternary_interface: enp4s0
-
- opnfv_vcp_vm_primary_interface: ${_param:opnfv_fn_vm_primary_interface}
- opnfv_vcp_vm_secondary_interface: ${_param:opnfv_fn_vm_secondary_interface}
- opnfv_vcp_vm_tertiary_interface: ${_param:opnfv_fn_vm_tertiary_interface}
+ opnfv_vcp_vm_primary_interface: enp1s0
+ opnfv_vcp_vm_secondary_interface: enp2s0
+ opnfv_vcp_vm_tertiary_interface: enp3s0
{%- else %}
- opnfv_fn_vm_primary_interface: ens3
- opnfv_fn_vm_secondary_interface: ens4
- opnfv_fn_vm_tertiary_interface: ens5
- opnfv_fn_vm_quaternary_interface: ens6
-
opnfv_vcp_vm_primary_interface: ens2
opnfv_vcp_vm_secondary_interface: ens3
opnfv_vcp_vm_tertiary_interface: ens4
{%- endif %}
+ interface_mtu: {{ conf.idf.fuel.network.interface_mtu or 1500 }}
+
ntp_strata_host1: {{ conf.idf.fuel.network.ntp_strata_host1 or '1.pool.ntp.org' }}
ntp_strata_host2: {{ conf.idf.fuel.network.ntp_strata_host2 or '0.pool.ntp.org' }}
+
+ armband_key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v2.0.14 (GNU/Linux)
+
+ mQENBFagAroBCADWboNIjuF6lB1mWv2+EbvqY3lKl5mLKhr2DnSUkKeHUPBv8gNM
+ qK8Q00AMIyPiyEhgjA+dWizZ+5aBgxoiY7oMeLJ2Xym36U/8SYq2BWd3SGCbMNoz
+ SJDxDUSM/HFVs6atF1M3DY9oN65hSVnu4uy5Tu6asf6k4rhAyk0z4+pRcPBCu2vq
+ mnGi3COM/+9PShrEKeVOx5W2vRJywUFuq8EDvQnRoJ0GvM28JiJIanw17YwIPxhg
+ BKZVpZjan5X+ihVMXwA2h/G/FS5Omhd50RqV6LWSYs94VJJgYqHx8UMm7izcxI+P
+ ct3IcbD195bPbJ+SbuiFe45ZLsdY1MyGiU2BABEBAAG0K0VuZWEgQXJtYmFuZCBE
+ ZXZvcHMgVGVhbSA8YXJtYmFuZEBlbmVhLmNvbT6JATgEEwECACICGwMGCwkIBwMC
+ BhUIAgkKCwQWAgMBAh4BAheABQJaY3bYAAoJEN6rkLp5irHRoQMH/0PYl0A/6eWw
+ nQ/szhEFrr76Ln6wA4vEO+PiuWj9kTkZM2NaCnkisrIuHSPIVvOLfFmztbE6sKGe
+ t+a2b7Jqw48DZ/gq508aZE4Q307ookxdCOrzIu/796hFO34yXg3sqZoJh3VmKIjY
+ 4DL8yG1iAiQ5vOw3IFWQnATwIZUgaCcjmE7HGap+9ePuJfFuQ8mIG5cy28t8qocx
+ AB/B2tucfBMwomYxKqgbLI5AG7iSt58ajvrrNa9f8IX7Ihj/jiuXhUwX+geEp98K
+ IWVI1ftEthZvfBpZW4BS98J4z//dEPi31L4jb9RQXq3afF2RpXchDeUN85bW45nu
+ W/9PMAlgE/U=
+ =m+zE
+ -----END PGP PUBLIC KEY BLOCK-----
diff --git a/mcp/reclass/classes/cluster/all-mcp-arch-common/opnfv/init.yml.j2 b/mcp/reclass/classes/cluster/all-mcp-arch-common/opnfv/init.yml.j2
index 894d3bcc6..755d2cb08 100644
--- a/mcp/reclass/classes/cluster/all-mcp-arch-common/opnfv/init.yml.j2
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/opnfv/init.yml.j2
@@ -27,3 +27,20 @@ parameters:
{{key}}: {{ network | ipnet_hostaddr(i) }}
{%- endfor %}
{%- endfor %}
+ salt:
+ minion:
+ tcp_keepalive: True
+ tcp_keepalive_idle: 60
+ linux:
+ network:
+ resolv:
+ dns:
+{%- for server in nm.dns_public %}
+ - {{ server }}
+{%- endfor %}
+{%- if '-iec-' not in conf.MCP_DEPLOY_SCENARIO and conf.MCP_KERNEL_VER %}
+ system:
+ kernel:
+ version: '{{ conf.MCP_KERNEL_VER }}'
+ headers: True
+{%- endif %}
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/include/lab_proxy_pdf.yml.j2 b/mcp/reclass/classes/cluster/all-mcp-arch-common/opnfv/lab_proxy_pdf.yml.j2
index 3f238d667..3f238d667 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/include/lab_proxy_pdf.yml.j2
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/opnfv/lab_proxy_pdf.yml.j2
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/include/maas_proxy.yml.j2 b/mcp/reclass/classes/cluster/all-mcp-arch-common/opnfv/maas_proxy.yml.j2
index 58ea46cad..58ea46cad 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/include/maas_proxy.yml.j2
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/opnfv/maas_proxy.yml.j2
diff --git a/mcp/reclass/classes/cluster/all-mcp-arch-common/passwords.yml b/mcp/reclass/classes/cluster/all-mcp-arch-common/passwords.yml
new file mode 100644
index 000000000..f51563e7c
--- /dev/null
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/passwords.yml
@@ -0,0 +1,64 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+parameters:
+ _param:
+ opnfv_main_password: opnfv_secret
+
+ maas_admin_password: ${_param:opnfv_main_password}
+ maas_db_password: ${_param:opnfv_main_password}
+ infra_maas_database_password: ${_param:opnfv_main_password}
+
+ galera_server_maintenance_password: ${_param:opnfv_main_password}
+ galera_server_admin_password: ${_param:opnfv_main_password}
+ rabbitmq_secret_key: ${_param:opnfv_main_password}
+ rabbitmq_admin_password: ${_param:opnfv_main_password}
+ rabbitmq_openstack_password: ${_param:opnfv_main_password}
+ rabbitmq_cold_password: ${_param:opnfv_main_password}
+ mysql_admin_password: ${_param:opnfv_main_password}
+ mysql_cinder_password: ${_param:opnfv_main_password}
+ mysql_ceilometer_password: ${_param:opnfv_main_password}
+ mysql_glance_password: ${_param:opnfv_main_password}
+ mysql_grafana_password: ${_param:opnfv_main_password}
+ mysql_heat_password: ${_param:opnfv_main_password}
+ mysql_keystone_password: ${_param:opnfv_main_password}
+ mysql_neutron_password: ${_param:opnfv_main_password}
+ mysql_nova_password: ${_param:opnfv_main_password}
+ mysql_aodh_password: ${_param:opnfv_main_password}
+ mysql_designate_password: ${_param:opnfv_main_password}
+ keystone_aodh_password: ${_param:opnfv_main_password}
+ keystone_service_token: ${_param:opnfv_main_password}
+ keystone_admin_password: ${_param:opnfv_main_password}
+ keystone_ceilometer_password: ${_param:opnfv_main_password}
+ keystone_cinder_password: ${_param:opnfv_main_password}
+ keystone_glance_password: ${_param:opnfv_main_password}
+ keystone_heat_password: ${_param:opnfv_main_password}
+ keystone_keystone_password: ${_param:opnfv_main_password}
+ keystone_neutron_password: ${_param:opnfv_main_password}
+ keystone_nova_password: ${_param:opnfv_main_password}
+ keystone_designate_password: ${_param:opnfv_main_password}
+ mysql_barbican_password: ${_param:opnfv_main_password}
+ keystone_barbican_password: ${_param:opnfv_main_password}
+ metadata_password: ${_param:opnfv_main_password}
+ openstack_telemetry_keepalived_password: ${_param:opnfv_main_password}
+ mysql_panko_password: ${_param:opnfv_main_password}
+ keystone_panko_password: ${_param:opnfv_main_password}
+ mysql_gnocchi_password: ${_param:opnfv_main_password}
+ keystone_gnocchi_password: ${_param:opnfv_main_password}
+ mysql_tacker_password: ${_param:opnfv_main_password}
+ keystone_tacker_password: ${_param:opnfv_main_password}
+ heat_domain_admin_password: ${_param:opnfv_main_password}
+ ceilometer_influxdb_password: ${_param:opnfv_main_password}
+ ceilometer_secret_key: ${_param:opnfv_main_password}
+ openstack_telemetry_redis_password: ${_param:opnfv_main_password}
+
+ opendaylight_password: admin
+
+ barbican_simple_crypto_kek: "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY="
+ horizon_secret_key: opaesee8Que2yahJoh9fo0eefo1Aeyo6ahyei8zeiboh3aeth5loth7ieNa5xi5e
+ designate_bind9_rndc_key: 4pc+X4PDqb2q+5o72dISm72LM1Ds9X2EYZjqg+nmsS7FhdTwzFFY8l/iEDmHxnyjkA33EQC8H+z0fLLBunoitw==
diff --git a/mcp/reclass/classes/cluster/all-mcp-arch-common/uca_repo.yml b/mcp/reclass/classes/cluster/all-mcp-arch-common/uca_repo.yml
new file mode 100644
index 000000000..2ca6f01d4
--- /dev/null
+++ b/mcp/reclass/classes/cluster/all-mcp-arch-common/uca_repo.yml
@@ -0,0 +1,68 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+parameters:
+ linux:
+ system:
+ repo:
+ uca:
+ # yamllint disable-line rule:line-length
+ source: "deb http://ubuntu-cloud.archive.canonical.com/ubuntu ${_param:linux_system_codename}-updates/${_param:openstack_version} main"
+ key: |
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1
+
+ mQINBFAqSlgBEADPKwXUwqbgoDYgR20zFypxSZlSbrttOKVPEMb0HSUx9Wj8VvNC
+ r+mT4E9wAyq7NTIs5ad2cUhXoyenrjcfGqK6k9R6yRHDbvAxCSWTnJjw7mzsajDN
+ ocXC6THKVW8BSjrh0aOBLpht6d5QCO2vyWxw65FKM65GOsbX03ZngUPMuOuiOEHQ
+ Zo97VSH2pSB+L+B3d9B0nw3QnU8qZMne+nVWYLYRXhCIxSv1/h39SXzHRgJoRUFH
+ vL2aiiVrn88NjqfDW15HFhVJcGOFuACZnRA0/EqTq0qNo3GziQO4mxuZi3bTVL5s
+ GABiYW9uIlokPqcS7Fa0FRVIU9R+bBdHZompcYnKAeGag+uRvuTqC3MMRcLUS9Oi
+ /P9I8fPARXUPwzYN3fagCGB8ffYVqMunnFs0L6td08BgvWwer+Buu4fPGsQ5OzMc
+ lgZ0TJmXyOlIW49lc1UXnORp4sm7HS6okA7P6URbqyGbaplSsNUVTgVbi+vc8/jY
+ dfExt/3HxVqgrPlq9htqYgwhYvGIbBAxmeFQD8Ak/ShSiWb1FdQ+f7Lty+4mZLfN
+ 8x4zPZ//7fD5d/PETPh9P0msF+lLFlP564+1j75wx+skFO4v1gGlBcDaeipkFzeo
+ zndAgpegydKSNTF4QK9iTYobTIwsYfGuS8rV21zE2saLM0CE3T90aHYB/wARAQAB
+ tD1DYW5vbmljYWwgQ2xvdWQgQXJjaGl2ZSBTaWduaW5nIEtleSA8ZnRwbWFzdGVy
+ QGNhbm9uaWNhbC5jb20+iQI3BBMBCAAhBQJQKkpYAhsDBQsJCAcDBRUKCQgLBRYC
+ AwEAAh4BAheAAAoJEF7bG2LsSSbqKxkQAIKtgImrk02YCDldg6tLt3b69ZK0kIVI
+ 3Xso/zCBZbrYFmgGQEFHAa58mIgpv5GcgHHxWjpX3n4tu2RM9EneKvFjFBstTTgo
+ yuCgFr7iblvs/aMW4jFJAiIbmjjXWVc0CVB/JlLqzBJ/MlHdR9OWmojN9ZzoIA+i
+ +tWlypgUot8iIxkR6JENxit5v9dN8i6anmnWybQ6PXFMuNi6GzQ0JgZIVs37n0ks
+ 2wh0N8hBjAKuUgqu4MPMwvNtz8FxEzyKwLNSMnjLAhzml/oje/Nj1GBB8roj5dmw
+ 7PSul5pAqQ5KTaXzl6gJN5vMEZzO4tEoGtRpA0/GTSXIlcx/SGkUK5+lqdQIMdyS
+ n8bImU6V6rDSoOaI9YWHZtpv5WeUsNTdf68jZsFCRD+2+NEmIqBVm11yhmUoasC6
+ dYw5l9P/PBdwmFm6NBUSEwxb+ROfpL1ICaZk9Jy++6akxhY//+cYEPLin02r43Z3
+ o5Piqujrs1R2Hs7kX84gL5SlBzTM4Ed+ob7KVtQHTefpbO35bQllkPNqfBsC8AIC
+ 8xvTP2S8FicYOPATEuiRWs7Kn31TWC2iwswRKEKVRmN0fdpu/UPdMikyoNu9szBZ
+ RxvkRAezh3WheJ6MW6Fmg9d+uTFJohZt5qHdpxYa4beuN4me8LF0TYzgfEbFT6b9
+ D6IyTFoT0LequQINBFAqSlgBEADmL3TEq5ejBYrA+64zo8FYvCF4gziPa5rCIJGZ
+ /gZXQ7pm5zek/lOe9C80mhxNWeLmrWMkMOWKCeaDMFpMBOQhZZmRdakOnH/xxO5x
+ +fRdOOhy+5GTRJiwkuGOV6rB9eYJ3UN9caP2hfipCMpJjlg3j/GwktjhuqcBHXhA
+ HMhzxEOIDE5hmpDqZ051f8LGXld9aSL8RctoYFM8sgafPVmICTCq0Wh03dr5c2JA
+ gEXy3ushYm/8i2WFmyldo7vbtTfx3DpmJc/EMpGKV+GxcI3/ERqSkde0kWlmfPZb
+ o/5+hRqSryqfQtRKnFEQgAqAhPIwXwOkjCpPnDNfrkvzVEtl2/BWP/1/SOqzXjk9
+ TIb1Q7MHANeFMrTCprzPLX6IdC4zLp+LpV91W2zygQJzPgWqH/Z/WFH4gXcBBqmI
+ 8bFpMPONYc9/67AWUABo2VOCojgtQmjxuFn+uGNw9PvxJAF3yjl781PVLUw3n66d
+ wHRmYj4hqxNDLywhhnL/CC7KUDtBnUU/CKn/0Xgm9oz3thuxG6i3F3pQgpp7MeMn
+ tKhLFWRXo9Bie8z/c0NV4K5HcpbGa8QPqoDseB5WaO4yGIBOt+nizM4DLrI+v07y
+ Xe3Jm7zBSpYSrGarZGK68qamS3XPzMshPdoXXz33bkQrTPpivGYQVRZuzd/R6b+6
+ IurV+QARAQABiQIfBBgBCAAJBQJQKkpYAhsMAAoJEF7bG2LsSSbq59EP/1U3815/
+ yHV3cf/JeHgh6WS/Oy2kRHp/kJt3ev/l/qIxfMIpyM3u/D6siORPTUXHPm3AaZrb
+ w0EDWByA3jHQEzlLIbsDGZgrnl+mxFuHwC1yEuW3xrzgjtGZCJureZ/BD6xfRuRc
+ mvnetAZv/z98VN/oj3rvYhUi71NApqSvMExpNBGrdO6gQlI5azhOu8xGNy4OSke8
+ J6pAsMUXIcEwjVEIvewJuqBW/3rj3Hh14tmWjQ7shNnYBuSJwbLeUW2e8bURnfXE
+ TxrCmXzDmQldD5GQWCcD5WDosk/HVHBmHlqrqy0VO2nE3c73dQlNcI4jVWeC4b4Q
+ SpYVsFz/6Iqy5ZQkCOpQ57MCf0B6P5nF92c5f3TYPMxHf0x3DrjDbUVZytxDiZZa
+ XsbZzsejbbc1bSNp4hb+IWhmWoFnq/hNHXzKPHBTapObnQju+9zUlQngV0BlPT62
+ hOHOw3Pv7suOuzzfuOO7qpz0uAy8cFKe7kBtLSFVjBwaG5JX89mgttYW+lw9Rmsb
+ p9Iw4KKFHIBLOwk7s+u0LUhP3d8neBI6NfkOYKZZCm3CuvkiOeQP9/2okFjtj+29
+ jEL+9KQwrGNFEVNe85Un5MJfYIjgyqX3nJcwypYxidntnhMhr2VD3HL2R/4CiswB
+ Oa4g9309p/+af/HU1smBrOfIeRoxb8jQoHu3
+ =xg4S
+ -----END PGP PUBLIC KEY BLOCK-----
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/glusterfs_repo.yml b/mcp/reclass/classes/cluster/mcp-common-ha/glusterfs_repo.yml
new file mode 100644
index 000000000..3ec73bec0
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/glusterfs_repo.yml
@@ -0,0 +1,24 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - system.linux.system.repo.keystorage.glusterfs
+parameters:
+ _param:
+ glusterfs_version: "3.13"
+ linux:
+ system:
+ repo:
+ mcp_glusterfs:
+ # yamllint disable-line rule:line-length
+ source: "deb http://ppa.launchpad.net/gluster/glusterfs-${_param:glusterfs_version}/ubuntu ${_param:linux_system_codename} main"
+ key: ${_param:linux_system_repo_mcp_glusterfs_key}
+ pin:
+ - package: '*'
+ pin: release o=LP-PPA-gluster-glusterfs-${_param:glusterfs_version}
+ priority: 1100
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/infra/config.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/infra/config.yml.j2
index f6e0baa11..0ecc2e364 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/infra/config.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/infra/config.yml.j2
@@ -8,7 +8,6 @@
{%- import 'net_map.j2' as nm with context %}
---
classes:
- - system.linux.system.repo.saltstack.xenial
- system.reclass.storage.system.physical_control_cluster
- system.reclass.storage.system.openstack_control_cluster
- system.reclass.storage.system.openstack_proxy_cluster
@@ -18,40 +17,11 @@ classes:
# - system.reclass.storage.system.stacklight_log_cluster
# - system.reclass.storage.system.stacklight_monitor_cluster
# - system.reclass.storage.system.stacklight_telemetry_cluster
- - system.reclass.storage.system.infra_maas_single
- - cluster.mcp-common-ha.include.lab_proxy_pdf
+ - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
- cluster.all-mcp-arch-common.infra.config_pdf
parameters:
_param:
salt_master_host: ${_param:infra_config_deploy_address}
- single_address: ${_param:infra_config_address}
- deploy_address: ${_param:infra_config_deploy_address}
- pxe_admin_address: ${_param:opnfv_infra_config_pxe_admin_address}
- mcpcontrol_nic: ${_param:opnfv_fn_vm_primary_interface}
- single_nic: ${_param:opnfv_fn_vm_secondary_interface}
- pxe_admin_nic: ${_param:opnfv_fn_vm_tertiary_interface}
- linux:
- network:
- interface:
- mcpcontrol_int:
- enabled: true
- type: eth
- proto: dhcp
- name: ${_param:mcpcontrol_nic}
- single:
- enabled: true
- type: eth
- proto: static
- name: ${_param:single_nic}
- address: ${_param:single_address}
- netmask: ${_param:opnfv_net_mgmt_mask}
- pxe_admin_int:
- enabled: true
- type: eth
- proto: static
- name: ${_param:pxe_admin_nic}
- address: ${_param:pxe_admin_address}
- netmask: ${_param:opnfv_net_admin_mask}
salt:
master:
accept_policy: open_mode
@@ -65,7 +35,7 @@ parameters:
infra_kvm_node01:
params:
keepalived_vip_priority: 100
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_infra_kvm_node01_pxe_admin_address}
infra_kvm_node02:
{%- if not conf.MCP_VCP %}
@@ -74,16 +44,16 @@ parameters:
{%- endif %}
params:
keepalived_vip_priority: 101
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_infra_kvm_node02_pxe_admin_address}
infra_kvm_node03:
params:
keepalived_vip_priority: 102
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_infra_kvm_node03_pxe_admin_address}
openstack_telemetry_node01:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
# create resources only from 1 controller
# to prevent race conditions
ceilometer_create_gnocchi_resources: true
@@ -91,33 +61,33 @@ parameters:
pxe_admin_address: ${_param:opnfv_openstack_telemetry_node01_pxe_admin_address}
openstack_telemetry_node02:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
redis_cluster_role: 'slave'
pxe_admin_address: ${_param:opnfv_openstack_telemetry_node02_pxe_admin_address}
openstack_telemetry_node03:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
redis_cluster_role: 'slave'
pxe_admin_address: ${_param:opnfv_openstack_telemetry_node03_pxe_admin_address}
openstack_message_queue_node01:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_message_queue_node01_pxe_admin_address}
openstack_message_queue_node02:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_message_queue_node02_pxe_admin_address}
openstack_message_queue_node03:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_message_queue_node03_pxe_admin_address}
openstack_proxy_node01:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_proxy_node01_pxe_admin_address}
openstack_proxy_node02:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_proxy_node02_pxe_admin_address}
# stacklight_log_node01:
# classes:
@@ -130,31 +100,31 @@ parameters:
classes:
- cluster.mcp-common-ha.openstack_control_init
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
# NOTE: When VCP is present, external_address is not used
external_address: ${_param:openstack_proxy_node01_address}
pxe_admin_address: ${_param:opnfv_openstack_control_node01_pxe_admin_address}
openstack_control_node02:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
external_address: 0.0.0.0
pxe_admin_address: ${_param:opnfv_openstack_control_node02_pxe_admin_address}
openstack_control_node03:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
external_address: ${_param:openstack_proxy_node02_address}
pxe_admin_address: ${_param:opnfv_openstack_control_node03_pxe_admin_address}
openstack_database_node01:
classes:
- cluster.mcp-common-ha.openstack_database_init
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_database_node01_pxe_admin_address}
openstack_database_node02:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_database_node02_pxe_admin_address}
openstack_database_node03:
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
pxe_admin_address: ${_param:opnfv_openstack_database_node03_pxe_admin_address}
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/infra/init.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/infra/init.yml.j2
index 931dd1bab..2f4686767 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/infra/init.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/infra/init.yml.j2
@@ -7,22 +7,18 @@
##############################################################################
---
classes:
- - system.linux.system.single
- cluster.all-mcp-arch-common
# - cluster.mcp-common-ha.stacklight
# - cluster.mcp-common-ha.stacklight.client
parameters:
_param:
- apt_mk_version: nightly
- mcp_repo_version: 1.1
- salt_version: 2016.11
+ salt_version: 2017.7
cluster_domain: ${_param:cluster_name}.local
# stacklight_environment: ${_param:cluster_domain}
reclass_data_revision: master
reclass_config_master: ${_param:opnfv_infra_config_pxe_admin_address}
cluster_public_host: ${_param:openstack_proxy_address}
infra_config_hostname: cfg01
- infra_maas_database_password: opnfv_secret
# infra service addresses
infra_config_address: ${_param:opnfv_infra_config_address}
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml b/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml.j2
index 868f324f6..37bc42225 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm.yml.j2
@@ -5,9 +5,9 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+{%- import 'net_map.j2' as nm with context %}
---
classes:
- - system.linux.system.repo.glusterfs
- service.keepalived.cluster.single
- system.glusterfs.server.volume.glance
- system.glusterfs.server.volume.keystone
@@ -21,13 +21,14 @@ classes:
# - system.salt.control.cluster.stacklight_server_cluster
# - system.salt.control.cluster.stacklight_log_cluster
# - system.salt.control.cluster.stacklight_telemetry_cluster
+ - cluster.all-mcp-arch-common.backports
+ - cluster.mcp-common-ha.glusterfs_repo
- cluster.mcp-common-ha.infra.kvm_pdf
- - cluster.mcp-common-ha.include.maas_proxy
- - cluster.mcp-common-ha.include.lab_proxy_pdf
+ - cluster.all-mcp-arch-common.opnfv.maas_proxy
+ - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
parameters:
_param:
- linux_system_codename: xenial
- glusterfs_version: '3.13'
+ linux_system_codename: bionic
cluster_vip_address: ${_param:infra_kvm_address}
cluster_node01_address: ${_param:infra_kvm_node01_address}
cluster_node02_address: ${_param:infra_kvm_node02_address}
@@ -40,6 +41,12 @@ parameters:
boot_options:
- spectre_v2=off
- nopti
+ - kpti=off
+ - nospec_store_bypass_disable
+ - noibrs
+ - noibpb
+ sysctl:
+ net.ipv4.ip_forward: 0
libvirt:
server:
service: libvirtd
@@ -47,6 +54,7 @@ parameters:
unix_sock_group: libvirt
salt:
control:
+ virt_service: libvirtd
size: # RAM 4096,8192,16384,32768,65536
# Default production sizing
openstack.control:
@@ -92,34 +100,44 @@ parameters:
cluster:
internal:
node:
- mdb01:
- image: ${_param:salt_control_xenial_image}
+ mdb01: &salt_control_bionic_image_common_attr
+ image: ${_param:salt_control_bionic_image}
+{%- if conf.nodes[nm.ctl01.idx].node.arch == 'aarch64' %}
+ seed: qemu-nbd
+ ~cloud_init: ~
+ machine: virt
+ cpu_mode: host-passthrough
+ loader:
+ readonly: 'yes'
+ type: pflash
+ path: /usr/share/AAVMF/AAVMF_CODE.fd
+{%- endif %}
mdb02:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_bionic_image_common_attr
mdb03:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_bionic_image_common_attr
ctl01:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_bionic_image_common_attr
ctl02:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_bionic_image_common_attr
ctl03:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_bionic_image_common_attr
dbs01:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_bionic_image_common_attr
dbs02:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_bionic_image_common_attr
dbs03:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_bionic_image_common_attr
msg01:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_bionic_image_common_attr
msg02:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_bionic_image_common_attr
msg03:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_bionic_image_common_attr
prx01:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_bionic_image_common_attr
prx02:
- image: ${_param:salt_control_xenial_image}
+ <<: *salt_control_bionic_image_common_attr
provider: kvm03.${_param:cluster_domain}
virt:
nic:
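The &salt_control_bionic_image_common_attr anchor with the <<: merge keys above collapses what used to be one image: line per VM into a single shared definition (and, on aarch64, also propagates the UEFI loader/cpu settings to every VM). A minimal, self-contained sketch of the same YAML mechanism, with illustrative names:

# Plain YAML anchor/merge-key example; node names and the extra key are illustrative.
node:
  mdb01: &common_attr              # anchor the first mapping
    image: ${_param:salt_control_bionic_image}
  mdb02:
    <<: *common_attr               # merge key copies every key from the anchored mapping
  mdb03:
    <<: *common_attr
    ram: 16384                     # merged nodes may still add or override keys locally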
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm_pdf.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm_pdf.yml.j2
index 6754d13dd..484e53299 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm_pdf.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/infra/kvm_pdf.yml.j2
@@ -13,8 +13,6 @@
{%- set vlans = { nm.vlan_admin: nm.ctl01.nic_admin, nm.vlan_mgmt: nm.ctl01.nic_mgmt, nm.vlan_public: nm.ctl01.nic_public } %}
---
parameters:
- _param:
- interface_mtu: 1500
linux:
network:
interface:
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/infra/maas.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/infra/maas.yml.j2
deleted file mode 100644
index 29b12ab99..000000000
--- a/mcp/reclass/classes/cluster/mcp-common-ha/infra/maas.yml.j2
+++ /dev/null
@@ -1,174 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-{%- import 'net_map.j2' as nm with context %}
----
-# NOTE: pod_config is generated and transferred into its final location on
-# cfg01 only during deployment to prevent leaking sensitive data
-classes:
- - system.maas.region.single
- - service.maas.cluster.single
- - cluster.mcp-common-ha.include.lab_proxy_pdf
- - cluster.all-mcp-arch-common.opnfv.pod_config
-parameters:
- _param:
- mcpcontrol_interface: ${_param:opnfv_fn_vm_primary_interface}
- primary_interface: ${_param:opnfv_fn_vm_secondary_interface}
- pxe_admin_interface: ${_param:opnfv_fn_vm_tertiary_interface}
- interface_mtu: 1500
- # MaaS has issues using MTU > 1500 for PXE interface
- pxe_admin_interface_mtu: 1500
- linux_system_codename: xenial
- maas_admin_username: opnfv
- maas_admin_password: opnfv_secret
- maas_db_password: opnfv_secret
- dns_server01: '{{ nm.dns_public[0] }}'
- single_address: ${_param:infra_maas_node01_deploy_address}
- hwe_kernel: 'hwe-16.04'
- opnfv_maas_timeout_comissioning: {{ nm.maas_timeout_comissioning }}
- opnfv_maas_timeout_deploying: {{ nm.maas_timeout_deploying }}
- maas:
- region:
- boot_sources_delete_all_others: true
- boot_sources:
- resources_mirror:
- url: http://images.maas.io/ephemeral-v3/daily
- keyring_file: /usr/share/keyrings/ubuntu-cloudimage-keyring.gpg
- boot_sources_selections:
- xenial:
- url: "http://images.maas.io/ephemeral-v3/daily"
- os: "ubuntu"
- release: "${_param:linux_system_codename}"
- arches:
-{%- for arch in nm.cluster.arch %}
- - "{{ arch | dpkg_arch }}"
-{%- endfor %}
- subarches:
- - "generic"
- - "ga-16.04"
- - "hwe-16.04"
- labels: '"*"'
- fabrics:
- pxe_admin:
- name: 'pxe_admin'
- description: Fabric for PXE/admin
- vlans:
- 0:
- name: 'vlan 0'
- description: PXE/admin VLAN
- dhcp: true
- primary_rack: "${linux:network:hostname}"
- subnets:
- {{ nm.net_admin }}:
- name: {{ nm.net_admin }}
- cidr: {{ nm.net_admin }}
- gateway_ip: ${_param:single_address}
- fabric: ${maas:region:fabrics:pxe_admin:name}
- vlan: 0
- ipranges:
- 1:
- start: {{ nm.net_admin_pool_start }}
- end: {{ nm.net_admin_pool_end }}
- type: dynamic
- sshprefs:
- - '{{ conf.MAAS_SSH_KEY }}'
-{%- if 'aarch64' in nm.cluster.arch %}
- package_repositories:
- armband:
- name: armband
- enabled: '1'
- url: 'http://linux.enea.com/mcp-repos/${_param:openstack_version}/${_param:linux_system_codename}'
- distributions: '${_param:openstack_version}-armband'
- components: 'main'
- arches: 'arm64'
- key: &armband_key |
- -----BEGIN PGP PUBLIC KEY BLOCK-----
- Version: GnuPG v2.0.14 (GNU/Linux)
-
- mQENBFagAroBCADWboNIjuF6lB1mWv2+EbvqY3lKl5mLKhr2DnSUkKeHUPBv8gNM
- qK8Q00AMIyPiyEhgjA+dWizZ+5aBgxoiY7oMeLJ2Xym36U/8SYq2BWd3SGCbMNoz
- SJDxDUSM/HFVs6atF1M3DY9oN65hSVnu4uy5Tu6asf6k4rhAyk0z4+pRcPBCu2vq
- mnGi3COM/+9PShrEKeVOx5W2vRJywUFuq8EDvQnRoJ0GvM28JiJIanw17YwIPxhg
- BKZVpZjan5X+ihVMXwA2h/G/FS5Omhd50RqV6LWSYs94VJJgYqHx8UMm7izcxI+P
- ct3IcbD195bPbJ+SbuiFe45ZLsdY1MyGiU2BABEBAAG0K0VuZWEgQXJtYmFuZCBE
- ZXZvcHMgVGVhbSA8YXJtYmFuZEBlbmVhLmNvbT6JATgEEwECACICGwMGCwkIBwMC
- BhUIAgkKCwQWAgMBAh4BAheABQJaY3bYAAoJEN6rkLp5irHRoQMH/0PYl0A/6eWw
- nQ/szhEFrr76Ln6wA4vEO+PiuWj9kTkZM2NaCnkisrIuHSPIVvOLfFmztbE6sKGe
- t+a2b7Jqw48DZ/gq508aZE4Q307ookxdCOrzIu/796hFO34yXg3sqZoJh3VmKIjY
- 4DL8yG1iAiQ5vOw3IFWQnATwIZUgaCcjmE7HGap+9ePuJfFuQ8mIG5cy28t8qocx
- AB/B2tucfBMwomYxKqgbLI5AG7iSt58ajvrrNa9f8IX7Ihj/jiuXhUwX+geEp98K
- IWVI1ftEthZvfBpZW4BS98J4z//dEPi31L4jb9RQXq3afF2RpXchDeUN85bW45nu
- W/9PMAlgE/U=
- =m+zE
- -----END PGP PUBLIC KEY BLOCK-----
-{%- endif %}
- machines:
- {%- set pxe_interface = conf.idf.net_config.admin.interface %}
- {#- We only support exactly 5 nodes for now, hardcoded order #}
- {%- set node_roles = ['kvm01', 'kvm02', 'kvm03', 'cmp001', 'cmp002'] %}
- {%- for node in conf.nodes %}
- {%- if node.node.type == 'baremetal' %}
- {{ node_roles[loop.index0] }}:
- interface:
- mac: {{ node.interfaces[pxe_interface].mac_address }}
- power_parameters:
- power_address: {{ node.remote_management.address.rsplit('/')[0] }}
- power_password: {{ node.remote_management.pass }}
- power_type: {{ node.remote_management.type }}
- power_user: {{ node.remote_management.user }}
- architecture: {{ node.node.arch | dpkg_arch }}/generic
- distro_series: xenial
- hwe_kernel: ${_param:hwe_kernel}
- {%- if loop.index0 >= node_roles.index('cmp001') %}
- disk_layout:
- type: lvm
- root_device: sda
- volume_group: vgroot
- volume_name: lvroot
- volume_size: 100
- {%- endif %}
- {%- endif %}
- {%- endfor %}
- salt_master_ip: ${_param:reclass_config_master}
- domain: ${_param:cluster_domain}
- maas_config:
- commissioning_distro_series: 'xenial'
- default_distro_series: 'xenial'
- default_osystem: 'ubuntu'
- default_storage_layout: 'lvm'
- enable_http_proxy: true
- disk_erase_with_secure_erase: false
- dnssec_validation: 'no'
- enable_third_party_drivers: true
- network_discovery: 'enabled'
- default_min_hwe_kernel: ${_param:hwe_kernel}
- cluster:
- saltstack_repo_xenial: "deb http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2016.11/ xenial main"
- linux:
- network:
- interface:
- mcpcontrol_interface:
- enabled: true
- name: ${_param:mcpcontrol_interface}
- type: eth
- proto: dhcp
- primary_interface:
- enabled: true
- name: ${_param:primary_interface}
- mtu: ${_param:interface_mtu}
- proto: static
- address: ${_param:infra_maas_node01_address}
- netmask: ${_param:opnfv_net_mgmt_mask}
- type: eth
- pxe_admin_interface:
- enabled: true
- name: ${_param:pxe_admin_interface}
- mtu: ${_param:pxe_admin_interface_mtu}
- proto: static
- address: ${_param:single_address}
- netmask: ${_param:opnfv_net_admin_mask}
- type: eth
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute.yml b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute.yml
index c7c6f2fab..af87d9c2f 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute.yml
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute.yml
@@ -7,8 +7,6 @@
##############################################################################
---
classes:
- - system.linux.system.repo.mcp.mirror.v1.openstack
- - system.linux.system.repo.glusterfs
- system.glusterfs.client.cluster
- system.nova.compute.cluster
- system.nova.compute.nfv.hugepages
@@ -17,13 +15,16 @@ classes:
- system.cinder.volume.backend.lvm
- system.ceilometer.agent.cluster
- system.ceilometer.agent.polling.default
+ - service.barbican.client.cluster
+ - cluster.all-mcp-arch-common.backports
+ - cluster.mcp-common-ha.glusterfs_repo
- cluster.mcp-common-ha.openstack_compute_pdf
- - cluster.mcp-common-ha.include.maas_proxy
- - cluster.mcp-common-ha.include.lab_proxy_pdf
+ - cluster.all-mcp-arch-common.opnfv.maas_proxy
+ - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
parameters:
_param:
cluster_vip_address: ${_param:openstack_control_address}
- cluster_local_address: ${_param:control_address}
+ cluster_local_address: ${_param:single_address}
cluster_node01_hostname: ${_param:openstack_control_node01_hostname}
cluster_node01_address: ${_param:openstack_control_node01_address}
cluster_node02_hostname: ${_param:openstack_control_node02_hostname}
@@ -31,12 +32,9 @@ parameters:
cluster_node03_hostname: ${_param:openstack_control_node03_hostname}
cluster_node03_address: ${_param:openstack_control_node03_address}
nova_vncproxy_url: https://${_param:cluster_public_host}:6080
- interface_mtu: 1500
keepalived_vip_interface: br-ctl
keepalived_vip_virtual_router_id: 69
- linux_system_codename: xenial
- single_address: ${_param:control_address}
- glusterfs_version: '3.13'
+ linux_system_codename: bionic
glusterfs:
client:
volumes:
@@ -47,12 +45,20 @@ parameters:
opts: "defaults,backup-volfile-servers=${_param:cluster_node01_address}:${_param:cluster_node02_address}:${_param:cluster_node03_address}"
cinder:
volume:
+ my_ip: ${_param:single_address}
backend:
lvm-driver:
# Align system.cinder.volume.backend.lvm and MaaS data
volume_group: ${linux:storage:lvm:cinder-vg:name}
database:
connection_recycle_time: ${_param:db_connection_recycle_time}
+ barbican:
+ enabled: ${_param:barbican_integration_enabled}
+ pkgs:
+ - cinder-volume
+ openiscsi_services:
+ - tgt
+ - iscsid
linux:
storage:
lvm:
@@ -67,11 +73,29 @@ parameters:
boot_options:
- spectre_v2=off
- nopti
+ - kpti=off
+ - nospec_store_bypass_disable
+ - noibrs
+ - noibpb
neutron:
gateway:
vlan_aware_vms: true
root_helper_daemon: false
+ dhcp_lease_duration: 3600
+ report_interval: 120
nova:
compute:
+ libvirt_service: libvirtd
+ libvirt_bin: /etc/default/libvirtd
disk_cachemodes: file=directsync,block=none
preallocate_images: space
+ heal_instance_info_cache_interval: 300
+ barbican:
+ enabled: ${_param:barbican_integration_enabled}
+ image:
+ verify_glance_signatures: false
+ pkgs:
+ - nova-compute
+ - python3-novaclient
+ - pm-utils
+ - sysfsutils
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute_pdf.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute_pdf.yml.j2
index 51a6dbd68..0b1c5bbf2 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute_pdf.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_compute_pdf.yml.j2
@@ -15,6 +15,7 @@
parameters:
_param:
# Should later be determined via PDF/IDF, AArch64 has ESP on /dev/sda1
+{%- if nm.cmp001.idx < conf.nodes | length %}
{%- if conf.nodes[nm.cmp001.idx].node.type == 'virtual' %}
~cinder_lvm_devices: ['/dev/vdb']
{%- elif conf.nodes[nm.cmp001.idx].node.arch == 'aarch64' or
@@ -23,6 +24,7 @@ parameters:
{%- else %}
~cinder_lvm_devices: ['/dev/sda1']
{%- endif %}
+{%- endif %}
linux:
network:
bridge: openvswitch
@@ -30,7 +32,7 @@ parameters:
# PXE/admin is always untagged on computes
pxe_admin_int:
enabled: true
- name: {{ nm.cmp001.nic_admin }}
+ name: ${_param:pxe_admin_interface}
proto: static
type: eth
address: ${_param:pxe_admin_address}
@@ -42,6 +44,9 @@ parameters:
{%- if nm.cmp001.nic_admin in nics %}
{%- do nics.pop(nm.cmp001.nic_admin) %}
{%- endif %}
+{%- if ma.interface_str(nm.cmp001.nic_public, nm.vlan_public) in nics %}
+ {%- do nics.pop(nm.cmp001.nic_public) %}
+{%- endif %}
{{ ma.linux_network_interfaces_nic(nics) }}
@@ -66,11 +71,12 @@ parameters:
- {{ ma.interface_str(nm.cmp001.nic_public, nm.vlan_public) }}
gateway: ${_param:opnfv_net_public_gw}
name_servers: {{ nm.dns_public }}
- ovs_port_{{ nm.cmp001.nic_public }}:
+ noifupdown: true
+ {{ ma.interface_str(nm.cmp001.nic_public, nm.vlan_public) }}:
enabled: true
- name: {{ ma.interface_str(nm.cmp001.nic_public, nm.vlan_public) }}
proto: manual
ovs_port_type: OVSPort
type: ovs_port
ovs_bridge: br-floating
bridge: br-floating
+ mtu: ${_param:interface_mtu}
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_control.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_control.yml.j2
index 0189e038c..b3ab9e2c7 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_control.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_control.yml.j2
@@ -7,8 +7,6 @@
##############################################################################
---
classes:
- - system.linux.system.repo.mcp.mirror.v1.openstack
- - system.linux.system.repo.glusterfs
- system.ceilometer.client
- system.memcached.server.single
- system.keystone.server.cluster
@@ -20,11 +18,17 @@ classes:
- system.heat.server.cluster
- system.designate.server.cluster
- system.designate.server.backend.bind
+ - system.barbican.server.cluster
+ - system.apache.server.site.barbican
+ - service.barbican.server.plugin.simple_crypto
+ - system.apache.server.single
- system.bind.server.single
- system.haproxy.proxy.listen.openstack.placement
- system.glusterfs.client.cluster
- system.glusterfs.client.volume.glance
- system.glusterfs.client.volume.keystone
+ - cluster.all-mcp-arch-common.backports
+ - cluster.mcp-common-ha.glusterfs_repo
{%- if not conf.MCP_VCP %}
# sync from kvm
- service.keepalived.cluster.single
@@ -42,13 +46,13 @@ classes:
# - system.salt.control.cluster.stacklight_log_cluster
# - system.salt.control.cluster.stacklight_telemetry_cluster
- cluster.mcp-common-ha.infra.kvm_pdf
- - cluster.mcp-common-ha.include.maas_proxy
- - cluster.mcp-common-ha.include.lab_proxy_pdf
+ - cluster.all-mcp-arch-common.opnfv.maas_proxy
+ - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
{%- endif %}
parameters:
_param:
{%- if not conf.MCP_VCP %}
- linux_system_codename: xenial # sync from kvm
+ linux_system_codename: bionic # sync from kvm
# For NOVCP, we switch keepalived VIPs, to keep cluster_vip_address in ctl
single_nic: br-ctl # for keepalive_vip_interface interpolation
control_nic: ~ # Dummy value to keep reclass 1.5.2 happy
@@ -66,28 +70,60 @@ parameters:
cluster_node03_hostname: ${_param:openstack_control_node03_hostname}
cluster_node03_address: ${_param:openstack_control_node03_address}
nova_vncproxy_url: https://${_param:cluster_public_host}:6080
- glusterfs_version: '3.13'
+ barbican_integration_enabled: 'false'
+ fernet_rotation_driver: 'shared_filesystem'
+ credential_rotation_driver: 'shared_filesystem'
+ common_conn_recycle_time: &db_conn_recycle_time
+ database:
+ connection_recycle_time: ${_param:db_connection_recycle_time}
nova:
- controller: &db_conn_recycle_time
- database:
- connection_recycle_time: ${_param:db_connection_recycle_time}
+ controller:
+ <<: *db_conn_recycle_time
+ barbican:
+ enabled: ${_param:barbican_integration_enabled}
+ pkgs:
+ - nova-api
+ - nova-conductor
+ - nova-consoleauth
+ - nova-scheduler
+ - nova-novncproxy
+ - python3-novaclient
cinder:
controller:
+ pkgs:
+ - cinder-api
+ - cinder-scheduler
<<: *db_conn_recycle_time
neutron:
server:
<<: *db_conn_recycle_time
vlan_aware_vms: true
root_helper_daemon: false
+ agent_down_time: 300
+ global_physnet_mtu: ${_param:interface_mtu}
+ backend:
+ external_mtu: ${_param:interface_mtu}
+ pkgs:
+ - neutron-server
keystone:
server:
<<: *db_conn_recycle_time
cacert: /etc/ssl/certs/mcp_os_cacert
openrc_extra:
- volume_device_name: vdc
+ volume_device_name: sdc
+ pkgs:
+ - keystone
+ - python3-memcache
+ - python3-openstackclient
glance:
server:
<<: *db_conn_recycle_time
+ identity:
+ barbican_endpoint: ${barbican:server:host_href}
+ pkgs:
+ - glance
+ services:
+ - glance-api
{%- if conf.MCP_VCP %}
heat:
server:
@@ -104,6 +140,9 @@ parameters:
host: ${_param:openstack_proxy_control_address}
port: 8003
protocol: http
+ apache:
+ server:
+ mod_wsgi: libapache2-mod-wsgi-py3
{%- else %}
libvirt:
server:
@@ -120,7 +159,8 @@ parameters:
apache:
server:
bind:
- ~ports: ~
+ listen_default_ports: false
+ mod_wsgi: libapache2-mod-wsgi-py3
# sync from common-ha kvm role
glusterfs:
server:
@@ -146,9 +186,17 @@ parameters:
listen:
heat_cloudwatch_api:
enabled: false
- neutron_api:
- # Set source balancing
- type: heat
+ barbican:
+ server:
+ ks_notifications_enable: true
+ store:
+ software:
+ crypto_plugin: simple_crypto
+ store_plugin: store_crypto
+ global_default: true
+ database:
+ connection_recycle_time: ${_param:db_connection_recycle_time}
+ host: ${_param:openstack_database_address}
bind:
server:
control:
@@ -164,6 +212,9 @@ parameters:
keys:
- designate
designate:
+ _support:
+ sphinx:
+ enabled: False # Workaround broken meta/sphinx.yml in salt-formula-designate
server:
pools:
default:
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_control_init.yml b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_control_init.yml
index 0664c5399..aaa5e65f0 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_control_init.yml
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_control_init.yml
@@ -9,21 +9,31 @@
classes:
- system.keystone.client.single
- system.keystone.client.service.aodh
- - system.keystone.client.service.ceilometer
- system.keystone.client.service.nova21
- system.keystone.client.service.nova-placement
- system.keystone.client.service.cinder3
- system.keystone.client.service.designate
+ - system.keystone.client.service.ceilometer
- system.keystone.client.service.gnocchi
- system.keystone.client.service.panko
+ - system.keystone.client.service.barbican
- system.keystone.client.v3.service.keystone
parameters:
+ _param:
+ ceilometer_endpoint_status: absent
keystone:
client:
enabled: true
resources:
v3:
enabled: true
+ services:
+ ceilometer:
+ status: absent
+ # required only for Rally validation
+ cinder:
+ type: volume
+ description: OpenStack Volume Service
server:
identity:
admin:
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_database.yml b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_database.yml
index 89c485e0f..9ed3f70cd 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_database.yml
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_database.yml
@@ -7,10 +7,8 @@
##############################################################################
---
classes:
- - system.linux.system.repo.mcp.mirror.v1.openstack
- system.galera.server.cluster
- system.galera.server.database.aodh
- - system.galera.server.database.ceilometer
- system.galera.server.database.cinder
- system.galera.server.database.designate
- system.galera.server.database.glance
@@ -21,6 +19,8 @@ classes:
- system.galera.server.database.nova
- system.galera.server.database.neutron
- system.galera.server.database.panko
+ - system.galera.server.database.barbican
+ - cluster.all-mcp-arch-common.backports
parameters:
_param:
keepalived_vip_interface: ${_param:single_nic}
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_init.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_init.yml.j2
index b0f28f9f1..a55485ea0 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_init.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_init.yml.j2
@@ -9,9 +9,6 @@
---
parameters:
_param:
-
- openstack_version: queens
-
# openstack service addresses
{%- if conf.MCP_VCP %}
openstack_proxy_control_address: ${_param:opnfv_openstack_proxy_control_address}
@@ -111,16 +108,6 @@ parameters:
openstack_compute_node01_hostname: cmp001
openstack_compute_node02_hostname: cmp002
- # opendaylight options
-{%- if conf.MCP_VCP %}
- opendaylight_server_node01_hostname: odl01
- opendaylight_server_node01_single_address: ${_param:opnfv_opendaylight_server_node01_single_address}
-{%- else %}
- opendaylight_control_hostname: ${_param:openstack_control_node02_hostname}
- opendaylight_server_node01_hostname: ${_param:opendaylight_control_hostname}
- opendaylight_server_node01_single_address: ${_param:opnfv_openstack_control_node02_address}
-{%- endif %}
-
openstack_region: RegionOne
admin_email: root@localhost
db_connection_recycle_time: 300
@@ -134,23 +121,16 @@ parameters:
neutron_compute_agent_mode: legacy
neutron_compute_external_access: 'True'
galera_server_cluster_name: openstack_cluster
- galera_server_maintenance_password: opnfv_secret
- galera_server_admin_password: opnfv_secret
- rabbitmq_secret_key: opnfv_secret
- rabbitmq_admin_password: opnfv_secret
- rabbitmq_openstack_password: opnfv_secret
glance_version: ${_param:openstack_version}
glance_service_host: ${_param:openstack_control_address}
keystone_version: ${_param:openstack_version}
keystone_service_host: ${_param:openstack_control_address}
heat_version: ${_param:openstack_version}
heat_service_host: ${_param:openstack_control_address}
- heat_domain_admin_password: opnfv_secret
cinder_version: ${_param:openstack_version}
cinder_service_host: ${_param:openstack_control_address}
ceilometer_version: ${_param:openstack_version}
ceilometer_service_host: ${_param:openstack_telemetry_address}
- ceilometer_influxdb_password: opnfv_secret
nova_version: ${_param:openstack_version}
nova_service_host: ${_param:openstack_control_address}
neutron_version: ${_param:openstack_version}
@@ -161,51 +141,26 @@ parameters:
glusterfs_service_host: ${_param:openstack_control_address}
{%- endif %}
mysql_admin_user: root
- mysql_admin_password: opnfv_secret
- mysql_cinder_password: opnfv_secret
- mysql_ceilometer_password: opnfv_secret
- mysql_glance_password: opnfv_secret
- mysql_grafana_password: opnfv_secret
- mysql_heat_password: opnfv_secret
- mysql_keystone_password: opnfv_secret
- mysql_neutron_password: opnfv_secret
- mysql_nova_password: opnfv_secret
- mysql_aodh_password: opnfv_secret
- mysql_designate_password: opnfv_secret
aodh_version: ${_param:openstack_version}
- keystone_aodh_password: opnfv_secret
- keystone_service_token: opnfv_secret
- keystone_admin_password: opnfv_secret
- keystone_ceilometer_password: opnfv_secret
- keystone_cinder_password: opnfv_secret
- keystone_glance_password: opnfv_secret
- keystone_heat_password: opnfv_secret
- keystone_keystone_password: opnfv_secret
- keystone_neutron_password: opnfv_secret
- keystone_nova_password: opnfv_secret
- keystone_designate_password: opnfv_secret
- ceilometer_secret_key: opnfv_secret
+ barbican_version: ${_param:openstack_version}
+ barbican_service_host: ${_param:openstack_control_address}
+ apache_barbican_api_address: ${_param:single_address}
+ barbican_integration_enabled: true
horizon_version: ${_param:openstack_version}
- horizon_secret_key: opaesee8Que2yahJoh9fo0eefo1Aeyo6ahyei8zeiboh3aeth5loth7ieNa5xi5e
horizon_identity_host: ${_param:openstack_control_address}
horizon_identity_encryption: none
horizon_identity_version: 3
- metadata_password: opnfv_secret
- openstack_telemetry_keepalived_password: opnfv_secret
+ apache_mods_status_enabled: false
+ nginx_server_site_nginx_proxy_openstack_web_enabled: true
aodh_service_host: ${_param:openstack_telemetry_address}
- gnocchi_version: 4.2
+ gnocchi_version: 4.3
gnocchi_service_host: ${_param:openstack_telemetry_address}
- mysql_gnocchi_password: opnfv_secret
- keystone_gnocchi_password: opnfv_secret
panko_version: ${_param:openstack_version}
panko_service_host: ${_param:openstack_telemetry_address}
- mysql_panko_password: opnfv_secret
- keystone_panko_password: opnfv_secret
ceilometer_agent_default_polling_interval: 180
ceilometer_agent_default_polling_meters:
- "*"
designate_service_host: ${_param:openstack_control_address}
- designate_bind9_rndc_key: 4pc+X4PDqb2q+5o72dISm72LM1Ds9X2EYZjqg+nmsS7FhdTwzFFY8l/iEDmHxnyjkA33EQC8H+z0fLLBunoitw==
designate_domain_id: 5186883b-91fb-4891-bd49-e6769234a8fc
designate_pool_ns_records:
- hostname: 'ns1.example.org.'
@@ -243,22 +198,22 @@ parameters:
# billometer_identity_token: ${_param:keystone_service_token}
linux:
system:
- repo:
- uca:
- source: "deb http://ubuntu-cloud.archive.canonical.com/ubuntu xenial-updates/${_param:openstack_version} main"
- key_id: EC4926EA
- key_server: keyserver.ubuntu.com
- pin:
- - pin: 'release o=Canonical'
- priority: 1200
- package: 'python-pymysql libvirt* *qemu*'
{%- if 'aarch64' in nm.cluster.arch %}
+ repo:
armband_3: # Should be in sync with the repo config generated via curtin/MaaS
- source: "deb http://linux.enea.com/mcp-repos/${_param:openstack_version}/${_param:linux_system_codename} ${_param:openstack_version}-armband main"
- pin:
- - pin: 'release a=${_param:openstack_version}-armband'
- priority: 1201
+ source: "deb http://linux.enea.com/mcp-repos/${_param:armband_repo_version}/xenial ${_param:armband_repo_version}-armband main"
+ key: ${_param:armband_key}
+ pinning:
+ 15:
+ enabled: true
+ pin: 'release a=${_param:armband_repo_version}-armband'
+ priority: 15
package: '*'
+ 1200:
+ enabled: true
+ pin: 'release a=${_param:armband_repo_version}-armband'
+ priority: 1200
+ package: 'qemu-efi'
{%- endif %}
kernel:
sysctl:
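The reworked armband_3 entry splits the pinning in two: priority 15 for package: '*' and priority 1200 for qemu-efi alone. Under APT's pinning rules a priority below 100 effectively de-prefers the repository (its packages lose to the default 500 of the Ubuntu archives and are only pulled in when nothing else provides them), while a priority above 1000 forces the armband build of qemu-efi even if that would be a downgrade. A rough sketch of the interpolated result, assuming armband_repo_version resolves to the OpenStack release name ('queens' is used here purely for illustration):

# Illustrative interpolation of the armband_3 repo definition above.
linux:
  system:
    repo:
      armband_3:
        source: "deb http://linux.enea.com/mcp-repos/queens/xenial queens-armband main"
        pinning:
          15:                      # de-prefer everything from this repo ...
            pin: 'release a=queens-armband'
            priority: 15
            package: '*'
          1200:                    # ... except qemu-efi, pinned above 1000
            pin: 'release a=queens-armband'
            priority: 1200
            package: 'qemu-efi'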
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_interface_vcp_biport.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_interface_vcp_biport.yml.j2
index 3576acc2f..3b302aca8 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_interface_vcp_biport.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_interface_vcp_biport.yml.j2
@@ -9,8 +9,8 @@
---
{%- if conf.MCP_VCP %}
classes:
- - cluster.mcp-common-ha.include.maas_proxy
- - cluster.mcp-common-ha.include.lab_proxy_pdf
+ - cluster.all-mcp-arch-common.opnfv.maas_proxy
+ - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
parameters:
_param:
pxe_admin_interface: ${_param:opnfv_vcp_vm_primary_interface}
@@ -28,6 +28,8 @@ parameters:
gateway: {{ nm.net_admin_gw }}
name_servers:
- {{ nm.net_admin_gw }}
+ noifupdown: true
+ mtu: ${_param:interface_mtu}
single:
enabled: true
type: eth
@@ -35,4 +37,5 @@ parameters:
name: ${_param:single_nic}
address: ${_param:single_address}
netmask: ${_param:opnfv_net_public_mask}
+ mtu: ${_param:interface_mtu}
{%- endif %}
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_interface_vcp_triport.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_interface_vcp_triport.yml.j2
index 1fa22aa7f..8815de99b 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_interface_vcp_triport.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_interface_vcp_triport.yml.j2
@@ -10,8 +10,8 @@
---
{%- if conf.MCP_VCP %}
classes:
- - cluster.mcp-common-ha.include.maas_proxy
- - cluster.mcp-common-ha.include.lab_proxy_pdf
+ - cluster.all-mcp-arch-common.opnfv.maas_proxy
+ - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
parameters:
_param:
pxe_admin_interface: ${_param:opnfv_vcp_vm_primary_interface}
@@ -27,6 +27,8 @@ parameters:
name: ${_param:pxe_admin_interface}
address: ${_param:pxe_admin_address}
netmask: ${_param:opnfv_net_admin_mask}
+ noifupdown: true
+ mtu: ${_param:interface_mtu}
single_int:
enabled: true
type: eth
@@ -36,6 +38,7 @@ parameters:
netmask: ${_param:opnfv_net_public_mask}
gateway: ${_param:opnfv_net_public_gw}
name_servers: {{ nm.dns_public }}
+ mtu: ${_param:interface_mtu}
control_int:
enabled: true
type: eth
@@ -43,6 +46,7 @@ parameters:
name: ${_param:control_nic}
address: ${_param:control_address}
netmask: ${_param:opnfv_net_mgmt_mask}
+ mtu: ${_param:interface_mtu}
{%- else %}
{#- For NOVCP scenarios, base config is in kvm_pdf, only add/override gw #}
parameters:
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_message_queue.yml b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_message_queue.yml
index 855e63267..1871c2efa 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_message_queue.yml
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_message_queue.yml
@@ -7,9 +7,9 @@
##############################################################################
---
classes:
- - system.linux.system.repo.mcp.mirror.v1.openstack
- system.rabbitmq.server.cluster
- system.rabbitmq.server.vhost.openstack
+ - cluster.all-mcp-arch-common.backports
parameters:
_param:
keepalived_vip_interface: ${_param:single_nic}
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_proxy.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_proxy.yml.j2
index d7ccff532..31bfeddb4 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_proxy.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_proxy.yml.j2
@@ -7,23 +7,25 @@
##############################################################################
---
classes:
- - system.linux.system.repo.mcp.mirror.v1.openstack
- system.nginx.server.single
- system.nginx.server.proxy.openstack_api
- system.nginx.server.proxy.openstack_vnc
- system.nginx.server.proxy.openstack_web
- system.nginx.server.proxy.openstack.aodh
- - system.nginx.server.proxy.openstack.ceilometer
+ - system.nginx.server.proxy.openstack.barbican
+ - system.apache.server.single
- system.horizon.server.single
- system.salt.minion.cert.proxy
- system.sphinx.server.doc.reclass
- service.keepalived.cluster.single
- system.keepalived.cluster.instance.openstack_web_public_vip
+ - cluster.all-mcp-arch-common.backports
parameters:
_param:
cluster_vip_address: ${_param:openstack_proxy_address}
keepalived_openstack_web_public_vip_address: ${_param:cluster_vip_address}
keepalived_openstack_web_public_vip_interface: ${_param:single_nic}
+ keepalived_openstack_web_public_vip_password: ${_param:opnfv_main_password}
keepalived_vip_address: ${_param:openstack_proxy_control_address}
keepalived_vip_interface: ${_param:control_nic}
keepalived_vip_virtual_router_id: 240
@@ -48,8 +50,6 @@ parameters:
address: ${_param:openstack_proxy_address}
nginx_proxy_openstack_api_aodh:
<<: *nginx_openstack_proxy_address
- nginx_proxy_openstack_api_ceilometer:
- <<: *nginx_openstack_proxy_address
nginx_proxy_openstack_api_cinder:
<<: *nginx_openstack_proxy_address
nginx_proxy_openstack_api_glance:
@@ -93,3 +93,8 @@ parameters:
vrrp_scripts:
check_pidof:
args: 'nginx'
+ apache:
+ server:
+ mod_wsgi: libapache2-mod-wsgi-py3
+ bind:
+ listen_default_ports: false
diff --git a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_telemetry.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_telemetry.yml.j2
index c55ea0049..776e520d2 100644
--- a/mcp/reclass/classes/cluster/mcp-common-ha/openstack_telemetry.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-ha/openstack_telemetry.yml.j2
@@ -7,7 +7,6 @@
##############################################################################
---
classes:
- - system.linux.system.repo.mcp.mirror.v1.openstack
- service.redis.server.single
- system.ceilometer.server.cluster
- system.ceilometer.server.coordination.redis
@@ -23,6 +22,7 @@ classes:
- system.gnocchi.common.storage.redis
- system.gnocchi.common.coordination.redis
- system.panko.server.cluster
+ - cluster.all-mcp-arch-common.backports
parameters:
_param:
keepalived_openstack_telemetry_vip_interface: ${_param:single_nic}
@@ -42,15 +42,21 @@ parameters:
openstack_telemetry_redis_url: redis://${_param:redis_sentinel_node01_address}:26379?sentinel=master_1&sentinel_fallback=${_param:redis_sentinel_node02_address}:26379&sentinel_fallback=${_param:redis_sentinel_node03_address}:26379
gnocchi_coordination_url: ${_param:openstack_telemetry_redis_url}
gnocchi_storage_incoming_redis_url: ${_param:openstack_telemetry_redis_url}
+ linux:
+ system:
+ sysfs:
+ transparent_hugepages:
+ kernel/mm/transparent_hugepage/enabled: never
redis:
server:
- version: 3.0
+ version: 5.0
appendfsync: 'no'
bind:
address: ${_param:single_address}
cluster:
enabled: true
mode: sentinel
+ password: ${_param:opnfv_main_password}
role: ${_param:redis_cluster_role}
quorum: 2
master:
@@ -69,6 +75,7 @@ parameters:
- python-memcache
apache:
server:
+ mod_wsgi: libapache2-mod-wsgi-py3
~modules:
- rewrite
{%- if conf.MCP_VCP %} {#- wsgi module will be enabled by a different class inherited later #}
diff --git a/mcp/reclass/classes/cluster/mcp-common-noha/haproxy_openstack_api.yml b/mcp/reclass/classes/cluster/mcp-common-noha/haproxy_openstack_api.yml
index 9fe5247a4..dd3bc4761 100644
--- a/mcp/reclass/classes/cluster/mcp-common-noha/haproxy_openstack_api.yml
+++ b/mcp/reclass/classes/cluster/mcp-common-noha/haproxy_openstack_api.yml
@@ -8,7 +8,7 @@
---
parameters:
_param:
- haproxy_check: check inter 15s fastinter 2s downinter 4s rise 3 fall 3
+ haproxy_check: check inter 30m fastinter 2s downinter 4s rise 3 fall 3
haproxy:
proxy:
listen:
@@ -131,25 +131,36 @@ parameters:
host: ${_param:cluster_node01_address}
port: 8775
params: ${_param:haproxy_check}
- ceilometer_api:
- type: general-service
- check: false
+ aodh-api:
+ type: openstack-service
+ service_name: aodh
binds:
- address: ${_param:cluster_vip_address}
- port: 8777
+ port: 8042
servers:
- - name: ${_param:cluster_node01_hostname}
+ - name: ctl01
host: ${_param:cluster_node01_address}
- port: 8777
+ port: 8042
params: ${_param:haproxy_check}
- aodh-api:
+ barbican_api:
type: openstack-service
- service_name: aodh
+ service_name: barbican
binds:
- address: ${_param:cluster_vip_address}
- port: 8042
+ port: 9311
servers:
- name: ctl01
host: ${_param:cluster_node01_address}
- port: 8042
+ port: 9311
+ params: ${_param:haproxy_check}
+ barbican_admin_api:
+ type: openstack-service
+ service_name: barbican
+ binds:
+ - address: ${_param:cluster_vip_address}
+ port: 9312
+ servers:
+ - name: ctl01
+ host: ${_param:cluster_node01_address}
+ port: 9312
params: ${_param:haproxy_check}
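The new barbican_api and barbican_admin_api entries follow the same openstack-service listener schema as aodh-api: binds sets the VIP-side address and port, servers lists the backends, and the shared haproxy_check string (now inter 30m, i.e. one health probe every 30 minutes instead of every 15 seconds) is passed through as the per-server check options. Any further service would be one more listener of the same shape — a hypothetical sketch, not part of this change:

# Hypothetical extra listener following the schema used above;
# 'example' and port 9999 are placeholders.
parameters:
  haproxy:
    proxy:
      listen:
        example_api:
          type: openstack-service
          service_name: example
          binds:
            - address: ${_param:cluster_vip_address}
              port: 9999
          servers:
            - name: ctl01
              host: ${_param:cluster_node01_address}
              port: 9999
              params: ${_param:haproxy_check}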
diff --git a/mcp/reclass/classes/cluster/mcp-common-noha/infra/config.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-noha/infra/config.yml.j2
index ac53e8225..90e6ffc8d 100644
--- a/mcp/reclass/classes/cluster/mcp-common-noha/infra/config.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-noha/infra/config.yml.j2
@@ -8,37 +8,14 @@
{%- import 'net_map.j2' as nm with context %}
---
classes:
- - system.mysql.client.single
- cluster.all-mcp-arch-common.infra.config_pdf
parameters:
_param:
openstack_control_node01_hostname: ctl01
- reclass_config_master: ${_param:opnfv_infra_config_pxe_admin_address}
+ pxe_admin_address: ${_param:reclass_config_master}
single_address: ${_param:opnfv_infra_config_address}
salt_master_host: 127.0.0.1
salt_minion_ca_host: ${linux:network:fqdn}
- linux:
- network:
- interface:
- mcpcontrol_int:
- enabled: true
- type: eth
- proto: dhcp
- name: ${_param:opnfv_fn_vm_primary_interface}
- single_int:
- enabled: true
- name: ${_param:opnfv_fn_vm_secondary_interface}
- type: eth
- proto: static
- address: ${_param:single_address}
- netmask: ${_param:opnfv_net_mgmt_mask}
- pxe_admin_int:
- enabled: true
- type: eth
- proto: static
- name: ${_param:opnfv_fn_vm_tertiary_interface}
- address: ${_param:opnfv_infra_config_pxe_admin_address}
- netmask: ${_param:opnfv_net_admin_mask}
salt:
master:
file_recv: true
@@ -53,7 +30,7 @@ parameters:
classes:
- cluster.${_param:cluster_name}.openstack.control
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
salt_master_host: ${_param:reclass_config_master}
single_address: ${_param:openstack_control_node01_address}
pxe_admin_address: ${_param:opnfv_openstack_control_node01_pxe_admin_address}
diff --git a/mcp/reclass/classes/cluster/mcp-common-noha/infra/init.yml b/mcp/reclass/classes/cluster/mcp-common-noha/infra/init.yml.j2
index 772d0880b..d3e07e106 100644
--- a/mcp/reclass/classes/cluster/mcp-common-noha/infra/init.yml
+++ b/mcp/reclass/classes/cluster/mcp-common-noha/infra/init.yml.j2
@@ -5,10 +5,20 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+{%- import 'net_map.j2' as nm with context %}
---
+classes:
+ - cluster.all-mcp-arch-common
parameters:
_param:
+ # infra service addresses
+ infra_config_address: ${_param:opnfv_infra_config_address}
+ infra_config_deploy_address: {{ conf.SALT_MASTER }}
cluster_domain: ${_param:cluster_name}.local
+ reclass_config_master: ${_param:opnfv_infra_config_pxe_admin_address}
+ infra_maas_node01_hostname: mas01
+ infra_maas_node01_address: ${_param:opnfv_infra_maas_node01_address}
+ infra_maas_node01_deploy_address: ${_param:opnfv_infra_maas_node01_deploy_address}
linux:
system:
apt:
@@ -20,12 +30,19 @@ parameters:
network:
host:
cfg01:
- address: ${_param:infra_config_address}
+ address: ${_param:opnfv_infra_config_address}
names:
- cfg01
- cfg01.${_param:cluster_domain}
cfg:
- address: ${_param:infra_config_address}
+ address: ${_param:opnfv_infra_config_address}
names:
- cfg
- cfg.${_param:cluster_domain}
+{%- if nm.cluster.has_baremetal_nodes %}
+ mas01:
+ address: ${_param:infra_maas_node01_address}
+ names:
+ - ${_param:infra_maas_node01_hostname}
+ - ${_param:infra_maas_node01_hostname}.${_param:cluster_domain}
+{%- endif %}
diff --git a/mcp/reclass/classes/cluster/mcp-common-noha/init_options.yml b/mcp/reclass/classes/cluster/mcp-common-noha/init_options.yml
index 462ab8312..4b5a4dec8 100644
--- a/mcp/reclass/classes/cluster/mcp-common-noha/init_options.yml
+++ b/mcp/reclass/classes/cluster/mcp-common-noha/init_options.yml
@@ -10,8 +10,6 @@ classes:
- cluster.all-mcp-arch-common
parameters:
_param:
- # infra service addresses
- infra_config_address: ${_param:opnfv_infra_config_address}
# openstack service addresses
openstack_control_address: ${_param:opnfv_openstack_control_node01_address}
openstack_control_node01_address: ${_param:opnfv_openstack_control_node01_address}
diff --git a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_compute.yml b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_compute.yml
index 671f6eb07..edaf5190a 100644
--- a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_compute.yml
+++ b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_compute.yml
@@ -15,19 +15,17 @@ classes:
- system.ceilometer.client.nova_compute
- system.ceilometer.client.cinder_volume
- system.ceilometer.agent.polling.default
- - system.linux.system.repo.mcp.openstack
+ - service.barbican.client.single
+ - cluster.all-mcp-arch-common.backports
- cluster.mcp-common-noha.openstack_compute_pdf
parameters:
- _param:
- interface_mtu: 9000
- linux_system_codename: xenial
- single_address: ${_param:control_address}
nova:
compute:
libvirt_service: libvirtd
libvirt_bin: /etc/default/libvirtd
disk_cachemodes: file=directsync,block=none
vncproxy_url: http://${_param:cluster_vip_address}:6080
+ heal_instance_info_cache_interval: 300
bind:
vnc_address: ${_param:single_address}
network:
@@ -35,11 +33,21 @@ parameters:
user: neutron
tenant: service
password: ${_param:keystone_neutron_password}
+ barbican:
+ enabled: ${_param:barbican_integration_enabled}
+ image:
+ verify_glance_signatures: false
+ pkgs:
+ - nova-compute
+ - python3-novaclient
+ - pm-utils
+ - sysfsutils
neutron:
compute:
notification: true
vlan_aware_vms: true
root_helper_daemon: false
+ report_interval: 120
agent_mode: ${_param:neutron_compute_agent_mode}
message_queue:
host: ${_param:openstack_control_address}
@@ -47,6 +55,11 @@ parameters:
host: ${_param:openstack_control_address}
cinder:
volume:
+ backend:
+ lvm-driver:
+ # Align system.cinder.volume.backend.lvm and MaaS data
+ volume_group: ${linux:storage:lvm:cinder-vg:name}
+ my_ip: ${_param:single_address}
database:
host: ${_param:cluster_local_address}
identity:
@@ -55,6 +68,13 @@ parameters:
host: ${_param:cluster_local_address}
message_queue:
host: ${_param:cluster_local_address}
+ barbican:
+ enabled: ${_param:barbican_integration_enabled}
+ pkgs:
+ - cinder-volume
+ openiscsi_services:
+ - tgt
+ - iscsid
nfs:
client:
mount:
@@ -63,6 +83,11 @@ parameters:
fstype: nfs
device: ${_param:openstack_control_address}:/srv/nova/instances
linux:
+ storage:
+ lvm:
+ # Align with both system.cinder.volume.backend.lvm and MaaS data
+ cinder-vg:
+ name: vgroot
system:
kernel:
sysctl:
diff --git a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_compute_pdf.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_compute_pdf.yml.j2
index aebd88828..b63555339 100644
--- a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_compute_pdf.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_compute_pdf.yml.j2
@@ -7,10 +7,19 @@
##############################################################################
{#- NOTE: br-{mgmt,ctl} are cross-referenced, careful when changing names #}
{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{#- Filter-out NIC duplicates by constructing a dict (used NICs only) #}
+{%- set nics = { nm.cmp001.nic_mgmt: True, nm.cmp001.nic_private: True } %}
+{%- set vlans = { nm.vlan_mgmt: nm.cmp001.nic_mgmt } %}
+{%- if '-ovs-' not in conf.MCP_DEPLOY_SCENARIO and '-fdio-' not in conf.MCP_DEPLOY_SCENARIO %}
+{%- set vlan_private_start = (nm.vlan_private | string).rsplit('-')[0] %}
+{%- do vlans.update({ vlan_private_start: nm.cmp001.nic_private }) %}
+{%- endif %}
---
parameters:
_param:
# Should later be determined via PDF/IDF, AArch64 has ESP on /dev/sda1
+{%- if nm.cmp001.idx < conf.nodes | length %}
{%- if conf.nodes[nm.cmp001.idx].node.type == 'virtual' %}
~cinder_lvm_devices: ['/dev/vdb']
{%- elif conf.nodes[nm.cmp001.idx].node.arch == 'aarch64' or
@@ -19,32 +28,44 @@ parameters:
{%- else %}
~cinder_lvm_devices: ['/dev/sda1']
{%- endif %}
- primary_interface: {{ nm.cmp001.nic_mgmt }}
- tenant_interface: {{ nm.cmp001.nic_private }}
- external_interface: {{ nm.cmp001.nic_public }}
+{%- endif %}
linux:
network:
+{%- if '-fdio-' not in conf.MCP_DEPLOY_SCENARIO %}
+ ovs_nowait: true
bridge: openvswitch
+{%- else %}
+ dpdk:
+ enabled: true
+ driver: "${_param:compute_dpdk_driver}"
+ vpp:
+ enabled: true
+ decimal_interface_names: true
+ # Reuse ovs-dpdk socket mem configuration from IDF
+ dpdk_socket_mem: ${_param:compute_ovs_dpdk_socket_mem}
+ main_core: ${linux:system:kernel:isolcpu}
+ gid: 'neutron'
+{%- endif %}
interface:
pxe_admin_int:
enabled: true
- name: {{ nm.cmp001.nic_admin }}
+ name: ${_param:pxe_admin_interface}
proto: static
type: eth
address: ${_param:pxe_admin_address}
netmask: ${_param:opnfv_net_admin_mask}
mtu: ${_param:interface_mtu}
- primary_interface:
- enabled: true
- name: ${_param:primary_interface}
- proto: manual
- type: eth
- tenant_interface:
- enabled: true
- name: ${_param:tenant_interface}
- mtu: ${_param:interface_mtu}
- proto: manual
- type: eth
+ noifupdown: true
+
+{#- prevent duplicates for tagged mgmt on the same physical interface as PXE/admin #}
+{%- if nm.cmp001.nic_admin in nics %}
+ {%- do nics.pop(nm.cmp001.nic_admin) %}
+{%- endif %}
+
+{{ ma.linux_network_interfaces_nic(nics) }}
+
+{{ ma.linux_network_interfaces_vlan(vlans) }}
+
br-mgmt:
enabled: true
type: bridge
@@ -52,4 +73,4 @@ parameters:
address: ${_param:single_address}
netmask: ${_param:opnfv_net_mgmt_mask}
use_interfaces:
- - ${_param:primary_interface}
+ - {{ ma.interface_str(nm.cmp001.nic_mgmt, nm.vlan_mgmt) }}
diff --git a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_control.yml b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_control.yml.j2
index f458281ce..e383edd16 100644
--- a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_control.yml
+++ b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_control.yml.j2
@@ -8,7 +8,6 @@
---
classes:
- system.linux.system.lowmem
- - system.linux.system.repo.mcp.openstack
- service.nfs.server
- system.ceilometer.client
- system.ceilometer.client.neutron
@@ -26,6 +25,7 @@ classes:
- system.keystone.client.service.aodh
- system.keystone.client.service.gnocchi
- system.keystone.client.service.panko
+ - system.keystone.client.service.barbican
- system.glance.control.single
- system.nova.control.single
- system.cinder.control.single
@@ -34,56 +34,73 @@ classes:
- service.mysql.server.single
- system.galera.server.database.cinder
- system.galera.server.database.glance
- - system.galera.server.database.grafana
+ - system.galera.server.database.neutron
- system.galera.server.database.heat
- system.galera.server.database.keystone
- system.galera.server.database.nova
- - system.galera.server.database.ceilometer
- system.galera.server.database.aodh
- system.galera.server.database.gnocchi
- system.galera.server.database.panko
+ - system.galera.server.database.barbican
+ - system.barbican.server.single
+ - service.barbican.server.plugin.simple_crypto
- service.redis.server.single
- service.ceilometer.server.single
- - system.ceilometer.server.coordination.redis
- system.ceilometer.server.backend.default
- system.aodh.server.single
- - system.aodh.server.coordination.redis
- system.gnocchi.server.single
- - system.gnocchi.common.storage.incoming.redis
- - system.gnocchi.common.storage.redis
- - system.gnocchi.common.coordination.redis
- service.panko.server.single
+ - system.apache.server.site.aodh
- system.apache.server.site.gnocchi
- system.apache.server.site.panko
+ - system.apache.server.site.barbican
+ - system.apache.server.single
- system.horizon.server.single
+ - system.nginx.server.single
+ - system.nginx.server.proxy.openstack_web
- service.haproxy.proxy.single
+{%- if '-sfc-' in conf.MCP_DEPLOY_SCENARIO %}
+ - system.keystone.client.service.tacker
+ - system.galera.server.database.tacker
+ - service.tacker.server.single
+{%- endif %}
+ - cluster.all-mcp-arch-common.backports
- cluster.mcp-common-noha.haproxy_openstack_api
- cluster.mcp-common-noha.openstack_control_pdf
parameters:
_param:
- linux_system_codename: xenial
ceilometer_create_gnocchi_resources: 'True'
+ ceilometer_endpoint_status: absent
+ barbican_integration_enabled: 'false'
linux:
system:
package:
python-msgpack:
version: latest
+ sysfs:
+ transparent_hugepages:
+ kernel/mm/transparent_hugepage/enabled: never
keystone:
server:
admin_email: ${_param:admin_email}
openrc_extra:
- volume_device_name: vdc
+ volume_device_name: sdc
pkgs:
- keystone
- - python-psycopg2
- - python-mysqldb
- - python-openstackclient
- - python-tornado
+ - python3-memcache
+ - python3-openstackclient
client:
enabled: true
resources:
v3:
enabled: true
+ services:
+ ceilometer:
+ status: absent
+ # required only for Rally validation
+ cinder:
+ type: volume
+ description: OpenStack Volume Service
server:
identity:
admin:
@@ -99,10 +116,23 @@ parameters:
engine: file
images: []
workers: 1
+ barbican:
+ enabled: ${_param:barbican_integration_enabled}
+ identity:
+ barbican_endpoint: ${barbican:server:host_href}
+ pkgs:
+ - glance
+ services:
+ - glance-api
+ cinder:
+ controller:
+ barbican:
+ enabled: ${_param:barbican_integration_enabled}
+ pkgs:
+ - cinder-api
+ - cinder-scheduler
nova:
controller:
- networking: dvr
- cpu_allocation: 54
metadata:
password: ${_param:metadata_password}
bind:
@@ -111,6 +141,15 @@ parameters:
novncproxy_port: 6080
vncproxy_url: http://${_param:cluster_vip_address}:6080
workers: 1
+ barbican:
+ enabled: ${_param:barbican_integration_enabled}
+ pkgs:
+ - nova-api
+ - nova-conductor
+ - nova-consoleauth
+ - nova-scheduler
+ - nova-novncproxy
+ - python3-novaclient
horizon:
server:
# yamllint disable-line rule:truthy
@@ -139,10 +178,18 @@ parameters:
~database: ~
redis:
server:
- version: 3.0
+ version: 5.0
appendfsync: 'no'
bind:
address: ${_param:single_address}
+ gnocchi:
+ common:
+ storage:
+ driver: redis
+ redis_url: redis://${_param:single_address}:6379
+ incoming:
+ driver: redis
+ redis_url: redis://${_param:single_address}:6379
nfs:
server:
share:
@@ -159,9 +206,42 @@ parameters:
server:
vlan_aware_vms: true
root_helper_daemon: false
+ agent_down_time: 300
+ global_physnet_mtu: ${_param:interface_mtu}
+ backend:
+ external_mtu: ${_param:interface_mtu}
+{%- if '-bgpvpn-' in conf.MCP_DEPLOY_SCENARIO %}
+ bgp_vpn:
+ enabled: True
+ driver: opendaylight_v2
+{%- endif %}
apache:
server:
+ bind:
+ listen_default_ports: false
site:
- gnocchi:
+ gnocchi: &wsgi_threads
wsgi:
threads: 1
+ barbican:
+ <<: *wsgi_threads
+ barbican_admin:
+ <<: *wsgi_threads
+ mod_wsgi: libapache2-mod-wsgi-py3
+ barbican:
+ server:
+ ks_notifications_enable: true
+ store:
+ software:
+ crypto_plugin: simple_crypto
+ store_plugin: store_crypto
+ global_default: true
+ rabbitmq:
+ server:
+ env_variables:
+ hostname: localhost
+ nginx:
+ server:
+ site:
+ nginx_ssl_redirect_openstack_web:
+ enabled: false
diff --git a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_control_pdf.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_control_pdf.yml.j2
index b0b55afb9..06df3b845 100644
--- a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_control_pdf.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_control_pdf.yml.j2
@@ -6,6 +6,10 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{#- Filter out NIC duplicates by constructing a dict (used NICs only) #}
+{%- set nics = { nm.ctl01.nic_mgmt: True, nm.ctl01.nic_public: True } %}
+{%- set vlans = { nm.vlan_mgmt: nm.ctl01.nic_mgmt, nm.vlan_public: nm.ctl01.nic_public } %}
---
parameters:
linux:
@@ -18,19 +22,35 @@ parameters:
type: eth
address: ${_param:pxe_admin_address}
netmask: ${_param:opnfv_net_admin_mask}
- single_int:
+ mtu: ${_param:interface_mtu}
+ noifupdown: true
+
+{#- prevent duplicates for tagged mgmt on the same physical interface as PXE/admin #}
+{%- if nm.ctl01.nic_admin in nics %}
+ {%- do nics.pop(nm.ctl01.nic_admin) %}
+{%- endif %}
+
+{{ ma.linux_network_interfaces_nic(nics) }}
+
+{{ ma.linux_network_interfaces_vlan(vlans) }}
+
+ br-ctl:
enabled: true
- name: {{ nm.ctl01.nic_mgmt }}
- type: eth
+ type: bridge
proto: static
address: ${_param:single_address}
netmask: ${_param:opnfv_net_mgmt_mask}
- public_int:
+ noifupdown: true
+ use_interfaces:
+ - {{ ma.interface_str(nm.ctl01.nic_mgmt, nm.vlan_mgmt) }}
+ br-ext:
enabled: true
- name: {{ nm.ctl01.nic_public }}
- type: eth
+ type: bridge
proto: static
address: ${_param:cluster_public_host}
netmask: ${_param:opnfv_net_public_mask}
gateway: ${_param:opnfv_net_public_gw}
name_servers: {{ nm.dns_public }}
+ use_interfaces:
+ - {{ ma.interface_str(nm.ctl01.nic_public, nm.vlan_public) }}
+ noifupdown: true
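
The openstack_control_pdf.yml.j2 template above collects the NICs and VLANs it needs as Jinja2 dict keys, then pops the admin NIC so that a tagged management interface riding on the same physical port as PXE/admin is not declared twice. A minimal Python sketch of that dedup idea, with purely illustrative NIC/VLAN values (the real ones come from net_map.j2, i.e. from the PDF/IDF):

    # Hypothetical values; in the template these are nm.ctl01.nic_* and nm.vlan_*.
    nic_admin, nic_mgmt, nic_public = "eth0", "eth0", "eth1"
    vlan_mgmt, vlan_public = "300", "native"

    # Dict keys deduplicate automatically: mgmt and public may share a physical NIC.
    nics = {nic_mgmt: True, nic_public: True}
    vlans = {vlan_mgmt: nic_mgmt, vlan_public: nic_public}

    # pxe_admin_int already declares its own interface entry, so drop it here.
    nics.pop(nic_admin, None)

    print(sorted(nics))   # ['eth1'] - eth0 is left to the pxe_admin_int block
    print(vlans)          # VLAN -> parent NIC map used to emit tagged sub-interfaces
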
diff --git a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_gateway.yml b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_gateway.yml
index 0062adf16..e59263c99 100644
--- a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_gateway.yml
+++ b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_gateway.yml
@@ -7,15 +7,14 @@
##############################################################################
---
classes:
- - system.linux.system.repo.mcp.openstack
+ - cluster.all-mcp-arch-common.backports
- cluster.mcp-common-noha.openstack_gateway_pdf
parameters:
- _param:
- interface_mtu: 9000
- linux_system_codename: xenial
neutron:
gateway:
notification: true
agent_mode: ${_param:neutron_gateway_agent_mode}
vlan_aware_vms: true
root_helper_daemon: false
+ dhcp_lease_duration: 3600
+ report_interval: 120
diff --git a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_gateway_pdf.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_gateway_pdf.yml.j2
index 6bd61a2d4..00d8fbd8c 100644
--- a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_gateway_pdf.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_gateway_pdf.yml.j2
@@ -7,14 +7,39 @@
##############################################################################
---
{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{#- Filter out NIC duplicates by constructing a dict (used NICs only) #}
+{%- set nics = { nm.ctl01.nic_mgmt: True, nm.ctl01.nic_private: True } %}
+{%- set vlans = { nm.vlan_mgmt: nm.ctl01.nic_mgmt } %}
+{%- if '-fdio-' in conf.MCP_DEPLOY_SCENARIO %}
+{%- do nics.update({ nm.ctl01.nic_public: True }) %}
+{%- do vlans.update({ nm.vlan_public: nm.ctl01.nic_public }) %}
+{%- elif '-ovs-' not in conf.MCP_DEPLOY_SCENARIO %}
+{%- set vlan_private_start = (nm.vlan_private | string).rsplit('-')[0] %}
+{%- do vlans.update({ vlan_private_start: nm.ctl01.nic_private }) %}
+{%- endif %}
parameters:
- _param:
- primary_interface: {{ nm.ctl01.nic_mgmt }}
- tenant_interface: {{ nm.ctl01.nic_private }}
- external_interface: {{ nm.ctl01.nic_public }}
linux:
network:
+{%- if '-fdio-' not in conf.MCP_DEPLOY_SCENARIO %}
+{%- set floating_br_type = 'ovs_bridge' %}
bridge: openvswitch
+{%- else %}
+{%- set floating_br_type = 'bridge' %}
+ vpp:
+ enabled: true
+ decimal_interface_names: true
+ # Reuse ovs-dpdk socket mem configuration from IDF
+ dpdk_socket_mem: ${_param:compute_ovs_dpdk_socket_mem}
+ main_core: ${linux:system:kernel:isolcpu}
+ gid: 'neutron'
+ commands: |
+ create tap host-if-name vpp_ext_tap host-bridge br-floating rx-ring-size 1024 tx-ring-size 1024
+ set interface state ${_param:external_vpp_tap} up
+ dpdk:
+ enabled: true
+ driver: "${_param:compute_dpdk_driver}"
+{%- endif %}
interface:
pxe_admin_int:
enabled: true
@@ -24,30 +49,38 @@ parameters:
address: ${_param:pxe_admin_address}
netmask: ${_param:opnfv_net_admin_mask}
mtu: ${_param:interface_mtu}
- primary_interface:
+ noifupdown: true
+
+{#- prevent duplicates for tagged mgmt on the same physical interface as PXE/admin #}
+{%- if nm.ctl01.nic_admin in nics %}
+ {%- do nics.pop(nm.ctl01.nic_admin) %}
+{%- endif %}
+
+{{ ma.linux_network_interfaces_nic(nics) }}
+
+{{ ma.linux_network_interfaces_vlan(vlans) }}
+
+{%- if '-fdio-' not in conf.MCP_DEPLOY_SCENARIO %}
+ {{ ma.interface_str(nm.ctl01.nic_public, nm.vlan_public) }}:
enabled: true
- name: ${_param:primary_interface}
mtu: ${_param:interface_mtu}
proto: manual
- type: eth
- tenant_interface:
- enabled: true
- name: ${_param:tenant_interface}
- mtu: ${_param:interface_mtu}
- proto: manual
- type: eth
- external_interface:
+ ovs_port_type: OVSPort
+ type: ovs_port
+ ovs_bridge: br-floating
+ bridge: br-floating
+{%- endif %}
+ br-floating:
enabled: true
- name: ${_param:external_interface}
- mtu: ${_param:interface_mtu}
- proto: manual
- type: eth
+ type: {{ floating_br_type }}
+ proto: static
+ address: ${_param:external_address}
+ netmask: ${_param:opnfv_net_public_mask}
gateway: ${_param:opnfv_net_public_gw}
name_servers: {{ nm.dns_public }}
- br-floating:
- enabled: true
- type: ovs_bridge
- mtu: ${_param:interface_mtu}
+ use_interfaces:
+ - {{ ma.interface_str(nm.ctl01.nic_public, nm.vlan_public) }}
+ noifupdown: true
br-mgmt:
enabled: true
type: bridge
@@ -56,19 +89,4 @@ parameters:
netmask: ${_param:opnfv_net_mgmt_mask}
mtu: ${_param:interface_mtu}
use_interfaces:
- - ${_param:primary_interface}
- float-to-ex:
- enabled: true
- type: ovs_port
- mtu: ${_param:interface_mtu}
- bridge: br-floating
- br-ex:
- enabled: true
- type: bridge
- mtu: ${_param:interface_mtu}
- address: ${_param:external_address}
- netmask: ${_param:opnfv_net_public_mask}
- use_interfaces:
- - ${_param:external_interface}
- use_ovs_ports:
- - float-to-ex
+ - {{ ma.interface_str(nm.ctl01.nic_mgmt, nm.vlan_mgmt) }}
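
In openstack_gateway_pdf.yml.j2 above, two small string checks drive the layout: br-floating becomes a plain Linux bridge fed by a VPP tap for '-fdio-' scenarios and stays an OVS bridge otherwise, and for non-OVS, non-FDIO scenarios only the lower bound of the private VLAN range is used when tagging the tenant NIC. A short Python sketch of both selections, using made-up scenario and VLAN values:

    # Illustrative inputs; the template reads conf.MCP_DEPLOY_SCENARIO and nm.vlan_private.
    scenario = "os-odl-nofeature-noha"
    vlan_private = "1000-1030"          # may also be a single VLAN ID like "1000"

    # FDIO scenarios terminate the floating network on a Linux bridge backed by VPP.
    floating_br_type = "bridge" if "-fdio-" in scenario else "ovs_bridge"

    # Only the start of a "start-end" range is used for the tagged tenant interface.
    vlan_private_start = str(vlan_private).rsplit("-")[0]

    print(floating_br_type)       # ovs_bridge
    print(vlan_private_start)     # 1000
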
diff --git a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_init.yml.j2 b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_init.yml.j2
index 872156574..3f3ff0ab8 100644
--- a/mcp/reclass/classes/cluster/mcp-common-noha/openstack_init.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-common-noha/openstack_init.yml.j2
@@ -9,9 +9,6 @@
---
parameters:
_param:
- openstack_version: queens
- apt_mk_version: nightly
- mcp_repo_version: 1.1
openstack_region: RegionOne
admin_email: root@localhost
cluster_public_protocol: http
@@ -27,8 +24,6 @@ parameters:
neutron_compute_agent_mode: legacy
neutron_compute_external_access: 'False'
galera_server_cluster_name: openstack_cluster
- galera_server_maintenance_password: opnfv_secret
- galera_server_admin_password: opnfv_secret
cluster_vip_address: ${_param:cluster_public_host}
cluster_local_address: ${_param:openstack_control_address}
cluster_node01_hostname: ctl01
@@ -37,17 +32,12 @@ parameters:
cluster_node02_address: ${_param:opnfv_openstack_control_node02_address}
cluster_node03_hostname: ctl03
cluster_node03_address: ${_param:opnfv_openstack_control_node03_address}
- rabbitmq_secret_key: opnfv_secret
- rabbitmq_admin_password: opnfv_secret
- rabbitmq_openstack_password: opnfv_secret
- rabbitmq_cold_password: opnfv_secret
glance_version: ${_param:openstack_version}
glance_service_host: ${_param:cluster_local_address}
keystone_version: ${_param:openstack_version}
keystone_service_host: ${_param:cluster_local_address}
heat_version: ${_param:openstack_version}
heat_service_host: ${_param:cluster_local_address}
- heat_domain_admin_password: opnfv_secret
ceilometer_version: ${_param:openstack_version}
ceilometer_service_host: ${_param:cluster_local_address}
ceilometer_database_host: ${_param:cluster_local_address}
@@ -59,43 +49,34 @@ parameters:
neutron_version: ${_param:openstack_version}
neutron_service_host: ${_param:cluster_local_address}
mysql_admin_user: root
- mysql_admin_password: opnfv_secret
- mysql_cinder_password: opnfv_secret
- mysql_ceilometer_password: opnfv_secret
- mysql_glance_password: opnfv_secret
- mysql_grafana_password: opnfv_secret
- mysql_heat_password: opnfv_secret
- mysql_keystone_password: opnfv_secret
- mysql_neutron_password: opnfv_secret
- mysql_nova_password: opnfv_secret
- mysql_aodh_password: opnfv_secret
- keystone_service_token: opnfv_secret
- keystone_admin_password: opnfv_secret
- keystone_ceilometer_password: opnfv_secret
- keystone_cinder_password: opnfv_secret
- keystone_glance_password: opnfv_secret
- keystone_heat_password: opnfv_secret
- keystone_keystone_password: opnfv_secret
- keystone_neutron_password: opnfv_secret
- keystone_nova_password: opnfv_secret
- ceilometer_secret_key: opnfv_secret
- metadata_password: opnfv_secret
horizon_version: ${_param:openstack_version}
horizon_secret_key: opaesee8Que2yahJoh9fo0eefo1Aeyo6ahyei8zeiboh3aeth5loth7ieNa5xi5e
horizon_identity_host: ${_param:cluster_vip_address}
horizon_identity_encryption: none
horizon_identity_version: 3
+ horizon_public_protocol: http
+ horizon_public_port: 80
+ apache_mods_status_enabled: false
+ nginx_server_site_nginx_proxy_openstack_web_enabled: true
+ nginx_proxy_ssl:
+ enabled: false
+
+ barbican_version: ${_param:openstack_version}
+ barbican_service_host: ${_param:cluster_local_address}
+ apache_barbican_api_address: ${_param:single_address}
+ barbican_simple_crypto_kek: "YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXoxMjM0NTY="
+ barbican_integration_enabled: true
+
+{%- if '-sfc-' in conf.MCP_DEPLOY_SCENARIO %}
+ tacker_service_host: ${_param:cluster_local_address}
+{%- endif %}
+
aodh_version: ${_param:openstack_version}
- keystone_aodh_password: opnfv_secret
aodh_service_host: ${_param:cluster_local_address}
- gnocchi_version: 4.2
+ gnocchi_version: 4.3
gnocchi_service_host: ${_param:cluster_local_address}
- mysql_gnocchi_password: opnfv_secret
- keystone_gnocchi_password: opnfv_secret
panko_version: ${_param:openstack_version}
panko_service_host: ${_param:cluster_local_address}
- mysql_panko_password: opnfv_secret
- keystone_panko_password: opnfv_secret
ceilometer_agent_default_polling_interval: 180
ceilometer_agent_default_polling_meters:
- "*"
@@ -106,6 +87,23 @@ parameters:
net.ipv4.tcp_congestion_control: yeah
net.ipv4.tcp_slow_start_after_idle: 0
net.ipv4.tcp_fin_timeout: 30
+{%- if 'aarch64' in nm.cluster.arch %}
+ repo:
+ armband_3: # Should be in sync with the repo config generated via curtin/MaaS
+ source: "deb http://linux.enea.com/mcp-repos/${_param:armband_repo_version}/xenial ${_param:armband_repo_version}-armband main"
+ key: ${_param:armband_key}
+ pinning:
+ 15:
+ enabled: true
+ pin: 'release a=${_param:armband_repo_version}-armband'
+ priority: 15
+ package: '*'
+ 1200:
+ enabled: true
+ pin: 'release a=${_param:armband_repo_version}-armband'
+ priority: 1200
+ package: 'qemu-efi'
+{%- endif %}
network:
host:
ctl:
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/config.yml b/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/config.yml
new file mode 100644
index 000000000..1bf5f0014
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/config.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-ha.infra.config
+ - cluster.mcp-fdio-ha.infra
+ - cluster.all-mcp-arch-common.infra.config_pdf
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/init.yml b/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/init.yml
new file mode 100644
index 000000000..3ab122e13
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/init.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-ha.infra
+ - cluster.mcp-fdio-ha.openstack
+parameters:
+ _param:
+ cluster_name: mcp-fdio-ha
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/init_vcp.yml.j2 b/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/init_vcp.yml.j2
new file mode 100644
index 000000000..8ab411876
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/init_vcp.yml.j2
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+{%- if conf.MCP_VCP %}
+classes:
+ - cluster.mcp-fdio-ha.infra
+{%- endif %}
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/kvm.yml.j2 b/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/kvm.yml.j2
new file mode 100644
index 000000000..dbbea5235
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/kvm.yml.j2
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+{%- if conf.MCP_VCP %}
+# NOTE(armband): we don't want to pull in salt.control for novcp
+classes:
+ - cluster.mcp-common-ha.infra.kvm
+ - cluster.mcp-fdio-ha.infra
+{%- endif %}
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/maas.yml b/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/maas.yml
new file mode 100644
index 000000000..55c737f3f
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/infra/maas.yml
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-fdio-ha.infra
+ - cluster.all-mcp-arch-common.infra.maas
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/init.yml b/mcp/reclass/classes/cluster/mcp-fdio-ha/init.yml
new file mode 100644
index 000000000..daeecfcc7
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/init.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.all-mcp-arch-common
+ - cluster.mcp-fdio-ha.infra
+ - cluster.mcp-fdio-ha.openstack
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/compute.yml b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/compute.yml
new file mode 100644
index 000000000..ffc3b5309
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/compute.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-ha.openstack_compute
+ - cluster.mcp-fdio-ha.openstack.compute_pdf
+ - cluster.mcp-fdio-ha.infra
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/compute_pdf.yml.j2 b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/compute_pdf.yml.j2
new file mode 100644
index 000000000..f3844ad8c
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/compute_pdf.yml.j2
@@ -0,0 +1,36 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{#- Filter out NIC duplicates by constructing a dict (used NICs only) #}
+{%- set nics = { nm.cmp001.nic_private: True } %}
+{%- set vlan_private_start = (nm.vlan_private | string).rsplit('-')[0] %}
+{%- set vlans = { vlan_private_start: nm.cmp001.nic_private } %}
+---
+parameters:
+ linux:
+ network:
+ interface:
+
+{#- prevent duplicates for tagged mgmt on the same physical interface as PXE/admin #}
+{%- if nm.cmp001.nic_admin in nics %}
+ {%- do nics.pop(nm.cmp001.nic_admin) %}
+{%- endif %}
+
+{{ ma.linux_network_interfaces_nic(nics) }}
+
+{{ ma.linux_network_interfaces_vlan(vlans) }}
+
+ br-mesh:
+ enabled: true
+ type: bridge
+ address: ${_param:tenant_address}
+ netmask: ${_param:opnfv_net_private_mask}
+ mtu: 1500
+ use_interfaces:
+ - {{ ma.interface_str(nm.cmp001.nic_private, vlan_private_start) }}
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/control.yml b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/control.yml
new file mode 100644
index 000000000..a9bb44907
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/control.yml
@@ -0,0 +1,13 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - system.neutron.control.openvswitch.cluster
+ - cluster.mcp-common-ha.openstack_interface_vcp_biport
+ - cluster.mcp-common-ha.openstack_control
+ - cluster.mcp-fdio-ha.infra
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/database.yml b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/database.yml
new file mode 100644
index 000000000..7ba9e5660
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/database.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-ha.openstack_interface_vcp_biport
+ - cluster.mcp-common-ha.openstack_database
+ - cluster.mcp-fdio-ha.infra.init_vcp
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/init.yml b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/init.yml
new file mode 100644
index 000000000..8aa203d0c
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/init.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-ha.openstack_init
+ - cluster.all-mcp-arch-common.fdio_repo
+parameters:
+ _param:
+ neutron_tenant_network_types: "flat,vxlan"
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/message_queue.yml b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/message_queue.yml
new file mode 100644
index 000000000..175215502
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/message_queue.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-ha.openstack_interface_vcp_biport
+ - cluster.mcp-common-ha.openstack_message_queue
+ - cluster.mcp-fdio-ha.infra.init_vcp
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/proxy.yml b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/proxy.yml
new file mode 100644
index 000000000..bafbbaa6a
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/proxy.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-ha.openstack_interface_vcp_triport
+ - cluster.mcp-common-ha.openstack_proxy
+ - cluster.mcp-fdio-ha.infra.init_vcp
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/telemetry.yml b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/telemetry.yml
new file mode 100644
index 000000000..9cb8fcd5e
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-ha/openstack/telemetry.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-ha.openstack_interface_vcp_biport
+ - cluster.mcp-common-ha.openstack_telemetry
+ - cluster.mcp-fdio-ha.infra.init_vcp
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-noha/infra/config.yml.j2 b/mcp/reclass/classes/cluster/mcp-fdio-noha/infra/config.yml.j2
new file mode 100644
index 000000000..0c5eef8c2
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-noha/infra/config.yml.j2
@@ -0,0 +1,58 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{#- Until SDF is implemented, hardcode gtw01 node index in IDF as ctl01 +1 #}
+{%- set gi = nm.ctl01.idx + 1 %}
+---
+classes:
+ - system.reclass.storage.system.openstack_gateway_single
+ - cluster.mcp-common-noha.infra.config
+ - cluster.mcp-fdio-noha
+ - cluster.all-mcp-arch-common.infra.config_pdf
+parameters:
+ reclass:
+ storage:
+ node:
+ openstack_gateway_node01:
+ params:
+ tenant_address: ${_param:opnfv_openstack_gateway_node01_tenant_address}
+ external_address: ${_param:opnfv_openstack_gateway_node01_external_address}
+ pxe_admin_address: ${_param:opnfv_openstack_gateway_node01_pxe_admin_address}
+{%- if '-fdio-' in conf.MCP_DEPLOY_SCENARIO %}
+{%- set private_speed = conf.nodes[gi].interfaces[nm.idx_private].speed %}
+{%- set private_pci = conf.idf.fuel.network.node[gi].busaddr[nm.idx_private] %}
+ # We reuse compute-specific configuration from IDF, so we don't have
+ # to rework everything in both Pharos and Fuel
+ # However, OVS-related configuration is unused and only DPDK is relevant
+ {%- if conf.idf.fuel.reclass is defined %}
+ {%- if conf.idf.fuel.reclass.node[gi].compute_params.dpdk is defined %}
+ {#- Can't dump json here due to dpdk0_* below, explicitly create yaml #}
+ {%- set _dpdk = conf.idf.fuel.reclass.node[gi].compute_params.dpdk %}
+ {%- set private_drv = _dpdk.dpdk0_driver %}
+ {%- for _i in _dpdk %}
+ {{ _i }}: '"{{ _dpdk[_i] }}"'
+ {%- endfor %}
+ {%- endif %}
+ {%- else %}
+ compute_hugepages_size: 2M
+ compute_hugepages_count: 13312
+ compute_hugepages_mount: /mnt/hugepages_2M
+ compute_kernel_isolcpu: 3,8,9,10,11
+ compute_dpdk_driver: uio
+ compute_ovs_pmd_cpu_mask: '"0x708"'
+ compute_ovs_dpdk_socket_mem: '"4096,4096"'
+ compute_ovs_dpdk_lcore_mask: '"0x800"'
+ compute_ovs_memory_channels: '"2"'
+ dpdk0_driver: igb_uio
+ dpdk0_n_rxq: 2
+ {%- endif %}
+ dpdk0_name: {{ conf.idf.fuel.network.node[gi].interfaces[nm.idx_private] }}
+ dpdk0_pci: '"{{ conf.idf.fuel.network.node[gi].busaddr[nm.idx_private] }}"'
+ dpdk0_vpp: {{ ma.vpp_interface_str(private_speed, private_pci, private_drv or '') }}
+{%- endif %}
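
The mcp-fdio-noha infra/config.yml.j2 hunk above gives the gateway node its DPDK parameters in one of two ways: if the IDF carries per-node reclass overrides (conf.idf.fuel.reclass.node[gi].compute_params.dpdk) they are re-emitted one key per line, otherwise a hardcoded default set is used; values end up quoted so reclass keeps them as strings. A simplified Python sketch of that fallback, where gateway_dpdk_params is a made-up helper name and the defaults are a subset of the ones in the hunk:

    # Subset of the hardcoded defaults from the template's fallback branch.
    DPDK_DEFAULTS = {
        "compute_dpdk_driver": "uio",
        "dpdk0_driver": "igb_uio",
        "dpdk0_n_rxq": 2,
    }

    def gateway_dpdk_params(idf_dpdk=None):
        """Per-node IDF overrides win; otherwise fall back to the defaults."""
        params = dict(idf_dpdk) if idf_dpdk else dict(DPDK_DEFAULTS)
        # The template wraps values in quotes ('"..."') so reclass keeps them verbatim.
        return {key: '"{}"'.format(value) for key, value in params.items()}

    print(gateway_dpdk_params())                                   # defaults
    print(gateway_dpdk_params({"dpdk0_driver": "vfio-pci"}))       # IDF-provided
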
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-noha/infra/init.yml b/mcp/reclass/classes/cluster/mcp-fdio-noha/infra/init.yml
new file mode 100644
index 000000000..b1d890dfb
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-noha/infra/init.yml
@@ -0,0 +1,14 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-noha.infra
+
+parameters:
+ _param:
+ cluster_name: mcp-fdio-noha
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-noha/infra/maas.yml b/mcp/reclass/classes/cluster/mcp-fdio-noha/infra/maas.yml
new file mode 100644
index 000000000..e64e9a137
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-noha/infra/maas.yml
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-fdio-noha.infra
+ - cluster.all-mcp-arch-common.infra.maas
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-noha/init.yml b/mcp/reclass/classes/cluster/mcp-fdio-noha/init.yml
new file mode 100644
index 000000000..6e4f952fd
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-noha/init.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-noha.init_options
+ - cluster.mcp-fdio-noha.infra
+ - cluster.mcp-fdio-noha.openstack
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/compute.yml.j2 b/mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/compute.yml.j2
new file mode 100644
index 000000000..14b8a268b
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/compute.yml.j2
@@ -0,0 +1,68 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+---
+classes:
+ - service.neutron.compute.single
+ - system.nova.compute.nfv.hugepages
+ - cluster.mcp-common-noha.openstack_compute
+ - cluster.mcp-fdio-noha
+parameters:
+ nova:
+ compute:
+ # yamllint disable-line rule:truthy
+ vif_plugging_is_fatal: False
+ vif_plugging_timeout: 10
+ neutron:
+ compute:
+ backend:
+ router: 'vpp-router'
+ tenant_network_types: "${_param:neutron_tenant_network_types}"
+ ~mechanism:
+ vpp:
+ driver: vpp
+ etcd_port: ${_param:node_port}
+ etcd_host: ${_param:node_address}
+ l3_hosts: ${_param:openstack_gateway_node01_hostname}
+ physnets:
+ physnet1:
+ vpp_interface: ${_param:external_vpp_tap}
+ physnet2:
+ vpp_interface: ${_param:dpdk0_vpp}
+ linux:
+ system:
+ kernel:
+ isolcpu: 1 # NOTE: Hardcoded for now
+ boot_options:
+ - spectre_v2=off
+ - nopti
+ - kpti=off
+ - nospec_store_bypass_disable
+ - noibrs
+ - noibpb
+ - intel_iommu=on
+ - iommu=pt
+ - nohz_full=${linux:system:kernel:isolcpu}
+ - rcu_nocbs=${linux:system:kernel:isolcpu}
+ - iommu.passthrough=1
+ network:
+ interface:
+ dpdk0:
+ name: ${_param:dpdk0_name}
+ pci: ${_param:dpdk0_pci}
+ driver: ${_param:dpdk0_driver}
+ enabled: true
+ type: dpdk_vpp_port
+ mtu: ${_param:interface_mtu}
+ {{ nm.cmp001.nic_private }}:
+ type: dpdk # Not a meaningful type, just match 'dpdk' for filtering
+ pxe_admin_int:
+ # For scenarios without public network on cmp, set admin gw
+ gateway: {{ nm.net_admin_gw }}
+ name_servers:
+ - {{ nm.net_admin_gw }}
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/control.yml b/mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/control.yml
new file mode 100644
index 000000000..afce77f4b
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/control.yml
@@ -0,0 +1,76 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-noha.openstack_control
+ - cluster.mcp-fdio-noha
+ - system.neutron.control.single
+ - service.etcd.server.single
+ - system.galera.server.database.neutron
+# NOTE: All this configuration should later be moved to reclass.system as
+# neutron.control.vpp.single
+parameters:
+ _param:
+ # yamllint disable rule:truthy
+ neutron_control_dvr: True
+ neutron_l3_ha: False
+ neutron_enable_qos: False
+ neutron_enable_vlan_aware_vms: False
+ neutron_enable_bgp_vpn: False
+ # yamllint enable rule:truthy
+ neutron_global_physnet_mtu: 1500
+ neutron_external_mtu: 1500
+ neutron_bgp_vpn_driver: bagpipe
+ internal_protocol: 'http'
+ neutron_firewall_driver: 'iptables_hybrid'
+ openstack_node_role: primary
+ nova:
+ controller:
+ # yamllint disable-line rule:truthy
+ vif_plugging_is_fatal: False
+ vif_plugging_timeout: 10
+ neutron:
+ server:
+ role: ${_param:openstack_node_role}
+ global_physnet_mtu: ${_param:neutron_global_physnet_mtu}
+ l3_ha: ${_param:neutron_l3_ha}
+ dvr: ${_param:neutron_control_dvr}
+ qos: ${_param:neutron_enable_qos}
+ vlan_aware_vms: ${_param:neutron_enable_vlan_aware_vms}
+ firewall_driver: ${_param:neutron_firewall_driver}
+ bgp_vpn:
+ enabled: ${_param:neutron_enable_bgp_vpn}
+ driver: ${_param:neutron_bgp_vpn_driver}
+ backend:
+ engine: ml2
+ router: 'vpp-router'
+ tenant_network_types: "${_param:neutron_tenant_network_types}"
+ external_mtu: ${_param:neutron_external_mtu}
+ mechanism:
+ vpp:
+ driver: vpp
+ etcd_port: ${_param:node_port}
+ etcd_host: ${_param:node_address}
+ l3_hosts: ${_param:openstack_gateway_node01_hostname}
+ physnets:
+ physnet1:
+ vpp_interface: ${_param:external_vpp_tap}
+ physnet2:
+ # NOTE: Not a meaningful interface name, just avoid a filter-out
+ vpp_interface: 'dummy'
+ vlan_range: '${_param:opnfv_net_tenant_vlan}'
+ compute:
+ region: ${_param:openstack_region}
+ database:
+ host: ${_param:openstack_database_address}
+ identity:
+ region: ${_param:openstack_region}
+ protocol: ${_param:internal_protocol}
+ message_queue:
+ members:
+ - host: ${_param:single_address}
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/gateway.yml b/mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/gateway.yml
new file mode 100644
index 000000000..c330b677c
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/gateway.yml
@@ -0,0 +1,68 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-noha.openstack_gateway
+ - service.neutron.gateway.single
+ - cluster.mcp-fdio-noha
+parameters:
+ _param:
+ compute_hugepages_size: 1G
+ compute_hugepages_mount: /mnt/hugepages_1G
+ neutron:
+ gateway:
+ agents:
+ l3:
+ interface_driver: neutron.agent.linux.interface.BridgeInterfaceDriver
+ dhcp:
+ interface_driver: neutron.agent.linux.interface.BridgeInterfaceDriver
+ backend:
+ router: 'vpp-router'
+ tenant_network_types: "${_param:neutron_tenant_network_types}"
+ ~mechanism:
+ vpp:
+ driver: vpp
+ etcd_port: ${_param:node_port}
+ etcd_host: ${_param:node_address}
+ l3_hosts: ${_param:openstack_gateway_node01_hostname}
+ physnets:
+ physnet1:
+ vpp_interface: ${_param:external_vpp_tap}
+ physnet2:
+ vpp_interface: ${_param:dpdk0_vpp}
+ linux:
+ system:
+ kernel:
+ hugepages:
+ large:
+ default: true
+ size: ${_param:compute_hugepages_size}
+ count: ${_param:compute_hugepages_count}
+ mount_point: ${_param:compute_hugepages_mount}
+ isolcpu: 1 # NOTE: Hardcoded for now
+ boot_options:
+ - spectre_v2=off
+ - nopti
+ - kpti=off
+ - nospec_store_bypass_disable
+ - noibrs
+ - noibpb
+ - intel_iommu=on
+ - iommu=pt
+ - nohz_full=${linux:system:kernel:isolcpu}
+ - rcu_nocbs=${linux:system:kernel:isolcpu}
+ - iommu.passthrough=1
+ network:
+ interface:
+ dpdk0:
+ name: ${_param:dpdk0_name}
+ pci: ${_param:dpdk0_pci}
+ driver: ${_param:dpdk0_driver}
+ enabled: true
+ type: dpdk_vpp_port
+ mtu: ${_param:interface_mtu}
diff --git a/mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/init.yml b/mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/init.yml
new file mode 100644
index 000000000..858da65a7
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-fdio-noha/openstack/init.yml
@@ -0,0 +1,30 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-noha.openstack_init
+ - cluster.all-mcp-arch-common.fdio_repo
+parameters:
+ _param:
+ openstack_gateway_node01_hostname: 'gtw01'
+ neutron_tenant_network_types: "vlan"
+ etcd_initial_token: ${_param:opnfv_main_password}
+ node_address: ${_param:cluster_node01_address}
+ node_hostname: ${_param:cluster_node01_hostname}
+ node_port: 4001
+ external_vpp_tap: 'tap0'
+ linux:
+ system:
+ file:
+ /etc/systemd/network/99-default.link:
+ contents: |
+ # Workaround tap/bridge MAC generation issue
+ # https://github.com/systemd/systemd/issues/3374
+ [Link]
+ NamePolicy=kernel database onboard slot path
+ MACAddressPolicy=none
diff --git a/mcp/reclass/classes/cluster/mcp-iec-noha/akraino/iec.yml.j2 b/mcp/reclass/classes/cluster/mcp-iec-noha/akraino/iec.yml.j2
new file mode 100644
index 000000000..ccd378b73
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-iec-noha/akraino/iec.yml.j2
@@ -0,0 +1,117 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{#- Filter out NIC duplicates by constructing a dict (used NICs only) #}
+{%- if '-vcp-' in conf.MCP_DEPLOY_SCENARIO %}
+{%- set nics = {} %}
+{%- set vlans = {} %}
+{%- else %}
+{%- set nics = { nm.ctl01.nic_mgmt: True, nm.ctl01.nic_public: True } %}
+{%- set vlans = { nm.vlan_mgmt: nm.ctl01.nic_mgmt, nm.vlan_public: nm.ctl01.nic_public } %}
+{%- endif %}
+---
+classes:
+ - cluster.mcp-iec-noha
+ - cluster.all-mcp-arch-common.opnfv.maas_proxy
+ - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
+parameters:
+ _param:
+{%- if '-vcp-' in conf.MCP_DEPLOY_SCENARIO %}
+ pxe_admin_interface: ${_param:opnfv_vcp_vm_primary_interface}
+ external_nic: ${_param:opnfv_vcp_vm_secondary_interface}
+ single_nic: ${_param:opnfv_vcp_vm_tertiary_interface}
+{%- else %}
+ pxe_admin_interface: {{ nm.ctl01.nic_admin }}
+ external_nic: {{ ma.interface_str(nm.ctl01.nic_public, nm.vlan_public) }}
+ single_nic: {{ ma.interface_str(nm.ctl01.nic_mgmt, nm.vlan_mgmt) }}
+{%- endif %}
+ linux:
+{%- if 'centos' not in conf.MCP_OS %}
+{%- set proto_manual = 'manual' %}
+{%- else %}
+{%- set proto_manual = 'none' %}
+ system:
+ file:
+ /etc/gshadow:
+ group: root
+ /etc/shadow:
+ group: root
+ /etc/udev/rules.d/70-persistent-net.rules:
+ contents: ''
+{%- if conf.nodes[nm.ctl01.idx].node.arch == 'aarch64' %}
+ /etc/modprobe.d/vfat.conf:
+ contents: ''
+ /boot/efi/EFI/centos/grub.cfg:
+ source: /boot/grub2/grub.cfg
+{%- endif %}
+ kernel:
+ boot_options:
+ - net.ifnames=1
+ - biosdevname=1
+ modules:
+ - br_netfilter
+ at:
+ enabled: False
+ cron:
+ enabled: False
+{%- endif %}
+ network:
+ interface:
+ pxe_admin_int:
+ enabled: true
+ name: ${_param:pxe_admin_interface}
+ type: eth
+ address: ${_param:pxe_admin_address}
+ netmask: ${_param:opnfv_net_admin_mask}
+ mtu: ${_param:interface_mtu}
+ noifupdown: true
+
+{#- prevent duplicates for tagged mgmt on the same physical interface as PXE/admin #}
+{%- if nm.ctl01.nic_admin in nics %}
+ {%- do nics.pop(nm.ctl01.nic_admin) %}
+{%- endif %}
+
+{{ ma.linux_network_interfaces_nic(nics, proto_manual) }}
+
+{{ ma.linux_network_interfaces_vlan(vlans, proto_manual) }}
+
+{%- if '-vcp-' in conf.MCP_DEPLOY_SCENARIO %}
+ single:
+ enabled: true
+ type: eth
+ proto: {{ proto_manual }}
+ name: ${_param:single_nic}
+ mtu: ${_param:interface_mtu}
+ external:
+ enabled: true
+ type: eth
+ proto: {{ proto_manual }}
+ name: ${_param:external_nic}
+ mtu: ${_param:interface_mtu}
+{%- endif %}
+ br-ctl:
+ enabled: true
+ type: bridge
+ address: ${_param:single_address}
+ netmask: ${_param:opnfv_net_mgmt_mask}
+ noifupdown: true
+ use_interfaces:
+ - ${_param:single_nic}
+ mtu: ${_param:interface_mtu}
+ br-ex:
+ enabled: true
+ type: bridge
+ address: ${_param:external_address}
+ netmask: ${_param:opnfv_net_public_mask}
+ noifupdown: true
+ use_interfaces:
+ - ${_param:external_nic}
+ mtu: ${_param:interface_mtu}
+ gateway: ${_param:opnfv_net_public_gw}
+ name_servers: {{ nm.dns_public }}
diff --git a/mcp/reclass/classes/cluster/mcp-iec-noha/akraino/init.yml b/mcp/reclass/classes/cluster/mcp-iec-noha/akraino/init.yml
new file mode 100644
index 000000000..18bdf215a
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-iec-noha/akraino/init.yml
@@ -0,0 +1,32 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+parameters:
+ _param:
+ # NOTE(armband): reuse Openstack definitions
+ akraino_iec_node01_address: ${_param:opnfv_openstack_control_node01_address}
+ akraino_iec_node02_address: ${_param:opnfv_openstack_control_node02_address}
+ akraino_iec_node03_address: ${_param:opnfv_openstack_control_node03_address}
+ linux:
+ network:
+ host:
+ iec01:
+ address: ${_param:akraino_iec_node01_address}
+ names:
+ - iec01
+ - iec01.${_param:cluster_domain}
+ iec02:
+ address: ${_param:akraino_iec_node02_address}
+ names:
+ - iec02
+ - iec02.${_param:cluster_domain}
+ iec03:
+ address: ${_param:akraino_iec_node03_address}
+ names:
+ - iec03
+ - iec03.${_param:cluster_domain}
diff --git a/mcp/reclass/classes/cluster/mcp-iec-noha/infra/config.yml.j2 b/mcp/reclass/classes/cluster/mcp-iec-noha/infra/config.yml.j2
new file mode 100644
index 000000000..9a1c9b8d1
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-iec-noha/infra/config.yml.j2
@@ -0,0 +1,57 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+---
+classes:
+ - cluster.mcp-common-noha.infra.config
+ - cluster.mcp-iec-noha
+ - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
+ - cluster.all-mcp-arch-common.infra.config_pdf
+parameters:
+ reclass:
+ storage:
+ ~node:
+{%- if nm.cluster.has_baremetal_nodes %}
+{#- Since we overwrite the ~node key, we need to re-add the MaaS node explicitly #}
+ infra_maas_node01:
+ name: ${_param:infra_maas_node01_hostname}
+ domain: ${_param:cluster_domain}
+ classes:
+ - cluster.${_param:cluster_name}.infra.maas
+ params:
+ salt_master_host: ${_param:infra_config_deploy_address}
+ linux_system_codename: ${_param:infra_maas_system_codename}
+ single_address: ${_param:infra_maas_node01_deploy_address}
+{%- endif %}
+{%- if '-vcp-' in conf.MCP_DEPLOY_SCENARIO %}
+{%- for i in range(1, 4) %}
+ infra_kvm_node0{{ i }}:
+ name: ${_param:infra_kvm_node0{{ i }}_hostname}
+ domain: ${_param:cluster_domain}
+ classes:
+ - cluster.${_param:cluster_name}.infra.kvm
+ params:
+ salt_master_host: ${_param:reclass_config_master}
+ linux_system_codename: ${_param:linux_system_codename}
+ single_address: ${_param:opnfv_infra_kvm_node0{{ i }}_address}
+ pxe_admin_address: ${_param:opnfv_infra_kvm_node0{{ i }}_pxe_admin_address}
+{%- endfor %}
+{%- endif %}
+{%- for i in range(1, 4) %}
+ akraino_iec_node0{{ i }}:
+ name: ${_param:akraino_iec_node0{{ i }}_hostname}
+ domain: ${_param:cluster_domain}
+ classes:
+ - cluster.${_param:cluster_name}.akraino.iec
+ params:
+ salt_master_host: ${_param:reclass_config_master}
+ linux_system_codename: ${_param:linux_system_codename}
+ single_address: ${_param:opnfv_openstack_control_node0{{ i }}_address}
+ external_address: ${_param:opnfv_openstack_control_node0{{ i }}_external_address}
+ pxe_admin_address: ${_param:opnfv_openstack_control_node0{{ i }}_pxe_admin_address}
+{%- endfor %}
diff --git a/mcp/reclass/classes/cluster/mcp-iec-noha/infra/init.yml.j2 b/mcp/reclass/classes/cluster/mcp-iec-noha/infra/init.yml.j2
new file mode 100644
index 000000000..1b68b6c44
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-iec-noha/infra/init.yml.j2
@@ -0,0 +1,31 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-noha.infra
+parameters:
+ _param:
+ cluster_name: mcp-iec-noha
+ infra_kvm_node01_hostname: kvm01
+ infra_kvm_node02_hostname: kvm02
+ infra_kvm_node03_hostname: kvm03
+ akraino_iec_node01_hostname: iec01
+ akraino_iec_node02_hostname: iec02
+ akraino_iec_node03_hostname: iec03
+{%- if '-vcp-' in conf.MCP_DEPLOY_SCENARIO %}
+ linux:
+ network:
+ host:
+{%- for i in range(1, 4) %}
+ kvm0{{ i }}:
+ address: ${_param:opnfv_infra_kvm_node0{{ i }}_address}
+ names:
+ - ${_param:infra_kvm_node0{{ i }}_hostname}
+ - ${_param:infra_kvm_node0{{ i }}_hostname}.${_param:cluster_domain}
+{%- endfor %}
+{%- endif %}
diff --git a/mcp/reclass/classes/cluster/mcp-iec-noha/infra/kvm.yml.j2 b/mcp/reclass/classes/cluster/mcp-iec-noha/infra/kvm.yml.j2
new file mode 100644
index 000000000..34372c69c
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-iec-noha/infra/kvm.yml.j2
@@ -0,0 +1,112 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{#- NOTE: br-{mgmt,ctl} are cross-referenced, careful when changing names #}
+{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{#- Filter out NIC duplicates by constructing a dict (used NICs only) #}
+{%- set nics = { nm.ctl01.nic_admin: True, nm.ctl01.nic_mgmt: True, nm.ctl01.nic_public: True } %}
+{%- set vlans = { nm.vlan_admin: nm.ctl01.nic_admin, nm.vlan_mgmt: nm.ctl01.nic_mgmt, nm.vlan_public: nm.ctl01.nic_public } %}
+---
+classes:
+ - system.salt.control.virt
+ - cluster.all-mcp-arch-common.opnfv.maas_proxy
+ - cluster.all-mcp-arch-common.opnfv.lab_proxy_pdf
+ - cluster.mcp-iec-noha.infra
+parameters:
+ _param:
+ linux_system_codename: bionic
+ linux:
+ network:
+ interface:
+
+{{ ma.linux_network_interfaces_nic(nics) }}
+
+{{ ma.linux_network_interfaces_vlan(vlans) }}
+
+ br-mgmt:
+ enabled: true
+ proto: static
+ address: ${_param:pxe_admin_address}
+ netmask: ${_param:opnfv_net_admin_mask}
+ gateway: {{ nm.net_admin_gw }}
+ name_servers:
+ - {{ nm.net_admin_gw }}
+ type: bridge
+ use_interfaces:
+ - {{ ma.interface_str(nm.ctl01.nic_admin, nm.vlan_admin) }}
+ noifupdown: true
+ br-ctl:
+ enabled: true
+ type: bridge
+ proto: static
+ address: ${_param:single_address}
+ netmask: ${_param:opnfv_net_mgmt_mask}
+ use_interfaces:
+ - {{ ma.interface_str(nm.ctl01.nic_mgmt, nm.vlan_mgmt) }}
+ noifupdown: true
+ br-ex:
+ enabled: true
+ proto: manual
+ netmask: ${_param:opnfv_net_public_mask}
+ type: bridge
+ use_interfaces:
+ - {{ ma.interface_str(nm.ctl01.nic_public, nm.vlan_public) }}
+ noifupdown: true
+ system:
+ kernel:
+ boot_options:
+ - spectre_v2=off
+ - nopti
+ - kpti=off
+ sysctl:
+ net.ipv4.ip_forward: 0
+ libvirt:
+ server:
+ service: libvirtd
+ config_sys: /etc/default/libvirtd
+ unix_sock_group: libvirt
+ salt:
+ control:
+ size:
+ akraino.iec:
+ cpu: 8
+ ram: 12288
+ disk_profile: small
+ net_profile: default
+ cluster:
+ internal:
+ domain: ${_param:cluster_domain}
+ engine: virt
+ node:
+{%- for i in range(1, 4) %}
+ iec0{{ i }}:
+ name: ${_param:akraino_iec_node0{{ i }}_hostname}
+ provider: ${_param:infra_kvm_node0{{ i }}_hostname}.${_param:cluster_domain}
+ size: akraino.iec
+ image: ${_param:salt_control_bionic_image}
+{%- if conf.nodes[nm.ctl01.idx].node.arch == 'aarch64' %}
+ machine: virt
+ cpu_mode: host-passthrough
+ loader:
+ readonly: 'yes'
+ type: pflash
+ path: /usr/share/AAVMF/AAVMF_CODE.fd
+{%- endif %}
+{%- endfor %}
+ virt:
+ nic:
+ default:
+ eth2:
+ bridge: br-mgmt
+ model: virtio
+ eth1:
+ bridge: br-ex
+ model: virtio
+ eth0:
+ bridge: br-ctl
+ model: virtio
diff --git a/mcp/reclass/classes/cluster/mcp-iec-noha/infra/maas.yml b/mcp/reclass/classes/cluster/mcp-iec-noha/infra/maas.yml
new file mode 100644
index 000000000..393eb73a4
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-iec-noha/infra/maas.yml
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-iec-noha.infra
+ - cluster.all-mcp-arch-common.infra.maas
diff --git a/mcp/reclass/classes/cluster/mcp-iec-noha/init.yml b/mcp/reclass/classes/cluster/mcp-iec-noha/init.yml
new file mode 100644
index 000000000..e0224ebac
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-iec-noha/init.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-noha.init_options
+ - cluster.mcp-iec-noha.infra
+ - cluster.mcp-iec-noha.akraino
diff --git a/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/infra/config.yml.j2 b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/infra/config.yml.j2
new file mode 100644
index 000000000..e3cd67bdf
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/infra/config.yml.j2
@@ -0,0 +1,79 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+---
+classes:
+ - system.reclass.storage.salt
+ - system.reclass.storage.system.kubernetes_control_single
+{%- if nm.cluster.has_baremetal_nodes %}
+ - system.reclass.storage.system.infra_maas_single
+{%- endif %}
+ - system.salt.master.api
+ - system.salt.master.single
+ - system.salt.minion.ca.salt_master
+ - system.salt.minion.cert.k8s_server_single
+ - cluster.mcp-k8s-calico-noha
+parameters:
+ _param:
+ salt_master_environment_repository: 'https://github.com/salt-formulas'
+ salt_master_environment_revision: master
+ reclass_data_repository: local
+ salt_master_base_environment: prd
+ salt_master_host: 127.0.0.1
+ salt_minion_ca_host: ${linux:network:fqdn}
+ # yamllint disable-line rule:line-length
+ salt_api_password_hash: "$6$sGnRlxGf$al5jMCetLP.vfI/fTl3Z0N7Za1aeiexL487jAtyRABVfT3NlwZxQGVhO7S1N8OwS/34VHYwZQA8lkXwKMN/GS1"
+ kubernetes_control_node01_deploy_address: ${_param:opnfv_openstack_control_node01_pxe_admin_address}
+ kubernetes_control_system_codename: bionic
+ linux:
+ system:
+ user:
+ salt:
+ home: /home/salt
+ salt:
+ master:
+ accept_policy: open_mode
+ file_recv: true
+ worker_threads: 4
+ command_timeout: 20
+ minion:
+ mine:
+ module:
+ x509.get_pem_entries: ['/etc/pki/all_cas/*']
+ reclass:
+ storage:
+ data_source:
+ engine: local
+ node:
+ kubernetes_control_node01:
+ params:
+ pxe_admin_interface: {{ nm.ctl01.nic_admin }}
+ pxe_admin_address: ${_param:opnfv_openstack_control_node01_pxe_admin_address}
+ # We support per-node (not only per-role) compute configuration via IDF
+{%- for cmp in range(1, nm.cmp_nodes + 1) %}
+ {%- set n = '%02d' | format(cmp) %}
+ {%- set i = nm.cmp001.idx + cmp - 1 %}
+
+ {%- set admin = nm.net_admin_hosts | length + nm.start_ip[nm.net_admin] + loop.index %}
+ {%- set mgmt = nm.net_mgmt_hosts | length + nm.start_ip[nm.net_mgmt] + loop.index %}
+ {%- set pub = nm.net_public_hosts | length + nm.start_ip[nm.net_public] + loop.index %}
+ {%- set pri = nm.net_private_hosts | length + nm.start_ip[nm.net_private] + loop.index %}
+ kubernetes_compute_node{{ n }}:
+ name: cmp{{ '%03d' | format(cmp) }}
+ domain: ${_param:cluster_domain}
+ classes:
+ - cluster.${_param:cluster_name}.kubernetes.compute
+ params:
+ pxe_admin_address: {{ nm.net_admin | ipnet_hostaddr(admin) }}
+ pxe_admin_interface: {{ conf.idf.fuel.network.node[i].interfaces[nm.idx_admin] }}
+ single_address: {{ nm.net_mgmt | ipnet_hostaddr(mgmt) }}
+ tenant_address: {{ nm.net_private | ipnet_hostaddr(pri) }}
+ external_address: {{ nm.net_public | ipnet_hostaddr(pub) }}
+ salt_master_host: ${_param:reclass_config_master}
+ linux_system_codename: ${_param:kubernetes_control_system_codename}
+{%- endfor %}
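
The per-compute loop above derives each node's addresses by adding up an offset (the number of reserved hosts in that network, the configured start IP, and the loop index) and handing it to the ipnet_hostaddr filter. Below is a minimal Python sketch of that arithmetic, assuming ipnet_hostaddr simply returns the n-th address inside the CIDR; the network and offsets are made-up example values, not project data.

import ipaddress

def ipnet_hostaddr(net_cidr, idx):
    # Assumed behaviour: return the idx-th address inside net_cidr.
    net = ipaddress.ip_network(net_cidr)
    return str(net.network_address + idx)

reserved_hosts = 10    # stand-in for 'nm.net_admin_hosts | length'
start_ip = 90          # stand-in for 'nm.start_ip[nm.net_admin]'
for loop_index in (1, 2):                      # cmp001, cmp002
    idx = reserved_hosts + start_ip + loop_index
    print('cmp%03d pxe_admin_address: %s'
          % (loop_index, ipnet_hostaddr('192.168.11.0/24', idx)))
# -> cmp001 pxe_admin_address: 192.168.11.101
# -> cmp002 pxe_admin_address: 192.168.11.102
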
diff --git a/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/infra/init.yml b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/infra/init.yml
new file mode 100644
index 000000000..b01eeeda1
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/infra/init.yml
@@ -0,0 +1,13 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-noha.infra
+parameters:
+ _param:
+ cluster_name: mcp-k8s-calico-noha
diff --git a/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/init.yml b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/init.yml
new file mode 100644
index 000000000..f464dca54
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/init.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-common-noha.init_options
+ - cluster.mcp-k8s-calico-noha.kubernetes
+ - cluster.mcp-k8s-calico-noha.infra
diff --git a/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/common.yml.j2 b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/common.yml.j2
new file mode 100644
index 000000000..bd6e48fce
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/common.yml.j2
@@ -0,0 +1,75 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+---
+classes:
+ - system.kubernetes.pool.single
+ - system.salt.minion.cert.k8s_client_single
+ - system.salt.minion.cert.etcd_client_single
+ - cluster.all-mcp-arch-common.backports
+ - cluster.mcp-common-noha.openstack_compute_pdf
+parameters:
+ _param:
+ kubernetes_containerd_package: containerd
+ kubernetes:
+ common:
+ hyperkube:
+ source: ${_param:kubernetes_hyperkube_source}
+ source_hash: ${_param:kubernetes_hyperkube_source_hash}
+ pause_image: ${_param:kubernetes_pause_image}
+ pool:
+ proxy:
+ daemon_opts:
+ cluster-cidr: ${_param:calico_private_network}/${_param:calico_private_netmask}
+ kubelet:
+ address: ${_param:single_address}
+ fail_on_swap: ${_param:kubelet_fail_on_swap}
+ network:
+ calico:
+ enabled: true
+ no_default_pools: false
+ image: ${_param:kubernetes_calico_image}
+ calicoctl_image: ${_param:kubernetes_calico_calicoctl_image}
+ cni_image: ${_param:kubernetes_calico_cni_image}
+ kube_controllers_image: ${_param:kubernetes_calico_kube_controllers_image}
+ birdcl_source: ${_param:kubernetes_calico_birdcl_source}
+ birdcl_source_hash: ${_param:kubernetes_calico_birdcl_source_hash}
+ calicoctl_source: ${_param:kubernetes_calico_calicoctl_source}
+ calicoctl_source_hash: ${_param:kubernetes_calico_calicoctl_source_hash}
+ cni_ipam_source: ${_param:kubernetes_calico_cni_ipam_source}
+ cni_ipam_source_hash: ${_param:kubernetes_calico_cni_ipam_source_hash}
+ cni_source: ${_param:kubernetes_calico_cni_source}
+ cni_source_hash: ${_param:kubernetes_calico_cni_source_hash}
+ etcd:
+ ssl:
+ enabled: true
+ policy:
+ enabled: ${_param:kubernetes_calico_policy_enabled}
+ linux:
+ system:
+ kernel:
+ sysctl:
+ # The default operating system limit on mmap counts is likely to be too low,
+ # which may result in out-of-memory exceptions.
+ vm.max_map_count: 262144
+ network:
+ interface:
+ br-mgmt:
+ post_up_cmds:
+ - ip r rep 10.254.0.0/16 via ${_param:single_address}
+ pxe_admin_int:
+ gateway: {{ nm.net_admin_gw }}
+ name_servers:
+ - {{ nm.net_admin_gw }}
+ storage:
+ enabled: true
+ swap:
+ img:
+ enabled: false
+ engine: file
+ device: /swap.img
diff --git a/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/compute.yml b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/compute.yml
new file mode 100644
index 000000000..f2ab4e9e8
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/compute.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - system.linux.network.hosts
+ - cluster.mcp-k8s-calico-noha.kubernetes.common
+ - cluster.mcp-k8s-calico-noha
diff --git a/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/control.yml b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/control.yml
new file mode 100644
index 000000000..25c17dc65
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/control.yml
@@ -0,0 +1,99 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - service.etcd.server.single
+ - service.kubernetes.control.cluster
+ - system.salt.minion.cert.etcd_server_single
+ - system.kubernetes.master.single
+ - system.kubernetes.master.auth.rbac
+ - system.kubernetes.control.roles.cluster-admin
+ - cluster.mcp-k8s-calico-noha.kubernetes.common
+ - cluster.mcp-k8s-calico-noha
+parameters:
+ _param:
+ docker_image_etcd: quay.io/coreos/etcd:v3.3.12
+ kubernetes_etcd_repo: https://github.com/etcd-io/etcd/releases/download
+ kubernetes_etcd_source: ${_param:kubernetes_etcd_repo}/v3.3.12/etcd-v3.3.12-linux-amd64.tar.gz
+ kubernetes_etcd_source_hash: md5=079af00546443b686df31e7ec605135e
+ etcd:
+ server:
+ image: ${_param:docker_image_etcd}
+ source:
+ engine: archive
+ etcd_source: ${_param:kubernetes_etcd_source}
+ etcd_source_hash: ${_param:kubernetes_etcd_source_hash}
+ setup:
+ calico:
+ key: /calico/ipam/v2/assignment/ipv4/block/${_param:calico_private_network}-${_param:calico_private_netmask}
+ value: '{"masquerade":true,"cidr":"${_param:calico_private_network}/${_param:calico_private_netmask}"}'
+ ssl:
+ enabled: true
+ kubernetes:
+ common:
+ addons:
+ virtlet:
+ enabled: ${_param:kubernetes_virtlet_enabled}
+ namespace: ${_param:kubernetes_addon_namespace}
+ image: ${_param:kubernetes_virtlet_image}
+ criproxy_version: ${_param:kubernetes_criproxy_version}
+ criproxy_source: ${_param:kubernetes_criproxy_checksum}
+ hosts:
+ - ${_param:kubernetes_compute01_hostname}
+ dashboard:
+ enabled: ${_param:kubernetes_dashboard}
+ image: ${_param:kubernetes_dashboard_image}
+ helm:
+ enabled: ${_param:kubernetes_helm_enabled}
+ netchecker:
+ enabled: ${_param:kubernetes_netchecker_enabled}
+ agent_probeurls: ${_param:kubernetes_netchecker_agent_probeurls}
+ externaldns:
+ enabled: ${_param:kubernetes_externaldns_enabled}
+ image: ${_param:kubernetes_externaldns_image}
+ provider: ${_param:kubernetes_externaldns_provider}
+ metallb:
+ enabled: ${_param:kubernetes_metallb_enabled}
+ addresses:
+ - ${_param:kubernetes_metallb_addresses_pool}
+ ingress-nginx:
+ enabled: ${_param:kubernetes_ingressnginx_enabled}
+ metrics-server:
+ enabled: ${_param:kubernetes_metrics_server_enabled}
+ master:
+ apiserver:
+ insecure_address: 0.0.0.0
+ kubelet:
+ address: ${_param:single_address}
+ fail_on_swap: ${_param:kubelet_fail_on_swap}
+ etcd:
+ ssl:
+ enabled: true
+ network:
+ calico:
+ enabled: true
+ image: ${_param:kubernetes_calico_image}
+ calicoctl_image: ${_param:kubernetes_calico_calicoctl_image}
+ cni_image: ${_param:kubernetes_calico_cni_image}
+ kube_controllers_image: ${_param:kubernetes_calico_kube_controllers_image}
+ birdcl_source: ${_param:kubernetes_calico_birdcl_source}
+ birdcl_source_hash: ${_param:kubernetes_calico_birdcl_source_hash}
+ calicoctl_source: ${_param:kubernetes_calico_calicoctl_source}
+ calicoctl_source_hash: ${_param:kubernetes_calico_calicoctl_source_hash}
+ cni_ipam_source: ${_param:kubernetes_calico_cni_ipam_source}
+ cni_ipam_source_hash: ${_param:kubernetes_calico_cni_ipam_source_hash}
+ cni_source: ${_param:kubernetes_calico_cni_source}
+ cni_source_hash: ${_param:kubernetes_calico_cni_source_hash}
+ etcd:
+ ssl:
+ enabled: true
+ policy:
+ enabled: ${_param:kubernetes_calico_policy_enabled}
+ namespace:
+ netchecker:
+ enabled: true
diff --git a/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/init.yml.j2 b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/init.yml.j2
new file mode 100644
index 000000000..ef8785aa4
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-k8s-calico-noha/kubernetes/init.yml.j2
@@ -0,0 +1,108 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+---
+parameters:
+ _param:
+ salt_minion_ca_authority: salt_master_ca
+
+ # kubelet
+ kubelet_fail_on_swap: true
+
+ # kubernetes settings
+ kubernetes_admin_user: admin
+ kubernetes_admin_password: sbPfel23ZigJF3Bm
+ kubernetes_admin_token: PpP6Mm3pAoPVqcKOKUu0x1dh7b1959Fi
+ kubernetes_kubelet_token: JJ2PKHxjiU6EYvIt18BqwdSK1HvWh8pt
+ kubernetes_kube-proxy_token: jT0hJk9L6cIw5UpYDNhsRwcj3Z2n62B6
+ kubernetes_scheduler_token: VgkUHfrW07zNxrb0ucFyX7NBnSJN9Xp6
+ kubernetes_controller-manager_token: uXrdZ1YKF6qlYm3sHje2iEXMGAGDWOIU
+ kubernetes_dns_token: 0S1I4iJeFjq5fopPwwCwTp3xFpEZfeUl
+ etcd_initial_token: IN7KaRMSo3xkGxkjAAPtkRkAgqN4ZNRq
+ kubernetes_netchecker_agent_probeurls: "http://ipinfo.io"
+
+ # addresses and hostnames
+ kubernetes_internal_api_address: 10.254.0.1
+ kubernetes_internal_dns_address: 10.254.0.10
+ kubernetes_control_hostname: ctl
+ kubernetes_control_node01_hostname: ctl01
+ kubernetes_compute01_hostname: cmp001
+ kubernetes_compute02_hostname: cmp002
+ kubernetes_control_node01_address: ${_param:openstack_control_address}
+ kubernetes_control_address: ${_param:kubernetes_control_node01_address}
+ master_address: ${_param:kubernetes_control_node01_address}
+ cluster_local_address: ${_param:single_address}
+
+ # cert
+ control_address: ${_param:kubernetes_control_node01_address}
+
+ # etcd stuff
+ node_hostname: ${_param:kubernetes_control_node01_hostname}
+ node_address: ${_param:kubernetes_control_node01_address}
+ node_port: 4001
+
+ # calico
+ calico_private_network: 192.168.0.0
+ calico_private_netmask: 16
+
+ # coredns
+ kubernetes_externaldns_provider: coredns
+ kubernetes_metallb_addresses_pool: 172.16.10.70-172.16.10.95
+
+ # switches of addons
+ kubernetes_kubedns_enabled: false
+ kubernetes_externaldns_enabled: false
+ kubernetes_coredns_enabled: true
+ kubernetes_dashboard: false
+ kubernetes_virtlet_enabled: false
+ kubernetes_flannel_enabled: false
+ kubernetes_genie_enabled: false
+ kubernetes_calico_enabled: true
+ kubernetes_opencontrail_enabled: false
+ kubernetes_contrail_network_controller_enabled: false
+ kubernetes_metallb_enabled: false
+ kubernetes_ingressnginx_enabled: false
+ kubernetes_rbd_enabled: false
+ kubernetes_helm_enabled: false
+ kubernetes_netchecker_enabled: true
+ kubernetes_calico_policy_enabled: false
+ kubernetes_metrics_server_enabled: false
+
+ kubernetes_ingressnginx_controller_replicas: 1
+ kubernetes_virtlet_use_apparmor: false
+
+ kubernetes_addon_namespace: kube-system
+
+
+ # Cloud providers parameters
+ kubernetes_cloudprovider_enabled: false
+ kubernetes_cloudprovider_type: 'openstack'
+
+ linux:
+ system:
+ kernel:
+ sysctl:
+ net.ipv4.tcp_congestion_control: yeah
+ net.ipv4.tcp_slow_start_after_idle: 0
+ net.ipv4.tcp_fin_timeout: 30
+ network:
+ host:
+ ctl01:
+ address: ${_param:kubernetes_control_node01_address}
+ names:
+ - ctl01
+ - ctl01.${_param:cluster_domain}
+{%- for cmp in range(1, nm.cmp_nodes + 1) %}
+ {%- set h = 'cmp%03d' | format(cmp) %}
+ {%- set mgmt = nm.net_mgmt_hosts | length + nm.start_ip[nm.net_mgmt] + loop.index %}
+ {{ h }}:
+ address: {{ nm.net_mgmt | ipnet_hostaddr(mgmt) }}
+ names:
+ - {{ h }}
+ - {{ h }}.${_param:cluster_domain}
+{%- endfor %}
diff --git a/mcp/reclass/classes/cluster/mcp-odl-ha/infra/config.yml b/mcp/reclass/classes/cluster/mcp-odl-ha/infra/config.yml.j2
index f72993f89..950c49355 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-ha/infra/config.yml
+++ b/mcp/reclass/classes/cluster/mcp-odl-ha/infra/config.yml.j2
@@ -7,17 +7,26 @@
##############################################################################
---
classes:
- - system.reclass.storage.system.opendaylight_control_single
- cluster.mcp-common-ha.infra.config
- cluster.mcp-odl-ha.infra
parameters:
+ _param:
+ opendaylight_server_node01_hostname: odl01
+ opendaylight_server_node02_hostname: odl02
+ opendaylight_server_node03_hostname: odl03
reclass:
storage:
node:
- opendaylight_control_node01:
+{%- for i in range(1, 4) %}
+ opendaylight_control_node0{{ i }}:
+ name: ${_param:opendaylight_server_node0{{ i }}_hostname}
+ domain: ${_param:cluster_domain}
classes:
- cluster.${_param:cluster_name}.opendaylight.control
params:
- linux_system_codename: xenial
- single_address: ${_param:opendaylight_server_node01_single_address}
- pxe_admin_address: ${_param:opnfv_opendaylight_server_node01_pxe_admin_address}
+ salt_master_host: ${_param:reclass_config_master}
+ linux_system_codename: ${_param:linux_system_codename}
+ single_address: ${_param:opendaylight_server_node0{{ i }}_address}
+ pxe_admin_address: ${_param:opnfv_opendaylight_server_node0{{ i }}_pxe_admin_address}
+ keepalived_vip_priority: 10{{ i }}
+{%- endfor %}
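
The loop that replaces the single opendaylight_control_node01 entry expands into three reclass storage nodes with ascending keepalived VIP priorities. A quick way to see what it generates is to render a stripped-down version of the loop with Jinja2 (illustrative only; the real template resolves its values through reclass _param interpolation).

from jinja2 import Template

tmpl = Template(
    '{% for i in range(1, 4) %}'
    'opendaylight_control_node0{{ i }}: keepalived_vip_priority 10{{ i }}\n'
    '{% endfor %}'
)
print(tmpl.render(), end='')
# opendaylight_control_node01: keepalived_vip_priority 101
# opendaylight_control_node02: keepalived_vip_priority 102
# opendaylight_control_node03: keepalived_vip_priority 103
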
diff --git a/mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2 b/mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2
index ab0da39b3..9ff091941 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-odl-ha/infra/kvm.yml.j2
@@ -5,6 +5,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+{%- import 'net_map.j2' as nm with context %}
---
{%- if conf.MCP_VCP %}
# NOTE(armband): we don't want to pull in salt.control for novcp
@@ -24,9 +25,19 @@ parameters:
cluster:
internal:
node:
- odl01:
- name: ${_param:opendaylight_server_node01_hostname}
- provider: ${_param:infra_kvm_node02_hostname}.${_param:cluster_domain}
- image: ${_param:salt_control_xenial_image}
+ {%- for i in range(1, 4) %}
+ odl0{{ i }}:
+ name: ${_param:opendaylight_server_node0{{ i }}_hostname}
+ provider: ${_param:infra_kvm_node0{{ i }}_hostname}.${_param:cluster_domain}
+ image: ${_param:salt_control_bionic_image}
size: opendaylight.server
+ {%- if conf.nodes[nm.ctl01.idx].node.arch == 'aarch64' %}
+ machine: virt
+ cpu_mode: host-passthrough
+ loader:
+ readonly: 'yes'
+ type: pflash
+ path: /usr/share/AAVMF/AAVMF_CODE.fd
+ {%- endif %}
+ {%- endfor %}
{%- endif %}
diff --git a/mcp/reclass/classes/cluster/mcp-odl-ha/infra/maas.yml.j2 b/mcp/reclass/classes/cluster/mcp-odl-ha/infra/maas.yml.j2
index c06643089..3a87ab558 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-ha/infra/maas.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-odl-ha/infra/maas.yml.j2
@@ -8,10 +8,5 @@
{%- import 'net_map.j2' as nm with context %}
---
classes:
- - cluster.mcp-common-ha.infra.maas
- cluster.mcp-odl-ha.infra
-{%- if 'aarch64' not in nm.cluster.arch %}
-parameters:
- _param:
- hwe_kernel: 'ga-16.04'
-{%- endif %}
+ - cluster.all-mcp-arch-common.infra.maas
diff --git a/mcp/reclass/classes/cluster/mcp-odl-ha/opendaylight/control.yml.j2 b/mcp/reclass/classes/cluster/mcp-odl-ha/opendaylight/control.yml.j2
index 685cd9ec1..23d1072d7 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-ha/opendaylight/control.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-odl-ha/opendaylight/control.yml.j2
@@ -7,14 +7,14 @@
##############################################################################
---
classes:
- - service.opendaylight.server.single
+ - service.opendaylight.server.cluster
- cluster.mcp-common-ha.openstack_interface_vcp_biport
{%- if conf.MCP_VCP %}
- cluster.mcp-odl-ha
{%- endif %}
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
opendaylight:
server:
odl_bind_ip: ${_param:single_address}
@@ -22,9 +22,12 @@ parameters:
java_min_mem: 6g
java_max_mem: 6g
router_enabled: true
+ netvirt_natservice:
+ nat_mode: conntrack
karaf_features:
odl_default:
- odl-restconf-all
- odl-aaa-authn
netvirt:
- odl-netvirt-openstack
+ seed_nodes_list: {%- for i in range(1, 4) %} ${_param:opendaylight_server_node0{{ i }}_address}{%- endfor %}
diff --git a/mcp/reclass/classes/cluster/mcp-odl-ha/openstack/compute.yml b/mcp/reclass/classes/cluster/mcp-odl-ha/openstack/compute.yml
index 992d1c8bf..3a49a69be 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-ha/openstack/compute.yml
+++ b/mcp/reclass/classes/cluster/mcp-odl-ha/openstack/compute.yml
@@ -15,3 +15,20 @@ parameters:
neutron:
gateway:
agent_mode: ${_param:neutron_gateway_agent_mode}
+ backend:
+ ovsdb_connection: tcp:127.0.0.1:6640
+ opendaylight:
+ ovsdb_server_iface: ptcp:6640:127.0.0.1
+ linux:
+ system:
+ file:
+ /var/tmp/odl_hostconfig.patch:
+ contents: |
+ 420c420
+ < if datapath_types.find(datapath_type) >= 0)
+ ---
+ > if datapath_type in datapath_types)
+ 460c460
+ < return subprocess.check_output(command_line).strip() # nosec
+ ---
+ > return subprocess.check_output(command_line).strip().decode() # nosec
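
The odl_hostconfig.patch dropped onto the compute nodes is a plain diff(1) normal-format patch: it swaps a str.find() test for a membership test and appends .decode(), since on Python 3 subprocess.check_output() returns bytes. A small stand-alone illustration of both changes follows (the echo command is just an example, not the call the patched script makes).

import subprocess

raw = subprocess.check_output(['echo', 'system@ovs-system'])   # bytes on Python 3
text = raw.strip().decode()                                    # str, as in the patched line
print(type(raw).__name__, type(text).__name__)                 # bytes str

datapath_types = 'netdev,system'
print('system' in datapath_types)            # patched membership test
print(datapath_types.find('system') >= 0)    # original form, equivalent but clunkier
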
diff --git a/mcp/reclass/classes/cluster/mcp-odl-ha/openstack/control.yml b/mcp/reclass/classes/cluster/mcp-odl-ha/openstack/control.yml
index 7b03f29e4..fe5a29714 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-ha/openstack/control.yml
+++ b/mcp/reclass/classes/cluster/mcp-odl-ha/openstack/control.yml
@@ -17,7 +17,11 @@ parameters:
openrc_extra:
# For HA, all public services are available through nginx on prx
sdn_controller_ip: ${_param:cluster_public_host}
- sdn_username: admin # Hardcoded to default ODL values for now
- sdn_password: admin
+ sdn_controller_user: admin # Hardcoded to default ODL values for now
+ sdn_controller_password: ${_param:opendaylight_password}
sdn_controller_webport: ${_param:opendaylight_rest_port}
sdn_controller_restconfport: ${_param:opendaylight_rest_port}
+ neutron:
+ server:
+ backend:
+ password: ${_param:opendaylight_password}
diff --git a/mcp/reclass/classes/cluster/mcp-odl-ha/openstack/init.yml.j2 b/mcp/reclass/classes/cluster/mcp-odl-ha/openstack/init.yml.j2
index 6301e737a..1dd02bb8a 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-ha/openstack/init.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-odl-ha/openstack/init.yml.j2
@@ -10,18 +10,30 @@ classes:
- cluster.mcp-common-ha.openstack_init
parameters:
_param:
+ neutron_tenant_network_types: "flat,vxlan"
+
# opendaylight options
- opendaylight_service_host: ${_param:opendaylight_server_node01_single_address}
+ opendaylight_service_host: ${_param:opnfv_opendaylight_server_address}
opendaylight_rest_port: 8282
-
- neutron_tenant_network_types: "flat,vxlan"
{%- if conf.MCP_VCP %}
+ opendaylight_server_node01_hostname: odl01
+ opendaylight_server_node02_hostname: odl02
+ opendaylight_server_node03_hostname: odl03
+ opendaylight_server_node01_address: ${_param:opnfv_opendaylight_server_node01_address}
+ opendaylight_server_node02_address: ${_param:opnfv_opendaylight_server_node02_address}
+ opendaylight_server_node03_address: ${_param:opnfv_opendaylight_server_node03_address}
linux:
network:
host:
- odl01:
- address: ${_param:opendaylight_service_host}
+ {%- for i in range(1, 4) %}
+ odl0{{ i }}:
+ address: ${_param:opendaylight_server_node0{{ i }}_address}
names:
- - ${_param:opendaylight_server_node01_hostname}
- - ${_param:opendaylight_server_node01_hostname}.${_param:cluster_domain}
+ - ${_param:opendaylight_server_node0{{ i }}_hostname}
+ - ${_param:opendaylight_server_node0{{ i }}_hostname}.${_param:cluster_domain}
+ {%- endfor %}
+{%- else %}
+ opendaylight_control_hostname: ${_param:openstack_control_node02_hostname}
+ opendaylight_server_node01_hostname: ${_param:opendaylight_control_hostname}
+ opendaylight_server_node01_address: ${_param:opnfv_openstack_control_node02_address}
{%- endif %}
diff --git a/mcp/reclass/classes/cluster/mcp-odl-noha/infra/config.yml.j2 b/mcp/reclass/classes/cluster/mcp-odl-noha/infra/config.yml.j2
index 9e7dda947..9b84a84b7 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-noha/infra/config.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-odl-noha/infra/config.yml.j2
@@ -20,7 +20,7 @@ parameters:
classes:
- cluster.${_param:cluster_name}.opendaylight.control
params:
- linux_system_codename: xenial
+ linux_system_codename: bionic
single_address: ${_param:opendaylight_service_host}
pxe_admin_address: ${_param:opnfv_opendaylight_server_node01_pxe_admin_address}
openstack_gateway_node01:
diff --git a/mcp/reclass/classes/cluster/mcp-odl-noha/infra/maas.yml b/mcp/reclass/classes/cluster/mcp-odl-noha/infra/maas.yml
new file mode 100644
index 000000000..b91ba2c33
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-odl-noha/infra/maas.yml
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-odl-noha.infra
+ - cluster.all-mcp-arch-common.infra.maas
diff --git a/mcp/reclass/classes/cluster/mcp-odl-noha/init.yml b/mcp/reclass/classes/cluster/mcp-odl-noha/init.yml
index a595bf0b5..64b2a16f9 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-noha/init.yml
+++ b/mcp/reclass/classes/cluster/mcp-odl-noha/init.yml
@@ -7,11 +7,10 @@
##############################################################################
---
classes:
- - system.linux.system.single
- cluster.mcp-common-noha.init_options
- cluster.mcp-odl-noha.infra
- cluster.mcp-odl-noha.openstack
parameters:
_param:
- opendaylight_service_host: ${_param:opnfv_opendaylight_server_node01_single_address}
+ opendaylight_service_host: ${_param:opnfv_opendaylight_server_node01_address}
diff --git a/mcp/reclass/classes/cluster/mcp-odl-noha/opendaylight/control.yml b/mcp/reclass/classes/cluster/mcp-odl-noha/opendaylight/control.yml.j2
index 536ebfad7..c3d0d187d 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-noha/opendaylight/control.yml
+++ b/mcp/reclass/classes/cluster/mcp-odl-noha/opendaylight/control.yml.j2
@@ -7,24 +7,27 @@
##############################################################################
---
classes:
- - system.linux.system.repo.mcp.openstack
- - system.linux.system.repo.mcp.extra
- service.opendaylight.server.single
+ - service.quagga.server.single
+ - cluster.all-mcp-arch-common.backports
- cluster.mcp-odl-noha
- cluster.mcp-odl-noha.opendaylight.control_pdf
parameters:
- _param:
- linux_system_codename: xenial
opendaylight:
server:
odl_bind_ip: ${_param:single_address}
odl_rest_port: ${_param:opendaylight_rest_port}
- java_min_mem: 3g
- java_max_mem: 3g
+ java_min_mem: 4g
+ java_max_mem: 4g
router_enabled: true
+ netvirt_natservice:
+ nat_mode: conntrack
karaf_features:
odl_default:
- odl-restconf-all
- odl-aaa-authn
netvirt:
- odl-netvirt-openstack
+{%- if '-sfc-' in conf.MCP_DEPLOY_SCENARIO %}
+ - odl-netvirt-sfc
+{%- endif %}
diff --git a/mcp/reclass/classes/cluster/mcp-odl-noha/opendaylight/control_pdf.yml.j2 b/mcp/reclass/classes/cluster/mcp-odl-noha/opendaylight/control_pdf.yml.j2
index 5bb591765..b21131dfe 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-noha/opendaylight/control_pdf.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-odl-noha/opendaylight/control_pdf.yml.j2
@@ -6,6 +6,14 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{#- Filter out NIC duplicates by constructing a dict (used NICs only) #}
+{%- set nics = { nm.ctl01.nic_mgmt: True } %}
+{%- set vlans = { nm.vlan_mgmt: nm.ctl01.nic_mgmt } %}
+{%- if '-bgpvpn-' in conf.MCP_DEPLOY_SCENARIO %}
+ {%- do nics.update({nm.ctl01.nic_public: True}) %}
+ {%- do vlans.update({nm.vlan_public: nm.ctl01.nic_public}) %}
+{%- endif %}
---
parameters:
linux:
@@ -18,13 +26,38 @@ parameters:
type: eth
address: ${_param:pxe_admin_address}
netmask: ${_param:opnfv_net_admin_mask}
+ mtu: ${_param:interface_mtu}
+ noifupdown: true
gateway: {{ nm.net_admin_gw }}
name_servers:
- {{ nm.net_admin_gw }}
- single_int:
+{%- if '-bgpvpn-' in conf.MCP_DEPLOY_SCENARIO %}
+ br-ext:
enabled: true
- name: {{ nm.ctl01.nic_mgmt }}
- type: eth
+ type: bridge
+ proto: static
+ address: ${_param:opnfv_opendaylight_server_external_address}
+ netmask: ${_param:opnfv_net_public_mask}
+ mtu: ${_param:interface_mtu}
+ use_interfaces:
+ - {{ ma.interface_str(nm.ctl01.nic_public, nm.vlan_public) }}
+ noifupdown: true
+{%- endif %}
+
+{#- prevent duplicates for tagged mgmt on the same physical interface as PXE/admin #}
+{%- if nm.ctl01.nic_admin in nics %}
+ {%- do nics.pop(nm.ctl01.nic_admin) %}
+{%- endif %}
+
+{{ ma.linux_network_interfaces_nic(nics) }}
+
+{{ ma.linux_network_interfaces_vlan(vlans) }}
+
+ br-ctl:
+ enabled: true
+ type: bridge
proto: static
address: ${_param:single_address}
netmask: ${_param:opnfv_net_mgmt_mask}
+ use_interfaces:
+ - {{ ma.interface_str(nm.ctl01.nic_mgmt, nm.vlan_mgmt) }}
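
The nics/vlans dictionaries at the top of this template act as ordered sets so the same physical interface is never declared twice, for example when the tagged mgmt network rides on the same NIC as PXE/admin. The idiom in plain Python, with placeholder interface names rather than values from any PDF/IDF:

nic_mgmt, nic_public, nic_admin = 'eth1', 'eth2', 'eth1'   # admin shares the mgmt NIC here
vlan_mgmt, vlan_public = 300, 'native'
bgpvpn_scenario = True

nics = {nic_mgmt: True}
vlans = {vlan_mgmt: nic_mgmt}
if bgpvpn_scenario:
    nics.update({nic_public: True})
    vlans.update({vlan_public: nic_public})

if nic_admin in nics:       # PXE/admin already declares this NIC, drop the duplicate
    nics.pop(nic_admin)

print(list(nics))   # ['eth2'] -> only the public NIC still needs its own definition
print(vlans)        # {300: 'eth1', 'native': 'eth2'}
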
diff --git a/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/compute.yml.j2 b/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/compute.yml.j2
index 18b73d7ea..44ebb86b1 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/compute.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/compute.yml.j2
@@ -6,21 +6,32 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{%- set vlan_private_start = (nm.vlan_private | string).rsplit('-')[0] %}
---
classes:
- service.neutron.compute.single
- service.neutron.compute.opendaylight.single
-{%- if conf.MCP_DPDK_MODE %}
+{%- if '-ovs-' in conf.MCP_DEPLOY_SCENARIO %}
- system.nova.compute.nfv.hugepages
- system.neutron.compute.nfv.dpdk
{%- endif %}
- cluster.mcp-common-noha.openstack_compute
- cluster.mcp-odl-noha
parameters:
+ nova:
+ compute:
+ vif_plugging_is_fatal: false
+ vif_plugging_timeout: 60
+ neutron:
+ compute:
+ opendaylight:
+ ovsdb_server_iface: ptcp:6640:127.0.0.1
linux:
network:
+ ovs_nowait: false
interface:
-{%- if conf.MCP_DPDK_MODE %}
+{%- if '-ovs-' in conf.MCP_DEPLOY_SCENARIO %}
dpdk0:
name: ${_param:dpdk0_name}
pci: ${_param:dpdk0_pci}
@@ -29,14 +40,48 @@ parameters:
bridge: br-prv
type: dpdk_ovs_port
n_rxq: ${_param:dpdk0_n_rxq}
+ mtu: ${_param:interface_mtu}
br-prv:
enabled: true
type: dpdk_ovs_bridge
proto: static
address: ${_param:tenant_address}
netmask: ${_param:opnfv_net_private_mask}
- tenant_interface:
+ {{ nm.cmp001.nic_private }}:
type: dpdk # Not a meaningful type, just match 'dpdk' for filtering
+
+{%- set nics = { nm.cmp001.nic_public: True } %}
+{%- set vlans = { nm.vlan_public: nm.cmp001.nic_public } %}
+
+{{ ma.linux_network_interfaces_nic(nics) }}
+
+{{ ma.linux_network_interfaces_vlan(vlans) }}
+
+ br-floating:
+ enabled: true
+ type: ovs_bridge
+ datapath_type: netdev
+ use_interfaces:
+ - float-to-ex
+ float-to-ex:
+ enabled: true
+ type: ovs_port
+ mtu: ${_param:interface_mtu}
+ bridge: br-floating
+ ovs_bridge: br-floating
+ noifupdown: true
+ br-ex:
+ enabled: true
+ type: bridge
+ address: ${_param:external_address}
+ netmask: ${_param:opnfv_net_public_mask}
+ use_interfaces:
+ - {{ ma.interface_str(nm.cmp001.nic_public, nm.vlan_public) }}
+ use_ovs_ports:
+ - float-to-ex
+ gateway: ${_param:opnfv_net_public_gw}
+ name_servers: {{ nm.dns_public }}
+ noifupdown: true
{%- else %}
br-mesh:
enabled: true
@@ -45,32 +90,36 @@ parameters:
address: ${_param:tenant_address}
netmask: ${_param:opnfv_net_private_mask}
use_interfaces:
- - ${_param:tenant_interface}
-{%- endif %}
- external_interface:
+ - {{ ma.interface_str(nm.cmp001.nic_private, vlan_private_start) }}
+ {{ ma.interface_str(nm.cmp001.nic_public, nm.vlan_public) }}:
enabled: true
- type: eth
- name: ${_param:external_interface}
mtu: ${_param:interface_mtu}
proto: manual
- br-floating:
- enabled: true
- type: ovs_bridge
- mtu: ${_param:interface_mtu}
- float-to-ex:
- enabled: true
+ ovs_port_type: OVSPort
type: ovs_port
- mtu: ${_param:interface_mtu}
+ ovs_bridge: br-floating
bridge: br-floating
- br-ex:
+ br-floating:
enabled: true
- type: bridge
- mtu: ${_param:interface_mtu}
+ type: ovs_bridge
+ proto: static
address: ${_param:external_address}
netmask: ${_param:opnfv_net_public_mask}
use_interfaces:
- - ${_param:external_interface}
- use_ovs_ports:
- - float-to-ex
+ - {{ ma.interface_str(nm.cmp001.nic_public, nm.vlan_public) }}
gateway: ${_param:opnfv_net_public_gw}
name_servers: {{ nm.dns_public }}
+ noifupdown: true
+{%- endif %}
+ system:
+ file:
+ /var/tmp/odl_hostconfig.patch:
+ contents: |
+ 420c420
+ < if datapath_types.find(datapath_type) >= 0)
+ ---
+ > if datapath_type in datapath_types)
+ 460c460
+ < return subprocess.check_output(command_line).strip() # nosec
+ ---
+ > return subprocess.check_output(command_line).strip().decode() # nosec
diff --git a/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/control.yml b/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/control.yml
index c9c683fc7..4b0beb5be 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/control.yml
+++ b/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/control.yml
@@ -30,7 +30,11 @@ parameters:
openrc_extra:
# For noHA, all public services are available through haproxy on ctl
sdn_controller_ip: ${_param:cluster_vip_address}
- sdn_username: admin # Hardcoded to default ODL values for now
- sdn_password: admin
+ sdn_controller_user: admin # Hardcoded to default ODL values for now
+ sdn_controller_password: ${_param:opendaylight_password}
sdn_controller_webport: ${_param:opendaylight_rest_port}
sdn_controller_restconfport: ${_param:opendaylight_rest_port}
+ neutron:
+ server:
+ backend:
+ password: ${_param:opendaylight_password}
diff --git a/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/gateway.yml b/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/gateway.yml
deleted file mode 100644
index 678740f40..000000000
--- a/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/gateway.yml
+++ /dev/null
@@ -1,25 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
----
-classes:
- - cluster.mcp-common-noha.openstack_gateway
- - service.neutron.gateway.opendaylight.single
- - cluster.mcp-odl-noha
-parameters:
- linux:
- network:
- interface:
- br-mesh:
- enabled: true
- type: bridge
- mtu: ${_param:interface_mtu}
- proto: static
- address: ${_param:tenant_address}
- netmask: ${_param:opnfv_net_private_mask}
- use_interfaces:
- - ${_param:tenant_interface}
diff --git a/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/gateway.yml.j2 b/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/gateway.yml.j2
new file mode 100644
index 000000000..946cdda03
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/gateway.yml.j2
@@ -0,0 +1,57 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{%- set vlan_private_start = (nm.vlan_private | string).rsplit('-')[0] %}
+---
+classes:
+ - cluster.mcp-common-noha.openstack_gateway
+ - service.neutron.gateway.opendaylight.single
+ - cluster.mcp-odl-noha
+parameters:
+ linux:
+ network:
+ interface:
+{%- if '-ovs-' in conf.MCP_DEPLOY_SCENARIO %}
+ {{ nm.ctl01.nic_private }}:
+ ovs_port_type: OVSPort
+ type: ovs_port
+ bridge: br-prv
+ ovs_bridge: br-prv
+ br-prv:
+ enabled: true
+ type: ovs_bridge
+ mtu: ${_param:interface_mtu}
+ proto: static
+ address: ${_param:tenant_address}
+ netmask: ${_param:opnfv_net_private_mask}
+ use_interfaces:
+ - {{ nm.ctl01.nic_private }}
+{%- else %}
+ br-mesh:
+ enabled: true
+ type: bridge
+ mtu: ${_param:interface_mtu}
+ proto: static
+ address: ${_param:tenant_address}
+ netmask: ${_param:opnfv_net_private_mask}
+ use_interfaces:
+ - {{ ma.interface_str(nm.ctl01.nic_private, vlan_private_start) }}
+{%- endif %}
+ system:
+ file:
+ /var/tmp/odl_hostconfig.patch:
+ contents: |
+ 420c420
+ < if datapath_types.find(datapath_type) >= 0)
+ ---
+ > if datapath_type in datapath_types)
+ 460c460
+ < return subprocess.check_output(command_line).strip() # nosec
+ ---
+ > return subprocess.check_output(command_line).strip().decode() # nosec
diff --git a/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/init.yml b/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/init.yml
index a3918b231..87c41b048 100644
--- a/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/init.yml
+++ b/mcp/reclass/classes/cluster/mcp-odl-noha/openstack/init.yml
@@ -17,7 +17,7 @@ parameters:
network:
host:
odl01:
- address: ${_param:opnfv_opendaylight_server_node01_single_address}
+ address: ${_param:opnfv_opendaylight_server_node01_address}
names:
- odl01
- odl01.${_param:cluster_domain}
diff --git a/mcp/reclass/classes/cluster/mcp-ovn-ha/infra/maas.yml b/mcp/reclass/classes/cluster/mcp-ovn-ha/infra/maas.yml
index 72a451652..5007749d1 100644
--- a/mcp/reclass/classes/cluster/mcp-ovn-ha/infra/maas.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovn-ha/infra/maas.yml
@@ -7,5 +7,5 @@
##############################################################################
---
classes:
- - cluster.mcp-common-ha.infra.maas
- cluster.mcp-ovn-ha.infra
+ - cluster.all-mcp-arch-common.infra.maas
diff --git a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/compute.yml b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/compute.yml
index 7afb40e52..9af431b9a 100644
--- a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/compute.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/compute.yml
@@ -7,11 +7,11 @@
##############################################################################
---
classes:
+ - service.neutron.compute.ovn.single
- cluster.mcp-common-ha.openstack_compute
- cluster.mcp-ovn-ha.openstack.compute_pdf
- cluster.mcp-ovn-ha.infra
parameters:
- nova:
- compute:
- libvirt_service: libvirtd
- libvirt_bin: /etc/default/libvirtd
+ neutron:
+ gateway:
+ ~message_queue: ~
diff --git a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/control.yml b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/control.yml
index 811957600..94ca6ebb8 100644
--- a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/control.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/control.yml
@@ -7,7 +7,47 @@
##############################################################################
---
classes:
- - system.neutron.control.openvswitch.cluster
+ - system.neutron.control.cluster
- cluster.mcp-common-ha.openstack_interface_vcp_biport
- cluster.mcp-common-ha.openstack_control
- cluster.mcp-ovn-ha.infra
+parameters:
+ _param:
+ neutron_control_dvr: "False"
+ neutron_l3_ha: "False"
+ neutron_global_physnet_mtu: 1500
+ neutron_external_mtu: 1500
+ neutron_enable_qos: "False"
+ neutron_enable_vlan_aware_vms: "False"
+ neutron:
+ server:
+ global_physnet_mtu: ${_param:neutron_global_physnet_mtu}
+ l3_ha: ${_param:neutron_l3_ha}
+ dvr: ${_param:neutron_control_dvr}
+ qos: ${_param:neutron_enable_qos}
+ vlan_aware_vms: ${_param:neutron_enable_vlan_aware_vms}
+ backend:
+ engine: ovn
+ tenant_network_types: "${_param:neutron_tenant_network_types}"
+ external_mtu: ${_param:neutron_external_mtu}
+ mechanism:
+ ovn:
+ driver: ovn
+ ovn:
+ metadata_enabled: true
+ compute:
+ region: ${_param:openstack_region}
+ database:
+ host: ${_param:opnfv_openstack_database_address}
+ identity:
+ region: ${_param:openstack_region}
+ message_queue:
+ members:
+ - host: ${_param:openstack_message_queue_node01_address}
+ - host: ${_param:openstack_message_queue_node02_address}
+ - host: ${_param:openstack_message_queue_node03_address}
+ ovn_ctl_opts:
+ db-nb-create-insecure-remote: 'yes'
+ db-sb-create-insecure-remote: 'yes'
+ db-nb-addr: ${_param:cluster_vip_address}
+ db-sb-addr: ${_param:cluster_vip_address}
diff --git a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/database.yml b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/database.yml
index b8e441a36..f0e96daa6 100644
--- a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/database.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/database.yml
@@ -9,4 +9,4 @@
classes:
- cluster.mcp-common-ha.openstack_interface_vcp_biport
- cluster.mcp-common-ha.openstack_database
- - cluster.mcp-ovn-ha.infra_vcp
+ - cluster.mcp-ovn-ha.infra.init_vcp
diff --git a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/init.yml b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/init.yml
index 9dbfd59a1..737af52e3 100644
--- a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/init.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/init.yml
@@ -10,4 +10,4 @@ classes:
- cluster.mcp-common-ha.openstack_init
parameters:
_param:
- neutron_tenant_network_types: "flat,vxlan"
+ neutron_tenant_network_types: "geneve,flat"
diff --git a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/message_queue.yml b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/message_queue.yml
index de0561d31..9b2f5c1c0 100644
--- a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/message_queue.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/message_queue.yml
@@ -9,4 +9,4 @@
classes:
- cluster.mcp-common-ha.openstack_interface_vcp_biport
- cluster.mcp-common-ha.openstack_message_queue
- - cluster.mcp-ovn-ha.infra_vcp
+ - cluster.mcp-ovn-ha.infra.init_vcp
diff --git a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/proxy.yml b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/proxy.yml
index 95b78758c..3979af548 100644
--- a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/proxy.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/proxy.yml
@@ -9,4 +9,4 @@
classes:
- cluster.mcp-common-ha.openstack_interface_vcp_triport
- cluster.mcp-common-ha.openstack_proxy
- - cluster.mcp-ovn-ha.infra_vcp
+ - cluster.mcp-ovn-ha.infra.init_vcp
diff --git a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/telemetry.yml b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/telemetry.yml
index eb7910faa..aee142c43 100644
--- a/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/telemetry.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovn-ha/openstack/telemetry.yml
@@ -9,4 +9,4 @@
classes:
- cluster.mcp-common-ha.openstack_interface_vcp_biport
- cluster.mcp-common-ha.openstack_telemetry
- - cluster.mcp-ovn-ha.infra_vcp
+ - cluster.mcp-ovn-ha.infra.init_vcp
diff --git a/mcp/reclass/classes/cluster/mcp-ovn-noha/infra/maas.yml b/mcp/reclass/classes/cluster/mcp-ovn-noha/infra/maas.yml
new file mode 100644
index 000000000..359ef36bb
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-ovn-noha/infra/maas.yml
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-ovn-noha.infra
+ - cluster.all-mcp-arch-common.infra.maas
diff --git a/mcp/reclass/classes/cluster/mcp-ovn-noha/init.yml b/mcp/reclass/classes/cluster/mcp-ovn-noha/init.yml
index d4b6d85b7..82f4632bd 100644
--- a/mcp/reclass/classes/cluster/mcp-ovn-noha/init.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovn-noha/init.yml
@@ -7,7 +7,6 @@
##############################################################################
---
classes:
- - system.linux.system.single
- cluster.mcp-common-noha.init_options
- cluster.mcp-ovn-noha.infra
- cluster.mcp-ovn-noha.openstack
diff --git a/mcp/reclass/classes/cluster/mcp-ovn-noha/openstack/compute.yml.j2 b/mcp/reclass/classes/cluster/mcp-ovn-noha/openstack/compute.yml.j2
index 89ba3b074..ec6a1e7d3 100644
--- a/mcp/reclass/classes/cluster/mcp-ovn-noha/openstack/compute.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-ovn-noha/openstack/compute.yml.j2
@@ -6,6 +6,8 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{%- set vlan_private_start = (nm.vlan_private | string).rsplit('-')[0] %}
---
classes:
- service.neutron.compute.ovn.single
@@ -15,15 +17,18 @@ parameters:
neutron:
compute:
controller_vip: ${_param:cluster_local_address}
+ ~message_queue: ~
linux:
network:
interface:
- external_interface:
+ {{ ma.interface_str(nm.cmp001.nic_public, nm.vlan_public) }}:
enabled: true
- name: ${_param:external_interface}
mtu: ${_param:interface_mtu}
proto: manual
- type: eth
+ ovs_port_type: OVSPort
+ type: ovs_port
+ ovs_bridge: br-floating
+ bridge: br-floating
br-mesh:
enabled: true
type: bridge
@@ -31,25 +36,16 @@ parameters:
address: ${_param:tenant_address}
netmask: ${_param:opnfv_net_private_mask}
use_interfaces:
- - ${_param:tenant_interface}
+ - {{ ma.interface_str(nm.cmp001.nic_private, vlan_private_start) }}
br-floating:
enabled: true
type: ovs_bridge
mtu: ${_param:interface_mtu}
- float-to-ex:
- enabled: true
- type: ovs_port
- mtu: ${_param:interface_mtu}
- bridge: br-floating
- br-ex:
- enabled: true
- type: bridge
- mtu: ${_param:interface_mtu}
+ proto: static
address: ${_param:external_address}
netmask: ${_param:opnfv_net_public_mask}
use_interfaces:
- - ${_param:external_interface}
- use_ovs_ports:
- - float-to-ex
+ - {{ ma.interface_str(nm.cmp001.nic_public, nm.vlan_public) }}
gateway: ${_param:opnfv_net_public_gw}
name_servers: {{ nm.dns_public }}
+ noifupdown: true
diff --git a/mcp/reclass/classes/cluster/mcp-ovn-noha/openstack/control.yml b/mcp/reclass/classes/cluster/mcp-ovn-noha/openstack/control.yml
index 235beff26..f99a460bd 100644
--- a/mcp/reclass/classes/cluster/mcp-ovn-noha/openstack/control.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovn-noha/openstack/control.yml
@@ -10,3 +10,9 @@ classes:
- system.neutron.control.ovn.single
- cluster.mcp-common-noha.openstack_control
- cluster.mcp-ovn-noha
+parameters:
+ neutron:
+ server:
+ backend:
+ ovn:
+ metadata_enabled: true
diff --git a/mcp/reclass/classes/cluster/mcp-ovs-dpdk-ha/infra/maas.yml b/mcp/reclass/classes/cluster/mcp-ovs-dpdk-ha/infra/maas.yml
index f3d605494..2187ba78f 100644
--- a/mcp/reclass/classes/cluster/mcp-ovs-dpdk-ha/infra/maas.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovs-dpdk-ha/infra/maas.yml
@@ -7,5 +7,5 @@
##############################################################################
---
classes:
- - cluster.mcp-common-ha.infra.maas
- cluster.mcp-ovs-dpdk-ha.infra
+ - cluster.all-mcp-arch-common.infra.maas
diff --git a/mcp/reclass/classes/cluster/mcp-ovs-dpdk-ha/openstack/compute.yml b/mcp/reclass/classes/cluster/mcp-ovs-dpdk-ha/openstack/compute.yml
index 106a2a7ac..52e63dfd6 100644
--- a/mcp/reclass/classes/cluster/mcp-ovs-dpdk-ha/openstack/compute.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovs-dpdk-ha/openstack/compute.yml
@@ -18,10 +18,6 @@ parameters:
vhost_socket_dir: ${_param:compute_ovs_vhost_socket_dir}
backend:
tenant_vlan_range: ${_param:neutron_tenant_vlan_range}
- nova:
- compute:
- libvirt_service: libvirtd
- libvirt_bin: /etc/default/libvirtd
linux:
system:
kernel:
diff --git a/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/infra/maas.yml b/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/infra/maas.yml
new file mode 100644
index 000000000..49d214304
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/infra/maas.yml
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-ovs-dpdk-noha.infra
+ - cluster.all-mcp-arch-common.infra.maas
diff --git a/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/init.yml b/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/init.yml
index 96e2c9425..35c3e7655 100644
--- a/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/init.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/init.yml
@@ -7,7 +7,6 @@
##############################################################################
---
classes:
- - system.linux.system.single
- cluster.mcp-common-noha.init_options
- cluster.mcp-ovs-dpdk-noha.infra
- cluster.mcp-ovs-dpdk-noha.openstack
diff --git a/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/openstack/compute.yml.j2 b/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/openstack/compute.yml.j2
index 3e4eeceab..25fc82624 100644
--- a/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/openstack/compute.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/openstack/compute.yml.j2
@@ -34,8 +34,9 @@ parameters:
bridge: br-prv
type: dpdk_ovs_port
n_rxq: ${_param:dpdk0_n_rxq}
+ mtu: ${_param:interface_mtu}
br-prv:
enabled: true
type: dpdk_ovs_bridge
- tenant_interface:
+ {{ nm.cmp001.nic_private }}:
type: dpdk # Not a meaningful type, just match 'dpdk' for filtering
diff --git a/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/openstack/gateway.yml b/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/openstack/gateway.yml.j2
index 2f9aee6fd..c45b75569 100644
--- a/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/openstack/gateway.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovs-dpdk-noha/openstack/gateway.yml.j2
@@ -5,6 +5,7 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+{%- import 'net_map.j2' as nm with context %}
---
classes:
- cluster.mcp-common-noha.openstack_gateway
@@ -18,9 +19,14 @@ parameters:
linux:
network:
interface:
- tenant_interface:
+ {{ nm.ctl01.nic_private }}:
+ ovs_port_type: OVSPort
+ type: ovs_port
+ bridge: br-prv
ovs_bridge: br-prv
br-prv:
enabled: true
type: ovs_bridge
mtu: ${_param:interface_mtu}
+ use_interfaces:
+ - {{ nm.ctl01.nic_private }}
diff --git a/mcp/reclass/classes/cluster/mcp-ovs-ha/infra/maas.yml b/mcp/reclass/classes/cluster/mcp-ovs-ha/infra/maas.yml
index c9102ea70..154675f79 100644
--- a/mcp/reclass/classes/cluster/mcp-ovs-ha/infra/maas.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovs-ha/infra/maas.yml
@@ -7,5 +7,5 @@
##############################################################################
---
classes:
- - cluster.mcp-common-ha.infra.maas
- cluster.mcp-ovs-ha.infra
+ - cluster.all-mcp-arch-common.infra.maas
diff --git a/mcp/reclass/classes/cluster/mcp-ovs-ha/openstack/compute.yml b/mcp/reclass/classes/cluster/mcp-ovs-ha/openstack/compute.yml
index 1e157cfa7..2507f2bae 100644
--- a/mcp/reclass/classes/cluster/mcp-ovs-ha/openstack/compute.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovs-ha/openstack/compute.yml
@@ -10,8 +10,3 @@ classes:
- cluster.mcp-common-ha.openstack_compute
- cluster.mcp-ovs-ha.openstack.compute_pdf
- cluster.mcp-ovs-ha.infra
-parameters:
- nova:
- compute:
- libvirt_service: libvirtd
- libvirt_bin: /etc/default/libvirtd
diff --git a/mcp/reclass/classes/cluster/mcp-ovs-noha/infra/maas.yml b/mcp/reclass/classes/cluster/mcp-ovs-noha/infra/maas.yml
new file mode 100644
index 000000000..0d54d3be1
--- /dev/null
+++ b/mcp/reclass/classes/cluster/mcp-ovs-noha/infra/maas.yml
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-ovs-noha.infra
+ - cluster.all-mcp-arch-common.infra.maas
diff --git a/mcp/reclass/classes/cluster/mcp-ovs-noha/init.yml b/mcp/reclass/classes/cluster/mcp-ovs-noha/init.yml
index a453af50b..24de77a24 100644
--- a/mcp/reclass/classes/cluster/mcp-ovs-noha/init.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovs-noha/init.yml
@@ -7,7 +7,6 @@
##############################################################################
---
classes:
- - system.linux.system.single
- cluster.mcp-common-noha.init_options
- cluster.mcp-ovs-noha.infra
- cluster.mcp-ovs-noha.openstack
diff --git a/mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/compute.yml.j2 b/mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/compute.yml.j2
index c949de4f4..2707c7f5e 100644
--- a/mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/compute.yml.j2
+++ b/mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/compute.yml.j2
@@ -6,9 +6,12 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{%- set vlan_private_start = (nm.vlan_private | string).rsplit('-')[0] %}
---
classes:
- service.neutron.compute.single
+ - system.nova.compute.nfv.hugepages
- cluster.mcp-common-noha.openstack_compute
- cluster.mcp-ovs-noha
parameters:
@@ -27,4 +30,8 @@ parameters:
address: ${_param:tenant_address}
netmask: ${_param:opnfv_net_private_mask}
use_interfaces:
- - ${_param:tenant_interface}
+ - {{ ma.interface_str(nm.cmp001.nic_private, vlan_private_start) }}
+ system:
+ package:
+ cgroup-tools:
+ version: latest
diff --git a/mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/control.yml b/mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/control.yml
index 939cb2834..dd0245344 100644
--- a/mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/control.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/control.yml
@@ -10,3 +10,9 @@ classes:
- system.neutron.control.openvswitch.single
- cluster.mcp-common-noha.openstack_control
- cluster.mcp-ovs-noha
+parameters:
+ nova:
+ controller:
+ scheduler_default_filters: "DifferentHostFilter,SameHostFilter,RetryFilter,AvailabilityZoneFilter,RamFilter,\
+ CoreFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,\
+ ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,NUMATopologyFilter"
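
The scheduler_default_filters value relies on YAML's escaped line break inside a double-quoted scalar: the trailing backslash joins the wrapped lines without inserting whitespace, so nova receives one comma-separated string. A quick check with PyYAML, using a shortened filter list for illustration:

import yaml

doc = r'''
filters: "DifferentHostFilter,SameHostFilter,\
  RetryFilter,AvailabilityZoneFilter"
'''
# Raw string above keeps the trailing backslash intact for the YAML parser.
print(yaml.safe_load(doc)['filters'])
# -> DifferentHostFilter,SameHostFilter,RetryFilter,AvailabilityZoneFilter
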
diff --git a/mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/gateway.yml b/mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/gateway.yml.j2
index 5c5547cfc..685402da8 100644
--- a/mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/gateway.yml
+++ b/mcp/reclass/classes/cluster/mcp-ovs-noha/openstack/gateway.yml.j2
@@ -5,6 +5,9 @@
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{%- set vlan_private_start = (nm.vlan_private | string).rsplit('-')[0] %}
---
classes:
- cluster.mcp-common-noha.openstack_gateway
@@ -22,4 +25,4 @@ parameters:
address: ${_param:tenant_address}
netmask: ${_param:opnfv_net_private_mask}
use_interfaces:
- - ${_param:tenant_interface}
+ - {{ ma.interface_str(nm.ctl01.nic_private, vlan_private_start) }}
diff --git a/mcp/reclass/classes/system b/mcp/reclass/classes/system
-Subproject 0d3fc1ed410c463df962315621aff4b2235825d
+Subproject 6176bde8f5a5e3b723149830242ada46c5126e3
diff --git a/mcp/reclass/nodes/cfg01.mcp-fdio-ha.local.yml b/mcp/reclass/nodes/cfg01.mcp-fdio-ha.local.yml
new file mode 100644
index 000000000..8a17ec1ab
--- /dev/null
+++ b/mcp/reclass/nodes/cfg01.mcp-fdio-ha.local.yml
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-fdio-ha.infra.config
+parameters:
+ _param:
+ linux_system_codename: bionic
+ reclass_data_revision: master
+ linux:
+ system:
+ name: cfg01
+ domain: mcp-fdio-ha.local
diff --git a/mcp/config/scenario/os-nosdn-nofeature-noha.yaml b/mcp/reclass/nodes/cfg01.mcp-fdio-noha.local.yml
index 5c5e77ceb..29bd93de7 100644
--- a/mcp/config/scenario/os-nosdn-nofeature-noha.yaml
+++ b/mcp/reclass/nodes/cfg01.mcp-fdio-noha.local.yml
@@ -6,21 +6,13 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-cluster:
- domain: mcp-ovs-noha.local
- states:
- - openstack_noha
- - neutron_gateway
- - networks
-virtual:
- nodes:
- - cfg01
- - ctl01
- - cmp001
- - cmp002
- - gtw01
- ctl01:
- vcpus: 4
- ram: 14336
- gtw01:
- ram: 2048
+classes:
+ - cluster.mcp-fdio-noha.infra.config
+parameters:
+ _param:
+ linux_system_codename: bionic
+ reclass_data_revision: master
+ linux:
+ system:
+ name: cfg01
+ domain: mcp-fdio-noha.local
diff --git a/mcp/reclass/nodes/cfg01.mcp-iec-noha.local.yml b/mcp/reclass/nodes/cfg01.mcp-iec-noha.local.yml
new file mode 100644
index 000000000..82f7b155e
--- /dev/null
+++ b/mcp/reclass/nodes/cfg01.mcp-iec-noha.local.yml
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-iec-noha.infra.config
+parameters:
+ _param:
+ linux_system_codename: bionic
+ reclass_data_revision: master
+ linux:
+ system:
+ name: cfg01
+ domain: mcp-iec-noha.local
diff --git a/mcp/reclass/nodes/cfg01.mcp-k8s-calico-noha.local.yml b/mcp/reclass/nodes/cfg01.mcp-k8s-calico-noha.local.yml
new file mode 100644
index 000000000..cd2fd1a57
--- /dev/null
+++ b/mcp/reclass/nodes/cfg01.mcp-k8s-calico-noha.local.yml
@@ -0,0 +1,18 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+classes:
+ - cluster.mcp-k8s-calico-noha.infra.config
+parameters:
+ _param:
+ linux_system_codename: bionic
+ reclass_data_revision: master
+ linux:
+ system:
+ name: cfg01
+ domain: mcp-k8s-calico-noha.local
diff --git a/mcp/reclass/nodes/cfg01.mcp-odl-ha.local.yml b/mcp/reclass/nodes/cfg01.mcp-odl-ha.local.yml
index dbf3a4adf..f0d912e8b 100644
--- a/mcp/reclass/nodes/cfg01.mcp-odl-ha.local.yml
+++ b/mcp/reclass/nodes/cfg01.mcp-odl-ha.local.yml
@@ -10,7 +10,7 @@ classes:
- cluster.mcp-odl-ha.infra.config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
reclass_data_revision: master
linux:
system:
diff --git a/mcp/reclass/nodes/cfg01.mcp-odl-noha.local.yml b/mcp/reclass/nodes/cfg01.mcp-odl-noha.local.yml
index 4fd327242..fcad8a6cf 100644
--- a/mcp/reclass/nodes/cfg01.mcp-odl-noha.local.yml
+++ b/mcp/reclass/nodes/cfg01.mcp-odl-noha.local.yml
@@ -10,7 +10,7 @@ classes:
- cluster.mcp-odl-noha.infra.config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
reclass_data_revision: master
linux:
system:
diff --git a/mcp/reclass/nodes/cfg01.mcp-ovn-ha.local.yml b/mcp/reclass/nodes/cfg01.mcp-ovn-ha.local.yml
index 00fc19eb4..6d4a8bef3 100644
--- a/mcp/reclass/nodes/cfg01.mcp-ovn-ha.local.yml
+++ b/mcp/reclass/nodes/cfg01.mcp-ovn-ha.local.yml
@@ -10,7 +10,7 @@ classes:
- cluster.mcp-ovn-ha.infra.config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
reclass_data_revision: master
linux:
system:
diff --git a/mcp/reclass/nodes/cfg01.mcp-ovn-noha.local.yml b/mcp/reclass/nodes/cfg01.mcp-ovn-noha.local.yml
index 6ae0367b9..22014d7a2 100644
--- a/mcp/reclass/nodes/cfg01.mcp-ovn-noha.local.yml
+++ b/mcp/reclass/nodes/cfg01.mcp-ovn-noha.local.yml
@@ -10,7 +10,7 @@ classes:
- cluster.mcp-ovn-noha.infra.config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
reclass_data_revision: master
linux:
system:
diff --git a/mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-ha.local.yml b/mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-ha.local.yml
index d51b66da5..9ad516f18 100644
--- a/mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-ha.local.yml
+++ b/mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-ha.local.yml
@@ -10,7 +10,7 @@ classes:
- cluster.mcp-ovs-dpdk-ha.infra.config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
reclass_data_revision: master
linux:
system:
diff --git a/mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-noha.local.yml b/mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-noha.local.yml
index 30b61848e..d1ba70a91 100644
--- a/mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-noha.local.yml
+++ b/mcp/reclass/nodes/cfg01.mcp-ovs-dpdk-noha.local.yml
@@ -10,7 +10,7 @@ classes:
- cluster.mcp-ovs-dpdk-noha.infra.config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
reclass_data_revision: master
linux:
system:
diff --git a/mcp/reclass/nodes/cfg01.mcp-ovs-ha.local.yml b/mcp/reclass/nodes/cfg01.mcp-ovs-ha.local.yml
index d5171277d..8f1cc2cd5 100644
--- a/mcp/reclass/nodes/cfg01.mcp-ovs-ha.local.yml
+++ b/mcp/reclass/nodes/cfg01.mcp-ovs-ha.local.yml
@@ -10,7 +10,7 @@ classes:
- cluster.mcp-ovs-ha.infra.config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
reclass_data_revision: master
linux:
system:
diff --git a/mcp/reclass/nodes/cfg01.mcp-ovs-noha.local.yml b/mcp/reclass/nodes/cfg01.mcp-ovs-noha.local.yml
index 658061b26..9d1f0265a 100644
--- a/mcp/reclass/nodes/cfg01.mcp-ovs-noha.local.yml
+++ b/mcp/reclass/nodes/cfg01.mcp-ovs-noha.local.yml
@@ -10,7 +10,7 @@ classes:
- cluster.mcp-ovs-noha.infra.config
parameters:
_param:
- linux_system_codename: xenial
+ linux_system_codename: bionic
reclass_data_revision: master
linux:
system:
diff --git a/mcp/salt-formulas/maas/machines/delete.sls b/mcp/salt-formulas/maas/machines/delete.sls
deleted file mode 100644
index 2903f9226..000000000
--- a/mcp/salt-formulas/maas/machines/delete.sls
+++ /dev/null
@@ -1,20 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-{%- from "maas/map.jinja" import region with context %}
-
-maas_login_admin:
- cmd.run:
- - name: "maas-region apikey --username {{ region.admin.username }} > /var/lib/maas/.maas_credentials"
- - unless: 'test -e /var/lib/maas/.maas_credentials'
-
-# TODO: implement delete_machine via _modules/maas.py
-delete_machine:
- cmd.run:
- - name: "maas login {{ region.admin.username }} http://{{ region.bind.host }}:5240/MAAS/api/2.0 - < /var/lib/maas/.maas_credentials && maas opnfv machine delete {{ pillar['system_id'] }}"
- - require:
- - cmd: maas_login_admin
diff --git a/mcp/salt-formulas/maas/machines/mark_broken_fixed.sls b/mcp/salt-formulas/maas/machines/mark_broken_fixed.sls
deleted file mode 100644
index 46691bb09..000000000
--- a/mcp/salt-formulas/maas/machines/mark_broken_fixed.sls
+++ /dev/null
@@ -1,20 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-{%- from "maas/map.jinja" import region with context %}
-
-maas_login_admin:
- cmd.run:
- - name: "maas-region apikey --username {{ region.admin.username }} > /var/lib/maas/.maas_credentials"
- - unless: 'test -e /var/lib/maas/.maas_credentials'
-
-# TODO: implement mark_broken_fixed_machine via _modules/maas.py
-mark_broken_fixed_machine:
- cmd.run:
- - name: "maas login {{ region.admin.username }} http://{{ region.bind.host }}:5240/MAAS/api/2.0 - < /var/lib/maas/.maas_credentials && maas opnfv machine mark-broken {{ pillar['system_id'] }} && sleep 10 && maas opnfv machine mark-fixed {{ pillar['system_id'] }} && maas opnfv machine test {{ pillar['system_id'] }} testing_scripts=fio"
- - require:
- - cmd: maas_login_admin
diff --git a/mcp/salt-formulas/maas/machines/override_failed_testing.sls b/mcp/salt-formulas/maas/machines/override_failed_testing.sls
deleted file mode 100644
index e7fe1d267..000000000
--- a/mcp/salt-formulas/maas/machines/override_failed_testing.sls
+++ /dev/null
@@ -1,20 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-{%- from "maas/map.jinja" import region with context %}
-
-maas_login_admin:
- cmd.run:
- - name: "maas-region apikey --username {{ region.admin.username }} > /var/lib/maas/.maas_credentials"
- - unless: 'test -e /var/lib/maas/.maas_credentials'
-
-# TODO: implement override_failed_testing via _modules/maas.py
-mark_broken_fixed_machine:
- cmd.run:
- - name: "maas login {{ region.admin.username }} http://{{ region.bind.host }}:5240/MAAS/api/2.0 - < /var/lib/maas/.maas_credentials && maas opnfv machine override-failed-testing {{ pillar['system_id'] }}"
- - require:
- - cmd: maas_login_admin
diff --git a/mcp/salt-formulas/maas/pxe_nat.sls b/mcp/salt-formulas/maas/pxe_nat.sls
deleted file mode 100644
index 8a03c4fdb..000000000
--- a/mcp/salt-formulas/maas/pxe_nat.sls
+++ /dev/null
@@ -1,37 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-net.ipv4.ip_forward:
- sysctl.present:
- - value: 1
-
-iptables_pxe_nat:
- iptables.append:
- - table: nat
- - chain: POSTROUTING
- - jump: MASQUERADE
- - destination: 0/0
- - source: {{ salt['pillar.get']('_param:single_address') }}/{{ salt['pillar.get']('_param:opnfv_net_admin_mask') }}
- - save: True
-
-iptables_pxe_source:
- iptables.append:
- - table: filter
- - chain: INPUT
- - jump: ACCEPT
- - destination: 0/0
- - source: {{ salt['pillar.get']('_param:single_address') }}/{{ salt['pillar.get']('_param:opnfv_net_admin_mask') }}
- - save: True
-
-iptables_pxe_destination:
- iptables.append:
- - table: filter
- - chain: INPUT
- - jump: ACCEPT
- - destination: {{ salt['pillar.get']('_param:single_address') }}/{{ salt['pillar.get']('_param:opnfv_net_admin_mask') }}
- - source: 0/0
- - save: True
diff --git a/mcp/salt-formulas/opendaylight/server.sls b/mcp/salt-formulas/opendaylight/server.sls
deleted file mode 100644
index 206dc52a2..000000000
--- a/mcp/salt-formulas/opendaylight/server.sls
+++ /dev/null
@@ -1,113 +0,0 @@
-##############################################################################
-# Copyright (c) 2017 Mirantis Inc. and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-{% from "opendaylight/map.jinja" import server with context %}
-{% from "linux/map.jinja" import system with context %}
-
-{%- if server.enabled %}
-
-opendaylight_repo_key:
- cmd.run:
- - name: "apt-key adv --keyserver keyserver.ubuntu.com --recv 44C05248"
-{%- if system.proxy is defined and system.proxy.pkg is defined %}
- - env:
-{%- if system.proxy.pkg.http is defined %}
- - http_proxy: {{ system.proxy.pkg.http }}
-{%- endif %}
-{%- if system.proxy.pkg.https is defined %}
- - https_proxy: {{ system.proxy.pkg.https }}
-{%- endif %}
-{%- endif %}
-
-opendaylight_repo:
- pkgrepo.managed:
- # NOTE(armband): PPA handling behind proxy broken, define it explicitly
- # https://github.com/saltstack/salt/pull/45224
- # - ppa: {{ server.repo }}
- - human_name: opendaylight-ppa
- - name: deb http://ppa.launchpad.net/odl-team/{{ server.version }}/ubuntu xenial main
- - file: /etc/apt/sources.list.d/odl-team-ubuntu-{{ server.version }}-xenial.list
-
-{%- if grains['saltversioninfo'] < [2017, 7] %}
-service.mask:
- module.run:
- - m_name: opendaylight
-{%- else %}
-opendaylight_service_mask:
- service.masked:
- - name: opendaylight
-{%- endif %}
-
-opendaylight:
- pkg.installed:
- - require:
- - pkgrepo: opendaylight_repo
- - require_in:
- - file: /opt/opendaylight/etc/jetty.xml
- - file: /opt/opendaylight/bin/setenv
- - ini: /opt/opendaylight/etc/org.apache.karaf.features.cfg
- - ini: /opt/opendaylight/etc/org.ops4j.pax.web.cfg
- service.running:
- - enable: true
-{%- if grains['saltversioninfo'] >= [2017, 7] %}
- - unmask: true
-{%- endif %}
- - watch:
- - file: /opt/opendaylight/etc/jetty.xml
- - file: /opt/opendaylight/bin/setenv
- - ini: /opt/opendaylight/etc/org.apache.karaf.features.cfg
- - ini: /opt/opendaylight/etc/org.ops4j.pax.web.cfg
-
-/opt/opendaylight/etc/jetty.xml:
- file.managed:
- - source: salt://opendaylight/files/jetty.xml
- - template: jinja
- - user: odl
- - group: odl
-
-/opt/opendaylight/bin/setenv:
- file.managed:
- - source: salt://opendaylight/files/setenv.shell
- - mode: 0755
- - use:
- - file: /opt/opendaylight/etc/jetty.xml
-
-{% set features %}
-{%- for f in server.karaf_features.itervalues() -%}
-{{ f | join(',') }}{%- if not loop.last %},{%- endif %}
-{%- endfor %}
-{% endset %}
-
-/opt/opendaylight/etc/org.apache.karaf.features.cfg:
- ini.options_present:
- - sections:
- featuresBoot: {{ features }}
-
-/opt/opendaylight/etc/org.ops4j.pax.web.cfg:
- ini.options_present:
- - sections:
- org.ops4j.pax.web.listening.addresses: {{ server.odl_bind_ip }}
- org.osgi.service.http.port: {{ server.odl_rest_port }}
-
-{%- if server.get('router_enabled', false) %}
-/opt/opendaylight/etc/custom.properties:
- ini.options_present:
- - sections:
- ovsdb.l3.fwd.enabled: 'yes'
- ovsdb.of.version: 1.3
- - require:
- - pkg: opendaylight
- - watch_in:
- - service: opendaylight
-{%- endif %}
-
-{%- if grains['cpuarch'] == 'aarch64' %}
-opendaylight-leveldbjni:
- pkg.installed
-{%- endif %}
-
-{%- endif %}
diff --git a/mcp/salt-formulas/opnfv/route_wrapper.sls b/mcp/salt-formulas/opnfv/route_wrapper.sls
deleted file mode 100644
index 6132f317e..000000000
--- a/mcp/salt-formulas/opnfv/route_wrapper.sls
+++ /dev/null
@@ -1,27 +0,0 @@
-##############################################################################
-# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-/usr/local/sbin/route:
- file.managed:
- - contents: |
- #!/bin/sh
-
- # Workaround salt-managed routes breaking ifup when route already exists
- route_binary='/sbin/route'
- route_output=$("${route_binary}" "$@" 2>&1)
- route_return=$?
-
- if [ -n "${route_output}" ]; then
- if echo "${route_output}" | grep -q 'SIOCADDRT: File exists'; then
- exit 0
- fi
- echo "${route_output}"
- fi
- exit "${route_return}"
- - user: root
- - group: root
- - mode: 755
diff --git a/mcp/salt-formulas/salt-formula-aodh b/mcp/salt-formulas/salt-formula-aodh
new file mode 160000
+Subproject 858785a84cdbfb3add01158d40237af0d41e4b4
diff --git a/mcp/salt-formulas/salt-formula-apache b/mcp/salt-formulas/salt-formula-apache
new file mode 160000
+Subproject 41d31d33354eeff85cd65b78fae0d5af274172b
diff --git a/mcp/salt-formulas/salt-formula-armband/armband/files/nova-libvirt-aarch64-rollup.diff b/mcp/salt-formulas/salt-formula-armband/armband/files/nova-libvirt-aarch64-rollup.diff
new file mode 100644
index 000000000..1ecbf2973
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-armband/armband/files/nova-libvirt-aarch64-rollup.diff
@@ -0,0 +1,27 @@
+From: Charalampos Kominos <Charalampos.Kominos@enea.com>
+Date: Wed, 2 May 2018 14:20:47 +0200
+Subject: [PATCH] Allow libvirt to honor root device naming
+
+Current behaviour in upstream nova is for rootfs to be in /dev/vda
+which is the default behaviour when using virtio driver. However when
+other devices are requested either by glance or by CLI, nova ignores
+that naming and still tries to attach to vda which fails.
+
+Manually applied in https://review.openstack.org/#/c/214314/
+
+JIRA: ARMBAND-376
+
+Signed-off-by: Charalampos Kominos <charalampos.kominos@enea.com>
+---
+
+--- a/nova/virt/libvirt/driver.py
++++ b/nova/virt/libvirt/driver.py
+@@ -8257,6 +8257,8 @@
+ "Ignoring supplied device name: %(device_name)s. "
+ "Libvirt can't honour user-supplied dev names",
+ {'device_name': bdm.device_name}, instance=instance)
++ if instance.root_device_name == bdm.device_name:
++ instance.root_device_name = None
+ bdm.device_name = None
+ block_device_info = driver.get_block_device_info(instance,
+ block_device_mapping)
diff --git a/mcp/salt-formulas/salt-formula-armband/armband/init.sls b/mcp/salt-formulas/salt-formula-armband/armband/init.sls
new file mode 100644
index 000000000..8a8cf2ab1
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-armband/armband/init.sls
@@ -0,0 +1,7 @@
+include:
+ - armband.qemu_efi
+ - armband.vgabios
+ {%- if salt['pkg.version']('python-nova') %}
+ - armband.nova_libvirt
+ - armband.nova_config
+ {%- endif %}
diff --git a/mcp/salt-formulas/salt-formula-armband/armband/nova_config.sls b/mcp/salt-formulas/salt-formula-armband/armband/nova_config.sls
new file mode 100644
index 000000000..b0e17b718
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-armband/armband/nova_config.sls
@@ -0,0 +1,37 @@
+{% if grains['cpuarch'] == 'aarch64' %}
+{% if grains['virtual'] == 'kvm' %}
+nova_virt_type:
+ file.replace:
+ - name: "/etc/nova/nova.conf"
+ - pattern: '^virt_type\s*=.*$'
+ - repl: "virt_type = qemu"
+nova_compute_virt_type:
+ file.replace:
+ - name: "/etc/nova/nova-compute.conf"
+ - pattern: '^virt_type\s*=.*$'
+ - repl: "virt_type = qemu"
+{% endif %}
+nova_pointer_model:
+ file.replace:
+ - name: "/etc/nova/nova.conf"
+ - pattern: '^#pointer_model\s*=.*$'
+ - repl: "pointer_model = ps2mouse"
+nova_cpu_mode:
+ file.replace:
+ - name: "/etc/nova/nova.conf"
+ - pattern: '^cpu_mode\s*=\s*host-passthrough'
+ - repl: "cpu_mode = custom"
+nova_cpu_model:
+ file.replace:
+ - name: "/etc/nova/nova.conf"
+ - pattern: '^#cpu_model\s*=.*$'
+ {% if grains['virtual'] == 'kvm' %}
+ - repl: "cpu_model = cortex-a57"
+ {% else %}
+ - repl: "cpu_model = host"
+ {% endif %}
+restart_nova-compute:
+ cmd:
+ - run
+ - name: "service nova-compute restart"
+{% endif %}
diff --git a/mcp/salt-formulas/salt-formula-armband/armband/nova_libvirt.sls b/mcp/salt-formulas/salt-formula-armband/armband/nova_libvirt.sls
new file mode 100644
index 000000000..9d26e86a4
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-armband/armband/nova_libvirt.sls
@@ -0,0 +1,9 @@
+{% if grains['cpuarch'] == 'aarch64' %}
+nova-libvirt-aarch64-rollup:
+ file.patch:
+ - name: /usr/lib/python2.7/dist-packages
+ - source: salt://armband/files/nova-libvirt-aarch64-rollup.diff
+ - hash: False
+ - options: '-p1'
+ - unless: 'test -f /var/cache/salt/minion/files/base/armband/files/nova-libvirt-aarch64-rollup.diff && cd /usr/lib/python2.7/dist-packages && patch -p1 -R --dry-run -r - < /var/cache/salt/minion/files/base/armband/files/nova-libvirt-aarch64-rollup.diff'
+{% endif %}
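The `unless` guard above keeps the `file.patch` state idempotent: the state is skipped only when the cached diff can already be reverse-applied cleanly, i.e. the patch is in place. A minimal standalone sketch of the same check, with illustrative paths standing in for the Salt minion file cache:

    #!/bin/bash
    # Re-apply a patch only if it is not already present in the target tree.
    # PATCH and TARGET are placeholders; the state above uses the minion file
    # cache and /usr/lib/python2.7/dist-packages respectively.
    PATCH=/tmp/nova-libvirt-aarch64-rollup.diff
    TARGET=/usr/lib/python2.7/dist-packages
    if (cd "$TARGET" && patch -p1 -R --dry-run -r - < "$PATCH") >/dev/null 2>&1; then
      echo "patch already applied, nothing to do"
    else
      (cd "$TARGET" && patch -p1 < "$PATCH")
    fi
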
diff --git a/mcp/salt-formulas/salt-formula-armband/armband/qemu_efi.sls b/mcp/salt-formulas/salt-formula-armband/armband/qemu_efi.sls
new file mode 100644
index 000000000..aef17f9a8
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-armband/armband/qemu_efi.sls
@@ -0,0 +1,4 @@
+{% if grains['cpuarch'] == 'aarch64' %}
+qemu-efi:
+ pkg.installed
+{% endif %}
diff --git a/mcp/salt-formulas/salt-formula-armband/armband/vgabios.sls b/mcp/salt-formulas/salt-formula-armband/armband/vgabios.sls
new file mode 100644
index 000000000..7f004971c
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-armband/armband/vgabios.sls
@@ -0,0 +1,9 @@
+{% if grains['cpuarch'] == 'aarch64' %}
+vgabios:
+ pkg.installed
+/usr/share/qemu:
+ file.directory
+/usr/share/qemu/vgabios-stdvga.bin:
+ file.symlink:
+ - target: "/usr/share/vgabios/vgabios.bin"
+{% endif %}
diff --git a/mcp/salt-formulas/salt-formula-barbican b/mcp/salt-formulas/salt-formula-barbican
new file mode 160000
+Subproject f70167cf8e99bdd72bcac7dbc946ec1655d5f31
diff --git a/mcp/salt-formulas/salt-formula-ceilometer b/mcp/salt-formulas/salt-formula-ceilometer
new file mode 160000
+Subproject 7478c662b0a41671ebb76af936948d29e1a0448
diff --git a/mcp/salt-formulas/salt-formula-cinder b/mcp/salt-formulas/salt-formula-cinder
new file mode 160000
+Subproject 57837bfba6427fd0d63afae332c724be5c3fa3b
diff --git a/mcp/salt-formulas/salt-formula-etcd b/mcp/salt-formulas/salt-formula-etcd
new file mode 160000
+Subproject b0c13fc3701045df3587d74d37bd947fd84dfda
diff --git a/mcp/salt-formulas/salt-formula-glance b/mcp/salt-formulas/salt-formula-glance
new file mode 160000
+Subproject bab584dc63874210c1bd3e42bcca8fd06d06ed3
diff --git a/mcp/salt-formulas/salt-formula-gnocchi b/mcp/salt-formulas/salt-formula-gnocchi
new file mode 160000
+Subproject 0d4773a7dc87872dd28b3be0fc8a0a81cdaa864
diff --git a/mcp/salt-formulas/salt-formula-heat b/mcp/salt-formulas/salt-formula-heat
new file mode 160000
+Subproject f1218e910d9bbf38c48b67e27cfa3c83024bbae
diff --git a/mcp/salt-formulas/salt-formula-horizon b/mcp/salt-formulas/salt-formula-horizon
new file mode 160000
+Subproject 116b93154b33bea17251b118e84c3eb47eb6893
diff --git a/mcp/salt-formulas/salt-formula-keystone b/mcp/salt-formulas/salt-formula-keystone
new file mode 160000
+Subproject 196016fcaeb572108221933051b67568a52e80b
diff --git a/mcp/salt-formulas/salt-formula-kubernetes b/mcp/salt-formulas/salt-formula-kubernetes
new file mode 160000
+Subproject ffa16d06db002139aca3856b26539e54f17ed6d
diff --git a/mcp/salt-formulas/salt-formula-linux b/mcp/salt-formulas/salt-formula-linux
new file mode 160000
+Subproject a0d8b2d8b108bff351d6ed60ad427dcbdccc57d
diff --git a/mcp/salt-formulas/salt-formula-maas b/mcp/salt-formulas/salt-formula-maas
new file mode 160000
+Subproject decf41b9dd92ab23752b34c21e4f57dc780be33
diff --git a/mcp/salt-formulas/salt-formula-neutron b/mcp/salt-formulas/salt-formula-neutron
new file mode 160000
+Subproject 7e623c0c660715f58ceaaf20e3d8620454c3977
diff --git a/mcp/salt-formulas/salt-formula-nfs b/mcp/salt-formulas/salt-formula-nfs
new file mode 160000
+Subproject 5872d161e3ee149335932bfa364a437fdd367af
diff --git a/mcp/salt-formulas/salt-formula-nova b/mcp/salt-formulas/salt-formula-nova
new file mode 160000
+Subproject 60df8720a81b32f05885e3cbd49a4bb6e8960e2
diff --git a/mcp/salt-formulas/salt-formula-opendaylight/metadata/service/server/cluster.yml b/mcp/salt-formulas/salt-formula-opendaylight/metadata/service/server/cluster.yml
new file mode 100644
index 000000000..3554d786e
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-opendaylight/metadata/service/server/cluster.yml
@@ -0,0 +1,60 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc. and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+applications:
+ - opendaylight
+classes:
+ - service.keepalived.cluster.single
+ - service.haproxy.proxy.single
+parameters:
+ _param:
+ opendaylight_version: neon
+ cluster_vip_address: ${_param:opendaylight_service_host}
+ keepalived_vip_virtual_router_id: 199
+ keepalived_vip_password: ${_param:opnfv_main_password}
+ keepalived_vip_interface: ${_param:single_nic}
+ haproxy_odl_api_check_params: check inter 20s fastinter 2s
+ opendaylight:
+ server:
+ enabled: 'True'
+ version: ${_param:opendaylight_version}
+ repo: 'odl-team/${_param:opendaylight_version}'
+ cluster_enabled: true
+ keepalived:
+ cluster:
+ vrrp_scripts:
+ check_pidof:
+ args: haproxy
+ interval: 20
+ rise: 3
+ fall: 1
+ instance:
+ VIP:
+ track_script: check_pidof
+ haproxy:
+ proxy:
+ listen:
+ opendaylight_api:
+ type: general-service
+ balance: source
+ binds:
+ - address: ${_param:opendaylight_service_host}
+ port: ${_param:opendaylight_rest_port}
+ servers:
+ - name: ${_param:opendaylight_server_node01_hostname}
+ host: ${_param:opendaylight_server_node01_address}
+ port: ${_param:opendaylight_rest_port}
+ params: ${_param:haproxy_odl_api_check_params}
+ - name: ${_param:opendaylight_server_node02_hostname}
+ host: ${_param:opendaylight_server_node02_address}
+ port: ${_param:opendaylight_rest_port}
+ params: ${_param:haproxy_odl_api_check_params}
+ - name: ${_param:opendaylight_server_node03_hostname}
+ host: ${_param:opendaylight_server_node03_address}
+ port: ${_param:opendaylight_rest_port}
+ params: ${_param:haproxy_odl_api_check_params}
diff --git a/mcp/metadata/service/opendaylight/server/single.yml b/mcp/salt-formulas/salt-formula-opendaylight/metadata/service/server/single.yml
index 3a762b39a..36736be0a 100644
--- a/mcp/metadata/service/opendaylight/server/single.yml
+++ b/mcp/salt-formulas/salt-formula-opendaylight/metadata/service/server/single.yml
@@ -10,11 +10,9 @@ applications:
- opendaylight
parameters:
_param:
- version: oxygen
+ opendaylight_version: neon
opendaylight:
server:
enabled: 'True'
- version: ${_param:version}
- repo: 'odl-team/${_param:version}'
- logging:
- engine: syslog
+ version: ${_param:opendaylight_version}
+ repo: 'odl-team/${_param:opendaylight_version}'
diff --git a/mcp/metadata/service/opendaylight/support.yml b/mcp/salt-formulas/salt-formula-opendaylight/metadata/service/support.yml
index bbb204315..bbb204315 100644
--- a/mcp/metadata/service/opendaylight/support.yml
+++ b/mcp/salt-formulas/salt-formula-opendaylight/metadata/service/support.yml
diff --git a/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/config.sls b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/config.sls
new file mode 100644
index 000000000..3f1f81348
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/config.sls
@@ -0,0 +1,94 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc. and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{% from "opendaylight/map.jinja" import server with context %}
+
+/opt/opendaylight/etc/jetty.xml:
+ file.managed:
+ - source: salt://opendaylight/files/jetty.xml
+ - template: jinja
+ - user: odl
+ - group: odl
+
+/opt/opendaylight/bin/setenv:
+ file.managed:
+ - source: salt://opendaylight/files/setenv.shell
+ - template: jinja
+ - mode: 0755
+ - user: odl
+ - group: odl
+
+{%- set features = [] %}
+{%- for f in server.karaf_features.itervalues() %}
+ {%- do features.extend(f) %}
+{%- endfor %}
+
+/opt/opendaylight/etc/org.apache.karaf.features.cfg:
+ ini.options_present:
+ - sections:
+ featuresBoot: {{ features|join(',') }}
+
+/opt/opendaylight/etc/org.ops4j.pax.web.cfg:
+ ini.options_present:
+ - sections:
+ org.ops4j.pax.web.listening.addresses: {{ server.odl_bind_ip }}
+ org.osgi.service.http.port: {{ server.odl_rest_port }}
+
+{%- if not server.pax_logging_enabled|d(false) %}
+ {%-
+ set pax_logging_opts = [
+ 'log4j2.rootLogger.appenderRef.PaxOsgi.ref',
+ 'log4j2.appender.osgi.type',
+ 'log4j2.appender.osgi.name',
+ 'log4j2.appender.osgi.filter'
+ ]
+ %}
+
+ {%- for opt in pax_logging_opts %}
+pax.logging.cfg.{{ opt }}:
+ file.comment:
+ - name: /opt/opendaylight/etc/org.ops4j.pax.logging.cfg
+ - regex: ^{{ opt }}\s*=
+ - backup: false
+ {%- endfor %}
+{%- endif %}
+
+/opt/opendaylight/etc/org.opendaylight.openflowplugin.cfg:
+ file.managed:
+ - user: odl
+ - group: odl
+ ini.options_present:
+ - sections:
+ is-statistics-polling-on: {{ server.stats_polling_enabled }}
+
+{%- if server.get('router_enabled', false) %}
+/opt/opendaylight/etc/custom.properties:
+ ini.options_present:
+ - sections:
+ ovsdb.l3.fwd.enabled: 'yes'
+ ovsdb.of.version: 1.3
+{%- endif %}
+
+{%- if server.netvirt_natservice is defined %}
+/opt/opendaylight/etc/opendaylight/datastore/initial/config/netvirt-natservice-config.xml:
+ file.managed:
+ - source: salt://opendaylight/files/netvirt-natservice-config.xml
+ - template: jinja
+ - makedirs: true
+ - user: odl
+ - group: odl
+{%- endif %}
+
+{%- if server.dhcp.enabled %}
+/opt/opendaylight/etc/opendaylight/datastore/initial/config/netvirt-dhcpservice-config.xml:
+ file.managed:
+ - source: salt://opendaylight/files/netvirt-dhcpservice-config.xml
+ - template: jinja
+ - makedirs: true
+ - user: odl
+ - group: odl
+{%- endif %}
diff --git a/mcp/salt-formulas/opendaylight/files/jetty.xml b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/jetty.xml
index de2ac677d..de2ac677d 100644
--- a/mcp/salt-formulas/opendaylight/files/jetty.xml
+++ b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/jetty.xml
diff --git a/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/netvirt-dhcpservice-config.xml b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/netvirt-dhcpservice-config.xml
new file mode 100644
index 000000000..336957fd1
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/netvirt-dhcpservice-config.xml
@@ -0,0 +1,23 @@
+{%- from "opendaylight/map.jinja" import server with context -%}
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+<dhcpservice-config xmlns="urn:opendaylight:params:xml:ns:yang:dhcpservice:config">
+ <controller-dhcp-enabled>{{ server.dhcp.enabled }}</controller-dhcp-enabled>
+ <dhcp-dynamic-allocation-pool-enabled>{{ server.dhcp.dynamic_allocation_pool_enabled }}</dhcp-dynamic-allocation-pool-enabled>
+</dhcpservice-config>
diff --git a/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/netvirt-natservice-config.xml b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/netvirt-natservice-config.xml
new file mode 100644
index 000000000..35994e51d
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/netvirt-natservice-config.xml
@@ -0,0 +1,23 @@
+{%- from "opendaylight/map.jinja" import server with context -%}
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+<natservice-config xmlns="urn:opendaylight:netvirt:natservice:config">
+ <nat-mode>{{ server.netvirt_natservice.nat_mode|d('controller') }}</nat-mode>
+ <snat-punt-timeout>{{ server.netvirt_natservice.snat_punt_timeout|d('5') }}</snat-punt-timeout>
+</natservice-config>
diff --git a/mcp/salt-formulas/opendaylight/files/setenv.shell b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/setenv.shell
index 2921ade14..2921ade14 100644
--- a/mcp/salt-formulas/opendaylight/files/setenv.shell
+++ b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/files/setenv.shell
diff --git a/mcp/salt-formulas/opendaylight/init.sls b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/init.sls
index 132f69b33..132f69b33 100644
--- a/mcp/salt-formulas/opendaylight/init.sls
+++ b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/init.sls
diff --git a/mcp/salt-formulas/opendaylight/map.jinja b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/map.jinja
index 38575111e..44f2685d0 100644
--- a/mcp/salt-formulas/opendaylight/map.jinja
+++ b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/map.jinja
@@ -1,28 +1,33 @@
##############################################################################
-# Copyright (c) 2017 Mirantis Inc. and others.
+# Copyright (c) 2019 Mirantis Inc. and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
+{%- set pkgs = ['opendaylight'] %}
+{%- do pkgs.append('opendaylight-leveldbjni') if grains['cpuarch'] == 'aarch64' %}
+
{% set server = salt['grains.filter_by']({
'Debian': {
+ 'pkgs': pkgs,
'karaf_features': {'default': ['standard', 'wrap', 'ssh']},
'odl_rest_port': '8282',
'odl_bind_ip': '0.0.0.0',
'repo': 'odl-team/oxygen',
'log_levels': {},
- 'enable_ha': false,
- 'ha_node_ips': [],
- 'ha_node_index': 0,
+ 'cluster_enabled': false,
+ 'seed_nodes_list': [],
+ 'stats_polling_enabled': false,
+ 'dhcp': {
+ 'enabled': false,
+ 'dynamic_allocation_pool_enabled': false,
+ },
'security_group_mode': 'stateful',
'vpp_routing_node': '',
'java_extra_opts': '-Djava.net.preferIPv4Stack=true -XX:+UseG1GC',
'java_min_mem': '1g',
'java_max_mem': '2g',
- },
- 'RedHat': {
- 'repo': 'opendaylight-6-testing'
- },
+ }
}, merge=salt['pillar.get']('opendaylight:server')) %}
diff --git a/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/repo.sls b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/repo.sls
new file mode 100644
index 000000000..1f322c53f
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/repo.sls
@@ -0,0 +1,56 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc. and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{% from "opendaylight/map.jinja" import server with context %}
+
+# NOTE: starting with Salt 2018.3, key_text might be used instead
+opendaylight_repo_key:
+ # Launchpad PPA for ODL Team
+ # pub 4096R/44C05248 2017-01-26
+ cmd.run:
+ - name: |
+ cat <<-EOF | sudo apt-key add -
+ -----BEGIN PGP PUBLIC KEY BLOCK-----
+ Version: GnuPG v1
+
+ mQINBFiKaBEBEADpCtIj8utf/bUfN6iQ+sxGiOPLnXVYoYyKifHDazD4o1Jevfiu
+ EpcDIx9EdnhrCpvKTU+jaw2B7K3pkdqbjbzjZY+2CDENSQXfRHuuI/nWDaYI0stx
+ Tf/evip3cxdutnZNAklzkxppHP+4UZm9HAd7uZsEyff4H9DIsHzZIA4Z++Hx2+lt
+ w9K0iCKh2k6Pon/VVo8Bir3JuKIIdLRAuHmyniYlHDswQnu+1nQHE0F/oboD0Q9Z
+ hOvXAr1L7LWu0hkLV7BqmeI0SPcRA3b5MU3dfaTK8MaPAo8anQTpCyYUnoIBqX8h
+ y324T/dvpFKq2/X3RL+wOSYTA8TLgyhH0fhdIKZg3G8m9kxuAHZYHIHnDtvgJ5yd
+ 72tNY+w8UIX8U2ark/WdkAMZr3O0AuTDlvHcasxO5+puAu8jh0EgtqItqrvKwiF7
+ dmlHVW41Rt+su2fmsUkk4Z0IhWrn3PdrSWAcH2eL6vjuqx6CccpjsjyiSQ90dUox
+ EoMpY+viX59aF0kU4BLt76mQO6YZtCpicLxFGCu97v1mNn+FWjhBOIF08pVsbNlq
+ oMl2j0N8NKZxJvkkmsA/i//ch5FsjzvUy3xajlSzq9ruWS4SlWq2Vzdx/acvF7Oa
+ ABA11wIjzLc9vmhzQNiRa53fJQwi+w/Or9LtH2msKCbcPVHoZ5OT4t6S8QARAQAB
+ tBpMYXVuY2hwYWQgUFBBIGZvciBPREwgVGVhbYkCOAQTAQIAIgUCWIpoEQIbAwYL
+ CQgHAwIGFQgCCQoLBBYCAwECHgECF4AACgkQe4qho0TAUkgAmg//XY/RqU4WcT+p
+ 13oDc3+Dp4aL+rwaNz0o56i0z0cYPxd8GPicCuS8d/di07GnQiBcZ5DZgegnnaYm
+ OUF+phxk4q+jYO/t2GHQlYSf/QyUv7OimidLOHN1FiahmcGobliwih70o6ZcMT84
+ ggSu8jBzA/HLFBIkgStKD/staR5zJ2HfK298yVhiffyrPA+I3nPe7pvTaGa2e8AP
+ BYs5zB5n27upSZIokXFvqlmS4HEKDmPcY061wgmg1cNY1Y+mIuGjxY1Igbi6kAe0
+ yaLN2AN4c2ImhpwOcuazKTe/q2ZhoPTpYvuzmogwau8LBjRBhVS6fkTpSBPEkcwn
+ f/QYmmVLygmpMDHuHapyH8iaUoksq7gd64iBRDJQN7giQSjkTVvcGBqoKG8lbUMV
+ MDT4FGuYYsObWUg7kmHlNq9nIVlAxmxv8ZTg9+8xy3f53aId/51m+gW9LGRAT94T
+ ZIWrF9cBvsPWoHgHkV1At/fPprOvNXqeQiJ7UzC3ikDNCu2AjPEbA4sb019RNgtj
+ jUI6g6RZdzbeKVpptxILCtT3yKbfKj8AfrfaRzS0yMhVudgLolIUA4S6g46p0Cgy
+ gITO49wxxBu6UAOsAG3psDRlsZmmrT4AH09Yt2RzmY0FBWValqpoPagheQqeU+2W
+ FKnV9Lw1SKMtWZbYMvIlB0rwts3k9lE=
+ =xkZ9
+ -----END PGP PUBLIC KEY BLOCK-----
+ EOF
+ - unless: apt-key list | grep -qF '4096R/44C05248 2017-01-26'
+
+opendaylight_repo:
+ pkgrepo.managed:
+ # NOTE(armband): PPA handling behind proxy broken, define it explicitly
+ # https://github.com/saltstack/salt/pull/45224
+ # - ppa: {{ server.repo }}
+ - human_name: opendaylight-ppa
+ - name: deb http://ppa.launchpad.net/{{ server.repo }}/ubuntu {{ grains.oscodename }} main
+ - file: /etc/apt/sources.list.d/odl-team-{{ server.version }}.list
diff --git a/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/server.sls b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/server.sls
new file mode 100644
index 000000000..6e31ffb3f
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-opendaylight/opendaylight/server.sls
@@ -0,0 +1,51 @@
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc. and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{% from "opendaylight/map.jinja" import server with context %}
+
+{%- if server.enabled %}
+
+include:
+ - opendaylight.config
+ - opendaylight.repo
+
+{%- if grains['saltversioninfo'] < [2017, 7] %}
+service.mask:
+ module.run:
+ - m_name: opendaylight
+{%- else %}
+opendaylight_service_mask:
+ service.masked:
+ - name: opendaylight
+{%- endif %}
+ - prereq:
+ - pkg: opendaylight
+
+{%- if server.cluster_enabled %}
+configure_cluster:
+ cmd.run:
+ - name: /opt/opendaylight/bin/configure-cluster-ipdetect.sh {{ server.seed_nodes_list }}
+ - require:
+ - pkg: opendaylight
+{%- endif %}
+
+opendaylight:
+ pkg.installed:
+ - names: {{ server.pkgs }}
+ - require:
+ - sls: opendaylight.repo
+ - require_in:
+ - sls: opendaylight.config
+ service.running:
+ - enable: true
+{%- if grains['saltversioninfo'] >= [2017, 7] %}
+ - unmask: true
+{%- endif %}
+ - watch:
+ - sls: opendaylight.config
+
+{%- endif %}
diff --git a/mcp/salt-formulas/salt-formula-oslo-templates b/mcp/salt-formulas/salt-formula-oslo-templates
new file mode 160000
+Subproject 3534ca9dd05031cbde84cc878122183e3b6daec
diff --git a/mcp/salt-formulas/salt-formula-panko b/mcp/salt-formulas/salt-formula-panko
new file mode 160000
+Subproject 31c752a6eedf41bf4310f7a620acd87b61d0f09
diff --git a/mcp/salt-formulas/salt-formula-quagga/metadata/service/server/single.yml b/mcp/salt-formulas/salt-formula-quagga/metadata/service/server/single.yml
new file mode 100644
index 000000000..2eeca6760
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-quagga/metadata/service/server/single.yml
@@ -0,0 +1,12 @@
+##############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+parameters:
+ quagga:
+ server:
+ enabled: true
diff --git a/mcp/salt-formulas/salt-formula-quagga/quagga/init.sls b/mcp/salt-formulas/salt-formula-quagga/quagga/init.sls
new file mode 100644
index 000000000..810809d77
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-quagga/quagga/init.sls
@@ -0,0 +1,11 @@
+##############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+include:
+{%- if pillar.quagga.server is defined %}
+- quagga.server
+{%- endif %}
diff --git a/mcp/salt-formulas/salt-formula-quagga/quagga/map.jinja b/mcp/salt-formulas/salt-formula-quagga/quagga/map.jinja
new file mode 100644
index 000000000..a76c760d9
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-quagga/quagga/map.jinja
@@ -0,0 +1,21 @@
+{#-
+ Copyright (c) 2018 Intracom Telecom and others.
+ All rights reserved. This program and the accompanying materials
+ are made available under the terms of the Apache License, Version 2.0
+ which accompanies this distribution, and is available at
+ http://www.apache.org/licenses/LICENSE-2.0
+-#}
+{% set server = salt['grains.filter_by']({
+ 'Debian': {
+ 'pkgs': ['libglib2.0-0'],
+ 'quagga_package_url': 'https://wiki.opnfv.org/download/attachments/6827916/quagga-ubuntu.tar.gz',
+ 'quagga_package_checksum': 'https://wiki.opnfv.org/download/attachments/6827916/quagga-ubuntu.checksum',
+ 'install_cmd': 'dpkg -i'
+ },
+ 'RedHat': {
+ 'pkgs': ['glib2', 'glib2-devel'],
+ 'quagga_package_url': 'https://wiki.opnfv.org/download/attachments/6827916/quagga-4.tar.gz',
+ 'quagga_package_checksum': 'https://wiki.opnfv.org/download/attachments/6827916/quagga-4.checksum',
+ 'install_cmd': 'yum -y'
+ },
+}, merge=salt['pillar.get']('quagga:server')) %}
diff --git a/mcp/salt-formulas/salt-formula-quagga/quagga/server.sls b/mcp/salt-formulas/salt-formula-quagga/quagga/server.sls
new file mode 100644
index 000000000..3a9c32906
--- /dev/null
+++ b/mcp/salt-formulas/salt-formula-quagga/quagga/server.sls
@@ -0,0 +1,41 @@
+##############################################################################
+# Copyright (c) 2018 Intracom Telecom and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- from "quagga/map.jinja" import server with context %}
+{%- if server.enabled %}
+
+quagga_packages:
+ pkg.installed:
+ - names: {{ server.pkgs }}
+
+download_quagga:
+ file.managed:
+ - name: /var/cache/quagga.tar.gz
+ - source: {{ server.quagga_package_url }}
+ - source_hash: {{ server.quagga_package_checksum }}
+ - user: root
+ - group: root
+
+unarchive_quagga:
+ archive.extracted:
+ - source: /var/cache/quagga.tar.gz
+ - name: /tmp
+ - user: root
+ - group: root
+
+install_quagga_packages:
+ cmd.run:
+ - name: {{ server.install_cmd }} $(ls |grep -vE 'debuginfo|devel|contrib')
+ - cwd: /tmp/quagga
+ - runas: root
+
+start_zebra_rpc_daemon:
+ cmd.run:
+ - name: /opt/quagga/etc/init.d/zrpcd start
+ - runas: root
+
+{%- endif %}
diff --git a/mcp/salt-formulas/salt-formula-rabbitmq b/mcp/salt-formulas/salt-formula-rabbitmq
new file mode 160000
+Subproject a4d0ca57eec99edd1a6df031a6bd8eabec9a7bd
diff --git a/mcp/salt-formulas/salt-formula-redis b/mcp/salt-formulas/salt-formula-redis
new file mode 160000
+Subproject e6426b6a271b87e90a1c31ddcca36e5449383c4
diff --git a/mcp/metadata/service/tacker/server/single.yml b/mcp/salt-formulas/salt-formula-tacker/metadata/service/server/single.yml
index 597d9f8fb..231e978cc 100644
--- a/mcp/metadata/service/tacker/server/single.yml
+++ b/mcp/salt-formulas/salt-formula-tacker/metadata/service/server/single.yml
@@ -15,7 +15,8 @@ parameters:
server:
bind_host: ${_param:cluster_local_address}
enabled: true
- branch: stable/queens
+ git:
+ branch: stable/${_param:openstack_version}
database:
engine: mysql
host: ${_param:single_address}
diff --git a/mcp/salt-formulas/tacker/files/tacker.conf b/mcp/salt-formulas/salt-formula-tacker/tacker/files/tacker.conf
index 7adfd3139..7adfd3139 100644
--- a/mcp/salt-formulas/tacker/files/tacker.conf
+++ b/mcp/salt-formulas/salt-formula-tacker/tacker/files/tacker.conf
diff --git a/mcp/salt-formulas/tacker/files/tacker.systemd b/mcp/salt-formulas/salt-formula-tacker/tacker/files/tacker.systemd
index 12ff5cdde..12ff5cdde 100644
--- a/mcp/salt-formulas/tacker/files/tacker.systemd
+++ b/mcp/salt-formulas/salt-formula-tacker/tacker/files/tacker.systemd
diff --git a/mcp/salt-formulas/tacker/init.sls b/mcp/salt-formulas/salt-formula-tacker/tacker/init.sls
index 35291b8f9..35291b8f9 100644
--- a/mcp/salt-formulas/tacker/init.sls
+++ b/mcp/salt-formulas/salt-formula-tacker/tacker/init.sls
diff --git a/mcp/salt-formulas/tacker/map.jinja b/mcp/salt-formulas/salt-formula-tacker/tacker/map.jinja
index 9a35c8e52..9a35c8e52 100644
--- a/mcp/salt-formulas/tacker/map.jinja
+++ b/mcp/salt-formulas/salt-formula-tacker/tacker/map.jinja
diff --git a/mcp/salt-formulas/tacker/server.sls b/mcp/salt-formulas/salt-formula-tacker/tacker/server.sls
index eb3468a11..eb3468a11 100644
--- a/mcp/salt-formulas/tacker/server.sls
+++ b/mcp/salt-formulas/salt-formula-tacker/tacker/server.sls
diff --git a/mcp/scripts/.gitignore b/mcp/scripts/.gitignore
index a7f658e4e..d89d60722 100644
--- a/mcp/scripts/.gitignore
+++ b/mcp/scripts/.gitignore
@@ -1,3 +1,5 @@
mcp.rsa*
-user-data.*.sh
+user-data.sh
xdf_data.sh
+docker-compose/files/entrypoint_maas.sh
+docker-compose/files/hosts
diff --git a/mcp/scripts/docker-compose/docker-compose.yaml.j2 b/mcp/scripts/docker-compose/docker-compose.yaml.j2
new file mode 100644
index 000000000..04dc93f98
--- /dev/null
+++ b/mcp/scripts/docker-compose/docker-compose.yaml.j2
@@ -0,0 +1,102 @@
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+{%- import 'net_macros.j2' as ma with context %}
+{#- conf.MCPCONTROL_NET & co are mandatory, defaults are set via globals.sh #}
+{%- set net_mcpcontrol = [conf.MCPCONTROL_NET, conf.MCPCONTROL_PREFIX] | join("/") %}
+version: '2'
+services:
+ opnfv-fuel-salt-master:
+ container_name: "fuel"
+ image: "opnfv/fuel:saltmaster-reclass-{{ conf.MCP_DOCKER_TAG }}"
+ restart: always
+ networks:
+ mcpcontrol:
+ ipv4_address: {{ conf.SALT_MASTER }}
+ pxebr:
+ ipv4_address: {{ nm.net_admin | ipnet_hostaddr(nm.start_ip[nm.net_admin] + nm.net_admin_hosts.index('opnfv_infra_config_pxe_admin_address') +1) }}
+ mgmt:
+ ipv4_address: {{ nm.net_mgmt | ipnet_hostaddr(nm.start_ip[nm.net_mgmt] + nm.net_mgmt_hosts.index('opnfv_infra_config_address') +1) }}
+ volumes:
+ - /run/dbus/system_bus_socket:/run/dbus/system_bus_socket:ro
+ - {{ conf.MCP_REPO_ROOT_PATH }}:/root/fuel
+ - {{ conf.MCP_REPO_ROOT_PATH }}/mcp/scripts/docker-compose/files/entrypoint.sh:/entrypoint.sh
+ - {{ conf.MCP_STORAGE_DIR }}/pod_config.yml:/root/pod_config.yml
+ - {{ conf.MCP_STORAGE_DIR }}/nodes:/srv/salt/reclass/nodes
+ - {{ conf.MCP_STORAGE_DIR }}/pki:/etc/pki
+ - {{ conf.MCP_STORAGE_DIR }}/salt:/etc/salt
+ - {{ conf.MCP_STORAGE_DIR }}/hosts:/etc/hosts
+{%- if conf.MCP_VCP or '-vcp-' in conf.MCP_DEPLOY_SCENARIO %}
+ - {{ conf.MCP_STORAGE_DIR }}/base_image_opnfv_fuel_vcp.img:/srv/salt/env/prd/salt/files/control/images/base_image_opnfv_fuel_vcp.img
+{%- endif %}
+ hostname: cfg01
+ domainname: {{ conf.cluster.domain }}
+ privileged: true
+ dns:
+{%- for server in nm.dns_public %}
+ - {{ server }}
+{%- endfor %}
+{%- if nm.cluster.has_baremetal_nodes %}
+ opnfv-fuel-maas:
+ container_name: "maas"
+ image: "opnfv/fuel:saltminion-maas-{{ conf.MCP_DOCKER_TAG }}"
+ restart: always
+ networks:
+ mcpcontrol:
+ ipv4_address: {{ conf.MAAS_IP }}
+ pxebr:
+ ipv4_address: {{ nm.net_admin | ipnet_hostaddr(nm.start_ip[nm.net_admin] + nm.net_admin_hosts.index('opnfv_infra_maas_node01_deploy_address') +1) }}
+ mgmt:
+ ipv4_address: {{ nm.net_mgmt | ipnet_hostaddr(nm.start_ip[nm.net_mgmt] + nm.net_mgmt_hosts.index('opnfv_infra_maas_node01_address') +1) }}
+ volumes:
+ - /lib/modules:/lib/modules:ro
+ - /sys/fs/cgroup:/sys/fs/cgroup:ro
+ - /run/dbus/system_bus_socket:/run/dbus/system_bus_socket:ro
+ - {{ conf.MCP_REPO_ROOT_PATH }}/mcp/scripts/docker-compose/files/entrypoint_maas.sh:/entrypoint.sh:ro
+ - {{ conf.MCP_STORAGE_DIR }}/hosts:/etc/hosts:ro
+ - {{ conf.MCP_STORAGE_DIR }}/mas01/etc/iptables:/etc/iptables
+ - {{ conf.MCP_STORAGE_DIR }}/mas01/var/lib/postgresql:/var/lib/postgresql
+ - {{ conf.MCP_STORAGE_DIR }}/mas01/var/lib/maas:/var/lib/maas
+ - {{ conf.MCP_STORAGE_DIR }}/mas01/var/spool/maas-proxy:/var/spool/maas-proxy
+ - {{ conf.MCP_STORAGE_DIR }}/mas01/etc/maas:/etc/maas
+ hostname: mas01
+ domainname: {{ conf.cluster.domain }}
+ privileged: true
+ dns:
+{%- for server in nm.dns_public %}
+ - {{ server }}
+{%- endfor %}
+ ports:
+ - 5240:5240
+{%- endif %}
+networks:
+ mcpcontrol:
+ driver: bridge
+ driver_opts:
+ com.docker.network.driver.mtu: 9000
+ ipam:
+ config:
+ - subnet: {{ net_mcpcontrol }}
+ pxebr:
+ driver: macvlan
+ driver_opts:
+ parent: veth_mcp1 # Always untagged
+ ipam:
+ config:
+ - subnet: {{ nm.net_admin }}
+ mgmt:
+ driver: macvlan
+ driver_opts:
+{%- if conf.idf.fuel.jumphost.get('trunks', {}).get('mgmt', False) %}
+ parent: {{ ma.interface_str('veth_mcp3', nm.vlan_mgmt) }}
+{%- else %}
+ parent: veth_mcp3 # Untagged by default
+{%- endif %}
+ ipam:
+ config:
+ - subnet: {{ nm.net_mgmt }}
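For reference (not part of the commit): when the jumphost trunks the mgmt network, `ma.interface_str('veth_mcp3', nm.vlan_mgmt)` presumably resolves to a dotted VLAN sub-interface name such as `veth_mcp3.300`, which Docker's macvlan driver expects to already exist as a parent interface on the host. Creating such a parent by hand with iproute2 would look roughly like this (interface name and VLAN ID are assumed values, not taken from the diff):

    #!/bin/bash
    # Illustrative only: create a tagged VLAN sub-interface to serve as the
    # macvlan parent; veth_mcp3 / VLAN 300 are placeholders.
    ip link add link veth_mcp3 name veth_mcp3.300 type vlan id 300
    ip link set veth_mcp3.300 up
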
diff --git a/mcp/scripts/docker-compose/files/entrypoint.sh b/mcp/scripts/docker-compose/files/entrypoint.sh
new file mode 100755
index 000000000..baf1f65d2
--- /dev/null
+++ b/mcp/scripts/docker-compose/files/entrypoint.sh
@@ -0,0 +1,104 @@
+#!/bin/bash -e
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+mkdir -p /etc/salt/{master.d,minion.d,proxy.d}
+
+if [ ! -z "$SALT_EXT_PILLAR" ]; then
+ cp -avr "/tmp/${SALT_EXT_PILLAR}.conf" /etc/salt/master.d/
+fi
+
+if [ ! -f /home/ubuntu/.ssh/authorized_keys ]; then
+ install -D -o ubuntu /root/fuel/mcp/scripts/mcp.rsa.pub \
+ /home/ubuntu/.ssh/authorized_keys
+ mkdir -p /root/.ssh/
+ echo 'User ubuntu' > /root/.ssh/config
+ echo 'IdentityFile /root/fuel/mcp/scripts/mcp.rsa' >> /root/.ssh/config
+fi
+
+# The salt state does not properly configure file_roots in master.conf, so hard-set it
+cp -a /root/fuel/mcp/scripts/docker-compose/files/opnfv_master.conf \
+ /etc/salt/master.d/opnfv_master.conf
+echo -e 'master: localhost\nmine_interval: 15' > /etc/salt/minion.d/opnfv_slave.conf
+
+# NOTE: Most Salt and/or reclass tools have issues traversing Docker mounts
+# or detecting them as directories inside the container.
+# For now, let's do a lot of copy operations to bypass this.
+# Later, we will inject the OPNFV patched reclass model during image build.
+rm -rf /srv/salt/reclass/classes/*
+cp -ar /root/fuel/mcp/reclass/classes/* /srv/salt/reclass/classes
+cp -ar /root/fuel/mcp/reclass/nodes/* /srv/salt/reclass/nodes
+# Sensitive data should stay out of /root/fuel, which is exposed via Jenkins WS
+cp -a /root/pod_config.yml \
+ /srv/salt/reclass/classes/cluster/all-mcp-arch-common/opnfv/pod_config.yml
+
+# OPNFV formulas
+prefix=/srv/salt/formula/salt-formulas
+rm -f /root/fuel/mcp/salt-formulas/*/.git
+cp -ar /root/fuel/mcp/salt-formulas/* ${prefix}/
+for formula in 'armband' 'opendaylight' 'tacker' 'quagga'; do
+ ln -sf /root/fuel/mcp/salt-formulas/salt-formula-${formula}/* \
+ /srv/salt/env/prd/
+done
+
+# Re-create classes.service links that we destroyed above
+for formula in ${prefix}/*; do
+ if [ -e "${formula}/metadata/service" ] && [[ ! $formula =~ \. ]]; then
+ ln -sf "${formula}/metadata/service" \
+ "/srv/salt/reclass/classes/service/${formula#${prefix}/salt-formula-}"
+ fi
+done
+
+# Create links for salt-formula-* packages to mimic git-style salt-formulas
+for artifact in /usr/share/salt-formulas/env/_*/*; do
+ ln -sf "${artifact}" "/srv/salt/env/prd/${artifact#/usr/share/salt-formulas/env/}"
+done
+for artifact in /usr/share/salt-formulas/env/*; do
+ if [[ ! ${artifact} =~ ^_ ]]; then
+ ln -sf "${artifact}" "/srv/salt/env/prd/$(basename ${artifact})"
+ fi
+done
+for formula in /usr/share/salt-formulas/reclass/service/*; do
+ ln -sf "${formula}" "/srv/salt/reclass/classes/service/$(basename ${formula})"
+done
+
+# Temporary link rocky configs to stein
+for f in /srv/salt/env/prd/*/files/rocky; do
+ if [ ! -d "$f/../stein" ]; then
+ ln -sf "$f" "$f/../stein"
+ fi
+done
+
+# The Tini init system closely resembles upstart, but needs a little adjustment
+sed -i -e "s|return 'start/running' in |return 'is running' in |" \
+ -e "s|ret = _default_runlevel|return _default_runlevel|" \
+ /usr/lib/python2.7/dist-packages/salt/modules/upstart.py
+
+# Workaround for: https://github.com/salt-formulas/reclass/issues/77
+sed -i -e 's|\(ignore_overwritten_missing_references\)defaults.|\1|' \
+ /usr/local/lib/python2.7/dist-packages/reclass/settings.py
+
+# Remove broken symlinks in /srv/salt to silence recurring warnings
+find -L /srv/salt /srv/salt/env/prd/_* -maxdepth 1 -type l -delete
+
+# Fix up any permissions after above file shuffling
+chown root:root -R /srv/salt
+
+# Docker-ce 19.x+ workaround for broken domainname setup
+# shellcheck source=/dev/null
+source /root/fuel/mcp/scripts/xdf_data.sh
+hostname -b "cfg01.${CLUSTER_DOMAIN}"
+
+service ssh start
+service salt-minion start
+
+if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
+ exec /usr/bin/salt-master --log-file-level=quiet --log-level=info "$@"
+else
+ exec "$@"
+fi
diff --git a/mcp/scripts/docker-compose/files/entrypoint_maas.sh.j2 b/mcp/scripts/docker-compose/files/entrypoint_maas.sh.j2
new file mode 100644
index 000000000..d92eeb017
--- /dev/null
+++ b/mcp/scripts/docker-compose/files/entrypoint_maas.sh.j2
@@ -0,0 +1,62 @@
+#!/bin/bash -e
+##############################################################################
+# Copyright (c) 2019 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+{%- import 'net_map.j2' as nm with context %}
+{%- set pxebr_addr = nm.net_admin | ipnet_hostaddr(nm.start_ip[nm.net_admin] + nm.net_admin_hosts.index('opnfv_infra_maas_node01_deploy_address') +1) %}
+if [ ! -e /var/lib/postgresql/*/main ]; then
+ cp -ar /var/lib/opnfv/{postgresql,maas} /var/lib/
+ cp -ar /var/lib/opnfv/etc/maas /etc/
+fi
+chown -R maas:maas /var/lib/maas /etc/maas
+chown -R postgres:postgres /var/lib/postgresql
+chown -R proxy:proxy /var/spool/maas-proxy
+
+if [ ! -f /etc/sysctl.d/99-salt.conf ]; then
+ echo 'net.ipv4.ip_forward = 1' > /etc/sysctl.d/99-salt.conf
+fi
+
+cat <<-EOF | tee /etc/resolv.conf
+{%- for server in nm.dns_public %}
+nameserver {{ server }}
+{%- endfor %}
+EOF
+
+cat <<-EOF | tee /etc/salt/minion.d/opnfv.conf
+id: mas01.{{ conf.cluster.domain }}
+master: {{ conf.SALT_MASTER }}
+grains:
+ virtual_subtype: Docker_
+EOF
+rm -f /etc/salt/minion.d/99-master-address.conf
+
+# Work around MaaS issues with PXE/admin using jumbo frames
+MAAS_MTU_SERVICE="/etc/systemd/system/maas-mtu.service"
+cat <<-EOF | tee "${MAAS_MTU_SERVICE}"
+[Unit]
+Requires=network-online.target
+After=network-online.target
+[Service]
+ExecStart=/bin/sh -ec '\
+ /sbin/ifconfig $(/sbin/ip addr | /bin/grep -Po "{{ pxebr_addr }}.* \K(.*)") mtu 1500'
+EOF
+ln -sf "${MAAS_MTU_SERVICE}" "/etc/systemd/system/multi-user.target.wants/"
+
+# Configure maas-region-controller if it was not already configured previously
+[ ! -e /var/lib/maas/secret ] || exit 0
+MAAS_FIXUP_SERVICE="/etc/systemd/system/maas-fixup.service"
+cat <<-EOF | tee "${MAAS_FIXUP_SERVICE}"
+[Unit]
+After=postgresql.service
+[Service]
+ExecStart=/bin/sh -ec '\
+ echo "debconf debconf/frontend select Noninteractive" | debconf-set-selections && \
+ /var/lib/dpkg/info/maas-region-controller.config configure && \
+ /var/lib/dpkg/info/maas-region-controller.postinst configure'
+EOF
+ln -sf "${MAAS_FIXUP_SERVICE}" "/etc/systemd/system/multi-user.target.wants/"
+rm "/usr/sbin/policy-rc.d"
diff --git a/mcp/scripts/docker-compose/files/hosts.j2 b/mcp/scripts/docker-compose/files/hosts.j2
new file mode 100644
index 000000000..b42c5a088
--- /dev/null
+++ b/mcp/scripts/docker-compose/files/hosts.j2
@@ -0,0 +1,7 @@
+{{ conf.SALT_MASTER }} cfg01.{{ conf.cluster.domain }}
+127.0.0.1 localhost
+::1 localhost ip6-localhost ip6-loopback
+fe00::0 ip6-localnet
+ff00::0 ip6-mcastprefix
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
diff --git a/mcp/scripts/docker-compose/files/opnfv_master.conf b/mcp/scripts/docker-compose/files/opnfv_master.conf
new file mode 100644
index 000000000..8e6d1af28
--- /dev/null
+++ b/mcp/scripts/docker-compose/files/opnfv_master.conf
@@ -0,0 +1,21 @@
+worker_threads: 20
+timeout: 15
+gather_job_timeout: 30
+max_open_files: 15000
+
+file_roots:
+ base:
+ - /srv/salt/env/prd
+ prd:
+ - /srv/salt/env/prd
+ dev:
+ - /srv/salt/env/dev
+ - /srv/salt/env/prd
+
+user: root
+file_recv: True
+
+open_mode: True
+
+peer:
+ .*: ['x509.sign_remote_certificate']
diff --git a/mcp/scripts/globals.sh b/mcp/scripts/globals.sh
index 54f015cf6..e5d1decc6 100644
--- a/mcp/scripts/globals.sh
+++ b/mcp/scripts/globals.sh
@@ -1,6 +1,6 @@
#!/bin/bash -e
##############################################################################
-# Copyright (c) 2017 Ericsson AB, Mirantis Inc., Enea AB and others.
+# Copyright (c) 2018 Ericsson AB, Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
@@ -12,6 +12,8 @@ export CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
export SSH_KEY=${SSH_KEY:-"/var/lib/opnfv/mcp.rsa"}
export SALT_MASTER=${INSTALLER_IP:-10.20.0.2}
export SALT_MASTER_USER=${SALT_MASTER_USER:-ubuntu}
+export VIRSH=${VIRSH:-'virsh --connect qemu:///system'}
+export MCP_KERNEL_VER=${MCP_KERNEL_VER:-5.0.0-37}
# Derived from INSTALLER_IP
export MCPCONTROL_NET=${MCPCONTROL_NET:-${SALT_MASTER%.*}.0}
@@ -36,7 +38,7 @@ function notify() {
function notify_i() {
tput setaf "${2:-1}" || true
echo -en "${1:-"[WARN] Unsupported opt arg: $3\\n"}"
- tput sgr0
+ tput sgr0 || true
}
# same as `notify` + extra '\n' before and after;
diff --git a/mcp/scripts/lib.sh b/mcp/scripts/lib.sh
index ce5db251f..6455a65c0 100644
--- a/mcp/scripts/lib.sh
+++ b/mcp/scripts/lib.sh
@@ -1,5 +1,5 @@
#!/bin/bash -e
-# shellcheck disable=SC2155,SC1001,SC2015,SC2128
+# shellcheck disable=SC2155,SC2015
##############################################################################
# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
# All rights reserved. This program and the accompanying materials
@@ -8,501 +8,9 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
#
-# Library of shell functions
+# Library of common shell functions used by build/deploy scripts, states etc.
#
-function generate_ssh_key {
- local mcp_ssh_key=$(basename "${SSH_KEY}")
- local user=${USER}
- if [ -n "${SUDO_USER}" ] && [ "${SUDO_USER}" != 'root' ]; then
- user=${SUDO_USER}
- fi
-
- if [ -f "${SSH_KEY}" ]; then
- cp "${SSH_KEY}" .
- ssh-keygen -f "${mcp_ssh_key}" -y > "${mcp_ssh_key}.pub"
- fi
-
- [ -f "${mcp_ssh_key}" ] || ssh-keygen -f "${mcp_ssh_key}" -N ''
- sudo install -D -o "${user}" -m 0600 "${mcp_ssh_key}" "${SSH_KEY}"
-}
-
-function get_base_image {
- local base_image=$1
- local image_dir=$2
-
- mkdir -p "${image_dir}"
- wget --progress=dot:giga -P "${image_dir}" -N "${base_image}"
-}
-
-function __kernel_modules {
- # Load mandatory kernel modules: loop, nbd
- local image_dir=$1
- test -e /dev/loop-control || sudo modprobe loop
- if sudo modprobe nbd max_part=8 || sudo modprobe -f nbd max_part=8; then
- return 0
- fi
- if [ -e /dev/nbd0 ]; then return 0; fi # nbd might be inbuilt
- # CentOS (or RHEL family in general) do not provide 'nbd' out of the box
- echo "[WARN] 'nbd' kernel module cannot be loaded!"
- if [ ! -e /etc/redhat-release ]; then
- echo "[ERROR] Non-RHEL system detected, aborting!"
- echo "[ERROR] Try building 'nbd' manually or install it from a 3rd party."
- exit 1
- fi
-
- # Best-effort attempt at building a non-maintaned kernel module
- local __baseurl
- local __subdir
- local __uname_r=$(uname -r)
- local __uname_m=$(uname -m)
- if [ "${__uname_m}" = 'x86_64' ]; then
- __baseurl='http://vault.centos.org/centos'
- __subdir='Source/SPackages'
- __srpm="kernel-${__uname_r%.${__uname_m}}.src.rpm"
- else
- __baseurl='http://vault.centos.org/altarch'
- __subdir="Source/${__uname_m}/Source/SPackages"
- # NOTE: fmt varies across releases (e.g. kernel-alt-4.11.0-44.el7a.src.rpm)
- __srpm="kernel-alt-${__uname_r%.${__uname_m}}.src.rpm"
- fi
-
- local __found='n'
- local __versions=$(curl -s "${__baseurl}/" | grep -Po 'href="\K7\.[\d\.]+')
- for ver in ${__versions}; do
- for comp in os updates; do
- local url="${__baseurl}/${ver}/${comp}/${__subdir}/${__srpm}"
- if wget "${url}" -O "${image_dir}/${__srpm}" > /dev/null 2>&1; then
- __found='y'; break 2
- fi
- done
- done
-
- if [ "${__found}" = 'n' ]; then
- echo "[ERROR] Can't find the linux kernel SRPM for: ${__uname_r}"
- echo "[ERROR] 'nbd' module cannot be built, aborting!"
- echo "[ERROR] Try 'yum upgrade' or building 'nbd' krn module manually ..."
- exit 1
- fi
-
- rpm -ivh "${image_dir}/${__srpm}" 2> /dev/null
- mkdir -p ~/rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}
- # shellcheck disable=SC2016
- echo '%_topdir %(echo $HOME)/rpmbuild' > ~/.rpmmacros
- (
- cd ~/rpmbuild/SPECS
- rpmbuild -bp --nodeps --target="${__uname_m}" kernel*.spec
- cd ~/rpmbuild/BUILD/"${__srpm%.src.rpm}"/linux-*
- sed -i 's/^.*\(CONFIG_BLK_DEV_NBD\).*$/\1=m/g' .config
- # http://centosfaq.org/centos/nbd-does-not-compile-for-3100-514262el7x86_64
- if grep -Rq 'REQ_TYPE_DRV_PRIV' drivers/block; then
- sed -i 's/REQ_TYPE_SPECIAL/REQ_TYPE_DRV_PRIV/g' drivers/block/nbd.c
- fi
- gunzip -c "/boot/symvers-${__uname_r}.gz" > Module.symvers
- make prepare modules_prepare
- make M=drivers/block -j
- modinfo drivers/block/nbd.ko
- sudo mkdir -p "/lib/modules/${__uname_r}/extra/"
- sudo cp drivers/block/nbd.ko "/lib/modules/${__uname_r}/extra/"
- )
- sudo depmod -a
- sudo modprobe nbd max_part=8 || sudo modprobe -f nbd max_part=8
-}
-
-function mount_image {
- local image=$1
- local image_dir=$2
- OPNFV_MNT_DIR="${image_dir}/ubuntu"
-
- # Find free nbd, loop devices
- for dev in '/sys/class/block/nbd'*; do
- if [ "$(cat "${dev}/size")" = '0' ]; then
- OPNFV_NBD_DEV=/dev/$(basename "${dev}")
- break
- fi
- done
- OPNFV_LOOP_DEV=$(sudo losetup -f)
- OPNFV_MAP_DEV=/dev/mapper/$(basename "${OPNFV_NBD_DEV}")p1
- export OPNFV_MNT_DIR OPNFV_LOOP_DEV
- [ -n "${OPNFV_NBD_DEV}" ] && [ -n "${OPNFV_LOOP_DEV}" ] || exit 1
- qemu-img resize "${image_dir}/${image}" 3G
- sudo qemu-nbd --connect="${OPNFV_NBD_DEV}" --aio=native --cache=none \
- "${image_dir}/${image}"
- sudo kpartx -av "${OPNFV_NBD_DEV}"
- sleep 5 # /dev/nbdNp1 takes some time to come up
- # Hardcode partition index to 1, unlikely to change for Ubuntu UCA image
- if sudo growpart "${OPNFV_NBD_DEV}" 1; then
- sudo kpartx -u "${OPNFV_NBD_DEV}"
- sudo e2fsck -pf "${OPNFV_MAP_DEV}"
- sudo resize2fs "${OPNFV_MAP_DEV}"
- fi
- # grub-update does not like /dev/nbd*, so use a loop device to work around it
- sudo losetup "${OPNFV_LOOP_DEV}" "${OPNFV_MAP_DEV}"
- mkdir -p "${OPNFV_MNT_DIR}"
- sudo mount "${OPNFV_LOOP_DEV}" "${OPNFV_MNT_DIR}"
- sudo mount -t proc proc "${OPNFV_MNT_DIR}/proc"
- sudo mount -t sysfs sys "${OPNFV_MNT_DIR}/sys"
- sudo mount -o bind /dev "${OPNFV_MNT_DIR}/dev"
- sudo mkdir -p "${OPNFV_MNT_DIR}/run/resolvconf"
- sudo cp /etc/resolv.conf "${OPNFV_MNT_DIR}/run/resolvconf"
- echo "GRUB_DISABLE_OS_PROBER=true" | \
- sudo tee -a "${OPNFV_MNT_DIR}/etc/default/grub"
- sudo sed -i -e 's/^\(GRUB_TIMEOUT\)=.*$/\1=1/g' -e 's/^GRUB_HIDDEN.*$//g' \
- "${OPNFV_MNT_DIR}/etc/default/grub"
-}
-
-function apt_repos_pkgs_image {
- local apt_key_urls=(${1//,/ })
- local all_repos=(${2//,/ })
- local pkgs_i=(${3//,/ })
- local pkgs_r=(${4//,/ })
- [ -n "${OPNFV_MNT_DIR}" ] || exit 1
-
- # APT keys
- if [ "${#apt_key_urls[@]}" -gt 0 ]; then
- for apt_key in "${apt_key_urls[@]}"; do
- sudo chroot "${OPNFV_MNT_DIR}" /bin/bash -c \
- "wget -qO - '${apt_key}' | apt-key add -"
- done
- fi
- # Additional repositories
- for repo_line in "${all_repos[@]}"; do
- # <repo_name>|<repo prio>|deb|[arch=<arch>]|<repo url>|<dist>|<repo comp>
- local repo=(${repo_line//|/ })
- [ "${#repo[@]}" -gt 5 ] || continue
- # NOTE: Names and formatting are compatible with Salt linux.system.repo
- cat <<-EOF | sudo tee "${OPNFV_MNT_DIR}/etc/apt/preferences.d/${repo[0]}"
-
- Package: *
- Pin: release a=${repo[-2]}
- Pin-Priority: ${repo[1]}
-
- EOF
- echo "${repo[@]:2}" | sudo tee \
- "${OPNFV_MNT_DIR}/etc/apt/sources.list.d/${repo[0]}.list"
- done
- # Install packages
- if [ "${#pkgs_i[@]}" -gt 0 ]; then
- sudo DEBIAN_FRONTEND="noninteractive" \
- chroot "${OPNFV_MNT_DIR}" apt-get update
- sudo DEBIAN_FRONTEND="noninteractive" FLASH_KERNEL_SKIP="true" \
- chroot "${OPNFV_MNT_DIR}" apt-get install -y "${pkgs_i[@]}"
- fi
- # Remove packages
- if [ "${#pkgs_r[@]}" -gt 0 ]; then
- sudo DEBIAN_FRONTEND="noninteractive" FLASH_KERNEL_SKIP="true" \
- chroot "${OPNFV_MNT_DIR}" apt-get purge -y "${pkgs_r[@]}"
- fi
- # Disable cloud-init metadata service datasource
- sudo mkdir -p "${OPNFV_MNT_DIR}/etc/cloud/cloud.cfg.d"
- echo "datasource_list: [ NoCloud, None ]" | sudo tee \
- "${OPNFV_MNT_DIR}/etc/cloud/cloud.cfg.d/95_real_datasources.cfg"
-}
-
-function cleanup_mounts {
- # Remove any mounts, loop and/or nbd devs created while patching base image
- if [ -n "${OPNFV_MNT_DIR}" ] && [ -d "${OPNFV_MNT_DIR}" ]; then
- if [ -f "${OPNFV_MNT_DIR}/boot/grub/grub.cfg" ]; then
- # Grub thinks it's running from a live CD
- sudo sed -i -e 's/^\s*set root=.*$//g' -e 's/^\s*loopback.*$//g' \
- "${OPNFV_MNT_DIR}/boot/grub/grub.cfg"
- fi
- sudo rm -f "${OPNFV_MNT_DIR}/run/resolvconf/resolv.conf"
- sync
- if mountpoint -q "${OPNFV_MNT_DIR}"; then
- sudo umount -l "${OPNFV_MNT_DIR}" || true
- fi
- fi
- if [ -n "${OPNFV_LOOP_DEV}" ] && \
- sudo losetup "${OPNFV_LOOP_DEV}" 1>&2 > /dev/null; then
- sudo losetup -d "${OPNFV_LOOP_DEV}"
- fi
- if [ -n "${OPNFV_NBD_DEV}" ]; then
- sudo kpartx -d "${OPNFV_NBD_DEV}" || true
- sudo qemu-nbd -d "${OPNFV_NBD_DEV}" || true
- fi
-}
-
-function cleanup_uefi {
- # Clean up Ubuntu boot entry if cfg01, kvm nodes online from previous deploy
- local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}"
- ping -c 1 -w 1 "${SALT_MASTER}" || return 0
- [ ! "$(hostname)" = 'cfg01' ] || cmd_str='eval'
- ${cmd_str} "sudo salt -C 'kvm* or cmp*' cmd.run \
- \"which efibootmgr > /dev/null 2>&1 && \
- efibootmgr | grep -oP '(?<=Boot)[0-9]+(?=.*ubuntu)' | \
- xargs -I{} efibootmgr --delete-bootnum --bootnum {}; \
- rm -rf /boot/efi/*\"" || true
-}
-
-function cleanup_vms {
- # clean up existing nodes
- for node in $(virsh list --name | grep -P '\w{3}\d{2}'); do
- virsh destroy "${node}"
- done
- for node in $(virsh list --name --all | grep -P '\w{3}\d{2}'); do
- virsh domblklist "${node}" | awk '/^.da/ {print $2}' | \
- xargs --no-run-if-empty -I{} sudo rm -f {}
- virsh undefine "${node}" --remove-all-storage --nvram
- done
-}
-
-function prepare_vms {
- local base_image=$1; shift
- local image_dir=$1; shift
- local repos_pkgs_str=$1; shift # ^-sep list of repos, pkgs to install/rm
- local vnodes=("$@")
- local image=base_image_opnfv_fuel.img
- local vcp_image=${image%.*}_vcp.img
- local _o=${base_image/*\/}
- local _h=$(echo "${repos_pkgs_str}.$(md5sum "${image_dir}/${_o}")" | \
- md5sum | cut -c -8)
- local _tmp
-
- cleanup_uefi
- cleanup_vms
- get_base_image "${base_image}" "${image_dir}"
- IFS='^' read -r -a repos_pkgs <<< "${repos_pkgs_str}"
-
- echo "[INFO] Lookup cache / build patched base image for fingerprint: ${_h}"
- _tmp="${image%.*}.${_h}.img"
- if [ "${image_dir}/${_tmp}" -ef "${image_dir}/${image}" ]; then
- echo "[INFO] Patched base image found"
- else
- rm -f "${image_dir}/${image%.*}"*
- if [[ ! "${repos_pkgs_str}" =~ ^\^+$ ]]; then
- echo "[INFO] Patching base image ..."
- cp "${image_dir}/${_o}" "${image_dir}/${_tmp}"
- __kernel_modules "${image_dir}"
- mount_image "${_tmp}" "${image_dir}"
- apt_repos_pkgs_image "${repos_pkgs[@]:0:4}"
- cleanup_mounts
- else
- echo "[INFO] No patching required, using vanilla base image"
- ln -sf "${image_dir}/${_o}" "${image_dir}/${_tmp}"
- fi
- ln -sf "${image_dir}/${_tmp}" "${image_dir}/${image}"
- fi
-
- # Create config ISO and resize OS disk image for each foundation node VM
- for node in "${vnodes[@]}"; do
- if [[ "${node}" =~ ^(cfg01|mas01) ]]; then
- user_data='user-data.mcp.sh'
- else
- user_data='user-data.admin.sh'
- fi
- ./create-config-drive.sh -k "$(basename "${SSH_KEY}").pub" \
- -u "${user_data}" -h "${node}" "${image_dir}/mcp_${node}.iso"
- cp "${image_dir}/${image}" "${image_dir}/mcp_${node}.qcow2"
- qemu-img resize "${image_dir}/mcp_${node}.qcow2" 100G
- # Prepare dedicated drive for cinder on cmp nodes
- if [[ "${node}" =~ ^(cmp) ]]; then
- qemu-img create "${image_dir}/mcp_${node}_storage.qcow2" 100G
- fi
- done
-
- # VCP VMs base image specific changes
- if [[ ! "${repos_pkgs_str}" =~ \^{3}$ ]] && [ -n "${repos_pkgs[*]:4}" ]; then
- echo "[INFO] Lookup cache / build patched VCP image for md5sum: ${_h}"
- _tmp="${vcp_image%.*}.${_h}.img"
- if [ "${image_dir}/${_tmp}" -ef "${image_dir}/${vcp_image}" ]; then
- echo "[INFO] Patched VCP image found"
- else
- echo "[INFO] Patching VCP image ..."
- cp "${image_dir}/${image}" "${image_dir}/${_tmp}"
- __kernel_modules "${image_dir}"
- mount_image "${_tmp}" "${image_dir}"
- apt_repos_pkgs_image "${repos_pkgs[@]:4:4}"
- cleanup_mounts
- ln -sf "${image_dir}/${_tmp}" "${image_dir}/${vcp_image}"
- fi
- fi
-}
-
-function jumpserver_pkg_install {
- if [ -n "$(command -v apt-get)" ]; then
- pkg_type='deb'; pkg_cmd='sudo apt-get install -y'
- else
- pkg_type='rpm'; pkg_cmd='sudo yum install -y --skip-broken'
- fi
- eval "$(parse_yaml "./requirements_${pkg_type}.yaml")"
- for section in 'common' "$(uname -i)"; do
- section_var="requirements_pkg_${section}[*]"
- pkg_list+=" ${!section_var}"
- done
- # shellcheck disable=SC2086
- ${pkg_cmd} ${pkg_list}
-}
-
-function jumpserver_check_requirements {
- # shellcheck disable=SC2178
- local vnodes=$1; shift
- local br=("$@")
- local err_br_not_found='Linux bridge not found!'
- local err_br_virsh_net='is a virtual network, Linux bridge expected!'
- local warn_br_endpoint="Endpoints might be inaccessible from external hosts!"
- # MaaS requires a Linux bridge for PXE/admin
- if [[ "${vnodes}" =~ mas01 ]]; then
- if ! brctl showmacs "${br[0]}" >/dev/null 2>&1; then
- notify_e "[ERROR] PXE/admin (${br[0]}) ${err_br_not_found}"
- fi
- # Assume virsh network name matches bridge name (true if created by us)
- if virsh net-info "${br[0]}" >/dev/null 2>&1; then
- notify_e "[ERROR] ${br[0]} ${err_br_virsh_net}"
- fi
- fi
- # If virtual nodes are present, public should be a Linux bridge
- if [ "$(echo "${vnodes}" | wc -w)" -gt 2 ]; then
- if ! brctl showmacs "${br[3]}" >/dev/null 2>&1; then
- if [[ "${vnodes}" =~ mas01 ]]; then
- # Baremetal nodes *require* a proper public network
- notify_e "[ERROR] Public (${br[3]}) ${err_br_not_found}"
- else
- notify_n "[WARN] Public (${br[3]}) ${err_br_not_found}" 3
- notify_n "[WARN] ${warn_br_endpoint}" 3
- fi
- fi
- if virsh net-info "${br[3]}" >/dev/null 2>&1; then
- if [[ "${vnodes}" =~ mas01 ]]; then
- notify_e "[ERROR] ${br[3]} ${err_br_virsh_net}"
- else
- notify_n "[WARN] ${br[3]} ${err_br_virsh_net}" 3
- notify_n "[WARN] ${warn_br_endpoint}" 3
- fi
- fi
- fi
-}
-
-function create_networks {
- local vnode_networks=("$@")
- # create required networks, including constant "mcpcontrol"
- for net in "mcpcontrol" "${vnode_networks[@]}"; do
- if virsh net-info "${net}" >/dev/null 2>&1; then
- virsh net-destroy "${net}" || true
- virsh net-undefine "${net}"
- fi
- # in case of custom network, host should already have the bridge in place
- if [ -f "virsh_net/net_${net}.xml" ] && \
- [ ! -d "/sys/class/net/${net}/bridge" ]; then
- virsh net-define "virsh_net/net_${net}.xml"
- virsh net-autostart "${net}"
- virsh net-start "${net}"
- fi
- done
-}
-
-function create_vms {
- local image_dir=$1; shift
- # vnode data should be serialized with the following format:
- # '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>[...]'
- IFS='|' read -r -a vnodes <<< "$1"; shift
-
- # AArch64: prepare arch specific arguments
- local virt_extra_args=""
- if [ "$(uname -i)" = "aarch64" ]; then
- # No Cirrus VGA on AArch64, use virtio instead
- virt_extra_args="$virt_extra_args --video=virtio"
- fi
-
- # create vms with specified options
- for serialized_vnode_data in "${vnodes[@]}"; do
- IFS=',' read -r -a vnode_data <<< "${serialized_vnode_data}"
-
- # prepare network args
- local vnode_networks=("$@")
- if [[ "${vnode_data[0]}" =~ ^(cfg01|mas01) ]]; then
- net_args=" --network network=mcpcontrol,model=virtio"
- # 3rd interface gets connected to PXE/Admin Bridge (cfg01, mas01)
- vnode_networks[2]="${vnode_networks[0]}"
- else
- net_args=" --network bridge=${vnode_networks[0]},model=virtio"
- fi
- for net in "${vnode_networks[@]:1}"; do
- net_args="${net_args} --network bridge=${net},model=virtio"
- done
-
- # dedicated storage drive for cinder on cmp nodes
- virt_extra_storage=
- if [[ "${vnode_data[0]}" =~ ^(cmp) ]]; then
- virt_extra_storage="--disk path=${image_dir}/mcp_${vnode_data[0]}_storage.qcow2,format=qcow2,bus=virtio,cache=none,io=native"
- fi
-
- # shellcheck disable=SC2086
- virt-install --name "${vnode_data[0]}" \
- --ram "${vnode_data[1]}" --vcpus "${vnode_data[2]}" \
- --cpu host-passthrough --accelerate ${net_args} \
- --disk path="${image_dir}/mcp_${vnode_data[0]}.qcow2",format=qcow2,bus=virtio,cache=none,io=native \
- ${virt_extra_storage} \
- --os-type linux --os-variant none \
- --boot hd --vnc --console pty --autostart --noreboot \
- --disk path="${image_dir}/mcp_${vnode_data[0]}.iso",device=cdrom \
- --noautoconsole \
- ${virt_extra_args}
- done
-}
-
-function update_mcpcontrol_network {
- # set static ip address for salt master node, MaaS node
- local cmac=$(virsh domiflist cfg01 2>&1| awk '/mcpcontrol/ {print $5; exit}')
- local amac=$(virsh domiflist mas01 2>&1| awk '/mcpcontrol/ {print $5; exit}')
- virsh net-update "mcpcontrol" add ip-dhcp-host \
- "<host mac='${cmac}' name='cfg01' ip='${SALT_MASTER}'/>" --live --config
- [ -z "${amac}" ] || virsh net-update "mcpcontrol" add ip-dhcp-host \
- "<host mac='${amac}' name='mas01' ip='${MAAS_IP}'/>" --live --config
-}
-
-function start_vms {
- local vnodes=("$@")
-
- # start vms
- for node in "${vnodes[@]}"; do
- virsh start "${node}"
- sleep $((RANDOM%5+1))
- done
-}
-
-function check_connection {
- local total_attempts=60
- local sleep_time=5
-
- set +e
- echo '[INFO] Attempting to get into Salt master ...'
-
- # wait until ssh on Salt master is available
- # shellcheck disable=SC2034
- for attempt in $(seq "${total_attempts}"); do
- # shellcheck disable=SC2086
- ssh ${SSH_OPTS} "ubuntu@${SALT_MASTER}" uptime
- case $? in
- 0) echo "${attempt}> Success"; break ;;
- *) echo "${attempt}/${total_attempts}> ssh server ain't ready yet, waiting for ${sleep_time} seconds ..." ;;
- esac
- sleep $sleep_time
- done
- set -e
-}
-
-function parse_yaml {
- local prefix=$2
- local s
- local w
- local fs
- s='[[:space:]]*'
- w='[a-zA-Z0-9_]*'
- fs="$(echo @|tr @ '\034')"
- sed -e 's|---||g' -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
- -e "s|^\($s\)\($w\)$s[:-]$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$1" |
- awk -F"$fs" '{
- indent = length($1)/2;
- vname[indent] = $2;
- for (i in vname) {if (i > indent) {delete vname[i]}}
- if (length($3) > 0) {
- vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
- printf("%s%s%s=(\"%s\")\n", "'"$prefix"'",vn, $2, $3);
- }
- }' | sed 's/_=/+=/g'
-}
-
function wait_for {
# Execute in a subshell to prevent local variable override during recursion
(
@@ -527,16 +35,18 @@ function wait_for {
)
}
-function do_sysctl_cfg {
- local _conf='/etc/sysctl.d/99-opnfv-fuel-bridge.conf'
- # https://wiki.libvirt.org/page/Net.bridge.bridge-nf-call_and_sysctl.conf
- if modprobe br_netfilter bridge; then
- echo 'net.bridge.bridge-nf-call-arptables = 0' |& sudo tee "${_conf}"
- echo 'net.bridge.bridge-nf-call-iptables = 0' |& sudo tee -a "${_conf}"
- echo 'net.bridge.bridge-nf-call-ip6tables = 0' |& sudo tee -a "${_conf}"
- # Some distros / sysadmins explicitly blacklist br_netfilter
- sudo sysctl -q -p "${_conf}" || true
- fi
+function cleanup_uefi {
+ # Clean up Ubuntu boot entries if cfg01 and baremetal nodes are still online from a previous deploy
+ local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}"
+ ping -c 1 -w 1 "${SALT_MASTER}" || return 0
+ [ ! "$(hostname)" = 'cfg01' ] || cmd_str='eval'
+ ${cmd_str} "sudo salt -C 'G@virtual:physical and not cfg01*' cmd.run \
+ \"which efibootmgr > /dev/null 2>&1 && \
+ efibootmgr | grep -oP '(?<=Boot)[0-9]+(?=.*ubuntu)' | \
+ xargs -I{} efibootmgr --delete-bootnum --bootnum {}; \
+ rm -rf /boot/efi/*\"" || true
+
+ ${cmd_str} "sudo salt -C 'G@virtual:physical and not cfg01*' cmd.run 'shutdown now'" || true
}
function get_nova_compute_pillar_data {
diff --git a/mcp/scripts/lib_jump_common.sh b/mcp/scripts/lib_jump_common.sh
new file mode 100644
index 000000000..c2bd46649
--- /dev/null
+++ b/mcp/scripts/lib_jump_common.sh
@@ -0,0 +1,213 @@
+#!/bin/bash -e
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Library of shell functions used by build / deploy scripts on jumpserver:
+# - distro package requirements installation (e.g. DEB, RPM);
+# - other package requirements from custom sources (e.g. docker);
+# - jumpserver prerequisites validation (e.g. network bridges);
+# - distro configuration (e.g. udev, sysctl);
+# etc.
+
+##############################################################################
+# private helper functions
+##############################################################################
+
+function __parse_yaml {
+ local prefix=$2
+ local s
+ local w
+ local fs
+ s='[[:space:]]*'
+ w='[a-zA-Z0-9_]*'
+ fs="$(echo @|tr @ '\034')"
+ sed -e 's|---||g' -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
+ -e "s|^\($s\)\($w\)$s[:-]$s\(.*\)$s\$|\1$fs\2$fs\3|p" "$1" |
+ awk -F"$fs" '{
+ indent = length($1)/2;
+ vname[indent] = $2;
+ for (i in vname) {if (i > indent) {delete vname[i]}}
+ if (length($3) > 0) {
+ vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
+ printf("%s%s%s=(\"%s\")\n", "'"$prefix"'",vn, $2, $3);
+ }
+ }' | sed 's/_=/+=/g'
+}
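+
+# Illustrative sketch (comments only, not executed): for a YAML fragment like
+#   deploy:
+#     common:
+#       - curl
+#       - git
+# __parse_yaml emits bash array assignments of the form:
+#   deploy_common+=("curl")
+#   deploy_common+=("git")
+# which callers eval to build package lists (see jumpserver_pkg_install below).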
+
+##############################################################################
+# public functions
+##############################################################################
+
+function jumpserver_pkg_install {
+ local req_type=$1
+ if [ -n "$(command -v apt-get)" ]; then
+ pkg_type='deb'; pkg_cmd='sudo apt-get install -y'
+ else
+ pkg_type='rpm'; pkg_cmd='sudo yum install -y --skip-broken'
+ fi
+ eval "$(__parse_yaml "./requirements_${pkg_type}.yaml")"
+ for section in 'common' "$(uname -i)"; do
+ section_var="${req_type}_${section}[*]"
+ pkg_list+=" ${!section_var}"
+ done
+ # shellcheck disable=SC2086
+ ${pkg_cmd} ${pkg_list}
+}
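+
+# Usage sketch: the requirements files define 'build' and 'deploy' top-level
+# sections (see requirements_deb.yaml / requirements_rpm.yaml), so e.g.:
+#   jumpserver_pkg_install 'deploy'   # deploy-time distro packages
+#   jumpserver_pkg_install 'build'    # build-time distro packages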
+
+function jumpserver_check_requirements {
+ # shellcheck disable=SC2178
+ local states=$1; shift
+ # shellcheck disable=SC2178
+ local vnodes=$1; shift
+ local br=("$@")
+ local err_br_not_found='Linux bridge not found!'
+ local err_br_virsh_net='is a virtual network, Linux bridge expected!'
+ local warn_br_endpoint="Endpoints might be inaccessible from external hosts!"
+ # MaaS requires a Linux bridge for PXE/admin
+ if [[ "${states}" =~ maas ]]; then
+ if ! brctl showmacs "${br[0]}" >/dev/null 2>&1; then
+ notify_e "[ERROR] PXE/admin (${br[0]}) ${err_br_not_found}"
+ fi
+ # Assume virsh network name matches bridge name (true if created by us)
+ if ${VIRSH} net-info "${br[0]}" >/dev/null 2>&1; then
+ notify_e "[ERROR] ${br[0]} ${err_br_virsh_net}"
+ fi
+ fi
+ # If virtual nodes are present, public should be a Linux bridge
+ if [ -n "${vnodes}" ]; then
+ if ! brctl showmacs "${br[3]}" >/dev/null 2>&1; then
+ if [[ "${states}" =~ maas ]]; then
+ # Baremetal nodes *require* a proper public network
+ notify_e "[ERROR] Public (${br[3]}) ${err_br_not_found}"
+ else
+ notify_n "[WARN] Public (${br[3]}) ${err_br_not_found}" 3
+ notify_n "[WARN] ${warn_br_endpoint}" 3
+ fi
+ fi
+ if ${VIRSH} net-info "${br[3]}" >/dev/null 2>&1; then
+ if [[ "${states}" =~ maas ]]; then
+ notify_e "[ERROR] ${br[3]} ${err_br_virsh_net}"
+ else
+ notify_n "[WARN] ${br[3]} ${err_br_virsh_net}" 3
+ notify_n "[WARN] ${warn_br_endpoint}" 3
+ fi
+ fi
+ # https://bugs.launchpad.net/ubuntu/+source/qemu/+bug/1797332
+ if lsb_release -d | grep -q -e 'Ubuntu 16.04'; then
+ if uname -r | grep -q -e '^4\.4\.'; then
+ notify_n "[WARN] Host kernel too old; nested virtualization issues!" 3
+ notify_n "[WARN] apt install linux-generic-hwe-16.04 && reboot" 3
+ notify_e "[ERROR] Please upgrade the kernel and reboot!"
+ fi
+ fi
+ fi
+}
+
+function docker_install {
+ local image_dir=$1
+ # Minimum-effort attempt at installing Docker if missing
+ if ! docker --version; then
+ curl -fsSL https://get.docker.com -o get-docker.sh
+ sudo sh get-docker.sh
+ rm get-docker.sh
+ # On RHEL distros, the Docker service should be explicitly started
+ sudo systemctl start docker
+ else
+ DOCKER_VER=$(docker version --format '{{.Server.Version}}')
+ if [ "${DOCKER_VER%%.*}" -lt 2 ]; then
+ notify_e "[ERROR] Docker version ${DOCKER_VER} is too old, please upgrade it."
+ fi
+ fi
+ # Distro-provided docker-compose might be simply broken (Ubuntu 16.04, CentOS 7)
+ if ! docker-compose --version > /dev/null 2>&1 || \
+ [ "$(docker-compose version --short | tr -d '.')" -lt 1220 ] && \
+ [ "$(uname -m)" = 'x86_64' ]; then
+ COMPOSE_BIN="${image_dir}/docker-compose"
+ COMPOSE_VERSION='1.22.0'
+ notify_n "[WARN] Using docker-compose ${COMPOSE_VERSION} in ${COMPOSE_BIN}" 3
+ if [ ! -e "${COMPOSE_BIN}" ]; then
+ COMPOSE_URL="https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}"
+ sudo curl -L "${COMPOSE_URL}/docker-compose-$(uname -s)-$(uname -m)" -o "${COMPOSE_BIN}"
+ sudo chmod +x "${COMPOSE_BIN}"
+ fi
+ fi
+}
+
+function e2fsprogs_install {
+ local image_dir=$1
+ E2FS_VER=$(e2fsck -V 2>&1 | grep -Pzo "e2fsck \K1\.\d{2}")
+ if [ "${E2FS_VER//./}" -lt 143 ]; then
+ E2FS_TGZ="${image_dir}/e2fsprogs.tar.gz"
+ E2FS_VER='1.43.9'
+ E2FS_URL="https://git.kernel.org/pub/scm/fs/ext2/e2fsprogs.git/snapshot/e2fsprogs-${E2FS_VER}.tar.gz"
+ notify_n "[WARN] Using e2fsprogs ${E2FS_VER} from ${E2FS_TGZ}" 3
+ if [ ! -e "${E2FS_TGZ}" ]; then
+ curl -L "${E2FS_URL}" -o "${E2FS_TGZ}"
+ mkdir -p "${image_dir}/e2fsprogs"
+ tar xzf "${E2FS_TGZ}" -C "${image_dir}/e2fsprogs" --strip-components=1
+ cd "${image_dir}/e2fsprogs" || exit 1
+ ./configure
+ make
+ cd - || exit 1
+ fi
+ fi
+}
+
+function virtinst_install {
+ local image_dir=$1
+ VIRT_VER=$(virt-install --version 2>&1)
+ if [ "${VIRT_VER//./}" -lt 140 ]; then
+ VIRT_TGZ="${image_dir}/virt-manager.tar.gz"
+ VIRT_VER='1.4.3'
+ VIRT_URL="https://github.com/virt-manager/virt-manager/archive/v${VIRT_VER}.tar.gz"
+ notify_n "[WARN] Using virt-install ${VIRT_VER} from ${VIRT_TGZ}" 3
+ if [ ! -e "${VIRT_TGZ}" ]; then
+ curl -L "${VIRT_URL}" -o "${VIRT_TGZ}"
+ mkdir -p "${image_dir}/virt-manager"
+ tar xzf "${VIRT_TGZ}" -C "${image_dir}/virt-manager" --strip-components=1
+ fi
+ fi
+}
+
+function do_udev_cfg {
+ local _conf='/etc/udev/rules.d/99-opnfv-fuel-vnet-mtu.rules'
+ # http://linuxaleph.blogspot.com/2013/01/how-to-network-jumbo-frames-to-kvm-guest.html
+ echo 'SUBSYSTEM=="net", ACTION=="add|change", KERNEL=="vnet*", RUN+="/bin/sh -c '"'/bin/sleep 1; /sbin/ip link set %k mtu 9000'\"" |& sudo tee "${_conf}"
+ echo 'SUBSYSTEM=="net", ACTION=="add|change", KERNEL=="*-nic", RUN+="/bin/sh -c '"'/bin/sleep 1; /sbin/ip link set %k mtu 9000'\"" |& sudo tee -a "${_conf}"
+ sudo udevadm control --reload
+ sudo udevadm trigger
+}
+
+function do_sysctl_cfg {
+ local _conf='/etc/sysctl.d/99-opnfv-fuel-bridge.conf'
+ # https://wiki.libvirt.org/page/Net.bridge.bridge-nf-call_and_sysctl.conf
+ if modprobe br_netfilter bridge; then
+ echo 'net.bridge.bridge-nf-call-arptables = 0' |& sudo tee "${_conf}"
+ echo 'net.bridge.bridge-nf-call-iptables = 0' |& sudo tee -a "${_conf}"
+ echo 'net.bridge.bridge-nf-call-ip6tables = 0' |& sudo tee -a "${_conf}"
+ # Some distros / sysadmins explicitly blacklist br_netfilter
+ sudo sysctl -q -p "${_conf}" || true
+ fi
+}
+
+function generate_ssh_key {
+ # shellcheck disable=SC2155
+ local mcp_ssh_key=$(basename "${SSH_KEY}")
+ local user=${USER}
+ if [ -n "${SUDO_USER}" ] && [ "${SUDO_USER}" != 'root' ]; then
+ user=${SUDO_USER}
+ fi
+
+ if [ -f "${SSH_KEY}" ]; then
+ cp "${SSH_KEY}" .
+ ssh-keygen -f "${mcp_ssh_key}" -y > "${mcp_ssh_key}.pub"
+ fi
+
+ [ -f "${mcp_ssh_key}" ] || ssh-keygen -f "${mcp_ssh_key}" -N ''
+ sudo install -D -o "${user}" -m 0600 "${mcp_ssh_key}" "${SSH_KEY}"
+}
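+
+# Usage sketch: SSH_KEY defaults to /var/lib/opnfv/mcp.rsa (see globals.sh).
+# generate_ssh_key reuses an existing ${SSH_KEY} (regenerating its public key)
+# or creates a fresh keypair, then installs it at ${SSH_KEY} owned by the
+# invoking (sudo) user.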
diff --git a/mcp/scripts/lib_jump_deploy.sh b/mcp/scripts/lib_jump_deploy.sh
new file mode 100644
index 000000000..3dc3c4436
--- /dev/null
+++ b/mcp/scripts/lib_jump_deploy.sh
@@ -0,0 +1,591 @@
+#!/bin/bash -e
+# shellcheck disable=SC2155,SC1001,SC2015,SC2128
+##############################################################################
+# Copyright (c) 2018 Mirantis Inc., Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+#
+# Library of shell functions used by deploy script on jumpserver:
+# - base cloud image (used by FN VMs and VCP VMs) processing:
+# * download;
+# * tooling for offline image modification (without libguestfs);
+# * package pre-installation (requires nbd, loop krn mods);
+# - virtualized hosts processing:
+# * virsh-managed VMs boilerplate;
+# * salt master container tooling;
+# * virsh & docker network plumbing;
+# etc.
+
+##############################################################################
+# private helper functions
+##############################################################################
+
+function __get_base_image {
+ local base_image=$1
+ local image_dir=$2
+
+ mkdir -p "${image_dir}"
+ wget --progress=dot:giga -P "${image_dir}" -N "${base_image}"
+}
+
+function __kernel_modules {
+ # Load mandatory kernel modules: loop, nbd
+ local image_dir=$1
+ test -e /dev/loop-control || sudo modprobe loop
+ if sudo modprobe nbd max_part=8 || sudo modprobe -f nbd max_part=8; then
+ return 0
+ fi
+ if [ -e /dev/nbd0 ]; then return 0; fi # nbd might be inbuilt
+ # CentOS (or RHEL family in general) do not provide 'nbd' out of the box
+ echo "[WARN] 'nbd' kernel module cannot be loaded!"
+ if [ ! -e /etc/redhat-release ]; then
+ echo "[ERROR] Non-RHEL system detected, aborting!"
+ echo "[ERROR] Try building 'nbd' manually or install it from a 3rd party."
+ exit 1
+ fi
+
+ # Best-effort attempt at building a non-maintained kernel module
+ local __baseurl='http://vault.centos.org/centos'
+ local __subdir='Source/SPackages'
+ local __uname_r=$(uname -r)
+ local __uname_m=$(uname -m)
+ if [ "${__uname_m}" = 'x86_64' ]; then
+ __srpm="kernel-${__uname_r%.${__uname_m}}.src.rpm"
+ else
+ # NOTE: fmt varies across releases (e.g. kernel-alt-4.11.0-44.el7a.src.rpm)
+ __srpm="kernel-alt-${__uname_r%.${__uname_m}}.src.rpm"
+ fi
+
+ local __found='n'
+ local __versions=$(curl -s "${__baseurl}/" | grep -Po 'href="\K7\.[\d\.]+')
+ for ver in ${__versions}; do
+ for comp in os updates; do
+ local url="${__baseurl}/${ver}/${comp}/${__subdir}/${__srpm}"
+ if wget "${url}" -O "${image_dir}/${__srpm}" > /dev/null 2>&1; then
+ __found='y'; break 2
+ fi
+ done
+ done
+
+ if [ "${__found}" = 'n' ]; then
+ echo "[ERROR] Can't find the linux kernel SRPM for: ${__uname_r}"
+ echo "[ERROR] 'nbd' module cannot be built, aborting!"
+ echo "[ERROR] Try 'yum upgrade' or building 'nbd' krn module manually ..."
+ exit 1
+ fi
+
+ rpm -ivh "${image_dir}/${__srpm}" 2> /dev/null
+ mkdir -p ~/rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}
+ # shellcheck disable=SC2016
+ echo '%_topdir %(echo $HOME)/rpmbuild' > ~/.rpmmacros
+ (
+ cd ~/rpmbuild/SPECS
+ rpmbuild -bp --nodeps --target="${__uname_m}" kernel*.spec
+ cd ~/rpmbuild/BUILD/"${__srpm%.src.rpm}"/linux-*
+ sed -i 's/^.*\(CONFIG_BLK_DEV_NBD\).*$/\1=m/g' .config
+ # http://centosfaq.org/centos/nbd-does-not-compile-for-3100-514262el7x86_64
+ if grep -Rq 'REQ_TYPE_DRV_PRIV' drivers/block; then
+ sed -i 's/REQ_TYPE_SPECIAL/REQ_TYPE_DRV_PRIV/g' drivers/block/nbd.c
+ fi
+ gunzip -c "/boot/symvers-${__uname_r}.gz" > Module.symvers
+ make prepare modules_prepare
+ make M=drivers/block -j
+ modinfo drivers/block/nbd.ko
+ sudo mkdir -p "/lib/modules/${__uname_r}/extra/"
+ sudo cp drivers/block/nbd.ko "/lib/modules/${__uname_r}/extra/"
+ )
+ sudo depmod -a
+ sudo modprobe nbd max_part=8 || sudo modprobe -f nbd max_part=8
+}
+
+function __mount_image {
+ local image=$1
+ local image_dir=$2
+ OPNFV_MNT_DIR="${image_dir}/mnt"
+
+ # Find free nbd, loop devices
+ for dev in '/sys/class/block/nbd'*; do
+ if [ "$(cat "${dev}/size")" = '0' ]; then
+ OPNFV_NBD_DEV=/dev/$(basename "${dev}")
+ break
+ fi
+ done
+ OPNFV_LOOP_DEV=$(sudo losetup -f)
+ OPNFV_MAP_DEV=/dev/mapper/$(basename "${OPNFV_NBD_DEV}")p1
+ export OPNFV_MNT_DIR OPNFV_LOOP_DEV
+ [ -n "${OPNFV_NBD_DEV}" ] && [ -n "${OPNFV_LOOP_DEV}" ] || exit 1
+ [[ "${MCP_OS:-}" =~ centos ]] || \
+ qemu-img resize "${image_dir}/${image}" 3G
+ sudo qemu-nbd --connect="${OPNFV_NBD_DEV}" --aio=native --cache=none \
+ "${image_dir}/${image}"
+ sudo kpartx -av "${OPNFV_NBD_DEV}"
+ # Hardcode partition index to 1, unlikely to change for Ubuntu UCA image
+ sudo partx -uvn 1:1 "${OPNFV_NBD_DEV}"
+ if [[ "${MCP_OS:-}" =~ ubuntu ]] && sudo growpart "${OPNFV_NBD_DEV}" 1
+ then
+ if [ -e "${image_dir}/e2fsprogs" ]; then
+ E2FSCK_PREFIX="${image_dir}/e2fsprogs/e2fsck/"
+ RESIZE_PREFIX="${image_dir}/e2fsprogs/resize/"
+ fi
+ sudo kpartx -u "${OPNFV_NBD_DEV}"
+ sudo "${E2FSCK_PREFIX}e2fsck" -pf "${OPNFV_MAP_DEV}"
+ sudo "${RESIZE_PREFIX}resize2fs" "${OPNFV_MAP_DEV}"
+ else
+ sleep 5 # /dev/nbdNp1 takes some time to come up
+ fi
+ sudo partx -d "${OPNFV_NBD_DEV}"
+ mkdir -p "${OPNFV_MNT_DIR}"
+ if [ "$(uname -i)" = "aarch64" ] && [[ "${MCP_OS:-}" =~ centos ]]; then
+ # AArch64 CentOS cloud image contains a broken shim binary
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1527283
+ sudo mount "${OPNFV_MAP_DEV}" "${OPNFV_MNT_DIR}"
+ sudo cp -f --remove-destination "${OPNFV_MNT_DIR}/EFI/BOOT/fbaa64.efi" \
+ "${OPNFV_MNT_DIR}/EFI/BOOT/BOOTAA64.EFI"
+ sudo umount -l "${OPNFV_MNT_DIR}"
+ # AArch64 CentOS cloud image has root partition at index 4 instead of 1
+ sudo mount "${OPNFV_MAP_DEV/p1/p4}" "${OPNFV_MNT_DIR}"
+ else
+ # grub-update does not like /dev/nbd*, so use a loop device to work around it
+ sudo losetup "${OPNFV_LOOP_DEV}" "${OPNFV_MAP_DEV}"
+ sudo mount "${OPNFV_LOOP_DEV}" "${OPNFV_MNT_DIR}"
+ fi
+ sudo mount -t proc proc "${OPNFV_MNT_DIR}/proc"
+ sudo mount -t sysfs sys "${OPNFV_MNT_DIR}/sys"
+ sudo mount -o bind /dev "${OPNFV_MNT_DIR}/dev"
+ if [[ "${MCP_OS:-}" =~ ubuntu1804 ]]; then
+ # Ubuntu Bionic (18.04) or newer defaults to using netplan.io, revert it
+ sudo mkdir -p "${OPNFV_MNT_DIR}/run/systemd/resolve"
+ echo "nameserver ${dns_public}" | sudo tee \
+ "${OPNFV_MNT_DIR}/run/systemd/resolve/stub-resolv.conf"
+ sudo chroot "${OPNFV_MNT_DIR}" systemctl stop \
+ systemd-networkd.socket systemd-networkd \
+ networkd-dispatcher systemd-networkd-wait-online systemd-resolved
+ sudo chroot "${OPNFV_MNT_DIR}" systemctl disable \
+ systemd-networkd.socket systemd-networkd \
+ networkd-dispatcher systemd-networkd-wait-online systemd-resolved
+ sudo chroot "${OPNFV_MNT_DIR}" systemctl mask \
+ systemd-networkd.socket systemd-networkd \
+ networkd-dispatcher systemd-networkd-wait-online systemd-resolved
+ sudo chroot "${OPNFV_MNT_DIR}" apt --assume-yes purge nplan netplan.io
+ echo "source /etc/network/interfaces.d/*" | \
+ sudo tee "${OPNFV_MNT_DIR}/etc/network/interfaces"
+ elif [[ "${MCP_OS:-}" =~ centos ]]; then
+ sudo sed -i -e 's/^\(SELINUX\)=.*$/\1=permissive/g' \
+ "${OPNFV_MNT_DIR}/etc/selinux/config"
+ fi
+ sudo rm -f "${OPNFV_MNT_DIR}/etc/resolv.conf"
+ echo "nameserver ${dns_public}" | sudo tee \
+ "${OPNFV_MNT_DIR}/etc/resolv.conf"
+ echo "GRUB_DISABLE_OS_PROBER=true" | \
+ sudo tee -a "${OPNFV_MNT_DIR}/etc/default/grub"
+ sudo sed -i -e 's/^\(GRUB_TIMEOUT\)=.*$/\1=1/g' -e 's/^GRUB_HIDDEN.*$//g' \
+ "${OPNFV_MNT_DIR}/etc/default/grub"
+}
+
+function __apt_repos_pkgs_image {
+ local apt_key_urls=(${1//,/ })
+ local all_repos=(${2//,/ })
+ local pkgs_i=(${3//,/ })
+ local pkgs_r=(${4//,/ })
+ [ -n "${OPNFV_MNT_DIR}" ] || exit 1
+
+ # NOTE: Some features (keys, pin priorities) are not yet supported for non-APT repos
+
+ # APT keys
+ if [[ "${MCP_OS:-}" =~ ubuntu ]] && [ "${#apt_key_urls[@]}" -gt 0 ]; then
+ for apt_key in "${apt_key_urls[@]}"; do
+ sudo chroot "${OPNFV_MNT_DIR}" /bin/bash -c \
+ "wget -qO - '${apt_key}' | apt-key add -"
+ done
+ fi
+ # Additional repositories
+ for repo_line in "${all_repos[@]}"; do
+ # <repo_name>|<repo prio>|deb|[arch=<arch>]|<repo url>|<dist>|<repo comp>
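+ # e.g. (hypothetical values):
+ # 'uca|1100|deb|[arch=amd64]|http://ubuntu-cloud.archive.canonical.com/ubuntu|bionic-updates/stein|main'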
+ local repo=(${repo_line//|/ })
+
+ if [[ "${MCP_OS:-}" =~ centos ]]; then
+ cat <<-EOF | sudo tee "${OPNFV_MNT_DIR}/etc/yum.repos.d/${repo[0]}.repo"
+ [${repo[0]}]
+ baseurl=${repo[3]}
+ enabled=1
+ gpgcheck=0
+ EOF
+ continue
+ fi
+ [ "${#repo[@]}" -gt 5 ] || continue
+ # NOTE: Names and formatting are compatible with Salt linux.system.repo
+ cat <<-EOF | sudo tee "${OPNFV_MNT_DIR}/etc/apt/preferences.d/${repo[0]}"
+
+ Package: *
+ Pin: release a=${repo[-2]}
+ Pin-Priority: ${repo[1]}
+
+ EOF
+ echo "${repo[@]:2}" | sudo tee \
+ "${OPNFV_MNT_DIR}/etc/apt/sources.list.d/${repo[0]}.list"
+ done
+ # Install packages
+ if [ "${#pkgs_i[@]}" -gt 0 ]; then
+ if [[ "${MCP_OS:-}" =~ ubuntu ]]; then
+ sudo DEBIAN_FRONTEND="noninteractive" \
+ chroot "${OPNFV_MNT_DIR}" apt-get update
+ sudo DEBIAN_FRONTEND="noninteractive" FLASH_KERNEL_SKIP="true" \
+ chroot "${OPNFV_MNT_DIR}" apt-get install -y "${pkgs_i[@]}"
+ else
+ sudo chroot "${OPNFV_MNT_DIR}" yum install -y "${pkgs_i[@]}"
+ fi
+ fi
+ # Remove packages
+ if [ "${#pkgs_r[@]}" -gt 0 ]; then
+ if [[ "${MCP_OS:-}" =~ ubuntu ]]; then
+ sudo DEBIAN_FRONTEND="noninteractive" FLASH_KERNEL_SKIP="true" \
+ chroot "${OPNFV_MNT_DIR}" apt-get purge -y "${pkgs_r[@]}"
+ else
+ sudo chroot "${OPNFV_MNT_DIR}" yum remove -y "${pkgs_r[@]}"
+ fi
+ fi
+ # Disable cloud-init metadata service datasource
+ sudo mkdir -p "${OPNFV_MNT_DIR}/etc/cloud/cloud.cfg.d"
+ echo "datasource_list: [ NoCloud, None ]" | sudo tee \
+ "${OPNFV_MNT_DIR}/etc/cloud/cloud.cfg.d/95_real_datasources.cfg"
+}
+
+function __cleanup_vms {
+ # clean up existing nodes
+ for node in $(${VIRSH} list --name | grep -P '\w{3}\d{2}'); do
+ ${VIRSH} destroy "${node}" 2>/dev/null || true
+ done
+ for node in $(${VIRSH} list --name --all | grep -P '\w{3}\d{2}'); do
+ ${VIRSH} domblklist "${node}" | awk '/^.da/ {print $2}' | \
+ xargs --no-run-if-empty -I{} sudo rm -f {}
+ ${VIRSH} undefine "${node}" --remove-all-storage --nvram || \
+ ${VIRSH} undefine "${node}" --remove-all-storage
+ done
+}
+
+##############################################################################
+# public functions
+##############################################################################
+
+function prepare_vms {
+ local base_image_f=$1; shift
+ local base_image=${base_image_f%.xz}
+ local image_dir=$1; shift
+ local repos_pkgs_str=$1; shift # ^-sep list of repos, pkgs to install/rm
+ local image=base_image_opnfv_fuel.img
+ local vcp_image=${image%.*}_vcp.img
+ local _o=${base_image/*\/}
+ [ -n "${image_dir}" ] || exit 1
+
+ cleanup_uefi
+ __cleanup_vms
+ __get_base_image "${base_image_f}" "${image_dir}"
+ [ "${base_image}" == "${base_image_f}" ] || unxz -fk "${image_dir}/${_o}.xz"
+ IFS='^' read -r -a repos_pkgs <<< "${repos_pkgs_str}"
+
+ local _h=$(echo "${repos_pkgs_str}.$(md5sum "${image_dir}/${_o}")" | \
+ md5sum | cut -c -8)
+ local _tmp="${image%.*}.${_h}.img"
+ echo "[INFO] Lookup cache / build patched base image for fingerprint: ${_h}"
+ if [ "${image_dir}/${_tmp}" -ef "${image_dir}/${image}" ]; then
+ echo "[INFO] Patched base image found"
+ else
+ # shellcheck disable=SC2115
+ rm -rf "${image_dir}/${image%.*}"*
+ if [[ ! "${repos_pkgs_str}" =~ ^\^+$ ]]; then
+ echo "[INFO] Patching base image ..."
+ cp "${image_dir}/${_o}" "${image_dir}/${_tmp}"
+ __kernel_modules "${image_dir}"
+ __mount_image "${_tmp}" "${image_dir}"
+ __apt_repos_pkgs_image "${repos_pkgs[@]:0:4}"
+ cleanup_mounts
+ else
+ echo "[INFO] No patching required, using vanilla base image"
+ ln -sf "${image_dir}/${_o}" "${image_dir}/${_tmp}"
+ fi
+ ln -sf "${image_dir}/${_tmp}" "${image_dir}/${image}"
+ fi
+
+ # VCP VMs base image specific changes
+ if [[ ! "${repos_pkgs_str}" =~ \^{3}$ ]] && [ -n "${repos_pkgs[*]:4}" ]; then
+ echo "[INFO] Lookup cache / build patched VCP image for md5sum: ${_h}"
+ _tmp="${vcp_image%.*}.${_h}.img"
+ if [ "${image_dir}/${_tmp}" -ef "${image_dir}/${vcp_image}" ]; then
+ echo "[INFO] Patched VCP image found"
+ else
+ echo "[INFO] Patching VCP image ..."
+ cp "${image_dir}/${image}" "${image_dir}/${_tmp}"
+ __kernel_modules "${image_dir}"
+ __mount_image "${_tmp}" "${image_dir}"
+ __apt_repos_pkgs_image "${repos_pkgs[@]:4:4}"
+ cleanup_mounts
+ ln -sf "${image_dir}/${_tmp}" "${image_dir}/${vcp_image}"
+ fi
+ fi
+}
+
+function create_networks {
+ local all_vnode_networks=("$@")
+ # create required networks
+ for net in "mcpcontrol" "${all_vnode_networks[@]}"; do
+ if ${VIRSH} net-info "${net}" >/dev/null 2>&1; then
+ ${VIRSH} net-destroy "${net}" || true
+ ${VIRSH} net-undefine "${net}"
+ fi
+ # in case of custom network, host should already have the bridge in place
+ if [ -f "virsh_net/net_${net}.xml" ] && \
+ [ ! -d "/sys/class/net/${net}/bridge" ]; then
+ ${VIRSH} net-define "virsh_net/net_${net}.xml"
+ ${VIRSH} net-autostart "${net}"
+ ${VIRSH} net-start "${net}"
+ fi
+ done
+
+ sudo ip link del veth_mcp0 || true
+ sudo ip link del veth_mcp2 || true
+ # Create systemd service for veth creation after reboot
+ FUEL_VETHC_SERVICE="/etc/systemd/system/opnfv-fuel-vethc.service"
+ FUEL_VETHA_SERVICE="/etc/systemd/system/opnfv-fuel-vetha.service"
+ test -f /usr/sbin/ip && PREFIX=/usr/sbin || PREFIX=/sbin
+ cat <<-EOF | sudo tee "${FUEL_VETHC_SERVICE}"
+ [Unit]
+ After=libvirtd.service
+ Before=docker.service
+ [Service]
+ ExecStart=/bin/sh -ec '\
+ ${PREFIX}/ip link add veth_mcp0 type veth peer name veth_mcp1 && \
+ ${PREFIX}/ip link add veth_mcp2 type veth peer name veth_mcp3 && \
+ ${PREFIX}/ip link set veth_mcp0 up mtu 9000 && \
+ ${PREFIX}/ip link set veth_mcp1 up mtu 9000 && \
+ ${PREFIX}/ip link set veth_mcp2 up mtu 9000 && \
+ ${PREFIX}/ip link set veth_mcp3 up mtu 9000'
+ EOF
+ cat <<-EOF | sudo tee "${FUEL_VETHA_SERVICE}"
+ [Unit]
+ StartLimitInterval=200
+ StartLimitBurst=10
+ After=opnfv-fuel-vethc.service
+ [Service]
+ Restart=on-failure
+ RestartSec=10
+ ExecStartPre=/bin/sh -ec '\
+ ${PREFIX}/brctl showstp ${all_vnode_networks[0]} > /dev/null 2>&1 && \
+ ${PREFIX}/brctl showstp ${all_vnode_networks[1]} > /dev/null 2>&1'
+ ExecStart=/bin/sh -ec '\
+ ${PREFIX}/brctl addif ${all_vnode_networks[0]} veth_mcp0 && \
+ ${PREFIX}/brctl addif ${all_vnode_networks[1]} veth_mcp2 && \
+ while ${PREFIX}/ip rule del to ${SALT_MASTER} iif docker0 table 200 2>/dev/null; do true; done && \
+ ${PREFIX}/ip rule add to ${SALT_MASTER} iif docker0 table 200 && \
+ ${PREFIX}/ip route replace ${SALT_MASTER} dev ${all_vnode_networks[0]} table 200'
+ EOF
+ sudo ln -sf "${FUEL_VETHC_SERVICE}" "/etc/systemd/system/multi-user.target.wants/"
+ sudo ln -sf "${FUEL_VETHA_SERVICE}" "/etc/systemd/system/multi-user.target.wants/"
+ sudo systemctl daemon-reload
+ sudo systemctl restart opnfv-fuel-vethc
+ sudo systemctl restart opnfv-fuel-vetha
+}
+
+function cleanup_all {
+ local image_dir=$1; shift
+ local all_vnode_networks=("$@")
+ [ ! -e "${image_dir}/docker-compose" ] || COMPOSE_PREFIX="${image_dir}/"
+
+ cleanup_uefi
+ __cleanup_vms
+ sudo ip link del veth_mcp0 || true
+ sudo ip link del veth_mcp2 || true
+ for net in "mcpcontrol" "${all_vnode_networks[@]}"; do
+ if ${VIRSH} net-info "${net}" >/dev/null 2>&1; then
+ ${VIRSH} net-destroy "${net}" || true
+ ${VIRSH} net-undefine "${net}"
+ fi
+ done
+ sudo rm -f "/etc/systemd/system/multi-user.target.wants/opnfv-fuel"* \
+ "/etc/systemd/system/opnfv-fuel"*
+ sudo systemctl daemon-reload
+ "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml down
+}
+
+function create_vms {
+ local image_dir=$1; shift
+ local image=base_image_opnfv_fuel.img
+ # vnode data should be serialized with the following format:
+ # <name0>,<disks0>,<ram0>,<vcpu0>[,<sockets0>,<cores0>,<threads0>[,<cell0name0>,<cell0memory0>,
+ # <cell0cpus0>,<cell1name0>,<cell1memory0>,<cell1cpus0>]]|<name1>,...'
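+ # e.g. (hypothetical): 'ctl01,100G,16384,8|cmp001,100G;100G,8192,8,2,4,1'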
+ IFS='|' read -r -a vnodes <<< "$1"; shift
+
+ # AArch64: prepare arch specific arguments
+ local virt_extra_args=""
+ if [ "$(uname -i)" = "aarch64" ]; then
+ # No Cirrus VGA on AArch64, use virtio instead
+ virt_extra_args="$virt_extra_args --video=virtio"
+ fi
+
+ # create vms with specified options
+ for serialized_vnode_data in "${vnodes[@]}"; do
+ if [ -z "${serialized_vnode_data}" ]; then continue; fi
+ IFS=',' read -r -a vnode_data <<< "${serialized_vnode_data}"
+ IFS=';' read -r -a disks_data <<< "${vnode_data[1]}"
+
+ # Create config ISO and resize OS disk image for each foundation node VM
+ ./create-config-drive.sh -k "$(basename "${SSH_KEY}").pub" \
+ -u 'user-data.sh' -h "${vnode_data[0]}" "${image_dir}/mcp_${vnode_data[0]}.iso"
+ cp "${image_dir}/${image}" "${image_dir}/mcp_${vnode_data[0]}.qcow2"
+ qemu-img resize "${image_dir}/mcp_${vnode_data[0]}.qcow2" "${disks_data[0]}"
+ # Prepare additional drives if present
+ idx=0
+ virt_extra_storage=
+ for dsize in "${disks_data[@]:1}"; do
+ ((idx+=1))
+ qcow_file="${image_dir}/mcp_${vnode_data[0]}_${idx}.qcow2"
+ qemu-img create "${qcow_file}" "${dsize}"
+ virt_extra_storage+=" --disk path=${qcow_file},format=qcow2,bus=virtio,cache=none,io=native"
+ done
+
+ # prepare VM CPU model, count, topology (optional), NUMA cells (optional, requires topo)
+ local virt_cpu_args=' --cpu host-passthrough'
+ local idx=7 # cell0.name index in serialized data
+ while [ -n "${vnode_data[${idx}]}" ]; do
+ virt_cpu_args+=",${vnode_data[${idx}]}.memory=${vnode_data[$((idx + 1))]}"
+ virt_cpu_args+=",${vnode_data[${idx}]}.cpus=${vnode_data[$((idx + 2))]}"
+ idx=$((idx + 3))
+ done
+ virt_cpu_args+=" --vcpus vcpus=${vnode_data[3]}"
+ if [ -n "${vnode_data[6]}" ]; then
+ virt_cpu_args+=",sockets=${vnode_data[4]},cores=${vnode_data[5]},threads=${vnode_data[6]}"
+ fi
+
+ # prepare network args
+ local vnode_networks=("$@")
+ local net_args=
+ for net in "${vnode_networks[@]}"; do
+ net_args="${net_args} --network bridge=${net},model=virtio"
+ done
+
+ [ ! -e "${image_dir}/virt-manager" ] || VIRT_PREFIX="${image_dir}/virt-manager/"
+ # shellcheck disable=SC2086
+ ${VIRT_PREFIX}${VIRSH/virsh/virt-install} --name "${vnode_data[0]}" \
+ ${virt_cpu_args} --accelerate \
+ ${net_args} \
+ --ram "${vnode_data[2]}" \
+ --disk path="${image_dir}/mcp_${vnode_data[0]}.qcow2",format=qcow2,bus=virtio,cache=none,io=native \
+ ${virt_extra_storage} \
+ --os-type linux --os-variant none \
+ --boot hd --vnc --console pty --autostart --noreboot \
+ --disk path="${image_dir}/mcp_${vnode_data[0]}.iso",device=cdrom \
+ --noautoconsole \
+ ${virt_extra_args}
+ done
+}
+
+function reset_vms {
+ local vnodes=("$@")
+ local cmd_str="ssh ${SSH_OPTS} ${SSH_SALT}"
+
+ # reset non-infrastructure vms, wait for them to come back online
+ for node in "${vnodes[@]}"; do
+ ${VIRSH} reset "${node}"
+ done
+ for node in "${vnodes[@]}"; do
+ wait_for 20.0 "${cmd_str} sudo salt -C '${node}*' saltutil.sync_all"
+ done
+}
+
+function start_vms {
+ local vnodes=("$@")
+
+ # start vms
+ for node in "${vnodes[@]}"; do
+ ${VIRSH} start "${node}"
+ sleep $((RANDOM%5+1))
+ done
+}
+
+function prepare_containers {
+ local image_dir=$1
+ [ -n "${image_dir}" ] || exit 1
+ [ -n "${MCP_REPO_ROOT_PATH}" ] || exit 1
+ [ ! -e "${image_dir}/docker-compose" ] || COMPOSE_PREFIX="${image_dir}/"
+
+ "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml down
+ if [[ ! "${MCP_DOCKER_TAG}" =~ 'verify' ]]; then
+ "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml pull
+ fi
+ # overwrite hosts only on first container up, to preserve cluster nodes
+ sudo cp docker-compose/files/hosts "${image_dir}/hosts"
+ sudo rm -rf "${image_dir}/"{salt,pki,mas01/etc} "${image_dir}/nodes/"*
+ find "${image_dir}/mas01/var/lib/" \
+ -mindepth 2 -maxdepth 2 -not -name boot-resources \
+ -exec sudo rm -rf {} \; || true
+ mkdir -p "${image_dir}/"{salt/master.d,salt/minion.d}
+
+ if grep -q -e 'maas' 'docker-compose/docker-compose.yaml'; then
+ # Apparmor workaround for bind9 inside Docker containers using AUFS
+ for profile in 'usr.sbin.ntpd' 'usr.sbin.named' \
+ 'usr.sbin.dhcpd' 'usr.sbin.tcpdump' 'usr.bin.tcpdump'; do
+ if [ -e "/etc/apparmor.d/${profile}" ] && \
+ [ ! -e "/etc/apparmor.d/disable/${profile}" ]; then
+ sudo ln -sf "/etc/apparmor.d/${profile}" "/etc/apparmor.d/disable/"
+ sudo apparmor_parser -R "/etc/apparmor.d/${profile}" || true
+ fi
+ done
+ fi
+}
+
+function start_containers {
+ local image_dir=$1
+ [ -n "${image_dir}" ] || exit 1
+ [ ! -e "${image_dir}/docker-compose" ] || COMPOSE_PREFIX="${image_dir}/"
+ if grep -q -e 'maas' 'docker-compose/docker-compose.yaml'; then
+ chmod +x docker-compose/files/entrypoint*.sh
+ fi
+ "${COMPOSE_PREFIX}docker-compose" -f docker-compose/docker-compose.yaml up -d
+}
+
+function check_connection {
+ local total_attempts=60
+ local sleep_time=5
+
+ set +e
+ echo '[INFO] Attempting to get into Salt master ...'
+
+ # wait until ssh on Salt master is available
+ # shellcheck disable=SC2034
+ for attempt in $(seq "${total_attempts}"); do
+ # shellcheck disable=SC2086
+ ssh ${SSH_OPTS} "ubuntu@${SALT_MASTER}" uptime
+ case $? in
+ 0) echo "${attempt}> Success"; break ;;
+ *) echo "${attempt}/${total_attempts}> ssh server ain't ready yet, waiting for ${sleep_time} seconds ..." ;;
+ esac
+ sleep $sleep_time
+ done
+ set -e
+}
+
+function cleanup_mounts {
+ # Remove any mounts, loop and/or nbd devs created while patching base image
+ if [ -n "${OPNFV_MNT_DIR}" ] && [ -d "${OPNFV_MNT_DIR}" ]; then
+ if [ -f "${OPNFV_MNT_DIR}/boot/grub/grub.cfg" ]; then
+ # Grub thinks it's running from a live CD
+ sudo sed -i -e 's/^\s*set root=.*$//g' -e 's/^\s*loopback.*$//g' \
+ "${OPNFV_MNT_DIR}/boot/grub/grub.cfg"
+ fi
+ sync
+ if mountpoint -q "${OPNFV_MNT_DIR}"; then
+ sudo umount -l "${OPNFV_MNT_DIR}" || true
+ fi
+ fi
+ if [ -n "${OPNFV_LOOP_DEV}" ] && \
+ sudo losetup "${OPNFV_LOOP_DEV}" 1>&2 > /dev/null; then
+ sudo losetup -d "${OPNFV_LOOP_DEV}"
+ fi
+ if [ -n "${OPNFV_NBD_DEV}" ]; then
+ sudo partx -d "${OPNFV_NBD_DEV}" || true
+ sudo kpartx -d "${OPNFV_NBD_DEV}" || true
+ sudo qemu-nbd -d "${OPNFV_NBD_DEV}" || true
+ fi
+}
diff --git a/mcp/scripts/lib_template.sh b/mcp/scripts/lib_template.sh
index 4b5b0563d..822d14116 100644
--- a/mcp/scripts/lib_template.sh
+++ b/mcp/scripts/lib_template.sh
@@ -23,16 +23,14 @@ function do_templates_scenario {
local target_lab=$1; shift
local target_pod=$1; shift
local lab_config_uri=$1; shift
- local scenario_dir=$1
+ local scenario_dir=$1; shift
+ local extra_yaml=("$@")
BASE_CONFIG_PDF="${lab_config_uri}/labs/${target_lab}/${target_pod}.yaml"
BASE_CONFIG_IDF="${lab_config_uri}/labs/${target_lab}/idf-${target_pod}.yaml"
LOCAL_PDF="${image_dir}/$(basename "${BASE_CONFIG_PDF}")"
LOCAL_IDF="${image_dir}/$(basename "${BASE_CONFIG_IDF}")"
- # Make sample PDF/IDF available via default lab-config (pharos submodule)
- ln -sf "$(readlink -f "../config/labs/local")" "./pharos/labs/"
-
# Expand scenario file and main reclass input (pod_config.yaml) based on PDF
if ! curl --create-dirs -o "${LOCAL_PDF}" "${BASE_CONFIG_PDF}"; then
notify_e "[ERROR] Could not retrieve PDF (Pod Descriptor File)!"
@@ -49,19 +47,22 @@ function do_templates_scenario {
notify_e "[ERROR] IDF does not match yaml schema!"
fi
fi
- if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" \
- -i "$(dirname "$(readlink -f "${PHAROS_IA}")")" \
- -j "${PHAROS_IA}" -v > "${image_dir}/pod_config.yml"; then
- notify_e "[ERROR] Could not convert PDF+IDF to reclass model input!"
- fi
printenv | \
awk '/^(SALT|MCP|MAAS).*=/ { gsub(/=/,": "); print }' >> "${LOCAL_PDF}"
j2args=$(find "${scenario_dir}" -name '*.j2' -exec echo -j {} \;)
# shellcheck disable=SC2086
- if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" ${j2args} -b -v \
+ if ! python3 "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" ${j2args} -b -v \
-i "$(dirname "$(readlink -f "${PHAROS_IA}")")"; then
notify_e '[ERROR] Could not convert j2 scenario definitions!'
fi
+ for _yaml in "${extra_yaml[@]}"; do
+ awk '/^---$/{f=1;next;}f' "${_yaml}" >> "${LOCAL_PDF}"
+ done
+ if ! python3 "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" \
+ -i "$(dirname "$(readlink -f "${PHAROS_IA}")")" \
+ -j "${PHAROS_IA}" -v > "${image_dir}/pod_config.yml"; then
+ notify_e "[ERROR] Could not convert PDF+IDF to reclass model input!"
+ fi
}
# Expand reclass and virsh network templates based on PDF + IDF + others
@@ -79,10 +80,11 @@ function do_templates_cluster {
awk '/^---$/{f=1;next;}f' "${_yaml}" >> "${LOCAL_PDF}"
done
# shellcheck disable=SC2046
- j2args=$(find "${RECLASS_CLUSTER_DIR}" "$(readlink -f virsh_net)" $(readlink -f ./*j2) \
+ j2args=$(find "${RECLASS_CLUSTER_DIR}" "$(readlink -f virsh_net)" \
+ "$(readlink -f docker-compose)" $(readlink -f ./*j2) \
-name '*.j2' -exec echo -j {} \;)
# shellcheck disable=SC2086
- if ! "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" ${j2args} -b -v \
+ if ! python3 "${PHAROS_GEN_CFG}" -y "${LOCAL_PDF}" ${j2args} -b -v \
-i "$(dirname "$(readlink -f "${PHAROS_IA}")")"; then
notify_e '[ERROR] Could not convert PDF to network definitions!'
fi
diff --git a/mcp/scripts/pharos b/mcp/scripts/pharos
-Subproject 2b78607e0252eed27f1ed50134c55e01f12a749
+Subproject 061b5588d40253193eddf76139c361d62e6fbeb
diff --git a/mcp/scripts/requirements_deb.yaml b/mcp/scripts/requirements_deb.yaml
index 04ddcf631..58fc533b0 100644
--- a/mcp/scripts/requirements_deb.yaml
+++ b/mcp/scripts/requirements_deb.yaml
@@ -6,31 +6,41 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-requirements_pkg:
+build:
+ # Common pkgs required for all builds, no matter the type, arch etc.
+ common:
+ - curl
+ - git
+ - make
+ - python3-pip
+deploy:
# Common pkgs required for all deploys, no matter the type, arch etc.
common:
- bridge-utils
+ - build-essential
- cloud-guest-utils
- cpu-checker
- curl
+ - docker-compose
- e2fsprogs
- git
- kpartx
+ - libglib2.0-bin
- libvirt-bin
- - make
- mkisofs
- qemu-kvm
- rsync
- uuid-runtime
- virtinst
# python is indirectly required for PDF parsing
- - python
- - python-ipaddress
- - python-jinja2
- - python-yaml
- - python-jsonschema
+ - python3
+ - python3-jinja2
+ - python3-yaml
+ - python3-jsonschema
# Optional, arch-specific requirements, matched by key name = $(uname -m)
aarch64:
# AArch64 VMs use AAVMF (guest UEFI)
- ipxe-qemu
- qemu-efi
+ # AArch64 CentOS cloud image is archived with xz
+ - xz-utils
diff --git a/mcp/scripts/requirements_rpm.yaml b/mcp/scripts/requirements_rpm.yaml
index 3df4d0a80..b2ee0dcd1 100644
--- a/mcp/scripts/requirements_rpm.yaml
+++ b/mcp/scripts/requirements_rpm.yaml
@@ -6,13 +6,20 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
---
-requirements_pkg:
+build:
+ # Common pkgs required for all builds, no matter the type, arch etc.
+ common:
+ - git
+ - make
+ - python3-pip
+deploy:
# Common pkgs required for all deploys, no matter the type, arch etc.
common:
- bc
- bridge-utils
- cloud-utils-growpart
- curl
+ - docker-compose
- elfutils-libelf-devel
- e2fsprogs
- genisoimage
@@ -30,12 +37,13 @@ requirements_pkg:
- virt-install
- wget
   # python is indirectly required for PDF parsing
- - python
- - python-ipaddress
- - python-jinja2
- - python-yaml
- - python-jsonschema
+ - python3
+ - python36-jinja2
+ - python36-yaml
+ - python36-jsonschema
# Optional, arch-specific requirements, matched by key name = $(uname -m)
aarch64:
# AArch64 VMs use AAVMF (guest UEFI)
- AAVMF
+ # AArch64 CentOS cloud image is archived with xz
+ - xz
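
Both requirements files now share the same layout: a 'build' section for build-only prerequisites and a 'deploy' section whose 'common' list always applies, plus an optional per-architecture list keyed by the output of uname -m. A rough sketch of how a consumer could assemble the deploy package list from this layout; the actual selection logic lives in the ci/deploy scripts and is not shown in this hunk, and PyYAML (python3-yaml) is assumed to be installed:

    #!/bin/bash
    # Sketch: build the deploy-time package list for the current architecture
    # from requirements_deb.yaml (the same idea applies to requirements_rpm.yaml).
    set -e
    REQ_FILE=mcp/scripts/requirements_deb.yaml
    ARCH=$(uname -m)

    # 'common' packages plus any arch-specific extras (e.g. the aarch64 list)
    PKG_LIST=$(python3 -c "import sys, yaml; d = yaml.safe_load(open(sys.argv[1]))['deploy']; print(' '.join(d['common'] + d.get(sys.argv[2], [])))" "${REQ_FILE}" "${ARCH}")

    echo "Would install: ${PKG_LIST}"
    # sudo apt-get install -y ${PKG_LIST}
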
diff --git a/mcp/scripts/salt.sh b/mcp/scripts/salt.sh
deleted file mode 100755
index dc2226e34..000000000
--- a/mcp/scripts/salt.sh
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/bin/bash -e
-##############################################################################
-# Copyright (c) 2017 Mirantis Inc., Enea AB and others.
-# All rights reserved. This program and the accompanying materials
-# are made available under the terms of the Apache License, Version 2.0
-# which accompanies this distribution, and is available at
-# http://www.apache.org/licenses/LICENSE-2.0
-##############################################################################
-#
-# Deploy Salt Master
-#
-
-CI_DEBUG=${CI_DEBUG:-0}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
-F_GIT_ROOT=$(git rev-parse --show-toplevel)
-F_GIT_DIR=$(cd "${F_GIT_ROOT}/mcp" && git rev-parse --git-dir)
-F_GIT_SUBD=${F_GIT_ROOT#${F_GIT_DIR%%/.git*}}
-OPNFV_TMP_DIR="/home/${SALT_MASTER_USER}/opnfv"
-OPNFV_GIT_DIR="/root/opnfv"
-OPNFV_FUEL_DIR="/root/fuel" # Should be in sync with patch.sh, scripts patches
-OPNFV_RDIR="reclass/classes/cluster/all-mcp-arch-common"
-OPNFV_VCP_IMG="mcp/scripts/base_image_opnfv_fuel_vcp.img"
-OPNFV_VCP_DIR="/srv/salt/env/prd/salt/files/control/images"
-LOCAL_GIT_DIR="${F_GIT_ROOT%${F_GIT_SUBD}}"
-LOCAL_PDF_RECLASS=$1; shift
-# shellcheck disable=SC2116,SC2086
-LOCAL_VIRT_NODES=$(echo ${*//cfg01/}) # unquoted to filter space
-NODE_MASK="${LOCAL_VIRT_NODES// /|}"
-
-# push to cfg01 current git repo first (including submodules), at ~ubuntu/opnfv
-# later we move it to ~root/opnfv (and ln as ~root/fuel); delete the temp clone
-remote_tmp="${SSH_SALT}:$(basename "${OPNFV_TMP_DIR}")"
-STORAGE_DIR=$(dirname "${LOCAL_PDF_RECLASS}")
-REL_STORAGE_DIR_PATH=${STORAGE_DIR#${LOCAL_GIT_DIR}}
-if [[ "${REL_STORAGE_DIR_PATH}" == "${STORAGE_DIR}" ]]
-then
- REL_STORAGE_DIR_PATH=""
-fi
-rsync -Erl --delete -e "ssh ${SSH_OPTS}" \
- --exclude={.gitignore,"$REL_STORAGE_DIR_PATH"} \
- "${LOCAL_GIT_DIR}/" "${remote_tmp}/"
-if [ -n "${LOCAL_PDF_RECLASS}" ] && [ -f "${LOCAL_PDF_RECLASS}" ]; then
- rsync -e "ssh ${SSH_OPTS}" "${LOCAL_PDF_RECLASS}" \
- "${remote_tmp}${F_GIT_SUBD}/mcp/${OPNFV_RDIR}/opnfv/"
-fi
-local_vcp_img=$(dirname "${LOCAL_PDF_RECLASS}")/$(basename "${OPNFV_VCP_IMG}")
-if [ -e "${local_vcp_img}" ]; then
- rsync -L -e "ssh ${SSH_OPTS}" "${local_vcp_img}" \
- "${remote_tmp}${F_GIT_SUBD}/${OPNFV_VCP_IMG}"
-fi
-
-# ssh to cfg01
-# shellcheck disable=SC2086,2087
-ssh ${SSH_OPTS} "${SSH_SALT}" bash -s -e << SALT_INSTALL_END
- sudo -i
- set -e
- export TERM=${TERM}
- export CI_DEBUG=${CI_DEBUG}; [[ "${CI_DEBUG}" =~ (false|0) ]] || set -x
-
-  echo -n 'Checking that cloud-init has finished running ...'
- while [ ! -f /var/lib/cloud/instance/boot-finished ]; do echo -n '.'; sleep 1; done
- echo ' done'
-
- mkdir -p /srv/salt /usr/share/salt-formulas/reclass
- rm -rf ${OPNFV_GIT_DIR} ${OPNFV_FUEL_DIR}
- mv ${OPNFV_TMP_DIR} ${OPNFV_GIT_DIR} && chown -R root.root ${OPNFV_GIT_DIR}
- find ${OPNFV_GIT_DIR} -name '.git' -type f | while read f_git; do
- sed -i 's@${LOCAL_GIT_DIR}@${OPNFV_GIT_DIR}@g' \$f_git
- done
- ln -sf ${OPNFV_GIT_DIR}${F_GIT_SUBD} ${OPNFV_FUEL_DIR}
- ln -sf ${OPNFV_FUEL_DIR}/mcp/reclass /srv/salt
- ln -sf ${OPNFV_FUEL_DIR}/mcp/deploy/scripts /srv/salt
- ln -sf ${OPNFV_FUEL_DIR}/mcp/scripts/mcp.rsa $(dirname "${OPNFV_FUEL_DIR}")
-
- cp -r ${OPNFV_FUEL_DIR}/mcp/metadata/service /usr/share/salt-formulas/reclass
- cd /srv/salt/reclass/classes/service && \
- ln -sf /usr/share/salt-formulas/reclass/service/opendaylight
-
- # Armband APT-MK nightly/extra repo for forked & extended reclass
- wget -qO - https://linux.enea.com/apt-mk/public.gpg | apt-key add -
- echo 'deb http://linux.enea.com/apt-mk/xenial nightly extra' > \
- '/etc/apt/sources.list.d/armband_mcp_extra.list'
- apt-get update
-
- cd /srv/salt/scripts
- export DEBIAN_FRONTEND=noninteractive
- echo 'Dpkg::Use-Pty "0";' > /etc/apt/apt.conf.d/90silence-dpkg
- OLD_DOMAIN=\$(grep -sPzo "id: cfg01\.\K(\S*)" /etc/salt/minion.d/minion.conf) || true
- BOOTSTRAP_SALTSTACK_OPTS=" -r -dX stable 2016.11 " \
- MASTER_HOSTNAME=cfg01.${CLUSTER_DOMAIN} DISTRIB_REVISION=nightly \
- EXTRA_FORMULAS="nfs panko gnocchi oslo-templates" \
- ./salt-master-init.sh
- salt-key -Ay
-
- cp -r ${OPNFV_FUEL_DIR}/mcp/salt-formulas/* /usr/share/salt-formulas/env
- cd ${OPNFV_FUEL_DIR}/mcp/patches && ./patch.sh patches.list formulas
- cd ${OPNFV_FUEL_DIR}/mcp/patches && ./patch.sh patches.list reclass
-
- source ${OPNFV_FUEL_DIR}/mcp/scripts/lib.sh
- wait_for 3.0 "salt-call state.apply salt"
-
- # In case scenario changed (and implicitly domain name), re-register minions
- if [ -n "\${OLD_DOMAIN}" ] && [ "\${OLD_DOMAIN}" != "${CLUSTER_DOMAIN}" ]; then
- salt "*.\${OLD_DOMAIN}" cmd.run "grep \${OLD_DOMAIN} -sRl /etc/salt | \
- xargs --no-run-if-empty sed -i 's/\${OLD_DOMAIN}/${CLUSTER_DOMAIN}/g'; \
- service salt-minion restart" || true
- salt-key -yd "*.\${OLD_DOMAIN}"
- salt-key -Ay
- fi
-
- # Init specific to VMs on FN (all for virtual, cfg|mas for baremetal)
- wait_for 3.0 'salt -C "cfg01*" state.apply linux'
- if [[ "${LOCAL_VIRT_NODES}" =~ mas ]]; then
- wait_for 3.0 'salt -C "mas*" test.ping'
- else
- wait_for 3.0 '(for n in ${LOCAL_VIRT_NODES}; do salt -C \${n}.* test.ping || exit; done)'
- fi
- wait_for 3.0 'salt -C "E@^(${NODE_MASK}|cfg01).*" saltutil.sync_all'
- wait_for 3.0 'salt -C "E@^(${NODE_MASK}|cfg01).*" state.apply salt'
-
- wait_for 3.0 'salt -C "E@^(${NODE_MASK}).*" state.sls linux.system,linux.storage'
- wait_for 2.0 'salt -C "E@^(${NODE_MASK}).*" state.sls linux.network'
- salt -C "E@^(${NODE_MASK}).*" state.sls opnfv.route_wrapper
- salt -C "E@^(${NODE_MASK}).*" system.reboot
- wait_for 90.0 'salt -C "E@^(${NODE_MASK}).*" test.ping'
- wait_for 3.0 'salt -C "E@^(${NODE_MASK}).*" pkg.upgrade refresh=False dist_upgrade=True'
-
- wait_for 3.0 'salt -C "E@^(${NODE_MASK}|cfg01).*" state.sls ntp'
-
- if [ -f "${OPNFV_FUEL_DIR}/${OPNFV_VCP_IMG}" ]; then
- mkdir -p "${OPNFV_VCP_DIR}"
- mv "${OPNFV_FUEL_DIR}/${OPNFV_VCP_IMG}" "${OPNFV_VCP_DIR}/"
- fi
-SALT_INSTALL_END
diff --git a/mcp/scripts/user-data.admin.sh.j2 b/mcp/scripts/user-data.sh.j2
index d77773260..8b80e32d0 100644
--- a/mcp/scripts/user-data.admin.sh.j2
+++ b/mcp/scripts/user-data.sh.j2
@@ -7,8 +7,14 @@
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
{%- import 'net_map.j2' as nm with context %}
-rm /etc/salt/minion_id
-rm -f /etc/salt/pki/minion/minion_master.pub
+rm -f /etc/salt/minion_id /etc/salt/pki/minion/minion_master.pub
echo "id: $(hostname).{{ conf.cluster.domain }}" > /etc/salt/minion
echo "master: {{ nm.net_admin | ipnet_hostaddr(nm.start_ip[nm.net_admin] + nm.net_admin_hosts.index('opnfv_infra_config_pxe_admin_address') +1) }}" >> /etc/salt/minion
-service salt-minion restart
+ldconfig
+{%- if 'ubuntu1804' in conf.MCP_OS %}
+systemctl unmask networking.service || true
+systemctl enable networking.service || true
+systemctl start networking.service || true
+{%- endif %}
+systemctl enable salt-minion.service
+systemctl restart salt-minion.service
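
The renamed template is executed by cloud-init as a shell user-data script on first boot; it gives the minion a fully qualified id and points it at the admin-network address of the Salt master before (re)starting salt-minion via systemd. A hypothetical rendering, with the cluster domain and master address invented purely for illustration and the ubuntu1804-only networking.service block omitted:

    #!/bin/bash
    # Hypothetical rendered user-data; domain and master IP are examples only.
    rm -f /etc/salt/minion_id /etc/salt/pki/minion/minion_master.pub
    echo "id: $(hostname).mcp-ovs-noha.local" > /etc/salt/minion
    echo "master: 192.168.11.2" >> /etc/salt/minion
    ldconfig
    systemctl enable salt-minion.service
    systemctl restart salt-minion.service
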
diff --git a/mcp/scripts/virsh_net/net_mcpcontrol.xml.j2 b/mcp/scripts/virsh_net/net_mcpcontrol.xml.j2
deleted file mode 100644
index 569fa7089..000000000
--- a/mcp/scripts/virsh_net/net_mcpcontrol.xml.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-<!--
- Copyright (c) 2017 Mirantis Inc., Enea AB and others.
-
- All rights reserved. This program and the accompanying materials
- are made available under the terms of the Apache License, Version 2.0
- which accompanies this distribution, and is available at
- http://www.apache.org/licenses/LICENSE-2.0
--->
-{#- conf.MCPCONTROL_NET & co are mandatory, defaults are set via globals.sh #}
-{%- set net_mcpcontrol = [conf.MCPCONTROL_NET, conf.MCPCONTROL_PREFIX] | join("/") %}
-<network>
- <name>mcpcontrol</name>
- <bridge name="mcpcontrol"/>
- <forward mode="nat"/>
- <ip address="{{ net_mcpcontrol | ipnet_hostaddr(1) }}" netmask="{{ net_mcpcontrol | ipnet_netmask }}">
- <dhcp>
- <range start="{{ net_mcpcontrol | ipnet_hostaddr(2) }}" end="{{ net_mcpcontrol | ipnet_hostmax }}"/>
- </dhcp>
- </ip>
-</network>
diff --git a/mcp/scripts/xdf_data.sh.j2 b/mcp/scripts/xdf_data.sh.j2
index 8c9d5d969..6aca36f32 100644
--- a/mcp/scripts/xdf_data.sh.j2
+++ b/mcp/scripts/xdf_data.sh.j2
@@ -11,7 +11,14 @@
# Data derived from XDF (PDF/IDF/SDF/etc), used as input in deploy.sh
#
+{%- import 'net_map.j2' as nm with context -%}
+{%- set cluster_states = conf.cluster.states if conf.MCP_NO_DEPLOY_ENVIRONMENT < 2 else [] -%}
{%- set arch = conf[conf.MCP_JUMP_ARCH] -%}
+{%- set V = conf.virtual -%}
+{%- set section_map = {
+ 'control': nm.ctl01.idx,
+ 'compute': nm.cmp001.idx
+} -%}
{%- macro bash_arr(_l) -%}
({%- for n in _l -%}'{{ n }}' {% endfor -%})
@@ -24,25 +31,60 @@
{#- Pack all vnode data as string -#}
{%- macro serialize_vnodes() -%}
- {%- set V = conf.virtual -%}
{%- set arr = [] -%}
- {%- for n in V.nodes -%}
- {%- if n not in V -%}{%- do V.update({n: {}}) -%}{%- endif -%}
- {%- do arr.append(pack([n, V[n].ram or arch.default.ram,
- V[n].vcpus or arch.default.vcpus])) -%}
+ {%- for section in section_map -%}
+ {%- for n in V.nodes[section] or [] -%}
+ {%- if ( section_map[section] < conf.nodes | length and
+ conf.nodes[section_map[section] + loop.index0].node.type == 'virtual' ) -%}
+ {%- if n not in V -%}{%- do V.update({n: {}}) -%}{%- endif -%}
+ {%- set cpu_topo = 'cpu_topology' in V[n] and not conf.MCP_CMP_SS -%}
+ {%- if 'numa' in V[n] and cpu_topo -%}
+ {%- for k, v in V[n].numa.items() -%}
+ {%- set c = pack([k, v.memory, v.cpus]) -%}
+ {%- do V[n].update({'s_numa': c if 's_numa' not in V[n] else pack([c, V[n].s_numa])}) -%}
+ {%- endfor -%}
+ {%- endif -%}
+ {%- do arr.append(pack([n, V[n].disks or arch.default.disks,
+ V[n].ram or arch.default.ram,
+ V[n].vcpus or arch.default.vcpus,
+ '' if not cpu_topo else pack([
+ V[n].cpu_topology.sockets,
+ V[n].cpu_topology.cores,
+ V[n].cpu_topology.threads,
+ '' if 's_numa' not in V[n] else V[n].s_numa])])) -%}
+ {%- endif -%}
+ {%- endfor -%}
{%- endfor -%}
'{{ pack(arr, '|') }}'
{%- endmacro -%}
-{#- Pack apt_pkg data as string -#}
-{%- macro serialize_apt_pkg() -%}
+{#- Return a bash array of node names or a Salt query, optionally filtered by type #}
+{%- macro filter_nodes(type, output_as_query = False, sections = section_map) -%}
{%- set arr = [] -%}
- {%- set sections = [arch.common] -%}
- {%- if conf.MCP_VCP -%}
- {%- do sections.append(arch.control) -%}
+ {%- for section in sections -%}
+ {%- for n in V.nodes[section] or [] -%}
+ {%- if ( section_map[section] < conf.nodes | length and
+ conf.nodes[section_map[section] + loop.index0].node.type in type ) -%}
+ {%- do arr.append(n) -%}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- endfor -%}
+ {%- if output_as_query -%}
+ '{{ arr | join('* or ') }}*'
+ {%- else -%}
+ {{ bash_arr(arr) }}
+ {%- endif -%}
+{%- endmacro -%}
+
+{#- Pack repo_pkg data as string -#}
+{%- macro serialize_repo_pkg() -%}
+ {%- set arr = [] -%}
+ {%- set sections = [arch[conf.MCP_OS].common] -%}
+ {%- if conf.MCP_VCP or '-vcp-' in conf.MCP_DEPLOY_SCENARIO -%}
+ {%- do sections.append(arch[conf.MCP_OS].control) -%}
{%- endif -%}
{%- for c in sections -%}
- {%- do arr.append(pack([pack(c.apt['keys']), pack(c.apt.repos),
+ {%- do arr.append(pack([pack(c.repo['keys']), pack(c.repo.repos),
pack(c.pkg.install), pack(c.pkg.remove)], '^')) -%}
{%- endfor -%}
'{{ pack(arr, '^') }}'
@@ -58,14 +100,18 @@ OPNFV_BRIDGES=(
)
export CLUSTER_DOMAIN={{ conf.cluster.domain }}
-cluster_states={{ bash_arr(conf.cluster.states) }}
-virtual_nodes={{ bash_arr(conf.virtual.nodes) }}
-base_image={{ arch.base_image }}
+dns_public={{ nm.dns_public[0] }}
+cluster_states={{ bash_arr(arch.default.cluster.states + cluster_states) }}
+virtual_nodes={{ filter_nodes('virtual') }}
+control_nodes_query={{ filter_nodes(['baremetal', 'virtual'], True, ['control']) }}
+base_image={{ arch[conf.MCP_OS].base_image }}
-# Serialize vnode data as '<name0>,<ram0>,<vcpu0>|<name1>,<ram1>,<vcpu1>[...]'
+# Serialize vnode data as:
+# <name0>,<disks0>,<ram0>,<vcpu0>[,<sockets0>,<cores0>,<threads0>[,<cell0name0>,
+# <cell0memory0>,<cell0cpus0>,<cell1name0>,<cell1memory0>,<cell1cpus0>]]|<name1>,...
virtual_nodes_data={{ serialize_vnodes() }}
# Serialize repos, packages to (pre-)install/remove for:
# - foundation node VM base image (virtual: all VMs, baremetal: cfg01|mas01)
# - virtualized control plane VM base image (only when VCP is used)
-virtual_repos_pkgs={{ serialize_apt_pkg() }}
+virtual_repos_pkgs={{ serialize_repo_pkg() }}
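
xdf_data.sh.j2 is rendered into a file that deploy.sh sources, so every macro above must emit valid bash. The vnode string packs per-node fields with ',' and separates nodes with '|', as described in the comment, and the compound query emitted by filter_nodes(..., True, ...) is meant for Salt's -C matcher. A small parsing sketch with a made-up value; the real consumer in the deploy library may differ:

    #!/bin/bash
    # Example value in the documented format:
    #   <name>,<disks>,<ram>,<vcpus>[,<cpu topology...>]|<name>,...
    virtual_nodes_data='cfg01,100G,6144,4|mas01,100G,6144,4'

    IFS='|' read -ra vnodes <<< "${virtual_nodes_data}"
    for vnode in "${vnodes[@]}"; do
        IFS=',' read -r name disks ram vcpus topo <<< "${vnode}"
        echo "node=${name} disks=${disks} ram=${ram}MB vcpus=${vcpus} topo=${topo:-none}"
    done

    # A query such as control_nodes_query can be passed straight to salt, e.g.:
    #   salt -C "${control_nodes_query}" test.ping
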
diff --git a/onboarding.txt b/onboarding.txt
deleted file mode 100644
index c9c45ac0d..000000000
--- a/onboarding.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-###########################################################################
-This document is protected/licensed under the following conditions
-(c) Jonas Bjurel (Ericsson AB)
-Licensed under a Creative Commons Attribution 4.0 International License.
-You should have received a copy of the license along with this work.
-If not, see <http://creativecommons.org/licenses/by/4.0/>.
-###########################################################################
-Get on board by filling this out and submitting it for review.
-This is all optional, it's just to give you a taste of the workflow.
-
-Full Name:
-IRC Nick:
-Linux Foundation ID:
-Favourite Open Source project:
-How would you like to help this project:
diff --git a/prototypes/sfc_tacker/README.rst b/prototypes/sfc_tacker/README.rst
deleted file mode 100644
index e219abd34..000000000
--- a/prototypes/sfc_tacker/README.rst
+++ /dev/null
@@ -1,62 +0,0 @@
-.. This work is licensed under a Creative Commons Attribution 4.0 International License.
-.. SPDX-License-Identifier: CC-BY-4.0
-.. (c) 2016 Telefonaktiebolaget L. M. ERICSSON
-
-README SFC + Tacker
-===================
-
-The enclosed shell script builds, deploys, and orchestrates Tacker,
-an open NFV orchestrator with a built-in general-purpose VNF Manager
-to deploy and operate Virtual Network Functions (VNFs).
-
-The provided deployment tool is experimental and not fault
-tolerant, but it is as idempotent as possible. To use it for
-provisioning/deployment, transfer the script to the OpenStack
-primary controller node, where your deployed OpenDaylight SDN
-controller runs. The deployment tool (poc.tacker-up.sh) expects that
-your primary controller can reach all OPNFV/Fuel cluster nodes and
-has internet connectivity, either directly or via an HTTP proxy.
-Note that working and consistent DNS name resolution is a must.
-
-Theory of operation: the deployment tool downloads the source
-Python packages from GitHub, along with a JSON-RPC library developed
-by Josh Marshall, and the tooling needed to build Python/Debian
-packages. When the build succeeds, the script deploys the software
-components to the OPNFV cluster nodes and finally orchestrates the
-deployed Tacker binaries as an infrastructure service. Tacker has two
-components:
-
-#. Tacker server
-
-   - the component that interacts with OpenStack and OpenDaylight.
-
-#. Tacker client
-
-   - a command-line tool that talks to the server; it is
-     available on all cluster nodes and is the access point
-     to the Tacker service. Note that the Tacker
-     distribution provides a plugin for the Horizon
-     OpenStack GUI, but this Horizon plugin is out of the
-     scope of this Proof of Concept setup/deployment.
-
-As mentioned, this compilation contains an OpenDayLight SDN controller
-with Service Function Chaining and Group based Policy features enabled.
-
-To access your cluster information, ssh to the Fuel master (10.20.0.2)
-and issue the command: fuel node.
-Here is the output of an example deployment:
-
-+--------+------------+------------------+-------------+-----------+-------------------+----------------------------------+-------------------+------------+--------------+
-| **id** | **status** | **name** | **cluster** | **ip** | **mac** | **roles** | **pending_roles** | **online** | **group_id** |
-+--------+------------+------------------+-------------+-----------+-------------------+----------------------------------+-------------------+------------+--------------+
-| 1 | ready | Untitled (cc:51) | 1 | 10.20.0.6 | 52:54:00:1e:cc:51 | compute | | True | 1 |
-+--------+------------+------------------+-------------+-----------+-------------------+----------------------------------+-------------------+------------+--------------+
-| 2 | ready | Untitled (e6:3e) | 1 | 10.20.0.4 | 52:54:00:0c:e6:3e | compute | | True | 1 |
-+--------+------------+------------------+-------------+-----------+-------------------+----------------------------------+-------------------+------------+--------------+
-| 3 | ready | Untitled (a2:4c) | 1 | 10.20.0.5 | 52:54:00:d3:a2:4c | compute | | True | 1 |
-+--------+------------+------------------+-------------+-----------+-------------------+----------------------------------+-------------------+------------+--------------+
-| 4 | ready | Untitled (c7:d8) | 1 | 10.20.0.3 | 52:54:00:00:c7:d8 | cinder, controller, opendaylight | | True | 1 |
-+--------+------------+------------------+-------------+-----------+-------------------+----------------------------------+-------------------+------------+--------------+
-
-As you can see, in this case the poc.tacker-up.sh script should be
-transferred to and run on the node having IP address 10.20.0.3.
diff --git a/prototypes/sfc_tacker/poc.tacker-up.sh b/prototypes/sfc_tacker/poc.tacker-up.sh
deleted file mode 100755
index caad3f86a..000000000
--- a/prototypes/sfc_tacker/poc.tacker-up.sh
+++ /dev/null
@@ -1,385 +0,0 @@
-#!/bin/bash
-
-#
-# POC script to build/install/deploy/orchestrate Tacker on an OPNFV Brahmaputra Fuel cluster.
-# The script assumes it runs on the OpenStack primary controller (where OpenDaylight
-# is present) and that there is a Fuel master on 10.20.0.2 reachable with default
-# credentials.
-#
-# author: Ferenc Cserepkei <ferenc.cserepkei@ericsson.com>
-#
-# (c) 2016 Telefonaktiebolaget L. M. ERICSSON
-#
-# All rights reserved. This program and the accompanying materials are made available
-# under the terms of the Apache License, Version 2.0 which accompanies this distribution,
-# and is available at http://www.apache.org/licenses/LICENSE-2.0
-#
-
-
-SSH_OPTIONS=(-o StrictHostKeyChecking=no -o GlobalKnownHostsFile=/dev/null -o UserKnownHostsFile=/dev/null -o LogLevel=error)
-
-MYDIR=$(dirname $(readlink -f "$0"))
-MYREPO="tacker-server"
-CLIREPO="tacker-client"
-DEPREPO="jsonrpclib"
-
-CLIENT=$(echo python-python-tackerclient_*_all.deb)
-JSONRPC=$(echo python-jsonrpclib_*_all.deb)
-SERVER=$(echo python-tacker_*_all.deb)
-
-#fuel admin user name
-fadm="fadm"
-
-# Function checks whether crudini is available, if not - installs
-function chkCrudini () {
- if [[ ! -f '/usr/bin/crudini' ]]; then
- wget -N http://mirrors.kernel.org/ubuntu/pool/universe/p/python-iniparse/python-iniparse_0.4-2.1build1_all.deb
- wget -N http://archive.ubuntu.com/ubuntu/pool/universe/c/crudini/crudini_0.3-1_amd64.deb
- dpkg -i python-iniparse_0.4-2.1build1_all.deb crudini_0.3-1_amd64.deb
- fi
-}
-
-# Function checks whether a python egg is available, if not, installs
-function chkPPkg () {
- PKG="$1"
- IPPACK=$(python - <<'____EOF'
-import pip
-from os.path import join
-for package in pip.get_installed_distributions():
- print(package.location)
- print(join(package.location, *package._get_metadata("top_level.txt")))
-____EOF
-)
- echo "$IPPACK" | grep -q "$PKG"
- if [ $? -ne 0 ];then
- pip install "$PKG"
- fi
-}
-
-# Function setting up the build/deploy environment
-function envSetup () {
- apt-key adv --keyserver keyserver.ubuntu.com --recv-keys BCE5CC461FA22B08
- apt-get update
- apt-get install -y git python-pip python-all debhelper
- chkPPkg stdeb
- chkCrudini
-}
-
-# Function installs jsonrpclib from github
-function deployJsonrpclib () {
- if [[ -e "${MYDIR}/${JSONRPC}" ]]; then
- echo "$JSONRPC exists."
- return 1
- fi
- cd $MYDIR
- rm -rf $DEPREPO
- git clone https://github.com/joshmarshall/jsonrpclib.git $DEPREPO
- cd $DEPREPO
- dpkg --purge python-jsonrpclib
- python setup.py --command-packages=stdeb.command bdist_deb
- cd "deb_dist"
- JSONRPC=$(echo python-jsonrpclib_*_all.deb)
- cp $JSONRPC $MYDIR
- dpkg -i $JSONRPC
-}
-
-# Function builds Tacker server from github
-function buildTackerServer () {
- if [[ -e "${MYDIR}/${SERVER}" ]]; then
- echo "$SERVER exists."
- return 1
- fi
- cd $MYDIR
- rm -rf $MYREPO
- git clone -b 'SFC_colorado' https://github.com/trozet/tacker.git $MYREPO
- cd $MYREPO
- patch -p 1 <<EOFSCP
-diff -ruN a/setup.cfg b/setup.cfg
---- a/setup.cfg 2016-02-08 10:54:37.416525934 +0100
-+++ b/setup.cfg 2016-02-08 10:55:29.293428896 +0100
-@@ -22,14 +22,14 @@
- packages =
- tacker
- data_files =
-- etc/tacker =
-+ /etc/tacker =
- etc/tacker/api-paste.ini
- etc/tacker/policy.json
- etc/tacker/tacker.conf
- etc/tacker/rootwrap.conf
-- etc/rootwrap.d =
-+ /etc/rootwrap.d =
- etc/tacker/rootwrap.d/servicevm.filters
-- etc/init.d = etc/init.d/tacker-server
-+ /etc/init.d = etc/init.d/tacker-server
-
- [global]
- setup-hooks =
-EOFSCP
- dpkg --purge python-tacker
- python setup.py --command-packages=stdeb.command bdist_deb
-}
-
-# Function corrects and installs the Tacker-server debian package
-function blessPackage () {
- pushd "${MYDIR}/${MYREPO}/deb_dist"
- SERVER=$(echo python-tacker_*_all.deb)
- popd
- DEBFILE="${MYDIR}/${MYREPO}/deb_dist/${SERVER}"
- TMPDIR=$(mktemp -d /tmp/deb.XXXXXX) || exit 1
- OUTPUT=$(basename "$DEBFILE")
- if [[ -e "${MYDIR}/${OUTPUT}" ]]; then
- echo "$OUTPUT exists."
- rm -r "$TMPDIR"
- return 1
- fi
- dpkg-deb -x "$DEBFILE" "$TMPDIR"
- dpkg-deb --control "$DEBFILE" "${TMPDIR}/DEBIAN"
- cd "$TMPDIR"
- patch -p 1 <<EOFDC
-diff -ruN DEBIAN/control DEBIAN/control
---- a/DEBIAN/control 2016-08-19 11:53:10.000000000 +0000
-+++ b/DEBIAN/control 2016-08-19 12:01:49.629096317 +0000
-@@ -4,7 +4,7 @@
- Architecture: all
- Maintainer: OpenStack <openstack-dev@lists.openstack.org>
- Installed-Size: 1566
--Depends: python (>= 2.7), python (<< 2.8), python:any (>= 2.7.1-0ubuntu2), python-pbr, python-paste, python-pastedeploy, python-routes, python-anyjson, python-babel, python-eventlet, python-greenlet, python-httplib2, python-requests, python-iso8601, python-jsonrpclib, python-jinja2, python-kombu, python-netaddr, python-sqlalchemy (>= 1.0~), python-sqlalchemy (<< 1.1), python-webob, python-heatclient, python-keystoneclient, alembic, python-six, python-stevedore, python-oslo.config, python-oslo.messaging-, python-oslo.rootwrap, python-novaclient
-+Depends: python (>= 2.7), python (<< 2.8), python:any (>= 2.7.1-0ubuntu2), python-pbr, python-paste, python-pastedeploy, python-routes, python-anyjson, python-babel, python-eventlet, python-greenlet, python-httplib2, python-requests, python-iso8601, python-jsonrpclib, python-jinja2, python-kombu, python-netaddr, python-sqlalchemy (>= 1.0~), python-sqlalchemy (<< 1.1), python-webob, python-heatclient, python-keystoneclient, alembic, python-six, python-stevedore, python-oslo.config, python-oslo.messaging, python-oslo.rootwrap, python-novaclient
- Section: python
- Priority: optional
- Description: OpenStack servicevm/device manager
-EOFDC
- cd "$MYDIR"
- echo "Patching deb..."
- dpkg -b "$TMPDIR" "${MYDIR}/${SERVER}"
- rm -r "$TMPDIR"
- dpkg -i "${MYDIR}/${SERVER}"
-}
-
-# Function deploys Tacker-server (installs missing mandatory files: upstart, default)
-function deployTackerServer () {
- rm -rf /etc/default/tacker-server
- cat > /etc/default/tacker-server <<EOFTD
-ENABLED=true
-PIDFILE=/var/run/tacker/tacker-server.pid
-LOGFILE=/var/log/tacker/tacker-server.log
-PATH="\${PATH:+\$PATH:}/usr/sbin:/sbin"
-TMPDIR=/var/lib/tacker/tmp
-EOFTD
- rm -rf /etc/init/tacker.conf
- cat > /etc/init/tacker.conf <<EOFSC
-# tacker-server - Provides the Tacker servicevm/device manager service
-description "Openstack Tacker Server"
-author "Ferenc Cserepkei <ferenc.cserepkei@ericsson.com>"
-
-start on runlevel [2345]
-stop on runlevel [!2345]
-
-respawn
-respawn limit 20 5
-limit nofile 65535 65535
-
-chdir /var/run
-
-pre-start script
- # stop job from continuing if no config file found for daemon
- [ ! -f /etc/default/tacker-server ] && { stop; exit 0; }
- [ ! -f /etc/tacker/tacker.conf ] && { stop; exit 0; }
-
- # source the config file
- . /etc/default/tacker-server
-
- # stop job from continuing if admin has not enabled service in
- # config file.
- [ -z "\$ENABLED" ] && { stop; exit 0; }
-
- mkdir -p /var/run/tacker
- mkdir -p /var/log/tacker
- echo "Starting tacker server"
-end script
-
-pre-stop script
- echo "Stopping tacker server"
-end script
-
-exec /usr/bin/python /usr/bin/tacker-server --log-file=/var/log/tacker/tacker-server.log -v -d --config-file=/etc/tacker/tacker.conf
-EOFSC
-}
-
-# Function installs python-tackerclient from github
-function deployTackerClient() {
- if [[ -e "${MYDIR}/${CLIENT}" ]]; then
- echo "$CLIENT exists."
- return 1
- fi
- cd $MYDIR
- rm -rf $CLIREPO
- dpkg --purge python-tackerclient
- git clone -b 'SFC_refactor' https://github.com/trozet/python-tackerclient.git $CLIREPO
- cd $CLIREPO
- python setup.py --command-packages=stdeb.command bdist_deb
- cd "deb_dist"
- CLIENT=$(echo python-python-tackerclient_*_all.deb)
- cp $CLIENT $MYDIR
- dpkg -i "${MYDIR}/${CLIENT}"
-}
-
-# Function removes the cloned git repositories
-function remove_repo () {
- if [[ -d "${MYDIR}/${1}" ]]; then
- rm -r "$1"
- fi
-}
-
-# Function copies and installs the built artifact on all remaining cluster nodes
-function populate_client() {
- wget -O deb http://archive.ubuntu.com/ubuntu/pool/universe/s/sshpass/sshpass_1.05-1_amd64.deb &&\
- dpkg -i deb &&\
- rm deb
-
- clusternodes=$(sshpass -p "r00tme" ssh ${SSH_OPTIONS[@]} root@10.20.0.2 fuel node | cut -d '|' -f 5 | grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}" )
- myaddr=$(ifconfig br-fw-admin | sed -n '/inet addr/s/.*addr.\([^ ]*\) .*/\1/p')
- for anode in $clusternodes ; do
- if [ "$anode" != "$myaddr" ] ; then
- echo "Installing $CLIENT on $anode"
- scp ${SSH_OPTIONS[@]} -i "${MYDIR}/.ssh/id_rsa" ${CLIENT} ${fadm}@${anode}:${CLIENT}
- ssh ${SSH_OPTIONS[@]} -i "${MYDIR}/.ssh/id_rsa" ${fadm}@${anode} sudo dpkg -i ${CLIENT}
- ssh ${SSH_OPTIONS[@]} -i "${MYDIR}/.ssh/id_rsa" ${fadm}@${anode} rm ${CLIENT}
- fi
- done
-}
-
-# Function orchestrates the Tacker service
-function orchestrate () {
- rm -rf /etc/puppet/modules/tacker
- pushd /etc/puppet/modules
- git clone https://github.com/trozet/puppet-tacker.git tacker
- rm -rf /etc/puppet/modules/tacker/.git
- popd
-
- ### Facts ###
-
- # Port(s) Protocol ServiceDetails Source
- # 8805-8872 tcp,udp Unassigned IANA
- bind_port='8808'
-
- auth_uri=$(crudini --get '/etc/heat/heat.conf' 'keystone_authtoken' 'auth_uri')
- identity_uri=$(crudini --get '/etc/heat/heat.conf' 'keystone_authtoken' 'identity_uri')
- int_addr=$(ifconfig br-mesh | sed -n '/inet addr/s/.*addr.\([^ ]*\) .*/\1/p')
- odl_addr=$(hiera management_vip)
- mgmt_addr=$(ifconfig br-mgmt | sed -n '/inet addr/s/.*addr.\([^ ]*\) .*/\1/p')
- pub_addr=$(ifconfig br-ex-lnx | sed -n '/inet addr/s/.*addr.\([^ ]*\) .*/\1/p')
- rabbit_host=$(crudini --get '/etc/heat/heat.conf' 'oslo_messaging_rabbit' 'rabbit_hosts'| cut -d ':' -f 1)
- rabbit_password=$(crudini --get '/etc/heat/heat.conf' 'oslo_messaging_rabbit' 'rabbit_password')
- sql_host=$(hiera database_vip)
- database_connection="mysql://tacker:tacker@${sql_host}/tacker"
- internal_url="http://${int_addr}:${bind_port}"
- admin_url="http://${mgmt_addr}:${bind_port}"
- public_url="http://${pub_addr}:${bind_port}"
- heat_api_vip=$(crudini --get '/etc/heat/heat.conf' 'heat_api' 'bind_host')
- allowed_hosts="[ '${sql_host}', '${HOSTNAME%%.domain.tld}', 'localhost', '127.0.0.1', '%' ]"
- heat_uri="http://${heat_api_vip}:8004/v1"
- odl_port='8282'
- service_tenant='services'
- myRegion='RegionOne'
- myPassword='tacker'
-
- cat > configure_tacker.pp << EOF
- class mysql::config {}
- include mysql::config
- class mysql::server {}
- include mysql::server
-
- class { 'tacker':
- package_ensure => 'absent',
- client_package_ensure => 'absent',
- bind_port => '${bind_port}',
- keystone_password => '${myPassword}',
- keystone_tenant => '${service_tenant}',
- auth_uri => '${auth_uri}',
- identity_uri => '${identity_uri}',
- database_connection => '${database_connection}',
- rabbit_host => '${rabbit_host}',
- rabbit_password => '${rabbit_password}',
- heat_uri => '${heat_uri}',
- opendaylight_host => '${odl_addr}',
- opendaylight_port => '${odl_port}',
- }
-
- class { 'tacker::db::mysql':
- password => '${myPassword}',
- allowed_hosts => ${allowed_hosts},
- }
-
- class { 'tacker::keystone::auth':
- password => '${myPassword}',
- tenant => '${service_tenant}',
- admin_url => '${admin_url}',
- internal_url => '${internal_url}',
- public_url => '${public_url}',
- region => '${myRegion}',
- }
-EOF
-
- apt-get install -y mysql-client-5.5
- cat > /usr/bin/tacker-manage <<EOFAKEMANAGE
-#!/bin/bash
-EOFAKEMANAGE
- chmod +x /usr/bin/tacker-manage
- puppet apply configure_tacker.pp
- rm -f tackerc
- cat > tackerc <<EOFRC
-#!/bin/sh
-export LC_ALL=C
-export OS_NO_CACHE='true'
-export OS_TENANT_NAME='${service_tenant}'
-export OS_PROJECT_NAME='${service_tenant}'
-export OS_USERNAME='tacker'
-export OS_PASSWORD='${myPassword}'
-export OS_AUTH_URL='${auth_uri}'
-export OS_DEFAULT_DOMAIN='default'
-export OS_AUTH_STRATEGY='keystone'
-export OS_REGION_NAME='${myRegion}'
-export TACKER_ENDPOINT_TYPE='internalURL'
-EOFRC
- chmod +x tackerc
-}
-
-# Function copies and installs the built environment settings on all remaining cluster nodes
-function populate_rc() {
- wget -O deb http://archive.ubuntu.com/ubuntu/pool/universe/s/sshpass/sshpass_1.05-1_amd64.deb &&\
- dpkg -i deb &&\
- rm deb
-
- clusternodes=$(sshpass -p "r00tme" ssh ${SSH_OPTIONS[@]} root@10.20.0.2 fuel node | cut -d '|' -f 5 | grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}" )
- myaddr=$(ifconfig br-fw-admin | sed -n '/inet addr/s/.*addr.\([^ ]*\) .*/\1/p')
- for anode in $clusternodes ; do
- if [ "$anode" != "$myaddr" ] ; then
- echo "Populating settings to $anode"
- scp ${SSH_OPTIONS[@]} -i "${MYDIR}/.ssh/id_rsa" tackerc ${fadm}@${anode}:tackerc
- fi
- done
-}
-
-
-envSetup
-deployTackerClient
-deployJsonrpclib
-buildTackerServer
-blessPackage
-deployTackerServer
-populate_client
-orchestrate
-populate_rc
-
-git clone https://github.com/trozet/sfc-random.git
-
-remove_repo "$MYREPO"
-remove_repo "$DEPREPO"
-remove_repo "$CLIREPO"
-
-echo "Built: ${MYDIR}/${OUTPUT}"
-echo "Built: ${MYDIR}/${CLIENT}"
-echo "Built: ${MYDIR}/${JSONRPC}"
-echo "tackerc - mandatory environmental parameters file created"
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 000000000..d7b4d2acd
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,26 @@
+##############################################################################
+# Copyright (c) 2018 Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+[tox]
+minversion = 1.6
+envlist =
+ docs,
+ docs-linkcheck
+skipsdist = true
+
+[testenv:docs]
+basepython = python3
+deps = -rdocs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+whitelist_externals = echo
+
+[testenv:docs-linkcheck]
+basepython = python3
+deps = -rdocs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck
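
With this tox.ini in place, the documentation jobs can be reproduced locally. Assuming tox is installed and the commands are run from the repository root:

    # Build the HTML docs; output lands in docs/_build/html
    tox -e docs

    # Check that external links in the docs resolve
    tox -e docs-linkcheck
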